diff --git "a/6655.jsonl" "b/6655.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6655.jsonl"
@@ -0,0 +1,2073 @@
+{"seq_id":"74442184888","text":"import os\nimport re\nimport sys\nfrom file_utils import file_utils\n\ndef get_dir_files(path):\n    ctime = lambda f: os.stat(os.path.join(path, f)).st_ctime\n    files = list(sorted(file_utils.get_files_in_directory_skip_hidden(path), key=ctime)) # files listed in created order\n\n    regex = re.compile(r'Scan(\\s\\d+)?')\n    return list(filter(regex.match, files)) # don't rename files already renamed\n\n\ndef rename_files(files, starting_nbr, tmp_dir):\n    for filename in files:\n        starting_nbr += 1 # just incase i forget to bump the nbr...\n        print ('renaming {}'.format(filename))\n        file_utils.move_file(tmp_dir + filename, tmp_dir + 'scan_' + str(starting_nbr) + '.jpeg')\n\n\ndef main():\n    starting_nbr = 100844 # this could be figured out... meh\n    tmp_dir = '/tmp/'\n\n    files = get_dir_files(tmp_dir)\n    rename_files(files, starting_nbr, tmp_dir)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lynzt/python_rename_scanned","sub_path":"rename_pics.py","file_name":"rename_pics.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74627126009","text":"import asyncio\nfrom fastapi import FastAPI, Depends\nimport logging\nfrom uvicorn.config import Config\nfrom uvicorn.server import Server\nimport config\nfrom config import (\n    PAGE_URL,\n)\nfrom dependencies import get_pool\nfrom routers.main_route import router as main_router\n\n\nlogger = logging.getLogger('yandex_maps_api')\n\n\nyandex_maps_kwargs = dict(\n    page_url=PAGE_URL,\n    headless=False,\n)\n\napp = FastAPI(\n    openapi_prefix=\"/coordinates\",\n    dependencies=[Depends(get_pool)],\n)\napp.include_router(main_router)\n\n\n# setup uvicorn logger\nlogging.getLogger(\"uvicorn.access\").setLevel(logging.WARNING)\n\n\n@app.on_event(\"startup\")\nasync def startup_event() -> None:\n    pass\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_event() -> None:\n    pool = get_pool()\n    pool.close()\n    try:\n        await app.state.r.close()\n    except:\n        pass\n\nasync def run_fastapi():\n    config = Config(app=app, host=\"0.0.0.0\", port=9000, lifespan=\"on\")\n    server = Server(config)\n    await server.serve()\n\n\nasync def main() -> None:\n    pool = get_pool()\n    await asyncio.gather(\n        pool.add_model(1, **yandex_maps_kwargs),\n        run_fastapi(),\n    )\n\nif __name__ == \"__main__\":\n    asyncio.run(main())","repo_name":"Hudrolax/coordinates","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37881050095","text":"import pytest\nfrom jenkinsapi.jenkins import Jenkins\nfrom jenkinsapi.nodes import Nodes\nfrom jenkinsapi.node import Node\n\n\nDATA0 = {\n    'assignedLabels': [{}],\n    'description': None,\n    'jobs': [],\n    'mode': 'NORMAL',\n    'nodeDescription': 'the master Jenkins node',\n    'nodeName': '',\n    'numExecutors': 2,\n    'overallLoad': {},\n    'primaryView': {'name': 'All', 'url': 'http://halob:8080/'},\n    'quietingDown': False,\n    'slaveAgentPort': 0,\n    'unlabeledLoad': {},\n    'useCrumbs': False,\n    'useSecurity': False,\n    'views': [\n        {'name': 'All', 'url': 'http://halob:8080/'},\n        {'name': 'FodFanFo', 'url': 'http://halob:8080/view/FodFanFo/'}\n    ]\n}\n\nDATA1 = {\n    'busyExecutors': 0,\n    'computer': [\n        {\n            'actions': [],\n            'displayName': 'master',\n            'executors': [{}, {}],\n            'icon': 'computer.png',\n            'idle': True,\n            'jnlpAgent': False,\n            'launchSupported': True,\n            'loadStatistics': {},\n            'manualLaunchAllowed': True,\n            'monitorData': {\n                'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',\n                'hudson.node_monitors.ClockMonitor': {'diff': 0},\n                'hudson.node_monitors.DiskSpaceMonitor': {\n                    'path': '/var/lib/jenkins',\n                    'size': 671924924416\n                },\n                'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},\n                'hudson.node_monitors.SwapSpaceMonitor': {\n                    'availablePhysicalMemory': 3174686720,\n                    'availableSwapSpace': 17163087872,\n                    'totalPhysicalMemory': 16810180608,\n                    'totalSwapSpace': 17163087872\n                },\n                'hudson.node_monitors.TemporarySpaceMonitor': {\n                    'path': '/tmp',\n                    'size': 671924924416\n                }\n            },\n            'numExecutors': 2,\n            'offline': False,\n            'offlineCause': None,\n            'oneOffExecutors': [],\n            'temporarilyOffline': False\n        },\n        {\n            'actions': [],\n            'displayName': 'bobnit',\n            'executors': [{}],\n            'icon': 'computer-x.png',\n            'idle': True,\n            'jnlpAgent': False,\n            'launchSupported': True,\n            'loadStatistics': {},\n            'manualLaunchAllowed': True,\n            'monitorData': {\n                'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',\n                'hudson.node_monitors.ClockMonitor': {'diff': 4261},\n                'hudson.node_monitors.DiskSpaceMonitor': {\n                    'path': '/home/sal/jenkins',\n                    'size': 169784860672\n                },\n                'hudson.node_monitors.ResponseTimeMonitor': {'average': 29},\n                'hudson.node_monitors.SwapSpaceMonitor': {\n                    'availablePhysicalMemory': 4570710016,\n                    'availableSwapSpace': 12195983360,\n                    'totalPhysicalMemory': 8374497280,\n                    'totalSwapSpace': 12195983360\n                },\n                'hudson.node_monitors.TemporarySpaceMonitor': {\n                    'path': '/tmp',\n                    'size': 249737277440\n                }\n            },\n            'numExecutors': 1,\n            'offline': True,\n            'offlineCause': {},\n            'oneOffExecutors': [],\n            'temporarilyOffline': False\n        },\n        {\n            'actions': [],\n            'displayName': 'halob',\n            'executors': [{}],\n            'icon': 'computer-x.png',\n            'idle': True,\n            'jnlpAgent': True,\n            'launchSupported': False,\n            'loadStatistics': {},\n            'manualLaunchAllowed': True,\n            'monitorData': {\n                'hudson.node_monitors.ArchitectureMonitor': None,\n                'hudson.node_monitors.ClockMonitor': None,\n                'hudson.node_monitors.DiskSpaceMonitor': None,\n                'hudson.node_monitors.ResponseTimeMonitor': None,\n                'hudson.node_monitors.SwapSpaceMonitor': None,\n                'hudson.node_monitors.TemporarySpaceMonitor': None\n            },\n            'numExecutors': 1,\n            'offline': True,\n            'offlineCause': None,\n            'oneOffExecutors': [],\n            'temporarilyOffline': False\n        }\n    ],\n    'displayName': 'nodes',\n    'totalExecutors': 2\n}\n\nDATA2 = {\n    'actions': [],\n    'displayName': 'master',\n    'executors': [{}, {}],\n    'icon': 'computer.png',\n    'idle': True,\n    'jnlpAgent': False,\n    'launchSupported': True,\n    'loadStatistics': {},\n    'manualLaunchAllowed': True,\n    'monitorData': {\n        'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',\n        'hudson.node_monitors.ClockMonitor': {'diff': 0},\n        'hudson.node_monitors.DiskSpaceMonitor': {\n            'path': '/var/lib/jenkins',\n            'size': 671942561792\n        },\n        'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},\n        'hudson.node_monitors.SwapSpaceMonitor': {\n            'availablePhysicalMemory': 2989916160,\n            'availableSwapSpace': 17163087872,\n            'totalPhysicalMemory': 16810180608,\n            'totalSwapSpace': 17163087872\n        },\n        'hudson.node_monitors.TemporarySpaceMonitor': {\n            'path': '/tmp',\n            'size': 671942561792\n        }\n    },\n    'numExecutors': 2,\n    'offline': False,\n    'offlineCause': None,\n    'oneOffExecutors': [],\n    'temporarilyOffline': False\n}\n\nDATA3 = {\n    'actions': [],\n    'displayName': 'halob',\n    'executors': [{}],\n    'icon': 'computer-x.png',\n    'idle': True,\n    'jnlpAgent': True,\n    'launchSupported': False,\n    'loadStatistics': {},\n    'manualLaunchAllowed': True,\n    'monitorData': {\n        'hudson.node_monitors.ArchitectureMonitor': None,\n        'hudson.node_monitors.ClockMonitor': None,\n        'hudson.node_monitors.DiskSpaceMonitor': None,\n        'hudson.node_monitors.ResponseTimeMonitor': None,\n        'hudson.node_monitors.SwapSpaceMonitor': None,\n        'hudson.node_monitors.TemporarySpaceMonitor': None},\n    'numExecutors': 1,\n    'offline': True,\n    'offlineCause': None,\n    'oneOffExecutors': [],\n    'temporarilyOffline': False\n}\n\n\n@pytest.fixture(scope='function')\ndef nodes(monkeypatch):\n    def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument\n        return DATA0\n\n    monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)\n\n    def fake_nodes_poll(cls, tree=None): # pylint: disable=unused-argument\n        return DATA1\n\n    monkeypatch.setattr(Nodes, '_poll', fake_nodes_poll)\n    jenkins = Jenkins('http://foo:8080')\n    return jenkins.get_nodes()\n\n\ndef fake_node_poll(self, tree=None): # pylint: disable=unused-argument\n    \"\"\"\n    Fakes a poll of data by returning the correct section of the DATA1 test block.\n    \"\"\"\n    for node_poll in DATA1['computer']:\n        if node_poll['displayName'] == self.name:\n            return node_poll\n    return DATA2\n\n\ndef test_repr(nodes):\n    # Can we produce a repr string for this object\n    repr(nodes)\n\n\ndef test_baseurl(nodes):\n    assert nodes.baseurl == 'http://foo:8080/computer'\n\n\ndef test_get_master_node(nodes, monkeypatch):\n    monkeypatch.setattr(Node, '_poll', fake_node_poll)\n\n    node = nodes['master']\n    assert isinstance(node, Node)\n\n\ndef test_get_nonmaster_node(nodes, monkeypatch):\n    monkeypatch.setattr(Node, '_poll', fake_node_poll)\n\n    node = nodes['halob']\n    assert isinstance(node, Node)\n\n\ndef test_iterkeys(nodes):\n    expected_names = set(['master', 'bobnit', 'halob'])\n    actual_names = set([n for n in nodes.iterkeys()])\n    assert actual_names == expected_names\n\n\ndef test_keys(nodes):\n    expected_names = set(['master', 'bobnit', 'halob'])\n    actual_names = set(nodes.keys())\n    assert actual_names == expected_names\n\n\ndef items_test_case(nodes_method, monkeypatch):\n    monkeypatch.setattr(Node, '_poll', fake_node_poll)\n\n    expected_names = set(['master', 'bobnit', 'halob'])\n\n    actual_names = set()\n    for name, node in nodes_method():\n        assert name == node.name\n        assert isinstance(node, Node)\n\n        actual_names.add(name)\n\n    assert actual_names == expected_names\n\n\ndef test_iteritems(nodes, monkeypatch):\n    items_test_case(nodes.iteritems, monkeypatch)\n\n\ndef test_items(nodes, monkeypatch):\n    items_test_case(nodes.items, monkeypatch)\n\n\ndef values_test_case(nodes_method, monkeypatch):\n    monkeypatch.setattr(Node, '_poll', fake_node_poll)\n\n    expected_names = set(['master', 'bobnit', 'halob'])\n\n    actual_names = set()\n    for node in nodes_method():\n        assert isinstance(node, Node)\n        actual_names.add(node.name)\n\n    assert actual_names == expected_names\n\n\ndef test_itervalues(nodes, monkeypatch):\n    values_test_case(nodes.itervalues, monkeypatch)\n\n\ndef test_values(nodes, monkeypatch):\n    values_test_case(nodes.values, monkeypatch)\n","repo_name":"ahs3/python-jenkinsapi","sub_path":"jenkinsapi_tests/unittests/test_nodes.py","file_name":"test_nodes.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"70101606649","text":"from PIL import Image\nfrom detector import DetectFace\nfrom detector_config import DetectorConfig\n\n#_______ Initialize the CONFIGURE Class _______#\nCONFIG = DetectorConfig()\n\n#_______ Import the Configuration Object _______#\nDETECTOR_SETTINGS = CONFIG.configuration()\n\nprint(\"========================= CONFIGURATIONS LOADED SUCCESSFULLY =========================\")\n\n#_______ Initialize the Detector Class _______#\nDETECTOR = DetectFace(input_image=DETECTOR_SETTINGS.INPUT_IMAGE,\n                      device=DETECTOR_SETTINGS.DEVICE,\n                      output_image=DETECTOR_SETTINGS.OUTPUT_IMAGE,\n                      output_image_size=DETECTOR_SETTINGS.OUTPUT_IMAGE_SIZE)\n\n\n#_______ Execute the main Face Detection function _______#\n# GET_FACE_DETAILS = DETECTOR.run_MTCNN()\nGET_FACE_DETAILS = DETECTOR.run_MTCNN(bool_get_bounding_box=DETECTOR_SETTINGS.BOOL_GET_BOUNDING_BOX , \n                                      bool_get_probability=DETECTOR_SETTINGS.BOOL_GET_PROBABILITY ,\n                                      bool_get_landmarks=DETECTOR_SETTINGS.BOOL_GET_LANDMARKS)\n \n\n#_______ Execute this function to save the detected face as image _______#\nDETECTOR.get_face(bounding_box=GET_FACE_DETAILS.BOUNDING_BOX)\n\n","repo_name":"Desmond167/Face_Recognition","sub_path":"FACE_DETECTION/01_run_face_detector.py","file_name":"01_run_face_detector.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"44978720052","text":"import time\n\nfrom src.app import App\n\n\nif __name__ == \"__main__\":\n    app = App('projects_and_contracts.db')\n    app.db.create_tables()\n\n    while True:\n        print(\"1. Create Contract\")\n        print(\"2. Confirm Contract\")\n        print(\"3. Complete Contract\")\n        print(\"4. Add Contract to Project\")\n        print(\"5. Create Project\")\n        print(\"6. Print Projects\")\n        print(\"7. Print Contracts\")\n        print(\"8. Quit\")\n\n        choice = input(\"Enter option number: \")\n\n        if choice == '1':\n            name = input(\"Enter contract name: \")\n            app.create_contract(name)\n            print(\"Contract created successfully\")\n        elif choice == '2':\n            contract_id = input(\"Enter contract id: \")\n            app.confirm_contract(int(contract_id))\n            print(\"Contract confirmed successfully\")\n        elif choice == '3':\n            contract_id = input(\"Enter contract id: \")\n            app.complete_contract(int(contract_id))\n            print(\"Contract completed successfully\")\n        elif choice == '4':\n            contract_id = input(\"Enter contract id: \")\n            project_id = input(\"Enter project id: \")\n            app.add_contract_to_project(int(contract_id), int(project_id))\n            print(\"Contract added to project successfully\")\n        elif choice == '5':\n            name = input(\"Enter project name: \")\n            app.create_project(name)\n            print(\"Project created successfully\")\n        elif choice == '6':\n            app.print_projects()\n        elif choice == '7':\n            app.print_contracts()\n        elif choice == '8':\n            break\n        else:\n            print(\"Invalid choice\")\n\n    app.db.session.close()\n","repo_name":"Closidx/Contract_And_Projects","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"35585283093","text":"import csv\nimport os\nfrom typing import Optional, List\nfrom collections import OrderedDict\n\n\ndef load_csv_file(file_path) -> Optional[List[OrderedDict]]:\n    try:\n        with open(file_path, mode='r', encoding='utf-8') as csv_file:\n            csv_reader = csv.DictReader(csv_file)\n            return [row for row in csv_reader]\n\n    except OSError:\n        return None\n\n\ndef load_markdown_file(file_path) -> Optional[str]:\n    try:\n        with open(file_path, mode='r', encoding='utf-8') as markdown_file:\n            file_content = markdown_file.read()\n            return file_content\n\n    except OSError:\n        return None\n\n\ndef get_markdown_file_names(folder_path: str) -> List[str]:\n    file_names = []\n\n    for root, directories, files in os.walk(folder_path):\n        for file in files:\n            if '.md' in file:\n                file_names.append(file)\n\n    return file_names\n","repo_name":"hpbl/WRITEME","sub_path":"API/parser/FileLoader.py","file_name":"FileLoader.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"}
+{"seq_id":"16611402956","text":"from __future__ import absolute_import\nimport importlib\nimport json\n\nfrom ragdoll.config_model.kv_config import KvConfig\nfrom ragdoll.test import BaseTestCase\nfrom ragdoll.const.conf_handler_const import NOT_SYNCHRONIZE, SYNCHRONIZED\n\nBASE_PATH = \"ragdoll.config_model.\"\nCONFIG_MODEL_NAME = \"Config\"\nPROJECT_NAME = \"_config\"\nCONF_TYPE = \"kv\"\n\nCONF_INFO = \"# For more information about this file, see the ntp.conf(5) man page.\\n\" \\\n            \"# Record the frequency of the system clock.\\n\" \\\n            \"driftfile /var/lib/ntp/drift \\n\" \\\n            \"# Permit time synchronization with our time source, but do not\\n\" \\\n            \"# permit the source to query or modify the service on this system.\\n\" \\\n            \"restrict default nomodify notrap nopeer noepeer noquery\\n\" \\\n            \"# Permit association with pool servers.\\n\" \\\n            \"restrict source nomodify notrap noepeer noquery\\n\" \\\n            \"# Permit all access over the loopback interface. This could\\n\" \\\n            \"# be tightened as well, but to do so would effect some of\\n\" \\\n            \"# the administrative functions.\\n\" \\\n            \"restrict 127.0.0.1\\n\" \\\n            \"restrict ::1\\n\" \\\n            \"# Hosts on local network are less restricted. \\n\" \\\n            \"# restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap \\n\" \\\n            \"# Use public servers from the pool.ntp.org project. \\n\" \\\n            \"# Please consider joining the pool (http://www.pool.ntp.org/join.html). \\n\" \\\n            \"# pool 2.openEuler.pool.ntp.org iburst \\n\" \\\n            \"# Reduce the maximum number of servers used from the pool. \\n\" \\\n            \"tos maxclock 5 \\n\" \\\n            \"# Enable public key cryptography. \\n\" \\\n            \"# crypto \\n\" \\\n            \"includefile /etc/ntp/crypto/pw \\n\" \\\n            \"# Key file containing the keys and key identifiers used when operating \\n\" \\\n            \"# with symmetric key cryptography. \\n\" \\\n            \"keys /etc/ntp/keys \\n\" \\\n            \"# Specify the key identifiers which are trusted. \\n\" \\\n            \"# trustedkey 4 8 42 \\n\" \\\n            \"# Specify the key identifier to use with the ntpdc utility. \\n\" \\\n            \"# requestkey 8 \\n\" \\\n            \"# Specify the key identifier to use with the ntpq utility. \\n\" \\\n            \"# controlkey 8 \\n\" \\\n            \"# Enable writing of statistics records. \\n\" \\\n            \"# statistics clockstats cryptostats loopstats peerstats \\n\"\n\nEQUAL_SPACER_CONF_INFO = \"kernel.sysrq=0\\n\" \\\n                         \"net.ipv4.ip_forward=0\\n\" \\\n                         \"net.ipv4.conf.all.send_redirects=0\\n\" \\\n                         \"kernel.dmesg_restrict=1\\n\" \\\n                         \"net.ipv6.conf.default.accept_redirects=0\\n\"\nNOT_SYNCHRONIZE_CONF = '[\\n' \\\n                       '{\\n' \\\n                       '\"keys\": \"/etc/ntp/keys\"\\n' \\\n                       '}, \\n' \\\n                       '{\\n' \\\n                       '\"statistics\": \"clockstats cryptostats loopstats peerstats\"\\n' \\\n                       '}' \\\n                       ']'\nSYNCHRONIZE_CONF = '[\\n' \\\n                   '{\\n' \\\n                   '\"driftfile\": \"/var/lib/ntp/drift\"\\n' \\\n                   '}, \\n' \\\n                   '{\\n' \\\n                   '\"restrict\": \"default nomodify notrap nopeer noepeer noquery\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"restrict\": \"source nomodify notrap noepeer noquery\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"restrict\": \"127.0.0.1\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"restrict\": \"::1\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"tos\": \"maxclock 5\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"includefile\": \"/etc/ntp/crypto/pw\"\\n' \\\n                   '},' \\\n                   '{\\n' \\\n                   '\"keys\": \"/etc/ntp/keys\"\\n' \\\n                   '}' \\\n                   ']'\n\nNOT_SYNCHRONIZE_CONF_EQUAL = '[\\n' \\\n                             '{\\n' \\\n                             '\"kernel.sysrq\": \"1\"\\n' \\\n                             '}, \\n' \\\n                             '{\\n' \\\n                             '\"net.ipv4.ip_forward\": \"0\"\\n' \\\n                             '}' \\\n                             ']'\nSYNCHRONIZE_CONF_EQUAL = '[\\n' \\\n                         '{\\n' \\\n                         '\"kernel.sysrq\": \"0\"\\n' \\\n                         '}, \\n' \\\n                         '{\\n' \\\n                         '\"net.ipv4.ip_forward\": \"0\"\\n' \\\n                         '},' \\\n                         '{\\n' \\\n                         '\"net.ipv4.conf.all.send_redirects\": \"0\"\\n' \\\n                         '},' \\\n                         '{\\n' \\\n                         '\"kernel.dmesg_restrict\": \"1\"\\n' \\\n                         '},' \\\n                         '{\\n' \\\n                         '\"net.ipv6.conf.default.accept_redirects\": \"0\"\\n' \\\n                         '}' \\\n                         ']'\nNULL_CONF_INFO = \"\"\n\n\nclass TestSshdConfig(BaseTestCase):\n    def create_conf_model(self):\n        conf_model = \"\"\n        project_name = CONF_TYPE + PROJECT_NAME # example: ini_config\n        project_path = BASE_PATH + project_name # example: ragdoll.config_model.ini_config\n        model_name = CONF_TYPE.capitalize() + CONFIG_MODEL_NAME # example: IniConfig\n\n        try:\n            project = importlib.import_module(project_path)\n        except ImportError:\n            conf_model = \"\"\n        else:\n            _conf_model_class = getattr(project, model_name, None) # example: IniConfig\n            if _conf_model_class:\n                conf_model = _conf_model_class() # example: IniConfig()\n\n        return conf_model\n\n    def test_parse_conf_to_dict_space_spacer(self):\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(CONF_INFO, space_type, \"openEuler-kv_config\")\n        self.assertEqual(len(conf_dict_list), 8)\n\n    def test_parse_conf_to_dict_equal_spacer(self):\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"=\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(EQUAL_SPACER_CONF_INFO, space_type, \"openEuler-kv_config\")\n        self.assertEqual(len(conf_dict_list), 5)\n\n    def test_read_conf_null(self):\n        conf_model = self.create_conf_model()\n        conf_model.read_conf(NULL_CONF_INFO)\n        self.assertEqual(len(conf_model.conf), 0)\n\n    def test_conf_compare_space(self):\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(CONF_INFO, space_type, \"openEuler-kv_config\")\n        res = conf_model.conf_compare(NOT_SYNCHRONIZE_CONF, json.dumps(conf_dict_list))\n        self.assertEqual(res, NOT_SYNCHRONIZE)\n\n        res = conf_model.conf_compare(SYNCHRONIZE_CONF, json.dumps(conf_dict_list))\n        self.assertEqual(res, SYNCHRONIZED)\n\n    def test_conf_compare_equal(self):\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"=\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(EQUAL_SPACER_CONF_INFO, space_type, \"openEuler-kv_config\")\n        res = conf_model.conf_compare(NOT_SYNCHRONIZE_CONF_EQUAL, json.dumps(conf_dict_list))\n        self.assertEqual(res, NOT_SYNCHRONIZE)\n\n        res = conf_model.conf_compare(SYNCHRONIZE_CONF_EQUAL, json.dumps(conf_dict_list))\n        self.assertEqual(res, SYNCHRONIZED)\n\n    def test_write_conf_space(self):\n        kv_config = KvConfig()\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(CONF_INFO, space_type, \"openEuler-kv_config\")\n        kv_config.conf = conf_dict_list\n        content = conf_model.write_conf(space_type={\"openEuler-kv_config\": \"\"}, yang_info=\"openEuler-kv_config\")\n        self.assertTrue(len(content) > 0)\n\n    def test_write_conf_equal(self):\n        kv_config = KvConfig()\n        conf_model = self.create_conf_model()\n        space_type = {\"openEuler-kv_config\": \"=\"}\n        conf_dict_list = conf_model.parse_conf_to_dict(EQUAL_SPACER_CONF_INFO, space_type, \"openEuler-kv_config\")\n        kv_config.conf = conf_dict_list\n        content = conf_model.write_conf(space_type={\"openEuler-kv_config\": \"=\"}, yang_info=\"openEuler-kv_config\")\n        self.assertTrue(len(content) > 0)\n\n\nif __name__ == '__main__':\n    import unittest\n\n    unittest.main()\n","repo_name":"openeuler-mirror/A-Ops","sub_path":"gala-ragdoll/ragdoll/test/test_kv_config.py","file_name":"test_kv_config.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"21654327124","text":"import socket\nfrom typing import Tuple, Optional\n\n\nclass RequestResponseBuilder(object):\n    REQUEST_CODE_LENGTH = 3\n    REQUEST_BODY_LENGTH = 8\n    RESPONSE_BODY_LENGTH = 10\n\n    @staticmethod\n    def __recvNbytes(connection: socket, count: int) -> bytearray:\n        data = bytearray()\n        while 0 < count:\n            try:\n                part = connection.recv(count)\n            except socket.timeout:\n                part = None\n            except Exception:\n                raise\n            if part:\n                data.extend(part)\n                count -= len(part)\n            else:\n                break\n        return data\n\n    @staticmethod\n    def __readInt(connection: socket, numberLength: int) -> Optional[int]:\n        data = RequestResponseBuilder.__recvNbytes(connection, numberLength)\n        if len(data) < numberLength:\n            return None\n        numberStr = data.decode(\"utf8\")\n        number = int(numberStr)\n        return number\n\n    @staticmethod\n    def __getRequestMask() -> str:\n        mask = \"{:0\" + str(RequestResponseBuilder.REQUEST_CODE_LENGTH) + \"d}\" + \\\n               \"{:0\" + str(RequestResponseBuilder.REQUEST_BODY_LENGTH) + \"d}\"\n        return mask\n\n    @staticmethod\n    def __getResponseMask() -> str:\n        mask = \"{:0\" + str(RequestResponseBuilder.RESPONSE_BODY_LENGTH) + \"d}\"\n        return mask\n\n    @staticmethod\n    def __buildRequest(requestCode: int, requestBody: bytearray) -> bytearray:\n        if requestBody and 0 < len(requestBody):\n            requestBodyLength = len(requestBody)\n        else:\n            requestBodyLength = 0\n        mask = RequestResponseBuilder.__getRequestMask()\n        requestHeader = mask.format(requestCode, requestBodyLength)\n        request = bytearray()\n        request.extend(requestHeader.encode(\"utf8\"))\n        if 0 < requestBodyLength:\n            request.extend(requestBody)\n        return request\n\n    @staticmethod\n    def __buildResponse(responseBody: bytearray) -> bytearray:\n        if responseBody and 0 < len(responseBody):\n            responseBodyLength = len(responseBody)\n        else:\n            responseBodyLength = 0\n        mask = RequestResponseBuilder.__getResponseMask()\n        responseHeader = mask.format(responseBodyLength)\n        response = bytearray()\n        response.extend(responseHeader.encode(\"utf8\"))\n        if 0 < responseBodyLength:\n            response.extend(responseBody)\n        return response\n\n    @staticmethod\n    def readRequest(connection: socket) -> Tuple[int, bytearray]:\n        requestBodyLength = None\n        requestBody = None\n\n        requestCode = RequestResponseBuilder.__readInt(connection, RequestResponseBuilder.REQUEST_CODE_LENGTH)\n        if not (requestCode is None):\n            requestBodyLength = RequestResponseBuilder.__readInt(connection, RequestResponseBuilder.REQUEST_BODY_LENGTH)\n\n        if not (requestBodyLength is None):\n            if 0 < requestBodyLength:\n                requestBody = RequestResponseBuilder.__recvNbytes(connection, requestBodyLength)\n                if len(requestBody) < requestBodyLength:\n                    requestBody = None\n            else:\n                requestBody = bytearray()\n\n        return requestCode, requestBody\n\n    @staticmethod\n    def readResponse(connection: socket) -> bytearray:\n        responseBody = None\n\n        responseBodyLength = RequestResponseBuilder.__readInt(connection, RequestResponseBuilder.RESPONSE_BODY_LENGTH)\n\n        if not (responseBodyLength is None):\n            if 0 < responseBodyLength:\n                responseBody = RequestResponseBuilder.__recvNbytes(connection, responseBodyLength)\n            else:\n                responseBody = bytearray\n\n        return responseBody\n\n    @staticmethod\n    def writeRequest(connection: socket, requestCode: int, requestBody: bytearray):\n        request = RequestResponseBuilder.__buildRequest(requestCode, requestBody)\n        connection.sendall(request)\n\n    @staticmethod\n    def writeResponse(connection: socket, responseBody: bytearray):\n        response = RequestResponseBuilder.__buildResponse(responseBody)\n        connection.sendall(response)\n","repo_name":"vadimpechenin/RecognizingTextFromImageApp","sub_path":"CVServer/main/src/core/requests/requestResponseBuilder.py","file_name":"requestResponseBuilder.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"20046433424","text":"\nimport sys\nimport os\nimport dbus\nimport struct\n\nfrom traceback import print_exc\ng_FreeDesktopDBus = ''\ng_AVMService = ''\n\n\n#####################################\n# connect to headunit and get service\n#####################################\ndef connectToService(ip,port):\n\n\tos.environ['DBUS_SESSION_BUS_ADDRESS'] = 'tcp:host=' + ip + ',' + 'port=' + port + ',family=ipv4'\n#\tos.system('export')\n\n\tbus = dbus.SessionBus()\n\n\ttry:\n\t\t#get org.freedesktop.dbus objet\n\t\tremote_object = bus.get_object('org.freedesktop.DBus',\n\t\t\t\t\t\t\t\t\t\t'/')\n\t\tglobal g_FreeDesktopDBus\n\t\tg_FreeDesktopDBus = dbus.Interface(remote_object,\n\t\t\t\t\t\t\t\t\tdbus_interface='org.freedesktop.DBus')\n\n\t\t#get AVMServiceInst0 objet\n\t\tremote_object = bus.get_object('com.harman.adas.AVMService_adas.AVMServiceInst0',\n\t\t\t\t\t\t\t\t\t\t'/adas/AVMServiceInst0')\n\t\tglobal g_AVMService\n\t\tg_AVMService = dbus.Interface(remote_object,\n\t\t\t\t\t\t\t\t\tdbus_interface='com.harman.adas.AVMService')\n\n\texcept dbus.DBusException:\n\t\tprint_exc()\n\t\tsys.exit(1)\n\n\n#####################################\n# org.freedesktop.dbus service \n# ListNames()\n#####################################\ndef getListNames():\n\n\tglobal g_FreeDesktopDBus\n\n\tresult = g_FreeDesktopDBus.ListNames()\n\n\treturn result\n\n#####################################\n# org.freedesktop.dbus service \n# ListActivatableNames()\n#####################################\ndef getListActivatableNames():\n\n\tglobal g_FreeDesktopDBus\n\n\tresult = g_FreeDesktopDBus.ListActivatableNames()\n\n\treturn result\n\n\n#####################################\n# org.freedesktop.dbus service \n# startServiceByName()\n#####################################\ndef startServiceByName(a,b=1):\n\n\tglobal g_FreeDesktopDBus\n\n\tresult = g_FreeDesktopDBus.StartServiceByName(a,b)\n\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# getAVMCamStatusAttribute()\n#####################################\ndef getAVMCamStatusAttribute():\n\n\tglobal g_AVMService\n\t#arg name=\"value\" type=\"(iiii)\" direction=\"out\"\n\tresult = g_AVMService.getAVMCamStatusAttribute()\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# getInterfaceVersion()\n#####################################\ndef getInterfaceVersion():\n\n\tglobal g_AVMService\n\t#arg name=\"value\" type=\"uu\" direction=\"out\"\n\tresult = g_AVMService.getInterfaceVersion()\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# getCTAStatusAttribute()\n#####################################\ndef getCTAStatusAttribute():\n\n\tglobal g_AVMService\n\t#arg name=\"value\" type=\"(ii)\" direction=\"out\"\n\tresult = g_AVMService.getCTAStatusAttribute()\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# getAutoStatusAttribute()\n#####################################\ndef getAutoStatusAttribute():\n\n\tglobal g_AVMService\n\t#arg name=\"value\" type=\"i\" direction=\"out\"\n\tresult = g_AVMService.getAutoStatusAttribute()\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# SwitchAVMView()\n#####################################\ndef SwitchAVMView(a):\n\n\tglobal g_AVMService\n\t#arg name=\"_m_eAVMViewMode\" type=\"i\" direction=\"in\"\n\tg_AVMService.SwitchAVMView(a)\n\treturn 1\n\n#####################################\n# AVMServiceInst0 service \n# SwitchGuideline()\n#####################################\ndef SwitchGuideline(a):\n\n\tglobal g_AVMService\n\t#arg name=\"_m_eAVMGuideLinewMode\" type=\"i\" direction=\"in\" \n\tg_AVMService.SwitchGuideline(a)\n\treturn 1\n\n#####################################\n# AVMServiceInst0 service \n# SwitchCTA()\n#####################################\ndef SwitchCTA(a):\n\n\tglobal g_AVMService\n\t#arg name=\"_m_eCATMode\" type=\"i\" direction=\"in\"\n\t#arg name=\"_iErrCTA\" type=\"y\" direction=\"out\"\n\tresult = g_AVMService.SwitchCTA(a)\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# SwitchAutoCalibrationAuto()\n#####################################\ndef SwitchAutoCalibrationAuto(a):\n\n\tglobal g_AVMService\n\t#arg name=\"_m_eAutoCaliMode\" type=\"i\" direction=\"in\"\n\t#arg name=\"_iErrAutoCali\" type=\"y\" direction=\"out\"\n\tresult = g_AVMService.SwitchAutoCalibrationAuto(a)\n\t\n\treturn result\n\n#####################################\n# AVMServiceInst0 service \n# SwitchAutoCalibrationManually()\n#####################################\ndef SwitchAutoCalibrationManually(a):\n\n\tglobal g_AVMService\n\t#arg name=\"_m_eAutoCaliMode\" type=\"i\" direction=\"in\"\n\t#arg name=\"_iErrAutoCali\" type=\"y\" direction=\"out\"\n\tresult = g_AVMService.SwitchAutoCalibrationManually(a)\n\t\n\treturn result\n#if __name__ == '__main__':\n# main()\n","repo_name":"BobDeng1974/cpm_adas_adservice-github","sub_path":"Test/RF_ADAS/adas_library/Dbus_Adas_IF.py","file_name":"Dbus_Adas_IF.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19805420253","text":"import random\n\n\ndef run():\n    numero_random = random.randint(0, 100)\n    user_num = int(input('Ingresa un numero entre 1 y 100: '))\n\n    while user_num != numero_random:\n        if user_num < numero_random:\n            print('Busca un número más grande')\n        else:\n            print('Busca un número más pequeño')\n        user_num = int(input('Elige otro número: '))\n\n    if numero_random == user_num:\n        print('El numero es correcto')\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"dorian-morones/basic_python","sub_path":"adivina_numero.py","file_name":"adivina_numero.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73361608569","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torch.autograd import Variable\nimport pickle\nimport numpy as np\nimport time\nimport random\nfrom collections import defaultdict\nfrom UV_Encoders import UV_Encoder\nfrom UV_Aggregators import UV_Aggregator\nfrom Social_Encoders import Social_Encoder\nfrom Social_Aggregators import Social_Aggregator\nimport torch.nn.functional as F\nimport torch.utils.data\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom math import sqrt\nimport datetime\nimport argparse\nimport os\nimport matplotlib.pyplot as plt\n\n\"\"\"\nGraphRec: Graph Neural Networks for Social Recommendation.\nWenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin.\nIn Proceedings of the 28th International Conference on World Wide Web (WWW), 2019. Preprint[https://arxiv.org/abs/1902.07243]\n\nIf you use this code, please cite our paper:\n```\n@inproceedings{fan2019graph,\n title={Graph Neural Networks for Social Recommendation},\n author={Fan, Wenqi and Ma, Yao and Li, Qing and He, Yuan and Zhao, Eric and Tang, Jiliang and Yin, Dawei},\n booktitle={WWW},\n year={2019}\n}\n```\n\n\"\"\"\n\n\nclass GraphRec(nn.Module):\n\n    def __init__(self, enc_u, enc_v_history, r2e):\n        super(GraphRec, self).__init__()\n        self.enc_u = enc_u\n        self.enc_v_history = enc_v_history\n        self.embed_dim = enc_u.embed_dim\n\n        self.w_ur1 = nn.Linear(self.embed_dim, self.embed_dim)\n        self.w_ur2 = nn.Linear(self.embed_dim, self.embed_dim)\n        self.w_vr1 = nn.Linear(self.embed_dim, self.embed_dim)\n        self.w_vr2 = nn.Linear(self.embed_dim, self.embed_dim)\n        self.w_uv1 = nn.Linear(self.embed_dim * 2, self.embed_dim)\n        self.w_uv2 = nn.Linear(self.embed_dim, 16)\n        self.w_uv3 = nn.Linear(16, 5)\n        self.r2e = r2e\n        self.bn1 = nn.BatchNorm1d(self.embed_dim, momentum=0.5)\n        self.bn2 = nn.BatchNorm1d(self.embed_dim, momentum=0.5)\n        self.bn3 = nn.BatchNorm1d(self.embed_dim, momentum=0.5)\n        self.bn4 = nn.BatchNorm1d(16, momentum=0.5)\n        self.criterion = nn.MSELoss()\n\n    def forward(self, nodes_u, nodes_v):\n        embeds_u = self.enc_u(nodes_u)\n        embeds_v = self.enc_v_history(nodes_v)\n\n        x_u = F.relu(self.bn1(self.w_ur1(embeds_u)))\n        x_u = F.dropout(x_u, training=self.training)\n        x_u = self.w_ur2(x_u)\n        x_v = F.relu(self.bn2(self.w_vr1(embeds_v)))\n        x_v = F.dropout(x_v, training=self.training)\n        x_v = self.w_vr2(x_v)\n\n        x_uv = torch.cat((x_u, x_v), 1)\n        x = F.relu(self.bn3(self.w_uv1(x_uv)))\n        x = F.dropout(x, training=self.training)\n        x = F.relu(self.bn4(self.w_uv2(x)))\n        x = F.dropout(x, training=self.training)\n        scores = self.w_uv3(x)\n        return scores.squeeze()\n\n    def loss(self, nodes_u, nodes_v, labels_list):\n        scores = self.forward(nodes_u, nodes_v)\n        return self.criterion(scores, labels_list)\n\n\ndef train(model, device, train_loader, optimizer, epoch, best_rmse, best_mae):\n    model.train()\n    running_loss = 0.0\n    loss_values = []\n    for i, data in enumerate(train_loader, 0):\n        batch_nodes_u, batch_nodes_v, labels_list = data\n        # TODO: Not sure why we end up with shape (batch_size, 1, 5).\n        # Investigate if we can get rid of that 1 from the start\n        labels_list = torch.squeeze(labels_list)\n        optimizer.zero_grad()\n        loss = model.loss(batch_nodes_u.to(device), batch_nodes_v.to(device), labels_list.to(device))\n        loss.backward(retain_graph=True)\n        optimizer.step()\n        running_loss += loss.item()\n        if i % 1000 == 0 and i > 0:\n            print('[%d, %5d] loss: %.3f, The best rmse/mae: %.6f / %.6f' % (\n                epoch, i, running_loss / (1000 * i), best_rmse, best_mae))\n            # running_loss = 0.0\n\n    loss_values.append(running_loss / len(train_loader))\n\n    return loss_values\n\n\ndef test(model, device, test_loader):\n    model.eval()\n    tmp_pred = []\n    target = []\n    with torch.no_grad():\n        for test_u, test_v, tmp_target in test_loader:\n            test_u, test_v, tmp_target = test_u.to(device), test_v.to(device), tmp_target.to(device)\n            val_output = model.forward(test_u, test_v)\n            tmp_pred.append(list(val_output.data.cpu().numpy()))\n            target.append(list(tmp_target.data.cpu().numpy()))\n\n    tmp_pred = np.array(sum(tmp_pred, []))\n    target = np.array(sum(target, []))\n    # TODO: Same as above, not sure we we have an extra dim.\n    target = np.squeeze(target)\n    expected_rmses = [sqrt(mean_squared_error(tmp_pred[:, i], target[:, i])) for i in range(target.shape[1])]\n    maes = [mean_absolute_error(tmp_pred[:, i], target[:, i]) for i in range(target.shape[1])]\n    return expected_rmses, maes\n\n\ndef run(data, batch_size=128, embed_dim=64, r_hidden_dim=256, lr=0.001, test_batch_size=1000, epochs=100, use_similarity=False, gpu='0'):\n\n    os.environ['CUDA_VISIBLE_DEVICES'] = gpu\n    use_cuda = False\n    if torch.cuda.is_available():\n        use_cuda = True\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n    history_u_lists, history_ur_lists, history_v_lists, history_vr_lists, train_u, train_v, train_r, val_u, val_v, val_r, test_u, test_v, test_r, item_adj_lists = data\n    \"\"\"\n    ## toy dataset\n    history_u_lists, history_ur_lists: user's purchased history (item set in training set), and his/her rating score (dict)\n    history_v_lists, history_vr_lists: user set (in training set) who have interacted with the item, and rating score (dict)\n\n    train_u, train_v, train_r: training_set (user, item, rating)\n    test_u, test_v, test_r: testing set (user, item, rating)\n\n    # please add the validation set\n\n    social_adj_lists: items connected neighborhoods\n    ratings_list: rating value from 1.0 to 5.0 (9 possible values), for 5 fields.\n    Let's use 9 embeddings of size n for each field, concatenate them, and end up with\n    a 5n size rating embedding\n    \"\"\"\n\n    trainset = torch.utils.data.TensorDataset(torch.LongTensor(train_u), torch.LongTensor(train_v),\n                                              torch.FloatTensor(train_r))\n    valset = torch.utils.data.TensorDataset(torch.LongTensor(val_u), torch.LongTensor(val_v),\n                                            torch.FloatTensor(val_r))\n    testset = torch.utils.data.TensorDataset(torch.LongTensor(test_u), torch.LongTensor(test_v),\n                                             torch.FloatTensor(test_r))\n    train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n    val_loader = torch.utils.data.DataLoader(valset, batch_size=test_batch_size, shuffle=True)\n    test_loader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=True)\n    num_users = history_u_lists.__len__()\n    num_items = history_v_lists.__len__()\n    # Not used for now\n    # num_ratings = ratings_list.__len__()\n\n    u2e = nn.Embedding(num_users, embed_dim).to(device)\n    v2e = nn.Embedding(num_items, embed_dim).to(device)\n\n    # Instead of using 9 embeddings, use a FC to compute the embedding given 5 review values.\n    r2e = nn.Sequential(\n        nn.Linear(5, r_hidden_dim),\n        nn.ReLU(),\n        nn.Linear(r_hidden_dim, embed_dim),\n    ).to(device)\n\n    # user feature\n    # features: item * rating\n    agg_u_history = UV_Aggregator(v2e, r2e, u2e, embed_dim, cuda=device, uv=True)\n    enc_u_history = UV_Encoder(u2e, embed_dim, history_u_lists, history_ur_lists, agg_u_history, cuda=device, uv=True)\n    # neighobrs\n    # Removing this since we don't have user interaction data\n    # agg_u_social = Social_Aggregator(lambda nodes: enc_u_history(nodes).t(), u2e, embed_dim, cuda=device)\n    # enc_u = Social_Encoder(lambda nodes: enc_u_history(nodes).t(), embed_dim, social_adj_lists, agg_u_social,\n    #                        base_model=enc_u_history, cuda=device)\n\n    # item feature: user * rating\n    agg_v_history = UV_Aggregator(v2e, r2e, u2e, embed_dim, cuda=device, uv=False)\n    enc_v_history = UV_Encoder(v2e, embed_dim, history_v_lists, history_vr_lists, agg_v_history, cuda=device, uv=False)\n\n    # we do have item similarity data so we will add this piece and see if it works\n    # item adjancency\n    # TODO Use item similarity later (enc_v).\n    agg_v_similarity = Social_Aggregator(lambda nodes: enc_v_history(nodes).t(), v2e, embed_dim, cuda=device)\n    enc_v = Social_Encoder(lambda nodes: enc_v_history(nodes).t(), embed_dim, item_adj_lists, agg_v_similarity,\n                           base_model=enc_v_history, cuda=device)\n\n    # model\n    if use_similarity:\n        graphrec = GraphRec(enc_u_history, enc_v, r2e).to(device)\n    else:\n        graphrec = GraphRec(enc_u_history, enc_v_history, r2e).to(device)\n    optimizer = torch.optim.RMSprop(graphrec.parameters(), lr=lr, alpha=0.9)\n\n    best_rmse = 9999.0\n    train_rmse_history = {}\n    val_rmse_history = {}\n\n    best_mae = 9999.0\n    train_mae_history = {}\n    val_mae_history = {}\n    endure_count = 0\n    fields = ['overall', 'review_aroma', 'review_appearance', 'review_palate', 'review_taste']\n    loss_history = []\n\n    for epoch in range(1, epochs + 1):\n\n        loss_values = train(graphrec, device, train_loader, optimizer, epoch, best_rmse, best_mae)\n        loss_history.extend(loss_values)\n\n        # Metrics\n        train_expected_rmses, train_maes = test(graphrec, device, train_loader)\n        for idx, (expected_rmse, mae) in enumerate(zip(train_expected_rmses, train_maes)):\n            train_rmse_history.setdefault(fields[idx], []).append(expected_rmse)\n            train_mae_history.setdefault(fields[idx], []).append(mae)\n\n            print(f'TRAIN metrics for field {fields[idx]}:')\n            print(expected_rmse)\n            print(mae)\n\n        val_expected_rmses, val_maes = test(graphrec, device, val_loader)\n        for idx, (expected_rmse, mae) in enumerate(zip(val_expected_rmses, val_maes)):\n            val_rmse_history.setdefault(fields[idx], []).append(expected_rmse)\n            val_mae_history.setdefault(fields[idx], []).append(mae)\n\n            print(f'VALIDATION metrics for field {fields[idx]}:')\n            print(expected_rmse)\n            print(mae)\n\n        expected_rmse = val_expected_rmses[0]\n        mae = val_maes[0]\n        # early stopping (no validation set in toy dataset)\n        if best_rmse > expected_rmse:\n            best_rmse = expected_rmse\n            best_mae = mae\n            endure_count = 0\n        else:\n            endure_count += 1\n        print(\"val rmse: %.4f, val mae:%.4f \" % (expected_rmse, mae))\n\n        if endure_count > 5:\n            break\n\n    plt.title(\"Loss history\")\n    plt.plot(loss_history, label='Train Loss')\n    plt.legend()\n    plt.show()\n\n    for field in fields:\n        plt.title(f\"Metrics for {field}\")\n        plt.plot(train_rmse_history[field], label='TRAIN RMSE')\n        plt.plot(train_mae_history[field], label='TRAIN MAE')\n        plt.plot(val_rmse_history[field], label='VAL RMSE')\n        plt.plot(val_mae_history[field], label='VAL MAE')\n        plt.legend()\n        plt.show()\n\n    expected_rmses, maes = test(graphrec, device, test_loader)\n    for idx, (expected_rmse, mae) in enumerate(zip(expected_rmses, maes)):\n        print(f'TEST metrics for field {fields[idx]}:')\n        print(expected_rmse)\n        print(mae)\n","repo_name":"fabalbertoni/beer-recommender","sub_path":"run_GraphRec_example.py","file_name":"run_GraphRec_example.py","file_ext":"py","file_size_in_byte":11363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"21988661954","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport json\n\nfrom cellar.core.manager import ResourceNotFound, InvalidUpdate\nfrom cellar.core.patch import Replace, Create, Remove\nfrom cellar.core.resource import Resource\nfrom flask import make_response\nfrom flask import request\nfrom flask import render_template\n\n\nclass Api(object):\n    def __init__(self, app, manager=None, resource_type_factory=None):\n        super().__init__()\n\n        self.manager = manager\n        self.resource_type_factory = resource_type_factory\n\n        app.add_url_rule('/v1/resources',\n                         view_func=self.create_resource,\n                         methods=['POST'])\n\n        app.add_url_rule('/v1/resources/',\n                         view_func=self.update_resource,\n                         methods=['PATCH'])\n\n        app.add_url_rule('/v1/resources',\n                         view_func=self.get_all_resources,\n                         methods=['GET'])\n\n        app.add_url_rule('/v1/resources/',\n                         view_func=self.get_all_resources,\n                         methods=['GET'])\n\n        app.add_url_rule('/v1/resources/',\n                         view_func=self.get_resource,\n                         methods=['GET'])\n\n        app.add_url_rule('/v1/resource-types',\n                         view_func=self.get_all_resource_types,\n                         methods=['GET'])\n\n    def create_resource(self):\n        request_data = request.json\n\n        resource = self.manager.create_resource(\n            Resource(resource_type=self.resource_type_factory.get(request_data['type']),\n                     attributes=request_data['attributes'],\n                     relations=self._to_relations(request_data.get('relations', {}))))\n        response = make_response(json.dumps(resource_to_api(resource)), 201)\n        response.headers['Content-type'] = 'application/json'\n        response.headers['Location'] = '/v1/resources/{}'.format(resource.uuid)\n\n        return response\n\n    def update_resource(self, uuid):\n        request_data = request.json\n        changes = request_to_patch_operation(request_data)\n\n        try:\n            resource = self.manager.update_resource(uuid, changes=changes)\n        except InvalidUpdate:\n            return make_response('', 400)\n\n        response = make_response(json.dumps(resource_to_api(resource)), 200)\n        response.headers['Content-type'] = 'application/json'\n        response.headers['Location'] = '/v1/resources/{}'.format(resource.uuid)\n\n        return response\n\n    def get_all_resources(self):\n        resources = self.manager.list_resources()\n\n        if 'text/html' in request.accept_mimetypes:\n            response = make_response('', 200)\n            response.headers['Content-Type'] = 'text/html'\n            return render_template('resources.html', resources=resources)\n        else:\n            api_response = []\n            for resource in resources:\n                api_response.append(resource_to_api(resource))\n\n            response = make_response(json.dumps({\"resources\": api_response}), 200)\n            response.headers['Content-Type'] = 'application/json'\n\n            return response\n\n    def get_resource(self, uuid):\n        try:\n            resource = self.manager.get_resource(uuid)\n        except ResourceNotFound:\n            return make_response('', 404)\n\n        data = resource_to_api(resource)\n\n        response = make_response(json.dumps(data), 200)\n        response.headers['Content-Type'] = 'application/json'\n\n        return response\n\n    def get_all_resource_types(self):\n        resource_types = self.resource_type_factory.list()\n\n        api_response = [resource_type_to_api(resource_type)\n                        for resource_type in resource_types]\n\n        response = make_response(json.dumps({\"resource_types\": api_response}), 200)\n        response.headers['Content-Type'] = 'application/json'\n\n        return response\n\n    def _to_relations(self, raw_relations):\n        return {k: self.manager.get_resource(v)\n                for k, v in raw_relations.items()}\n\n\ndef request_to_patch_operation(request):\n    patch_operations = {\n        'replace': Replace,\n        'add': Create,\n        'remove': Remove\n    }\n    return [patch_operations.get(change['op'])(change['path'].split('/')[1:], change['value']) for change in request]\n\n\ndef resource_to_api(resource):\n    return {'uuid': resource.uuid,\n            'type': resource.resource_type.name if resource.resource_type else \"\",\n            'attributes': resource.attributes,\n            'relations': {k: v.uuid\n                          for k, v in resource.relations.items()}}\n\n\ndef resource_type_to_api(resource_type):\n    return {'name': resource_type.name}\n","repo_name":"internap/cellar","sub_path":"cellar/interfaces/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6945705708","text":"# Word List File Writer.\n\ndef main():\n    # Getting the number of words to write to file from the user.\n    number_words = int(input('Enter the number of words to enter the file: '))\n\n    # Opening the file. In write mode.\n    outfile = open('word_list.txt', 'w')\n\n    # Getting the word from the user and writing it to file.\n    for word_number in range(number_words):\n        word = input(\"Enter word \" + str(word_number + 1) + \": \")\n        outfile.write(word + '\\n')\n \n    # Closing the file.\n    outfile.close()\n    print('The file has been updated.')\n\n# Call the main function.\nmain()","repo_name":"sairamprogramming/python_book1","sub_path":"chapter6/programming_exercises/exercise7.py","file_name":"exercise7.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34810143338","text":"from openerp import models, _\nfrom openerp.exceptions import Warning\n\n\nclass account_invoice(models.Model):\n    _inherit = 'account.invoice'\n\n    def invoice_pay_customer(self, cr, uid, ids, context=None):\n        res = super(account_invoice, self).invoice_pay_customer(\n            cr, uid, ids, context=context)\n        if not res:\n            return res\n\n        aj_obj = self.pool['account.journal']\n        inv = self.browse(cr, uid, ids[0])\n        if inv.type in ('in_invoice', 'out_refund'):\n            domain = [('payment_method_out', '=', True)]\n            aj_ids = aj_obj.search(cr, uid, domain, context=context)\n            if aj_ids:\n                default_journal = aj_obj.browse(\n                    cr, uid, aj_ids[0], context=context)\n                if default_journal.payment_date_out == 'invoice_date':\n                    res['context'].update({\n                        'default_period_id': inv.period_id.id,\n                        'default_date': inv.date_invoice,\n                    })\n        else:\n            domain = [('payment_method_in', '=', True)]\n            aj_ids = aj_obj.search(cr, uid, domain, context=context)\n            if aj_ids:\n                default_journal = aj_obj.browse(\n                    cr, uid, aj_ids[0], context=context)\n                if default_journal.payment_date_in == 'invoice_date':\n                    res['context'].update({\n                        'default_period_id': inv.period_id.id,\n                        'default_date': inv.date_invoice,\n                    })\n\n        if not aj_ids:\n            raise Warning(\n                _(\"No Payment Methods defined for \"\n                  \"the 'Register Payment' function\"))\n\n        res['name'] = _(\"Register Payment\")\n        res['context'].update({\n            'payment_journal_ids': aj_ids,\n            'default_journal_id': aj_ids[0],\n            'account_invoice_pay_filter': True,\n        })\n        return res\n","repo_name":"christophe-hanon/odoo8base","sub_path":"noviat-8.0/account_invoice_pay_filter/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14800465357","text":"# -*- coding: utf-8 -*-\n\nfrom bson import ObjectId\n\nfrom common import hashers\n\nfrom models.base import BaseModel\n\n\nclass UserModel(BaseModel):\n    MONGO_COLLECTION = 'user'\n    ROLE = ('user', 'broker')\n    GENDER = ('male', 'female', 'not_specific')\n\n    def __init__(self, *args, **kwargs):\n        super(UserModel, self).__init__(*args, **kwargs)\n\n    @property\n    def specification(self):\n        specification = super(UserModel, self).specification\n        specification.extend([\n            {\n                'key': 'name',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'last_name',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'email',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'mobile_number',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'mobile',\n                'type': dict,\n                'default': None\n            },\n            {\n                'key': 'password',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'gender',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'birthday',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'sns',\n                'type': dict,\n                'default': None\n            },\n            {\n                'key': 'terms',\n                'type': dict,\n                'default': None\n            },\n            {\n                'key': 'role',\n                'type': list,\n                'default': None\n            },\n            {\n                'key': 'enabled',\n                'type': bool,\n                'default': (lambda: True)\n            },\n            {\n                'key': 'image',\n                'type': dict,\n                'default': None\n            },\n            {\n                'key': 'terms',\n                'type': dict,\n                'default': (lambda: {'privacy': False, 'policy': False})\n            },\n        ])\n        return specification\n\n    def check_password(self, password):\n        return hashers.check_password(password, self.data.get('password', ''))\n\n    def set_password(self, password):\n        self.data['password'] = hashers.make_password(password)\n\n\nclass UserAutologinModel(BaseModel):\n    MONGO_COLLECTION = 'user_autologin'\n\n    def __init__(self, *args, **kwargs):\n        super(UserAutologinModel, self).__init__(*args, **kwargs)\n\n    @property\n    def specification(self):\n        specification = super(UserAutologinModel, self).specification\n        specification.extend([\n            {\n                'key': 'usk',\n                'type': ObjectId,\n                'default': None\n            },\n            {\n                'key': 'content_oid',\n                'type': ObjectId,\n                'default': None\n            },\n        ])\n        return specification\n\n\nclass UserSendHistoryModel(BaseModel):\n    MONGO_COLLECTION = 'user_send_history'\n\n    def __init__(self, *args, **kwargs):\n        super(UserSendHistoryModel, self).__init__(*args, **kwargs)\n\n    @property\n    def specification(self):\n        specification = super(UserSendHistoryModel, self).specification\n        specification.extend([\n            {\n                'key': 'user_oid',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'name',\n                'type': str,\n                'default': None\n            },\n            {\n                'key': 'mobile',\n                'type': dict,\n                'default': None\n            },\n            {\n                'key': 'enabled',\n                'type': bool,\n                'default': (lambda: True)\n            },\n        ])\n        return specification\n","repo_name":"rjkorea/coconut","sub_path":"models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25135418461","text":"from sfun import *\nfrom iset import *\nfrom dempster_shafer import MassFunction\n\nG, T, R, O, S = 0, 1, 2, 3, 4\nV, cV = 0, 1\n\n\ndef main():\n    mf = MassFunction.from_cardinality(5)\n\n    c2r = imapset({\n        V: [G, T],\n        cV: [R, S, O]\n    })\n    print(c2r)\n\n\ndef main2():\n    mf = MassFunction.from_cardinality(4)\n    N = iset([0,1,2,3])\n    A = iset([0,2])\n    cA = idiff(N, A)\n\n    mf.set(A, .9)\n    mf.set(N, .1)\n    print(mf.bel(A), mf.bel(cA), mf.bel(N))\n    print(mf.pl(A), mf.pl(cA), mf.pl(N))\n\n\ndef main1():\n    mf = MassFunction.from_cardinality(4)\n    mf = MassFunction.from_bayesian(4)\n\n    # G R T O S\n    # 0 1 2 3 4\n    # mf.set([2, 3], .9)\n\n    print(mf.focal_values())\n\n    bf = mf.belief_function()\n    pl = mf.plausibility_function()\n    bmf = bf.mass_function()\n    pmf = pl.mass_function()\n\n    mf.dump(header=\"mf\")\n    bf.dump(header=\"bf\")\n    pl.dump(header=\"pl\")\n    bmf.dump(header=\"bmf\")\n    pmf.dump(header=\"pmf\")\n\n    print(bf[0])\n    print(bf[15])\n# end\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"corradomio/python_projects","sub_path":"check_dempster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"16247819865","text":"from django.urls import include, path \nfrom . import views\nfrom django.contrib.auth.decorators import login_required, permission_required\n\n\nurlpatterns = [ \n    path('registreer_vandaag///', views.registreer_vandaag, name='registreer_vandaag' ),\n    path('registreer_week///', views.registreer_week, name='registreer_week'),\n    path('registreer_week_bevestigd///', views.registreer_week_bevestigd, name='registreer_week_bevestigd'),\n    path('registreer_dag////', views.registreer_dag, name='registreer_dag'),\n\n    path(\"registreer_wijzig//\", views.UrenWijzigen.as_view(), name=\"registreer_wijzig\"),\n    path('registreer_verwijderen/', views.UrenVerwijderen.as_view(), name=\"uren_verwijderen\")\n\n    ]\n","repo_name":"C-Claus/_claus_portaal","sub_path":"claus_uren/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15503556042","text":"import commands\nimport os\nimport xml.etree.ElementTree as ET\nfrom dateutil.parser import parse\nimport datetime\nimport urllib\nimport cgi\nimport sys\nfrom shutil import copyfile\n\n# date utilities\ndef html_name_to_date(h):\n\tr = ET.parse(h).getroot()\n\treturn parse(r[0].find(\"meta[@name='created']\").attrib['content'])\ndef html_name_to_tags(h):\n\tr = ET.parse(h).getroot()\n\tif r[0].find(\"meta[@name='keywords']\") is not None:\n\t\treturn r[0].find(\"meta[@name='keywords']\").attrib['content']\n\treturn ''\n\n\nif len(sys.argv) == 1:\n\tprint('Usage: python evernote_archiver.py [name of file listing paths to all evernote HTML exports]')\n\tsys.exit(1)\n\n# load paths to evernote html exports\narchive_paths = [l.strip() for l in open(sys.argv[1]).readlines()]\n\n# generate the \"table of contents\" of all the archives\narchive_index_file = open('index.html','w')\narchive_index_file.write('\n')\nfor a in archive_paths: \n\tarchive_index_file.write(''+a+'\n')\narchive_index_file.write('')\narchive_index_file.close()\n\nfor archive_path in archive_paths:\n\tprint(archive_path)\n\tarchive_name = archive_path.split('/')[-1]\n\n\tcopyfile('nav.css',archive_path+'/nav.css')\n\tcopyfile('nav.js',archive_path+'/nav.js')\n\tos.chdir(archive_path)\n\n\t# find all the .html files (i.e, all the notes)\n\thtml_names = commands.getoutput('ls *.html').split('\\n')\n\thtml_names.remove('index.html')\n\tif 'frameview.html' in html_names: html_names.remove('frameview.html')\n\tif 'nav.html' in html_names: html_names.remove('nav.html') \n\thtml_names_sorted = sorted(html_names,key=html_name_to_date,reverse=True)\n\n\t# create the nav frame HTML file with links to all the note HTML files\n\tnav_file = open(archive_path+'/nav.html','w')\n\tnav_file.write('\\n')\n\tnav_file.write('nav')\n\tnav_file.write('')\n\tnav_file.write('\\n')\n\n\tfor i in range(len(html_names_sorted)):\n\t\th = html_names_sorted[i]\n\t\tp_class = ''\n\t\tif i==0: p_class = 'selected'\n\t\tnav_file.write('\n')\n\t\tnav_file.write(''+cgi.escape(h.split('.html')[0])+'\n')\n\t\tnav_file.write(html_name_to_date(h).strftime('%b %d, %Y @ %I:%M %p')+'\\n')\n\t\tnav_file.write(''+html_name_to_tags(h)+'')\n\t\tnav_file.write('')\n\tnav_file.write('\\n')\n\tnav_file.write('\\n')\n\tnav_file.close()\n\n\n\t# create the evernote-style view with iframes with the nav frame on the left, and the note on the right\n\tframeview_file = open(archive_path+'/frameview.html','w')\n\n\tframeview_file.write('\\n\\n\\n'+archive_name+'\\n')\n\n\tframeview_file.write('\\n')\n\tframeview_file.write('\\n')\n\n\t# set the default focus to the nav frame so that the up and down key commands work right\n\t# when the page loads\n\tframeview_file.write('')\n\tframeview_file.write('')\n\tframeview_file.close()\n\n","repo_name":"abrahamneben/evernote_archiver","sub_path":"evernote_archiver.py","file_name":"evernote_archiver.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74131301048","text":"# This file is part of the Minecraft Overviewer.\n#\n# Minecraft Overviewer is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or (at\n# your option) any later version.\n#\n# Minecraft Overviewer is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with the Overviewer. If not, see .\n\nimport socket\nimport struct\nimport select\n\n\nclass RConException(Exception):\n    def __init__(self, request_id, reason):\n        self.request_id = request_id\n        self.reason = reason\n\n    def __str__(self):\n        return (\"Failed RCon request with request ID %d, reason %s\" %\n                (self.request_id, self.reason))\n\n\nclass RConConnection():\n    rid = 0\n\n    def __init__(self, target, port):\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.sock.connect((target, port))\n\n    def send(self, t, payload):\n        self.rid = self.rid + 1\n        pld_enc = payload.encode(\"utf-8\")\n        header = struct.pack(\"\"\n        if newPlaylistName in playlistNames:\n            counter = 2\n            newPlaylistName = \"\" % counter\n            while newPlaylistName in playlistNames:\n                counter += 1\n                newPlaylistName = \"\" % counter\n        return self.addPlaylist(newPlaylistName)\n\n    def addPlaylist(self, playlistName):\n        playlistId = self.model.addPlaylist(playlistName)\n        return playlistId\n \n    def addTracksToNewPlaylist(self, trackIds):\n        newPlaylistId = self.addEmptyPlaylist()\n        self.addTracksToPlaylist(trackIds, newPlaylistId)\n        return newPlaylistId\n\n    def addTracksToPlaylist(self, trackIds, playlistId=None):\n        if playlistId is None:\n            playlistId = self.currentPlaylist.playlistId\n        playlistPosition = self.model.getPlaylistPosition(playlistId)\n        playlist = self.view.widget(playlistPosition)\n        playlist.addTracks(trackIds)\n\n    def getPlaylistById(self, playlistId):\n        return self.view.widget(self.model.getPlaylistPosition(playlistId))\n\n    def getRandomPlaylist(self):\n        return self.view.widget(random.randrange(len(self)))\n\n    def setCurrentPlaylist(self, playlistId):\n        playlistPosition = self.model.getPlaylistPosition(playlistId)\n        self.view.setCurrentIndex(playlistPosition)\n\n\n\n# Qt 4.5 comes with support for moveable tabbars\n# furthermore in-place renaming can be achieve via setting the tabWidget\nclass PlaylistsTabBar(QTabBar):\n    tabRenameRequested = pyqtSignal(int,QString)\n \n    def __init__(self, parent=None):\n 
super(PlaylistsTabBar, self).__init__(parent)\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.setMovable(True)\n self.tabTitleEdit = QLineEdit()\n self.currentEditIndex = None\n self.connect(self.tabTitleEdit, SIGNAL(\"editingFinished()\"), self.renameDone)\n self.tabRemoveCandidate = None\n\n def mousePressEvent(self, event):\n if event.button() == Qt.MidButton:\n self.tabRemoveCandidate = self.tabAt(event.pos())\n super(PlaylistsTabBar, self).mousePressEvent(event)\n \n def mouseReleaseEvent(self, event):\n if self.tabRemoveCandidate is not None:\n if self.tabRemoveCandidate == self.tabAt(event.pos()):\n self.emit(SIGNAL(\"tabCloseRequested(int)\"), self.tabRemoveCandidate)\n self.tabRemoveCandidate = None\n super(PlaylistsTabBar, self).mouseReleaseEvent(event)\n \n def mouseDoubleClickEvent(self, event):\n tabIndex = self.tabAt(event.pos())\n self.renameTab(tabIndex)\n \n def renameTab(self, tabIndex):\n self.setTabButton(tabIndex, QTabBar.LeftSide, self.tabTitleEdit)\n self.setFocusProxy(self.tabTitleEdit)\n self.tabTitleEdit.setText(self.tabText(tabIndex))\n self.tabTitleEdit.grabKeyboard()\n self.tabTitleEdit.setFocus()\n self.tabTitleEdit.selectAll()\n self.setTabText(tabIndex, \"\")\n self.currentEditIndex = tabIndex\n\n def renameDone(self):\n if self.currentEditIndex is None:\n logging.warning(\"dropping rename because no tabIndex is associated\")\n return\n self.tabTitleEdit.releaseKeyboard()\n self.setTabButton(self.currentEditIndex, QTabBar.LeftSide, None)\n newTabTitle = unicode(self.tabTitleEdit.text().toUtf8(), \"utf8\")\n self.setFocusProxy(None)\n# self.setTabText(self.currentEditIndex, self.tabTitleEdit.text())\n# self.tabRenameRequested.emit(self.currentEditIndex, newTabTitle)\n self.emit(SIGNAL(\"tabRenameRequested(int,QString)\"),\n self.currentEditIndex, newTabTitle)\n self.currentEditIndex = None\n\n\nclass PlaylistsView(QTabWidget):\n mouseLeftDoubleClickEvent = pyqtSignal(QMouseEvent)\n \n def __init__(self, parent=None):\n super(PlaylistsView, self).__init__(parent)\n self.setTabBar(PlaylistsTabBar(self))\n self.setTabsClosable(True)\n self.setupConnections()\n self._model = None\n\n def setupConnections(self):\n self.connect(self.tabBar(), SIGNAL(\"customContextMenuRequested(const QPoint&)\"),\n partial(self.emit, SIGNAL(\"customContextMenuRequested(const QPoint&)\")))\n self.connect(self.tabBar(), SIGNAL(\"tabCloseRequested(int)\"), self.requestPlaylistClose)\n self.connect(self.tabBar(), SIGNAL(\"tabRenameRequested(int,QString)\"), self.requestPlaylistRename)\n self.connect(self.tabBar(), SIGNAL(\"tabMoved(int, int)\"), self.requestPlaylistMove)\n \n def tabInserted(self, index):\n logging.debug(\"tabInserted\")\n self.emit(SIGNAL(\"tabInserted\"), index, self.widget(index))\n\n def requestPlaylistClose(self, position):\n self.emit(SIGNAL(\"playlistCloseRequest(int)\"), self.widget(position).playlistId)\n\n def requestPlaylistRename(self, position, newPlaylistName):\n self.emit(SIGNAL(\"playlistRenameRequest(int,QString)\"), self.widget(position).playlistId, newPlaylistName)\n\n def requestPlaylistMove(self, fromRow, toRow):\n self.emit(SIGNAL(\"playlistMoveRequest(int, int)\"), fromRow, toRow)\n\n def mouseDoubleClickEvent(self, event):\n if event.buttons() == Qt.LeftButton:\n self.emit(SIGNAL(\"mouseLeftDoubleClickEvent(QMouseEvent)\"), event)\n\n def model(self):\n return self._model\n def setModel(self, model):\n self._model = model\n self.connect(model, SIGNAL(\"dataChanged(QModelIndex, QModelIndex)\"), self.updatePlaylist)\n self.connect(model, 
SIGNAL(\"modelReset()\"), self.refreshAll)\n self.connect(model, SIGNAL(\"rowsInserted(QModelIndex, int, int)\"),\n lambda parent, start, end: self.addPlaylist(start))\n self.connect(model, SIGNAL(\"rowsRemoved(QModelIndex, int, int)\"),\n lambda parent, start, end: self.removeTab(start))\n self.refreshAll()\n\n def updatePlaylist(self, topLeftIndex, bottomRightIndex):\n logging.debug(\"updatePlaylist\")\n assert topLeftIndex == bottomRightIndex, \"more than one database entry changed\"\n if topLeftIndex.column() == PlaylistsModel.PLAYLIST_NAME:\n tabIndex = topLeftIndex.sibling(topLeftIndex.row(), PlaylistsModel.POSITION).data().toInt()[0]\n tabTitle = unicode(topLeftIndex.data().toString().toUtf8(), \"utf8\")\n self.setTabText(tabIndex, tabTitle)\n\n def refreshAll(self):\n # remove all playlists\n self.clear()\n for i in xrange(self.model().rowCount()):\n self.addPlaylist(i)\n\n def addPlaylist(self, tabIndex):\n newPlaylist = playlist.Playlist()\n playlistId = self.model().getPlaylistIdFromPosition(tabIndex)\n logging.debug(\"add playlist id: %d\" % playlistId)\n newPlaylist.init(playlistId)\n self.addTab(newPlaylist, self.model().getPlaylistName(playlistId))\n\n def onCloseTab(self, position):\n self.model().removePlaylist(self.widget(position).playlistId)\n\n def getPlaylistWidgetFromId(self, playlistId):\n tabIndex = self.model().getPlaylistPosition(playlistId)\n return self.widget(tabIndex)\n\n\n\nclass PlaylistsModel(QSqlTableModel):\n ID, PLAYLIST_NAME, POSITION = range(3)\n\n def init(self):\n db = QSqlDatabase.database()\n db.transaction()\n query = QSqlQuery()\n # the \"playlists\" table contains all open playlists \n query.exec_(\"\"\"CREATE TABLE IF NOT EXISTS playlists (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n playlistName TEXT,\n position INTEGER)\n \"\"\")\n query.finish()\n db.commit()\n #FIXME: this is fucked up! 
when setting the table rowCount the starts to behave strangely\n# self.setTable(\"playlists\")\n# self.setSort(PlaylistsModel.POSITION, Qt.AscendingOrder)\n playlist.init()\n# self.select()\n\n def index(self, row, column, parent=QModelIndex()):\n query = QSqlQuery()\n query.prepare(\"SELECT id FROM playlists WHERE position IS :row\")\n query.bindValue(\":row\", row)\n query.exec_()\n if not query.next():\n raise RuntimeError(\"could not create index: %s\" % query.lastError().text())\n return self.createIndex(row, column, query.value(0).toInt()[0])\n\n def rowCount(self, parent=QModelIndex()):\n query = QSqlQuery()\n query.exec_(\"SELECT count(*) FROM playlists\")\n if not query.next():\n raise RuntimeError(\"could not get rowCount: %s\" % query.lastError().text())\n rowCount = query.value(0).toInt()[0]\n query.finish()\n return rowCount\n\n def columnCount(self, parent=QModelIndex()):\n return 3\n \n def data(self, index, role=Qt.DisplayRole):\n if index.column() == self.PLAYLIST_NAME:\n return self.getPlaylistName(index.internalId())\n elif index.column() == self.POSITION:\n return self.getPlaylistPosition(index.internalId())\n elif index.column() == self.ID:\n return index.internalId()\n \n def setData(self, index, value, role=Qt.EditRole):\n if index.column() == self.ID:\n return False\n elif index.column() == self.PLAYLIST_NAME:\n newPlaylistName = unicode(value.toString().toUtf8(), \"utf8\")\n self.setPlaylistName(index.internalId(), newPlaylistName)\n elif index.column() == self.POSITION:\n newPlaylistPosition = value.toInt()[0]\n self.setPlaylistPosition(index.internalId(), newPlaylistPosition)\n else:\n return False\n self.emit(SIGNAL(\"dataChanged(QModelIndex, QModelIndex)\"), index, index)\n return True\n\n def flags(self, index):\n if index.column() == self.ID:\n return Qt.NoItemFlags\n else:\n return super(PlaylistsModel, self).flags(index) | Qt.ItemIsEditable\n \n def getPlaylistPosition(self, playlistId):\n logging.debug(\"get Playlist pos for id %d\" % playlistId)\n query = QSqlQuery()\n query.prepare(\"SELECT position FROM playlists WHERE id IS :playlistId\")\n query.bindValue(\":playlistId\", playlistId)\n query.exec_()\n if not query.next():\n raise RuntimeError(\"could not get playlist position: %s\" % query.lastError().text())\n playlistPosition = query.value(0).toInt()[0]\n query.finish()\n return playlistPosition\n\n def setPlaylistPosition(self, playlistId, newPlaylistPosition):\n query = QSqlQuery()\n query.prepare(\"\"\"UPDATE playlists\n SET position = :newPlaylistPosition\n WHERE id IS :playlistId\"\"\")\n query.bindValue(\":newPlaylistPosition\", newPlaylistPosition)\n query.bindValue(\":playlistId\", playlistId)\n if not query.exec_():\n raise RuntimeError(\"could not set playlistPosition: %s\" % query.lastError().text())\n query.finish()\n \n def getPlaylistName(self, playlistId):\n logging.debug(\"get playlistName for id %d\" % playlistId)\n query = QSqlQuery()\n query.prepare(\"SELECT playlistName FROM playlists WHERE id IS :playlistId\")\n query.bindValue(\":playlistId\", playlistId)\n query.exec_()\n if not query.next():\n raise RuntimeError(\"could not get playlistName: %s\" % query.lastError().text())\n playlistName = unicode(query.value(0).toString().toUtf8(), \"utf8\")\n query.finish()\n return playlistName\n\n def setPlaylistName(self, playlistId, newPlaylistName):\n logging.debug(\"setPlaylistName\")\n query = QSqlQuery()\n query.prepare(\"\"\"UPDATE playlists\n SET playlistName = :newPlaylistName\n WHERE id IS :playlistId\"\"\")\n 
query.bindValue(\":newPlaylistName\", newPlaylistName)\n query.bindValue(\":playlistId\", playlistId)\n if not query.exec_():\n logging.error(\"SQL error: %s\" % query.lastError().text())\n query.finish()\n \n def getPlaylistNames(self):\n playlistNames = []\n query = QSqlQuery()\n query.exec_(\"SELECT playlistName FROM playlists ORDER BY position ASC\")\n while query.next():\n playlistNames.append(unicode(query.value(0).toString().toUtf8(), \"utf8\"))\n query.finish()\n return playlistNames\n\n def getPlaylistIds(self):\n playlistIds = []\n query = QSqlQuery()\n query.exec_(\"SELECT id FROM playlists ORDER BY position ASC\")\n while query.next():\n playlistIds.append(query.value(0).toInt()[0])\n query.finish()\n return playlistIds\n \n def addPlaylist(self, playlistName):\n logging.debug(\"adding Playlist %s\" % playlistName)\n rowCount = self.rowCount()\n self.beginInsertRows(QModelIndex(), rowCount, rowCount)\n db = QSqlDatabase.database()\n db.transaction()\n query = QSqlQuery()\n # TODO: I guess this can be done more elegantly\n query.exec_(\"SELECT count(*) FROM playlists\")\n query.next()\n query.prepare(\"\"\"INSERT INTO playlists (playlistName, position)\n SELECT :playlistName, COALESCE(MAX(position)+1,0)\n FROM playlists\n \"\"\")\n query.bindValue(\":playlistName\", playlistName)\n query.exec_()\n query.exec_(\"SELECT MAX(id) FROM playlists\")\n if not query.next():\n raise RuntimeError(\"Error in add Playlist: could not get playlistId\")\n playlistId = query.value(0).toInt()[0]\n logging.debug(\"newplaylistID: %d\" % playlistId)\n query.finish()\n db.commit()\n self.endInsertRows()\n return playlistId\n\n def removePlaylist(self, playlistId):\n logging.debug(\"removing PlaylistId %d\" % playlistId)\n position = self.getPlaylistPosition(playlistId)\n self.beginRemoveRows(QModelIndex(), position, position)\n db = QSqlDatabase.database()\n db.transaction()\n query = QSqlQuery()\n query.prepare(\"\"\"UPDATE playlists\n SET position = position - 1\n WHERE position > (SELECT position FROM playlists\n WHERE id IS :playlistId)\"\"\")\n query.bindValue(\":playlistId\", playlistId)\n query.exec_()\n query.prepare(\"\"\"DELETE FROM playlists\n WHERE id IS :playlistId\"\"\")\n query.bindValue(\":playlistId\", playlistId)\n query.exec_()\n query.finish()\n db.commit()\n self.endRemoveRows()\n\n def movePlaylist(self, fromPosition, toPosition):\n logging.debug(\"moving from %d to %d\" % (fromPosition, toPosition))\n db = QSqlDatabase.database()\n db.transaction()\n query = QSqlQuery()\n # Note: we could use named placeholders, but PyQt4-4.7.3 seems to not bind them correctly if they occur more than once\n if fromPosition > toPosition:\n query.prepare(\"\"\"UPDATE playlists\n SET position = CASE\n WHEN position == ? THEN ?\n ELSE position + 1 END\n WHERE position BETWEEN ? AND ?\"\"\")\n query.bindValue(0, fromPosition)\n query.bindValue(1, toPosition)\n query.bindValue(2, toPosition)\n query.bindValue(3, fromPosition)\n else:\n query.prepare(\"\"\"UPDATE playlists\n SET position = CASE\n WHEN position == ? THEN ?\n ELSE position - 1 END\n WHERE position BETWEEN ? 
AND ?\"\"\")\n query.bindValue(0, fromPosition)\n query.bindValue(1, toPosition)\n query.bindValue(2, fromPosition)\n query.bindValue(3, toPosition)\n query.exec_()\n query.finish()\n db.commit()\n # TODO: we should emit dataChanged even if it is ignored\n# self.dataChanged(index, index)\n\n def renamePlaylist(self, playlistId, newPlaylistName):\n logging.debug(\"renaming playlist to %s\" % newPlaylistName)\n row = self.getPlaylistPosition(playlistId)\n index = self.createIndex(row, self.PLAYLIST_NAME, playlistId)\n self.setData(index, QVariant(newPlaylistName))\n \n def getPlaylistIdFromPosition(self, position):\n logging.debug(\"getting id from pos %d\" % position)\n query = QSqlQuery()\n query.prepare(\"\"\"SELECT id FROM playlists WHERE position IS :position\"\"\")\n query.bindValue(\":position\", position)\n query.exec_()\n if not query.next():\n raise RuntimeError(\"could not retrieve playlistId from position: %s\" % query.lastError().text())\n playlistId = query.value(0).toInt()[0]\n query.finish()\n return playlistId\n \n","repo_name":"donlorenzo/audiophil","sub_path":"src/audiophil/playlists.py","file_name":"playlists.py","file_ext":"py","file_size_in_byte":22523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29309951569","text":"#Juego adivina mi número\nimport random\nn=random.randint(1,20)\nintentos=5\nwhile intentos>0:\n nj=int(input(\"ingrese un número \"))\n if nj==n:\n print(\"Adivinaste, mi número era \",n)\n break\n elif nj<n:\n print(\"numero muy PEQUEÑO\")\n elif nj>n:\n print(\"numero muy GRANDE\")\n intentos-=1\n print(\"te quedan \", intentos,\" Intentos\")\nif intentos==0:\n print(\"No adivinaste, mi número era \",n)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_5d24da85d73aa1747c3bea521d6f16ab.py","file_name":"hito1_ej12_5d24da85d73aa1747c3bea521d6f16ab.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"8723774260","text":"import os\nimport xml.etree.ElementTree as ElementTree\nET = ElementTree\n\nOUTPUT = os.path.join(os.getcwd(),'out')\nVERSION = \"2011\"\n\nclass Constant(object):\n def __init__(self, xml, value=\"\"):\n self.name = xml.findtext(\"name\")\n self.description = xml.findtext(\"helpstring\")\n self.value = xml.findtext(\"value\")\n\n if self.value is None:\n self.value = value\n pass\n\n def toXML(self):\n element = ET.Element(\"constant\")\n ET.SubElement(element, \"name\").text = self.name\n ET.SubElement(element, \"value\").text = str(self.value)\n ET.SubElement(element, \"description\").text = self.description\n return element\n\nclass Typedef(object):\n def __init__(self, xml):\n self.name = xml.findtext('name')\n self.version = VERSION\n self.constants = [Constant(x, i) for i, x in enumerate(xml.find('constants'))]\n\n def toXML(self):\n element = ET.Element(\"typedef\")\n element.attrib[\"name\"] = self.name\n element.attrib[\"version\"] = self.version\n constants = ET.SubElement(element, \"constants\")\n [constants.append(x.toXML()) for x in self.constants]\n return element\n\nclass Parameter(object):\n def __init__(self, xml):\n self.type = xml.findtext('type')\n self.name = xml.findtext('name')\n self.attributes = [x.text for x in xml.find('attributes')]\n self.default = xml.findtext('attributes/defaultvalue')\n\n self.retval = 'out' in self.attributes\n self.optional = 'optional' in self.attributes or self.default is not None\n\n def toXML(self):\n element = ET.Element(\"parameter\")\n if 
self.optional == True:\n element.attrib[\"optional\"] = str(self.optional)\n ET.SubElement(element, \"default\").text = self.default\n ET.SubElement(element, \"name\").text = self.name\n ET.SubElement(element, \"type\").text = self.type\n return element\n\nclass Member(object):\n def __init__(self, xml):\n self.name = xml.findtext(\"name\")\n self.description = xml.findtext(\"attributes/helpstring\")\n self.parameters = [Parameter(x) for x in xml.find('parameters')]\n self.version = VERSION\n self.type = None\n\n self.attributes = [x.text for x in xml.find('attributes')]\n self.is_property = 'propput' in self.attributes or 'propget' in self.attributes\n\n\n for x in self.parameters:\n if x.retval == True:\n self.type = x.type\n\n self.parameters = [x for x in self.parameters if x.retval is False]\n\n if self.is_property and len(self.parameters) == 1:\n self.parameters = []\n\n syntax = self.type if self.type is not None else \"void\"\n syntax += \" \" + self.name\n\n if len(self.parameters) > 0:\n syntax += \"(\\n \"\n syntax += \",\\n \".join(\n [\"{0} {1}\".format(\n x.type if x.type is not None else \"void\",\n x.name\n ) for x in self.parameters]\n )\n syntax += \"\\n)\"\n elif self.is_property is not True:\n syntax += \"()\"\n\n syntax += \";\"\n\n self.syntax = [\n syntax\n ]\n\n\n def toXML(self):\n element = ET.Element(\"member\")\n element.attrib[\"name\"] = self.name\n element.attrib[\"version\"] = self.version\n if self.is_property:\n element.attrib[\"type\"] = \"property\"\n else:\n element.attrib[\"type\"] = \"method\"\n\n returns_xml = ET.SubElement(element, \"returns\")\n type_xml = ET.SubElement(returns_xml, \"type\")\n type_xml.text = \"void\"\n if self.type is not None:\n type_xml.text = self.type\n\n ET.SubElement(element, \"description\").text = self.description\n\n def addSyntax(text):\n ET.SubElement(element, \"syntax\").text = text\n [addSyntax(x) for x in self.syntax]\n parameters = ET.SubElement(element, \"parameters\")\n [parameters.append(x.toXML()) for x in self.parameters if x.retval is False]\n return element\n\nclass Interface(object):\n def __init__(self, xml=None):\n if xml is not None:\n self.name = xml.findtext(\"name\")\n self.description = xml.findtext(\"attributes/helpstring\")\n self.version = VERSION\n self.members = {}\n\n definitions = xml.find(\"definitions\")\n if definitions is not None:\n member = None\n for x in definitions.findall('function'):\n self.addMember(x)\n\n methods = xml.find('methods')\n if methods is not None:\n for x in methods:\n self.addMember(x)\n\n properties = xml.find('properties')\n if properties is not None:\n for x in properties:\n self.addMember(x, True)\n\n def addMember(self, xml, is_property=False):\n member = Member(xml)\n\n if is_property is not False:\n member.is_property = True\n\n if member.name in self.members.keys():\n self.members[member.name] = self.combine_members(\n member,\n self.members[member.name]\n )\n else:\n self.members[member.name] = member\n\n def combine_members(self, m1, m2):\n temp = m1\n\n if m1.type is None and m2.type is not None:\n temp.type = m2.type\n if m2.type is None and m1.type is not None:\n temp.type = m1.type\n\n if m1.description != m2.description:\n temp.description = m1.description + \"\\n\" + m2.description\n\n if len(m1.parameters) > len(m2.parameters):\n temp.parameters = m1.parameters\n else:\n temp.parameters = m2.parameters\n\n if (m1.syntax is not None and\n m2.syntax is not None and\n m1.syntax != m2.syntax):\n temp.syntax = m1.syntax + m2.syntax\n\n return temp\n\n def 
toXML(self):\n element = ET.Element(\"interface\")\n element.attrib[\"name\"] = self.name\n element.attrib[\"version\"] = self.version\n ET.SubElement(element, \"description\").text = self.description\n members = ET.SubElement(element, \"members\")\n [members.append(self.members[x].toXML()) for x in self.members]\n return element\n\ndef combine(xml1, xml2):\n\n if xml2 is None:\n return xml1\n elif xml1 is None:\n return xml2\n\n root1 = xml1.getroot()\n root2 = xml2.getroot()\n\n if root1.tag != root2.tag:\n raise Exception(\"Root tag is not the same: {0} / {1}\".format(root1.tag, root2.tag))\n\n temp = ET.Element(root1.tag)\n temp.attrib = dict(\n list(root1.attrib.items()) +\n list(root2.attrib.items())\n )\n\n for root in [root1, root2]:\n [temp.append(x) for x in root if len(x) > 0]\n\n return ET.ElementTree(temp)\n\ndef make_interface(xml, directory):\n interface_xml = Interface(xml)\n\n output_file = os.path.join(\n directory,\n interface_xml.name + \".xml\"\n )\n\n tree = ET.ElementTree(interface_xml.toXML())\n if os.path.exists(output_file):\n tree = combine(tree, ET.parse(output_file))\n tree.write(output_file)\n\ndef make_typedef(xml, directory):\n typedef = Typedef(xml)\n\n output_file = os.path.join(\n directory,\n typedef.name + \".xml\"\n )\n\n tree = ET.ElementTree(typedef.toXML())\n if os.path.exists(output_file):\n print(output_file)\n tree = combine(tree, ET.parse(output_file))\n\n tree.write(output_file)\n\ndef parse_definitions(xml, out_dir):\n if xml is not None:\n for interface in xml.findall('interface'):\n make_interface(interface, out_dir)\n\n for typedef in xml.findall('typedef'):\n make_typedef(typedef, out_dir)\n\ndef parse_xml(filename, output):\n root = ElementTree.parse(filename).getroot()\n out_dir = os.path.join(OUTPUT, output)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n parse_definitions(root, out_dir)\n parse_definitions(root.find('definitions'), out_dir)\n\n for element in root.findall('library'):\n parse_definitions(element.find('definitions'), out_dir)\n\ndef main():\n idls = [\n ('idl/cwmfc.idl.xml', 'CWCom'),\n ('idl/cv32old.idl.xml', 'CVScripting'),\n ('idl/cv32def.idl.xml', 'CVScripting/Enumerators'),\n ('idl/cv32Gateway.idl.xml', 'CVCom'),\n ('idl/enum.idl.xml', 'CWCom/Enumerators')\n ]\n for idl in idls:\n print(*idl)\n parse_xml(*idl)\n\nif __name__ == '__main__':\n main()\n","repo_name":"jonathan-beckwith/midl-to-xml","sub_path":"transform_xml.py","file_name":"transform_xml.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"48336555483","text":"import logging\nfrom typing import List, Set\n\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.base_inference import BaseInference\nfrom beanmachine.ppl.inference.proposer.base_proposer import BaseProposer\nfrom beanmachine.ppl.inference.proposer.nmc import (\n SingleSiteHalfSpaceNMCProposer,\n SingleSiteRealSpaceNMCProposer,\n SingleSiteSimplexSpaceNMCProposer,\n)\nfrom beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (\n SingleSiteAncestralProposer,\n)\nfrom beanmachine.ppl.model.rv_identifier import RVIdentifier\nfrom beanmachine.ppl.world import World\nfrom beanmachine.ppl.world.utils import BetaDimensionTransform, is_constraint_eq\n\nLOGGER = logging.getLogger(\"beanmachine\")\n\n\nclass SingleSiteNewtonianMonteCarlo(BaseInference):\n \"\"\"\n Single site Newtonian Monte Carlo [1]. 
This algorithm selects a proposer\n based on the support of the random variable. Valid supports include real, positive real, and simplex.\n Each site is proposed independently.\n\n [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`\n\n Args:\n real_space_alpha: alpha value for real space as specified in [1], defaults to 10.0\n real_space_beta: beta value for real space as specified in [1], defaults to 1.0\n \"\"\"\n\n def __init__(\n self,\n real_space_alpha: float = 10.0,\n real_space_beta: float = 1.0,\n ):\n self._proposers = {}\n self.alpha = real_space_alpha\n self.beta = real_space_beta\n\n def get_proposers(\n self,\n world: World,\n target_rvs: Set[RVIdentifier],\n num_adaptive_sample: int,\n ) -> List[BaseProposer]:\n proposers = []\n for node in target_rvs:\n if node not in self._proposers:\n self._proposers[node] = self._init_nmc_proposer(node, world)\n proposers.append(self._proposers[node])\n return proposers\n\n def _init_nmc_proposer(self, node: RVIdentifier, world: World) -> BaseProposer:\n \"\"\"\n A helper function that initializes an NMC proposer for the given node. The type\n of NMC proposer will be chosen based on a node's support.\n \"\"\"\n distribution = world.get_variable(node).distribution\n support = distribution.support\n if is_constraint_eq(support, dist.constraints.real):\n return SingleSiteRealSpaceNMCProposer(node, self.alpha, self.beta)\n elif any(\n is_constraint_eq(\n support,\n (dist.constraints.greater_than, dist.constraints.greater_than_eq),\n )\n ):\n return SingleSiteHalfSpaceNMCProposer(node)\n elif is_constraint_eq(support, dist.constraints.simplex) or (\n isinstance(support, dist.constraints.independent)\n and (support.base_constraint == dist.constraints.unit_interval)\n ):\n return SingleSiteSimplexSpaceNMCProposer(node)\n elif isinstance(distribution, dist.Beta):\n return SingleSiteSimplexSpaceNMCProposer(\n node, transform=BetaDimensionTransform()\n )\n else:\n LOGGER.warning(\n f\"Node {node} has unsupported constraints. \"\n + \"Proposer falls back to SingleSiteAncestralProposer.\\n\"\n )\n return SingleSiteAncestralProposer(node)\n","repo_name":"facebookresearch/beanmachine","sub_path":"src/beanmachine/ppl/inference/single_site_nmc.py","file_name":"single_site_nmc.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"77"}
{"seq_id":"15171714948","text":"def priority_of(letter):\n letter_ord = ord(letter)\n if letter_ord < ord('a'):\n return 27 + letter_ord - ord('A')\n return 1 + letter_ord - ord('a')\n\n\ndef find_dup_item(bag):\n half = int(len(bag) / 2)\n bag1 = bag[0 : half]\n bag2 = bag[half :]\n for item in bag1:\n if item in bag2:\n return item\n\n\ndef find_common_item(lines):\n for item in lines[0]:\n if item in lines[1] and item in lines[2]:\n return item\n\n\ndef main():\n badges_tally = 0\n\n file = open('input', 'r')\n lines = file.readlines()\n priority_tally = 0\n trio = []\n for line in lines:\n line = line.rstrip()\n if line == '':\n break\n\n trio.append(line)\n if (len(trio) == 3):\n badges_tally += priority_of(find_common_item(trio))\n trio = []\n\n print('badges tally: ' + str(badges_tally))\n\n\nif __name__ == '__main__':\n main()","repo_name":"Rexa-maker/aoc2022","sub_path":"day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29435596089","text":"# por favor escribe aquí tu función\ndef es_primo(numero):\n conta = 1\n divi = 1\n while (divi < numero):\n if numero % divi == 0:\n conta = conta + 1\n divi = divi + 1\n if conta == 2:\n valor = True\n elif conta != 2:\n valor = False\n return valor","repo_name":"pabloschwarzenberg/grader","sub_path":"tema2_p1/tema2_p1_e938c0a684e850d6b3d142e180c35bf8.py","file_name":"tema2_p1_e938c0a684e850d6b3d142e180c35bf8.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"27420885501","text":"'''\nUtility functions for working with requests\nContributors: \n - minhdq99hp@gmail.com\n'''\nfrom __future__ import unicode_literals\nfrom magic import from_buffer\nimport collections.abc\nimport io\nimport os\nimport traceback\nfrom pathlib import Path\nfrom time import sleep\nfrom uuid import uuid4\nfrom rest_framework.exceptions import ParseError\n\nimport requests\nimport magic\nfrom datetime import datetime\n# import youtube_dl\nfrom PIL import Image\n\nfrom utilities.image import convert_image\n\n\nclass RetryLimitExceededException(Exception):\n pass\n\ndef send_request(url, data={}, method='post', headers={}, timeout=60, max_retry=3, delay=0, silent=True, exc_msg=''):\n '''\n Send HTTP request to url with retry and delay if needed.\n\n The sending data should be json. Otherwise, you have to specify headers to send the request.\n\n If not silent:\n RetryLimitExceededException will be raised after max_retry retries.\n else:\n return None\n '''\n\n retry = 0\n while retry < max_retry:\n try:\n if isinstance(data, dict):\n res = requests.request(method, url, json=data, timeout=timeout, headers=headers)\n elif data is not None:\n res = requests.request(method, url, data=data, timeout=timeout, headers=headers)\n else:\n res = requests.request(method, url, timeout=timeout, headers=headers)\n return res\n except requests.exceptions.RequestException:\n traceback.print_exc()\n retry += 1\n sleep(delay)\n\n if not silent:\n raise RetryLimitExceededException(f\"Unable to send request after {max_retry} retries.\")\n\n return None\n\n\nclass UnexpectedFileFormat(Exception):\n pass\n\nclass NotSupportedType(Exception):\n pass\n\ndef download_file_from_url(url, expected_types=[], headers={}, timeout=None, output_dir='', filename=None):\n '''\n Download file and write it to output_path.\n\n Args:\n url (str): direct link of the file\n expected_types: the expected types of the file, must be specified to guess the extension.\n\n Return:\n dict: \n {\n 'status': 0 if success\n 'mime_type': e.g. image/png, image/jpeg. Return None if the mime type can't be guessed.\n 'path': the downloaded filepath.\n }\n '''\n if filename is None:\n filename = f'{uuid4()}'\n filepath = os.path.join(output_dir, filename)\n\n mime_type = ''\n media_type = 'undefined'\n\n result = {\n 'status': 1,\n 'path': filepath,\n 'filename': filename,\n 'type': media_type\n }\n buf = b''\n\n try:\n with requests.get(url, headers=headers, stream=True, timeout=timeout) as r:\n r.raise_for_status()\n with open(filepath, 'wb') as f:\n for i, chunk in enumerate(r.iter_content(chunk_size=8192)):\n #if chunk: \n f.write(chunk)\n\n if i == 0:\n # get the first chunk to guess mime_type in the next step\n buf = chunk\n \n except requests.exceptions.RequestException:\n traceback.print_exc()\n Path(filepath).unlink(missing_ok=True)\n return result\n\n try:\n mime_type = magic.from_buffer(buf, mime=True)\n except Exception:\n mime_type = ''\n \n result['mime_type'] = mime_type if mime_type else None\n\n # correct the filename with the guessed extension\n if mime_type.startswith(('video/', 'image/', 'audio/', 'text/')):\n filename = f'{filename}.{mime_type.split(\"/\")[1]}'\n new_filepath = os.path.join(output_dir, filename)\n os.rename(filepath, new_filepath)\n result['path'] = new_filepath\n result['filename'] = filename\n \n\n if mime_type == 'application/octet-stream':\n if 'sound' in expected_types:\n mime_type = 'audio/wav'\n result['type'] = 'sound'\n result['status'] = 0\n return result\n else:\n return result\n elif not expected_types:\n result['status'] = 0\n return result\n elif mime_type.startswith('image/') and 'image' in expected_types:\n media_type = 'image'\n elif mime_type.startswith('video/') and 'video' in expected_types:\n media_type = 'video'\n elif mime_type.startswith('audio/') and 'sound' in expected_types:\n media_type = 'sound'\n elif mime_type.startswith('text/') and 'text' in expected_types:\n media_type = 'text'\n else:\n return result\n \n result['type'] = media_type\n result['status'] = 0\n return result\n\n\ndef download_file_from_youtube_url(url, ydl_opts={}, output_dir='', filename=None):\n '''Download video from YouTube\n \n Will only return True if the extension is mp4.\n '''\n if filename is None:\n filename = f'{uuid4()}.mp4'\n \n filepath = os.path.join(output_dir, filename)\n extension = None\n\n result = {\n 'status': 1,\n 'path': filepath\n 
}\n\n opts = {\n 'noplaylist': True,\n 'format': 'bestvideo[height<=1080,ext=mp4]+bestaudio/best',\n 'quiet': True,\n # 'merge_output_format': 'mp4',\n 'outtmpl': filepath\n }\n opts.update(ydl_opts)\n\n if 'mp4' in opts['format']:\n extension = 'mp4'\n\n try:\n with youtube_dl.YoutubeDL(opts) as ydl:\n ydl.download([url])\n\n except Exception:\n Path(filepath).unlink(missing_ok=True)\n return result\n \n \n result['filename'] = filename\n result['mime_type'] = 'video/mp4'\n result['status'] = 0\n return result\n\n\n\ndef download_sound_file_from_youtube_url(url, ydl_opts={}, output_dir='', filename=None):\n '''Download sound from YouTube\n \n Will only return True if the extension is m4a.\n '''\n if filename is None:\n filename = f'{uuid4()}.m4a'\n filepath = os.path.join(output_dir, filename)\n extension = None\n\n result = {\n 'status': 1,\n 'path': filepath\n }\n\n opts = {\n 'noplaylist': True,\n 'format': 'bestaudio/best[ext=m4a]',\n 'quiet': True,\n 'outtmpl': filepath\n }\n opts.update(ydl_opts)\n\n if 'mp4' in opts['format']:\n extension = 'm4a'\n\n try:\n with youtube_dl.YoutubeDL(opts) as ydl:\n ydl.download([url])\n\n except Exception:\n return result\n \n \n result['filename'] = filename\n result['extension'] = extension\n result['status'] = 0\n return result\n\n\n\ndef parse_int_or_400(data, key, default=None):\n \"\"\"\n Parse the data[key] to integer.\n \"\"\"\n if key not in data:\n return default\n\n try:\n value = int(data.get(key))\n except ValueError:\n raise ParseError(detail=f\"Unable to parse param '{key}'.\")\n else:\n return value\n\n\ndef parse_bool_or_400(data, key, default=None):\n \"\"\"\n Parse the data[key] to boolean\n \"\"\"\n if key not in data:\n return default\n \n if isinstance(data[key], bool):\n return data[key]\n \n return data[key].lower() in ('true', '1')\n\n\ndef parse_datetime_or_400(data, key, default=None, format='%Y-%m-%d'):\n \"\"\"\n Parse the data[key] to datetime\n \"\"\"\n if key not in data:\n return default\n \n try:\n d = datetime.strptime(data[key], format)\n except ValueError:\n raise ParseError(detail=f\"Unable to parse param '{key}'.\")\n else:\n return d\n\n\ndef parse_string_array_or_400(data, key, default=None, delimiter=','):\n \"\"\"\n Parse the data[key] to array of string\n \"\"\"\n if key not in data:\n return default\n\n d = data[key].split(delimiter)\n return d\n\n\ndef parse_int_array_or_400(data, key, default=None, delimiter=','):\n \"\"\"\n Parse the data[key] to array of string\n \"\"\"\n if key not in data:\n return default\n\n if isinstance(data[key], list):\n return data[key]\n \n try:\n return [int(_) for _ in data[key].split(delimiter)]\n except ValueError:\n raise ParseError(detail=f\"Unable to parse param '{key}'.\")\n","repo_name":"hieutt99/aidudu","sub_path":"backend/utilities/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5954908883","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Optional\n\n\ndef get_video_files(source, files: Optional[list[str]]) -> list:\n # список файлов с видео для обработки\n list_of_videos = []\n\n source_path = Path(source)\n\n if source_path.is_dir():\n for entry in source_path.iterdir():\n # check if it is a file\n if entry.is_file() and entry.suffix == \".mp4\":\n if files is None:\n list_of_videos.append(str(entry))\n else:\n if entry.stem in files:\n list_of_videos.append(str(entry))\n\n 
else:\n list_of_videos.append(str(source))\n\n return list_of_videos\n\n\ndef create_session_folder(yolo_version, output_folder, task: str) -> str:\n now = datetime.now()\n\n session_folder_name = f\"{now.year:04d}_{now.month:02d}_{now.day:02d}_{now.hour:02d}_{now.minute:02d}_\" \\\n f\"{now.second:02d}_{yolo_version}_{task}\"\n\n session_folder = str(Path(output_folder) / session_folder_name)\n\n try:\n os.makedirs(session_folder, exist_ok=True)\n print(f\"Directory '{session_folder}' created successfully\")\n except OSError as error:\n print(f\"Directory '{session_folder}' can not be created. {error}\")\n\n return str(session_folder)\n","repo_name":"dimarsoft/yolov7","sub_path":"path_tools.py","file_name":"path_tools.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32524668299","text":"#!/usr/bin python\n# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport jieba\nfrom collections import OrderedDict\nimport os\n\ndef readListfromTxt(filePath):\n resultList = []\n fr = open(filePath,'r',encoding='utf-8')\n while True:\n line = fr.readline()\n if line:\n line = line.strip()\n if line:\n resultList.append(line)\n else:\n break\n fr.close()\n return resultList\n\ndef writeList2csv(filePath,infoList):\n fw = open(filePath,'w',encoding='utf-8')\n for itemList in infoList:\n line = ''\n if isinstance(itemList,list):\n line = ','.join(itemList)\n if isinstance(itemList,str):\n line = itemList.strip()\n if line:\n fw.write(line + '\\n')\n fw.close()\n\n\nif __name__ == '__main__':\n root_path = 'D:/data/cache'\n fnames = ['1_图.txt','2_钱.txt','3_床.txt','4_地址.txt']\n\n dic = OrderedDict()\n tags = []\n lines = []\n for f in fnames:\n tag = f.strip().split('.')[0].split('_')[-1]\n fpath = os.path.join(root_path,f)\n line_list = readListfromTxt(fpath)\n\n for line in line_list:\n dic[tag] = line\n tags.append(tag)\n lines.append(line)\n writeList2csv(root_path+'/input.txt',lines)\n writeList2csv(root_path+'/label.txt',tags)","repo_name":"michaelwangtd/mayiPlayground","sub_path":"otherserver/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39618058414","text":"#Reber Ferhat Uluca - 170401053\r\n\r\nimport random\r\nimport time\r\n\r\ndef A(x, y, z, d):\r\n return (~x & y) | (~d & z)\r\ndef B(x, y, z, d):\r\n return (x & ~y) | (d & ~z)\r\ndef C(x, y, z, d):\r\n return (x | (~y)) ^ (d | (~z))\r\ndef D(x, y, z, d):\r\n return ((~x) | y) ^ ((~d) | z)\r\ndef E(x, y, z, d):\r\n return x ^ y ^ z ^ d\r\ndef shift(k, bits):\r\n bits = bits % 8\r\n k = k % (2 ** 8)\r\n upper = (k << bits) % (2 ** 8)\r\n result = upper | (k >> (8 - bits))\r\n return result\r\n\r\ndef operation(x, y, z, d, c, s):\r\n f_a = A(x, y, z, d)\r\n f_b = B(x, y, z, d)\r\n f_c = C(x, y, z, d)\r\n f_d = D(x, y, z, d)\r\n f_e = E(f_a, f_b, f_c, f_d)\r\n sh = shift(x+c, s)\r\n return (f_e + sh) % 2 ** 8\r\n\r\ndef final(filename):\r\n f = open(filename, \"r\")\r\n msg = f.read()\r\n\r\n bits = []\r\n for i in msg:\r\n binary = bin(int(i))[2:]\r\n while len(binary) < 8:\r\n binary += '0' #padding for each 8 bits\r\n bits.append(int(binary, 2))\r\n\r\n while len(bits) % 4 != 0:\r\n padding = 0x00 #padding for all bits\r\n bits.append(padding)\r\n\r\n A = 0x67\r\n B = 0xaf\r\n C = 0x98\r\n D = 0x25\r\n\r\n cons = [0xfd, 0xa4, 0x4b, 0xf6, 0xbe,\r\n 0x28, 0xea, 0xd4, 0x48, 0xd9,\r\n 0x49, 0xf6, 
0xc0, 0x26, 0xe9, 0xd]\r\n\r\n shifts = [7, 12, 17, 22, 5, 9, 14, 20,\r\n 4, 11, 16, 23, 6, 10, 15, 21]\r\n\r\n for i in range(len(bits)//4):\r\n for j in range(16):\r\n x = bits[i * 4 + 0]\r\n y = bits[i * 4 + 1]\r\n z = bits[i * 4 + 2]\r\n d = bits[i * 4 + 3]\r\n\r\n bits[i * 4 + 0] = d\r\n bits[i * 4 + 1] = operation(x, y, z, d, cons[j], shifts[j])\r\n bits[i * 4 + 2] = y\r\n bits[i * 4 + 3] = z\r\n\r\n A = (A + x) % (2 ** 8)\r\n B = (B + y) % (2 ** 8)\r\n C = (C + z) % (2 ** 8)\r\n D = (D + d) % (2 ** 8)\r\n\r\n f.close()\r\n return '{:008b}'.format(A)+'{:008b}'.format(B)+'{:008b}'.format(C)+'{:008b}'.format(D)\r\n\r\ndef main():\r\n start = time.time()\r\n\r\n hashsum = open(\"HASHSUM\", \"w\")\r\n hashed = final(\"001.txt\")\r\n hashsum.write(\"001.txt ozet:\" + hashed)\r\n\r\n for i in range(2, 101):\r\n if (time.time() - start) >= 10*60:\r\n print(\"timed out\")\r\n hashsum.close()\r\n f.close()\r\n return\r\n\r\n f = open(\"{0:03}.txt\".format(i), \"w\")\r\n x = random.getrandbits(32)\r\n hashed = int(hashed, 2)\r\n sum = bin(x + hashed)[2:]\r\n f.write(sum)\r\n f.close()\r\n\r\n hash2 = final(\"{0:03}.txt\".format(i))\r\n while hash2[:8] != \"00000000\":\r\n if (time.time() - start) >= 10*60:\r\n print(\"timed out\")\r\n hashsum.close()\r\n f.close()\r\n return\r\n\r\n f = open(\"{0:03}.txt\".format(i), \"w\")\r\n x = random.getrandbits(32)\r\n sum = bin(x + hashed)[2:]\r\n f.write(sum)\r\n f.close()\r\n hash2 = final(\"{0:03}.txt\".format(i))\r\n\r\n hashsum.write(\"\\n{0:03}.txt\".format(i) + \" ozet:\" + hash2 +\r\n \" random:\" + '{:032b}'.format(x))\r\n\r\n hashed = hash2\r\n\r\n hashsum.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"nyucel/kriptografi","sub_path":"final/170401053/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"29736270723","text":"import errno\nimport os\nfrom unittest.mock import Mock, call\n\nimport pytest\nfrom transmissionrpc import Torrent\n\nfrom plexpost import post_processor, show_flow\nfrom plexpost.sftp_factory import SFTPFactory\n\n\n@pytest.fixture\ndef automator(transmission, sftpserver, remote_base_dir, download_dir):\n return post_processor.PostProcessor(transmission,\n Mock(),\n SFTPFactory({'url': sftpserver.host,\n 'port': sftpserver.port,\n 'username': 'user',\n 'password': '',\n 'remote_dir': remote_base_dir}),\n show_flow.ShowPostProcessor({'download_dir_tag': download_dir}))\n\n\n@pytest.mark.parametrize('download_dir', ['tmp/tv'])\ndef test_should_process_any_download_when_they_are_under_the_shows_folder(automator, transmission,\n download_dir):\n torrents = [create_torrent(1, 0, 'tmp/tv/The Simpsons/1'),\n create_torrent(2, 0, 'tmp'),\n create_torrent(3, 0, 'tmp/tv/Another Show')]\n transmission.get_torrents.return_value = torrents\n automator.run()\n transmission.remove_torrent.assert_has_calls([call(1), call(3)], any_order=True)\n\n\n@pytest.mark.parametrize('download_dir', ['tmp/Show Name/2'])\ndef test_should_put_in_show_name_season_subdirectory(completed_torrents,\n automator,\n sftpclient,\n remote_base_dir,\n download_dir):\n video = 'show.mkv'\n completed_torrents.return_value = [completed_torrent_with_data_files(download_dir, [video])]\n automator.run()\n assert sftpclient.isfile(remote_base_dir + '/tv/Show Name/2/' + video)\n\n\ndef create_torrent(id, size_left, download_dir):\n name = 'Torrent ' + str(id)\n fields = {'id': id, 'name': name, 'sizeWhenDone': 1, 
'leftUntilDone': size_left, 'downloadDir': download_dir}\n return Torrent(None, fields)\n\n\ndef completed_torrent_with_data_files(prefix, files):\n tor = Mock()\n tor.progress = 100\n tor.id = 1\n tor.downloadDir = prefix\n tor.name = 'Mock 1'\n data_files = {}\n for idx, f in enumerate(files):\n data_files[idx] = create_download_file(prefix, f)\n tor.files.return_value = data_files\n return tor\n\n\ndef create_download_file(prefix, filename):\n return {'selected': True, 'priority': 'normal', 'size': 1, 'name': touch(prefix, filename), 'completed': 1}\n\n\ndef touch(prefix, file):\n path = prefix + '/' + file\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(path, 'a'):\n os.utime(path)\n return file\n","repo_name":"fluffy-cat/plexpost","sub_path":"tests/test_show_flow.py","file_name":"test_show_flow.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5043870327","text":"import os\nfrom setuptools import setup\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.txt')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name = 'django-rsync',\n version = '0.1',\n packages = ['django_rsync'],\n include_package_data = True,\n license = 'Apache License, Version 2.0', \n description = 'Simple deployment script using rsync for django projects',\n long_description = README,\n url = 'https://github.com/hisie/django-rsync',\n author = 'hisie',\n author_email = 'dcebrian@serincas.com',\n classifiers = [\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)","repo_name":"hisie/django-rsync","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4491965839","text":"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math \n\n\ndf=pd.read_csv('https://raw.githubusercontent.com/vidoolytech/hiringtask/master/machine_learning/ad_org/data/mn/ad_org_train.csv')\n\n\n\ndur=[]\nfor a in df.duration:\n dur.append(a[2:])\n\n\ndur_sec=[]\nfor t in dur:\n h=t.find('H')\n m=t.find('M')\n s=t.find('S')\n sec=0\n if h>=0:\n sec=sec+int(t[:h])*60*60\n if m>=0:\n sec=sec+int(t[h+1:m])*60\n if s>=0:\n sec=sec+int(t[m+1:s])\n elif m>=0:\n sec=sec+int(t[:m])*60\n if s>=0:\n sec=sec+int(t[m+1:s])\n else:\n sec=sec+int(t[:s])\n dur_sec.append(sec)\n\n\ndf['dur_sec']=dur_sec\n\n\ndf.head()\n\n\ndf_new=pd.get_dummies(df,columns=['category'])\n\n\n\ndf_new.head()\n\n\ndel df_new['duration']\n\n\n\ndel df_new['published']\n\ndel 
df_new['vidid']\n\n\n\ndf_new.head()\n\nx=df_new.iloc[:,1:]\ny=df_new.iloc[:,0]\n\nx=x[x.views!='F']\n\nx=x[x.likes!='F']\n\n\nx=x[x.dislikes!='F']\n\n\nx=x[x.comment!='F']\n\nx.views=pd.to_numeric(x.iloc[:,0])\nx.likes=pd.to_numeric(x.iloc[:,1])\nx.dislikes=pd.to_numeric(x.iloc[:,2])\nx.comment=pd.to_numeric(x.iloc[:,3])\n\n\nfrom sklearn.linear_model import LinearRegression\n\nreg=LinearRegression()\n\nreg.fit(x,y[x.index])\n\n\nreg.coef_\n\nreg.intercept_\n\nreg.score(x,y[x.index])\n###0.002997376099930671\n\ndf_1=pd.read_csv('https://raw.githubusercontent.com/vidoolytech/hiringtask/master/machine_learning/ad_org/data/mn/ad_org_test.csv')\n\n\ndur=[]\nfor a in df_1.duration:\n dur.append(a[2:])\n\n\ndur_sec=[]\nfor t in dur:\n h=t.find('H')\n m=t.find('M')\n s=t.find('S')\n sec=0\n if h>=0:\n sec=sec+int(t[:h])*60*60\n if m>=0:\n sec=sec+int(t[h+1:m])*60\n if s>=0:\n sec=sec+int(t[m+1:s])\n elif m>=0:\n sec=sec+int(t[:m])*60\n if s>=0:\n sec=sec+int(t[m+1:s])\n else:\n sec=sec+int(t[:s])\n dur_sec.append(sec)\n\n\n\ndf_1['dur_sec']=dur_sec\n\ndf_1.head()\ndf_1.head()\n\n\n\ndf_new_1=pd.get_dummies(df_1,columns=['category'])\n\ndf_new.head()\n\ndel df_new_1['duration']\n\ndel df_new_1['published']\n\ndel df_new_1['vidid']\n\ndf_new_1.head()\nx_1 = df_new_1\nx_1=x_1[x_1.views!='F']\nx_1=x_1[x_1.likes!='F']\nx_1=x_1[x_1.dislikes!='F']\nx_1=x_1[x_1.comment!='F']\n\nx_1.views=pd.to_numeric(x_1.iloc[:,1])\nx_1.likes=pd.to_numeric(x_1.iloc[:,2])\nx_1.dislikes=pd.to_numeric(x_1.iloc[:,3])\nx_1.comment=pd.to_numeric(x_1.iloc[:,4])\ny_1 = reg.predict(x_1)\nY_predict=[]\nfor i in range(len(y_1)):\n if (y_1[i] >=0):\n Y_predict.append(y_1[i])\n else:\n Y_predict.append(0)","repo_name":"omiswami/Vidooly","sub_path":"ad_data.py","file_name":"ad_data.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32273107568","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Module summary description.\n\nMore detailed description.\n\"\"\"\n\nimport multiprocessing as mp\nfrom functools import partial\n\nimport numpy as np\nimport networkx as nx\n\nfrom math import sqrt as msqrt\n\nfrom numba import njit\nfrom shapely.errors import TopologicalError\nfrom shapely.geometry import MultiPolygon, GeometryCollection, Polygon, box, LineString, \\\n Point, MultiLineString, JOIN_STYLE\nfrom shapely.ops import cascaded_union, linemerge, unary_union, transform\n\nfrom gistools.coordinates import r_tree_idx\nfrom gistools.graph import part_graph\nfrom gistools.utils.check.type import is_iterable, type_assert\n\n\ndef add_points_to_line(line, threshold):\n \"\"\" Add point coordinates to line geometry\n\n :param line:\n :param threshold:\n :return:\n \"\"\"\n return linemerge(cut_(line, threshold))\n\n\ndef build_partitions(polygons, weights, nparts, tpweights,\n weight_attr, split_name, recursive, **metis_options):\n \"\"\" Build partitions\n\n Build partitions based on set of polygon's corresponding\n graph, with respect to given nb of partitions, constraints\n and target weights\n\n Parameters\n ----------\n polygons: list[shapely.geometry.Polygon]\n collection of polygons to aggregate\n weights: list[float]\n polygons' corresponding weight\n nparts: int\n number of partitions\n tpweights:\n list of target relative weights for each partition\n weight_attr:\n split_name: str\n Name of the method used for splitting\n recursive:\n metis_options:\n\n Returns\n -------\n \"\"\"\n if \"contig\" not in metis_options.keys():\n 
metis_options[\"contig\"] = False\n graph = polygon_collection_to_graph(polygons, weights, split_name,\n metis_options[\"contig\"], weight_attr)\n partition = part_graph(graph, nparts, weight_attr,\n tpweights, recursive, **metis_options)\n\n # Return unions of polygons belonging to each part (no multi-polygons)\n return explode([no_artifact_unary_union([polygons[n] for n in part]) for part in partition])\n\n\ndef area_partition_polygon(polygon, unit_area, disaggregation_factor, precision,\n recursive, split, **metis_options):\n \"\"\" Partition polygon into a subset of polygons of equal area\n\n :param polygon: polygon intended to be partitioned\n :param unit_area: area of a sub-polygon\n :param disaggregation_factor: factor use to discretize polygons before aggregation\n :param recursive: k-way or recursive method for partitioning\n :param precision: metric precision for sub-polygon area\n :param split: function used to split polygon into smaller unit blocks\n :param metis_options: specific METIS options (see METIS manual)\n :return:\n \"\"\"\n nparts = int(polygon.area/unit_area)\n\n if nparts <= 1 and (polygon.area - unit_area) < unit_area/disaggregation_factor:\n return [polygon]\n\n # Split polygon into sub-elements\n split_poly = split_polygon(polygon, split, unit_area/disaggregation_factor, get_explode=True)\n\n division = [unit_area/polygon.area] * nparts\n if polygon.area % unit_area != 0: # and (polygon.area - nparts * unit_area) >= unit_area/disaggregation_factor:\n division += [(polygon.area - nparts * unit_area)/polygon.area]\n nparts += 1\n\n area = [int(poly.area / precision) for poly in split_poly]\n\n return build_partitions(split_poly, area, nparts, [(d,) for d in division],\n \"area\", split.__name__, recursive, **metis_options)\n\n\ndef centroid(point_collection):\n \"\"\" Retrieve centroid of multiple points\n\n :param point_collection:\n :return:\n \"\"\"\n x_centroid = np.mean([pt.x for pt in point_collection])\n y_centroid = np.mean([pt.y for pt in point_collection])\n\n return Point([x_centroid, y_centroid])\n\n\ndef connect_lines_to_point(line_collection, point):\n \"\"\" Connect a set of lines to some point\n\n :param line_collection:\n :param point:\n :return:\n \"\"\"\n new_line_collection = []\n for line in line_collection:\n if Point(line.coords[0]).distance(point) < Point(line.coords[-1]).distance(point):\n new_line_collection.append(LineString(point.coords[:] + line.coords[:]))\n else:\n new_line_collection.append(LineString(line.coords[:] + point.coords[:]))\n\n return new_line_collection\n\n\ndef cut(line, threshold, count=0):\n \"\"\" Cut a line in segments\n\n Cut a line in segments whose length\n is below a threshold value. This method\n is more randomless regarding the final\n size of the line segments. See 'cut_'\n function for more accuracy\n :param line:\n :param threshold:\n :param count:\n :return:\n \"\"\"\n result = []\n if threshold < 0 or threshold >= line.length or count == 250:\n return [line]\n # Recursively cut line in 2 at midpoint\n p = line.interpolate(0.5, normalized=True)\n split_line = cut_at_point(line, p)\n for sub_line in split_line:\n result.extend(cut(sub_line, threshold, count + 1))\n\n return result\n\n\ndef cut_(line, threshold):\n \"\"\" Cut a line in segments (method 2)\n\n This method cuts a line in as many segments as necessary,\n depending on the given threshold. 
For instance, a line\n of 105m will be cut into 10 pieces of 10m + 1 piece of 5m\n if threshold=10\n :param line: LineString\n :param threshold: minimum sub line piece size\n :return:\n \"\"\"\n if threshold < 0 or threshold >= line.length:\n return [line]\n\n result = []\n\n while \"It remains line to cut\":\n split_line = cut_at_distance(line, threshold/line.length, normalized=True)\n result.append(split_line[0])\n\n if split_line[1].length > threshold:\n line = split_line[1]\n else:\n result.append(split_line[1])\n break\n\n return result\n\n\ndef cut_at_distance(line, distance, normalized=False):\n \"\"\" Cut line at given distance from starting point\n\n :param line:\n :param distance:\n :param normalized:\n :return:\n \"\"\"\n if normalized:\n length = 1\n else:\n length = line.length\n\n if distance <= 0.0 or distance >= length:\n return [line]\n coords = list(line.coords)\n for i, p in enumerate(coords):\n pd = line.project(Point(p), normalized=normalized)\n if pd == distance:\n return [LineString(coords[:i+1]), LineString(coords[i:])]\n elif pd > distance:\n cp = line.interpolate(distance, normalized=normalized)\n try:\n return [LineString(coords[:i] + [(cp.x, cp.y)]),\n LineString([(cp.x, cp.y)] + coords[i:])]\n except ValueError:\n return [LineString(coords[:i] + [(cp.x, cp.y, cp.z)]),\n LineString([(cp.x, cp.y, cp.z)] + coords[i:])]\n\n\ndef cut_at_point(line, point):\n \"\"\" Cut line at point\n\n Cut line at point, which can be within\n or without the geometry\n :param line:\n :param point:\n :return:\n \"\"\"\n d = line.project(point)\n return cut_at_distance(line, d)\n\n\ndef cut_at_points(line, points):\n \"\"\" Cut line at multiple points\n\n :param line:\n :param points:\n :return:\n \"\"\"\n cut_line = []\n distance = [line.project(point) for point in points]\n sorted_points = [point for _, point in sorted(zip(distance, points))]\n\n for idx, point in enumerate(sorted_points):\n cut_line.extend(cut_at_point(line, point))\n if idx < len(sorted_points) - 1:\n line = cut_line.pop()\n\n return cut_line\n\n\ndef dissolve(geometry_collection):\n \"\"\" Recursively join contiguous geometries in collection\n\n :param geometry_collection:\n :return:\n \"\"\"\n if not is_iterable(geometry_collection):\n raise TypeError(\"Input must be a collection but is '{}'\".format(type(geometry_collection)))\n\n while \"There is still geometries to aggregate\":\n\n joint = []\n idx = r_tree_idx(geometry_collection)\n geom_idx = []\n increment = 0\n\n while len(geom_idx) < len(geometry_collection):\n\n if increment not in geom_idx:\n geom = geometry_collection[increment]\n union_idx, union = intersecting_features(geom, geometry_collection, idx)\n\n if len(union) > 0:\n joint.append(cascaded_union(union))\n\n for ix in union_idx:\n idx.delete(ix, geometry_collection[ix].bounds)\n\n geom_idx.extend(union_idx)\n\n increment += 1\n\n if len(joint) < len(geometry_collection):\n geometry_collection = joint\n else:\n break\n\n return joint\n\n\ndef explode(geometry_collection):\n \"\"\" Convert multi-part geometry collection into single-part\n\n :param geometry_collection: valid geometry collection\n :return:\n \"\"\"\n single = []\n if not is_iterable(geometry_collection):\n geometry_collection = [geometry_collection]\n\n for geom in geometry_collection:\n try:\n single.extend(geom)\n except TypeError:\n single.append(geom)\n\n return single\n\n\ndef fishnet(polygon, threshold):\n \"\"\" Intersect polygon with a regular grid or \"fishnet\"\n\n :param polygon:\n :param threshold:\n :return:\n 
\"\"\"\n return polygon_to_mesh(polygon, threshold, mesh)\n\n\ndef hexana(polygon, threshold):\n \"\"\" Split a polygon using a honeycomb grid\n\n :param polygon: original polygon to split\n :param threshold: unit hexagon surface\n :return: list of polygons\n \"\"\"\n return polygon_to_mesh(polygon, threshold, honeycomb)\n\n\n# Thanks to https://gist.github.com/urschrei/17cf0be92ca90a244a91\n@njit()\ndef honeycomb_nb(startx, starty, endx, endy, radius):\n \"\"\"\n Calculate a grid of hexagon coordinates of the given radius\n given lower-left and upper-right coordinates\n Returns a list of lists containing 6 tuples of x, y point coordinates\n These can be used to construct valid regular hexagonal polygons\n\n - update 04/23/2019:\n * can give either radius or area of unit hexagon\n * return a list of shapely Polygon\n\n You will probably want to use projected coordinates for this\n \"\"\"\n\n # calculate side length given radius\n sl = (2 * radius) * np.tan(np.pi / 6)\n # calculate radius for a given side-length\n # (a * (math.cos(math.pi / 6) / math.sin(math.pi / 6)) / 2)\n # see http://www.calculatorsoup.com/calculators/geometry-plane/polygon.php\n\n # calculate coordinates of the hexagon points\n # sin(30)\n p = sl * 0.5\n b = sl * np.cos(np.radians(30))\n w = b * 2\n h = 2 * sl\n\n # offset start and end coordinates by hex widths and heights to guarantee coverage\n startx = startx - w\n starty = starty - h\n endx = endx + w\n endy = endy + h\n origx = startx\n\n # offsets for moving along and up rows\n xoffset = b\n yoffset = 3 * p\n\n row = 1\n\n while starty < endy:\n if row % 2 == 0:\n startx = origx + xoffset\n else:\n startx = origx\n while startx < endx:\n p1x = startx\n p1y = starty + p\n p2x = startx\n p2y = starty + (3 * p)\n p3x = startx + b\n p3y = starty + h\n p4x = startx + w\n p4y = starty + (3 * p)\n p5x = startx + w\n p5y = starty + p\n p6x = startx + b\n p6y = starty\n poly = [\n (p1x, p1y),\n (p2x, p2y),\n (p3x, p3y),\n (p4x, p4y),\n (p5x, p5y),\n (p6x, p6y),\n (p1x, p1y)]\n yield poly\n startx += w\n starty += yoffset\n row += 1\n\n\ndef honeycomb(startx, starty, endx, endy, radius=None, area=None):\n \"\"\"\n\n Parameters\n ----------\n startx\n starty\n endx\n endy\n radius\n area\n\n Returns\n -------\n\n \"\"\"\n\n if not radius:\n radius = msqrt(area / (2*msqrt(3)))\n\n return (Polygon(poly) for poly in honeycomb_nb(startx, starty, endx, endy, radius))\n\n\ndef intersecting_features(geometry, geometry_collection, r_tree=None):\n \"\"\" Return list of geometries intersecting with given geometry\n\n :param geometry:\n :param geometry_collection:\n :param r_tree: rtree index corresponding to geometry collection\n :return:\n \"\"\"\n is_intersecting = intersects(geometry, geometry_collection, r_tree)\n return [i for i in range(len(geometry_collection)) if is_intersecting[i]], \\\n [geom for i, geom in enumerate(geometry_collection) if is_intersecting[i]]\n\n\ndef intersects(geometry, geometry_collection, r_tree=None):\n \"\"\" Return if geometry intersects with geometries of collection\n\n Use this function with large geometry collections\n :param geometry:\n :param geometry_collection:\n :param r_tree:\n :return: list of boolean of length = length(geometry_collection)\n \"\"\"\n # Use Rtree to speed up !\n if r_tree is None:\n r_tree = r_tree_idx(geometry_collection)\n\n list_of_intersecting_features = list(r_tree.intersection(geometry.bounds))\n\n return [False if f not in list_of_intersecting_features\n else geometry.intersects(geometry_collection[f]) for f in\n 
range(len(geometry_collection))]\n\n\ndef is_in_collection(geometry, geometry_collection, r_tree):\n \"\"\" Test if geometry is present in collection (using shapely 'equals' method)\n\n :param geometry:\n :param geometry_collection:\n :param r_tree:\n :return:\n \"\"\"\n _, list_of_intersecting_features = intersecting_features(geometry, geometry_collection, r_tree)\n for geom in list_of_intersecting_features:\n if geometry.equals(geom):\n return True\n\n return False\n\n\ndef is_line_connected_to(line, geometry_collection):\n \"\"\" Is line connected to one of the geometries in collection ?\n\n :param line:\n :param geometry_collection:\n :return:\n \"\"\"\n\n return [other.intersects(Point(line.coords[0])) for other in geometry_collection], [other.intersects(Point(\n line.coords[-1])) for other in geometry_collection]\n\n\ndef katana(polygon, threshold, count=0):\n \"\"\" Split a polygon\n\n See https://snorfalorpagus.net/blog/2016/03/13/splitting-large-polygons-for-faster-intersections/\n\n Copyright (c) 2016, Joshua Arnott\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n following disclaimer in the documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED\n WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n :param polygon: Shapely polygon\n :param threshold:\n :param count:\n :return:\n \"\"\"\n if count == 0:\n if not polygon.is_valid:\n polygon = polygon.buffer(0, 0)\n\n result = []\n width = polygon.bounds[2] - polygon.bounds[0]\n height = polygon.bounds[3] - polygon.bounds[1]\n if width * height <= threshold or count == 250:\n return [polygon]\n if height >= width:\n a = box(polygon.bounds[0], polygon.bounds[1], polygon.bounds[2], polygon.bounds[1] + height/2)\n b = box(polygon.bounds[0], polygon.bounds[1] + height/2, polygon.bounds[2], polygon.bounds[3])\n else:\n a = box(polygon.bounds[0], polygon.bounds[1], polygon.bounds[0] + width/2, polygon.bounds[3])\n b = box(polygon.bounds[0] + width/2, polygon.bounds[1], polygon.bounds[2], polygon.bounds[3])\n\n for sword in (a, b,):\n split_poly = polygon.intersection(sword)\n if not isinstance(split_poly, GeometryCollection):\n split_poly = [split_poly]\n for sub_poly in split_poly:\n if isinstance(sub_poly, (Polygon, MultiPolygon)):\n result.extend(katana(sub_poly, threshold, count+1))\n\n return result\n\n\ndef katana_centroid(polygon, threshold, count=0):\n \"\"\" Split a polygon in equal areas\n\n Thanks to https://snorfalorpagus.net/blog/2016/03/13/splitting-large-polygons-for-faster-intersections/ and\n Daniel Harasty in http://community-gispython-org-community-projects.955323.n3.nabble.com/Community-Spliting-a\n -polygon- into-two-polygons-with-the-same-area-td4024026.html#a4024033, we merge here both approaches to split a\n polygon into a number of sub-polygons of almost equal areas.\n :param polygon: Shapely polygon\n :param threshold:\n :param count:\n :return:\n \"\"\"\n if count == 0:\n if not polygon.is_valid:\n polygon = polygon.buffer(0, 0)\n\n result = []\n width = polygon.bounds[2] - polygon.bounds[0]\n height = polygon.bounds[3] - polygon.bounds[1]\n if width * height <= threshold or count == 250:\n return [polygon]\n if height >= width:\n a = box(polygon.bounds[0], polygon.bounds[1], polygon.bounds[2], polygon.centroid.y)\n b = box(polygon.bounds[0], polygon.centroid.y, polygon.bounds[2], polygon.bounds[3])\n else:\n a = box(polygon.bounds[0], polygon.bounds[1], polygon.centroid.x, polygon.bounds[3])\n b = box(polygon.centroid.x, polygon.bounds[1], polygon.bounds[2], polygon.bounds[3])\n\n for sword in (a, b,):\n split_poly = polygon.intersection(sword)\n if not isinstance(split_poly, GeometryCollection):\n split_poly = [split_poly]\n for sub_poly in split_poly:\n if isinstance(sub_poly, (Polygon, MultiPolygon)):\n result.extend(katana_centroid(sub_poly, threshold, count+1))\n\n return result\n\n\ndef length_of_segments(line):\n \"\"\" Retrieve segment length in line\n\n :param line:\n :return:\n \"\"\"\n return np.diff([line.project(Point(p)) for p in line.coords])\n\n\ndef mask(polygon_collection, mask_collection, fast_intersection_surface):\n \"\"\" Geometry mask\n\n :param polygon_collection:\n :param mask_collection:\n :param fast_intersection_surface:\n :return:\n \"\"\"\n\n # Retrieve base layer and 
mask geometry, split it for faster intersection\n # and explode it (to be sure there is no multi-parts)\n geometry = split_polygon_collection(polygon_collection, fast_intersection_surface, get_explode=True)\n mask_geometry = split_polygon_collection(mask_collection, fast_intersection_surface, get_explode=True)\n\n # Use Rtree to speed up !\n idx = r_tree_idx(mask_geometry)\n\n # 0. Initialization\n result = []\n\n for geom in geometry:\n list_of_intersecting_mask = list(idx.intersection(geom.bounds))\n within = [geom.within(mask_geometry[n]) for n in list_of_intersecting_mask]\n if not any(within):\n is_intersecting = [geom.intersects(mask_geometry[n]) for n in list_of_intersecting_mask]\n if any(is_intersecting):\n difference = geom.difference(cascaded_union([mask_geometry[n] for n in list_of_intersecting_mask]))\n if not difference.is_empty:\n result.append(difference)\n else:\n result.append(geom)\n\n # Multi to single + dissolve coincident polygons\n result = explode(result)\n result = [no_artifact_unary_union(poly) for poly in dissolve(result)]\n\n return result\n\n\ndef merge(line_collection):\n \"\"\" Merge connected lines\n\n :param line_collection:\n :return:\n \"\"\"\n # Merge MultiLinestring objects returned by the \"join\" function\n merged_line = [linemerge(line) if isinstance(line, MultiLineString) else line for line in dissolve(line_collection)]\n\n # Keep only single parts\n return explode(merged_line)\n\n\ndef mesh(startx, starty, endx, endy, side=None, area=None):\n \"\"\" Compute a mesh grid\n\n :param startx:\n :param starty:\n :param endx:\n :param endy:\n :param side:\n :param area:\n :return:\n \"\"\"\n if not side:\n side = msqrt(area)\n\n startx = startx - side/2\n starty = starty - side/2\n endx = endx + side/2\n endy = endy + side/2\n origx = startx\n\n polygons = []\n while starty < endy:\n startx = origx\n while startx < endx:\n poly = [\n (startx, starty),\n (startx, starty + side),\n (startx + side, starty + side),\n (startx + side, starty)]\n polygons.append(Polygon(poly))\n startx += side\n starty += side\n\n return polygons\n\n\ndef nearest_feature(geometry, geometry_collection, r_tree=None):\n \"\"\" Return nearest feature from geometry collection to given geometry\n\n If some of the geometries intersect, the nearest feature is the one whose centroid is the closest to the centroid\n of the given geometry (but distance remains 0)\n :param geometry:\n :param geometry_collection:\n :param r_tree: rtree index corresponding to geometry collection\n :return: nearest feature index and corresponding distance\n \"\"\"\n # Use Rtree to speed up !\n if r_tree is None:\n r_tree = r_tree_idx(geometry_collection)\n\n # Look if some geometries intersect\n list_of_intersecting_features, _ = intersecting_features(geometry, geometry_collection, r_tree)\n\n if list_of_intersecting_features:\n distance = [geometry.centroid.distance(geometry_collection[n].centroid) for n in list_of_intersecting_features]\n return list_of_intersecting_features[np.argmin(distance)], 0\n else:\n list_of_nearest_features = list(r_tree.nearest(geometry.bounds, 1))\n distance = [geometry.distance(geometry_collection[n]) for n in list_of_nearest_features]\n return list_of_nearest_features[np.argmin(distance)], np.min(distance)\n\n\ndef no_artifact_unary_union(geoms, eps=0.00001):\n \"\"\" Make unary union that does not return artifacts\n\n Thanks to https://gis.stackexchange.com/questions/277334/shapely-polygon-union-results-in-strange-artifacts-of\n -tiny-non-overlapping-area\n :param geoms: list 
of geoms to aggregate\n :param eps: buffering precision\n :return:\n \"\"\"\n return unary_union(geoms).buffer(eps, 1, join_style=JOIN_STYLE.mitre\n ).buffer(-eps, 1, join_style=JOIN_STYLE.mitre)\n\n\ndef overlapping_features(geometry, geometry_collection, r_tree=None):\n \"\"\" Return list of geometries overlapping with given geometry\n\n Overlapping geometry is either overlapping in the shapely way,\n or within or containing the other geometry\n :param geometry:\n :param geometry_collection:\n :param r_tree:\n :return:\n \"\"\"\n idx, list_of_intersecting_features = intersecting_features(geometry, geometry_collection, r_tree)\n _overlaps = [[i, geom] for i, geom in zip(idx, list_of_intersecting_features) if geom.overlaps(geometry) or\n geom.within(geometry) or geom.contains(geometry)]\n\n return [overlap[0] for overlap in _overlaps], [overlap[1] for overlap in _overlaps]\n\n\ndef overlaps(geometry, geometry_collection, r_tree=None):\n \"\"\" Return if geometry overlaps with geometries of collection\n\n Overlapping is regarded as any area shared by two geometries\n :param geometry:\n :param geometry_collection:\n :param r_tree:\n :return:\n \"\"\"\n is_intersecting = intersects(geometry, geometry_collection, r_tree)\n return [False if not is_intersecting[i] else geom.overlaps(geometry) or geom.within(geometry) or geom.contains(\n geometry) for i, geom in enumerate(geometry_collection)]\n\n\ndef _intersect(polygon1, polygon2):\n if polygon1.within(polygon2):\n return polygon1\n elif polygon1.overlaps(polygon2):\n return polygon1.intersection(polygon2)\n\n\ndef polygon_to_mesh(polygon, threshold, method, nb_processes=mp.cpu_count(), chunksize=500):\n \"\"\" Convert polygon into mesh of smaller unit polygons\n\n Parameters\n ----------\n\n polygon:\n threshold:\n method:\n nb_processes\n chunksize\n\n Returns\n -------\n \"\"\"\n grid = method(*polygon.bounds, area=threshold)\n # split = []\n\n with mp.Pool(processes=nb_processes) as pool:\n meshes = list(pool.imap(partial(_intersect, polygon2=polygon),\n grid, chunksize=chunksize))\n\n # for unit in grid:\n # if unit.within(polygon):\n # split.append(unit)\n # elif unit.overlaps(polygon):\n # split.append(unit.intersection(polygon))\n\n return explode([m for m in meshes if m is not None])\n\n\ndef polygon_collection_to_graph(polygon_collection, weights, split_name,\n is_contiguous, weight_attr=\"weight\"):\n \"\"\" Convert collection of polygons to networkx graph\n\n Conversion of a polygon collection into a graph allows\n later graph partitioning\n\n Parameters\n ----------\n\n polygon_collection:\n weights:\n weight of each polygon in collection\n split_name: str\n Split method name\n is_contiguous: bool\n True or False (metis options)\n weight_attr: str\n name of weight attribute\n\n Returns\n -------\n \"\"\"\n if not is_iterable(polygon_collection):\n raise TypeError(\"Input must be a collection but is '{}'\".format(type(polygon_collection)))\n\n if 'katana' in split_name:\n is_katana = True\n else:\n is_katana = False\n\n r_tree = r_tree_idx(polygon_collection)\n graph = nx.Graph()\n\n for n, polygon in enumerate(polygon_collection):\n list_of_intersecting_features, _ = intersecting_features(polygon, polygon_collection,\n r_tree)\n list_of_intersecting_features.remove(n)\n if list_of_intersecting_features or not is_contiguous:\n if is_katana:\n graph.add_edges_from([(n, feature) for feature in list_of_intersecting_features\n if not isinstance(polygon.intersection(polygon_collection[feature]), Point)])\n else:\n 
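# Non-katana splits (fishnet/hexana meshes): keep every intersecting\n # neighbour as a graph edge, corner contacts included\n 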
graph.add_edges_from([(n, feature) for feature in list_of_intersecting_features])\n graph.add_node(n, **{weight_attr: weights[n]})\n\n return graph\n\n\ndef radius_of_curvature(line, method=\"osculating\"):\n \"\"\" Compute curvature radius of LineString\n\n :param line:\n :param method: method for computing radius of curvature {'circumscribe', 'osculating'}\n :return:\n \"\"\"\n def norm(xx, yy):\n return np.sqrt(xx ** 2 + yy ** 2)\n\n def tangent_vector(xi, yi):\n return (xi[2::] - xi[:-2]) / norm(xi[2::] - xi[:-2], yi[2::] - yi[:-2]), \\\n (yi[2::] - yi[:-2]) / norm(xi[2::] - xi[:-2], yi[2::] - yi[:-2])\n\n if method == \"osculating\":\n\n if len(line.coords) >= 3:\n x = np.array(line.coords.xy[0])\n y = np.array(line.coords.xy[1])\n xi1 = np.concatenate((x[1::], [x[-1]]))\n yi1 = np.concatenate((y[1::], [y[-1]]))\n xi_1 = np.concatenate(([x[0]], x[:-1]))\n yi_1 = np.concatenate(([y[0]], y[:-1]))\n\n tangent_vector_xi1, tangent_vector_yi1 = tangent_vector(xi1, yi1)\n tangent_vector_xi_1, tangent_vector_yi_1 = tangent_vector(xi_1, yi_1)\n\n coefficient_of_curvature = \\\n norm(tangent_vector_xi1 - tangent_vector_xi_1, tangent_vector_yi1 - tangent_vector_yi_1) /\\\n norm(x[2::] - x[:-2], y[2::] - y[:-2])\n coefficient_of_curvature[coefficient_of_curvature == 0] = 1e-6\n rad_of_curvature = 1 / coefficient_of_curvature\n else:\n return np.array([10000])\n\n elif method == \"circumscribe\":\n\n segment_length = length_of_segments(line)\n a, b = segment_length[:-1:], segment_length[1::]\n c = []\n if len(line.coords) > 3:\n length_2_by_2_start = length_of_segments(LineString(line.coords[::2]))\n length_2_by_2_end = length_of_segments(LineString(line.coords[1::2]))\n\n for n in range(len(length_2_by_2_end)):\n c.extend([length_2_by_2_start[n], length_2_by_2_end[n]])\n\n if len(length_2_by_2_start) > len(length_2_by_2_end):\n c.append(length_2_by_2_start[-1])\n\n elif len(line.coords) == 3:\n c = LineString(line.coords[::2]).length\n\n elif len(line.coords) < 3:\n return np.array([10000])\n\n heron = (a + b + c) * (b + c - a) * (c + a - b) * (a + b - c)\n heron[heron < 0] = 0\n divider = np.sqrt(heron)\n divider[divider == 0] = 0.1\n rad_of_curvature = a * b * c / divider\n\n else:\n rad_of_curvature = []\n\n # Return values and add replicate to beginning of array (as result of curvature computation returns an array with\n # length = length(line.coords) - 2): return array with length = length(line.coords) - 1\n return np.concatenate(([rad_of_curvature[0]], rad_of_curvature))\n\n\ndef shape_factor(polygon, convex_hull):\n \"\"\" Compute shape factor of given polygon\n\n Compute shape factor (here, circularity) of\n a given polygon using either convex hull or not\n :param polygon:\n :param convex_hull: should convex hull be used for computing shape ? 
(bool)\n :return:\n \"\"\"\n\n if convex_hull:\n return 4 * np.pi * polygon.convex_hull.area / (polygon.convex_hull.length ** 2)\n else:\n return 4 * np.pi * polygon.area / (polygon.length ** 2)\n\n\n@type_assert(polygon1=(Polygon, MultiPolygon), polygon2=(Polygon, MultiPolygon), normalized=bool)\ndef shared_area(polygon1, polygon2, normalized=False):\n \"\"\" Get area shared by 2 polygons\n\n :param polygon1:\n :param polygon2:\n :param normalized:\n :return:\n \"\"\"\n if not polygon1.intersects(polygon2):\n return 0\n else:\n new_poly = polygon1.intersection(polygon2)\n if normalized:\n return new_poly.area / polygon1.area\n else:\n return new_poly.area\n\n\n@type_assert(polygon=(Polygon, MultiPolygon), normalized=bool)\ndef shared_area_among_collection(polygon: Polygon, polygon_collection,\n normalized: bool = False, r_tree=None):\n \"\"\" Get area shared by a polygon with polygons from a collection\n\n :param polygon:\n :param polygon_collection:\n :param normalized:\n :param r_tree:\n :return:\n \"\"\"\n if not is_iterable(polygon_collection):\n raise TypeError(\"Input 2 must be a collection but is '{}'\".format(type(polygon_collection)))\n\n poly_intersects = intersects(polygon, polygon_collection, r_tree)\n\n return [shared_area(polygon, poly, normalized)\n if poly_intersects[n] else 0 for n, poly in enumerate(polygon_collection)]\n\n\ndef split_collection(geometry_collection, threshold, method, get_explode):\n \"\"\" Split geometry collection\n\n :param geometry_collection:\n :param threshold:\n :param method:\n :param get_explode:\n :return:\n \"\"\"\n if not is_iterable(geometry_collection):\n raise TypeError(\"Geometry must be a collection\")\n\n new_collection = []\n\n for geom in geometry_collection:\n try:\n new_collection.extend(method(geom, threshold))\n except TopologicalError:\n new_collection.append(geom)\n\n if get_explode:\n new_collection = explode(new_collection)\n\n # Return new collection\n return new_collection\n\n\ndef split_line_collection(line_collection, threshold, method=\"cut\", get_explode=False):\n \"\"\"\n\n :param line_collection:\n :param threshold:\n :param method:\n :param get_explode:\n :return:\n \"\"\"\n split_method = {'cut': cut}\n\n return split_collection(line_collection, threshold, split_method[method], get_explode)\n\n\ndef split_polygon(polygon, method, threshold, get_explode):\n \"\"\" Split polygon with respect to method\n\n Split polygon and return exploded (no multi part) if necessary\n :param polygon:\n :param method:\n :param threshold:\n :param get_explode: (boolean) return exploded collection\n :return:\n \"\"\"\n sp_poly = method(polygon, threshold)\n if get_explode:\n return explode(sp_poly)\n else:\n return sp_poly\n\n\ndef split_polygon_collection(polygon_collection, threshold, method=\"katana\", get_explode=False):\n \"\"\" Split a collection of polygons\n\n :param polygon_collection: collection of shapely polygons\n :param threshold: threshold surface under which no more splitting must be achieved\n :param method: method used for splitting\n :param get_explode:\n :return: new polygon collection with only Polygon geometries (no MultiPolygon geometries)\n \"\"\"\n split_method = {'katana': katana, 'katana_centroid': katana_centroid}\n\n return split_collection(polygon_collection, threshold, split_method[method], get_explode)\n\n\ndef to_2d(geometry):\n \"\"\" Convert 3D geometry to 2D\n\n Credit to @feenster and @hunt3ri from\n https://github.com/hotosm/tasking-manager/blob/master/server/services/grid/grid_service.py\n :param 
geometry:\n :return:\n \"\"\"\n def _to_2d(x, y, z=None):\n # keep x and y unchanged: a 0.0 coordinate is valid and must not be dropped\n return (x, y)\n\n return transform(_to_2d, geometry)\n","repo_name":"benjaminpillot/gis-tools","sub_path":"gistools/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":33831,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
{"seq_id":"34703159423","text":"from main import mode4\n# please comment the last line \"main()\" in the file main.py before executing this file\n\ncomp_win = 0\ntotal_game = 0\nfor time in range(5): # iterate the whole experiment five times (each case five times) to make sure the test is reliable\n loser_list = []\n # iterate over all possible board sizes\n for size in range(2, 27):\n for row in range(size):\n for column in range(size):\n # iterate every possible starting position of the queen\n if not(row == size - 1 and column == 0):\n # if the position is not the goal position\n # add this constraint because the queen starting position cannot be goal position\n position = (row, column)\n loser = mode4('4', size, position)\n loser_list.append(loser)\n comp_win += loser_list.count('Random Player')\n total_game += len(loser_list)\nprint(\"The computer that uses the best possible winning strategy wins \" + str(comp_win) + ' times out of ' + str(total_game) + ' games.')\nprint(\"The computer wins \" + '{:.2f}'.format(comp_win / total_game * 100) + ' percent of the time.')\n\n# Result:\n# The computer that uses the best possible winning strategy wins 29485 times out of 30875 games.\n# The computer wins 95.90 percent of the time.\n","repo_name":"xingjiehe/Queen","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"72397302330","text":"from arches.app.models.system_settings import settings\nfrom arches.app.search.components.base import BaseSearchFilter\nfrom arches.app.utils.pagination import get_paginator\n\ndetails = {\n \"searchcomponentid\": \"\",\n \"name\": \"Paging\",\n \"icon\": \"\",\n \"modulename\": \"paging_filter.py\",\n \"classname\": \"PagingFilter\",\n \"type\": \"paging\",\n \"componentpath\": \"views/components/search/paging-filter\",\n \"componentname\": \"paging-filter\",\n \"sortorder\": \"0\",\n \"enabled\": True,\n}\n\n\nclass PagingFilter(BaseSearchFilter):\n def append_dsl(self, search_results_object, permitted_nodegroups, include_provisional):\n export = self.request.GET.get(\"export\", None)\n mobile_download = self.request.GET.get(\"mobiledownload\", None)\n page = 1 if self.request.GET.get(details[\"componentname\"]) == \"\" else int(self.request.GET.get(details[\"componentname\"], 1))\n\n if export is not None:\n limit = settings.SEARCH_RESULT_LIMIT\n elif mobile_download is not None:\n limit = self.request.GET[\"resourcecount\"]\n else:\n limit = settings.SEARCH_ITEMS_PER_PAGE\n limit = int(self.request.GET.get(\"limit\", limit))\n search_results_object[\"query\"].start = limit * int(page - 1)\n search_results_object[\"query\"].limit = limit\n\n def post_search_hook(self, search_results_object, results, permitted_nodegroups):\n total = (\n results[\"hits\"][\"total\"][\"value\"]\n if results[\"hits\"][\"total\"][\"value\"] <= settings.SEARCH_RESULT_LIMIT\n else settings.SEARCH_RESULT_LIMIT\n )\n page = 1 if self.request.GET.get(details[\"componentname\"]) == \"\" else int(self.request.GET.get(details[\"componentname\"], 1))\n\n paginator, pages = 
get_paginator(self.request, results, total, page, settings.SEARCH_ITEMS_PER_PAGE)\n page = paginator.page(page)\n\n ret = {}\n ret[\"current_page\"] = page.number\n ret[\"has_next\"] = page.has_next()\n ret[\"has_previous\"] = page.has_previous()\n ret[\"has_other_pages\"] = page.has_other_pages()\n ret[\"next_page_number\"] = page.next_page_number() if page.has_next() else None\n ret[\"previous_page_number\"] = page.previous_page_number() if page.has_previous() else None\n ret[\"start_index\"] = page.start_index()\n ret[\"end_index\"] = page.end_index()\n ret[\"pages\"] = pages\n\n if details[\"componentname\"] not in search_results_object:\n search_results_object[details[\"componentname\"]] = {}\n search_results_object[details[\"componentname\"]][\"paginator\"] = ret\n","repo_name":"archesproject/arches","sub_path":"arches/app/search/components/paging_filter.py","file_name":"paging_filter.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"77"}
{"seq_id":"593677018","text":"# This number was calculated by adding all of the 1, 2, 3, and 4 digit primes together to get their total length\r\n# Then the number of 5 digit primes was calculated to give a digit length of 10,005 which is required for the question\r\n# The number of primes needed to get the required length was 2287\r\n# 20231 is the 2287th prime\r\nnum_for_prime_digit_list = 20231\r\nid_length = 5\r\n\r\n\r\ndef solution(i):\r\n minion_id = get_minion_id(i)\r\n return minion_id\r\n\r\n\r\ndef get_minion_id(hat_number):\r\n prime_list = get_prime_list_in_range(num_for_prime_digit_list)\r\n digit_list = convert_list_of_primes_to_digit_list(prime_list)\r\n minion_id = get_minion_id_from_digit_list(digit_list, hat_number)\r\n return minion_id\r\n\r\n\r\ndef get_minion_id_from_digit_list(digit_list, i):\r\n minion_id_slice = digit_list[i:i+id_length]\r\n minion_id = ''.join(minion_id_slice)\r\n return minion_id\r\n\r\n\r\ndef get_prime_list_in_range(num_to):\r\n \"\"\"\r\n Invented by the Greek mathematician Eratosthenes over two thousand years ago, this method sieves by repeatedly casting out multiples of primes.\r\n Begin by making a list of all numbers from 2 to the maximum desired prime n.\r\n Then repeatedly take the smallest uncrossed number and cross out all of its multiples;\r\n the numbers that remain uncrossed are prime.\r\n\r\n Optimisations have been made:\r\n The sieve automatically filters out all multiples of 2 apart from 2 itself as there are no even primes apart from 2\r\n The inner loop which sieves out non primes starts at the square of the current prime, since all smaller multiples have already been caught in previous sieve loops\r\n Can't get to work # The outer loop stops at the sqrt of the number to find primes up to, as all non primes will have been sieved out on previous loops\r\n Arguments:\r\n num_to {int} -- [number of primes up to]\r\n Returns:\r\n [list] -- [list of all primes]\r\n \"\"\"\r\n prime_list = list()\r\n sieve = _create_optimised_sieve(num_to)\r\n\r\n for outer_index in xrange(2, (num_to)+1):\r\n\r\n if (sieve[outer_index]):\r\n prime_list.append(outer_index)\r\n\r\n for inner_index in xrange(int(outer_index**2), num_to+1, outer_index):\r\n sieve[inner_index] = False\r\n\r\n return prime_list\r\n\r\n\r\ndef _create_optimised_sieve(num_to):\r\n sieve = [False\r\n if (index) % 2 == 0 and (index != 2)\r\n else True\r\n for index in xrange(num_to + 1)]\r\n return sieve\r\n\r\n\r\ndef convert_list_of_primes_to_digit_list(primes):\r\n strings = _convert_int_list_to_string_list(primes)\r\n prime_string = ''.join(strings)\r\n int_list = _convert_int_string_to_digit_list(prime_string)\r\n return int_list\r\n\r\n\r\ndef _convert_int_list_to_string_list(int_list):\r\n return [str(integer) for integer in int_list]\r\n\r\n\r\ndef _convert_int_string_to_digit_list(string):\r\n return [digit for digit in string]\r\n\r\n\r\nif __name__ == '__main__':\r\n solution(3)\r\n","repo_name":"VYeadon/GoogleFooBar","sub_path":"Level1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"5327291419","text":"import os\nimport torch\nimport pickle\nimport numpy as np\n\n\ndef encode16(params, save_path):\n \"\"\"Compress params to 16-bit and save them to save_path\"\"\"\n custom_dict = {}\n for name, param in params.items():\n param = np.float64(param.numpy())\n # some variables are not ndarrays but plain numbers; those need no compression\n if type(param) == np.ndarray:\n custom_dict[name] = np.float16(param)\n else:\n custom_dict[name] = param\n\n pickle.dump(custom_dict, open(save_path, 'wb'))\n\ndef decode16(fname):\n '''Load the 16-bit weights and restore them to torch.tensor, returned as a state_dict'''\n params = pickle.load(open(fname, 'rb'))\n custom_dict = {}\n for (name, param) in params.items():\n param = torch.tensor(param)\n custom_dict[name] = param\n return custom_dict\n\ndef encode8(params, save_path):\n \"\"\"Compress params to 8-bit and save them to save_path\"\"\"\n custom_dict = {}\n for (name, param) in params.items():\n param = np.float64(param.numpy())\n if type(param) == np.ndarray:\n min_val = np.min(param)\n max_val = np.max(param)\n param = np.round((param - min_val) / (max_val - min_val) * 255)\n param = np.uint8(param)\n custom_dict[name] = (min_val, max_val, param)\n else:\n custom_dict[name] = param\n\n pickle.dump(custom_dict, open(save_path, 'wb'))\n\ndef decode8(fname):\n '''Load the 8-bit weights and restore them to torch.tensor, returned as a state_dict'''\n params = pickle.load(open(fname, 'rb'))\n custom_dict = {}\n for (name, param) in params.items():\n if type(param) == tuple:\n min_val, max_val, param = param\n param = np.float64(param)\n param = (param / 255 * (max_val - min_val)) + min_val\n param = torch.tensor(param)\n else:\n param = torch.tensor(param)\n\n custom_dict[name] = param\n return custom_dict\n\n\nif __name__ == '__main__':\n print(f\"Original Cost: {os.stat('./weights/student_model.bin').st_size} Bytes.\")\n old_params = torch.load('./weights/student_model.bin', map_location='cpu')\n encode16(old_params, './weights/student_model_16bit.bin')\n print(f\"16-bit Cost: {os.stat('./weights/student_model_16bit.bin').st_size} Bytes.\")\n encode8(old_params, './weights/student_model_8bit.bin')\n print(f\"8-bit Cost: {os.stat('./weights/student_model_8bit.bin').st_size} Bytes.\")\n","repo_name":"chouxianyu/LHY_ML2020_Codes","sub_path":"hw7_NetworkCompression/WeightQuantization.py","file_name":"WeightQuantization.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"77"}
{"seq_id":"16530463652","text":"from itertools import combinations\n\ndef combination_more_than(n, r, bound):\n num = [v for v in range((n-r)+1, n+1)]\n den = [v for v in range(1, r+1)]\n value = 1\n for v in num:\n value = value*v\n for v in den:\n value = int(value/v)\n return value > bound\n\nvalue = [combination_more_than(n, r, 1_000_000)\n for n in range(101) for r in range(n)]\nvalue = [v for v in value if v]\nprint(len(value))\n","repo_name":"pratamov/pe","sub_path":"53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"3626425946","text":"from libs import *\nimport numpy as np\nimport pprint\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.spatial import procrustes\n\n\n\narucoCorners = FileIO.getFromPickle(\"pickles/corners_scorpion_07-06-2019_15:34:54.pickle\")\n\nprint(arucoCorners)\n\n\n\nfor i in range(4):\n print(\"wow\")\n indexStart = 4*3*i+0\n indexEnd = indexStart+10\n print(indexStart)\n print(indexEnd)\n \n cornerstart = arucoCorners[indexStart]\n cornerend = arucoCorners[indexEnd]\n\n\n\n norm = np.linalg.norm(cornerstart-cornerend)\n\n print(norm)","repo_name":"DonHaul/MultiCamCalAruco","sub_path":"tools/diagonalcangalhomeasurer.py","file_name":"diagonalcangalhomeasurer.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"29474853903","text":"\"\"\"\nThis is the simplest possible DAG.\nIt consists of a sensor (waits for 6am),\na bash operator (prints the execution_date),\nand two python operators (each writes a line to the logs)\n\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nimport logging\n\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\n\nDEFAULT_ARGS = {\n 'start_date': days_ago(2),\n 'owner': 'ana-usacheva',\n 'poke_interval': 600\n}\n\nwith DAG(\"au_test\",\n schedule_interval='@daily',\n default_args=DEFAULT_ARGS,\n max_active_runs=1,\n tags=['ana-usacheva']\n ) as dag:\n\n dummy = DummyOperator(task_id=\"dummy\")\n echo_ds = BashOperator(\n task_id='echo_ds',\n bash_command='echo {{ ds }}',\n dag=dag\n )\n\n def hello_world_func():\n logging.info(\"Hello World !\")\n\n hello_world = PythonOperator(\n task_id='first_task',\n python_callable=hello_world_func,\n dag=dag\n )\n\n dummy >> [echo_ds, hello_world]","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/dags/ana-usacheva/au_test.py","file_name":"au_test.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"20428717460","text":"import discord\nfrom discord.ext import commands\nfrom discord import Embed, Colour\nfrom discord.ext.commands import guild_only, has_permissions, group, cooldown\nfrom db.db import *\n\n\nclass Config(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @group(invoke_without_command=True, name=\"config\", aliases=[\"settings\"], usage=\"config\")\n @has_permissions(manage_guild=True)\n @guild_only()\n @cooldown(1, 10, commands.BucketType.guild)\n async def config_commands(self, ctx):\n m = \"**{0}config lang** - to set the language in your guild\\n**{0}config prefix** - to set the prefix\"\n if get_lang(ctx) == \"ar\":\n m = \"**{0}config lang** - لتغير لغه البوت في خادمك\\n**{0}config prefix** - لتغير بادئه البوت في خادمك\"\n await ctx.send(embed=Embed(\n description=m.format(get_prefix(ctx)),\n color=Colour.red())\n )\n\n @config_commands.command(\n name=\"lang\", aliases=['set_lang', \"set-lang\", \"language\"], invoke_without_command=True, usage=\"config lang \")\n @has_permissions(manage_guild=True)\n @guild_only()\n @cooldown(1, 10, commands.BucketType.guild)\n async def 
language_command(self, ctx, new_lang):\n if new_lang == \"ar\":\n cr.execute(\"UPDATE guilds SET language = 'ar' WHERE guild_id = ?\", (ctx.guild.id,))\n await ctx.send(embed=discord.Embed(\n description=f\"تمت إعادة تعيين اللغة إلى `ar`\",\n color=discord.Colour.green()\n ))\n commit()\n elif new_lang == \"en\":\n cr.execute(\"UPDATE guilds SET language = 'en' WHERE guild_id = ?\", (ctx.guild.id,))\n await ctx.send(embed=discord.Embed(\n description=f\"the language has been reset to `en`\",\n color=discord.Colour.green()\n ))\n commit()\n else:\n await ctx.send(embed=discord.Embed(\n description=f\"Available languages are Arabic and English(ar, en)\",\n color=discord.Colour.red()\n ))\n\n @config_commands.command(name=\"prefix\", aliases=['set_prefix', \"set-prefix\", \"setprefix\"], invoke_without_command=True, usage=\"config prefix \")\n @has_permissions(manage_guild=True)\n @guild_only()\n @cooldown(1, 10, commands.BucketType.guild)\n async def prefix(self, ctx, new_prefix: str):\n m11, m12 = \"لا يمكن أن تكون البادئة أكثر من 5 أحرف.\", \"تمت إعادة ضبط البادئة إلى \"\n if get_lang(ctx) == \"en\":\n m11, m12 = \"The prefix cannot be more than 5 characters long.\", \"the prefix has been reset to\"\n if len(new_prefix) > 5:\n await ctx.send(embed=discord.Embed(\n description=m11,\n color=discord.Colour.red()\n ))\n return\n cr.execute(\"UPDATE guilds SET prefix = ? WHERE guild_id = ?\", (new_prefix, ctx.guild.id))\n commit()\n prefix = cr.execute(\"SELECT prefix FROM guilds WHERE guild_id = ?\", (ctx.guild.id,))\n await ctx.send(embed=discord.Embed(\n description=f\"{m12} `{prefix.fetchone()[0]}`\",\n color=discord.Colour.green()))\n return\n\n\ndef setup(client):\n client.add_cog(Config(client))\n","repo_name":"SumBot/SumBot","sub_path":"cogs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34834493289","text":"import os.path\nfrom distutils.core import setup\n\nimport drpc\nimport setuptools\n\nrequirements_filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'requirements.txt')\n\nwith open(requirements_filename) as fd:\n install_requires = [i.strip() for i in fd.readlines()]\n\nsetup(\n name='drpc',\n version=drpc.__version__,\n description='Implements the DRPC network protocol',\n long_description=open('README.md', 'rt').read(),\n author='Duo Security, Inc.',\n author_email='support@duosecurity.com',\n packages=setuptools.find_packages(exclude=['tests']),\n install_requires=install_requires,\n)\n","repo_name":"slayer/duoauthproxy-freebsd","sub_path":"duoauthproxy-5.0.0-b03e68d-src/pkgs/drpc-2.5.0/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2708441924","text":"import cv2 as cv\r\n\r\n#function to resize img or frame (video,live camera,image)\r\ndef rescaleFrame(frame,scale=0.75):\r\n width =int(frame.shape[1] * scale)\r\n height=int(frame.shape[0] * scale)\r\n dimentions = (width,height)\r\n\r\n return cv.resize(frame,dimentions,interpolation=cv.INTER_AREA)\r\n\r\n# #its only for live videos(like web cams)\r\n# def changeRes(width,height):\r\n# capture.set(3,width) #3 is reference for width\r\n# capture.set(4,height) #4 is reference for height\r\n\r\n\r\n# image resize\r\nimg = cv.imread('Resources/Photos/cat.jpg')\r\nresized_img = 
rescaleFrame(img)\r\ncv.imshow('res_img',resized_img)\r\n\r\n#read video frame by frame\r\ncapture = cv.VideoCapture('Resources/Videos/dog.mp4')\r\nwhile True:\r\n isTrue, frame = capture.read()\r\n if not isTrue: # stop once no frame could be read (end of video)\r\n break\r\n frame_resized = rescaleFrame(frame,scale=0.2)\r\n #cv.imshow('Video',frame)\r\n cv.imshow('Video',frame_resized)\r\n if cv.waitKey(20) & 0xFF==ord('d'):\r\n break\r\n\r\ncapture.release()\r\ncv.destroyAllWindows()\r\n\r\n","repo_name":"chris-arvs/opencv-basic-operations","sub_path":"rescale.py","file_name":"rescale.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"8115129482","text":"import pytest\n\nfrom backers.models import Backer, BackerCategory, BackerSubCategory, logo_upload_to\nfrom backers.factories import BackerFactory\nfrom aids.models import AidWorkflow\nfrom aids.factories import AidFactory\n\n\npytestmark = pytest.mark.django_db\n\n\ndef test_backer_slug():\n new_backer = BackerFactory(name=\"New Backer\")\n\n assert new_backer.slug == \"new-backer\"\n\n\ndef test_backer_filtering():\n BackerFactory()\n aid_draft = AidFactory(status=AidWorkflow.states.draft)\n BackerFactory(financed_aids=[aid_draft])\n aid_published_1 = AidFactory(status=AidWorkflow.states.published)\n aid_published_2 = AidFactory(status=AidWorkflow.states.published)\n BackerFactory(financed_aids=[aid_published_1, aid_published_2])\n\n assert Backer.objects.count() == 3\n assert Backer.objects.has_financed_aids().count() == 2\n assert Backer.objects.has_published_financed_aids().count() == 1\n\n\ndef test_logo_upload_to():\n backer = BackerFactory(name=\"Département imaginaire\")\n\n result = logo_upload_to(backer, \"0345.png\")\n\n assert result == \"backers/departement-imaginaire_logo.png\"\n\n\ndef test_backer_category():\n backer_cat = BackerCategory(name=\"Collectivités\")\n backer_cat.save()\n\n assert backer_cat.id_slug == \"1-collectivites\"\n\n\ndef test_backer_sub_category():\n backer_cat = BackerCategory(name=\"Collectivités\")\n backer_cat.save()\n backer_subcat = BackerSubCategory(\n name=\"Conseils départementaux\", category=backer_cat\n )\n backer_subcat.save()\n\n assert backer_subcat.id_slug == \"1-conseils-departementaux\"\n assert backer_cat == backer_subcat.category\n","repo_name":"MTES-MCT/aides-territoires","sub_path":"src/backers/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"}
{"seq_id":"14887554181","text":"from sys import argv\n\ndef getArgs(*args):\n \"\"\" Gets the arguments and stores them in a dictionary.\n \n \"\"\"\n argDict = {}\n for i in range(len(args)):\n if argv.count(args[i]) != 0:\n if args[i] == '-help':\n argDict[args[i]] = 'on'\n else:\n j = argv.index(args[i])\n argDict[args[i]] = argv[j + 1]\n else:\n argDict[args[i]] = None\n return argDict\n","repo_name":"davidbrochart/turbocode","sub_path":"src/myhdl/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"71884787129","text":"\"\"\"Evalify utils module contains various utilities serving other modules.\"\"\"\nimport numpy as np\nimport psutil\n\nGB_TO_BYTE = 1024**3\n\n\ndef _validate_vectors(X, y):\n X = np.asarray(X, dtype=np.float32)\n y = np.asarray(y, 
dtype=np.int32)\n if X.ndim != 2:\n raise ValueError(\"Embeddings vector should be 2-D.\")\n if y.ndim != 1:\n raise ValueError(\"Target vector should be 1-D.\")\n return X, y\n\n\ndef _calc_available_memory():\n \"\"\"Calculate available memory in system\"\"\"\n mem = psutil.virtual_memory()\n return mem[1]\n\n\ndef calculate_best_batch_size(X, available_mem=None):\n \"\"\"Calculate maximum rows to fetch per batch without going out of memory.\n\n We need 3 big arrays to be held in memory (A, B, A*B)\n \"\"\"\n available_mem = _calc_available_memory() if available_mem is None else available_mem\n if available_mem > 2 * GB_TO_BYTE:\n max_total_rows = np.floor((available_mem - GB_TO_BYTE) / X[0].nbytes)\n return max_total_rows // 3\n else:\n max_total_rows = np.floor(available_mem / X[0].nbytes)\n return max_total_rows // 5\n","repo_name":"ma7555/evalify","sub_path":"evalify/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"}
{"seq_id":"42314630019","text":"from bz2 import BZ2File as bzopen\nimport json\nimport codecs\nimport csv\nfrom collections import defaultdict\nimport numpy as np\nimport datetime\nimport os\nfrom scipy.stats import rankdata\nimport pandas as pd\n\nstart_date = datetime.datetime.fromtimestamp(float('1451606400'))\n\n\n###############################################################################################\n# Extract posts attributes AND feedback data\n###############################################################################################\ndef extract_active_users_posts():\n\n user_noPosts = defaultdict(int)\n user_posts = defaultdict(list)\n\n with open('reddit_FirstHalf2016.csv', 'r') as f:\n lines = f.readlines()\n\n for line in lines[1:]:\n line_data = line.strip().split(',')\n post_id, subbreddit_id, user, post_timestamp, num_comments, scores = line_data\n\n date_time = datetime.datetime.fromtimestamp(float(post_timestamp))\n post_time = ((date_time - start_date).total_seconds()) / 86400.0\n\n user_posts[user].append((post_id, subbreddit_id, post_timestamp, post_time, num_comments, scores))\n user_noPosts[user] += 1\n\n sorted_user_noPosts = sorted(user_noPosts.items(), key=lambda kv: kv[1], reverse=True)\n print('# of users ', len(sorted_user_noPosts))\n\n avg_noPost = np.mean(list(user_noPosts.values()))\n print('Average number of posts per user ', avg_noPost)\n\n per_subreddit_posts = defaultdict(list)\n per_user_posts = defaultdict(list)\n noUser_bigAve = 0\n min_noPosts = 50\n total_no_posts = 0\n for user, noPost in user_noPosts.items():\n if noPost >= min_noPosts:\n noUser_bigAve += 1\n total_no_posts += noPost\n\n per_user_posts[user] = user_posts[user]\n for post_data in user_posts[user]:\n post_id, subbreddit_id, post_timestamp, post_time, num_comments, scores = post_data\n\n\n per_subreddit_posts[subbreddit_id].append((post_id, post_timestamp, post_time, user, num_comments, scores))\n\n\n print('# users with at least 50 posts ', noUser_bigAve)\n print('# posts submitted by users that post at least 50 times ', total_no_posts)\n\n print('\\n Active user posts extracted ')\n\n return per_subreddit_posts, per_user_posts\n\n\ndef extract_post_comments():\n\n post_comments_times = {}\n\n with open(\"reddit_comments.txt\") as f:\n for line in f:\n post_id_feedback = line.strip().split(',')\n post_id = post_id_feedback[0]\n feedback_times = post_id_feedback[1:]\n\n fb_time_list = [\n 
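# convert each comment's epoch timestamp into fractional days elapsed since start_date (2016-01-01)\n 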
((datetime.datetime.fromtimestamp(float(fb_time)) - start_date).total_seconds()) / 86400.0\n for fb_time in feedback_times]\n\n post_comments_times[post_id] = fb_time_list\n\n print(post_comments_times[post_id])\n\n return post_comments_times\n\n\n\n###############################################################################################\n# Rank number of comments (feedback)\n###############################################################################################\n\ndef build_feedback_ranking_matrix(post_times, post_ids, post_comments_times, num_posts, num_bins=100):\n delays = list(np.logspace(np.log10(1.0 / 86400.0), np.log10(30), num=num_bins - 1)) + [np.inf]\n delay_len = len(delays)\n post_delay_matrix = np.nan * np.ones(shape=(num_posts, delay_len))\n\n # build tweet delay matrix\n for t, post_id in enumerate(post_ids):\n if post_id not in post_comments_times: continue\n fb_times = np.array(post_comments_times[post_id])\n\n for d, delay in enumerate(delays):\n idx_before_next_post = np.where((fb_times <= post_times[t] + delay))[0]\n post_delay_matrix[t][d] = len(idx_before_next_post) # total number of fb within time range\n\n # rank the #FB in each delay bins\n # percentile score : 0.1, 0.2, ...,0.9, 1.0\n percentile_score = np.array(range(1, 11)) * 10\n delay_percentileScore_matrix = np.nan * np.ones(shape=(delay_len, len(percentile_score)))\n for p, percent in enumerate(percentile_score):\n delay_percentileScore_matrix[:, p] = np.nanpercentile(post_delay_matrix, percent, axis=0)\n\n return delay_percentileScore_matrix\n\n\ndef explore_feedback_representations(post_times, post_ids, post_comments_times):\n # build tweet delay matrix\n post_feedback = np.nan * np.ones(shape=(len(post_ids)-1,))\n post_feedback_div_delay = np.nan * np.ones(shape=(len(post_ids) - 1,))\n for t, post_id in enumerate(post_ids[:-1]):\n if post_id not in post_comments_times: continue\n fb_times = np.array(post_comments_times[post_id])\n\n idx_before_next_post = np.where((fb_times <= post_times[t+1]))[0]\n post_feedback[t] = len(idx_before_next_post) # total number of fb within time range\n delay = post_times[t+1] -post_times[t]\n if delay>0:\n post_feedback_div_delay[t] = len(idx_before_next_post) / (delay*24.0*60.0) # divide by delay in minutes\n else:\n post_feedback_div_delay[t] = np.nan\n\n non_null_len = len(np.where(~np.isnan(post_feedback_div_delay))[0])\n fb_div_delay_per = pd.Series(post_feedback_div_delay).rank().values\n fb_div_delay_per = fb_div_delay_per / non_null_len\n #fb_div_delay_per = rankdata(post_feedback_div_delay, \"average\") / len(post_feedback_div_delay)\n\n non_null_len = len(np.where(~np.isnan(post_feedback))[0])\n post_feedback_per = pd.Series(post_feedback).rank().values\n post_feedback_per = post_feedback_per / non_null_len\n #fb_div_delay_median = post_feedback_div_delay / np.nanmedian(post_feedback_div_delay)\n #print(fb_div_delay_median)\n\n post_feedback[np.isnan(post_feedback)] = -1\n post_feedback_div_delay[np.isnan(post_feedback_div_delay)] = -1\n fb_div_delay_per[np.isnan(fb_div_delay_per)] = -1\n post_feedback_per[np.isnan(post_feedback_per)] = -1\n #fb_div_delay_median[np.isinf(fb_div_delay_median)] = -1\n\n\n post_feedback = np.array(list(post_feedback)+[-1])\n post_feedback_div_delay = np.array(list(post_feedback_div_delay) + [-1])\n fb_div_delay_per = np.array(list(fb_div_delay_per) + [-1])\n post_feedback_per = np.array(list(post_feedback_per) + [-1])\n\n return post_feedback, post_feedback_div_delay, fb_div_delay_per, post_feedback_per\n\n\n\ndef 
choose_timeBin_and_percentileScore(delay, num_fbs, rankMatrix, num_bins=100):\n # choose time bin\n\n possible_delays = np.array(\n list(np.logspace(np.log10(1.0 / 86400.0), np.log10(30), num=num_bins - 1)) + [np.inf])\n\n some_inds = np.where(delay < possible_delays)[0]\n time_bin = sorted(some_inds)[0]\n time_bin_vector = rankMatrix[time_bin]\n\n some_inds = np.where(num_fbs <= time_bin_vector)[0]\n if len(some_inds) == 0:\n rank = 100\n print('surprise')\n else:\n rank = (sorted(some_inds)[0] + 1) * 10\n\n return rank\n\n\ndef countComment_before_nextPost_and_rank(per_user_posts, per_subreddit_posts, post_comments_times):\n\n # get the next post time of a user\n postID_postTimes = {}\n all_cur_posts = []\n all_next_times = []\n for i, user in enumerate(per_user_posts):\n if i % 1000 == 0: print(i, user)\n per_user_data = list(zip(*per_user_posts[user]))\n # [post_id, subbreddit_id, post_date, num_comments]\n post_ids = np.array(per_user_data[0], dtype=str)\n post_dates = np.array(per_user_data[3])\n sort_idxs = np.argsort(post_dates)\n\n\n cur_post_ids = post_ids[sort_idxs]\n next_post_times = post_dates[sort_idxs]\n next_post_times = list(next_post_times[1:]) + [180.0]\n\n\n all_cur_posts += list(cur_post_ids)\n all_next_times+= next_post_times\n\n postID_postTimes = dict(list(zip(all_cur_posts, all_next_times)))\n\n\n '''\n # build rank matrix of number of comments per subreddit\n subredditID_rankingMatrix = dict()\n for subreddit_id in per_subreddit_posts:\n subreddit_data = list(zip(*per_subreddit_posts[subreddit_id]))\n post_ids, post_timestamps, post_times, users, num_comments_list = subreddit_data\n\n rankMatrix = build_feedback_ranking_matrix(post_times, post_ids, post_comments_times, len(post_ids))\n subredditID_rankingMatrix[subreddit_id] = rankMatrix\n '''\n\n user_post_path = \"user_data\"\n\n if not os.path.exists(user_post_path):\n os.makedirs(user_post_path)\n\n # rank number of comments using rank matrix\n ranked_peruser_feedback = defaultdict(list)\n number_of_comments_dist = []\n delays_dist = []\n user_rankingMatrix = {}\n for i, user in enumerate(per_user_posts):\n if i%1000==0: print(\"reddit_user\", i, user)\n per_user_data = list(zip(*per_user_posts[user]))\n # [post_id, subbreddit_id, post_timestamp, post_time, num_comments]\n post_ids, subreddit_ids, post_timestamps, post_times, num_comments_list, scores_list = per_user_data\n sort_idxs = np.argsort(post_times)\n\n post_ids = np.array(post_ids, dtype=str)[sort_idxs]\n subreddit_ids = np.array(subreddit_ids, dtype=str)[sort_idxs]\n post_timestamps = np.array(post_timestamps, dtype=str)[sort_idxs]\n post_times = np.array(post_times)[sort_idxs]\n num_comments_list = np.array(num_comments_list)[sort_idxs]\n scores_list = np.array(scores_list)[sort_idxs]\n\n\n rankMatrix = build_feedback_ranking_matrix(post_times, post_ids, post_comments_times, len(post_ids))\n\n user_rankingMatrix[user] = rankMatrix\n\n\n post_feedback, post_feedback_div_delay, fb_div_delay_per, fb_div_delay_median = \\\n explore_feedback_representations(post_times, post_ids, post_comments_times)\n\n f = open(user_post_path + \"/\" + str(user) + \".csv\", 'w')\n try:\n writer = csv.writer(f, dialect='excel')\n writer.writerow(['timestamp', 'post_id', 'numFb', 'numFb_dt', 'percent_numFb_dt', 'per_numFb', 'feedback_rank', 'final_numComments', 'scores','subreddit_id'])\n\n for i, post_id in enumerate(post_ids):\n\n if post_id in post_comments_times:\n next_time = postID_postTimes[post_id]\n fb_times = np.array(post_comments_times[post_id])\n 
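# keep only the comments that arrived before this author's next post\n 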
idx_curr_next = np.where(fb_times < next_time)[0]\n num_comments = len(idx_curr_next)\n number_of_comments_dist.append(num_comments)\n delay = next_time - post_times[i]\n delays_dist.append(delay)\n fb_rank = choose_timeBin_and_percentileScore(delay, num_comments, user_rankingMatrix[user])\n final_numComments = len(fb_times)\n # timestamp, post_id, total_comments, number_feedback, feedback_rank, subreddit_id\n writer.writerow([post_timestamps[i], post_id, post_feedback[i], post_feedback_div_delay[i],\n fb_div_delay_per[i], fb_div_delay_median[i], fb_rank, final_numComments, scores_list[i], subreddit_ids[i]])\n else:\n writer.writerow([post_timestamps[i], post_id, -1, -1,\n -1, -1, -1, -1, scores_list[i], subreddit_ids[i]])\n\n finally:\n f.close()\n\n np.savetxt('number_of_comments_dist.csv', number_of_comments_dist, delimiter=',', fmt='%5s')\n np.savetxt('delays_dist.csv', delays_dist, delimiter=',', fmt='%5s')\n\nif __name__ == '__main__':\n per_subreddit_posts, per_user_posts = extract_active_users_posts()\n post_comments_times = extract_post_comments()\n user_posts = countComment_before_nextPost_and_rank(per_user_posts, per_subreddit_posts, post_comments_times)\n\n\n\n\n","repo_name":"social-info-lab/community-feedback-effect","sub_path":"Reddit/5_explore_feedback_rep.py","file_name":"5_explore_feedback_rep.py","file_ext":"py","file_size_in_byte":11673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18092282988","text":"import diaper\nimport pytest\n\nfrom cfme.fixtures.pytest_store import store\nfrom cfme.utils import ssh\nfrom cfme.utils.log import logger\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_sessionfinish(session, exitstatus):\n \"\"\"Loop through the appliance stack and close ssh connections\"\"\"\n\n for ssh_client in store.ssh_clients_to_close:\n logger.debug('Closing ssh connection on %r', ssh_client)\n try:\n ssh_client.close()\n except Exception:\n logger.exception('Closing ssh connection on %r failed, but ignoring', ssh_client)\n for session in ssh._client_session:\n with diaper:\n session.close()\n yield\n","repo_name":"ManageIQ/integration_tests","sub_path":"cfme/fixtures/ssh_client.py","file_name":"ssh_client.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"77"} +{"seq_id":"21839418021","text":"# 함수 내부에 불필요한 print문이 있는 경우 오답으로 처리가 됩니다.\ndef get_final_position(N, mat, moves):\n position_x = 0\n position_y = 0\n for i in moves:\n if i == 0 :\n position_x -= 1\n if i == 1 :\n position_x += 1\n if i == 2 :\n position_y -= 1\n if i == 3 :\n position_y += 1\n \n return [position_x, position_y]\n\n # 여기에 코드를 작성하여 함수를 완성합니다.\n\n\n# 아래의 코드를 수정하거나 새롭게 추가하지 않습니다.\nif __name__ == '__main__':\n N = 3\n mat = [\n [1, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n ] \n moves1 = [1, 1, 3]\n print(get_final_position(N, mat, moves1)) # [2, 1]\n \n moves2 = [1, 3, 3]\n print(get_final_position(N, mat, moves2)) # [1, 2]","repo_name":"niinp28/TIL","sub_path":"PS/mthly_test/1st/problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35645848402","text":"from flask import Blueprint, jsonify, request, make_response\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom datetime import datetime\n\nfrom ..models import db, Trip, Access, River, User\n\n\ntrip = Blueprint('trips', 
__name__)\n\n\n@trip.route('/', methods=[\"GET\", \"POST\"])\n@jwt_required()\ndef trips():\n    user_email = get_jwt_identity()\n    user_obj = User.query.filter_by(email=user_email).first()\n\n    # POST path\n    if request.method == \"POST\":\n        data = request.get_json()\n        trip = Trip(\n            scheduled_time=data['dateTime'],\n            river_id=data['riverID'],\n            trip_leader=user_obj.id,\n            put_in=data['putinID'],\n            take_out=data['takeoutID']\n        )\n        db.session.add(trip)\n        db.session.commit()\n        return jsonify(message=\"Success\"), 200\n\n    # GET path\n    else:\n        user_trips = []\n        user_boater_instances = user_obj.boaters\n        for instance in user_boater_instances:\n            user_trips.append(instance.trip_id)\n        trips = []\n        trip_objects = Trip.query.filter(Trip.id.in_(user_trips)).all()\n        for trip in trip_objects:\n            trips.append(trip.to_dict())\n        return jsonify(trips=trips), 200\n\n\n@trip.route('/<int:id>', methods=[\"GET\", \"PUT\", \"DELETE\"])\n@jwt_required()\ndef trip_by_id(id):\n    trip_object = Trip.query.filter_by(id=id).first()\n\n    # PUT path\n    if request.method == \"PUT\":\n        data = request.get_json()\n        trip_object.scheduled_time = data['dateTime']\n        trip_object.river_id = data['riverID']\n        trip_object.trip_leader = data['userID']\n        trip_object.put_in = data['putinID']\n        trip_object.take_out = data['takeoutID']\n        db.session.commit()\n        return jsonify(message=\"Trip Successfully Updated\"), 200\n\n    # DELETE path\n    if request.method == \"DELETE\":\n        user_email = get_jwt_identity()\n        user = User.query.filter_by(email=user_email).first()\n        if trip_object.trip_leader == user.id:\n            db.session.delete(trip_object)\n            db.session.commit()\n            return jsonify(message=\"Trip Successfully Deleted\"), 200\n        # only the trip leader may delete a trip\n        return jsonify(message=\"Forbidden\"), 403\n\n    else:\n        # return trip\n        invites = []\n        boaters = []\n        boats = []\n        vehicles = []\n        access = []\n\n        # package invite list\n        invite_objects = trip_object.invites\n        for invite in invite_objects:\n            invites.append(invite.to_dict())\n\n        # package boater list\n        boater_objects = trip_object.boaters\n        for boater in boater_objects:\n            boaters.append(boater.to_dict())\n\n        # package access points (attribute access; model instances are not subscriptable)\n        putin = Access.query.filter_by(id=trip_object.put_in).first()\n        takeout = Access.query.filter_by(id=trip_object.take_out).first()\n        access.append(putin.to_dict())\n        access.append(takeout.to_dict())\n\n        river = River.query.filter_by(id=trip_object.river_id).first()\n        trip_leader = User.query.filter_by(\n            id=trip_object.trip_leader).first()\n\n        return jsonify(\n            trip=trip_object.to_dict(),\n            invites=invites,\n            boaters=boaters,\n            boats=boats,\n            vehicles=vehicles,\n            access=access,\n            river=river.to_dict(),\n            trip_leader=trip_leader.to_safe_object(),\n        ), 200\n","repo_name":"NolanCrenshaw/dep_20210808_GoBoatN","sub_path":"backend/api/trips.py","file_name":"trips.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"11665924145","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport csv\nimport json\nimport subprocess\nimport shutil\n\ndef loadUncommentedJsonString(filename):\n\tlines = list()\n\twith open(filename) as jsonFile:\n\t\tfor line in jsonFile:\n\t\t\tlines.append(line.rstrip('\\n').split('//')[0])\n\treturn '\\n'.join(lines)\n\nif __name__ == '__main__':\n\twithRestarts = False\n\tif len(sys.argv) > 1:\n\t\tif sys.argv[1] == '--with-restarts':\n\t\t\twithRestarts = True\n\t\n\trootDir = os.path.dirname(__file__)\n\t\n\targs = [\n\t\t'java',\n\t\t'-ea',\n\t\t'-Xmx24G',\n\t\t'-XX:+UseSerialGC',\n\t\t'-cp',\n\t\tos.path.join(rootDir, 'lib', 
'*'),\n\t\t'antigen.Antigen'\n\t]\n\t\n\tparams = json.loads(loadUncommentedJsonString('parameters.json'))\n\tendDay = float(params['endDay'])\n\tprintStep = float(params['printStep'])\n\t\n\tdone = False\n\twhile not done:\n\t\tprocess = subprocess.Popen(args)\n\t\tprocess.wait()\n\t\t\n\t\tif withRestarts:\n\t\t\trestartLim = 10.0\n\t\t\trestartCount = 0.0\n\t\t\twith open('out.timeseries') as f:\n\t\t\t\tcr = csv.reader(f, delimiter='\\t')\n\t\t\t\tfor row in cr:\n\t\t\t\t\tpass\n\t\t\t\tlastDay = float(row[0]) * 365.0\n\t\t\t\n\t\t\t\tif (abs(lastDay - endDay) < printStep + 1.0 or restartCount >= restartLim):\n\t\t\t\t\tdone = True\n\t\t\t\telse:\n\t\t\t\t\tprint('Simulation aborted; restarting')\n\t\t\t\t\trestartCount = restartCount + 1.0\n\t\telse:\n\t\t\tdone = True\n\tprint('Simulation completed')\n","repo_name":"cobeylab/antigen-phylogeography","sub_path":"scripts/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"554050510","text":"import pytest\nimport numpy as np\nfrom classes.Endereco import Endereco\nimport http.client as httplib\nfrom requests.exceptions import ConnectionError\nimport requests\nimport json\nfrom classes.PessoaFisica import PessoaFisica\n\n@pytest.mark.main2\n@pytest.mark.teste_pessoa\n@pytest.mark.main3\n@pytest.mark.teste_com_internet\ndef test_criacao_pessoa_fisica():\n    nome = 'Carlos'\n    email = 'tiago@email.com'\n    cpf = '524.222.452-6'\n    pessoa1 = PessoaFisica(cpf,email,nome)\n    assert pessoa1.nome == 'Carlos'\n\n@pytest.mark.main2\n@pytest.mark.teste_pessoa\n@pytest.mark.teste_com_internet\n@pytest.mark.main3\ndef test_pessoa_adicionar_endereco():\n    nome = 'Carlos'\n    email = 'tiago@email.com'\n    cpf = '524.222.452-6'\n    pessoa1 = PessoaFisica(cpf,email,nome)\n    end1 = Endereco('08320330', 430)\n    pessoa1.adicionar_endereco('casa',end1)\n    assert 'Rua Clemente Falcão' == pessoa1.get_endereco('casa')['casa']\n\n@pytest.mark.main2\n@pytest.mark.teste_pessoa\n@pytest.mark.teste_com_internet\n@pytest.mark.main3\ndef test_pessoa_listar_endereco():\n    nome = 'Carlos'\n    email = 'tiago@email.com'\n    cpf = '524.222.452-6'\n    pessoa1 = PessoaFisica(cpf,email,nome)\n    end1 = Endereco('08320330', 430)\n    pessoa1.adicionar_endereco('casa',end1)\n    list = pessoa1.listar_enderecos()\n    assert list == {'casa': 'Rua Clemente Falcão'}\n    \n\n\n@pytest.mark.teste_pessoa\n@pytest.mark.teste_com_internet\n@pytest.mark.main3\ndef test_pessoa_busca_nome():\n    nome = 'Carlos'\n    email = 'tiago@email.com'\n    cpf = '524.222.452-6'\n    pessoa1 = PessoaFisica(cpf,email,nome)\n    pessoas = PessoaFisica.busca_nome('Carl')\n    if len(pessoas) > 0:\n        pessoa = pessoas[0]\n        assert pessoa.get_name() == 'Carlos'","repo_name":"insper-classroom/refatoracao-de-endereco-pedido-e-criacao-de-testes-GustavoAntony","sub_path":"test/test_pessoafisica.py","file_name":"test_pessoafisica.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"33328058362","text":"'''\nWrite a Python program that simulates an election using an electronic voting machine.\nThe algorithm must read an indeterminate set of votes and tally the votes of each\ncandidate, displaying the result after voting closes.\n\n\n'''\n\ndef existeNalista(lista, voto):\n    for posicao in range(len(lista)):\n        if voto in lista[posicao]:\n            lista = computaOVoto(lista, posicao)\n            break\n    return lista\n\ndef computaOVoto(lista, candidato):\n    lista[candidato][2] += 1\n    return lista\n\ndef quemGanhou(lista):\n    maiorVoto = lista[0][2]\n    ganhador = lista[0][0]\n    for nomeC, numC, votos in lista:\n        if votos > maiorVoto:\n            maiorVoto = votos\n            ganhador = nomeC\n    return ganhador\n\ndef candidatos():\n    temMaisCandidadatos = 'S'\n    lista = []\n    while temMaisCandidadatos.upper()=='S':\n        nomeCandidato = input(\"Enter the candidate's name:\")\n        numeroCandidato = int(input(\"Enter the candidate's number:\"))\n        par = [nomeCandidato,numeroCandidato,0]\n        lista.append(par)\n        temMaisCandidadatos = input(\"Add more candidates? S(yes) - N(no):\")\n    return lista\n\n\nlistaCandidatos = candidatos()\n\nprint(\"Before voting:\",listaCandidatos)\nquerVotar = 'S'\nwhile querVotar.upper()=='S':\n    \n    for nomeC, numC, vot in listaCandidatos:\n        print(\"\"\"\n    Candidates\n    Name: %s\n    Number: %d\n    -------------------------------\n    \"\"\"%(nomeC, numC))\n    \n    print(\"Enter the candidate's number:\")\n    voto = int(input(\"Number:\"))\n    listaCandidatos = existeNalista(listaCandidatos,voto)\n    querVotar = input(\"Do you want to vote again? S(yes) - N(no)\")\n\nprint(\"Winner:\")\nprint(quemGanhou(listaCandidatos))\n\n    \n","repo_name":"WesleyVitor/alp_repository","sub_path":"ProgramacaoEstruturada/EstruturaDeRepeticao/unarEletronica.py","file_name":"unarEletronica.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29380709329","text":"a=input()\nn=int(input())\nl=[]\ni = 0\nwhile i1:\n z=0\n while z1:\n print(\"ninguna\")\n break\n else:\n z+=1\n print(a)\nelse:\n print(\"ninguna\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito2_ej3/hito2_ej3_d7e380d997d0a675e6c739e55455365f.py","file_name":"hito2_ej3_d7e380d997d0a675e6c739e55455365f.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29674718137","text":"# Import required library\n\nfrom level_number import Level\nimport turtle as t\nfrom time import sleep\n\nfrom models import Ball\nfrom screen_draw import end\n\n\nclass PaddleGame:\n    lives = 3\n    sc = t.Screen()\n    active: bool\n    level = 1\n    state = False\n\n    def __init__(self):\n        self.__pen = t.Turtle()\n        self.lives = self.lives\n        self.sc.title(\"Pong game\")\n        self.sc.bgcolor('#fff2b7')\n        self.sc.setup(width=1000, height=600)\n\n        self.sc.listen()\n        self.sc.onkeypress(self.state_menu, \"p\")\n        while True:\n            self.sc.bgcolor('#fff2b7')\n            self.__pen.penup()\n            self.__pen.goto(0, 150)\n            self.__pen.pendown()\n            self.__pen.write(\"press P to start\", False, align=\"center\", font=(\"Courier\", 48, \"normal\"))\n            self.__pen.penup()\n            self.__pen.goto(0, 100)\n            self.__pen.pendown()\n            self.__pen.write(\"try not to drop the ball:\", False, align=\"center\", font=(\"Courier\", 35, \"normal\"))\n            self.__pen.penup()\n            self.__pen.goto(0, -50)\n            self.__pen.pendown()\n            self.__pen.write(\"How to play:\", False, align=\"center\", font=(\"Courier\", 35, \"normal\"))\n            self.__pen.penup()\n            self.__pen.goto(0, -100)\n            self.__pen.pendown()\n            self.__pen.write(\"< and >\", False, align=\"center\", font=(\"Courier\", 30, \"normal\"))\n            self.__pen.hideturtle()\n            if self.state is True:\n                self.sc.clearscreen()\n                break\n        ####LEVEL 1\n        self.transition(\"Level :{}\".format(self.level))\n        position_x = [-150, -75, 0, 75, 150, -150, -75, 0, 75, 150]\n        position_y = [150, 150, 150, 150, 150, 200, 200, 200, 200, 200]\n        level_1 = Level(1, position_x, position_y, 4, 5)\n        self.placar()\n        
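# run level 1 until every enemy block has been cleared\n        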
while True:\n level_1.update()\n if len(level_1.enemies) == 0:\n self.level += 1\n print(\"------>\", self.level)\n break\n ####LEVEL 2\n self.transition(\"Level :{}\".format(self.level))\n position_x = [-150, -75, 0, 75, 150, -150, -75, 0, 75, 150]\n position_y = [150, 150, 150, 150, 150, 200, 200, 200, 200, 200]\n level_2 = Level(2, position_x, position_y, 5, 6)\n self.placar()\n while True:\n level_2.update()\n if len(level_2.enemies) == 0:\n self.level += 1\n print(\"------>\", self.level)\n break\n ####LEVEL 3\n self.transition(\"Level :{}\".format(self.level))\n position_x = [-150, -75, 0, 75, 150, -150, -75, 0, 75, 150, -150, -75, 0, 75, 150]\n position_y = [150, 150, 150, 150, 150, 200, 200, 200, 200, 200, 100, 100, 100, 100, 100]\n level_3 = Level(3, position_x, position_y, 5, 6)\n self.placar()\n while True:\n level_3.update()\n if len(level_3.enemies) == 0:\n self.level += 1\n print(\"------>\", self.level)\n break\n ####LEVEL 4\n self.transition(\"Level Bonus\")\n position_x = [0, -75, 0, 75, -150, -75, 0, 75, 150, -75, 0, 75, 0]\n position_y = [210, 180, 180, 180, 150, 150, 150, 150, 150, 120, 120, 120, 90]\n level_4 = Level(4, position_x, position_y, 5, 6)\n self.placar()\n while True:\n level_4.update()\n if len(level_4.enemies) == 0:\n self.level += 1\n print(\"------>\", self.level)\n break\n ###WIN\n end()\n sleep(3)\n self.sc.bye()\n\n def state_menu(self):\n print(\"close menu\")\n self.state = True\n\n # ###\n def placar(self):\n self.__pen.penup()\n self.__pen.goto(-100, 240)\n self.__pen.pendown()\n self.__pen.write(\"Level:{} \".format(self.level), align=\"center\",\n font=(\"Courier\", 24, \"normal\"))\n\n def transition(self, frase):\n self.sc.clearscreen()\n self.sc.bgcolor('#fff2b7')\n self.__pen.speed(0)\n self.__pen.penup()\n self.__pen.goto(0, 0)\n self.__pen.pendown()\n self.__pen.write(frase, False, align=\"center\", font=(\"Courier\", 48, \"normal\"))\n self.__pen.hideturtle()\n sleep(2)\n\n def clear(self):\n self.__pen = t.Turtle()\n self.__pen.speed(0)\n self.__pen.shape(\"square\")\n self.__pen.color(\"black\")\n self.__pen.shapesize(stretch_wid=1000, stretch_len=1000)\n\n # self.sc.update()\n\n def start(self):\n self.active = True\n","repo_name":"EduardoPavei00/Pong_Pong_Game","sub_path":"gameturtle.py","file_name":"gameturtle.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23604641797","text":"# Demonstrate simulate on synthetic gene expression model\n# different implementations are used and compared\n\n# import stuff\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n# custom files\nfrom pyssa.models.reaction_model import ReactionModel \nfrom pyssa.models.reaction_model import BaseReactionModel \nfrom pyssa.models.jit_kinetic_model import KineticModel as jitKinetic\nimport pyssa.models.standard_models as sm\nimport pyssa.ssa as ssa\nfrom numba import jit, jitclass, int32, float64\nimport time\n\n# activate or deactivate plotting\nplotting = True\n\n# propensity function\n#@jit(nopython=True)\ndef propfun(state):\n prop = np.zeros(6)\n # gene on\n prop[0] = state[0]\n # gene off\n prop[1] = state[1]\n # transcription\n prop[2] = state[1]\n # mrna degration\n prop[3] = state[2]\n # translation\n prop[4] = state[2]\n # protein degradation\n prop[5] = state[3]\n return(prop)\n\n# state = np.array([0, 1, 53, 436])\n# propfun(state)\n# start = time.time()\n# for i in range(100000):\n# test = 
propfun(state)\n# state[3] += 1\n# end = time.time()\n# print(end-start)\n\n# drived class with jit support\nspec = [\n ('num_reactions', int32), # a simple scalar field\n ('num_species', int32), \n ('rates', float64[:]), # an array field\n ('stoichiometry', float64[:, :])\n]\n\n@jitclass(spec)\nclass GeneExpression(BaseReactionModel):\n\n def propfun(self, state):\n prop = np.zeros(6)\n # gene on\n prop[0] = state[0]\n # gene off\n prop[1] = state[1]\n # transcription\n prop[2] = state[1]\n # mrna degration\n prop[3] = state[2]\n # translation\n prop[4] = state[2]\n # protein degradation\n prop[5] = state[3]\n return(prop)\n\n\n# set up the model\npre, post, rates = sm.get_standard_model(\"simple_gene_expression\")\nmodel = ReactionModel(np.array(post)-np.array(pre), propfun, np.array(rates))\nstoichiometry = (np.array(post)-np.array(pre)).astype('float64')\nmodel2 = GeneExpression(stoichiometry, np.array(rates))\n\nmodel3 = jitKinetic(np.array(pre, dtype='float64'), np.array(post, dtype='float64'), np.array(rates, dtype='float64'))\n\n# prepare initial conditions\ninitial = np.array([0.0, 1.0, 0.0, 0.0])\ntspan = np.array([0.0, 3e3])\ndelta_t = 300.0\nobs_times = np.arange(tspan[0]+0.5*delta_t, tspan[1], delta_t)\n\n# set up simulator\nsimulator = ssa.Simulator(model, initial)\n\n# get trajectory \n#trajectory = simulator.simulate(initial, tspan)\n#simulator.events2states(trajectory)\nstart = time.time()\nN = 10\nfor i in range(N):\n trajectory = model3.simulate(initial.astype('float64'), tspan.astype('float64'))\nstop = time.time()\nprint('Generated {0} samples in {1} seconds.'.format(N, stop-start))\n\n# get a subsampling for plotting\nt_plot = np.linspace(tspan[0], tspan[1], 200)\nstates_plot = ssa.discretize_trajectory(trajectory, t_plot)\n\n# get mean\nstates_avg = ssa.sample(model, initial, t_plot, num_samples=1, output='avg')\nprint(states_avg.shape)\n\n# plot result \nif plotting:\n plt.plot(t_plot, 100*states_plot[:, 1], '-k')\n plt.plot(t_plot, states_plot[:, 2], '-b')\n plt.plot(t_plot, states_plot[:, 3], '-r')\n #plt.plot(obs_times.numpy(), obs.numpy(), 'xk')\n plt.show()\n\n # plt.plot(t_plot, 100*states_avg[:, 1], '-k')\n # plt.plot(t_plot, states_avg[:, 2], '-b')\n # plt.plot(t_plot, states_avg[:, 3], '-r')\n # #plt.plot(obs_times.numpy(), obs.numpy(), 'xk')\n # plt.show()","repo_name":"saevus1991/pyssa","sub_path":"tests/test_mjp_jit_gene_expression.py","file_name":"test_mjp_jit_gene_expression.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23957766166","text":"\"\"\"\nCanvasSync by Mathias Perslev\nFebruary 2017\n\n--------------------------------------------\n\nassignment.py, CanvasEntity Class\n\nThe Assignment class stores a list of child File objects and creates HTML\npages representing the assignment description.\n\nIt is one level below the parent container AssignmentsFolder class and\ninherits from the CanvasEntity base class.\n\nAn AssignmentsFolder object is the parent object.\n\nSee developer_info.txt file for more information on the class hierarchy\nof entity objects.\n\"\"\"\n\n# Future imports\nfrom __future__ import print_function\n\n# Inbuilt modules\nimport re\nimport os\nimport io\n\n# Third party\nfrom six import text_type\n\n# CanvasSync module imports\nfrom CanvasSync.entities.canvas_entity import CanvasEntity\nfrom CanvasSync.entities.file import File\nfrom CanvasSync.entities.page import Page\nfrom CanvasSync.entities.linked_file import 
LinkedFile\nfrom CanvasSync.utilities.ANSI import ANSI\nfrom CanvasSync.utilities import helpers\n\n\nclass Assignment(CanvasEntity):\n def __init__(self, assignment_info, parent):\n \"\"\"\n Constructor method, initializes base CanvasEntity class\n\n assignment_info : dict | A dictionary of information on the Canvas assignment object\n parent : object | The parent object, an AssignmentsFolder object\n \"\"\"\n\n self.assignment_info = assignment_info\n assignment_id = self.assignment_info[u\"id\"]\n assignment_name = helpers.get_corrected_name(assignment_info[u\"name\"])\n assignment_path = parent.get_path() + assignment_name\n\n # Initialize base class\n CanvasEntity.__init__(self,\n id_number=assignment_id,\n name=assignment_name,\n sync_path=assignment_path,\n parent=parent,\n identifier=u\"assignment\")\n\n def __repr__(self):\n \"\"\" String representation, overwriting base class method \"\"\"\n status = ANSI.format(u\"[SYNCED]\", formatting=u\"green\")\n return status + u\" \" * 7 + u\"| \" + u\"\\t\" * self.indent + u\"%s: %s\" \\\n % (ANSI.format(u\"Assignment\", formatting=u\"assignment\"),\n self.name)\n\n def make_html(self):\n \"\"\" Create the main HTML description page of the assignment \"\"\"\n\n # Create URL pointing to Canvas live version of the assignment\n url = self.settings.domain + u\"/courses/%s/assignments/%s\" % (self.get_parent().get_parent().get_id(),\n self.get_id())\n\n if not os.path.exists(self.sync_path + self.name + u\".html\"):\n with io.open(self.sync_path + self.name + u\".html\", u\"w\", encoding=u\"utf-8\") as out_file:\n out_file.write(u\"

%s

\" % self.name)\n out_file.write(u\"Click here to \"\n u\"open the live page in Canvas\" % url)\n out_file.write(u\"
\")\n out_file.write(self.assignment_info.get(u\"description\")\n or u\"No description\")\n\n def add_files(self):\n \"\"\"\n Add all files that can be found in the description of the\n assignment to the list of children and sync\n \"\"\"\n # Get file URLs pointing to Canvas items\n try:\n canvas_file_urls = re.findall(r'data-api-endpoint=\\\"(.*?)\\\"',\n self.assignment_info.get(u\"description\") or u\"\")\n except:\n canvas_file_urls = []\n\n # Download information on all found files and add File objects\n # to the children\n for url in canvas_file_urls:\n file_info = self.api.download_item_information(url)\n\n if u'display_name' in file_info:\n item = File(file_info, parent=self)\n elif u'page_id' in file_info:\n item = Page(file_info, parent=self)\n else:\n # Unknown entity, skip it\n item = None\n if item:\n self.add_child(item)\n\n if self.settings.download_linked:\n # We also look for links to files downloaded from other servers\n # Get all URLs ending in a file name (determined as a ending with\n # a '.' and then between 1 and 10 of any characters after that).\n # This has 2 purposes:\n # 1) We do not try to re-download Canvas server files, since\n # they are not matched by this regex\n # 2) We should stay clear of all links to web-sites\n # (they could be large to download, we skip them here)\n urls = re.findall(r'href=\\\"([^ ]*[.]{1}.{1,10})\\\"',\n self.assignment_info.get(u\"description\") or u\"\")\n\n for url in urls:\n linked_file = LinkedFile(url, parent=self)\n if linked_file.url_is_valid():\n self.add_child(linked_file)\n else:\n del linked_file\n\n def walk(self, counter):\n \"\"\" Walk by adding all File objects to the list of children \"\"\"\n print(text_type(self))\n self.add_files()\n\n counter[0] += 1\n for file in self:\n file.walk(counter)\n\n def sync(self):\n \"\"\"\n 1) Adding all File and LinkedFile objects to the list of children\n 2) Synchronize all children objects\n \"\"\"\n print(text_type(self))\n\n self.add_files()\n self.make_html()\n\n for file in self:\n file.sync()\n\n def show(self):\n \"\"\" Show the folder hierarchy by printing every level \"\"\"\n print(text_type(self))\n\n for file in self:\n file.show()\n","repo_name":"perslev/CanvasSync","sub_path":"CanvasSync/entities/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"77"} +{"seq_id":"74211885048","text":"# 문자열 input을 받음\ns = input()\n\n# 크로아티아 알파벳 리스트 정의\ncroatian_alphabet = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']\n\n# 크로아티아 알파벳이 있는 경우 a로 치환해줌\n# 크로아티아 알파벳 길이를 1로 줄이는게 목적이므로 크로아티아 알파벳에 포함되지 않은 임의의 문자열로 replace 해주면 된다\nfor alphabet in croatian_alphabet:\n s = s.replace(alphabet, 'a')\n\n# Solution\nprint(len(s))","repo_name":"hyunha95/algorithm-study","sub_path":"YSW/baekjoon/P2941_크로아티아_알파벳.py","file_name":"P2941_크로아티아_알파벳.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18786004442","text":"from cProfile import label\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport csv\nimport math\n\nplt.rcParams['axes.unicode_minus'] = 'False'\nplt.rc('font',family = 'AppleGothic')\n\n\npath = os.getcwd()\nf = open(path+'/bigdata/data/age1.csv', encoding='utf-8')\ndata = csv.reader(f)\nresult = []\nfor row in data :\n if '태화동' in row[0] :\n for i in row[3:] :\n 
result.append(int(i))\nprint(result)\n\nplt.style.use('ggplot')\nplt.figure(figsize=(6,4))\nplt.plot(result)\nplt.show()\n\nf = open(path+'/bigdata/data/age2.csv')\ndata = csv.reader(f)\nresult = []\n\nm_location = input('Enter the name of the neighborhood (dong) to search for: ')\n\nfor row in data :\n    if m_location in row[0] :\n        for i in row[3:] :\n            result.append(int(i))\n\nplt.style.use('ggplot')\nplt.figure(figsize=(6,4))\nplt.plot(result)\nplt.title(m_location+' population structure')\nplt.show()\n\nf = open(path+'/bigdata/data/age2.csv')\ndata = csv.reader(f)\nresult = []\n\nfor row in data :\n    if '무거동' in row[0] :\n        for i in row[3:] :\n            result.append(int(i))\n\nresult\n\nplt.figure(figsize=(6,4))\nplt.bar(range(101),result)\nplt.show()\n\nf = open(path+'/bigdata/data/gender2.csv')\ndata = csv.reader(f)\nm = []\nf = []  # note: 'f' is reused for the female list; csv.reader keeps its own reference to the open file\n\nfor row in data :\n    if '무거동' in row[0] :\n        for i in range(0,101) :\n            m.append(int((row[i+3]))*-1)\n            f.append(int(row[-(i+1)]))\n\nf.reverse()\n\nplt.figure(figsize=(6,4))\nplt.barh(range(101),m,label='Male')\nplt.barh(range(101),f,label='Female')\nplt.legend()\nplt.show()\n\nf = open(path+'/bigdata/data/gender2.csv')\ndata = csv.reader(f)\nm = []\nf = []\n\nm_location = input('Enter the region to search for: ')\nfor row in data :\n    if m_location in row[0] :\n        for i in row[3:104] :\n            m.append(int(i)*-1)\n        for j in row[106:] :\n            f.append(int(j))\n\nplt.style.use('ggplot')\nplt.figure(figsize=(6,4))\nplt.title(m_location+' male/female population distribution')\nplt.barh(range(101),m,label='Male')\nplt.barh(range(101),f,label='Female')\nplt.legend()\nplt.show()\n\n# show the male/female population ratio as a male/female pie chart ..\nf = open(path+'/bigdata/data/gender3.csv')\ndata = csv.reader(f)\nsize = []\n\nm_location = input('Enter the region to search for: ')\nfor row in data :\n    m = 0\n    f = 0\n    if m_location in row[0] :\n        for i in range(101) :\n            m += int(row[i+3])\n            f += int(row[i+106])\n        break\n\nsize.append(m)\nsize.append(f)\n\nplt.figure(figsize=(6,4))\ncolor = ['crimson','darkcyan']\nplt.axis('equal')\nplt.pie(size,labels=['M','F'],autopct='%.1f%%',colors=color,startangle=90)\nplt.title(m_location+' region male/female ratio')\nplt.show()\n\n# draw the population structure as separate line plots for men and women ..\nf = open(path+'/bigdata/data/gender4.csv')\ndata = csv.reader(f)\n\nm = []\nf = []\n\nm_name = input('Enter the neighborhood you want: ')\nfor row in data :\n    if m_name in row[0] :\n        for i in range(3,104) :\n            m.append(int(row[i]))\n            f.append(int(row[i+103]))\n        break\n\nplt.figure(figsize=(6,4))\nplt.plot(m,label='Male')\nplt.plot(f,label='Female')\nplt.legend()\nplt.show()\n\n# draw the male-minus-female population difference per age as a bar chart ..\nf = open(path+'/bigdata/data/gender4.csv')\ndata = csv.reader(f)\ningu = []\n\nm_name = input('Enter the neighborhood you want: ')\nfor row in data :\n    if m_name in row[0] :\n        for i in range(3,104) :\n            ingu.append(int(row[i])-int(row[i+103]))\n        break\n\nplt.figure(figsize=(6,4))\nplt.bar(range(101),ingu)\nplt.show()\n\n# show the male/female ratio by age group as a scatter plot (male-female gap ..)\nf = open(path+'/bigdata/data/gender2.csv')\ndata = csv.reader(f)\n\nm = [] # male\nf = [] # female\nsize = []\n\nm_name = input('Enter the neighborhood you want: ')\nfor row in data :\n    if m_name in row[0] :\n        for i in range(3,104) :\n            m.append(int(row[i]))\n            f.append(int(row[i+103]))\n            size.append(math.sqrt(int(row[i])+int(row[i+103])))\n        break\n\nplt.figure(figsize=(6,4), dpi=150)\nplt.style.use('ggplot')\nplt.scatter(m,f,s=size,c=range(101),alpha=0.5,cmap='jet')\nplt.plot(range(max(m)),range(max(m)),color='g')\nplt.colorbar()\nplt.title('Population by gender in '+m_name)\nplt.xlabel('Male population')\nplt.ylabel('Female population')\nplt.show()\n","repo_name":"Baedoli/AIUlsan","sub_path":"bigdata/project/project03.py","file_name":"project03.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"8514850074","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom .forms import RechargeWallet, WithdrawWallet, TransferMoney\nfrom .models import Wallet, wallet_ref_code_generator\nfrom django.template import loader\nfrom django.contrib.auth.decorators import login_required\nimport json\nimport requests\nfrom django.conf import settings\n\n\n# Create your views here\n@login_required\ndef view_wallet(request):\n    user = request.user\n\n    try:\n        wallet = Wallet.objects.get(owner=user)\n\n    except Exception:\n\n        wallet = Wallet.objects.create(\n            owner=user,\n            owner_type=\"User\", )\n        headers = {\n            \"Authorization\": \"dskjdks\",\n            \"Content-Type\": \"application/json\",\n            \"sandbox-key\": settings.SANDBOX_KEY\n        }\n        url = \"https://fsi.ng/api/v1/flutterwave/v3/virtual-account-numbers\"\n        data = ({\n            \"email\": user.email,\n            \"is_permanent\": True,\n            \"bvn\": user.bvn,\n            \"amount\": 500,\n            \"phonenumber\": user.phone,\n            \"firstname\": user.first_name,\n            \"lastname\": user.last_name,\n            \"narration\": \"Lamuni\",\n        })\n\n        my_data = requests.post(url=url, json=data, headers=headers)\n\n        if my_data.status_code not in [200, 203]:\n            message = \"There was an error creating account number : {}:{}\".format(\n                my_data.status_code, my_data.text\n            )\n            print(message)\n\n            data = {\n                \"message\": message\n            }\n            return render(request, 'add_sales_record.html', context=data)\n\n        json_data = my_data.json()\n\n        real_data = json_data['data']\n\n        wallet.account_number = real_data['account_number']\n        wallet.account_name = real_data['note']\n        wallet.bank = real_data['bank_name']\n\n        wallet.save()\n\n    template = loader.get_template('wallet_cabinet.html')\n\n    # fill_up_wallet_form = RechargeWallet()\n    # withdraw_money_form = WithdrawWallet()\n    context = {\n        'wallet': wallet,\n        # 'fillUp_wallet_form': fill_up_wallet_form,\n        # 'withdraw_money_form': withdraw_money_form\n    }\n\n    return HttpResponse(template.render(context, request))\n\n\ndef recharge(request, *args, **kwargs):\n    if request.method == \"POST\":\n        recharge_form = RechargeWallet(request.POST)\n        if recharge_form.is_valid():\n            owner = request.user\n            amount = recharge_form.cleaned_data['amount']\n            description = recharge_form.cleaned_data['description']\n            ref_code = wallet_ref_code_generator()\n            # payment = pay_with_pay_stack(request, amount, ref_code)\n            recharge_transaction = Wallet.recharge_wallet(amount, owner, description)\n            return HttpResponseRedirect('/wallet/')\n\n    else:\n        recharge_form = RechargeWallet()\n\n    context = {\n        'recharge_form': recharge_form\n    }\n    return render(request, 'recharge_form.html', 
context)\n\n\ndef withdraw(request, *args, **kwargs):\n if request.method == \"POST\":\n withdraw_form = WithdrawWallet(request.POST)\n if withdraw_form.is_valid():\n owner = request.user\n amount = withdraw_form.cleaned_data['amount']\n description = withdraw_form.cleaned_data['description']\n wallet_to_withdraw = get_object_or_404(Wallet, owner=owner)\n withdraw_transaction = Wallet.withdraw_wallet(amount, description, wallet_to_withdraw)\n return HttpResponseRedirect('/wallet/')\n\n else:\n withdraw_form = WithdrawWallet()\n\n context = {\n \"withdraw_form\": withdraw_form\n }\n return render(request, 'withdraw_form.html', context)\n\n\ndef transfer(request, *args, **kwargs):\n if request.method == \"POST\":\n transfer_form = TransferMoney(request.POST)\n sender = request.user\n if transfer_form.is_valid():\n amount = transfer_form.cleaned_data['amount']\n description = transfer_form.cleaned_data['description']\n receiver = transfer_form.cleaned_data['receiver']\n wallet_to_transfer = get_object_or_404(Wallet, owner=receiver)\n transfer_transaction = Wallet.transfer_money(amount, sender, receiver, description)\n return HttpResponseRedirect('/wallet/')\n\n else:\n transfer_form = TransferMoney()\n\n context = {\n \"transfer_form\": transfer_form,\n }\n return render(request, \"transfer_form.html\", context)\n","repo_name":"Bukharee/Lamuni","sub_path":"Wallet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34411533826","text":"# -*- coding: utf-8 -*-\n# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)\n# 2020 MinIO, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=too-many-lines\n\n\"\"\"\nResponse of ListBuckets, ListObjects, ListObjectsV2 and ListObjectVersions API.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport base64\nimport datetime\nimport json\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom urllib.parse import unquote_plus\nfrom xml.etree import ElementTree as ET\n\nfrom .credentials import Credentials\nfrom .helpers import check_bucket_name\nfrom .signer import get_credential_string, post_presign_v4\nfrom .time import from_iso8601utc, to_amz_date, to_iso8601utc\nfrom .xml import find, findall, findtext\n\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\n\nclass Bucket:\n \"\"\"Bucket information.\"\"\"\n\n def __init__(self, name, creation_date):\n self._name = name\n self._creation_date = creation_date\n\n @property\n def name(self):\n \"\"\"Get name.\"\"\"\n return self._name\n\n @property\n def creation_date(self):\n \"\"\"Get creation date.\"\"\"\n return self._creation_date\n\n def __repr__(self):\n return f\"{type(self).__name__}('{self.name}')\"\n\n def __str__(self):\n return self.name\n\n def __eq__(self, other):\n if isinstance(other, Bucket):\n return self.name == other.name\n if isinstance(other, str):\n return 
self.name == other\n return NotImplemented\n\n def __hash__(self):\n return hash(self.name)\n\n\nclass ListAllMyBucketsResult:\n \"\"\"LissBuckets API result.\"\"\"\n\n def __init__(self, buckets):\n self._buckets = buckets\n\n @property\n def buckets(self):\n \"\"\"Get buckets.\"\"\"\n return self._buckets\n\n @classmethod\n def fromxml(cls, element):\n \"\"\"Create new object with values from XML element.\"\"\"\n element = find(element, \"Buckets\")\n buckets = []\n if element is not None:\n elements = findall(element, \"Bucket\")\n for bucket in elements:\n name = findtext(bucket, \"Name\", True)\n creation_date = findtext(bucket, \"CreationDate\")\n if creation_date:\n creation_date = from_iso8601utc(creation_date)\n buckets.append(Bucket(name, creation_date))\n return cls(buckets)\n\n\nclass Object:\n \"\"\"Object information.\"\"\"\n\n def __init__(self, # pylint: disable=too-many-arguments\n bucket_name,\n object_name,\n last_modified=None, etag=None,\n size=None, metadata=None,\n version_id=None, is_latest=None, storage_class=None,\n owner_id=None, owner_name=None, content_type=None,\n is_delete_marker=False):\n self._bucket_name = bucket_name\n self._object_name = object_name\n self._last_modified = last_modified\n self._etag = etag\n self._size = size\n self._metadata = metadata\n self._version_id = version_id\n self._is_latest = is_latest\n self._storage_class = storage_class\n self._owner_id = owner_id\n self._owner_name = owner_name\n self._content_type = content_type\n self._is_delete_marker = is_delete_marker\n\n @property\n def bucket_name(self):\n \"\"\"Get bucket name.\"\"\"\n return self._bucket_name\n\n @property\n def object_name(self):\n \"\"\"Get object name.\"\"\"\n return self._object_name\n\n @property\n def is_dir(self):\n \"\"\"Get whether this key is a directory.\"\"\"\n return self._object_name.endswith(\"/\")\n\n @property\n def last_modified(self):\n \"\"\"Get last modified time.\"\"\"\n return self._last_modified\n\n @property\n def etag(self):\n \"\"\"Get etag.\"\"\"\n return self._etag\n\n @property\n def size(self):\n \"\"\"Get size.\"\"\"\n return self._size\n\n @property\n def metadata(self):\n \"\"\"Get metadata.\"\"\"\n return self._metadata\n\n @property\n def version_id(self):\n \"\"\"Get version ID.\"\"\"\n return self._version_id\n\n @property\n def is_latest(self):\n \"\"\"Get is-latest flag.\"\"\"\n return self._is_latest\n\n @property\n def storage_class(self):\n \"\"\"Get storage class.\"\"\"\n return self._storage_class\n\n @property\n def owner_id(self):\n \"\"\"Get owner ID.\"\"\"\n return self._owner_id\n\n @property\n def owner_name(self):\n \"\"\"Get owner name.\"\"\"\n return self._owner_name\n\n @property\n def is_delete_marker(self):\n \"\"\"Get whether this key is a delete marker.\"\"\"\n return self._is_delete_marker\n\n @property\n def content_type(self):\n \"\"\"Get content type.\"\"\"\n return self._content_type\n\n @classmethod\n def fromxml(cls, element, bucket_name, is_delete_marker=False,\n encoding_type=None):\n \"\"\"Create new object with values from XML element.\"\"\"\n tag = findtext(element, \"LastModified\")\n last_modified = None if tag is None else from_iso8601utc(tag)\n\n tag = findtext(element, \"ETag\")\n etag = None if tag is None else tag.replace('\"', \"\")\n\n tag = findtext(element, \"Size\")\n size = None if tag is None else int(tag)\n\n tag = find(element, \"Owner\")\n owner_id, owner_name = (\n (None, None) if tag is None\n else (findtext(tag, \"ID\"), findtext(tag, \"DisplayName\"))\n )\n\n tag = 
find(element, \"UserMetadata\") or []\n metadata = {}\n for child in tag:\n key = child.tag.split(\"}\")[1] if \"}\" in child.tag else child.tag\n metadata[key] = child.text\n\n object_name = findtext(element, \"Key\", True)\n if encoding_type == \"url\":\n object_name = unquote_plus(object_name)\n\n return cls(\n bucket_name,\n object_name,\n last_modified=last_modified,\n etag=etag,\n size=size,\n version_id=findtext(element, \"VersionId\"),\n is_latest=findtext(element, \"IsLatest\"),\n storage_class=findtext(element, \"StorageClass\"),\n owner_id=owner_id,\n owner_name=owner_name,\n metadata=metadata,\n is_delete_marker=is_delete_marker,\n )\n\n\ndef parse_list_objects(response, bucket_name=None):\n \"\"\"Parse ListObjects/ListObjectsV2/ListObjectVersions response.\"\"\"\n element = ET.fromstring(response.data.decode())\n bucket_name = findtext(element, \"Name\", True)\n encoding_type = findtext(element, \"EncodingType\")\n elements = findall(element, \"Contents\")\n objects = [\n Object.fromxml(tag, bucket_name, encoding_type=encoding_type)\n for tag in elements\n ]\n marker = objects[-1].object_name if objects else None\n\n elements = findall(element, \"Version\")\n objects += [\n Object.fromxml(tag, bucket_name, encoding_type=encoding_type)\n for tag in elements\n ]\n\n elements = findall(element, \"CommonPrefixes\")\n objects += [\n Object(\n bucket_name, unquote_plus(findtext(tag, \"Prefix\", True))\n if encoding_type == \"url\" else findtext(tag, \"Prefix\", True)\n ) for tag in elements\n ]\n\n elements = findall(element, \"DeleteMarker\")\n objects += [\n Object.fromxml(tag, bucket_name, is_delete_marker=True,\n encoding_type=encoding_type)\n for tag in elements\n ]\n\n is_truncated = (findtext(element, \"IsTruncated\") or \"\").lower() == \"true\"\n key_marker = findtext(element, \"NextKeyMarker\")\n if key_marker and encoding_type == \"url\":\n key_marker = unquote_plus(key_marker)\n version_id_marker = findtext(element, \"NextVersionIdMarker\")\n continuation_token = findtext(element, \"NextContinuationToken\")\n if key_marker is not None:\n continuation_token = key_marker\n if continuation_token is None:\n continuation_token = findtext(element, \"NextMarker\")\n if continuation_token and encoding_type == \"url\":\n continuation_token = unquote_plus(continuation_token)\n if continuation_token is None and is_truncated:\n continuation_token = marker\n return objects, is_truncated, continuation_token, version_id_marker\n\n\nclass CompleteMultipartUploadResult:\n \"\"\"CompleteMultipartUpload API result.\"\"\"\n\n def __init__(self, response):\n element = ET.fromstring(response.data.decode())\n self._bucket_name = findtext(element, \"Bucket\")\n self._object_name = findtext(element, \"Key\")\n self._location = findtext(element, \"Location\")\n self._etag = findtext(element, \"ETag\")\n if self._etag:\n self._etag = self._etag.replace('\"', \"\")\n self._version_id = response.headers.get(\"x-amz-version-id\")\n self._http_headers = response.headers\n\n @property\n def bucket_name(self):\n \"\"\"Get bucket name.\"\"\"\n return self._bucket_name\n\n @property\n def object_name(self):\n \"\"\"Get object name.\"\"\"\n return self._object_name\n\n @property\n def location(self):\n \"\"\"Get location.\"\"\"\n return self._location\n\n @property\n def etag(self):\n \"\"\"Get etag.\"\"\"\n return self._etag\n\n @property\n def version_id(self):\n \"\"\"Get version ID.\"\"\"\n return self._version_id\n\n @property\n def http_headers(self):\n \"\"\"Get HTTP headers.\"\"\"\n return 
self._http_headers\n\n\nclass Part:\n \"\"\"Part information of a multipart upload.\"\"\"\n\n def __init__(self, part_number, etag, last_modified=None, size=None):\n self._part_number = part_number\n self._etag = etag\n self._last_modified = last_modified\n self._size = size\n\n @property\n def part_number(self):\n \"\"\"Get part number. \"\"\"\n return self._part_number\n\n @property\n def etag(self):\n \"\"\"Get etag.\"\"\"\n return self._etag\n\n @property\n def last_modified(self):\n \"\"\"Get last-modified.\"\"\"\n return self._last_modified\n\n @property\n def size(self):\n \"\"\"Get size.\"\"\"\n return self._size\n\n @classmethod\n def fromxml(cls, element):\n \"\"\"Create new object with values from XML element.\"\"\"\n part_number = findtext(element, \"PartNumber\", True)\n etag = findtext(element, \"ETag\", True)\n etag = etag.replace('\"', \"\")\n tag = findtext(element, \"LastModified\")\n last_modified = None if tag is None else from_iso8601utc(tag)\n size = findtext(element, \"Size\")\n if size:\n size = int(size)\n return cls(part_number, etag, last_modified, size)\n\n\nclass ListPartsResult:\n \"\"\"ListParts API result.\"\"\"\n\n def __init__(self, response):\n element = ET.fromstring(response.data.decode())\n self._bucket_name = findtext(element, \"Bucket\")\n self._object_name = findtext(element, \"Key\")\n tag = find(element, \"Initiator\")\n self._initiator_id = (\n None if tag is None else findtext(tag, \"ID\")\n )\n self._initiator_name = (\n None if tag is None else findtext(tag, \"DisplayName\")\n )\n tag = find(element, \"Owner\")\n self._owner_id = (\n None if tag is None else findtext(tag, \"ID\")\n )\n self._owner_name = (\n None if tag is None else findtext(tag, \"DisplayName\")\n )\n self._storage_class = findtext(element, \"StorageClass\")\n self._part_number_marker = findtext(element, \"PartNumberMarker\")\n self._next_part_number_marker = findtext(\n element, \"NextPartNumberMarker\",\n )\n if self._next_part_number_marker:\n self._next_part_number_marker = int(self._next_part_number_marker)\n self._max_parts = findtext(element, \"MaxParts\")\n if self._max_parts:\n self._max_parts = int(self._max_parts)\n self._is_truncated = findtext(element, \"IsTruncated\")\n self._is_truncated = (\n self._is_truncated is not None and\n self._is_truncated.lower() == \"true\"\n )\n self._parts = [Part.fromxml(tag) for tag in findall(element, \"Part\")]\n\n @property\n def bucket_name(self):\n \"\"\"Get bucket name.\"\"\"\n return self._bucket_name\n\n @property\n def object_name(self):\n \"\"\"Get object name.\"\"\"\n return self._object_name\n\n @property\n def initiator_id(self):\n \"\"\"Get initiator ID.\"\"\"\n return self._initiator_id\n\n @property\n def initator_name(self):\n \"\"\"Get initiator name.\"\"\"\n return self._initiator_name\n\n @property\n def owner_id(self):\n \"\"\"Get owner ID.\"\"\"\n return self._owner_id\n\n @property\n def owner_name(self):\n \"\"\"Get owner name.\"\"\"\n return self._owner_name\n\n @property\n def storage_class(self):\n \"\"\"Get storage class.\"\"\"\n return self._storage_class\n\n @property\n def part_number_marker(self):\n \"\"\"Get part number marker.\"\"\"\n return self._part_number_marker\n\n @property\n def next_part_number_marker(self):\n \"\"\"Get next part number marker.\"\"\"\n return self._next_part_number_marker\n\n @property\n def max_parts(self):\n \"\"\"Get max parts.\"\"\"\n return self._max_parts\n\n @property\n def is_truncated(self):\n \"\"\"Get is-truncated flag.\"\"\"\n return self._is_truncated\n\n 
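# Part entries parsed from the XML listing response\n    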
@property\n def parts(self):\n \"\"\"Get parts.\"\"\"\n return self._parts\n\n\nclass Upload:\n \"\"\" Upload information of a multipart upload.\"\"\"\n\n def __init__(self, element, encoding_type=None):\n self._encoding_type = encoding_type\n self._object_name = findtext(element, \"Key\", True)\n self._object_name = (\n unquote_plus(self._object_name) if self._encoding_type == \"url\"\n else self._object_name\n )\n self._upload_id = findtext(element, \"UploadId\")\n tag = find(element, \"Initiator\")\n self._initiator_id = (\n None if tag is None else findtext(tag, \"ID\")\n )\n self._initiator_name = (\n None if tag is None else findtext(tag, \"DisplayName\")\n )\n tag = find(element, \"Owner\")\n self._owner_id = (\n None if tag is None else findtext(tag, \"ID\")\n )\n self._owner_name = (\n None if tag is None else findtext(tag, \"DisplayName\")\n )\n self._storage_class = findtext(element, \"StorageClass\")\n self._initiated_time = findtext(element, \"Initiated\")\n if self._initiated_time:\n self._initiated_time = from_iso8601utc(self._initiated_time)\n\n @property\n def object_name(self):\n \"\"\"Get object name.\"\"\"\n return self._object_name\n\n @property\n def initiator_id(self):\n \"\"\"Get initiator ID.\"\"\"\n return self._initiator_id\n\n @property\n def initator_name(self):\n \"\"\"Get initiator name.\"\"\"\n return self._initiator_name\n\n @property\n def owner_id(self):\n \"\"\"Get owner ID.\"\"\"\n return self._owner_id\n\n @property\n def owner_name(self):\n \"\"\"Get owner name.\"\"\"\n return self._owner_name\n\n @property\n def storage_class(self):\n \"\"\"Get storage class.\"\"\"\n return self._storage_class\n\n @property\n def upload_id(self):\n \"\"\"Get upload ID.\"\"\"\n return self._upload_id\n\n @property\n def initiated_time(self):\n \"\"\"Get initiated time.\"\"\"\n return self._initiated_time\n\n\nclass ListMultipartUploadsResult:\n \"\"\"ListMultipartUploads API result.\"\"\"\n\n def __init__(self, response):\n element = ET.fromstring(response.data.decode())\n self._encoding_type = findtext(element, \"EncodingType\")\n self._bucket_name = findtext(element, \"Bucket\")\n self._key_marker = findtext(element, \"KeyMarker\")\n if self._key_marker:\n self._key_marker = (\n unquote_plus(self._key_marker) if self._encoding_type == \"url\"\n else self._key_marker\n )\n self._upload_id_marker = findtext(element, \"UploadIdMarker\")\n self._next_key_marker = findtext(element, \"NextKeyMarker\")\n if self._next_key_marker:\n self._next_key_marker = (\n unquote_plus(self._next_key_marker)\n if self._encoding_type == \"url\" else self._next_key_marker\n )\n self._next_upload_id_marker = findtext(element, \"NextUploadIdMarker\")\n self._max_uploads = findtext(element, \"MaxUploads\")\n if self._max_uploads:\n self._max_uploads = int(self._max_uploads)\n self._is_truncated = findtext(element, \"IsTruncated\")\n self._is_truncated = (\n self._is_truncated is not None and\n self._is_truncated.lower() == \"true\"\n )\n self._uploads = [\n Upload(tag, self._encoding_type)\n for tag in findall(element, \"Upload\")\n ]\n\n @property\n def bucket_name(self):\n \"\"\"Get bucket name.\"\"\"\n return self._bucket_name\n\n @property\n def key_marker(self):\n \"\"\"Get key marker.\"\"\"\n return self._key_marker\n\n @property\n def upload_id_marker(self):\n \"\"\"Get upload ID marker.\"\"\"\n return self._upload_id_marker\n\n @property\n def next_key_marker(self):\n \"\"\"Get next key marker.\"\"\"\n return self._next_key_marker\n\n @property\n def next_upload_id_marker(self):\n 
\"\"\"Get next upload ID marker.\"\"\"\n return self._next_upload_id_marker\n\n @property\n def max_uploads(self):\n \"\"\"Get max uploads.\"\"\"\n return self._max_uploads\n\n @property\n def is_truncated(self):\n \"\"\"Get is-truncated flag.\"\"\"\n return self._is_truncated\n\n @property\n def encoding_type(self):\n \"\"\"Get encoding type.\"\"\"\n return self._encoding_type\n\n @property\n def uploads(self):\n \"\"\"Get uploads.\"\"\"\n return self._uploads\n\n\n_RESERVED_ELEMENTS = (\n \"bucket\",\n \"x-amz-algorithm\",\n \"x-amz-credential\",\n \"x-amz-date\",\n \"policy\",\n \"x-amz-signature\",\n)\n_EQ = \"eq\"\n_STARTS_WITH = \"starts-with\"\n_ALGORITHM = \"AWS4-HMAC-SHA256\"\n\n\ndef _trim_dollar(value):\n \"\"\"Trim dollar character if present.\"\"\"\n return value[1:] if value.startswith(\"$\") else value\n\n\nclass PostPolicy:\n \"\"\"\n Post policy information to be used to generate presigned post policy\n form-data. Condition elements and respective condition for Post policy\n is available at\n https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions\n \"\"\"\n\n def __init__(self, bucket_name, expiration):\n check_bucket_name(bucket_name)\n if not isinstance(expiration, datetime.datetime):\n raise ValueError(\"expiration must be datetime.datetime type\")\n self._bucket_name = bucket_name\n self._expiration = expiration\n self._conditions = OrderedDict()\n self._conditions[_EQ] = OrderedDict()\n self._conditions[_STARTS_WITH] = OrderedDict()\n self._lower_limit = None\n self._upper_limit = None\n\n def add_equals_condition(self, element, value):\n \"\"\"Add equals condition of an element and value.\"\"\"\n if not element:\n raise ValueError(\"condition element cannot be empty\")\n element = _trim_dollar(element)\n if (\n element in [\n \"success_action_redirect\",\n \"redirect\",\n \"content-length-range\",\n ]\n ):\n raise ValueError(element + \" is unsupported for equals condition\")\n if element in _RESERVED_ELEMENTS:\n raise ValueError(element + \" cannot be set\")\n self._conditions[_EQ][element] = value\n\n def remove_equals_condition(self, element):\n \"\"\"Remove previously set equals condition of an element.\"\"\"\n if not element:\n raise ValueError(\"condition element cannot be empty\")\n self._conditions[_EQ].pop(element)\n\n def add_starts_with_condition(self, element, value):\n \"\"\"\n Add starts-with condition of an element and value. 
Value set to empty\n string does matching any content condition.\n \"\"\"\n if not element:\n raise ValueError(\"condition element cannot be empty\")\n element = _trim_dollar(element)\n if (\n element in [\"success_action_status\", \"content-length-range\"] or\n (\n element.startswith(\"x-amz-\") and\n not element.startswith(\"x-amz-meta-\")\n )\n ):\n raise ValueError(\n f\"{element} is unsupported for starts-with condition\",\n )\n if element in _RESERVED_ELEMENTS:\n raise ValueError(element + \" cannot be set\")\n self._conditions[_STARTS_WITH][element] = value\n\n def remove_starts_with_condition(self, element):\n \"\"\"Remove previously set starts-with condition of an element.\"\"\"\n if not element:\n raise ValueError(\"condition element cannot be empty\")\n self._conditions[_STARTS_WITH].pop(element)\n\n def add_content_length_range_condition( # pylint: disable=invalid-name\n self, lower_limit, upper_limit):\n \"\"\"Add content-length-range condition with lower and upper limits.\"\"\"\n if lower_limit < 0:\n raise ValueError(\"lower limit cannot be negative number\")\n if upper_limit < 0:\n raise ValueError(\"upper limit cannot be negative number\")\n if lower_limit > upper_limit:\n raise ValueError(\"lower limit cannot be greater than upper limit\")\n self._lower_limit = lower_limit\n self._upper_limit = upper_limit\n\n def remove_content_length_range_condition( # pylint: disable=invalid-name\n self):\n \"\"\"Remove previously set content-length-range condition.\"\"\"\n self._lower_limit = None\n self._upper_limit = None\n\n def form_data(self, creds, region):\n \"\"\"\n Return form-data of this post policy. The returned dict contains\n x-amz-algorithm, x-amz-credential, x-amz-security-token, x-amz-date,\n policy and x-amz-signature.\n \"\"\"\n if not isinstance(creds, Credentials):\n raise ValueError(\"credentials must be Credentials type\")\n if not region:\n raise ValueError(\"region cannot be empty\")\n if (\n \"key\" not in self._conditions[_EQ] and\n \"key\" not in self._conditions[_STARTS_WITH]\n ):\n raise ValueError(\"key condition must be set\")\n\n policy = OrderedDict()\n policy[\"expiration\"] = to_iso8601utc(self._expiration)\n policy[\"conditions\"] = [[_EQ, \"$bucket\", self._bucket_name]]\n for cond_key, conditions in self._conditions.items():\n for key, value in conditions.items():\n policy[\"conditions\"].append([cond_key, \"$\"+key, value])\n if self._lower_limit is not None and self._upper_limit is not None:\n policy[\"conditions\"].append(\n [\"content-length-range\", self._lower_limit, self._upper_limit],\n )\n utcnow = datetime.datetime.utcnow()\n credential = get_credential_string(creds.access_key, utcnow, region)\n amz_date = to_amz_date(utcnow)\n policy[\"conditions\"].append([_EQ, \"$x-amz-algorithm\", _ALGORITHM])\n policy[\"conditions\"].append([_EQ, \"$x-amz-credential\", credential])\n if creds.session_token:\n policy[\"conditions\"].append(\n [_EQ, \"$x-amz-security-token\", creds.session_token],\n )\n policy[\"conditions\"].append([_EQ, \"$x-amz-date\", amz_date])\n\n policy = base64.b64encode(json.dumps(policy).encode()).decode(\"utf-8\")\n signature = post_presign_v4(\n policy, creds.secret_key, utcnow, region,\n )\n form_data = {\n \"x-amz-algorithm\": _ALGORITHM,\n \"x-amz-credential\": credential,\n \"x-amz-date\": amz_date,\n \"policy\": policy,\n \"x-amz-signature\": signature,\n }\n if creds.session_token:\n form_data[\"x-amz-security-token\"] = creds.session_token\n return form_data\n\n @property\n def bucket_name(self):\n \"\"\"Get bucket 
name.\"\"\"\n return self._bucket_name\n\n\ndef parse_copy_object(response):\n \"\"\"Parse CopyObject/UploadPartCopy response.\"\"\"\n element = ET.fromstring(response.data.decode())\n etag = findtext(element, \"ETag\")\n if etag:\n etag = etag.replace('\"', \"\")\n last_modified = findtext(element, \"LastModified\")\n if last_modified:\n last_modified = from_iso8601utc(last_modified)\n return etag, last_modified\n\n\nclass EventIterable:\n \"\"\"Context manager friendly event iterable.\"\"\"\n\n def __init__(self, func):\n self._func = func\n self._response = None\n\n def _close_response(self):\n \"\"\"Close response.\"\"\"\n if self._response:\n self._response.close()\n self._response.release_conn()\n self._response = None\n\n def __iter__(self):\n return self\n\n def _get_records(self):\n \"\"\"Get event records from response stream.\"\"\"\n try:\n line = self._response.readline().strip()\n if not line:\n return None\n if hasattr(line, 'decode'):\n line = line.decode()\n event = json.loads(line)\n if event['Records']:\n return event\n except (StopIteration, JSONDecodeError):\n self._close_response()\n return None\n\n def __next__(self):\n records = None\n while not records:\n if not self._response or self._response.closed:\n self._response = self._func()\n records = self._get_records()\n return records\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, value, traceback):\n self._close_response()\n\n\nclass PeerSite:\n \"\"\"Represents a cluster/site to be added to the set of replicated sites.\"\"\"\n\n def __init__(self, name, endpoint, access_key, secret_key):\n self._name = name\n self._endpoint = endpoint\n self._access_key = access_key\n self._secret_key = secret_key\n\n def to_dict(self):\n \"\"\"Convert to dictionary.\"\"\"\n return {\n \"name\": self._name,\n \"endpoints\": self._endpoint,\n \"accessKey\": self._access_key,\n \"secretKey\": self._secret_key,\n }\n\n\nclass SiteReplicationStatusOptions:\n \"\"\"Represents site replication status options.\"\"\"\n ENTITY_TYPE = Enum(\n \"ENTITY_TYPE\",\n {\n \"BUCKET\": \"bucket\",\n \"POLICY\": \"policy\",\n \"USER\": \"user\",\n \"GROUP\": \"group\",\n },\n )\n\n def __init__(self):\n self._buckets = False\n self._policies = False\n self._users = False\n self._groups = False\n self._metrics = False\n self._entity = None\n self._entity_value = None\n self._show_deleted = False\n\n @property\n def buckets(self):\n \"\"\"Get buckets.\"\"\"\n return self._buckets\n\n @buckets.setter\n def buckets(self, value):\n \"\"\"Set buckets.\"\"\"\n self._buckets = value\n\n @property\n def policies(self):\n \"\"\"Get policies.\"\"\"\n return self._policies\n\n @policies.setter\n def policies(self, value):\n \"\"\"Set policies.\"\"\"\n self._policies = value\n\n @property\n def users(self):\n \"\"\"Get users.\"\"\"\n return self._users\n\n @users.setter\n def users(self, value):\n \"\"\"Set users.\"\"\"\n self._users = value\n\n @property\n def groups(self):\n \"\"\"Get groups.\"\"\"\n return self._groups\n\n @groups.setter\n def groups(self, value):\n \"\"\"Set groups.\"\"\"\n self._groups = value\n\n @property\n def metrics(self):\n \"\"\"Get metrics.\"\"\"\n return self._metrics\n\n @metrics.setter\n def metrics(self, value):\n \"\"\"Set metrics.\"\"\"\n self._metrics = value\n\n @property\n def entity(self):\n \"\"\"Get entity.\"\"\"\n return self._entity\n\n @entity.setter\n def entity(self, value):\n \"\"\"Set entity.\"\"\"\n self._entity = value\n\n @property\n def entity_value(self):\n \"\"\"Get entity value.\"\"\"\n 
return self._entity_value\n\n @entity_value.setter\n def entity_value(self, value):\n \"\"\"Set entity value.\"\"\"\n self._entity_value = value\n\n @property\n def show_deleted(self):\n \"\"\"Get show deleted.\"\"\"\n return self._show_deleted\n\n @show_deleted.setter\n def show_deleted(self, value):\n \"\"\"Set show deleted.\"\"\"\n self._show_deleted = value\n\n def to_query_params(self):\n \"\"\"Convert this options to query parameters.\"\"\"\n params = {\n \"buckets\": str(self._buckets).lower(),\n \"policies\": str(self._policies).lower(),\n \"users\": str(self._users).lower(),\n \"groups\": str(self._groups).lower(),\n \"metrics\": str(self._metrics).lower(),\n \"showDeleted\": str(self._show_deleted).lower(),\n }\n if self._entity and self._entity_value:\n params[\"entityvalue\"] = self._entity_value\n params[\"entity\"] = self._entity.value\n return params\n\n\nclass PeerInfo:\n \"\"\"Site replication peer information.\"\"\"\n\n def __init__(self, deployment_id, endpoint, bucket_bandwidth_limit,\n bucket_bandwidth_set):\n self._deployment_id = deployment_id\n self._endpoint = endpoint\n self._name = None\n self._sync_status = None\n self._bucket_bandwidth_limit = bucket_bandwidth_limit\n self._bucket_bandwidth_set = bucket_bandwidth_set\n self._bucket_bandwidth_updated_at = None\n\n @property\n def deployment_id(self):\n \"\"\"Get deployment ID.\"\"\"\n return self._deployment_id\n\n @deployment_id.setter\n def deployment_id(self, value):\n \"\"\"Set deployment ID.\"\"\"\n self._deployment_id = value\n\n @property\n def endpoint(self):\n \"\"\"Get endpoint.\"\"\"\n return self._endpoint\n\n @endpoint.setter\n def endpoint(self, value):\n \"\"\"Set endpoint.\"\"\"\n self._endpoint = value\n\n @property\n def name(self):\n \"\"\"Get name.\"\"\"\n return self._name\n\n @name.setter\n def name(self, value):\n \"\"\"Set name.\"\"\"\n self._name = value\n\n @property\n def sync_status(self):\n \"\"\"Get sync status.\"\"\"\n return self._sync_status\n\n @sync_status.setter\n def sync_status(self, value):\n \"\"\"Set sync status.\"\"\"\n self._sync_status = value\n\n @property\n def bucket_bandwidth_limit(self):\n \"\"\"Get bucket bandwidth limit.\"\"\"\n return self._bucket_bandwidth_limit\n\n @bucket_bandwidth_limit.setter\n def bucket_bandwidth_limit(self, value):\n \"\"\"Set bucket bandwidth limit.\"\"\"\n self._bucket_bandwidth_limit = value\n\n @property\n def bucket_bandwidth_set(self):\n \"\"\"Get bucket bandwidth set.\"\"\"\n return self._bucket_bandwidth_set\n\n @bucket_bandwidth_set.setter\n def bucket_bandwidth_set(self, value):\n \"\"\"Set bucket bandwidth set.\"\"\"\n self._bucket_bandwidth_set = value\n\n @property\n def bucket_bandwidth_updated_at(self):\n \"\"\"Get bucket bandwidth updated at.\"\"\"\n return self._bucket_bandwidth_updated_at\n\n @bucket_bandwidth_updated_at.setter\n def bucket_bandwidth_updated_at(self, value):\n \"\"\"Set bucket bandwidth updated at.\"\"\"\n self._bucket_bandwidth_updated_at = value\n\n def to_dict(self):\n \"\"\"Converts peer information to dictionary.\"\"\"\n data = {\n \"endpoint\": self._endpoint,\n \"deploymentID\": self._deployment_id,\n \"defaultbandwidth\": {\n \"bandwidthLimitPerBucket\": self._bucket_bandwidth_limit,\n \"set\": self._bucket_bandwidth_set,\n },\n }\n if self._name:\n data[\"name\"] = self._name\n if self._sync_status is not None:\n data[\"sync\"] = \"enable\" if self._sync_status else \"disable\"\n if self._bucket_bandwidth_updated_at:\n data[\"defaultbandwidth\"][\"updatedAt\"] = to_iso8601utc(\n 
self._bucket_bandwidth_updated_at,\n )\n return data\n","repo_name":"minio/minio-py","sub_path":"minio/datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":32951,"program_lang":"python","lang":"en","doc_type":"code","stars":706,"dataset":"github-code","pt":"77"} +{"seq_id":"2077241556","text":"#Une autre triche\n\n#Jusqu'à présent, nous avons utilisé break dans le while True boucle. break quitte complètement la boucle et exécute la prochaine ligne de code non indentée. Cependant, vous souhaiterez peut-être arrêter le code et recommencer la boucle depuis le haut. (C'est idéal pour créer des jeux !)\n\n#Dans le code ci-dessous, le jeu s'exécute et il est demandé à l'utilisateur s'il souhaite aller à gauche ou à droite. Si l'utilisateur choisit de partir, il tombe à mort et break expulsera l'utilisateur de la boucle. C'est le jeu.\n\nwhile True:\n print(\"You are in a corridor, do you go left or right?\")\n direction = input(\"> \")\n if direction == \"left\":\n print(\"You have fallen to your death\")\n break\n\n#Eh bien, c'est un peu nul et pas différent de ce que nous avons appris au jour 16... maintenant pour la triche.\n\n\n","repo_name":"franckdun/python_replit","sub_path":"Python_100_jours/jour-17/main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40165317329","text":"finput = input('Enter filename: ')\r\nif finput == 'na na boo boo':\r\n print('NA NA BOO BOO TO YOU - You have been punk`d!')\r\n quit()\r\ntry:\r\n fhand = open(finput)\r\nexcept:\r\n print('File cannot be opened: ', finput)\r\n quit()\r\nfor line in fhand:\r\n line = line.rstrip()\r\n line = line.upper()\r\n print(line)","repo_name":"einorjohn/Pycharm","sub_path":"Lec 7 Ex. 3.py","file_name":"Lec 7 Ex. 
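The loop-control lesson above (comments in French: so far `break` has been used to leave a `while True` loop entirely, but sometimes you instead want to abandon the current pass and restart the loop from the top, which is handy for games) ends right before naming its "cheat". From that description, the construct being set up is presumably `continue`; a short English sketch of the contrast:

while True:
    direction = input("You are in a corridor, do you go left or right? > ")
    if direction == "left":
        print("You have fallen to your death")
        break        # leaves the loop entirely; the game is over
    if direction != "right":
        print("Please answer left or right")
        continue     # skips the rest of this pass and re-asks from the top
    print("You walk on down the corridor...")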
3.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"765153756","text":"import functools\nimport signal\nimport sys\nimport traceback\nfrom contextlib import contextmanager\nfrom enum import Enum\nfrom threading import Thread\nfrom pathlib import Path\nfrom types import MethodType\nfrom typing import Optional, List, Any, Tuple, Callable, Union, Dict, Sequence, NewType\n\nimport appnope\nimport qtpy.QtCore as qc\nimport qtpy.QtWidgets as qw\nimport attr\nfrom qtpy.QtCore import QModelIndex, Qt, QVariant\nfrom qtpy.QtGui import QFont, QCloseEvent, QDesktopServices\n\nimport corrscope\nimport corrscope.settings.global_prefs as gp\nfrom corrscope import cli\nfrom corrscope.channel import ChannelConfig, DefaultLabel\nfrom corrscope.config import CorrError, copy_config, yaml\nfrom corrscope.corrscope import CorrScope, Config, Arguments, template_config\nfrom corrscope.gui.history_file_dlg import (\n get_open_file_name,\n get_open_file_list,\n get_save_file_path,\n)\nfrom corrscope.gui.model_bind import (\n PresentationModel,\n map_gui,\n behead,\n rgetattr,\n rsetattr,\n Symbol,\n SymbolText,\n BoundComboBox,\n)\nfrom corrscope.gui.util import color2hex, Locked, find_ranges, TracebackDialog\nfrom corrscope.gui.version_common import QT6\nfrom corrscope.gui.view_mainwindow import MainWindow as Ui_MainWindow\nfrom corrscope.gui.widgets import ChannelTableView, ShortcutButton\nfrom corrscope.layout import Orientation, StereoOrientation\nfrom corrscope.outputs import IOutputConfig, FFplayOutputConfig\nfrom corrscope.renderer import LabelPosition\nfrom corrscope.settings import paths\nfrom corrscope.triggers import (\n CorrelationTriggerConfig,\n MainTriggerConfig,\n SpectrumConfig,\n ZeroCrossingTriggerConfig,\n)\nfrom corrscope.util import obj_name, iround, coalesce\nfrom corrscope.wave import Flatten\n\nFILTER_WAV_FILES = [\"WAV files (*.wav)\"]\nFILTER_IMAGES = [\"Images files (*.png *.jpg *.jpeg *.gif)\", \"All files (*)\"]\n\nAPP_NAME = f\"{corrscope.app_name} {corrscope.__version__}\"\nAPP_DIR = Path(__file__).parent\n\nPATH_uri = qc.QUrl.fromLocalFile(paths.PATH_dir)\n\n\ndef res(file: str) -> str:\n return str(APP_DIR / file)\n\n\n@contextmanager\ndef exception_as_dialog(window: qw.QWidget):\n def excepthook(exc_type, exc_val, exc_tb):\n TracebackDialog(window).showMessage(format_stack_trace(exc_val))\n\n orig = sys.excepthook\n try:\n sys.excepthook = excepthook\n yield\n finally:\n sys.excepthook = orig\n\n\ndef gui_main(cfg_or_path: Union[Config, Path]):\n # Allow Ctrl-C to exit\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n # qw.QApplication.setStyle('fusion')\n QApp = qw.QApplication\n if not QT6:\n QApp.setAttribute(qc.Qt.AA_EnableHighDpiScaling)\n\n app = qw.QApplication(sys.argv)\n\n # On Windows, Qt 5's default system font (MS Shell Dlg 2) is outdated.\n # Interestingly, the QMenu font is correct and comes from lfMenuFont.\n # So use it for the entire application.\n # Qt on Windows will finally switch default font to lfMessageFont=Segoe UI\n # (Vista, 2006)... 
in 2020 (Qt 6.0).\n if qc.QSysInfo.kernelType() == \"winnt\":\n QApp.setFont(QApp.font(\"QMenu\"))\n\n window = MainWindow(cfg_or_path)\n\n # Any exceptions raised within MainWindow() will be caught within exec_.\n # exception_as_dialog() turns it into a Qt dialog.\n with exception_as_dialog(window):\n ret = app.exec_()\n # Any exceptions raised after exec_ terminates will call\n # exception_as_dialog().__exit__ before being caught.\n # This produces a Python traceback.\n\n # On Linux, if signal.signal(signal.SIGINT, signal.SIG_DFL) and Ctrl+C pressed,\n # corrscope closes immediately.\n # ffmpeg receives SIGPIPE and terminates by itself (according to strace).\n corr_thread = window.corr_thread\n if corr_thread is not None:\n corr_thread.job.abort_terminate()\n corr_thread.join()\n\n sys.exit(ret)\n\n\nSafeProperty = NewType(\"SafeProperty\", property)\n\n\ndef safe_property(unsafe_getter: Callable, *args, **kwargs) -> SafeProperty:\n \"\"\"Prevents (AttributeError from leaking outside a property,\n which causes hasattr() to return False).\"\"\"\n\n @functools.wraps(unsafe_getter)\n def safe_getter(self):\n try:\n return unsafe_getter(self)\n except AttributeError as e:\n raise RuntimeError(e) from e\n\n # NewType(\"\", cls)(x) == x.\n return SafeProperty(property(safe_getter, *args, **kwargs))\n\n\nclass MainWindow(qw.QMainWindow, Ui_MainWindow):\n \"\"\"\n Main window.\n\n Control flow:\n __init__: either\n - load_cfg\n - load_cfg_from_path\n\n Opening a document:\n - load_cfg_from_path\n\n ## Dialog Directory/Filename Generation\n\n Save-dialog dir is persistent state, saved across program runs.\n Most recent of:\n - Any open/save dialog (unless separate_render_dir is True).\n - self.pref.file_dir_ref, .set()\n - Load YAML from CLI.\n - load_cfg_from_path(cfg_path) sets `self.pref.file_dir`.\n - Load .wav files from CLI.\n - if isinstance(cfg_or_path, Config):\n - save_dir = self.compute_save_dir(self.cfg)\n - self.pref.file_dir = save_dir (if not empty)\n\n Render-dialog dir is persistent state, = most recent render-save dialog.\n - self.pref.render_dir, .set()\n\n Save/render-dialog filename (no dir) is computed on demand, NOT persistent state.\n - (Currently loaded config path, or master audio, or channel 0) + ext.\n - Otherwise empty string.\n - self.get_save_filename() calls cli.get_file_stem().\n\n CLI YAML filename is the same,\n but defaults to \"corrscope.yaml\" instead of empty string.\n - cli._get_file_name() calls cli.get_file_stem().\n\n CLI video filename is explicitly specified by the user.\n \"\"\"\n\n def __init__(self, cfg_or_path: Union[Config, Path]):\n super().__init__()\n\n # Load settings.\n prefs_error = None\n try:\n self.pref = gp.load_prefs()\n if not isinstance(self.pref, gp.GlobalPrefs):\n raise TypeError(f\"prefs.yaml contains wrong type {type(self.pref)}\")\n except Exception as e:\n prefs_error = e\n self.pref = gp.GlobalPrefs()\n\n # Load UI.\n self.setupUi(self) # sets windowTitle\n\n # Bind UI buttons, etc. 
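safe_property above defends against a Python footgun: an AttributeError escaping a property getter is indistinguishable from "attribute missing", so hasattr() returns False and the real bug is silently hidden. Re-raising as RuntimeError makes the failure loud. A self-contained demonstration (class names hypothetical):

class Broken:
    @property
    def value(self):
        return self.missing  # AttributeError: no attribute 'missing'


class Fixed:
    @property
    def value(self):
        try:
            return self.missing
        except AttributeError as e:
            raise RuntimeError(e) from e


# hasattr() swallows the getter's AttributeError, masking the bug entirely.
print(hasattr(Broken(), "value"))   # False

# The wrapped getter fails loudly, with the original error preserved.
try:
    Fixed().value
except RuntimeError as e:
    print("loud failure:", e)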
Functions block main thread, avoiding race conditions.\n self.master_audio_browse.clicked.connect(self.on_master_audio_browse)\n self.bg_image_browse.clicked.connect(self.on_bg_image_browse)\n\n self.channelUp.add_shortcut(self.channelsGroup, \"ctrl+shift+up\")\n self.channelDown.add_shortcut(self.channelsGroup, \"ctrl+shift+down\")\n\n self.channelUp.clicked.connect(self.channel_view.on_channel_up)\n self.channelDown.clicked.connect(self.channel_view.on_channel_down)\n self.channelAdd.clicked.connect(self.on_channel_add)\n self.channelDelete.clicked.connect(self.on_channel_delete)\n\n # Bind actions.\n self.action_separate_render_dir.setChecked(self.pref.separate_render_dir)\n self.action_separate_render_dir.toggled.connect(\n self.on_separate_render_dir_toggled\n )\n\n self.action_open_config_dir.triggered.connect(self.on_open_config_dir)\n\n self.actionNew.triggered.connect(self.on_action_new)\n self.actionOpen.triggered.connect(self.on_action_open)\n self.actionSave.triggered.connect(self.on_action_save)\n self.actionSaveAs.triggered.connect(self.on_action_save_as)\n self.actionPreview.triggered.connect(self.on_action_preview)\n self.actionRender.triggered.connect(self.on_action_render)\n\n self.actionWebsite.triggered.connect(self.on_action_website)\n self.actionHelp.triggered.connect(self.on_action_help)\n\n self.actionExit.triggered.connect(qw.QApplication.closeAllWindows)\n\n # Initialize CorrScope-thread attribute.\n self.corr_thread: Optional[CorrThread] = None\n\n # Setup UI.\n self.model = ConfigModel(template_config())\n self.model.edited.connect(self.on_model_edited)\n # Calls self.on_gui_edited() whenever GUI widgets change.\n map_gui(self, self.model)\n\n self.model.update_widget[\"render_stereo\"].append(self.on_render_stereo_changed)\n\n # Bind config to UI.\n if isinstance(cfg_or_path, Config):\n self.load_cfg(cfg_or_path, None)\n save_dir = self.compute_save_dir(self.cfg)\n if save_dir:\n self.pref.file_dir = save_dir\n elif isinstance(cfg_or_path, Path):\n self.load_cfg_from_path(cfg_or_path)\n else:\n raise TypeError(\n f\"argument cfg={cfg_or_path} has invalid type {obj_name(cfg_or_path)}\"\n )\n\n self.show()\n\n if prefs_error is not None:\n TracebackDialog(self).showMessage(\n \"Warning: failed to load global preferences, resetting to default.\\n\"\n + format_stack_trace(prefs_error)\n )\n\n _cfg_path: Optional[Path]\n\n # Whether document is dirty, changed, has unsaved changes\n _any_unsaved: bool\n\n @property\n def any_unsaved(self) -> bool:\n return self._any_unsaved\n\n @any_unsaved.setter\n def any_unsaved(self, value: bool):\n self._any_unsaved = value\n self._update_unsaved_title()\n\n # Config models\n model: Optional[\"ConfigModel\"] = None\n\n channel_model: \"ChannelModel\"\n channel_view: \"ChannelTableView\"\n channelsGroup: qw.QGroupBox\n\n def on_render_stereo_changed(self):\n self.layout__stereo_orientation.setEnabled(\n self.model.cfg.render_stereo is Flatten.Stereo\n )\n\n # Closing active document\n\n def _cancel_render_if_active(self, title: str) -> bool:\n \"\"\"\n :return: False if user cancels close-document action.\n \"\"\"\n if self.corr_thread is None:\n return True\n\n Msg = qw.QMessageBox\n\n message = self.tr(\"Cancel current {} and close project?\").format(\n self.preview_or_render\n )\n response = Msg.question(self, title, message, Msg.Yes | Msg.No, Msg.No)\n\n if response == Msg.Yes:\n # Closing ffplay preview (can't cancel render, the dialog is untouchable)\n # will set self.corr_thread to None while the dialog is active.\n # 
https://www.vikingsoftware.com/how-to-use-qthread-properly/ # QObject thread affinity\n # But since the dialog is modal,\n # self.corr_thread cannot have been replaced by a different thread.\n if self.corr_thread is not None:\n self.corr_thread.abort_terminate()\n return True\n\n return False\n\n def _prompt_if_unsaved(self, title: str) -> bool:\n \"\"\"\n :return: False if user cancels close-document action.\n \"\"\"\n if not self.any_unsaved:\n return True\n\n Msg = qw.QMessageBox\n\n message = f\"Save changes to {self.title_cache}?\"\n should_close = Msg.question(\n self, title, message, Msg.Save | Msg.Discard | Msg.Cancel\n )\n\n if should_close == Msg.Cancel:\n return False\n elif should_close == Msg.Discard:\n return True\n else:\n return self.on_action_save()\n\n def should_close_document(self, title: str) -> bool:\n \"\"\"\n Called when user is closing document\n (when opening a new document or closing the app).\n\n :return: False if user cancels close-document action.\n \"\"\"\n if not self._prompt_if_unsaved(title):\n return False\n if not self._cancel_render_if_active(title):\n # Saying Yes quits render immediately, so place this dialog last.\n return False\n return True\n\n def closeEvent(self, event: QCloseEvent) -> None:\n \"\"\"Called on closing window.\"\"\"\n if self.should_close_document(self.tr(\"Quit\")):\n gp.dump_prefs(self.pref)\n event.accept()\n else:\n event.ignore()\n\n def on_action_new(self):\n if not self.should_close_document(self.tr(\"New Project\")):\n return\n cfg = template_config()\n self.load_cfg(cfg, None)\n\n def on_action_open(self):\n if not self.should_close_document(self.tr(\"Open Project\")):\n return\n name = get_open_file_name(\n self, \"Open config\", self.pref.file_dir_ref, [\"YAML files (*.yaml)\"]\n )\n if name:\n cfg_path = Path(name)\n self.load_cfg_from_path(cfg_path)\n\n def load_cfg_from_path(self, cfg_path: Path):\n # Bind GUI to dummy config, in case loading cfg_path raises Exception.\n if self.model is None:\n self.load_cfg(template_config(), None)\n\n assert cfg_path.is_file()\n self.pref.file_dir = str(cfg_path.parent.resolve())\n\n # Raises YAML structural exceptions\n cfg = yaml.load(cfg_path)\n\n try:\n # Raises color getter exceptions\n self.load_cfg(cfg, cfg_path)\n except Exception as e:\n # FIXME if error halfway, clear \"file path\" and load empty model.\n TracebackDialog(self).showMessage(format_stack_trace(e))\n return\n\n def load_cfg(self, cfg: Config, cfg_path: Optional[Path]) -> None:\n self._cfg_path = cfg_path\n self._any_unsaved = False\n self.load_title()\n self.left_tabs.setCurrentIndex(0)\n\n self.model.set_cfg(cfg)\n\n self.channel_model = ChannelModel(cfg.channels)\n # Calling setModel again disconnects previous model.\n self.channel_view.setModel(self.channel_model)\n self.channel_model.dataChanged.connect(self.on_model_edited)\n self.channel_model.rowsInserted.connect(self.on_model_edited)\n self.channel_model.rowsMoved.connect(self.on_model_edited)\n self.channel_model.rowsRemoved.connect(self.on_model_edited)\n\n def on_model_edited(self):\n self.any_unsaved = True\n\n title_cache: str\n\n def load_title(self) -> None:\n self.title_cache = self.title\n self._update_unsaved_title()\n\n def _update_unsaved_title(self) -> None:\n if self.any_unsaved:\n undo_str = \"*\"\n else:\n undo_str = \"\"\n self.setWindowTitle(f\"{self.title_cache}{undo_str} - {APP_NAME}\")\n\n # GUI actions, etc.\n master_audio_browse: qw.QPushButton\n channelAdd: \"ShortcutButton\"\n channelDelete: \"ShortcutButton\"\n channelUp: 
\"ShortcutButton\"\n channelDown: \"ShortcutButton\"\n\n action_separate_render_dir: qw.QAction\n action_open_config_dir: qw.QAction\n\n # Loading mainwindow.ui changes menuBar from a getter to an attribute.\n menuBar: qw.QMenuBar\n actionNew: qw.QAction\n actionOpen: qw.QAction\n actionSave: qw.QAction\n actionSaveAs: qw.QAction\n actionPreview: qw.QAction\n actionRender: qw.QAction\n actionExit: qw.QAction\n\n def on_master_audio_browse(self):\n name = get_open_file_name(\n self, \"Open master audio file\", self.pref.file_dir_ref, FILTER_WAV_FILES\n )\n if name:\n master_audio = \"master_audio\"\n self.model[master_audio] = name\n self.model.update_all_bound(master_audio)\n\n def on_bg_image_browse(self):\n name = get_open_file_name(\n self, \"Open background image file\", self.pref.file_dir_ref, FILTER_IMAGES\n )\n if name:\n bg_image = \"render__bg_image\"\n self.model[bg_image] = name\n self.model.update_all_bound(bg_image)\n\n def on_separate_render_dir_toggled(self, checked: bool):\n self.pref.separate_render_dir = checked\n if checked:\n self.pref.render_dir = self.pref.file_dir\n else:\n self.pref.render_dir = \"\"\n\n def on_open_config_dir(self):\n appdata_uri = qc.QUrl.fromLocalFile(str(paths.appdata_dir))\n QDesktopServices.openUrl(appdata_uri)\n\n def on_channel_add(self):\n wavs = get_open_file_list(\n self, \"Add audio channels\", self.pref.file_dir_ref, FILTER_WAV_FILES\n )\n if wavs:\n self.channel_view.append_channels(wavs)\n\n def on_channel_delete(self):\n self.channel_view.delete_selected()\n\n def on_action_save(self) -> bool:\n \"\"\"\n :return: False if user cancels save action.\n \"\"\"\n if self._cfg_path is None:\n return self.on_action_save_as()\n\n yaml.dump(self.cfg, self._cfg_path)\n self.any_unsaved = False\n self._update_unsaved_title()\n return True\n\n def on_action_save_as(self) -> bool:\n \"\"\"\n :return: False if user cancels save action.\n \"\"\"\n\n # Name and extension (no folder).\n cfg_filename = self.get_save_filename(cli.YAML_NAME)\n\n # Folder is obtained from self.pref.file_dir_ref.\n filters = [\"YAML files (*.yaml)\", \"All files (*)\"]\n path = get_save_file_path(\n self,\n \"Save As\",\n self.pref.file_dir_ref,\n cfg_filename,\n filters,\n cli.YAML_NAME,\n )\n if path:\n self._cfg_path = path\n self.load_title()\n self.on_action_save()\n return True\n else:\n return False\n\n def on_action_preview(self):\n \"\"\"Launch CorrScope and ffplay.\"\"\"\n if self.corr_thread is not None:\n error_msg = self.tr(\"Cannot preview, another {} is active\").format(\n self.preview_or_render\n )\n qw.QMessageBox.critical(self, \"Error\", error_msg)\n return\n\n outputs = [FFplayOutputConfig()]\n self.play_thread(outputs, PreviewOrRender.preview, dlg=None)\n\n def on_action_render(self):\n \"\"\"Get file name. 
Then show a progress dialog while rendering to file.\"\"\"\n if self.corr_thread is not None:\n error_msg = self.tr(\"Cannot render to file, another {} is active\").format(\n self.preview_or_render\n )\n qw.QMessageBox.critical(self, \"Error\", error_msg)\n return\n\n # Name and extension (no folder).\n video_filename = self.get_save_filename(cli.VIDEO_NAME)\n filters = [\n \"MP4 files (*.mp4)\",\n \"Matroska files (*.mkv)\",\n \"WebM files (*.webm)\",\n \"All files (*)\",\n ]\n\n # Points to either `file_dir` or `render_dir`.\n # Folder is obtained from `dir_ref`.\n dir_ref = self.pref.render_dir_ref\n\n path = get_save_file_path(\n self, \"Render to Video\", dir_ref, video_filename, filters, cli.VIDEO_NAME\n )\n if path:\n name = str(path)\n dlg = CorrProgressDialog(self, \"Rendering video\")\n\n # FFmpegOutputConfig contains only hashable/immutable strs,\n # so get_ffmpeg_cfg() can be shared across threads without copying.\n # Optionally copy_config() first.\n\n outputs = [self.cfg.get_ffmpeg_cfg(name)]\n self.play_thread(outputs, PreviewOrRender.render, dlg)\n\n def play_thread(\n self,\n outputs: List[IOutputConfig],\n mode: \"PreviewOrRender\",\n dlg: Optional[\"CorrProgressDialog\"],\n ):\n assert self.model\n\n arg = self._get_args(outputs)\n cfg = copy_config(self.model.cfg)\n t = self.corr_thread = CorrThread(cfg, arg, mode)\n\n if dlg:\n # t.abort -> Locked.set() is thread-safe (hopefully).\n # It can be called from main thread (not just within CorrThread).\n dlg.canceled.connect(t.job.abort, Qt.DirectConnection)\n t.job.arg = attr.evolve(\n arg,\n on_begin=run_on_ui_thread(dlg.on_begin, (float, float)),\n progress=run_on_ui_thread(dlg.setValue, (int,)),\n on_end=run_on_ui_thread(dlg.reset, ()), # TODO dlg.close\n )\n\n t.job.finished.connect(self.on_play_thread_finished)\n t.job.error.connect(self.on_play_thread_error)\n t.job.ffmpeg_missing.connect(self.on_play_thread_ffmpeg_missing)\n t.start()\n\n @safe_property\n def preview_or_render(self) -> str:\n if self.corr_thread is not None:\n return self.tr(self.corr_thread.job.mode.value)\n return \"neither preview nor render\"\n\n def _get_args(self, outputs: List[IOutputConfig]):\n def raise_exception():\n raise RuntimeError(\n \"Arguments.is_aborted should be overwritten by CorrThread\"\n )\n\n arg = Arguments(\n cfg_dir=self.cfg_dir, outputs=outputs, is_aborted=raise_exception\n )\n return arg\n\n def on_play_thread_finished(self):\n self.corr_thread = None\n\n def on_play_thread_error(self, stack_trace: str):\n TracebackDialog(self).showMessage(stack_trace)\n\n def on_play_thread_ffmpeg_missing(self):\n DownloadFFmpegActivity(self)\n\n # File paths\n @safe_property\n def cfg_dir(self) -> str:\n \"\"\"Only used when generating Arguments when playing corrscope.\n Not used to determine default path of file dialogs.\"\"\"\n maybe_path = self._cfg_path or self.cfg.master_audio\n if maybe_path:\n # Windows likes to raise OSError when path contains *\n try:\n return str(Path(maybe_path).resolve().parent)\n except OSError:\n return \".\"\n\n return \".\"\n\n UNTITLED = \"Untitled\"\n\n @safe_property\n def title(self) -> str:\n if self._cfg_path:\n return self._cfg_path.name\n return self.UNTITLED\n\n def get_save_filename(self, suffix: str) -> str:\n \"\"\"\n If file name can be guessed, return \"filename.suffix\" (no dir).\n Otherwise return \"\".\n\n Used for saving file or video.\n \"\"\"\n stem = cli.get_file_stem(self._cfg_path, self.cfg, default=\"\")\n if stem:\n return stem + suffix\n else:\n return \"\"\n\n @staticmethod\n 
def compute_save_dir(cfg: Config) -> Optional[str]:\n \"\"\"Computes a \"save directory\" when constructing a config from CLI wav files.\"\"\"\n if cfg.master_audio:\n file_path = cfg.master_audio\n elif len(cfg.channels) > 0:\n file_path = cfg.channels[0].wav_path\n else:\n return None\n\n # If file_path is \"file.wav\", we want to return \".\" .\n # os.path.dirname(\"file.wav\") == \"\"\n # Path(\"file.wav\").parent..str == \".\"\n dir = Path(file_path).parent\n return str(dir)\n\n @safe_property\n def cfg(self):\n return self.model.cfg\n\n # Misc.\n @qc.Slot()\n def on_action_website(self):\n website_url = r\"https://github.com/corrscope/corrscope/\"\n QDesktopServices.openUrl(qc.QUrl(website_url))\n\n @qc.Slot()\n def on_action_help(self):\n help_url = r\"https://corrscope.github.io/corrscope/\"\n QDesktopServices.openUrl(qc.QUrl(help_url))\n\n\ndef _format_exc_value(e: BaseException, limit=None, chain=True):\n \"\"\"Like traceback.format_exc() but takes an exception object.\"\"\"\n list = traceback.format_exception(\n type(e), e, e.__traceback__, limit=limit, chain=chain\n )\n str = \"\".join(list)\n return str\n\n\ndef format_stack_trace(e: BaseException):\n if isinstance(e, CorrError):\n stack_trace = _format_exc_value(e, limit=0)\n else:\n stack_trace = _format_exc_value(e)\n return stack_trace\n\n\nclass PreviewOrRender(Enum):\n # PreviewOrRender.value is translated at time of use, not time of definition.\n preview = qc.QT_TRANSLATE_NOOP(\"MainWindow\", \"preview\")\n render = qc.QT_TRANSLATE_NOOP(\"MainWindow\", \"render\")\n\n\nclass CorrJob(qc.QObject):\n is_aborted: Locked[bool]\n\n @qc.Slot()\n def abort(self):\n self.is_aborted.set(True)\n\n def abort_terminate(self):\n \"\"\"Sends abort signal to main loop, and terminates all outputs.\"\"\"\n self.abort()\n if self.corr is not None:\n for output in self.corr.outputs:\n output.terminate(from_same_thread=False)\n\n finished = qc.Signal()\n error = qc.Signal(str)\n ffmpeg_missing = qc.Signal()\n\n def __init__(self, cfg: Config, arg: Arguments, mode: PreviewOrRender):\n qc.QObject.__init__(self)\n self.is_aborted = Locked(False)\n\n self.cfg = cfg\n self.arg = arg\n self.arg.is_aborted = self.is_aborted.get\n self.mode = mode\n self.corr = None # type: Optional[CorrScope]\n\n def run(self) -> None:\n \"\"\"Called in separate thread.\"\"\"\n cfg = self.cfg\n arg = self.arg\n with appnope.nope_scope(reason=\"corrscope preview/render active\"):\n try:\n self.corr = CorrScope(cfg, arg)\n self.corr.play()\n\n except paths.MissingFFmpegError:\n arg.on_end()\n self.ffmpeg_missing.emit()\n\n except Exception as e:\n arg.on_end()\n stack_trace = format_stack_trace(e)\n self.error.emit(stack_trace)\n\n else:\n arg.on_end()\n\n\nclass CorrThread(Thread):\n job: CorrJob\n\n def __init__(self, cfg: Config, arg: Arguments, mode: PreviewOrRender):\n Thread.__init__(self)\n self.job = CorrJob(cfg, arg, mode)\n\n def run(self):\n \"\"\"Callback invoked on new thread.\"\"\"\n try:\n self.job.run()\n finally:\n self.job.finished.emit()\n\n\nclass CorrProgressDialog(qw.QProgressDialog):\n def __init__(self, parent: Optional[qw.QWidget], title: str):\n super().__init__(parent)\n self.setMinimumWidth(300)\n self.setWindowTitle(title)\n self.setLabelText(\"Progress:\")\n\n # If set to 0, the dialog is always shown as soon as any progress is set.\n self.setMinimumDuration(0)\n\n # Don't reset when rendering is approximately finished.\n self.setAutoReset(False)\n\n # Close after CorrScope finishes.\n self.setAutoClose(True)\n\n @qc.Slot(float, 
float)\n def on_begin(self, begin_time, end_time):\n self.setRange(iround(begin_time), iround(end_time))\n # self.setValue is called by CorrScope, on the first frame.\n\n\ndef run_on_ui_thread(\n bound_slot: MethodType, types: Tuple[type, ...]\n) -> Callable[..., None]:\n \"\"\"Runs an object's slot on the object's own thread.\n It's terrible code but it works (as long as the slot has no return value).\n \"\"\"\n qmo = qc.QMetaObject\n\n # QObject *obj,\n obj = bound_slot.__self__\n\n # const char *member,\n member = bound_slot.__name__\n\n # Qt::ConnectionType type,\n # QGenericReturnArgument ret,\n # https://riverbankcomputing.com/pipermail/pyqt/2014-December/035223.html\n conn = Qt.QueuedConnection\n\n @functools.wraps(bound_slot)\n def inner(*args):\n if len(types) != len(args):\n raise TypeError(f\"len(types)={len(types)} != len(args)={len(args)}\")\n\n # https://www.qtcentre.org/threads/29156-Calling-a-slot-from-another-thread?p=137140#post137140\n # QMetaObject.invokeMethod(skypeThread, \"startSkypeCall\", Qt.QueuedConnection, QtCore.Q_ARG(\"QString\", \"someguy\"))\n\n _args = [qc.Q_ARG(typ, typ(arg)) for typ, arg in zip(types, args)]\n return qmo.invokeMethod(obj, member, conn, *_args)\n\n return inner\n\n\n# Begin ConfigModel properties\n\n\ndef nrow_ncol_property(altered: str, unaltered: str) -> SafeProperty:\n def get(self: \"ConfigModel\"):\n val = getattr(self.cfg.layout, altered)\n if val is None:\n return 0\n else:\n return val\n\n def set(self: \"ConfigModel\", val: int):\n if val > 0:\n setattr(self.cfg.layout, altered, val)\n setattr(self.cfg.layout, unaltered, None)\n self.update_all_bound(\"layout__\" + unaltered)\n elif val == 0:\n setattr(self.cfg.layout, altered, None)\n else:\n raise CorrError(f\"invalid input: {altered} < 0, should never happen\")\n\n return safe_property(get, set)\n\n\n# Unused\ndef default_property(path: str, default: Any) -> SafeProperty:\n def getter(self: \"ConfigModel\"):\n val = rgetattr(self.cfg, path)\n if val is None:\n return default\n else:\n return val\n\n def setter(self: \"ConfigModel\", val):\n rsetattr(self.cfg, path, val)\n\n return safe_property(getter, setter)\n\n\ndef path_strip_quotes(path: str) -> str:\n if len(path) and path[0] == path[-1] == '\"':\n return path[1:-1]\n return path\n\n\ndef path_fix_property(path: str) -> SafeProperty:\n \"\"\"Removes quotes from paths, when setting from GUI.\"\"\"\n\n def getter(self: \"ConfigModel\") -> str:\n return rgetattr(self.cfg, path)\n\n def setter(self: \"ConfigModel\", val: str):\n val = path_strip_quotes(val)\n rsetattr(self.cfg, path, val)\n\n return safe_property(getter, setter)\n\n\nflatten_no_stereo = {\n Flatten.SumAvg: \"Average: (L+R)/2\",\n Flatten.DiffAvg: \"DiffAvg: (L-R)/2\",\n}\nflatten_modes = {**flatten_no_stereo, Flatten.Stereo: \"Stereo\"}\nassert set(flatten_modes.keys()) == set(Flatten.modes) # type: ignore\n\n\nclass ConfigModel(PresentationModel):\n cfg: Config\n combo_symbol_text: Dict[str, Sequence[SymbolText]] = {}\n\n master_audio = path_fix_property(\"master_audio\")\n\n # Stereo flattening\n combo_symbol_text[\"trigger_stereo\"] = list(flatten_no_stereo.items()) + [\n (BoundComboBox.Custom, \"Custom\")\n ]\n combo_symbol_text[\"render_stereo\"] = list(flatten_modes.items()) + [\n (BoundComboBox.Custom, \"Custom\")\n ]\n\n # Trigger\n @safe_property\n def trigger__pitch_tracking(self) -> bool:\n scfg = self.cfg.trigger.pitch_tracking\n gui = scfg is not None\n return gui\n\n @trigger__pitch_tracking.setter\n def trigger__pitch_tracking(self, gui: 
bool):\n scfg = SpectrumConfig() if gui else None\n self.cfg.trigger.pitch_tracking = scfg\n\n combo_symbol_text[\"trigger__edge_direction\"] = [\n (1, \"Rising (+1)\"),\n (-1, \"Falling (-1)\"),\n ]\n\n combo_symbol_text[\"trigger__post_trigger\"] = [\n (type(None), \"Disabled\"),\n (ZeroCrossingTriggerConfig, \"Zero Crossing\"),\n ]\n\n # Render\n @safe_property\n def render_resolution(self) -> str:\n render = self.cfg.render\n w, h = render.width, render.height\n return f\"{w}x{h}\"\n\n @render_resolution.setter\n def render_resolution(self, value: str):\n error = CorrError(f\"invalid resolution {value}, must be WxH\")\n\n for sep in \"x*,\":\n width_height = value.split(sep)\n if len(width_height) == 2:\n break\n else:\n raise error\n\n render = self.cfg.render\n width, height = width_height\n try:\n render.width = int(width)\n render.height = int(height)\n except ValueError:\n raise error\n\n combo_symbol_text[\"default_label\"] = [\n (DefaultLabel.NoLabel, MainWindow.tr(\"None\", \"Default Label\")),\n (DefaultLabel.FileName, MainWindow.tr(\"File Name\", \"Default Label\")),\n (DefaultLabel.Number, MainWindow.tr(\"Number\", \"Default Label\")),\n ]\n\n combo_symbol_text[\"render.label_position\"] = [\n (LabelPosition.LeftTop, \"Top Left\"),\n (LabelPosition.LeftBottom, \"Bottom Left\"),\n (LabelPosition.RightTop, \"Top Right\"),\n (LabelPosition.RightBottom, \"Bottom Right\"),\n ]\n\n @safe_property\n def render__label_qfont(self) -> QFont:\n qfont = QFont()\n qfont.setStyleHint(QFont.SansSerif) # no-op on X11\n\n font = self.cfg.render.label_font\n if font.toString:\n qfont.fromString(font.toString)\n return qfont\n\n # Passing None or \"\" to QFont(family) results in qfont.family() = \"\", and\n # wrong font being selected (Abyssinica SIL, which appears early in the list).\n family = coalesce(font.family, qfont.defaultFamily())\n # Font file selection\n qfont.setFamily(family)\n qfont.setBold(font.bold)\n qfont.setItalic(font.italic)\n # Font size\n qfont.setPointSizeF(font.size)\n return qfont\n\n @render__label_qfont.setter\n def render__label_qfont(self, qfont: QFont):\n self.cfg.render.label_font = attr.evolve(\n self.cfg.render.label_font,\n # Font file selection\n family=qfont.family(),\n bold=qfont.bold(),\n italic=qfont.italic(),\n # Font size\n size=qfont.pointSizeF(),\n # QFont implementation details\n toString=qfont.toString(),\n )\n\n # Layout\n layout__nrows = nrow_ncol_property(\"nrows\", unaltered=\"ncols\")\n layout__ncols = nrow_ncol_property(\"ncols\", unaltered=\"nrows\")\n\n _orientations = [[\"h\", \"Horizontal\"], [\"v\", \"Vertical\"]]\n _stereo_orientations = _orientations + [[\"overlay\", \"Overlay\"]]\n\n combo_symbol_text[\"layout__orientation\"] = [\n (Orientation(key), name) for key, name in _orientations\n ]\n combo_symbol_text[\"layout__stereo_orientation\"] = [\n (StereoOrientation(key), name) for key, name in _stereo_orientations\n ]\n\n\n# End ConfigModel\n\n\n@attr.dataclass\nclass Column:\n key: str\n\n # fn(str) -> T. 
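The render_resolution setter above accepts WxH with any of three separators and relies on Python's for/else: the else arm runs only if the loop never hit break, meaning no separator split the string into exactly two fields. The same logic extracted as a standalone helper (name hypothetical):

def parse_resolution(value: str) -> tuple[int, int]:
    """Accepts '1280x720', '1280*720' or '1280,720'."""
    for sep in "x*,":
        parts = value.split(sep)
        if len(parts) == 2:
            break
    else:  # no separator matched: the loop finished without `break`
        raise ValueError(f"invalid resolution {value!r}, must be WxH")
    try:
        width, height = (int(p) for p in parts)
    except ValueError:
        raise ValueError(f"invalid resolution {value!r}, must be WxH") from None
    return width, height


assert parse_resolution("1280x720") == (1280, 720)
assert parse_resolution("640,480") == (640, 480)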
If ValueError is thrown, replaced by `default`.\n cls: Union[type, Callable[[str], Any]]\n\n # `default` is written into config,\n # when users type \"blank or whitespace\" into table cell.\n default: Any\n\n def _display_name(self) -> str:\n return self.key.replace(\"__\", \"\\n\").replace(\"_\", \" \").title()\n\n display_name: str = attr.Factory(_display_name, takes_self=True)\n always_show: bool = False\n\n\ndef plus_minus_one(value: str) -> int:\n if int(value) >= 0: # Raises ValueError\n return 1\n else:\n return -1\n\n\nnope = qc.QVariant()\n\n\ndef parse_bool_maybe(s: str) -> Optional[bool]:\n \"\"\"Does not throw. But could legally throw ValueError.\"\"\"\n\n if not s:\n return None\n\n # len(s) >= 1\n try:\n return bool(int(s))\n except ValueError:\n pass\n\n s = s.lower()\n if s[0] in [\"t\", \"y\"]:\n return True\n if s[0] in [\"f\", \"n\"]:\n return False\n return None\n\n\nclass ChannelModel(qc.QAbstractTableModel):\n \"\"\"Design based off\n https://doc.qt.io/qt-5/model-view-programming.html#a-read-only-example-model and\n https://doc.qt.io/qt-5/model-view-programming.html#model-subclassing-reference\n \"\"\"\n\n def __init__(self, channels: List[ChannelConfig]):\n \"\"\"Mutates `channels` and `line_color` for convenience.\"\"\"\n super().__init__()\n self.channels = channels\n\n line_color = \"line_color\"\n\n for cfg in self.channels:\n t = cfg.trigger\n if isinstance(t, MainTriggerConfig):\n if not isinstance(t, CorrelationTriggerConfig):\n raise CorrError(f\"Loading per-channel {obj_name(t)} not supported\")\n trigger_dict = attr.asdict(t)\n else:\n trigger_dict = dict(t or {})\n\n if line_color in trigger_dict:\n trigger_dict[line_color] = color2hex(trigger_dict[line_color])\n\n cfg.trigger = trigger_dict\n\n def triggers(self, row: int) -> Dict[str, Any]:\n trigger = self.channels[row].trigger\n assert isinstance(trigger, dict)\n return trigger\n\n # columns\n col_data = [\n Column(\"wav_path\", path_strip_quotes, \"\", \"WAV Path\"),\n Column(\"label\", str, \"\", \"Label\"),\n Column(\"amplification\", float, None, \"Amplification\\n(override)\"),\n Column(\"line_color\", str, None, \"Line Color\"),\n Column(\"color_by_pitch\", parse_bool_maybe, None, \"Color Lines\\nBy Pitch\"),\n Column(\"render_stereo\", str, None, \"Render Stereo\\nDownmix\"),\n Column(\"trigger_width\", int, 1, \"Trigger Width ×\", always_show=True),\n Column(\"render_width\", int, 1, \"Render Width ×\", always_show=True),\n Column(\"trigger__mean_responsiveness\", float, None, \"DC Removal\\nRate\"),\n Column(\"trigger__sign_strength\", float, None, \"Sign\\nAmplification\"),\n Column(\"trigger__edge_direction\", plus_minus_one, None),\n Column(\"trigger__edge_strength\", float, None),\n Column(\"trigger__slope_width\", float, None),\n Column(\"trigger__buffer_strength\", float, None),\n Column(\"trigger__responsiveness\", float, None, \"Buffer\\nResponsiveness\"),\n Column(\"trigger__reset_below\", float, None, \"Reset Below\\nMatch\"),\n ]\n\n idx_of_key = {}\n for idx, col in enumerate(col_data):\n idx_of_key[col.key] = idx\n del idx, col\n\n def columnCount(self, parent: QModelIndex = ...) 
-> int:\n return len(self.col_data)\n\n def headerData(\n self, section: int, orientation: Qt.Orientation, role: int = Qt.DisplayRole\n ) -> Union[str, QVariant]:\n if role == Qt.DisplayRole:\n if orientation == Qt.Horizontal:\n col = section\n try:\n return self.col_data[col].display_name\n except IndexError:\n return nope\n else:\n return str(section + 1)\n return nope\n\n # rows\n def rowCount(self, parent: QModelIndex = ...) -> int:\n return len(self.channels)\n\n # data\n TRIGGER = \"trigger__\"\n\n def data(self, index: QModelIndex, role=Qt.DisplayRole) -> Any:\n col = index.column()\n row = index.row()\n\n if (\n role in [Qt.DisplayRole, Qt.EditRole]\n and index.isValid()\n and row < self.rowCount()\n ):\n data = self.col_data[col]\n key = data.key\n if key.startswith(self.TRIGGER):\n key = behead(key, self.TRIGGER)\n value = self.triggers(row).get(key, \"\")\n\n else:\n value = getattr(self.channels[row], key)\n\n if not data.always_show and value == data.default:\n return \"\"\n if key == \"wav_path\" and role == Qt.DisplayRole:\n if Path(value).parent != Path():\n return \"...\" + Path(value).name\n return str(value)\n\n return nope\n\n def setData(self, index: QModelIndex, value: str, role=Qt.EditRole) -> bool:\n col = index.column()\n row = index.row()\n\n if index.isValid() and role == Qt.EditRole:\n # type(value) == str\n\n data = self.col_data[col]\n key = data.key\n if value and not value.isspace():\n try:\n value = data.cls(value)\n except ValueError:\n return False\n else:\n value = data.default\n\n if key.startswith(self.TRIGGER):\n key = behead(key, self.TRIGGER)\n trigger = self.triggers(row)\n if value == data.default:\n # Delete key if (key: value) present\n trigger.pop(key, None)\n else:\n trigger[key] = value\n\n else:\n setattr(self.channels[row], key, value)\n\n self.dataChanged.emit(index, index, [role])\n return True\n return False\n\n \"\"\"So if I understood it correctly you want to reorder the columns by moving the\n headers and then want to know how the view looks like. I believe ( 90% certain )\n when you reorder the headers it does not trigger any change in the model! and\n then if you just start printing the data of the model you will only see the data\n in the order how it was initially before you swapper/reordered some column with\n the header. 
\"\"\"\n\n def insertRows(self, row: int, count: int, parent=QModelIndex()) -> bool:\n if not (count >= 1 and 0 <= row <= len(self.channels)):\n return False\n\n self.beginInsertRows(parent, row, row + count - 1)\n self.channels[row:row] = [ChannelConfig(\"\") for _ in range(count)]\n self.endInsertRows()\n return True\n\n def removeRows(self, row: int, count: int, parent=QModelIndex()) -> bool:\n nchan = len(self.channels)\n # row <= nchan for consistency.\n if not (count >= 1 and 0 <= row <= nchan and row + count <= nchan):\n return False\n\n self.beginRemoveRows(parent, row, row + count - 1)\n del self.channels[row : row + count]\n self.endRemoveRows()\n return True\n\n def moveRows(\n self,\n _sourceParent: QModelIndex,\n src_row: int,\n count: int,\n _destinationParent: QModelIndex,\n dest_row: int,\n ):\n nchan = len(self.channels)\n if not (\n count >= 1\n and 0 <= src_row <= nchan\n and src_row + count <= nchan\n and 0 <= dest_row <= nchan\n ):\n return False\n\n # If source and destination overlap, beginMoveRows returns False.\n if not self.beginMoveRows(\n _sourceParent, src_row, src_row + count - 1, _destinationParent, dest_row\n ):\n return False\n\n # We know source and destination do not overlap.\n src = slice(src_row, src_row + count)\n dest = slice(dest_row, dest_row)\n\n if dest_row > src_row:\n # Move down: Insert dest, then remove src\n self.channels[dest] = self.channels[src]\n del self.channels[src]\n else:\n # Move up: Remove src, then insert dest.\n rows = self.channels[src]\n del self.channels[src]\n self.channels[dest] = rows\n self.endMoveRows()\n return True\n\n def flags(self, index: QModelIndex):\n if not index.isValid():\n return Qt.ItemIsEnabled\n return (\n qc.QAbstractItemModel.flags(self, index)\n | Qt.ItemIsEditable\n | Qt.ItemNeverHasChildren\n )\n\n\nclass DownloadFFmpegActivity:\n title = \"Missing FFmpeg\"\n\n ffmpeg_url = paths.get_ffmpeg_url()\n can_download = bool(ffmpeg_url)\n\n required = (\n f\"FFmpeg+FFplay must be in PATH or \"\n f'corrscope PATH in order to use corrscope.
'\n )\n\n ffmpeg_template = required + (\n f'Download ffmpeg from this link, '\n f\"open in 7-Zip and navigate to the ffmpeg-.../bin folder, \"\n f\"and copy all .exe files to the folder above.\"\n )\n fail_template = required + \"Cannot download FFmpeg for your platform.\"\n\n def __init__(self, window: qw.QWidget):\n \"\"\"Prompt the user to download and install ffmpeg.\"\"\"\n Msg = qw.QMessageBox\n\n if not self.can_download:\n Msg.information(window, self.title, self.fail_template, Msg.Ok)\n return\n\n Msg.information(window, self.title, self.ffmpeg_template, Msg.Ok)\n","repo_name":"corrscope/corrscope","sub_path":"corrscope/gui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":42732,"program_lang":"python","lang":"en","doc_type":"code","stars":431,"dataset":"github-code","pt":"77"} +{"seq_id":"29522501913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'askshjcn'\n\n'''\ntest3\nhannuota youxi\n'''\ndef move(n, a, b, c):\n\tif n == 1:\n\t\tprint(a, \"-->\", c)\n\telse:\n\t\tmove(n-1, a, c, b)\n\t\tprint(a, \"-->\", c)\n\t\tmove(n-1, b, a, c)\n\nn = int(input('enter a number: '))\nmove(n, 'A', 'B', 'C')\n","repo_name":"askshjcn/learnpython","sub_path":"hannuota.py","file_name":"hannuota.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72655138490","text":"from django.db import models\nfrom music.upload_to import uuid_name_upload_to\n\nclass List(models.Model):\n name = models.CharField(max_length=100, verbose_name=\"제목\")\n singer = models.CharField(max_length=100, verbose_name=\"가수명\")\n created_at = models.DateTimeField(auto_now_add=True)\n content = models.TextField(verbose_name=\"한줄 평\") # FK키를 쓰는 게 적절\n photo = models.ImageField(blank=True, upload_to=uuid_name_upload_to)\n updated_at = models.DateTimeField(auto_now=True)\n\n\n # upload_to\n # - 문자열 : 파일이 저장되는 폴더의 경로\n\n\n","repo_name":"junsoo-cpu/devops_cloud","sub_path":"myhomework11/music/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30147367281","text":"from flask import session, request, redirect, g, abort\nfrom app.models.Theme import Theme\nfrom app.models.User import User\nfrom app.server import server\nfrom app.instances import db\nfrom app.helpers.render import render_template, render_json\nfrom app.controllers import user_settings\nfrom app.session.csrf import csrf_protected\n\n\ndef do_redirect():\n redirect_url = request.args.get('redirect')\n if redirect_url is not None:\n return redirect(redirect_url, code=303)\n else:\n return \"\", 204\n\n\n@server.route(\"/settings\")\ndef profile_settings():\n if g.user is None:\n do_redirect()\n\n return render_template('settings.html')\n\n\n@server.route(\"/theme/light\", methods=['POST'])\n@csrf_protected\ndef set_light_theme():\n session['theme'] = 'light'\n if g.user is not None:\n g.user.theme = Theme.query.filter_by(name='light').first().id\n db.session.commit()\n return do_redirect()\n\n\n@server.route(\"/theme/dark\", methods=['POST'])\n@csrf_protected\ndef set_dark_theme():\n session['theme'] = 'dark'\n if g.user is not None:\n g.user.theme = Theme.query.filter_by(name='dark').first().id\n db.session.commit()\n return do_redirect()\n\n\n@server.route(\"/preferences/email\", methods=['POST'])\n@csrf_protected\ndef set_email():\n try:\n new_email = request.form['email']\n except 
KeyError:\n return abort(400)\n\n return user_settings.set_email(new_email) or do_redirect()\n\n\n@server.route(\"/preferences/name\", methods=['POST'])\n@csrf_protected\ndef set_name():\n try:\n new_name = request.form['name']\n except KeyError:\n return abort(400)\n\n return user_settings.set_name(new_name) or do_redirect()\n\n\n@server.route(\"/preferences/avatar\", methods=['POST'])\n@csrf_protected\ndef set_avatar():\n try:\n new_avatar_source = request.form['avatar']\n except KeyError:\n return abort(400)\n\n return user_settings.set_avatar(new_avatar_source) or do_redirect()\n\n\n@server.route(\"/preferences/profile\", methods=['POST'])\n@csrf_protected\ndef set_profile_preferences():\n if g.user is None:\n return abort(401)\n\n data = request.get_json()\n\n try:\n new_email = data.get('settings-profile-email', g.user.email)\n new_name = data.get('settings-profile-displayname', g.user.name)\n avatar_url = data.get('avatar-url', g.user.avatar)\n following_is_public = data.get('settings-privacy-public-following', g.user.following_public)\n linked_stackexchange_is_public = data.get('settings-privacy-public-linked-stackexchange', g.user.linked_stackexchange_public)\n receive_notifications = data.get('settings-profile-receivenotifications', g.user.receive_notifications)\n except KeyError:\n return abort(400)\n\n return \\\n user_settings.set_email(new_email) or \\\n user_settings.set_name(new_name) or \\\n user_settings.set_avatar(avatar_url) or \\\n user_settings.set_following_is_public(following_is_public) or \\\n user_settings.set_linked_stackexchange_is_public(linked_stackexchange_is_public) or \\\n user_settings.set_receive_notifications(receive_notifications) or \\\n do_redirect()\n\n\n@server.route(\"/preferences/privacy\", methods=['GET'])\n@csrf_protected\ndef get_privacy_preferences():\n if not isinstance(g.user, User):\n return abort(401)\n\n privacy_preferences = user_settings.get_privacy_settings(g.user)\n return render_json(privacy_preferences)\n","repo_name":"Axtell/Axtell","sub_path":"app/routes/user_settings.py","file_name":"user_settings.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"12855681446","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 14 23:03:11 2020\n\n@author: nathan\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef passe_bas(freq,FFT,fc): \n assert len(freq)==len(FFT)\n for i in range (len(freq)):\n if freq[i]>=fc:\n FFT[i]=0\n if freq[i]<=-fc: #ne pas oublier que la transformée de fourrier est une fct impaire\n FFT[i]=0\n return()\n \ndef fonction(t):\n return(t)\n\ndef FFT(F,Tech,end): #F : tableau de la fct, période echantillonage, fin echantillonage\n b=int(end/Tech)\n fft=np.fft.fft(F[0:b]) \n amp=np.abs(fft)\n amp=np.abs(fft)/amp.max()\n axefreq = np.fft.fftfreq(len(amp),Tech)\n return(fft,amp,axefreq)\n\nFech=100\nTech=1/Fech\nend=100\nt = np.arange(0,end,Tech)\nsignal = 2*np.cos(2*np.pi*3*t)+np.cos(2*np.pi*8*t)\n\n# signal=[]\n# for i in range(len(t)):\n# signal.append(fonction(t[i]))\n\nfft,amp,axef= FFT(signal,Tech,end)\n\nplt.figure(\"fct\")\nplt.plot(t,signal,'red')\nplt.xlim(0, 2)\nplt.show()\n\nplt.figure(\"spectre\")\nplt.plot(axef,amp,'red')\nplt.xlim(0, 30)\nplt.xlabel(\"f (Hz)\")\nplt.ylabel(\"amplitude\")\nplt.grid()\nplt.show() \n\n#\npasse_bas(axef,fft, 4)\namp=np.abs(fft)\namp=amp/amp.max()\n#\n\nplt.figure(\"filtrée\")\nplt.plot(axef,amp,'blue')\nplt.xlim(0, 
30)\nplt.xlabel(\"f (Hz)\")\nplt.ylabel(\"amplitude\")\nplt.grid()\nplt.show() \n\nplt.figure(\"final\")\nplt.plot(t,np.fft.ifft(fft),'blue')\nplt.xlim(0, 2)\nplt.show()","repo_name":"nathF78/TIPE-Differents-image-compression-algorithms-study","sub_path":"src/versions finales/spectre 1D.py","file_name":"spectre 1D.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26954189401","text":"from __future__ import print_function\nimport numpy as np\nimport scipy as scipy\nimport scipy.stats as stats\nfrom scipy.sparse import dok_matrix\nimport threading\nfrom threading import Thread\nfrom load import loader\nimport time\nt = loader(\"20newsgroups.mat\")\nword_list = []\nfor word in t[\"words\"]:\n\tword_list.append(str(word[0][0]))\nprint(len(word_list))\nmatrix = np.array(t[\"X\"].toarray())\nsparse = dok_matrix(t[\"X\"])\nkeys = sparse.keys()\nN = matrix.shape[0]\nM = matrix.shape[1]\nK = 20\nprint(\"The dimensions of U are : {} x {}\".format(N, K))\nprint(\"The dimensions of V are : {} x {}\".format(M, K))\na_u = 10.\nb_u = 120.\na_v = 10.\nb_v = 80.\nU = np.random.gamma(a_u, b_u, [N, K])\nV = np.random.gamma(a_v, b_v, [M,K])\nnum_iterations = 1000\nX_out = np.zeros([N,M, K])\nU_new = np.zeros_like(U)\nV_new = np.zeros_like(V)\ncount = 0\ndef sample(i,j, name):\n\tX_out = np.zeros([N,M, K])\n\tprint(\"Sampling for entry : {},{}\".format(i+1,j+1),end='\\r')\n\tP_vec = np.zeros([K])\n\tfor k in range(K):\n\t\tP_vec[k] = U[i,k]*V[j,k]\n\tP_vec = P_vec / np.sum(P_vec)\n\tX_out[i,j] = np.random.multinomial(matrix[i,j], P_vec).astype(np.float32)\n\tprint(\"Sampling for entry : {},{} completed\".format(i+1, j+1),end='\\r')\n\ndef x_sample(i,name):\n\ta_i = np.sum(X_out[i], axis=0) + np.ones([K])*a_u\n\tb_i = np.sum(V, axis=0) + np.ones([K])*(b_u)\n\tU_new[i] = np.random.gamma(a_i, 1./b_i)\ndef y_sample(j,name):\n\ta_i = np.sum(X_out[:,j], axis=0) + np.ones([K])*a_v\n\tb_i = np.sum(U, axis=0) + np.ones([K])*(b_v)\n\tV_new[j] = np.random.gamma(a_i,1./ b_i)\n\t\n\nfor i in range(num_iterations):\n\tstart_iter = time.time()\n\tprint(\"Running iteration : {}\".format(i + 1))\n\tprint(\"Sampling from multinomial\")\n\tthreads = []\n\tfor key in keys:\n\t\tprint(\"Setting up thread: {}\".format(key), end='\\r')\n\t\tthread = Thread(target=sample,args=(key[0],key[1],str(i)))\n\t\tthreads.append(thread)\n\tx = 0\n\tstart = time.time()\n\twhile(x < len(threads)):\n\t\t\tthreads[x].start()\n\t\t\tx+=1\n\tfor i in range(len(threads)):\n\t\tthreads[i].join()\n\tprint(time.time() - start)\n\tprint(\"sampling from gamma for u\")\n\tthreads = []\n\tfor i in range(N):\n\t\tthread = Thread(target=x_sample,args=(i, str(i)))\n\t\tthreads.append(thread)\n\tx = 0\n\tstart = time.time()\n\twhile(x < len(threads)):\n\t\tif (threading.active_count() < 32):\n\t\t\tthreads[x].start()\n\t\t\tx+=1\n\tfor i in range(len(threads)):\n\t\tthreads[i].join()\n\tprint(time.time() - start)\n\tprint(\"sampling from gamma for v\")\n\tthreads = []\n\tfor j in range(M):\n\t\tthread = Thread(target=y_sample,args= (j, str(j)))\n\t\tthreads.append(thread)\n\tx = 0\n\tstart = time.time()\n\twhile(x < len(threads)):\n\t\tif (threading.active_count() < 32):\n\t\t\tthreads[x].start()\n\t\t\tx+=1\n\tfor i in range(len(threads)):\n\t\tthreads[i].join()\n\tprint(\"computing error and seeing results\")\n\tX_new = np.matmul(U, np.transpose(V))\n\tX_sparse_new = dok_matrix(sparse)\n\terror = np.mean(np.abs(sparse-X_sparse_new))\n\tif error_old == 
None or i < 50:\n\t\tsparse_old = X_sparse_new\n\telse :\n\t\tsparse_old = sparse_old * (float(i)/float(i+1)) + (1./i+1)*X_sparse_new\n\terror_old = np.mean(np.abs(sparse-sparse_old))\n\tprint(\"Error is : {} in time : {}\".format(error, time.time() - start_iter))\n\tprint(\"Error averaged is : {} in time : {}\".format(error_old, time.time() - start_iter))\n\tU = np.copy(U_new)\n\tV = np.copy(V_new)\n","repo_name":"prannayk/CS772","sub_path":"hw3/threaded.py","file_name":"threaded.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4634280239","text":"fruit = input().lower()\nday = input()\nquantity = float(input())\nweek = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\"]\nweekend = [\"Saturday\", \"Sunday\"]\nfruits = [\"banana\", \"apple\", \"orange\", \"grapefruit\", \"kiwi\", \"pineapple\", \"grapes\"]\nprice = 0\nis_fruit_valid = False\nis_day_valid = False\nif fruit in fruits:\n is_fruit_valid = True\nif day in week:\n is_day_valid = True\n\n if fruit == \"banana\":\n price = 2.50\n elif fruit == \"apple\":\n price = 1.20\n elif fruit == \"orange\":\n price = 0.85\n elif fruit == \"grapefruit\":\n price = 1.45\n elif fruit == \"kiwi\":\n price = 2.70\n elif fruit == \"pineapple\":\n price = 5.50\n elif fruit == \"grapes\":\n price = 3.85\nelif day in weekend:\n is_day_valid = True\n\n if fruit == \"banana\":\n price = 2.70\n elif fruit == \"apple\":\n price = 1.25\n elif fruit == \"orange\":\n price = 0.90\n elif fruit == \"grapefruit\":\n price = 1.60\n elif fruit == \"kiwi\":\n price = 3\n elif fruit == \"pineapple\":\n price = 5.60\n elif fruit == \"grapes\":\n price = 4.20\n\nif is_fruit_valid and is_day_valid:\n end_sum = quantity * price\n print(f\"{end_sum:.2f}\")\nelse:\n print(f\"error\")\n","repo_name":"4um3n/SoftUni-Courses","sub_path":"Programing-Basics-With-Python/0-3-Conditional-Statements-Advanced/Lab/fruit_shop.py","file_name":"fruit_shop.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"74137386807","text":"from controllers.controller import CustomGroupController, CustomUserController, CRMController\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\nimport json\n\n\ndef lire_configuration():\n with open('config.json', 'r') as fichier_config:\n config = json.load(fichier_config)\n result = division_by_zero = 1 / 0\n return config\n\n\nconfig = lire_configuration()\ndsn_sentry = config.get('dsn')\n\n# Initialisation de Sentry\nsentry_sdk.init(\n dsn=dsn_sentry,\n integrations=[FlaskIntegration()],\n traces_sample_rate=1.0\n)\n\n\ndef main():\n # Instanciez les contrôleurs\n crm_controller = CRMController()\n custom_group_controller = CustomGroupController()\n custom_user_controller = CustomUserController()\n # result = division_by_zero = 1 / 0\n while True:\n print(\"\\n================================================\")\n print(\" Bienvenue dans notre application CRM\")\n print(\"\\n================================================\")\n print(\"1. Gérer les clients\")\n print(\"2. Gérer les contrats\")\n print(\"3. Gérer les événements\")\n print(\"4. Gérer les groupes personnalisés\")\n print(\"5. Gérer les utilisateurs personnalisés\")\n print(\"6. 
Quitter\")\n\n choix = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix == \"1\":\n # Menu de gestion des clients\n while True:\n try:\n print(\"1. Ajouter un client\")\n print(\"2. Afficher tous les clients\")\n print(\"3. Mettre à jour l'email d'un client\")\n print(\"4. Supprimer un client\")\n print(\"5. Retour\")\n\n choix_client = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix_client == \"1\":\n nom_complet = input(\"Nom complet du client : \")\n email = input(\"Email du client : \")\n telephone = input(\"Téléphone du client : \")\n nom_entreprise = input(\"Nom de l'entreprise : \")\n contact_commercial = input(\"Nom du contact commercial : \")\n crm_controller.add_client(nom_complet, email, telephone, nom_entreprise, contact_commercial)\n elif choix_client == \"2\":\n clients = crm_controller.get_all_clients()\n for client in clients:\n print(f\"ID: {client.id}, Nom: {client.nom_complet}, Email: {client.email}\")\n elif choix_client == \"3\":\n client_id = int(input(\"ID du client : \"))\n new_email = input(\"Nouvel email : \")\n crm_controller.update_client_email(client_id, new_email)\n elif choix_client == \"4\":\n client_id = int(input(\"ID du client : \"))\n crm_controller.delete_client(client_id)\n elif choix_client == \"5\":\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n except Exception as e:\n sentry_sdk.capture_exception(e)\n print(\"Une erreur s'est produite. Veuillez réessayer.\")\n\n elif choix == \"2\":\n # Menu de gestion des contrats\n while True:\n try:\n print(\"1. Ajouter un contrat\")\n print(\"2. Afficher tous les contrats\")\n print(\"3. Retour\")\n\n choix_contrat = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix_contrat == \"1\":\n client_id = int(input(\"ID du client associé au contrat : \"))\n contact_commercial = input(\"Nom du contact commercial : \")\n montant_total = float(input(\"Montant total du contrat : \"))\n montant_restant = float(input(\"Montant restant à payer : \"))\n statut_contrat = int(input(\"Statut du contrat (1 pour actif, 0 pour inactif) : \"))\n crm_controller.add_contract(client_id, contact_commercial,\n montant_total, montant_restant,\n statut_contrat\n )\n elif choix_contrat == \"2\":\n contracts = crm_controller.get_all_contracts()\n for contract in contracts:\n print(f\"ID: {contract.identifiant_unique},\"\n f\" Client ID: {contract.client_id},\"\n f\" Montant Total: {contract.montant_total},\"\n f\" Statut: {contract.statut_contrat}\")\n elif choix_contrat == \"3\":\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n except Exception as e:\n sentry_sdk.capture_exception(e)\n print(\"Une erreur s'est produite. Veuillez réessayer.\")\n\n elif choix == \"3\":\n # Menu de gestion des événements\n while True:\n try:\n print(\"1. Ajouter un événement\")\n print(\"2. Afficher les événements d'un contrat\")\n print(\"3. 
Retour\")\n\n choix_evenement = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix_evenement == \"1\":\n contract_id = int(input(\"ID du contrat associé à l'événement : \"))\n client_name = input(\"Nom du client : \")\n client_contact = input(\"Contact du client : \")\n event_date_start = input(\"Date de début de l'événement (YYYY-MM-DD HH:MM:SS) : \")\n event_date_end = input(\"Date de fin de l'événement (YYYY-MM-DD HH:MM:SS) : \")\n support_contact = input(\"Nom du contact de support : \")\n location = input(\"Lieu de l'événement : \")\n attendees = int(input(\"Nombre de participants : \"))\n notes = input(\"Notes sur l'événement : \")\n crm_controller.add_event(contract_id, client_name, client_contact,\n event_date_start, event_date_end,\n support_contact, location, attendees, notes\n )\n elif choix_evenement == \"2\":\n contract_id = int(input(\"ID du contrat : \"))\n events = crm_controller.get_events_by_contract(contract_id)\n for event in events:\n print(f\"ID: {event.event_id},\"\n f\" Client: {event.client_name},\"\n f\" Début: {event.event_date_start},\"\n f\" Fin: {event.event_date_end},\"\n f\" Lieu: {event.location}\")\n elif choix_evenement == \"3\":\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n except Exception as e:\n sentry_sdk.capture_exception(e)\n print(\"Une erreur s'est produite. Veuillez réessayer.\")\n\n elif choix == \"4\":\n # Menu de gestion des groupes personnalisés\n while True:\n try:\n print(\"1. Ajouter un groupe personnalisé\")\n print(\"2. Afficher tous les groupes personnalisés\")\n print(\"3. Supprimer un groupe personnalisé\")\n print(\"4. Retour\")\n\n choix_groupe = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix_groupe == \"1\":\n nom_groupe = input(\"Nom du groupe personnalisé : \")\n custom_group_controller.add_custom_group(nom_groupe)\n elif choix_groupe == \"2\":\n groupes = custom_group_controller.get_all_custom_groups()\n for groupe in groupes:\n print(f\"ID: {groupe.id}, Nom: {groupe.name}\")\n elif choix_groupe == \"3\":\n groupe_id = int(input(\"ID du groupe personnalisé : \"))\n custom_group_controller.delete_custom_group(groupe_id)\n elif choix_groupe == \"4\":\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n except Exception as e:\n sentry_sdk.capture_exception(e)\n print(\"Une erreur s'est produite. Veuillez réessayer.\")\n\n elif choix == \"5\":\n # Menu de gestion des utilisateurs personnalisés\n while True:\n try:\n print(\"1. Ajouter un utilisateur personnalisé\")\n print(\"2. Afficher tous les utilisateurs personnalisés\")\n print(\"3. Supprimer un utilisateur personnalisé\")\n print(\"4. 
Retour\")\n\n choix_utilisateur = input(\"Veuillez entrer le numéro de l'option que vous souhaitez choisir : \")\n\n if choix_utilisateur == \"1\":\n username = input(\"Nom d'utilisateur : \")\n first_name = input(\"Prénom : \")\n last_name = input(\"Nom : \")\n email = input(\"Adresse e-mail : \")\n password = input(\"Mot de passe : \")\n is_staff = int(input(\"Est un membre du personnel (1 pour vrai, 0 pour faux) : \"))\n is_active = int(input(\"Est actif (1 pour vrai, 0 pour faux) : \"))\n department = input(\"Département : \")\n custom_user_controller.add_custom_user(username, first_name, last_name, email, password,\n is_staff, is_active, department)\n elif choix_utilisateur == \"2\":\n utilisateurs = custom_user_controller.get_all_custom_users()\n for utilisateur in utilisateurs:\n print(\n f\"ID: {utilisateur.id},\"\n f\" Nom d'utilisateur: {utilisateur.username},\"\n f\" Email: {utilisateur.email}\")\n elif choix_utilisateur == \"3\":\n utilisateur_id = int(input(\"ID de l'utilisateur personnalisé : \"))\n custom_user_controller.delete_custom_user(utilisateur_id)\n elif choix_utilisateur == \"4\":\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n except Exception as e:\n sentry_sdk.capture_exception(e)\n print(\"Une erreur s'est produite. Veuillez réessayer.\")\n\n elif choix == \"6\":\n print(\"Merci d'avoir utilisé votre CRM. Au revoir !\")\n break\n else:\n print(\"Option invalide. Veuillez choisir une option valide.\")\n\n\n# Générez une erreur délibérée pour tester l'intégration avec Sentry\ndef generate_error():\n result = 1 / 0 # Cela générera une division par zéro exception\n return result\n\n\n# Initialisation de Sentry avec votre DSN\ndef initialiser_sentry(dsn):\n sentry_sdk.init(dsn=dsn, integrations=[FlaskIntegration()])\n\n\n# Lire la configuration et obtenir le DSN\nconfig = lire_configuration()\ndsn_sentry = config.get('dsn')\n\n# Initialiser Sentry avec le DSN\ninitialiser_sentry(dsn_sentry)\n\nif __name__ == \"__main__\":\n main()\n\n # Appelez la fonction generate_error() à la fin du script pour générer une erreur délibérée\n generate_error()\n","repo_name":"EmeryKroquet/OC_P12-EpicEvents","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12534,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"45298961814","text":"# accepting positional arguments can make a function call more clear and remove visual noise. These are sometimes called *args.\n# for example, say you want to log some debug information. with a fixed number of arguments, you would need a function that takes a\n# message and a list of values\n\ndef log(message, values):\n if not values:\n print(message)\n else:\n values_str = ' , '.join(str(x) for x in values)\n print('%s: %s' % (message, values_str))\n\nlog('My numbers are', [1,2])\nlog('Hi there', [])\n\n# but here we have to pass an empty list when there are no values. would be better to leave out the second argument entirely.\n# This can be done in python by prefixing the last positional parameter name with *. The first parameter for the log message is\n# required, whereas any number of subsequent positional arguments are optional. 
{"seq_id":"45298961814","text":"# accepting positional arguments can make a function call clearer and remove visual noise. These are sometimes called *args.\n# for example, say you want to log some debug information. with a fixed number of arguments, you would need a function that takes a\n# message and a list of values\n\ndef log(message, values):\n    if not values:\n        print(message)\n    else:\n        values_str = ' , '.join(str(x) for x in values)\n        print('%s: %s' % (message, values_str))\n\nlog('My numbers are', [1,2])\nlog('Hi there', [])\n\n# but here we have to pass an empty list when there are no values. would be better to leave out the second argument entirely.\n# This can be done in python by prefixing the last positional parameter name with *. The first parameter for the log message is\n# required, whereas any number of subsequent positional arguments are optional. The function body doesn't need to change, only the call\n\ndef log2(message, *values):\n    if not values:\n        print(message)\n    else:\n        values_str = ' , '.join(str(x) for x in values)\n        print('%s: %s' % (message, values_str))\n\n\nlog2('My numbers are', [1, 2])\nlog2('Hi there')\n\n# if you already have a list and want to call a variable argument function like log, you can do this by using the * operator:\n# this instructs python to pass items from the sequence as positional arguments\n\nfavorites = [7, 33, 99]\nlog2('favorite colours', *favorites)\n\n# two problems with accepting a variable number of positional arguments:\n\n# first is that the variable arguments are always turned into a tuple before they are passed to your function. This means that\n# if the caller of your function uses the * operator on a generator, it will be iterated until it's exhausted. The resulting\n# tuple will include every value from the generator, which could consume a lot of memory and cause the program to crash.\n\ndef my_generator():\n    for i in range(10):\n        yield i\n\ndef my_func(*args):\n    print(args)\n\nit = my_generator()\nmy_func(*it)\n\n# functions that accept *args are best for situations where you know the number of inputs in the argument list will be reasonably small.\n# it's ideal for function calls that pass many literals or variable names together. It's primarily for the convenience of the programmer\n# and the reliability of the code.\n\n# the second issue with *args is that you can't add new positional arguments to your function in the future without migrating every caller.\n# If you try to add a positional argument in the front of the argument list, existing callers will subtly break if they aren't updated.\n\ndef log3(sequence, message, *values):\n    if not values:\n        print('%s: %s' % (sequence, message))\n    else:\n        values_str = ', '.join(str(x) for x in values)\n        print('%s: %s: %s' % (sequence, message, values_str))\n\nlog3(1, 'favorites', 7, 33)\nlog3('Favorite numbers', 7, 33)\n\n# the problem here is that the second call to log3 used 7 as the message parameter because a sequence argument wasn't given.\n# bugs like this are hard to track down because the code still runs without raising exceptions\n","repo_name":"micullen/learn","sub_path":"59/18_posargs.py","file_name":"18_posargs.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
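# A hedged follow-up sketch, not part of the file above: Python 3 keyword-only
# parameters (anything declared after the bare *values) sidestep the migration
# problem just described, because a new parameter can only be passed by name,
# so old positional callers cannot silently shift their arguments. log4 and
# its signature are illustrative inventions.
def log4(message, *values, sequence=None):
    prefix = '%s: ' % sequence if sequence is not None else ''
    if not values:
        print(prefix + message)
    else:
        values_str = ', '.join(str(x) for x in values)
        print('%s%s: %s' % (prefix, message, values_str))

log4('favorites', 7, 33)              # existing callers keep working
log4('favorites', 7, 33, sequence=1)  # the new argument must be named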
{"seq_id":"6703748278","text":"import random\nimport string\nimport time\nfrom contextlib import contextmanager\n\nimport redis\n\n\nclass RedisClient(redis.StrictRedis):\n    \"\"\" RedisClient: a wrapper around redis.StrictRedis that adds:\n        - explicit host and port constructor arguments\n        - sets decode_responses to True so all replies from redis are translated from bytes to utf-8\n        - lock/unlock functionality\n        - delete keys by pattern\n    \"\"\"\n    decode_responses = True\n    charset = 'utf-8'\n    random_str_choices = string.ascii_uppercase + string.ascii_lowercase + string.digits\n    unlock_lua_code = \"\"\"if redis.call('get', KEYS[1]) == KEYS[2]\n                        then\n                            return redis.call('del', KEYS[1])\n                        else\n                            return 0\n                        end\"\"\"\n    unlock_lua_script = None\n    del_keys_by_pattern_lua_code = \"\"\" local key_list = redis.call('KEYS', KEYS[1])\n            if #key_list == 0 then\n                return 0\n            else\n                return redis.call('DEL', unpack(key_list))\n            end\"\"\"\n    del_keys_by_pattern_lua_script = None\n\n    def __init__(self, host, port):\n        self.host = host\n        self.port = port\n        super().__init__(port=self.port, host=self.host, decode_responses=self.decode_responses, charset=self.charset)\n        if self.__class__.unlock_lua_script is None:\n            self.__class__.unlock_lua_script = self.register_script(self.unlock_lua_code)\n        if self.__class__.del_keys_by_pattern_lua_script is None:\n            self.__class__.del_keys_by_pattern_lua_script = self.register_script(self.del_keys_by_pattern_lua_code)\n\n    @classmethod\n    def del_by_pattern(cls, redis_obj, keys_pattern):\n        \"\"\" del_by_pattern deletes keys according to a pattern such as wv:wle:device:*\n            del_by_pattern is implemented with a lua script that is passed to the server\n            when RedisClient is initialized.\n            del_by_pattern is implemented as a class method so it can be used by both\n            RedisClient or a pipeline object.\n            Usage: redisClient.RedisClient.del_by_pattern(redis_instance, \"100*\")\n        \"\"\"\n        cls.del_keys_by_pattern_lua_script(keys=(keys_pattern,), client=redis_obj)\n\n    @contextmanager\n    def transaction_pipeline(self):\n        pl = self.pipeline()\n        yield pl\n        pl.execute()\n\n    @contextmanager\n    def lock_redis(self, lock_name, expire_seconds=5, re_trys=1):\n        \"\"\" Lock redis; raise TimeoutError if the lock cannot be acquired \"\"\"\n        lock_value = ''.join(random.choice(self.random_str_choices) for i in range(64))\n        for re_try in range(re_trys):\n            locked = self.set(lock_name, lock_value, nx=True)  # , ex=expire_seconds\n            if locked:\n                yield locked\n                self.unlock_lua_script(keys=(lock_name, lock_value))\n                break\n            else:\n                time.sleep(1)\n        else:\n            raise TimeoutError(\"Unable to lock redis, lock name=\" + lock_name)\n\n    @contextmanager\n    def try_lock_redis(self, lock_name, expire_seconds=5, re_trys=1):\n        \"\"\" Lock redis; yield False instead of raising if the lock cannot be acquired \"\"\"\n        try:\n            with self.lock_redis(lock_name, expire_seconds=expire_seconds, re_trys=re_trys):\n                yield True\n        except TimeoutError:\n            yield False\n\n    def force_remove_lock(self, lock_name):\n        retVal = self.delete(lock_name)\n        print(\"force removed lock\", lock_name, \"retVal:\", retVal)\n        return retVal\n\n    # this function is listed as part of the redis.StrictRedis API but is not implemented\n    # so here is a copy\n    def pubsub_channels(self, pattern='*'):\n        \"\"\"\n        Return a list of channels that have at least one subscriber\n        \"\"\"\n        return self.execute_command('PUBSUB CHANNELS', pattern)\n","repo_name":"wavesaudio/instl","sub_path":"utils/redisClient.py","file_name":"redisClient.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"}
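# A small usage sketch for the wrapper above (host, port and key names are
# hypothetical; RedisClient is the class defined in the preceding file).
client = RedisClient(host="localhost", port=6379)

# Serialise a critical section across processes; try_lock_redis yields False
# instead of raising when the lock is already held by someone else.
with client.try_lock_redis("locks:nightly-job", re_trys=3) as acquired:
    if acquired:
        client.set("jobs:nightly:status", "running")

# Delete a whole key namespace server-side via the registered lua script.
RedisClient.del_by_pattern(client, "jobs:nightly:*")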
{"seq_id":"33902706815","text":"# var = open(<file_name>, <mode>)\n# modes:\n# r - open for reading (default)\n# w - open for writing, truncating the file; creates it if it does not exist\n# x - open for writing, only if the file does not exist yet\n# a - open for writing, appending at the end\n# b - open in binary\n# t - open in text (default)\n# + - open for reading and writing\n\n# example\n# f_w = open(\"course_python_1/python_test_file.txt\", \"w\")\n# f_w.write(\"Hello_World\\n\")\n# f_w.close()\n#\n# f_r = open(\"course_python_1/python_test_file.txt\")\n# print(f_r.read())\n# f_r.close()\n\n# fill e-mails in task_file\n\ndef is_telephone_number(input_string):\n    s = str(input_string)\n    try:\n        if len(s) == 7:\n            for ch in s:\n                if not (int(ch) >= 0) & (int(ch) <= 9):\n                    return False\n        else:\n            return False\n    except Exception as ex:\n        print(\"WARN: skip, caught Exception while checking telephone number: \", end=\"\")\n        print(ex)\n        return False\n    else:\n        return True\n\n\ndef email_gen(list_of_names):\n    emails = []\n    for i in list_of_names:\n        letter = 1\n        while i[1] + '.' + i[0][0:letter] + '@company.io' in emails:\n            letter += 1\n        generated_email = i[1] + '.' + i[0][0:letter] + '@company.io'\n        emails.append(generated_email)\n        i.append(generated_email)\n    return list_of_names\n\n\ndef fill_list_of_lines_from_file(file_name):\n    '''\n    fill list_of_lines from file, removing extra chars\n    '''\n    file = open(file_name, \"r\")\n    list_of_lines_from_file = []\n    for line in file:\n        line = line.replace(\" \", \"\")\n        line = line.replace(\"\\n\", \"\")\n        list_of_lines_from_file.append(line.split(\",\"))\n    file.close()\n    return list_of_lines_from_file\n\n\ndef enrich_list_of_lines(list_of_lines, list_of_names_and_surnames_with_emails):\n    for line in list_of_lines:\n        for sub_line in list_of_names_and_surnames_with_emails:\n            if (line[1] == sub_line[0]) & (line[2] == sub_line[1]) & (sub_line[2] != \"used\"):\n                line[0] = sub_line[2]\n                sub_line[2] = \"used\"\n                break\n    return list_of_lines\n\n\ndef create_filled_list_with_emails(filename):\n    list_of_lines = fill_list_of_lines_from_file(filename)\n    list_of_names_and_surnames = []\n    for line in list_of_lines:\n        name_and_surname = []\n        if (len(line[1]) == 0) | (len(line[2]) == 0) | (not is_telephone_number(line[3])):\n            continue\n        name_and_surname.append(line[1])\n        name_and_surname.append(line[2])\n        list_of_names_and_surnames.append(name_and_surname)\n\n    list_of_names_and_surnames_with_emails = email_gen(list_of_names_and_surnames)\n    return enrich_list_of_lines(list_of_lines, list_of_names_and_surnames_with_emails)\n\n\ndef create_new_file_with_emails(original_file, filled_file):\n    result_list_with_emails = create_filled_list_with_emails(original_file)\n    result_file_object = open(filled_file, \"w\")\n\n    for line in result_list_with_emails:\n        i = 0\n        for element in line:\n            if i != len(line) - 1:\n                result_file_object.write(element + \", \")\n            else:\n                result_file_object.write(element + \"\\n\")\n            i += 1\n\n    result_file_object.close()\n\n\ntask_file = \"course_python_1/files_task_file.txt\"\nresult_file = \"course_python_1/files_task_file_result.csv\"\ncreate_new_file_with_emails(task_file, result_file)\n","repo_name":"Karavaevich/python","sub_path":"course_python_1/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
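# An illustrative run of email_gen() from the file above (names invented);
# the comments show what the collision-avoidance loop produces.
people = [["anna", "smith"], ["alex", "smith"], ["maria", "lee"]]
print(email_gen(people))
# [['anna', 'smith', 'smith.a@company.io'],
#  ['alex', 'smith', 'smith.al@company.io'],  # 'smith.a@...' was taken, so a second letter is used
#  ['maria', 'lee', 'lee.m@company.io']]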
{"seq_id":"377401017","text":"import csv\nimport sys\n\nbegin_time = {}\nend_time = {}\nwith open(sys.argv[1]) as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=' ')\n    previous_token = ''\n    previous_crowdsale = ''\n    begin = 0\n    end = 0\n    for row in csv_reader:\n        if previous_token == '':\n            previous_token = row[0]\n            previous_crowdsale = row[1]\n            begin = int(row[2])\n            end = int(row[3])\n            continue\n        if row[0] == previous_token and row[1] == previous_crowdsale:\n            if int(row[3]) > end:\n                end = int(row[3])\n            if int(row[2]) < begin:\n                begin = int(row[2])\n        else:\n            key = previous_token + previous_crowdsale\n            begin_time[key] = begin\n            end_time[key] = end\n            previous_token = row[0]\n            previous_crowdsale = row[1]\n            begin = int(row[2])\n            end = int(row[3])\n    key = previous_token + previous_crowdsale\n    begin_time[key] = begin\n    end_time[key] = end\n\nwith open(sys.argv[2]) as csv_file1:\n    csv_reader1 = csv.reader(csv_file1, delimiter=' ')\n    file1 = open(\"step1_airdropper.csv\", \"w+\")\n    file2 = open(\"find_the_ico_senders.csv\", \"w+\")\n    for row in csv_reader1:\n        key = row[0]+row[1]\n        if key in begin_time:\n            coin_begin = begin_time[key]\n            coin_end = end_time[key]\n            if int(row[6]) < coin_begin or int(row[5]) > coin_end:\n                file1.write(\"{0} {1} {2} {3} {4} {5} {6}\\n\".format(row[0], row[1], row[2], row[3], row[4], row[5], row[6]))\n            else:\n                file2.write(\"{0} {1} {2} {3} {4} {5}\\n\".format(row[0], row[1], row[5], row[6], coin_begin, coin_end))\n        else:\n            file1.write(\"{0} {1} {2} {3} {4} {5} {6}\\n\".format(row[0], row[1], row[2], row[3], row[4], row[5], row[6]))\n    file1.close()\n    file2.close()\n","repo_name":"Airdrop-Study-MSR-2020/ETH-airdrop-study","sub_path":"proactive_airdrop/remove_ico_account_from_airdropper.py","file_name":"remove_ico_account_from_airdropper.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"3312761182","text":"from machine import ADC, Pin\nimport utime\n\n# use variables instead of numbers:\nwater = ADC(Pin(28))  # water PIN reference 3.3V\n\n# Calibration values\nmin_waterheight = 19200\nmax_waterheight = 49300\n\nreadDelay = 0.5\n\nwhile True:\n    # read the water level and convert it to a percentage within the calibration range\n    waterlevel = (max_waterheight - water.read_u16()) * 100 / (max_waterheight - min_waterheight)\n    print(\"water: \" + \"%.2f\" % waterlevel + \"% (adc: \" + str(water.read_u16()) + \")\")\n    utime.sleep(readDelay)\n","repo_name":"munin1010/AIOT-","sub_path":"4 降雨量感測器/沒用/waterlevel0801.py","file_name":"waterlevel0801.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
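# The percentage in the sensor loop above can drift outside 0-100 when the ADC
# reading leaves the calibration window; a small clamp helper (an addition,
# not part of the original file) keeps the displayed value honest.
def clamp_percent(raw: float) -> float:
    # constrain the computed level to the 0-100 range expected by displays
    return max(0.0, min(100.0, raw))

# e.g. clamp_percent(103.7) -> 100.0, clamp_percent(-2.1) -> 0.0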
{"seq_id":"9636130803","text":"import logging\nimport math\nfrom typing import List, Optional, Tuple\n\nimport casadi as ca\n\nimport numpy as np\n\nfrom rtctools.optimization.collocated_integrated_optimization_problem import (\n    CollocatedIntegratedOptimizationProblem,\n)\nfrom rtctools.optimization.timeseries import Timeseries\n\nfrom rtctools_heat_network._heat_loss_u_values_pipe import heat_loss_u_values_pipe\nfrom rtctools_heat_network.control_variables import map_comp_type_to_control_variable\n\nfrom .base_component_type_mixin import BaseComponentTypeMixin\nfrom .head_loss_mixin import HeadLossOption, _HeadLossMixin\nfrom .pipe_class import PipeClass\n\n\nlogger = logging.getLogger(\"rtctools_heat_network\")\n\n\nclass HeatMixin(_HeadLossMixin, BaseComponentTypeMixin, CollocatedIntegratedOptimizationProblem):\n    __allowed_head_loss_options = {\n        HeadLossOption.NO_HEADLOSS,\n        HeadLossOption.LINEAR,\n        HeadLossOption.LINEARIZED_DW,\n    }\n\n    def __init__(self, *args, **kwargs):\n        # Prepare dicts for additional variables\n        self.__flow_direct_var = {}\n        self.__flow_direct_bounds = {}\n        self.__pipe_to_flow_direct_map = {}\n\n        self.__pipe_disconnect_var = {}\n        self.__pipe_disconnect_var_bounds = {}\n        self.__pipe_disconnect_map = {}\n\n        self.__check_valve_status_var = {}\n        self.__check_valve_status_var_bounds = {}\n        self.__check_valve_status_map = {}\n\n        self.__control_valve_direction_var = {}\n        self.__control_valve_direction_var_bounds = {}\n        self.__control_valve_direction_map = {}\n\n        self.__buffer_t0_bounds = {}\n\n        self.__pipe_topo_diameter_var = {}\n        self.__pipe_topo_diameter_var_bounds = {}\n        self.__pipe_topo_diameter_map = {}\n        self.__pipe_topo_diameter_nominals = {}\n\n        self.__pipe_topo_cost_var = {}\n        self.__pipe_topo_cost_var_bounds = {}\n        self.__pipe_topo_cost_map = {}\n        self.__pipe_topo_cost_nominals = {}\n\n        self.__pipe_topo_heat_loss_var = {}\n        self.__pipe_topo_heat_loss_var_bounds = {}\n        self.__pipe_topo_heat_loss_map = {}\n        self.__pipe_topo_heat_loss_nominals = {}\n        self.__pipe_topo_heat_losses = {}\n\n        self.__pipe_topo_pipe_class_var = {}\n        self.__pipe_topo_pipe_class_var_bounds = {}\n        self.__pipe_topo_pipe_class_map = {}\n        self.__pipe_topo_pipe_class_result = {}\n\n        self.__pipe_topo_heat_discharge_bounds = {}\n\n        self.__pipe_topo_diameter_area_parameters = []\n        self.__pipe_topo_heat_loss_parameters = []\n\n        # Setpoint vars\n        self._timed_setpoints = {}\n        self._change_setpoint_var = {}\n        self._change_setpoint_bounds = {}\n        self._component_to_change_setpoint_map = {}\n\n        if \"timed_setpoints\" in kwargs and isinstance(kwargs[\"timed_setpoints\"], dict):\n            self._timed_setpoints = kwargs[\"timed_setpoints\"]\n\n        super().__init__(*args, **kwargs)\n\n    def pre(self):\n        super().pre()\n\n        options = self.heat_network_options()\n        parameters = self.parameters(0)\n\n        def _get_max_bound(bound):\n            if isinstance(bound, np.ndarray):\n                return max(bound)\n            elif isinstance(bound, Timeseries):\n                return max(bound.values)\n            else:\n                return bound\n\n        def _get_min_bound(bound):\n            if isinstance(bound, np.ndarray):\n                return min(bound)\n            elif isinstance(bound, Timeseries):\n                return min(bound.values)\n            else:\n                return bound\n\n        bounds = self.bounds()\n\n        # Mixed-integer formulation of component setpoint\n        for component_name in self._timed_setpoints.keys():\n            # Make 1 variable per component (so not per control\n            # variable) which represents whether the setpoint of the component\n            # is changed (1) or not changed (0) in a timestep\n            change_setpoint_var = f\"{component_name}._change_setpoint_var\"\n            self._component_to_change_setpoint_map[component_name] = change_setpoint_var\n            self._change_setpoint_var[change_setpoint_var] = ca.MX.sym(change_setpoint_var)\n            self._change_setpoint_bounds[change_setpoint_var] = (0, 1.0)\n\n        # Mixed-integer formulation applies only to hot pipes, not to cold\n        # pipes.\n        for p in self.hot_pipes:\n            flow_dir_var = f\"{p}__flow_direct_var\"\n\n            self.__pipe_to_flow_direct_map[p] = flow_dir_var\n            self.__flow_direct_var[flow_dir_var] = ca.MX.sym(flow_dir_var)\n\n            # Fix the directions that are already implied by the bounds on heat\n            # Nonnegative heat implies that flow direction Boolean is equal to one.\n            # Nonpositive heat implies that flow direction Boolean is equal to zero.\n\n            heat_in_lb = _get_min_bound(bounds[f\"{p}.HeatIn.Heat\"][0])\n            heat_in_ub = _get_max_bound(bounds[f\"{p}.HeatIn.Heat\"][1])\n            heat_out_lb = _get_min_bound(bounds[f\"{p}.HeatOut.Heat\"][0])\n            heat_out_ub = _get_max_bound(bounds[f\"{p}.HeatOut.Heat\"][1])\n\n            if (heat_in_lb >= 0.0 and heat_in_ub >= 0.0) or (\n                heat_out_lb >= 0.0 and heat_out_ub >= 0.0\n            ):\n                self.__flow_direct_bounds[flow_dir_var] = (1.0, 1.0)\n            elif (heat_in_lb <= 0.0 and heat_in_ub <= 0.0) or (\n                heat_out_lb <= 0.0 and heat_out_ub <= 0.0\n            ):\n                self.__flow_direct_bounds[flow_dir_var] = (0.0, 0.0)\n            else:\n                self.__flow_direct_bounds[flow_dir_var] = (0.0, 1.0)\n\n            if parameters[f\"{p}.disconnectable\"]:\n                disconnected_var = f\"{p}__is_disconnected\"\n\n                self.__pipe_disconnect_map[p] = disconnected_var\n                self.__pipe_disconnect_var[disconnected_var] = ca.MX.sym(disconnected_var)\n                self.__pipe_disconnect_var_bounds[disconnected_var] = (0.0, 1.0)\n\n            if heat_in_ub <= 0.0 and heat_out_lb >= 0.0:\n                raise Exception(f\"Heat flow rate in/out of pipe '{p}' cannot be zero.\")\n\n        for v in self.heat_network_components.get(\"check_valve\", []):\n            status_var = f\"{v}__status_var\"\n\n            self.__check_valve_status_map[v] = status_var\n            self.__check_valve_status_var[status_var] = ca.MX.sym(status_var)\n            
self.__check_valve_status_var_bounds[status_var] = (0.0, 1.0)\n\n        for v in self.heat_network_components.get(\"control_valve\", []):\n            flow_dir_var = f\"{v}__flow_direct_var\"\n\n            self.__control_valve_direction_map[v] = flow_dir_var\n            self.__control_valve_direction_var[flow_dir_var] = ca.MX.sym(flow_dir_var)\n            self.__control_valve_direction_var_bounds[flow_dir_var] = (0.0, 1.0)\n\n        # Pipe topology variables\n\n        # In case the user overrides the pipe class of the pipe with a single\n        # pipe class we update the diameter/area parameters. If there is more\n        # than a single pipe class for a certain pipe, we set the diameter\n        # and area to NaN to prevent erroneous constraints.\n        for _ in range(self.ensemble_size):\n            self.__pipe_topo_diameter_area_parameters.append({})\n            self.__pipe_topo_heat_loss_parameters.append({})\n\n        for pipe in self.hot_pipes:\n            pipe_classes = self.pipe_classes(pipe)\n            cold_pipe = self.hot_to_cold_pipe(pipe)\n\n            if len([c for c in pipe_classes if c.inner_diameter == 0]) > 1:\n                raise Exception(\n                    f\"Pipe {pipe} should not have more than one `diameter = 0` pipe class\"\n                )\n\n            # Note that we always make a diameter symbol, even if the diameter\n            # is fixed. This can be convenient when playing around with\n            # different pipe class options, and providing a uniform interface\n            # to the user. Contrary to that, the pipe class booleans are very\n            # much an internal affair.\n            diam_var_name = f\"{pipe}__hn_diameter\"\n            self.__pipe_topo_diameter_var[diam_var_name] = ca.MX.sym(diam_var_name)\n            self.__pipe_topo_diameter_map[pipe] = diam_var_name\n\n            cost_var_name = f\"{pipe}__hn_cost\"\n            self.__pipe_topo_cost_var[cost_var_name] = ca.MX.sym(cost_var_name)\n            self.__pipe_topo_cost_map[pipe] = cost_var_name\n\n            if not pipe_classes:\n                # No pipe class decision to make for this pipe w.r.t. diameter\n                diameter = parameters[f\"{pipe}.diameter\"]\n                self.__pipe_topo_diameter_var_bounds[diam_var_name] = (diameter, diameter)\n                self.__pipe_topo_cost_var_bounds[cost_var_name] = (0.0, 0.0)\n                if diameter > 0.0:\n                    self.__pipe_topo_diameter_nominals[diam_var_name] = diameter\n                    self.__pipe_topo_cost_nominals[cost_var_name] = 1.0\n            elif len(pipe_classes) == 1:\n                # No pipe class decision to make for this pipe w.r.t. 
diameter\n diameter = pipe_classes[0].inner_diameter\n self.__pipe_topo_diameter_var_bounds[diam_var_name] = (diameter, diameter)\n self.__pipe_topo_cost_var_bounds[cost_var_name] = (0.0, 0.0)\n if diameter > 0.0:\n self.__pipe_topo_diameter_nominals[diam_var_name] = diameter\n self.__pipe_topo_cost_nominals[cost_var_name] = 1.0\n\n for ensemble_member in range(self.ensemble_size):\n d = self.__pipe_topo_diameter_area_parameters[ensemble_member]\n\n for p in [pipe, cold_pipe]:\n d[f\"{p}.diameter\"] = diameter\n d[f\"{p}.area\"] = pipe_classes[0].area\n else:\n diameters = [c.inner_diameter for c in pipe_classes]\n self.__pipe_topo_diameter_var_bounds[diam_var_name] = (\n min(diameters),\n max(diameters),\n )\n costs = [c.investment_costs for c in pipe_classes]\n self.__pipe_topo_cost_var_bounds[cost_var_name] = (\n min(costs),\n max(costs),\n )\n self.__pipe_topo_cost_nominals[cost_var_name] = min(x for x in costs if x > 0.0)\n\n self.__pipe_topo_diameter_nominals[diam_var_name] = min(\n x for x in diameters if x > 0.0\n )\n\n for ensemble_member in range(self.ensemble_size):\n d = self.__pipe_topo_diameter_area_parameters[ensemble_member]\n\n for p in [pipe, cold_pipe]:\n d[f\"{p}.diameter\"] = np.nan\n d[f\"{p}.area\"] = np.nan\n\n # For similar reasons as for the diameter, we always make a heat\n # loss symbol, even if the heat loss is fixed. Note that we also\n # override the .Heat_loss parameter for cold pipes, even though\n # it is not actually used in the optimization problem.\n heat_loss_var_name = f\"{pipe}__hn_heat_loss\"\n self.__pipe_topo_heat_loss_var[heat_loss_var_name] = ca.MX.sym(heat_loss_var_name)\n self.__pipe_topo_heat_loss_map[pipe] = heat_loss_var_name\n\n heat_losses = [\n self.__pipe_heat_loss(options, parameters, pipe, c.u_values) for c in pipe_classes\n ]\n\n if not pipe_classes or options[\"neglect_pipe_heat_losses\"]:\n # No pipe class decision to make for this pipe w.r.t. heat loss\n heat_loss = self.__pipe_heat_loss(options, parameters, pipe)\n self.__pipe_topo_heat_loss_var_bounds[heat_loss_var_name] = (heat_loss, heat_loss)\n if heat_loss > 0:\n self.__pipe_topo_heat_loss_nominals[heat_loss_var_name] = heat_loss\n\n for ensemble_member in range(self.ensemble_size):\n h = self.__pipe_topo_heat_loss_parameters[ensemble_member]\n for p in [pipe, cold_pipe]:\n h[f\"{p}.Heat_loss\"] = self.__pipe_heat_loss(options, parameters, p)\n elif len(pipe_classes) == 1:\n # No pipe class decision to make for this pipe w.r.t. 
heat loss\n u_values = pipe_classes[0].u_values\n heat_loss = self.__pipe_heat_loss(options, parameters, pipe, u_values)\n\n self.__pipe_topo_heat_loss_var_bounds[heat_loss_var_name] = (heat_loss, heat_loss)\n if heat_loss > 0:\n self.__pipe_topo_heat_loss_nominals[heat_loss_var_name] = heat_loss\n\n for ensemble_member in range(self.ensemble_size):\n h = self.__pipe_topo_heat_loss_parameters[ensemble_member]\n h[f\"{pipe}.Heat_loss\"] = heat_loss\n h[f\"{cold_pipe}.Heat_loss\"] = self.__pipe_heat_loss(\n options, parameters, cold_pipe, u_values\n )\n else:\n self.__pipe_topo_heat_losses[pipe] = heat_losses\n self.__pipe_topo_heat_loss_var_bounds[heat_loss_var_name] = (\n min(heat_losses),\n max(heat_losses),\n )\n self.__pipe_topo_heat_loss_nominals[heat_loss_var_name] = min(\n x for x in heat_losses if x > 0\n )\n\n for ensemble_member in range(self.ensemble_size):\n h = self.__pipe_topo_heat_loss_parameters[ensemble_member]\n h[f\"{pipe}.Heat_loss\"] = np.nan\n h[f\"{cold_pipe}.Heat_loss\"] = np.nan\n\n # Pipe class variables.\n if not pipe_classes or len(pipe_classes) == 1:\n # No pipe class decision to make for this pipe\n pass\n else:\n self.__pipe_topo_pipe_class_map[pipe] = {}\n\n for c in pipe_classes:\n pipe_class_var_name = f\"{pipe}__hn_pipe_class_{c.name}\"\n\n self.__pipe_topo_pipe_class_map[pipe][c] = pipe_class_var_name\n self.__pipe_topo_pipe_class_var[pipe_class_var_name] = ca.MX.sym(\n pipe_class_var_name\n )\n self.__pipe_topo_pipe_class_var_bounds[pipe_class_var_name] = (0.0, 1.0)\n\n # Update the bounds of the pipes that will have their diameter\n # optimized. Note that the flow direction may have already been fixed\n # based on the original bounds, if that was desired. We can therefore\n # naively override the bounds without taking this into account.\n for pipe in self.__pipe_topo_pipe_class_map:\n pipe_classes = self.__pipe_topo_pipe_class_map[pipe]\n max_discharge = max(c.maximum_discharge for c in pipe_classes)\n\n self.__pipe_topo_heat_discharge_bounds[f\"{pipe}.Q\"] = (-max_discharge, max_discharge)\n self.__pipe_topo_heat_discharge_bounds[f\"{self.hot_to_cold_pipe(pipe)}.Q\"] = (\n -max_discharge,\n max_discharge,\n )\n\n # Heat on cold side is zero, so no change needed\n cp = parameters[f\"{pipe}.cp\"]\n rho = parameters[f\"{pipe}.rho\"]\n dt = parameters[f\"{pipe}.dT\"]\n\n max_heat = cp * rho * dt * max_discharge\n\n self.__pipe_topo_heat_discharge_bounds[f\"{pipe}.Heat_in\"] = (-max_heat, max_heat)\n self.__pipe_topo_heat_discharge_bounds[f\"{pipe}.Heat_out\"] = (-max_heat, max_heat)\n\n # Note that all entries in self.__pipe_topo_heat_losses are guaranteed\n # to be in self.__pipe_topo_pipe_class_map, but not vice versa. If\n # e.g. 
all diameters have a heat loss of zero, we don't have any\n        # decision to make w.r.t heat loss.\n        for p in self.__pipe_topo_heat_losses:\n            assert p in self.__pipe_topo_pipe_class_map\n\n        # When optimizing for pipe size, we do not yet support all options\n        if self.__pipe_topo_pipe_class_map:\n            if options[\"minimum_velocity\"] > 0.0:\n                raise Exception(\n                    \"When optimizing pipe diameters, \"\n                    \"the `minimum_velocity` option should be set to zero.\"\n                )\n\n            if np.isfinite(options[\"maximum_temperature_der\"]) and np.isfinite(\n                options[\"maximum_flow_der\"]\n            ):\n                raise Exception(\n                    \"When optimizing pipe diameters, \"\n                    \"the `maximum_temperature_der` or `maximum_flow_der` should be infinite.\"\n                )\n\n        # Check that buffer information is logical and\n        # set the stored heat at t0 in the buffer(s) via bounds\n        if len(self.times()) > 2:\n            self.__check_buffer_values_and_set_bounds_at_t0()\n\n        self.__maximum_total_head_loss = self.__get_maximum_total_head_loss()\n\n    def heat_network_options(self):\n        r\"\"\"\n        Returns a dictionary of heat network specific options.\n\n        +--------------------------------------+-----------+-----------------------------+\n        | Option                               | Type      | Default value               |\n        +======================================+===========+=============================+\n        | ``minimum_pressure_far_point``       | ``float`` | ``1.0`` bar                 |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``maximum_temperature_der``          | ``float`` | ``2.0`` °C/hour             |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``maximum_flow_der``                 | ``float`` | ``np.inf`` m3/s/hour        |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``neglect_pipe_heat_losses``         | ``bool``  | ``False``                   |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``heat_loss_disconnected_pipe``      | ``bool``  | ``True``                    |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``minimum_velocity``                 | ``float`` | ``0.005`` m/s               |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``head_loss_option`` (inherited)     | ``enum``  | ``HeadLossOption.LINEAR``   |\n        +--------------------------------------+-----------+-----------------------------+\n        | ``minimize_head_losses`` (inherited) | ``bool``  | ``False``                   |\n        +--------------------------------------+-----------+-----------------------------+\n\n        The ``maximum_temperature_der`` gives the maximum temperature change\n        per hour. Similarly, the ``maximum_flow_der`` parameter gives the\n        maximum flow change per hour. These options together are used to\n        constrain the maximum heat change per hour allowed in the entire\n        network. Note the unit for flow is m3/s, but the change is expressed\n        on an hourly basis leading to the ``m3/s/hour`` unit.\n\n        The ``heat_loss_disconnected_pipe`` option decides whether a\n        disconnectable pipe has heat loss or not when it is disconnected on\n        that particular time step. By default, a pipe has heat loss even if\n        it is disconnected, as it would still contain relatively hot water in\n        reality. We also do not want minimization of heat production to lead\n        to overly disconnecting pipes. In some scenarios it is hydraulically\n        impossible to supply heat to these disconnected pipes (Q is forced to\n        zero), in which case this option can be set to ``False``.\n\n        The ``neglect_pipe_heat_losses`` option sets the heat loss in pipes to\n        zero. 
This can be useful when the insulation properties are unknown.\n        Note that other components can still have heat loss, e.g. a buffer.\n\n        The ``minimum_velocity`` is the minimum absolute value of the velocity\n        in every pipe. It is mostly an option to improve the stability of the\n        solver in a possibly subsequent QTH problem: the default value of\n        `0.005` m/s helps the solver by avoiding the difficult case where\n        discharges get close to zero.\n\n        Note that the inherited options ``head_loss_option`` and\n        ``minimize_head_losses`` are changed from their default values to\n        ``HeadLossOption.LINEAR`` and ``False`` respectively.\n        \"\"\"\n\n        options = super().heat_network_options()\n\n        options[\"minimum_pressure_far_point\"] = 1.0\n        options[\"maximum_temperature_der\"] = 2.0\n        options[\"maximum_flow_der\"] = np.inf\n        options[\"neglect_pipe_heat_losses\"] = False\n        options[\"heat_loss_disconnected_pipe\"] = True\n        options[\"minimum_velocity\"] = 0.005\n        options[\"head_loss_option\"] = HeadLossOption.LINEAR\n        options[\"minimize_head_losses\"] = False\n\n        return options\n\n    def pipe_classes(self, pipe: str) -> List[PipeClass]:\n        \"\"\"\n        Note that this method is only queried for _hot_ pipes. Their\n        respective cold pipes are assumed to have the exact same properties.\n\n        If the returned List is:\n        - empty: use the pipe properties from the model\n        - len() == 1: use these pipe properties to overrule that of the model\n        - len() > 1: decide between the pipe class options.\n\n        A pipe class with diameter 0 is interpreted as there being _no_ pipe.\n        \"\"\"\n        return []\n\n    def get_optimized_pipe_class(self, pipe: str) -> PipeClass:\n        \"\"\"\n        Return the optimized pipe class for a specific pipe. If no\n        optimized pipe class is available (yet), a `KeyError` is raised.\n        \"\"\"\n        return self.__pipe_topo_pipe_class_result[pipe]\n\n    def pipe_diameter_symbol_name(self, pipe: str) -> str:\n        return self.__pipe_topo_diameter_map[pipe]\n\n    def pipe_cost_symbol_name(self, pipe: str) -> str:\n        return self.__pipe_topo_cost_map[pipe]\n\n    @property\n    def extra_variables(self):\n        variables = super().extra_variables.copy()\n        variables.extend(self.__pipe_topo_diameter_var.values())\n        variables.extend(self.__pipe_topo_cost_var.values())\n        variables.extend(self.__pipe_topo_heat_loss_var.values())\n        variables.extend(self.__pipe_topo_pipe_class_var.values())\n        return variables\n\n    @property\n    def path_variables(self):\n        variables = super().path_variables.copy()\n        variables.extend(self.__flow_direct_var.values())\n        variables.extend(self.__pipe_disconnect_var.values())\n        variables.extend(self.__check_valve_status_var.values())\n        variables.extend(self.__control_valve_direction_var.values())\n        variables.extend(self._change_setpoint_var.values())\n        return variables\n\n    def variable_is_discrete(self, variable):\n        if (\n            variable in self.__flow_direct_var\n            or variable in self.__pipe_disconnect_var\n            or variable in self.__check_valve_status_var\n            or variable in self.__control_valve_direction_var\n            or variable in self.__pipe_topo_pipe_class_var\n            or variable in self._change_setpoint_var\n        ):\n            return True\n        else:\n            return super().variable_is_discrete(variable)\n\n    def variable_nominal(self, variable):\n        if variable in self.__pipe_topo_diameter_nominals:\n            return self.__pipe_topo_diameter_nominals[variable]\n        elif variable in self.__pipe_topo_heat_loss_nominals:\n            return self.__pipe_topo_heat_loss_nominals[variable]\n        elif variable in self.__pipe_topo_cost_nominals:\n            return self.__pipe_topo_cost_nominals[variable]\n        else:\n            return super().variable_nominal(variable)\n
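# An illustrative override of pipe_classes() as the docstring above describes
# (an addition for exposition, not part of the library). The names, sizes and
# especially the PipeClass constructor arguments are assumptions inferred from
# the attributes used elsewhere in this file (name, inner_diameter, u_values,
# investment_costs); check the real PipeClass definition before reusing this.
class MyHeatProblem(HeatMixin):
    def pipe_classes(self, pipe):
        if pipe.startswith("pipe_supply"):
            # Let the optimizer choose between two candidate diameters.
            return [
                PipeClass(name="DN100", inner_diameter=0.1071, u_values=(0.19, 0.32), investment_costs=1200.0),
                PipeClass(name="DN150", inner_diameter=0.1603, u_values=(0.23, 0.38), investment_costs=1800.0),
            ]
        return []  # everywhere else: keep the diameter from the model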
\n    def bounds(self):\n        bounds = super().bounds()\n        bounds.update(self.__flow_direct_bounds)\n        bounds.update(self.__pipe_disconnect_var_bounds)\n        bounds.update(self.__check_valve_status_var_bounds)\n        bounds.update(self.__control_valve_direction_var_bounds)\n        bounds.update(self.__buffer_t0_bounds)\n        bounds.update(self.__pipe_topo_pipe_class_var_bounds)\n        bounds.update(self.__pipe_topo_diameter_var_bounds)\n        bounds.update(self.__pipe_topo_cost_var_bounds)\n        bounds.update(self.__pipe_topo_heat_loss_var_bounds)\n        bounds.update(self.__pipe_topo_heat_discharge_bounds)\n        bounds.update(self._change_setpoint_bounds)\n        return bounds\n\n    def __pipe_heat_loss(\n        self, options, parameters, p: str, u_values: Optional[Tuple[float, float]] = None\n    ):\n        \"\"\"\n        The heat losses have three components:\n\n        - dependency on the pipe temperature\n        - dependency on the ground temperature\n        - dependency on temperature difference between the supply/return line.\n\n        This latter term assumes that the supply and return lines lie close\n        to, and thus influence, each other. I.e., the supply line loses heat\n        that is absorbed by the return line. Note that the term dtemp is\n        positive when the pipe is in the supply line and negative otherwise.\n        \"\"\"\n        if options[\"neglect_pipe_heat_losses\"]:\n            return 0.0\n\n        if u_values is None:\n            u_kwargs = {\n                \"inner_diameter\": parameters[f\"{p}.diameter\"],\n                \"insulation_thicknesses\": parameters[f\"{p}.insulation_thickness\"],\n                \"conductivities_insulation\": parameters[f\"{p}.conductivity_insulation\"],\n                \"conductivity_subsoil\": parameters[f\"{p}.conductivity_subsoil\"],\n                \"depth\": parameters[f\"{p}.depth\"],\n                \"h_surface\": parameters[f\"{p}.h_surface\"],\n                \"pipe_distance\": parameters[f\"{p}.pipe_pair_distance\"],\n            }\n\n            # NaN values mean we use the function default\n            u_kwargs = {k: v for k, v in u_kwargs.items() if not np.all(np.isnan(v))}\n            u_1, u_2 = heat_loss_u_values_pipe(**u_kwargs)\n        else:\n            u_1, u_2 = u_values\n\n        length = parameters[f\"{p}.length\"]\n        temperature = parameters[f\"{p}.temperature\"]\n        temperature_ground = parameters[f\"{p}.T_ground\"]\n        sign_dtemp = 1 if self.is_hot_pipe(p) else -1\n        dtemp = sign_dtemp * parameters[f\"{p}.dT\"]\n\n        heat_loss = (\n            length * (u_1 - u_2) * temperature\n            - (length * (u_1 - u_2) * temperature_ground)\n            + (length * u_2 * dtemp)\n        )\n\n        if heat_loss < 0:\n            raise Exception(f\"Heat loss of pipe {p} should be nonnegative.\")\n\n        return heat_loss\n\n    def parameters(self, ensemble_member):\n        parameters = super().parameters(ensemble_member)\n\n        # To avoid mistakes by accidentally using the `diameter`, `area` and `Heat_loss`\n        # parameters in e.g. constraints when those are variable, we set them\n        # to NaN in that case. In post(), they are set to their resulting\n        # values once again.\n        if self.__pipe_topo_diameter_area_parameters:\n            parameters.update(self.__pipe_topo_diameter_area_parameters[ensemble_member])\n        if self.__pipe_topo_heat_loss_parameters:\n            parameters.update(self.__pipe_topo_heat_loss_parameters[ensemble_member])\n\n        return parameters\n\n    def __get_maximum_total_head_loss(self):\n        \"\"\"\n        Get an upper bound on the maximum total head loss that can be used in\n        big-M formulations of e.g. check valves and disconnectable pipes.\n\n        There are multiple ways to calculate this upper bound, depending on\n        what options are set. 
We compute all these upper bounds, and return\n        the lowest one of them.\n        \"\"\"\n\n        options = self.heat_network_options()\n        components = self.heat_network_components\n\n        if options[\"head_loss_option\"] == HeadLossOption.NO_HEADLOSS:\n            # Undefined, and all constraints using this method's value should\n            # be skipped.\n            return np.nan\n\n        # Summing head loss in pipes\n        max_sum_dh_pipes = 0.0\n\n        for ensemble_member in range(self.ensemble_size):\n            parameters = self.parameters(ensemble_member)\n\n            head_loss = 0.0\n\n            for pipe in components[\"pipe\"]:\n                if self.is_cold_pipe(pipe):\n                    hot_pipe = self.cold_to_hot_pipe(pipe)\n                else:\n                    hot_pipe = pipe\n\n                try:\n                    pipe_classes = self.__pipe_topo_pipe_class_map[hot_pipe].keys()\n                    head_loss += max(\n                        self._hn_pipe_head_loss(\n                            pipe, options, parameters, pc.maximum_discharge, pipe_class=pc\n                        )\n                        for pc in pipe_classes\n                        if pc.maximum_discharge > 0.0\n                    )\n                except KeyError:\n                    area = parameters[f\"{pipe}.area\"]\n                    max_discharge = options[\"maximum_velocity\"] * area\n                    head_loss += self._hn_pipe_head_loss(pipe, options, parameters, max_discharge)\n\n            head_loss += options[\"minimum_pressure_far_point\"] * 10.2\n\n            max_sum_dh_pipes = max(max_sum_dh_pipes, head_loss)\n\n        # Maximum pressure difference allowed with user options\n        # NOTE: Does not yet take elevation differences into account\n        max_dh_network_options = (\n            options[\"pipe_maximum_pressure\"] - options[\"pipe_minimum_pressure\"]\n        ) * 10.2\n\n        return min(max_sum_dh_pipes, max_dh_network_options)\n\n    def __check_buffer_values_and_set_bounds_at_t0(self):\n        t = self.times()\n        # We assume that t0 is always equal to self.times()[0]\n        assert self.initial_time == self.times()[0]\n\n        parameters = self.parameters(0)\n        bounds = self.bounds()\n        components = self.heat_network_components\n        buffers = components.get(\"buffer\", [])\n\n        for b in buffers:\n            min_fract_vol = parameters[f\"{b}.min_fraction_tank_volume\"]\n            if min_fract_vol < 0.0 or min_fract_vol >= 1.0:\n                raise Exception(\n                    f\"Minimum fraction of tank capacity of {b} must be smaller \"\n                    \"than 1.0 and larger than or equal to 0.0\"\n                )\n\n            cp = parameters[f\"{b}.cp\"]\n            rho = parameters[f\"{b}.rho\"]\n            dt = parameters[f\"{b}.dT\"]\n            heat_t0 = parameters[f\"{b}.init_Heat\"]\n            vol_t0 = parameters[f\"{b}.init_V_hot_tank\"]\n            stored_heat = f\"{b}.Stored_heat\"\n            if not np.isnan(vol_t0) and not np.isnan(heat_t0):\n                raise Exception(\n                    f\"At most one of the initial heat and volume of {b} should be prescribed.\"\n                )\n\n            if np.isnan(heat_t0):\n                if not np.isnan(vol_t0):\n                    # Extract information from volume\n                    heat_t0 = vol_t0 * dt * cp * rho\n                else:\n                    # Set default value\n                    volume = parameters[f\"{b}.volume\"]\n                    default_vol_t0 = min_fract_vol * volume\n                    heat_t0 = default_vol_t0 * dt * cp * rho\n\n            # Check that volume/initial stored heat at t0 is within bounds\n            lb_heat, ub_heat = bounds[stored_heat]\n            lb_heat_t0 = np.inf\n            ub_heat_t0 = -np.inf\n            for bound in [lb_heat, ub_heat]:\n                assert not isinstance(\n                    bound, np.ndarray\n                ), f\"{b} stored heat cannot be a vector state\"\n                if isinstance(bound, Timeseries):\n                    bound_t0 = bound.values[0]\n                else:\n                    bound_t0 = bound\n                lb_heat_t0 = min(lb_heat_t0, bound_t0)\n                ub_heat_t0 = max(ub_heat_t0, bound_t0)\n\n            if heat_t0 < lb_heat_t0 or heat_t0 > ub_heat_t0:\n                raise Exception(f\"Initial heat of {b} is not within bounds.\")\n\n            # Set heat at t0\n            lb = np.full_like(t, -np.inf)\n            ub = np.full_like(t, np.inf)\n            lb[0] = heat_t0\n            ub[0] = heat_t0\n            b_t0 = (Timeseries(t, lb), Timeseries(t, ub))\n            self.__buffer_t0_bounds[stored_heat] = self.merge_bounds(bounds[stored_heat], b_t0)\n
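# A back-of-the-envelope check of the heat-rate bound used in the next method,
# with invented but plausible numbers (this sanity check is an addition, not
# part of the library):
cp, rho = 4186.0, 988.0        # J/(kg K), kg/m3
avg_t, avg_q = 75.0, 0.5       # degC, and m3/s (1 m/s average velocity in a 0.5 m2 pipe)
t_change, q_change = 2.0, 0.1  # degC/hour, m3/s/hour
heat_change = cp * rho * (t_change / 3600 * avg_q + q_change / 3600 * avg_t)
print(round(heat_change))      # ~9766, the allowed change of Heat per second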
\n    def __pipe_rate_heat_change_constraints(self, ensemble_member):\n        # To avoid a sudden change in heat from one timestep to the next,\n        # constraints on d(Heat)/dt are introduced.\n        # Information about restrictions on dQ/dt and dT/dt is used, as d(Heat)/dt is\n        # proportional to average_temperature * dQ/dt + average_discharge * dT/dt.\n        # The average discharge is computed using the assumption that the average velocity is 1.\n        constraints = []\n\n        parameters = self.parameters(ensemble_member)\n        hn_options = self.heat_network_options()\n\n        t_change = hn_options[\"maximum_temperature_der\"]\n        q_change = hn_options[\"maximum_flow_der\"]\n\n        if np.isfinite(t_change) and np.isfinite(q_change):\n            assert (\n                not self.__pipe_topo_pipe_class_map\n            ), \"heat rate change constraints not allowed with topology optimization\"\n\n            for p in self.hot_pipes:\n                variable = f\"{p}.HeatIn.Heat\"\n                dt = np.diff(self.times(variable))\n\n                canonical, sign = self.alias_relation.canonical_signed(variable)\n                source_temperature_out = sign * self.state_vector(canonical, ensemble_member)\n\n                # Maximum differences are expressed per hour. We scale appropriately.\n                cp = parameters[f\"{p}.cp\"]\n                rho = parameters[f\"{p}.rho\"]\n                area = parameters[f\"{p}.area\"]\n                avg_t = parameters[f\"{p}.temperature\"]\n                # Assumption: average velocity is 1 m/s\n                avg_v = 1\n                avg_q = avg_v * area\n                heat_change = cp * rho * (t_change / 3600 * avg_q + q_change / 3600 * avg_t)\n\n                if heat_change < 0:\n                    raise Exception(f\"Heat change of pipe {p} should be nonnegative.\")\n                elif not np.isfinite(heat_change):\n                    continue\n\n                var_cur = source_temperature_out[1:]\n                var_prev = source_temperature_out[:-1]\n                variable_nominal = self.variable_nominal(variable)\n\n                constraints.append(\n                    (\n                        var_cur - var_prev,\n                        -heat_change * dt / variable_nominal,\n                        heat_change * dt / variable_nominal,\n                    )\n                )\n\n        return constraints\n\n    def __node_heat_mixing_path_constraints(self, ensemble_member):\n        constraints = []\n\n        for node, connected_pipes in self.heat_network_topology.nodes.items():\n            heat_sum = 0.0\n            heat_nominals = []\n\n            for i_conn, (_pipe, orientation) in connected_pipes.items():\n                heat_conn = f\"{node}.HeatConn[{i_conn + 1}].Heat\"\n                heat_sum += orientation * self.state(heat_conn)\n                heat_nominals.append(self.variable_nominal(heat_conn))\n\n            heat_nominal = np.median(heat_nominals)\n            constraints.append((heat_sum / heat_nominal, 0.0, 0.0))\n\n        return constraints\n\n    def __node_discharge_mixing_path_constraints(self, ensemble_member):\n        constraints = []\n\n        for node, connected_pipes in self.heat_network_topology.nodes.items():\n            q_sum = 0.0\n            q_nominals = []\n\n            for i_conn, (_pipe, orientation) in connected_pipes.items():\n                q_conn = f\"{node}.HeatConn[{i_conn + 1}].Q\"\n                q_sum += orientation * self.state(q_conn)\n                q_nominals.append(self.variable_nominal(q_conn))\n\n            q_nominal = np.median(q_nominals)\n            constraints.append((q_sum / q_nominal, 0.0, 0.0))\n\n        return constraints\n\n    def __heat_loss_path_constraints(self, ensemble_member):\n        constraints = []\n        parameters = self.parameters(ensemble_member)\n        options = self.heat_network_options()\n\n        for p in self.cold_pipes:\n            heat_in = self.state(f\"{p}.HeatIn.Heat\")\n            heat_out = self.state(f\"{p}.HeatOut.Heat\")\n            heat_nominal = self.variable_nominal(f\"{p}.HeatOut.Heat\")\n\n            constraints.append(((heat_in - heat_out) / heat_nominal, 0.0, 0.0))\n\n        for p in self.hot_pipes:\n            heat_in = self.state(f\"{p}.HeatIn.Heat\")\n            heat_out = self.state(f\"{p}.HeatOut.Heat\")\n            heat_nominal 
= self.variable_nominal(f\"{p}.HeatIn.Heat\")\n\n is_disconnected_var = self.__pipe_disconnect_map.get(p)\n\n if is_disconnected_var is None:\n is_disconnected = 0.0\n else:\n is_disconnected = self.state(is_disconnected_var)\n\n if p in self.__pipe_topo_heat_losses:\n # Heat loss is variable depending on pipe class\n heat_loss_sym_name = self.__pipe_topo_heat_loss_map[p]\n heat_loss = self.__pipe_topo_heat_loss_var[heat_loss_sym_name]\n heat_loss_nominal = self.__pipe_topo_heat_loss_nominals[heat_loss_sym_name]\n constraint_nominal = (heat_nominal * heat_loss_nominal) ** 0.5\n\n if options[\"heat_loss_disconnected_pipe\"]:\n constraints.append(\n ((heat_in - heat_out - heat_loss) / constraint_nominal, 0.0, 0.0)\n )\n else:\n # Force heat loss to `heat_loss` when pipe is connected, and zero otherwise.\n big_m = 2 * max(self.__pipe_topo_heat_losses[p])\n heat_loss_nominal = self.__pipe_topo_heat_loss_nominals[heat_loss_sym_name]\n constraint_nominal = (heat_nominal * heat_loss_nominal) ** 0.5\n\n # Force heat loss to `heat_loss` when pipe is connected.\n constraints.append(\n (\n (heat_in - heat_out - heat_loss - is_disconnected * big_m)\n / constraint_nominal,\n -np.inf,\n 0.0,\n )\n )\n constraints.append(\n (\n (heat_in - heat_out - heat_loss + is_disconnected * big_m)\n / constraint_nominal,\n 0.0,\n np.inf,\n )\n )\n\n # Force heat loss to zero (heat_in = heat_out) when pipe is\n # disconnected. Note that heat loss is never less than zero, so\n # we can skip a Big-M formulation in the lower bound.\n constraints.append(\n (\n (heat_in - heat_out - (1 - is_disconnected) * big_m)\n / constraint_nominal,\n -np.inf,\n 0.0,\n )\n )\n constraints.append(\n (\n (heat_in - heat_out) / heat_nominal,\n 0.0,\n np.inf,\n )\n )\n else:\n # Heat loss is constant, i.e. does not depend on pipe class\n heat_loss = parameters[f\"{p}.Heat_loss\"]\n\n if options[\"heat_loss_disconnected_pipe\"]:\n constraints.append(((heat_in - heat_out - heat_loss) / heat_nominal, 0.0, 0.0))\n else:\n constraint_nominal = (heat_loss * heat_nominal) ** 0.5\n constraints.append(\n (\n (heat_in - heat_out - heat_loss * (1 - is_disconnected))\n / constraint_nominal,\n 0.0,\n 0.0,\n )\n )\n\n return constraints\n\n @staticmethod\n def __get_abs_max_bounds(*bounds):\n max_ = 0.0\n\n for b in bounds:\n if isinstance(b, np.ndarray):\n max_ = max(max_, max(abs(b)))\n elif isinstance(b, Timeseries):\n max_ = max(max_, max(abs(b.values)))\n else:\n max_ = max(max_, abs(b))\n\n return max_\n\n def __flow_direction_path_constraints(self, ensemble_member):\n constraints = []\n options = self.heat_network_options()\n parameters = self.parameters(ensemble_member)\n\n bounds = self.bounds()\n\n # These constraints are redundant with the discharge ones. 
However,\n # CBC tends to get confused and return significantly infeasible\n # results if we remove them.\n for p in self.hot_pipes:\n flow_dir_var = self.__pipe_to_flow_direct_map[p]\n\n heat_in = self.state(f\"{p}.HeatIn.Heat\")\n heat_out = self.state(f\"{p}.HeatOut.Heat\")\n flow_dir = self.state(flow_dir_var)\n\n heat_nominal = self.variable_nominal(f\"{p}.HeatIn.Heat\")\n\n big_m = self.__get_abs_max_bounds(\n *self.merge_bounds(bounds[f\"{p}.HeatIn.Heat\"], bounds[f\"{p}.HeatOut.Heat\"])\n )\n\n if not np.isfinite(big_m):\n raise Exception(f\"Heat in pipe {p} must be bounded\")\n\n constraint_nominal = (big_m * heat_nominal) ** 0.5\n\n # Fix flow direction\n constraints.append(((heat_in - big_m * flow_dir) / constraint_nominal, -np.inf, 0.0))\n constraints.append(\n ((heat_in + big_m * (1 - flow_dir)) / constraint_nominal, 0.0, np.inf)\n )\n\n # Flow direction is the same for In and Out. Note that this\n # ensures that the heat going in and/or out of a pipe is more than\n # its heat losses.\n constraints.append(((heat_out - big_m * flow_dir) / constraint_nominal, -np.inf, 0.0))\n constraints.append(\n ((heat_out + big_m * (1 - flow_dir)) / constraint_nominal, 0.0, np.inf)\n )\n\n if not options[\"heat_loss_disconnected_pipe\"]:\n # If this pipe is disconnected, the heat should be zero\n is_disconnected_var = self.__pipe_disconnect_map.get(p)\n\n if is_disconnected_var is not None:\n is_disconnected = self.state(is_disconnected_var)\n is_conn = 1 - is_disconnected\n\n # Note that big_m should now cover the range from [-max, max],\n # so we need to double it.\n big_m_dbl = 2 * big_m\n for heat in [heat_in, heat_out]:\n constraints.append(\n ((heat + big_m_dbl * is_conn) / constraint_nominal, 0.0, np.inf)\n )\n constraints.append(\n ((heat - big_m_dbl * is_conn) / constraint_nominal, -np.inf, 0.0)\n )\n\n minimum_velocity = options[\"minimum_velocity\"]\n maximum_velocity = options[\"maximum_velocity\"]\n\n if minimum_velocity > 0.0:\n assert (\n not self.__pipe_topo_pipe_class_map\n ), \"non-zero minimum velocity not allowed with topology optimization\"\n\n # Also ensure that the discharge has the same sign as the heat.\n for p in self.heat_network_components[\"pipe\"]:\n # FIXME: Enable heat in cold pipes as well.\n if self.is_cold_pipe(p):\n hot_pipe = self.cold_to_hot_pipe(p)\n else:\n hot_pipe = p\n\n flow_dir_var = self.__pipe_to_flow_direct_map[hot_pipe]\n flow_dir = self.state(flow_dir_var)\n\n is_disconnected_var = self.__pipe_disconnect_map.get(hot_pipe)\n\n if is_disconnected_var is None:\n is_disconnected = 0.0\n else:\n is_disconnected = self.state(is_disconnected_var)\n\n q_pipe = self.state(f\"{p}.Q\")\n\n try:\n pipe_classes = self.__pipe_topo_pipe_class_map[hot_pipe].keys()\n maximum_discharge = max(c.maximum_discharge for c in pipe_classes)\n minimum_discharge = 0.0\n except KeyError:\n maximum_discharge = maximum_velocity * parameters[f\"{p}.area\"]\n\n if math.isfinite(minimum_velocity) and minimum_velocity > 0.0:\n minimum_discharge = minimum_velocity * parameters[f\"{p}.area\"]\n else:\n minimum_discharge = 0.0\n\n big_m = maximum_discharge + minimum_discharge\n\n if minimum_discharge > 0.0 and is_disconnected_var is not None:\n constraint_nominal = (minimum_discharge * big_m) ** 0.5\n else:\n constraint_nominal = big_m\n\n constraints.append(\n (\n (q_pipe - big_m * flow_dir + (1 - is_disconnected) * minimum_discharge)\n / constraint_nominal,\n -np.inf,\n 0.0,\n )\n )\n constraints.append(\n (\n (q_pipe + big_m * (1 - flow_dir) - (1 - is_disconnected) * 
minimum_discharge)\n / constraint_nominal,\n 0.0,\n np.inf,\n )\n )\n\n # If a pipe is disconnected, the discharge should be zero\n if is_disconnected_var is not None:\n constraints.append(((q_pipe - (1 - is_disconnected) * big_m) / big_m, -np.inf, 0.0))\n\n constraints.append(((q_pipe + (1 - is_disconnected) * big_m) / big_m, 0.0, np.inf))\n\n # Pipes that are connected in series should have the same heat direction.\n for pipes in self.heat_network_topology.pipe_series:\n if len(pipes) <= 1:\n continue\n\n assert (\n len({p for p in pipes if self.is_cold_pipe(p)}) == 0\n ), \"Pipe series for Heat models should only contain hot pipes\"\n\n base_flow_dir_var = self.state(self.__pipe_to_flow_direct_map[pipes[0]])\n\n for p in pipes[1:]:\n flow_dir_var = self.state(self.__pipe_to_flow_direct_map[p])\n constraints.append((base_flow_dir_var - flow_dir_var, 0.0, 0.0))\n\n return constraints\n\n def __buffer_path_constraints(self, ensemble_member):\n constraints = []\n\n for b, ((_, hot_orient), (_, cold_orient)) in self.heat_network_topology.buffers.items():\n heat_nominal = self.variable_nominal(f\"{b}.HeatIn.Heat\")\n\n heat_in = self.state(f\"{b}.HeatIn.Heat\")\n heat_out = self.state(f\"{b}.HeatOut.Heat\")\n heat_hot = self.state(f\"{b}.HeatHot\")\n heat_cold = self.state(f\"{b}.HeatCold\")\n\n # Note that in the conventional scenario, where the hot pipe out-port is connected\n # to the buffer's in-port and the buffer's out-port is connected to the cold pipe\n # in-port, the orientation of the hot/cold pipe is 1/-1 respectively.\n constraints.append(((heat_hot - hot_orient * heat_in) / heat_nominal, 0.0, 0.0))\n constraints.append(((heat_cold + cold_orient * heat_out) / heat_nominal, 0.0, 0.0))\n\n return constraints\n\n def __demand_heat_to_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n\n for d in self.heat_network_components[\"demand\"]:\n heat_nominal = parameters[f\"{d}.Heat_nominal\"]\n q_nominal = self.variable_nominal(f\"{d}.Q\")\n cp = parameters[f\"{d}.cp\"]\n rho = parameters[f\"{d}.rho\"]\n dt = parameters[f\"{d}.dT\"]\n\n discharge = self.state(f\"{d}.Q\")\n heat_consumed = self.state(f\"{d}.Heat_demand\")\n\n constraint_nominal = (heat_nominal * cp * rho * dt * q_nominal) ** 0.5\n\n constraints.append(\n ((heat_consumed - cp * rho * dt * discharge) / constraint_nominal, 0.0, 0.0)\n )\n\n return constraints\n\n def __source_heat_to_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n\n for s in self.heat_network_components[\"source\"]:\n heat_nominal = parameters[f\"{s}.Heat_nominal\"]\n q_nominal = self.variable_nominal(f\"{s}.Q\")\n cp = parameters[f\"{s}.cp\"]\n rho = parameters[f\"{s}.rho\"]\n dt = parameters[f\"{s}.dT\"]\n\n discharge = self.state(f\"{s}.Q\")\n heat_production = self.state(f\"{s}.Heat_source\")\n\n constraint_nominal = (heat_nominal * cp * rho * dt * q_nominal) ** 0.5\n\n constraints.append(\n ((heat_production - cp * rho * dt * discharge) / constraint_nominal, 0.0, np.inf)\n )\n\n return constraints\n\n def __pipe_heat_to_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n\n sum_heat_losses = 0.0\n\n for p in self.hot_pipes:\n if p in self.__pipe_topo_heat_losses:\n sum_heat_losses += max(self.__pipe_topo_heat_losses[p])\n else:\n sum_heat_losses += parameters[f\"{p}.Heat_loss\"]\n\n assert not np.isnan(sum_heat_losses)\n\n for p in self.hot_pipes:\n cp 
= parameters[f\"{p}.cp\"]\n rho = parameters[f\"{p}.rho\"]\n dt = parameters[f\"{p}.dT\"]\n heat_to_discharge_fac = 1.0 / (cp * rho * dt)\n\n flow_dir_var = self.__pipe_to_flow_direct_map[p]\n flow_dir = self.state(flow_dir_var)\n scaled_heat_in = self.state(f\"{p}.HeatIn.Heat\") * heat_to_discharge_fac\n scaled_heat_out = self.state(f\"{p}.HeatOut.Heat\") * heat_to_discharge_fac\n pipe_q = self.state(f\"{p}.Q\")\n q_nominal = self.variable_nominal(f\"{p}.Q\")\n\n # We do not want Big M to be too tight in this case, as it results\n # in a rather hard yes/no constraint as far as feasibility on e.g.\n # a single source system is concerned. Use a factor of 2 to give\n # some slack.\n big_m = 2 * sum_heat_losses * heat_to_discharge_fac\n\n for heat in (scaled_heat_in, scaled_heat_out):\n if sum_heat_losses == 0:\n constraints.append(((heat - pipe_q) / q_nominal, 0.0, 0.0))\n else:\n assert big_m > 0.0\n constraints.append(\n ((heat - pipe_q + big_m * (1 - flow_dir)) / big_m, 0.0, np.inf)\n )\n constraints.append(((heat - pipe_q - big_m * flow_dir) / big_m, -np.inf, 0.0))\n\n return constraints\n\n def __buffer_heat_to_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n bounds = self.bounds()\n\n for b, ((hot_pipe, hot_pipe_orientation), _) in self.heat_network_topology.buffers.items():\n heat_nominal = parameters[f\"{b}.Heat_nominal\"]\n q_nominal = self.variable_nominal(f\"{b}.Q\")\n cp = parameters[f\"{b}.cp\"]\n rho = parameters[f\"{b}.rho\"]\n dt = parameters[f\"{b}.dT\"]\n\n discharge = self.state(f\"{b}.HeatIn.Q\") * hot_pipe_orientation\n # Note that `heat_consumed` can be negative for the buffer; in that case we\n # are extracting heat from it.\n heat_consumed = self.state(f\"{b}.Heat_buffer\")\n\n # We want an _equality_ constraint between discharge and heat if the buffer is\n # consuming (i.e. behaving like a \"demand\"). We want an _inequality_\n # constraint (`|heat| >= |f(Q)|`) just like a \"source\" component if heat is\n # extracted from the buffer. We accomplish this by disabling one of\n # the constraints with a boolean. 
Note that `discharge` and `heat_consumed`\n            # are guaranteed to have the same sign.\n            flow_dir_var = self.__pipe_to_flow_direct_map[hot_pipe]\n            is_buffer_charging = hot_pipe_orientation * self.state(flow_dir_var)\n\n            big_m = self.__get_abs_max_bounds(\n                *self.merge_bounds(bounds[f\"{b}.HeatIn.Heat\"], bounds[f\"{b}.HeatOut.Heat\"])\n            )\n\n            coefficients = [heat_nominal, cp * rho * dt * q_nominal, big_m]\n            constraint_nominal = (min(coefficients) * max(coefficients)) ** 0.5\n            constraints.append(\n                (\n                    (heat_consumed - cp * rho * dt * discharge + (1 - is_buffer_charging) * big_m)\n                    / constraint_nominal,\n                    0.0,\n                    np.inf,\n                )\n            )\n\n            constraint_nominal = (heat_nominal * cp * rho * dt * q_nominal) ** 0.5\n            constraints.append(\n                ((heat_consumed - cp * rho * dt * discharge) / constraint_nominal, -np.inf, 0.0)\n            )\n\n        return constraints\n\n    def __setpoint_constraint(self, ensemble_member, component_name, windowsize, setpointchanges):\n        r\"\"\"Constraints that allow the setpoint to switch only once every n time steps.\n        A component can only switch its setpoint once every `windowsize` time steps.\n        The constraint is applied at every time step after the first one [from i=1].\n\n        Inspect example curve below for understanding of dHeat/dt for\n        windowsize 12 with a time domain of 35 timesteps.\n\n        Heat\n        d                                 *-------*\n        c                   *-------*\n        b    *\n        a  *---*---*                 *-------*\n\n\n        i  0   1   2  3     4       16  17   29  30  35\n        \"\"\"\n        assert windowsize <= len(self.times())\n        assert windowsize > 0\n        assert windowsize % 1 == 0\n        assert component_name in sum(self.heat_network_components.values(), [])\n\n        # Find the component type\n        comp_type = next(\n            iter(\n                [\n                    comptype\n                    for comptype, compnames in self.heat_network_components.items()\n                    for compname in compnames\n                    if compname == component_name\n                ]\n            )\n        )\n\n        constraints = []\n        times = self.times()\n        control_vars = map_comp_type_to_control_variable[comp_type]\n        if not isinstance(control_vars, list):\n            control_vars = [control_vars]\n\n        for var_name in control_vars:\n            # Retrieve the relevant variable names\n            variable_name = f\"{component_name}{var_name}\"\n            var_name_setpoint = self._component_to_change_setpoint_map[component_name]\n\n            # Get the timewise symbolic variables of the control variable (e.g. Heat_source)\n            sym_var = self.__state_vector_scaled(variable_name, ensemble_member)\n\n            # Get the timewise symbolic variables of the setpoint\n            canonical, sign = self.alias_relation.canonical_signed(var_name_setpoint)\n            setpoint_is_free = sign * self.state_vector(canonical, ensemble_member)\n\n            # d/dt expression: difference between consecutive time steps\n            backward_heat_rate_expression = sym_var[:-1] - sym_var[1:]\n\n            # Compute threshold for what is considered a change in setpoint\n            big_m = 4.0 * max(self.bounds()[variable_name])\n            # Constraint which fixes whether the variable is allowed to switch or not,\n            # with a sliding window shifting one timestep at a time.\n            # Sum the binary variables in the window. 
The sum should be <= 1, as\n            # only one of the binary variables is allowed to represent a\n            # switch in operations.\n            for i in range(math.floor(windowsize / 2.0), len(times) - math.floor(windowsize / 2.0)):\n                if i < math.floor(windowsize / 2):\n                    # Start of optim domain\n                    start_idx = 0\n                    end_idx = i + math.ceil(windowsize / 2)\n                elif i >= (len(times) - math.ceil(windowsize / 2)):\n                    # End of optim domain\n                    start_idx = i - math.ceil(windowsize / 2)\n                    end_idx = setpoint_is_free.shape[0] - 1\n                else:\n                    # All windows in between\n                    start_idx = i - math.floor(windowsize / 2)\n                    end_idx = i + math.ceil(windowsize / 2)\n\n                expression = 0.0\n                for j in range(start_idx, end_idx + 1):\n                    expression = expression + setpoint_is_free[j]\n                # This constraint enforces that at most `setpointchanges` timesteps in\n                # the sliding window can have setpoint_is_free=1. In combination with the\n                # constraints further down in this function we ensure the desired\n                # behavior of limited setpoint changes.\n                constraints.append(((setpointchanges - expression), 0.0, np.inf))\n\n            # Constraints for the allowed heat rate of the component.\n            # Two constraints are added which, depending on the value of the\n            # setpoint_is_free variable, do or do not constrain the value of the\n            # backward_heat_rate_expression. In other words, the discrete variable\n            # determines whether the constraint has to hold or not.\n\n            # Note: the equations are not applied at t0\n\n            # NOTE: we start from 2 so as not to constrain the derivative at t0\n            for i in range(2, len(times)):\n                # Constraining setpoint_is_free to 1 when the value of\n                # backward_heat_rate_expression < 0; otherwise\n                # setpoint_is_free's value can be 0 or 1\n                constraints.append(\n                    (\n                        (backward_heat_rate_expression[i - 1] + setpoint_is_free[i] * big_m)\n                        / big_m,\n                        0.0,\n                        np.inf,\n                    )\n                )\n                # Constraining setpoint_is_free to 1 when the value of\n                # backward_heat_rate_expression > 0; otherwise\n                # setpoint_is_free's value can be 0 or 1\n                constraints.append(\n                    (\n                        (backward_heat_rate_expression[i - 1] - setpoint_is_free[i] * big_m)\n                        / big_m,\n                        -np.inf,\n                        0.0,\n                    )\n                )\n\n        return constraints\n\n    def __heat_exchanger_heat_to_discharge_path_constraints(self, ensemble_member):\n        constraints = []\n        parameters = self.parameters(ensemble_member)\n\n        # We apply an equality constraint to the primary side, which essentially consumes heat\n        # from the primary side network. For the secondary side we apply an inequality constraint\n        # to allow the heat to be larger than what is required for the discharge. 
This leaves room to\n        # compensate for heat losses in the pipes.\n        for heat_exchanger in [\n            *self.heat_network_components.get(\"heat_exchanger\", []),\n            *self.heat_network_components.get(\"heat_pump\", []),\n        ]:\n            q_nominal_prim = self.variable_nominal(f\"{heat_exchanger}.Primary.HeatIn.Q\")\n            q_nominal_sec = self.variable_nominal(f\"{heat_exchanger}.Secondary.HeatOut.Q\")\n            cp_prim = parameters[f\"{heat_exchanger}.Primary.cp\"]\n            rho_prim = parameters[f\"{heat_exchanger}.Primary.rho\"]\n            cp_sec = parameters[f\"{heat_exchanger}.Secondary.cp\"]\n            rho_sec = parameters[f\"{heat_exchanger}.Secondary.rho\"]\n            dt_prim = parameters[f\"{heat_exchanger}.Primary.dT\"]\n            dt_sec = parameters[f\"{heat_exchanger}.Secondary.dT\"]\n            discharge_primary = self.state(f\"{heat_exchanger}.Primary.HeatIn.Q\")\n            discharge_secondary = self.state(f\"{heat_exchanger}.Secondary.HeatOut.Q\")\n            heat_primary = self.state(f\"{heat_exchanger}.Primary_heat\")\n            heat_secondary = self.state(f\"{heat_exchanger}.Secondary_heat\")\n            constraint_nominal = cp_prim * rho_prim * dt_prim * q_nominal_prim\n            constraints.append(\n                (\n                    (heat_primary - cp_prim * rho_prim * dt_prim * discharge_primary)\n                    / constraint_nominal,\n                    0.0,\n                    0.0,\n                )\n            )\n            constraint_nominal = cp_sec * rho_sec * dt_sec * q_nominal_sec\n            constraints.append(\n                (\n                    (heat_secondary - cp_sec * rho_sec * dt_sec * discharge_secondary)\n                    / constraint_nominal,\n                    0.0,\n                    np.inf,\n                )\n            )\n\n        return constraints\n\n    def __state_vector_scaled(self, variable, ensemble_member):\n        canonical, sign = self.alias_relation.canonical_signed(variable)\n        return (\n            self.state_vector(canonical, ensemble_member) * self.variable_nominal(canonical) * sign\n        )\n\n    def _hn_pipe_nominal_discharge(self, heat_network_options, parameters, pipe: str) -> float:\n        if self.is_cold_pipe(pipe):\n            hot_pipe = self.cold_to_hot_pipe(pipe)\n        else:\n            hot_pipe = pipe\n\n        try:\n            pipe_classes = self.__pipe_topo_pipe_class_map[hot_pipe].keys()\n            area = np.median([c.area for c in pipe_classes])\n        except KeyError:\n            area = parameters[f\"{pipe}.area\"]\n\n        return area * heat_network_options[\"estimated_velocity\"]\n\n    @staticmethod\n    def _hn_get_pipe_head_loss_option(pipe, heat_network_options, parameters):\n        head_loss_option = heat_network_options[\"head_loss_option\"]\n\n        if head_loss_option == HeadLossOption.LINEAR and parameters[f\"{pipe}.has_control_valve\"]:\n            # If there is a control valve present, we use the more accurate\n            # Darcy-Weisbach inequality formulation.\n            head_loss_option = HeadLossOption.LINEARIZED_DW\n\n        return head_loss_option\n\n    def _hn_pipe_head_loss_constraints(self, ensemble_member):\n        constraints = []\n\n        options = self.heat_network_options()\n        parameters = self.parameters(ensemble_member)\n        components = self.heat_network_components\n\n        # Set the head loss according to the direction in the pipes. Note that\n        # the `.__head_loss` symbol is always positive by definition, but that\n        # `.dH` is not (positive when flow is negative, and vice versa).\n        # If the pipe is disconnected, we leave the .__head_loss symbol free\n        # (and it has no physical meaning). We also do not set any discharge\n        # relationship in this case (but dH is still equal to Out - In of\n        # course).\n        for pipe in components[\"pipe\"]:\n            if parameters[f\"{pipe}.length\"] == 0.0:\n                # If the pipe does not have a control valve, the head loss is\n                # forced to zero via bounds. 
If the pipe _does_ have a control\n # valve, then there still is no relationship between the\n # discharge and the head loss/dH.\n continue\n\n if self.is_cold_pipe(pipe):\n hot_pipe = self.cold_to_hot_pipe(pipe)\n else:\n hot_pipe = pipe\n\n head_loss_sym = self._hn_pipe_to_head_loss_map[pipe]\n\n dh = self.__state_vector_scaled(f\"{pipe}.dH\", ensemble_member)\n head_loss = self.__state_vector_scaled(head_loss_sym, ensemble_member)\n discharge = self.__state_vector_scaled(f\"{pipe}.Q\", ensemble_member)\n\n # We need to make sure the dH is decoupled from the discharge when\n # the pipe is disconnected. Simply put, this means making the\n # below constraints trivial.\n is_disconnected_var = self.__pipe_disconnect_map.get(hot_pipe)\n\n if is_disconnected_var is None:\n is_disconnected = 0.0\n else:\n is_disconnected = self.__state_vector_scaled(is_disconnected_var, ensemble_member)\n\n max_discharge = None\n max_head_loss = -np.inf\n\n if hot_pipe in self.__pipe_topo_pipe_class_map:\n # Multiple diameter options for this pipe\n pipe_classes = self.__pipe_topo_pipe_class_map[hot_pipe]\n max_discharge = max(c.maximum_discharge for c in pipe_classes)\n\n for pc, pc_var_name in pipe_classes.items():\n if pc.inner_diameter == 0.0:\n continue\n\n head_loss_max_discharge = self._hn_pipe_head_loss(\n pipe, options, parameters, max_discharge, pipe_class=pc\n )\n\n big_m = max(1.1 * self.__maximum_total_head_loss, 2 * head_loss_max_discharge)\n\n is_topo_disconnected = 1 - self.extra_variable(pc_var_name, ensemble_member)\n is_topo_disconnected = ca.repmat(is_topo_disconnected, dh.size1())\n\n # Note that we add the two booleans `is_disconnected` and\n # `is_topo_disconnected`. This is allowed because of the way the\n # resulting expression is used in the Big-M formulation. We only care\n # that the expression (i.e. a single boolean or the sum of the two\n # booleans) is either 0 when the pipe is connected, or >= 1 when it\n # is disconnected.\n constraints.extend(\n self._hn_pipe_head_loss(\n pipe,\n options,\n parameters,\n discharge,\n head_loss,\n dh,\n is_disconnected + is_topo_disconnected,\n big_m,\n pc,\n )\n )\n\n # Contrary to the Big-M calculation above, the relation\n # between dH and the head loss symbol requires the\n # maximum head loss that can be realized effectively. So\n # we pass the current pipe class's maximum discharge.\n max_head_loss = max(\n max_head_loss,\n self._hn_pipe_head_loss(\n pipe, options, parameters, pc.maximum_discharge, pipe_class=pc\n ),\n )\n else:\n # Only a single diameter for this pipe. Note that we rely on\n # the diameter parameter being overridden automatically if a\n # single pipe class is set by the user.\n area = parameters[f\"{pipe}.area\"]\n max_discharge = options[\"maximum_velocity\"] * area\n\n is_topo_disconnected = int(parameters[f\"{pipe}.diameter\"] == 0.0)\n\n constraints.extend(\n self._hn_pipe_head_loss(\n pipe,\n options,\n parameters,\n discharge,\n head_loss,\n dh,\n is_disconnected + is_topo_disconnected,\n 1.1 * self.__maximum_total_head_loss,\n )\n )\n\n max_head_loss = self._hn_pipe_head_loss(pipe, options, parameters, max_discharge)\n\n # Relate the head loss symbol to the pipe's dH symbol.\n\n # FIXME: Ugly hack. Cold pipes should be modelled completely with\n # their own integers as well.\n flow_dir = self.__state_vector_scaled(\n self.__pipe_to_flow_direct_map[hot_pipe], ensemble_member\n )\n\n # Note that the Big-M should _at least_ cover the maximum\n # distance between `head_loss` and `dh`. 
If `head_loss` can be at\n # most 1.0 (= `max_head_loss`), that means our Big-M should be at\n # least double (i.e. >= 2.0). And because we do not want Big-Ms to\n # be overly tight, we include an additional factor of 2.\n big_m = 2 * 2 * max_head_loss\n\n constraints.append(\n (\n (-dh - head_loss + (1 - flow_dir) * big_m) / big_m,\n 0.0,\n np.inf,\n )\n )\n constraints.append(((dh - head_loss + flow_dir * big_m) / big_m, 0.0, np.inf))\n\n return constraints\n\n def __check_valve_head_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n options = self.heat_network_options()\n\n all_pipes = set(self.heat_network_components[\"pipe\"])\n maximum_velocity = options[\"maximum_velocity\"]\n\n for v in self.heat_network_components.get(\"check_valve\", []):\n status_var = self.__check_valve_status_map[v]\n status = self.state(status_var)\n\n q = self.state(f\"{v}.Q\")\n dh = self.state(f\"{v}.dH\")\n\n # Determine the maximum discharge that can go through the Valve\n # by looking at connected pipes.\n q_aliases = self.alias_relation.aliases(q.name())\n connected_pipes = {p for p in all_pipes if f\"{p}.Q\" in q_aliases}\n\n maximum_discharge = 0.0\n\n for p in connected_pipes:\n try:\n pipe_classes = self.__pipe_topo_pipe_class_map[p].keys()\n max_discharge_pipe = max(c.maximum_discharge for c in pipe_classes)\n except KeyError:\n max_discharge_pipe = maximum_velocity * parameters[f\"{p}.area\"]\n\n maximum_discharge = max(maximum_discharge, max_discharge_pipe)\n\n maximum_head_loss = self.__maximum_total_head_loss\n\n # (Ideal) check valve status:\n # - 1 means \"open\", so positive discharge, and dH = 0\n # - 0 means \"closed\", so Q = 0 and positive dH\n # Note that the Q >= 0 and dH >= 0 constraints are part of the bounds.\n constraints.append((q - status * maximum_discharge, -np.inf, 0.0))\n\n if options[\"head_loss_option\"] != HeadLossOption.NO_HEADLOSS:\n constraints.append((dh - (1 - status) * maximum_head_loss, -np.inf, 0.0))\n\n return constraints\n\n def __control_valve_head_discharge_path_constraints(self, ensemble_member):\n constraints = []\n parameters = self.parameters(ensemble_member)\n options = self.heat_network_options()\n\n all_pipes = set(self.heat_network_components[\"pipe\"])\n maximum_velocity = options[\"maximum_velocity\"]\n\n for v in self.heat_network_components.get(\"control_valve\", []):\n flow_dir_var = self.__control_valve_direction_map[v]\n flow_dir = self.state(flow_dir_var)\n\n q = self.state(f\"{v}.Q\")\n dh = self.state(f\"{v}.dH\")\n\n # Determine the maximum discharge that can go through the Valve\n # by looking at connected pipes.\n q_aliases = self.alias_relation.aliases(q.name())\n connected_pipes = {p for p in all_pipes if f\"{p}.Q\" in q_aliases}\n\n maximum_discharge = 0.0\n\n for p in connected_pipes:\n try:\n pipe_classes = self.__pipe_topo_pipe_class_map[p].keys()\n max_discharge_pipe = max(c.maximum_discharge for c in pipe_classes)\n except KeyError:\n max_discharge_pipe = maximum_velocity * parameters[f\"{p}.area\"]\n\n maximum_discharge = max(maximum_discharge, max_discharge_pipe)\n\n maximum_head_loss = self.__maximum_total_head_loss\n\n # Flow direction:\n # - 1 means positive discharge, and negative dH\n # - 0 means negative discharge, and positive dH\n # It's a control valve, so the dH is of arbitrary magnitude.\n constraints.append((q + (1 - flow_dir) * maximum_discharge, 0.0, np.inf))\n constraints.append((q - flow_dir * maximum_discharge, -np.inf, 0.0))\n\n if 
options[\"head_loss_option\"] != HeadLossOption.NO_HEADLOSS:\n constraints.append((-dh + (1 - flow_dir) * maximum_head_loss, 0.0, np.inf))\n constraints.append((-dh - flow_dir * maximum_head_loss, -np.inf, 0.0))\n\n return constraints\n\n def __pipe_topology_constraints(self, ensemble_member):\n constraints = []\n\n for p, pipe_classes in self.__pipe_topo_pipe_class_map.items():\n v = []\n for pc_var_name in pipe_classes.values():\n v.append(self.extra_variable(pc_var_name, ensemble_member))\n\n # Make sure exactly one indicator is true\n constraints.append((sum(v), 1.0, 1.0))\n\n # Match the indicators to the diameter symbol\n diam_sym_name = self.__pipe_topo_diameter_map[p]\n diam_sym = self.extra_variable(diam_sym_name, ensemble_member)\n\n cost_sym_name = self.__pipe_topo_cost_map[p]\n cost_sym = self.extra_variable(cost_sym_name, ensemble_member)\n\n diameters = [c.inner_diameter for c in pipe_classes.keys()]\n investment_costs = [c.investment_costs for c in pipe_classes.keys()]\n\n diam_expr = sum(s * d for s, d in zip(v, diameters))\n constraint_nominal = self.variable_nominal(diam_sym_name)\n\n costs_expr = sum(s * d for s, d in zip(v, investment_costs))\n costs_constraint_nominal = self.variable_nominal(cost_sym_name)\n\n constraints.append(((diam_sym - diam_expr) / constraint_nominal, 0.0, 0.0))\n\n constraints.append(((cost_sym - costs_expr) / costs_constraint_nominal, 0.0, 0.0))\n\n for p, heat_losses in self.__pipe_topo_heat_losses.items():\n assert self.is_hot_pipe(p)\n\n pipe_classes = self.__pipe_topo_pipe_class_map[p]\n v = []\n for pc_var_name in pipe_classes.values():\n v.append(self.extra_variable(pc_var_name, ensemble_member))\n\n heat_loss_sym_name = self.__pipe_topo_heat_loss_map[p]\n heat_loss_sym = self.extra_variable(heat_loss_sym_name, ensemble_member)\n\n heat_loss_expr = sum(s * h for s, h in zip(v, heat_losses))\n constraint_nominal = self.variable_nominal(heat_loss_sym_name)\n\n constraints.append(((heat_loss_sym - heat_loss_expr) / constraint_nominal, 0.0, 0.0))\n\n return constraints\n\n def __pipe_topology_path_constraints(self, ensemble_member):\n constraints = []\n\n # Clip discharge based on pipe class\n for p, pipe_classes in self.__pipe_topo_pipe_class_map.items():\n v = []\n for var_name in pipe_classes.values():\n v.append(self.__pipe_topo_pipe_class_var[var_name])\n\n # Match the indicators to the discharge symbol(s)\n discharge_sym_hot = self.state(f\"{p}.Q\")\n discharge_sym_cold = self.state(f\"{self.hot_to_cold_pipe(p)}.Q\")\n\n maximum_discharges = [c.maximum_discharge for c in pipe_classes.keys()]\n\n max_discharge_expr = sum(s * d for s, d in zip(v, maximum_discharges))\n constraint_nominal = self.variable_nominal(f\"{p}.Q\")\n\n constraints.append(\n ((discharge_sym_hot - max_discharge_expr) / constraint_nominal, -np.inf, 0.0)\n )\n constraints.append(\n ((discharge_sym_hot + max_discharge_expr) / constraint_nominal, 0.0, np.inf)\n )\n\n constraints.append(\n ((discharge_sym_cold - max_discharge_expr) / constraint_nominal, -np.inf, 0.0)\n )\n constraints.append(\n ((discharge_sym_cold + max_discharge_expr) / constraint_nominal, 0.0, np.inf)\n )\n\n return constraints\n\n def path_constraints(self, ensemble_member):\n constraints = super().path_constraints(ensemble_member)\n\n constraints.extend(self.__node_heat_mixing_path_constraints(ensemble_member))\n constraints.extend(self.__heat_loss_path_constraints(ensemble_member))\n constraints.extend(self.__flow_direction_path_constraints(ensemble_member))\n 
constraints.extend(self.__buffer_path_constraints(ensemble_member))\n constraints.extend(self.__node_discharge_mixing_path_constraints(ensemble_member))\n constraints.extend(self.__demand_heat_to_discharge_path_constraints(ensemble_member))\n constraints.extend(self.__source_heat_to_discharge_path_constraints(ensemble_member))\n constraints.extend(self.__pipe_heat_to_discharge_path_constraints(ensemble_member))\n constraints.extend(self.__buffer_heat_to_discharge_path_constraints(ensemble_member))\n constraints.extend(\n self.__heat_exchanger_heat_to_discharge_path_constraints(ensemble_member)\n )\n constraints.extend(self.__check_valve_head_discharge_path_constraints(ensemble_member))\n constraints.extend(self.__control_valve_head_discharge_path_constraints(ensemble_member))\n constraints.extend(self.__pipe_topology_path_constraints(ensemble_member))\n\n return constraints\n\n def constraints(self, ensemble_member):\n constraints = super().constraints(ensemble_member)\n\n constraints.extend(self.__pipe_rate_heat_change_constraints(ensemble_member))\n constraints.extend(self.__pipe_topology_constraints(ensemble_member))\n\n for component_name, params in self._timed_setpoints.items():\n constraints.extend(\n self.__setpoint_constraint(ensemble_member, component_name, params[0], params[1])\n )\n\n return constraints\n\n def history(self, ensemble_member):\n history = super().history(ensemble_member)\n\n initial_time = np.array([self.initial_time])\n empty_timeseries = Timeseries(initial_time, [np.nan])\n buffers = self.heat_network_components.get(\"buffer\", [])\n\n for b in buffers:\n hist_heat_buffer = history.get(f\"{b}.Heat_buffer\", empty_timeseries).values\n hist_stored_heat = history.get(f\"{b}.Stored_heat\", empty_timeseries).values\n\n # One has to provide information of what Heat_buffer (i.e., the heat\n # added/extracted from the buffer at that timestep) is at t0.\n # Else the solution will always extract heat from the buffer at t0.\n # This information can be passed in two ways:\n # - non-trivial history of Heat_buffer at t0;\n # - non-trivial history of Stored_heat.\n # If not known, we assume that Heat_buffer is 0.0 at t0.\n\n if (len(hist_heat_buffer) < 1 or np.isnan(hist_heat_buffer[0])) and (\n len(hist_stored_heat) <= 1 or np.any(np.isnan(hist_stored_heat[-2:]))\n ):\n history[f\"{b}.Heat_buffer\"] = Timeseries(initial_time, [0.0])\n\n # TODO: add ATES when component is available\n\n return history\n\n def goal_programming_options(self):\n options = super().goal_programming_options()\n options[\"keep_soft_constraints\"] = True\n return options\n\n def solver_options(self):\n options = super().solver_options()\n options[\"casadi_solver\"] = \"qpsol\"\n options[\"solver\"] = \"cbc\"\n return options\n\n def compiler_options(self):\n options = super().compiler_options()\n options[\"resolve_parameter_values\"] = True\n return options\n\n def __pipe_class_to_results(self):\n for ensemble_member in range(self.ensemble_size):\n results = self.extract_results(ensemble_member)\n\n for pipe in self.hot_pipes:\n pipe_classes = self.pipe_classes(pipe)\n\n if not pipe_classes:\n continue\n elif len(pipe_classes) == 1:\n pipe_class = pipe_classes[0]\n else:\n pipe_class = next(\n c\n for c, s in self.__pipe_topo_pipe_class_map[pipe].items()\n if round(results[s][0]) == 1.0\n )\n\n for p in [pipe, self.hot_to_cold_pipe(pipe)]:\n self.__pipe_topo_pipe_class_result[p] = pipe_class\n\n def __pipe_diameter_to_parameters(self):\n for ensemble_member in range(self.ensemble_size):\n d = 
self.__pipe_topo_diameter_area_parameters[ensemble_member]\n for pipe in self.__pipe_topo_pipe_class_map:\n pipe_class = self.get_optimized_pipe_class(pipe)\n\n for p in [pipe, self.hot_to_cold_pipe(pipe)]:\n d[f\"{p}.diameter\"] = pipe_class.inner_diameter\n d[f\"{p}.area\"] = pipe_class.area\n\n def __pipe_heat_loss_to_parameters(self):\n options = self.heat_network_options()\n\n for ensemble_member in range(self.ensemble_size):\n parameters = self.parameters(ensemble_member)\n\n h = self.__pipe_topo_heat_loss_parameters[ensemble_member]\n for pipe in self.__pipe_topo_heat_losses:\n pipe_class = self.get_optimized_pipe_class(pipe)\n\n cold_pipe = self.hot_to_cold_pipe(pipe)\n\n for p in [pipe, cold_pipe]:\n h[f\"{p}.Heat_loss\"] = self.__pipe_heat_loss(\n options, parameters, p, pipe_class.u_values\n )\n\n def priority_completed(self, priority):\n options = self.heat_network_options()\n\n self.__pipe_class_to_results()\n\n # The head loss mixin wants to do some check for the head loss\n # minimization priority that involves the diameter/area. We assume\n # that we're sort of done minimizing/choosing the pipe diameter, and\n # that we can set the parameters to the optimized values.\n if (\n options[\"minimize_head_losses\"]\n and options[\"head_loss_option\"] != HeadLossOption.NO_HEADLOSS\n and priority == self._hn_minimization_goal_class.priority\n ):\n self.__pipe_diameter_to_parameters()\n\n super().priority_completed(priority)\n\n def post(self):\n super().post()\n\n self.__pipe_class_to_results()\n self.__pipe_diameter_to_parameters()\n self.__pipe_heat_loss_to_parameters()\n\n results = self.extract_results()\n parameters = self.parameters(0)\n options = self.heat_network_options()\n\n # The flow directions are the same as the heat directions if the\n # return (i.e. cold) line has zero heat throughout. Here we check that\n # this is indeed the case.\n for p in self.cold_pipes:\n heat_in = results[f\"{p}.HeatIn.Heat\"]\n heat_out = results[f\"{p}.HeatOut.Heat\"]\n if np.any(heat_in > 1.0) or np.any(heat_out > 1.0):\n logger.warning(f\"Heat directions of pipes might be wrong. 
Check {p}.\")\n\n if options[\"head_loss_option\"] != HeadLossOption.NO_HEADLOSS:\n for p in self.heat_network_components[\"pipe\"]:\n head_diff = results[f\"{p}.HeatIn.H\"] - results[f\"{p}.HeatOut.H\"]\n if parameters[f\"{p}.length\"] == 0.0 and not parameters[f\"{p}.has_control_valve\"]:\n atol = self.variable_nominal(f\"{p}.HeatIn.H\") * 1e-5\n assert np.allclose(head_diff, 0.0, atol=atol)\n else:\n q = results[f\"{p}.Q\"]\n\n if self.is_cold_pipe(p):\n hot_pipe = self.cold_to_hot_pipe(p)\n else:\n hot_pipe = p\n\n try:\n is_disconnected = np.round(results[self.__pipe_disconnect_map[hot_pipe]])\n except KeyError:\n is_disconnected = np.zeros_like(q)\n\n q_nominal = self.variable_nominal(\n self.alias_relation.canonical_signed(f\"{p}.Q\")[0]\n )\n inds = (np.abs(q) / q_nominal > 1e-4) & (is_disconnected == 0)\n assert np.all(np.sign(head_diff[inds]) == np.sign(q[inds]))\n\n minimum_velocity = options[\"minimum_velocity\"]\n for p in self.heat_network_components[\"pipe\"]:\n if self.is_cold_pipe(p):\n hot_pipe = self.cold_to_hot_pipe(p)\n else:\n hot_pipe = p\n area = parameters[f\"{p}.area\"]\n\n if area == 0.0:\n continue\n\n q = results[f\"{p}.Q\"]\n v = q / area\n flow_dir = np.round(results[self.__pipe_to_flow_direct_map[hot_pipe]])\n try:\n is_disconnected = np.round(results[self.__pipe_disconnect_map[hot_pipe]])\n except KeyError:\n is_disconnected = np.zeros_like(q)\n\n inds_disconnected = is_disconnected == 1\n inds_positive = (flow_dir == 1) & ~inds_disconnected\n inds_negative = (flow_dir == 0) & ~inds_disconnected\n\n # We allow a bit of slack in the velocity. If the\n # exceedence/discrepancy is more than 0.1 mm/s, we log a warning,\n # if it's more than 1 cm/s, we log an error message.\n if np.any(inds_positive) or np.any(inds_negative):\n max_exceedence = max(\n np.hstack(\n [minimum_velocity - v[inds_positive], v[inds_negative] + minimum_velocity]\n )\n )\n\n for criterion, log_level in [(0.01, logging.ERROR), (1e-4, logging.WARNING)]:\n if max_exceedence > criterion:\n logger.log(\n log_level,\n f\"Velocity in {p} lower than minimum velocity {minimum_velocity} \"\n f\"by more than {criterion} m/s. ({max_exceedence} m/s)\",\n )\n\n break\n\n # Similar check for disconnected pipes, where we want the velocity\n # to be zero but allow the same amount of slack.\n if np.any(inds_disconnected):\n max_exceedence = max(np.abs(v[inds_disconnected]))\n\n for criterion, log_level in [(0.01, logging.ERROR), (1e-4, logging.WARNING)]:\n if max_exceedence > criterion:\n logger.log(\n log_level,\n f\"Velocity in disconnected pipe {p} exceeds {criterion} m/s. 
\"\n f\"({max_exceedence} m/s)\",\n )\n\n break\n\n for p in self.hot_pipes:\n if parameters[f\"{p}.diameter\"] == 0.0:\n continue\n\n heat_in = results[f\"{p}.HeatIn.Heat\"]\n heat_out = results[f\"{p}.HeatOut.Heat\"]\n inds = np.abs(heat_out) > np.abs(heat_in)\n nominal = self.variable_nominal(f\"{p}.HeatIn.Heat\")\n\n heat = heat_in.copy()\n heat[inds] = heat_out[inds]\n\n flow_dir_var = np.round(results[self.__pipe_to_flow_direct_map[p]])\n\n if options[\"heat_loss_disconnected_pipe\"]:\n np.testing.assert_array_equal(np.sign(heat), 2 * flow_dir_var - 1)\n else:\n try:\n is_disconnected = np.round(results[self.__pipe_disconnect_map[p]])\n except KeyError:\n is_disconnected = np.zeros_like(heat_in)\n\n inds_disconnected = is_disconnected == 1\n\n np.testing.assert_allclose(\n heat[inds_disconnected] / nominal, 0.0, atol=1e-5, rtol=0\n )\n\n np.testing.assert_array_equal(\n np.sign(heat[~inds_disconnected]), 2 * flow_dir_var[~inds_disconnected] - 1\n )\n","repo_name":"jackvreeken/rtc-tools-heat-network","sub_path":"src/rtctools_heat_network/heat_mixin.py","file_name":"heat_mixin.py","file_ext":"py","file_size_in_byte":88284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8535826698","text":"import logging\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Generator\nfrom typing import Union, Tuple\n\nimport numpy as np\nfrom sklearn.datasets import load_svmlight_file\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\nfrom .utils import download_file\n\n\nclass Dataset(ABC):\n \"\"\"Class represents dataset interface.\"\"\"\n\n @abstractmethod\n def load(self,\n data_folder: Union[str, Path],\n n_splits: int = 1,\n test_size: Union[int, float, None] = None) -> Generator:\n \"\"\"Load the dataset.\n\n :param data_folder: A path to the folder with datasets.\n :param n_splits: Number of splits to generate.\n :param test_size: Represents the proportion of the dataset to include in the test split.\n :return: X_train, X_test, y_train, y_test\n \"\"\"\n pass\n\n\nclass LibsvmDataset(Dataset):\n \"\"\"\n Class represents libsvm datasets.\n Visit https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html for more information.\n \"\"\"\n\n SOURCE = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/'\n\n def __init__(self, train: str, test: str = None, n_classes: int = None, imbalanced: bool = False):\n self.train = train\n self.test = test\n self.n_classes = n_classes\n self.imbalanced = imbalanced\n self.logger = logging.getLogger(self.__class__.__name__)\n\n @staticmethod\n def _select_classes(y: np.ndarray, n_classes: int) -> np.ndarray:\n np.random.seed(0)\n classes = np.unique(y)\n\n if n_classes is None:\n return classes\n\n if len(classes) < n_classes:\n raise RuntimeError(f'Dataset contains just {len(classes)} unique labels!')\n\n return np.random.choice(classes, size=n_classes, replace=False)\n\n @staticmethod\n def _reduce_classes(X: np.ndarray, y: np.ndarray, classes: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n mask = np.isin(y, classes)\n X, y = X[mask], y[mask]\n mapping = {y: i for i, y in enumerate(classes)}\n y = np.array([mapping[it] for it in y])\n return X, y\n\n @staticmethod\n def _imbalance(X: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n np.random.seed(0)\n\n result = []\n indices = np.arange(len(y))\n classes, cnt = np.unique(y, return_counts=True)\n\n for clazz in classes:\n fraction = 0.5 # 
np.random.random()\n count = max(int(cnt[clazz] * fraction), 2)\n result.append(np.random.choice(indices[y == clazz], size=count, replace=False))\n\n indices = np.concatenate(result)\n return X[indices], y[indices]\n\n def load(self, data_folder, n_splits: int = 1, test_size: float = None):\n if n_splits < 1:\n raise ValueError('n_splits should be positive!')\n\n if not isinstance(data_folder, Path):\n data_folder = Path(data_folder)\n\n if not self.train:\n raise ValueError('Train argument is not specified!')\n\n if not data_folder.exists():\n data_folder.mkdir()\n\n train = data_folder / self.train\n test = data_folder / self.test if self.test else None\n\n if not train.exists():\n url = self.SOURCE + self.train\n self.logger.info(f'Downloading train data from {url}')\n download_file(url, train)\n\n if test and not test.exists():\n url = self.SOURCE + self.test\n self.logger.info(f'Downloading test data from {url}')\n download_file(url, test)\n\n X_train, y_train = load_svmlight_file(str(train))\n X_train, y_train = X_train.toarray(), y_train.astype(np.int32)\n\n if self.imbalanced:\n X_train, y_train = self._imbalance(X_train, y_train)\n\n classes = self._select_classes(y_train, self.n_classes)\n X_train, y_train = self._reduce_classes(X_train, y_train, classes)\n\n if test:\n X_test, y_test = load_svmlight_file(str(test))\n X_test, y_test = X_test.toarray(), y_test.astype(np.int32)\n if X_test.shape[1] < X_train.shape[1]:\n X_train = X_train[:,:X_test.shape[1]]\n if X_train.shape[1] < X_test.shape[1]:\n X_test = X_test[:,:X_train.shape[1]]\n X_test, y_test = self._reduce_classes(X_test, y_test, classes)\n X_train = np.vstack((X_train, X_test))\n y_train = np.concatenate((y_train, y_test))\n\n for it in range(n_splits):\n yield train_test_split(X_train, y_train, test_size=test_size, random_state=it, stratify=y_train)\n\n\nclass CSVDataset(Dataset):\n def __init__(self, class_name, source, train, test, sep, skiprows, compression, label_column):\n self.source = source\n self.train = train\n self.test = test\n self.sep = sep\n self.skiprows = skiprows\n self.compression = compression\n self.label_column = label_column\n self.logger = logging.getLogger(class_name)\n\n def _load_csv(self, data_folder, dataset):\n dataset_path = data_folder / dataset\n if not dataset_path.exists():\n url = self.source + dataset\n self.logger.info(f'Downloading dataset from {url}')\n download_file(url, dataset_path)\n\n df = pd.read_csv(dataset_path, header=None, index_col=None, sep=self.sep, skiprows=self.skiprows, compression=self.compression)\n X, y = np.array(df.drop(df.columns[[self.label_column]], axis=1)), np.array(df.iloc[:,self.label_column])\n\n classes = np.unique(y)\n for c in classes:\n cnt = 0\n for i in range(len(y)):\n if y[i] == c:\n cnt += 1\n if cnt > 1:\n continue\n\n for i in range(len(y)):\n if y[i] == c:\n X = np.delete(X, i, axis=0)\n y = np.delete(y, i, axis=0)\n break\n\n return X, y\n\n def load(self, data_folder, n_splits: int = 1, test_size: float = None):\n if n_splits < 1:\n raise ValueError('n_splits should be positive!')\n\n if not isinstance(data_folder, Path):\n data_folder = Path(data_folder)\n\n if not data_folder.exists():\n data_folder.mkdir()\n\n X_train, y_train = self._load_csv(data_folder, self.train)\n\n if self.test is not None:\n X_test, y_test = self._load_csv(data_folder, self.test)\n X_train = np.vstack((X_train, X_test))\n y_train = np.concatenate((y_train, y_test))\n\n for i in range(X_train.shape[1]):\n if isinstance(X_train[0, i], str):\n values = 
np.unique(X_train[:, i])\n mapping = {y : j for j, y in enumerate(values)}\n X_train[:, i] = np.array([mapping[x] for x in X_train[:, i]])\n\n X_train = X_train.astype(np.float32)\n\n classes = np.unique(y_train)\n mapping = {y : i for i, y in enumerate(classes)}\n y_train = np.array([mapping[y] for y in y_train], dtype=np.int32)\n\n for it in range(n_splits):\n yield train_test_split(X_train, y_train, test_size=test_size, random_state=it, stratify=y_train)\n\n\nclass ImageSegmentation(CSVDataset):\n \"\"\"\n Class represents Image Segmentation Dataset.\n Visit http://archive.ics.uci.edu/ml/datasets/image+segmentation for more information\n \"\"\"\n\n SOURCE = 'http://archive.ics.uci.edu/ml/machine-learning-databases/image/'\n TRAIN = 'segmentation.data'\n TEST = 'segmentation.test'\n\n def __init__(self):\n super().__init__(\n class_name=self.__class__.__name__,\n source=self.SOURCE,\n train=self.TRAIN,\n test=self.TEST,\n sep=',',\n skiprows=6,\n compression=None,\n label_column=0\n )\n\n\nclass Covertype(CSVDataset):\n \"\"\"\n Class represents Covertype Dataset.\n Visit https://archive.ics.uci.edu/ml/datasets/Covertype for more information\n \"\"\"\n\n SOURCE = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/'\n TRAIN = 'covtype.data.gz'\n\n def __init__(self):\n super().__init__(\n class_name=self.__class__.__name__,\n source=self.SOURCE,\n train=self.TRAIN,\n test=None,\n sep=',',\n skiprows=0,\n compression='gzip',\n label_column=-1\n )\n\n\nclass WinequalityWhite(CSVDataset):\n \"\"\"\n Class represents Wine Quality (White) Dataset.\n Visit https://archive.ics.uci.edu/ml/datasets/wine+quality for more information\n \"\"\"\n\n SOURCE = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/'\n TRAIN = 'winequality-white.csv'\n\n def __init__(self):\n super().__init__(\n class_name=self.__class__.__name__,\n source=self.SOURCE,\n train=self.TRAIN,\n test=None,\n sep=';',\n skiprows=1,\n compression=None,\n label_column=-1\n )\n\n\nclass Abalone(CSVDataset):\n \"\"\"\n Class represents Abalone Dataset.\n Visit https://archive.ics.uci.edu/ml/datasets/Abalone for more information\n \"\"\"\n\n SOURCE = 'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/'\n TRAIN = 'abalone.data'\n\n def __init__(self):\n super().__init__(\n class_name=self.__class__.__name__,\n source=self.SOURCE,\n train=self.TRAIN,\n test=None,\n sep=',',\n skiprows=0,\n compression=None,\n label_column=-1\n )\n\n\nclass CSVDataset(Dataset):\n def __init__(self, class_name, source, train, test, sep, skiprows, compression, label_column):\n self.source = source\n self.train = train\n self.test = test\n self.sep = sep\n self.skiprows = skiprows\n self.compression = compression\n self.label_column = label_column\n self.logger = logging.getLogger(class_name)\n\n def _load_csv(self, data_folder, dataset):\n dataset_path = data_folder / dataset\n if not dataset_path.exists():\n url = self.source + dataset\n self.logger.info(f'Downloading dataset from {url}')\n download_file(url, dataset_path)\n\n df = pd.read_csv(dataset_path, header=None, index_col=None, sep=self.sep, skiprows=self.skiprows,\n compression=self.compression)\n X, y = np.array(df.drop(df.columns[[self.label_column]], axis=1)), np.array(df.iloc[:, self.label_column])\n\n classes = np.unique(y)\n for c in classes:\n cnt = 0\n for i in range(len(y)):\n if y[i] == c:\n cnt += 1\n if cnt > 1:\n continue\n\n for i in range(len(y)):\n if y[i] == c:\n X = np.delete(X, i, axis=0)\n y = np.delete(y, i, axis=0)\n 
break\n\n return X, y\n\n def load(self, data_folder, n_splits: int = 1, test_size: float = None):\n if n_splits < 1:\n raise ValueError('n_splits should be positive!')\n\n if not isinstance(data_folder, Path):\n data_folder = Path(data_folder)\n\n if not data_folder.exists():\n data_folder.mkdir()\n\n X_train, y_train = self._load_csv(data_folder, self.train)\n\n if self.test is not None:\n X_test, y_test = self._load_csv(data_folder, self.test)\n X_train = np.vstack((X_train, X_test))\n y_train = np.concatenate((y_train, y_test))\n\n for i in range(X_train.shape[1]):\n if isinstance(X_train[0, i], str):\n values = np.unique(X_train[:, i])\n mapping = {y: j for j, y in enumerate(values)}\n X_train[:, i] = np.array([mapping[x] for x in X_train[:, i]])\n\n X_train = X_train.astype(np.float32)\n\n classes = np.unique(y_train)\n mapping = {y: i for i, y in enumerate(classes)}\n y_train = np.array([mapping[y] for y in y_train], dtype=np.int32)\n\n for it in range(n_splits):\n yield train_test_split(X_train, y_train, test_size=test_size, random_state=it, stratify=y_train)\n\n\nclass IMAT(Dataset):\n SOURCE = 'http://eranik.me/~education/machine-learning/datasets/'\n\n def __init__(self, train: str):\n self.train = train\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def load(self, data_folder, n_splits: int = 1, test_size: float = None):\n if n_splits < 1:\n raise ValueError('n_splits should be positive!')\n\n if not isinstance(data_folder, Path):\n data_folder = Path(data_folder)\n\n if not self.train:\n raise ValueError('Train argument is not specified!')\n\n if not data_folder.exists():\n data_folder.mkdir()\n\n train = data_folder / self.train\n if not train.exists():\n url = self.SOURCE + self.train\n self.logger.info(f'Downloading dataset from {url}')\n download_file(url, train)\n\n if not train.exists():\n raise ValueError('Train dataset does not exist')\n\n X_train, y_train = load_svmlight_file(str(train))\n _, y_train = np.unique(np.round(y_train), return_inverse=True)\n X_train, y_train = X_train.toarray(), y_train.astype(np.int32)\n\n assert len(np.unique(y_train)) == 5\n\n for it in range(n_splits):\n yield train_test_split(X_train, y_train, test_size=test_size, random_state=it, stratify=y_train)","repo_name":"rebryk/mcc-benchmark","sub_path":"benchmark/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":13628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7070917207","text":"# Author: David Legg\n# Date: 10/11/19\n# \n# This is a test of the Keras system on a simple reinforcement learning task.\n# Setup is a 2-layer fully connected neural net, acting as a Deep Q-Learning system.\n# Problem is a version of the NChain Gym environment, inspired by this article:\n# https://adventuresinmachinelearning.com/reinforcement-learning-tutorial-python-keras/\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, InputLayer\nfrom tensorflow.keras.utils import to_categorical\nimport numpy as np\nfrom utils import chance\n\nclass ChainProblem:\n def __init__(self):\n self._state = 0\n self._dings = 0\n\n def act(self, action):\n if action == 0:\n if self._state == 4:\n self._dings += 1\n reward = 10\n else:\n reward = 0\n self._state += 1\n elif action == 1:\n reward = 2\n self._state = 0\n else:\n raise ValueError('Action {} is not allowed. 
Allowed actions are 0, 1.'.format(action))\n\n        return (reward, self._state)\n\n    def reset(self):\n        self._state = 0\n        return self._state\n\nCHAIN = ChainProblem()\nNUM_EPISODES = 1000\nNUM_STEPS = 200\nDISCOUNT = 0.95\nEXPLORE_FACTOR = 1.00\nEXPLORE_DECAY = 0.99\nNUM_STATES = 5\nNUM_ACTIONS = 2\n\nmodel = Sequential([\n    InputLayer(input_shape=(5,)),\n    Dense(10, activation='sigmoid'),\n    Dense(2, activation='linear')\n    ])\nmodel.compile(loss='mse', optimizer='adam', metrics=['mae'])\n\ndef print_q_table():\n    # Generates predicted Q values for every state/action pair\n    predicted_q_values = model.predict(np.eye(NUM_STATES))\n    print(' S\\\\A |', end='')\n    for action in range(NUM_ACTIONS):\n        print('{:>3} |'.format(action), end='')\n    print()\n    print('-----+' * (NUM_ACTIONS + 1))\n    for state in range(NUM_STATES):\n        print('{:>4} |'.format(state), end='')\n        for action in range(NUM_ACTIONS):\n            print('{:>5.2f}|'.format(predicted_q_values[state, action]), end='')\n        print()\n\ndef main():\n    current_explore_factor = EXPLORE_FACTOR\n    for episode_number in range(NUM_EPISODES):\n        if episode_number % 10 == 0:\n            print('Episode {}/{}'.format(episode_number, NUM_EPISODES))\n            print_q_table()\n            print('Dings:', CHAIN._dings)\n            CHAIN._dings = 0\n        current_explore_factor *= EXPLORE_DECAY\n        new_observation = to_categorical( [CHAIN.reset()], NUM_STATES )\n        for step_number in range(NUM_STEPS):\n            observation = new_observation\n            if chance(current_explore_factor):\n                action = np.random.randint(NUM_ACTIONS)\n            else:\n                action = model.predict_classes(observation)[0]\n            (reward, new_observation) = CHAIN.act(action)\n            new_observation = to_categorical( [new_observation], NUM_STATES )\n            # target_value is the known reward for the action we took, plus the discounted estimated reward for the state it put us in.\n            target_value = reward + DISCOUNT * model.predict(new_observation).max()\n            # target_vector is what we want the new output for the observation input to be\n            target_vector = model.predict(observation)\n            target_vector[0, action] = target_value\n            # Now, actually update the model according to target_vector\n            model.fit(observation, target_vector, epochs=1, verbose=0)\n            observation = new_observation\n\n    print('Training complete.')\n\n    print('Example run:')\n    print('State | Action | Reward')\n    state = CHAIN.reset()\n    for _ in range(NUM_STEPS):\n        action = model.predict_classes( to_categorical( [state], NUM_STATES ) )[0]\n        (reward, new_state) = CHAIN.act(action)\n        print('{:>5} |{:>7} |{:>7} '.format(state, action, reward))\n        state = new_state\n\n\nif __name__ == '__main__':\n    main()","repo_name":"SurajSSingh/Collabuilder","sub_path":"samples/keras_reinforcement_test.py","file_name":"keras_reinforcement_test.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69894087930","text":"# A simple drawing program\n\nfrom turtle import forward,right, left, exitonclick, shape, penup, pendown ,setx, sety\npenup()\nsety(200)\npendown()\n\nfor i in range(1):\n    left(-90)\n    forward(150)\n    left(90)\n    for i in range(20):\n        forward(2)\n        left(2)\n    \n    left(135)\nfor b in range(20):\n    forward(2)\n    left(2)\nright(90)\n\n \n \n \n\n \n\npenup() \nexitonclick()\n","repo_name":"TomasBalbinder/tutorialy","sub_path":"03/stonek.py","file_name":"stonek.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17465747063","text":"from typing import Any, Dict\n\nfrom 
ax.core.metric import Metric\nfrom ax.core.optimization_config import MultiObjectiveOptimizationConfig\nfrom ax.core.parameter import FixedParameter, ParameterType, RangeParameter\nfrom ax.core.search_space import HierarchicalSearchSpace\nfrom ax.exceptions.core import UnsupportedError\nfrom ax.service.utils.instantiation import InstantiationBase\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.common.typeutils import checked_cast\n\n\nclass TestInstantiationtUtils(TestCase):\n \"\"\"Testing the instantiation utilities functionality that is not tested in\n main `AxClient` testing suite (`TestServiceAPI`).\"\"\"\n\n def test_parameter_type_validation(self) -> None:\n with self.assertRaisesRegex(ValueError, \"No AE parameter type\"):\n # pyre-fixme[6]: For 1st param expected `Union[Type[bool], Type[float],\n # Type[int], Type[str]]` but got `Type[list]`.\n InstantiationBase._get_parameter_type(list)\n\n def test_constraint_from_str(self) -> None:\n with self.assertRaisesRegex(ValueError, \"Bound for the constraint\"):\n InstantiationBase.constraint_from_str(\n \"x1 + x2 <= not_numerical_bound\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None},\n )\n with self.assertRaisesRegex(ValueError, \"Outcome constraint bound\"):\n InstantiationBase.outcome_constraint_from_str(\"m1 <= not_numerical_bound\")\n three_val_constaint = InstantiationBase.constraint_from_str(\n \"x1 + x2 + x3 <= 3\",\n {\n \"x1\": RangeParameter(\n name=\"x1\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=2.0\n ),\n \"x2\": RangeParameter(\n name=\"x2\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=2.0\n ),\n \"x3\": RangeParameter(\n name=\"x3\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=2.0\n ),\n },\n )\n\n self.assertEqual(three_val_constaint.bound, 3.0)\n with self.assertRaisesRegex(ValueError, \"Parameter constraint should\"):\n InstantiationBase.constraint_from_str(\n \"x1 + x2 + <= 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n with self.assertRaisesRegex(ValueError, \"Parameter constraint should\"):\n InstantiationBase.constraint_from_str(\n \"x1 + x2 + x3 = 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n one_val_constraint = InstantiationBase.constraint_from_str(\n \"x1 <= 0\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None},\n )\n self.assertEqual(one_val_constraint.bound, 0.0)\n self.assertEqual(one_val_constraint.constraint_dict, {\"x1\": 1.0})\n one_val_constraint = InstantiationBase.constraint_from_str(\n \"-0.5*x1 >= -0.1\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None},\n )\n self.assertEqual(one_val_constraint.bound, 0.1)\n self.assertEqual(one_val_constraint.constraint_dict, {\"x1\": 0.5})\n three_val_constaint2 = InstantiationBase.constraint_from_str(\n \"-x1 + 2.1*x2 - 4*x3 <= 3\",\n {\n \"x1\": RangeParameter(\n name=\"x1\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=4.0\n ),\n \"x2\": RangeParameter(\n name=\"x2\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=4.0\n ),\n \"x3\": RangeParameter(\n name=\"x3\", parameter_type=ParameterType.FLOAT, lower=0.1, upper=4.0\n ),\n },\n )\n\n 
self.assertEqual(three_val_constaint2.bound, 3.0)\n self.assertEqual(\n three_val_constaint2.constraint_dict, {\"x1\": -1.0, \"x2\": 2.1, \"x3\": -4.0}\n )\n with self.assertRaisesRegex(ValueError, \"Multiplier should be float\"):\n InstantiationBase.constraint_from_str(\n \"x1 - e*x2 + x3 <= 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n with self.assertRaisesRegex(ValueError, \"A linear constraint should be\"):\n InstantiationBase.constraint_from_str(\n \"x1 - 2 *x2 + 3 *x3 <= 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n with self.assertRaisesRegex(ValueError, \"A linear constraint should be\"):\n InstantiationBase.constraint_from_str(\n \"x1 - 2* x2 + 3* x3 <= 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n with self.assertRaisesRegex(ValueError, \"A linear constraint should be\"):\n InstantiationBase.constraint_from_str(\n \"x1 - 2 * x2 + 3*x3 <= 3\",\n # pyre-fixme[6]: For 2nd param expected `Dict[str, Parameter]` but\n # got `Dict[str, None]`.\n {\"x1\": None, \"x2\": None, \"x3\": None},\n )\n\n def test_objective_validation(self) -> None:\n with self.assertRaisesRegex(UnsupportedError, \"Ambiguous objective definition\"):\n InstantiationBase.make_experiment(\n # pyre-fixme[6]: For 1st param expected `List[Dict[str, Union[None,\n # Dict[str, List[str]], List[Union[None, bool, float, int, str]],\n # bool, float, int, str]]]` but got `Dict[str, Union[List[int],\n # str]]`.\n parameters={\"name\": \"x\", \"type\": \"range\", \"bounds\": [0, 1]},\n objective_name=\"branin\",\n objectives={\"branin\": \"minimize\", \"currin\": \"maximize\"},\n )\n\n def test_add_tracking_metrics(self) -> None:\n experiment = InstantiationBase.make_experiment(\n parameters=[{\"name\": \"x\", \"type\": \"range\", \"bounds\": [0, 1]}],\n tracking_metric_names=None,\n )\n self.assertDictEqual(experiment._tracking_metrics, {})\n\n metrics_names = [\"metric_1\", \"metric_2\"]\n experiment = InstantiationBase.make_experiment(\n parameters=[{\"name\": \"x\", \"type\": \"range\", \"bounds\": [0, 1]}],\n tracking_metric_names=metrics_names,\n )\n self.assertDictEqual(\n experiment._tracking_metrics,\n {metric_name: Metric(name=metric_name) for metric_name in metrics_names},\n )\n\n def test_make_objectives(self) -> None:\n with self.assertRaisesRegex(ValueError, \"specify 'minimize' or 'maximize'\"):\n InstantiationBase.make_objectives({\"branin\": \"unknown\"})\n objectives = InstantiationBase.make_objectives(\n {\"branin\": \"minimize\", \"currin\": \"maximize\"}\n )\n branin_metric = [o.minimize for o in objectives if o.metric.name == \"branin\"]\n self.assertTrue(branin_metric[0])\n currin_metric = [o.minimize for o in objectives if o.metric.name == \"currin\"]\n self.assertFalse(currin_metric[0])\n\n def test_make_optimization_config(self) -> None:\n objectives = {\"branin\": \"minimize\", \"currin\": \"maximize\"}\n objective_thresholds = [\"branin <= 0\", \"currin >= 0\"]\n with self.subTest(\"Single-objective optimizations with objective thresholds\"):\n with self.assertRaisesRegex(ValueError, \"not specify objective thresholds\"):\n InstantiationBase.make_optimization_config(\n {\"branin\": \"minimize\"},\n objective_thresholds,\n outcome_constraints=[],\n status_quo_defined=False,\n )\n\n 
with self.subTest(\"MOO with partial objective thresholds\"):\n multi_optimization_config = InstantiationBase.make_optimization_config(\n objectives,\n objective_thresholds=objective_thresholds[:1],\n outcome_constraints=[],\n status_quo_defined=False,\n )\n self.assertEqual(len(multi_optimization_config.objective.metrics), 2)\n self.assertEqual(\n len(\n checked_cast(\n MultiObjectiveOptimizationConfig, multi_optimization_config\n ).objective_thresholds\n ),\n 1,\n )\n\n with self.subTest(\"MOO with all objective threshold\"):\n multi_optimization_config = InstantiationBase.make_optimization_config(\n objectives,\n objective_thresholds,\n outcome_constraints=[],\n status_quo_defined=False,\n )\n self.assertEqual(len(multi_optimization_config.objective.metrics), 2)\n self.assertEqual(\n len(\n checked_cast(\n MultiObjectiveOptimizationConfig, multi_optimization_config\n ).objective_thresholds\n ),\n 2,\n )\n\n with self.subTest(\n \"Single-objective optimizations without objective thresholds\"\n ):\n single_optimization_config = InstantiationBase.make_optimization_config(\n {\"branin\": \"minimize\"},\n objective_thresholds=[],\n outcome_constraints=[],\n status_quo_defined=False,\n )\n self.assertEqual(single_optimization_config.objective.metric.name, \"branin\")\n\n def test_single_valued_choice_to_fixed_param_conversion(self) -> None:\n for use_dependents in [True, False]:\n representation: Dict[str, Any] = {\n \"name\": \"test\",\n \"type\": \"choice\",\n \"values\": [1.0],\n }\n if use_dependents:\n representation[\"dependents\"] = {1.0: [\"foo_or_bar\", \"bazz\"]}\n output = checked_cast(\n FixedParameter, InstantiationBase.parameter_from_json(representation)\n )\n self.assertIsInstance(output, FixedParameter)\n self.assertEqual(output.value, 1.0)\n if use_dependents:\n self.assertEqual(output.dependents, {1.0: [\"foo_or_bar\", \"bazz\"]})\n\n def test_hss(self) -> None:\n parameter_dicts = [\n {\n \"name\": \"root\",\n \"type\": \"fixed\",\n \"value\": \"HierarchicalSearchSpace\",\n \"dependents\": {\"HierarchicalSearchSpace\": [\"foo_or_bar\", \"bazz\"]},\n },\n {\n \"name\": \"foo_or_bar\",\n \"type\": \"choice\",\n \"values\": [\"Foo\", \"Bar\"],\n \"dependents\": {\"Foo\": [\"an_int\"], \"Bar\": [\"a_float\"]},\n },\n {\n \"name\": \"an_int\",\n \"type\": \"choice\",\n \"values\": [1, 2, 3],\n \"dependents\": None,\n },\n {\"name\": \"a_float\", \"type\": \"range\", \"bounds\": [1.0, 1000.0]},\n {\n \"name\": \"bazz\",\n \"type\": \"fixed\",\n \"value\": \"Bazz\",\n \"dependents\": {\"Bazz\": [\"another_int\"]},\n },\n {\"name\": \"another_int\", \"type\": \"fixed\", \"value\": \"2\"},\n ]\n search_space = InstantiationBase.make_search_space(\n # pyre-fixme[6]: For 1st param expected `List[Dict[str, Union[None, Dict[...\n parameters=parameter_dicts,\n parameter_constraints=[],\n )\n self.assertIsInstance(search_space, HierarchicalSearchSpace)\n # pyre-fixme[16]: `SearchSpace` has no attribute `_root`.\n self.assertEqual(search_space._root.name, \"root\")\n","repo_name":"facebook/Ax","sub_path":"ax/service/tests/test_instantiation_utils.py","file_name":"test_instantiation_utils.py","file_ext":"py","file_size_in_byte":12472,"program_lang":"python","lang":"en","doc_type":"code","stars":2182,"dataset":"github-code","pt":"77"} +{"seq_id":"45951184646","text":"from winterdev_components import *\nfrom text_renderer import *\n\n# article\n# article list\n# project list\n# project home page\n# mesh project page\n\ndef winterdev_render_content(content: str):\n\tcontent_render = 
render_article(content, {\n\t\t\"title\": title_html,\n\t\t\"sub-title\": sub_title,\n\t\t\"sub-title2\": sub_title2,\n\t\t\"link\": link,\n\t\t\"img\": image,\n\t\t\"img-half\": image_half,\n\t\t\"svg\": svg,\n\t\t\"svg-half\": svg_half,\n\t\t\"iframe\": iframe,\n\t\t\"iframe-youtube-video\": iframe_youtube_video,\n\t\t\"code\": code,\n\t\t\"equation\": equation,\n\t\t\"equation-inline\": equation_inline,\n\t\t\"br\": br\n\t})\n\n\treturn content_render\n\ndef winterdev_render_card_list(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\t\n\trender = render_template(template, {\n\t\t\"card-list-title\": lambda io: title_html(io, page[\"title\"]),\n\t\t\"card-list\": lambda io: card_list_html(io, page, context),\n\t\t\"name\": lambda io: raw_html(io, page[\"title\"]),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render\n\ndef winterdev_render_article(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\n\tcontent_render = winterdev_render_content(page[\"content\"])\n\n\trender = render_template(template, {\n\t\t\"article\": lambda io: raw_html(io, content_render),\n\t\t\"name\": lambda io: raw_html(io, page[\"title\"]),\n\t\t\"comments\": lambda io: comments_html(io, page[\"slug\"]),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render\n\ndef winterdev_render_support(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\n\trender = render_template(template, {\n\t\t\"comments\": lambda io: comments_html(io, page[\"slug\"]),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render\n\ndef winterdev_render_project_simple(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\n\trender = render_template(template, {\n\t\t\"project\": lambda io: raw_html(io, page[\"content\"]),\n\t\t\"name\": lambda io: raw_html(io, page[\"title\"]),\n\t\t\"title\": lambda io: title_html(io, page[\"title\"]),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render\n\ndef winterdev_render_project_mesh_page_home(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\n\tcontent_render = render_template(page[\"content\"], {\n\t\t\"shapes\": lambda io: shapes_html(io, page[\"shapes\"], context)\n\t})\n\n\trender = render_template(template, {\n\t\t\"project\": lambda io: raw_html(io, content_render),\n\t\t\"name\": lambda io: raw_html(io, page[\"title\"]),\n\t\t\"title\": lambda io: title_html(io, page[\"title\"]),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render\n\ndef winterdev_render_project_mesh_page_mesh(template: str, slug: str, context: dict) -> str:\n\tpage = context[slug]\n\n\tcontent_render = winterdev_render_content(page[\"content\"])\n\n\trender = render_template(template, {\n\t\t\"name\": lambda io: raw_html(io, page[\"title\"]),\n\t\t\"id\": lambda io: raw_html(io, page[\"id\"]),\n\t\t\"shape-article\": lambda io: raw_html(io, content_render),\n\t\t\"shape-title\": lambda io: shape_title_html(io, page[\"title\"]),\n\t\t\"shape-tools\": lambda io: shape_tools_html(io, page),\n\t\t\"vars\": vars,\n\t\t\"top\": top,\n\t})\n\n\treturn render","repo_name":"IainWinter/winter-dev-static","sub_path":"generator/winterdev_render.py","file_name":"winterdev_render.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74759665524","text":"from random import randint\n\ndef compare(x, y):\n\tif x < y:\n\t\tprint(f'{x} is less than {y}')\n\t\n\telif x > 
y:\n\t\tprint(f'{x} is greater than {y}')\n\n\telse:\n\t\tprint(f' {x} and {y} are equal')\n\ncompare(4, 7) # compare prints its result and returns None, so printing the return value just printed None\n\ndef isVowelList(c):\n\tvowels = ['e', 'i', 'o', 'a', 'y']\n\treturn c in vowels\n\ndef isVowel(c):\n\treturn c == 'e' or c == 'i' or c == 'o' or c == 'a' or c == 'y' #🤢\n\nprint(f'Character {\"i\"} is a vowel: {isVowel(\"i\")}')\n\ndef determine_weightclass(weight):\n\tif weight > 265: return \"Too heavy for weight class\"\n\telif weight > 205: return \"Heavyweight\"\n\telif weight > 185: return \"Light Heavyweight\"\n\telif weight > 170: return \"Middleweight\"\n\telif weight > 155: return \"Welterweight\"\n\telif weight > 145: return \"Lightweight\"\n\telse: return \"Too light for weight class\"\n\nprint(f'Weight {183} is in class {determine_weightclass(183)}')\n\nprint (4 < 7)\nprint (4 > 7)\nprint (True and True)\nprint (4 < 7 and 1 < 10)\n\nprint (True and False)\nprint (4 < 7 and 1 > 10)\n\nprint (True or False)\nprint (4 < 7 or 1 > 10)\n\nprint (not(False))\n\nprint(bool(\"hello\"))\nprint(bool(\"\"))\nprint(bool(100))\nprint(bool(0))\n\ndef is_divisible_by_3(n):\n\treturn n % 3 == 0\n\nprint(f'{346} is divisible by 3: {is_divisible_by_3(346)}')\n\ndef calculate_payroll(hours_worked, hourly_rate):\n\tif hours_worked > 40:\n\t\tovertimeRate = 1.5*hourly_rate\n\t\tbeforeOvertime = 40*hourly_rate\n\t\tovertime = overtimeRate * (hours_worked-40)\n\t\treturn beforeOvertime + overtime\n\telse:\n\t\treturn hourly_rate * hours_worked\n\nprint(f'Working 46 hours at a rate of $8.75 per hour pays ${calculate_payroll(46,8.75):.2f}')\n\ndef couponDiscount(price, coupon):\n\ttaxPrice = price+price*0.13\n\tif coupon == \"BONUS\" or coupon == \"BONUS40\": # a bare \"BONUS40\" is always truthy, which gave every coupon the discount\n\t\treturn taxPrice * (1 - 0.4)\n\treturn taxPrice\n\nprint(f'An item that costs ${239.99} costs ${couponDiscount(239.99, \"BONUS40\"):.2f} with coupon {\"BONUS40\"}')\n\ndef validDriver(age, permit, yearly_test):\n\tif age < 18: return permit == \"G1\"\n\telif age <= 70: return permit == \"G\"\n\telse: return permit == \"G\" and yearly_test == \"yes\"\n\n# I don't check yearly_test unless age > 70 so it doesn't need to be tested\n# This assumes that I trust my code and i'm not sure that I do 🤨\nprint(validDriver(6, 'G1', 'no')) # Valid indeed\nprint(validDriver(16, 'G', 'no')) # False\nprint(validDriver(22, 'G1', 'no')) # False\nprint(validDriver(45, 'G', 'no')) # True\nprint(validDriver(77, 'G1', 'no')) # False\nprint(validDriver(72, 'G', 'no')) # False\nprint(validDriver(81, 'G1', 'yes')) # False\nprint(validDriver(34554, 'G', 'yes')) # True\n\nprint(f'I have my g1 and I am 16, I am a valid driver: {validDriver(16, \"G1\", \"no\")}')\n\n# Coding bat alarm clock\ndef alarm_clock(day, vacation):\n\tif day == 0 or day == 6: # weekend\n\n\t\tif vacation:\n\t\t\treturn \"off\"\n\t\telse:\n\t\t\treturn \"10:00\"\n\n\telse:\n \n\t\tif vacation:\n\t\t\treturn \"10:00\"\n\t\telse:\n\t\t\treturn \"7:00\"\n\t# Kind of a mess but it works!\n\nprint(f'The alarm will go off at {alarm_clock(0, False)} on saturday when not on vacation')\n\n# Coding bat Cigar party\n\ndef cigar_party(cigars, is_weekend):\n    if is_weekend:\n        return cigars >= 40\n    else:\n        return cigars >= 40 and cigars <= 60\n\t# This could also be:\n\t# return cigars >= 40 and (is_weekend or cigars <= 60)\n\t# But I thought I would keep it tidy\n\nprint(f'Party is a success on a weekday with {54} cigars: {cigar_party(54, False)}')\n\n# Coding bat sum double\n\ndef sum_double_black_magic(a, b):\n    sum = a+b\n    return sum + sum*(a==b)\n\t# Hell yeah, booleans are numbers too!\n\ndef 
sum_double(a, b):\n\t# Now without the black magic\n\tsum = a+b\n\tif a == b: \treturn 2*sum\n\telse: \t\t\treturn sum\n\nprint(f'Sum double of {5} and {5} is {sum_double(5, 5)}')\n\ndef convert_to_day_dict(number):\n\tdays = {\n\t\t1: \"Sunday\",\n\t\t2: \"Monday\",\n\t\t3: \"Tuesday\",\n\t\t4: \"Wednesday\",\n\t\t5: \"Thursday\",\n\t\t6: \"Friday\",\n\t\t7: \"Saturday\",\n\t} # I could also use a list and use index = number-1\n\t\t# But why would I do that when I can use a dictionary\n\treturn days.get(number)\n\ndef convert_to_day(number):\n\tif number == 1: return \"Sunday\"\n\telif number == 2: return \"Monday\"\n\telif number == 3: return \"Tuesday\"\n\telif number == 4: return \"Wednesday\"\n\telif number == 5: return \"Thursday\"\n\telif number == 6: return \"Friday\"\n\telif number == 7: return \"Saturday\"\n\nday_num = randint(1,7)\nprint(f'Day {day_num} is named {convert_to_day(day_num)}')\n\ndef real_root_exist(a, b, c):\n\tdisc = b**2 - 4*a*c\n\tif disc > 0:\n\t\treturn \"Two Real Solutions\"\n\telif disc < 0:\n\t\treturn \"No Real Solutions\"\n\telse:\n\t\treturn \"One Real Solution\"\n\nprint(\"Idk how to format this its been too long since grade 10 math\")\nprint(\"and unlike trig, uses for this stuff is rare\")\nprint(f'{real_root_exist(1,2,3)}') # So i can add it later\n\n# Take a number\ndef takeANumber(start, file):\n\tmachineNumber = start\n\tinLine = 0\n\tlateStudents = 0\n\tfor cmd in file:\n\t\tif cmd == \"EOF\": break # You just never know 🤪\n\t\telif cmd == \"CLOSE\":\n\t\t\tprint(lateStudents, inLine, machineNumber)\n\t\t\tinLine = 0\n\t\t\tlateStudents = 0\n\t\t\t# I'm sorry but this is how the gods wanted it\n\t\telif cmd == \"TAKE\":\n\t\t\tmachineNumber += 1 # PYTHON WHY!!!!!!\n\t\t\tinLine += 1 # WHY MUST YOU DO THIS!!!\n\t\t\tlateStudents += 1 # LIFE IS TORTURE!!\n\t\telif cmd == \"SERVE\":\n\t\t\tinLine -= 1 # PLEASE MAKE IT STOP!!!!\n\nfile1 = [\n\"TAKE\",\n\"TAKE\",\n\"SERVE\",\n\"TAKE\",\n\"SERVE\",\n\"SERVE\",\n\"CLOSE\",\n\"TAKE\",\n\"TAKE\",\n\"TAKE\",\n\"SERVE\",\n\"CLOSE\",\n\"TAKE\",\n\"SERVE\",\n\"TAKE\",\n\"SERVE\",\n\"TAKE\",\n\"TAKE\",\n\"TAKE\",\n\"TAKE\",\n\"TAKE\",\n\"TAKE\",\n\"SERVE\",\n\"CLOSE\",\n\"EOF\"] # This is kinda stupid\n# And after I am done is when I realize that you gave us the code for all this\n# And I refuse to use it\ntakeANumber(23, file1) # At least it works and it's pretty clean","repo_name":"Jumner/compSci","sub_path":"python/20/11/24 Conditionals - 7/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9484374441","text":"import requests\nimport threading\nimport random\n\n\n\n\ndef check():\n for i in range(10):\n while True:\n \n user = open('usernames.txt', 'r').read().splitlines()\n username = random.choice(user)\n\n r = requests.get(f'https://www.instagram.com/{username}/')\n if r.status_code == 200:\n print(f\"Username is Taken | {username}\\n\")\n else:\n print(f\"Username is available/not in use | {username}\\n\")\n open('available.txt', 'a').write(f'{username}\\n')\n\n\n\ndef start():\n r = input(\"Amount of threads: \")\n for i in range(int(r)):\n threading.Thread(target=check).start()\n\nstart()\n","repo_name":"Hazza3100/Instagram-username-Checker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"23386202909","text":"import 
discord, asyncio, re, random\nfrom discord.ext import commands\nfrom cogs.utils.checks import *\nfrom datetime import datetime, timedelta\n\nclass tatsumaki:\n    active = True\n    text = ['To confirm, type `' , '➡ | Tippe `']\n    balancetext = ['balance of', '**, **dein Guthaben beträgt 💴 ']\n    reptext = [', **you can award more reputation in ', '**, **in deinem Konto sind ']\n    repcantext = ['**, **you can award a reputation point!**', '']\n    cmds = ['credits ', 'points ']\n    cooldown = '**, please cool down! (**'\n    tatsu = 172002275412279296\n    allowed = False\n    member = ''\n    def __init__(self, bot):\n        self.bot = bot\n\n    async def on_message(self, message):\n        if not self.active: return\n        if message.author.id == self.bot.user.id:\n            if any(s in message.content.lower() for s in self.cmds):\n                print('Allowing auto-transaction via Tatsumaki in channel #'+message.channel.name+' on server \"'+message.channel.guild.name+'\"')\n                self.allowed = True\n                # await self.bot.delete_message(message)\n        elif message.author.id == self.tatsu and self.allowed:\n            if not any(x in message.content for x in self.text): return\n            captcha = re.findall(r\"`(\\d+)`\", message.content)[0]\n            if captcha:\n                msg = await message.channel.send(captcha)\n                await msg.delete()\n                self.allowed = False\n\n    def check2(self, msg):\n        tatsu = msg.channel.guild.get_member(self.tatsu)\n        if msg.author == tatsu: return True\n\n    def check(self, msg):\n        tatsu = msg.channel.guild.get_member(self.tatsu)\n        if not msg.author == tatsu: return False\n        if self.cooldown in msg.content:\n            return re.findall(r\"\\*\\*(\\d+)\\*\\*\", msg.content)[0]\n        return any(x in msg.content.lower() for x in self.balancetext)\n\n    async def credits(self, channel):\n        \"\"\"Returns current Tatsumaki credits\"\"\"\n        _msg = await channel.send(content='t!credits')\n        try: msg = await self.bot.wait_for('message', timeout=15, check=self.check)\n        except asyncio.TimeoutError: return False\n        await _msg.delete()\n        if msg is None: return False\n        _content = msg.content\n        if _msg.author.permissions_in(_msg.channel).manage_messages: await msg.delete()\n        return int(''.join(char for char in _content.split(self.balancetext[0])[-1] if char.isdigit()))\n\n    async def points(self, channel):\n        \"\"\"Returns current Tatsumaki guild points\"\"\"\n        _msg = await channel.send(content='t!points')\n        try: msg = await self.bot.wait_for('message', timeout=15, check=self.check)\n        except asyncio.TimeoutError: return False\n        await _msg.delete()\n        if msg is None: return False\n        _content = msg.content\n        if _msg.author.permissions_in(_msg.channel).manage_messages: await msg.delete()\n        return int(''.join(char for char in _content.split(self.balancetext[0])[-1] if char.isdigit()))\n\n    async def rep(self, channel):\n        \"\"\"Returns next Tatsumaki rep time\"\"\"\n        _msg = await channel.send(content='t!rep')\n        try: msg = await self.bot.wait_for('message', timeout=15, check=self.check2)\n        except asyncio.TimeoutError: return False\n        await _msg.delete()\n        if msg is None: return False\n        _content = msg.clean_content\n        if _msg.author.permissions_in(_msg.channel).manage_messages: await msg.delete()\n        curtime = datetime.now()\n        if any(x and x in _content for x in self.repcantext): # skip the empty marker, which would match every message\n            repstring = '0 hours, 0 minutes and 0 seconds'\n        else:\n            repstring = _content.split(self.reptext[0])[1][:-3]\n        reptime = datetime.strptime(repstring, '%H hours, %M minutes and %S seconds')\n        repdelta = timedelta(hours=reptime.hour, minutes=reptime.minute, seconds=reptime.second)\n        return [repstring, reptime, repdelta, curtime+repdelta]\n\n    
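# A minimal sketch of the cooldown parsing that rep() relies on, assuming\n    # Tatsumaki's English \"%H hours, %M minutes and %S seconds\" format\n    # (the sample string below is hypothetical, for illustration only):\n    #\n    #   reptime = datetime.strptime('3 hours, 12 minutes and 5 seconds',\n    #                               '%H hours, %M minutes and %S seconds')\n    #   repdelta = timedelta(hours=reptime.hour, minutes=reptime.minute,\n    #                        seconds=reptime.second)  # -> 3:12:05\n    #\n    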
@commands.group(name='tatsumaki', aliases=['tatsu', 't'], pass_context=True)\n async def _tatsumaki(self, ctx):\n \"\"\"Toggles Tatsumaki auto captcha solving.\"\"\"\n if ctx.invoked_subcommand is not None: return\n await ctx.message.delete()\n self.active = not self.active\n await ctx.message.channel.send(self.bot.bot_prefix + 'Tatsumaki set to: `%s`' % self.active)\n\n @_tatsumaki.command(name='check', pass_context=True)\n async def _check(self, ctx, type: str = 'all'):\n \"\"\"Checks your current credits, guild points and time to next rep\"\"\"\n await ctx.message.delete()\n msg = '_Tatsumaki stats_:\\n\\n'\n if type in ['all', 'credits']:\n credits = await self.credits(ctx.message.channel)\n if credits: msg += 'Global Credits: **{}**\\n'.format(credits)\n if type in ['all', 'points']:\n points = await self.points(ctx.message.channel)\n if points: msg += '*{}* Guild Points: **{}**\\n'.format(ctx.message.channel.guild.name, points)\n if type in ['all', 'rep']:\n rep = await self.rep(ctx.message.channel)\n if rep: msg += 'Next rep available in: **{}**\\nNext Rep available at: **{}'.format(rep[0], rep[3]).partition('.')[0].rstrip()+'**'\n await ctx.message.channel.send(content=msg)\n\n @_tatsumaki.command(name='giveaway', pass_context=True)\n async def _giveaway(self, ctx, type: str = 'default'):\n \"\"\"Gives away all your guildpoints, credits and/or rep to randoms on the current guild\"\"\"\n await ctx.message.delete()\n online_members = []\n for member in ctx.message.channel.guild.members:\n if not member.status == discord.Status.offline and not member.bot:\n online_members.append(member.id)\n random_member = random.choice(online_members)\n while random_member == self.member:\n random_member = random.choice(online_members)\n tatsu = ctx.message.channel.guild.get_member(self.tatsu)\n type = type.lower()\n if type in ['default', 'all' , 'credits']:\n credits = await self.credits(ctx.message.channel)\n if not credits == '0':\n await asyncio.sleep(5)\n msg = await ctx.message.channel.send('t!credits <@{id}> {credits}'.format(id=random_member, credits=credits))\n # await self.bot.delete_message(msg)\n self.member = random_member\n if type in ['default', 'all', 'points']:\n await asyncio.sleep(5)\n while random_member == self.member:\n random_member = random.choice(online_members)\n points = await self.points(ctx.message.channel)\n if not points == '0':\n await asyncio.sleep(5)\n msg = await ctx.message.channel.send('t!points <@{id}> {points}'.format(id=random_member, points=points))\n # await self.bot.delete_message(msg)\n self.member = random_member\n if type in ['all', 'rep']:\n await asyncio.sleep(3)\n while random_member == self.member:\n random_member = random.choice(online_members)\n msg = await ctx.message.channel.send('t!rep {}'.format(random_member))\n await msg.delete()\n self.member = random_member\n\ndef setup(bot):\n bot.add_cog(tatsumaki(bot))\n","repo_name":"LyricLy/Discord-Selfbot-Cogs","sub_path":"tatsumaki.py","file_name":"tatsumaki.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"6884868662","text":"### SAMS Senior CS-Track Hw4\n### Due Date: Friday 07/26 8:30pm\n### Name: Muhaimin Sarker\n### andrewID: msarker\n\n# Write code in each of the functions below such that they fulfill the\n# instructions. 
You can run the file to test your code against the test cases\n# at the bottom.\n\n# Starting this week, you will need to write some test cases yourself to\n# check your code! You can add them to the test functions at the bottom.\n\n# Submit your work on Autolab. Autolab may contain additional test cases,\n# so don't hard-code your solutions. We will manually grade any code that\n# does not have tests, or does not pass the test cases on Autolab.\n\n### Problem 1: Console Interaction ###\n\n\"\"\"\nWrite an interactive function, averageRainfall(), which takes no parameters and\ncalculates the average rainfall based on entries made by the user. The program\nshould repeatedly ask the user to input a rainfall amount, and collect the\nuser's answers. When the user enters the number -999, that is a signal that the\nentry is done, and the program should stop collecting numbers. At that point,\nthe program should average all the valid rainfall entries and print out the\nresult. An entry is only valid if it is a non-negative number.\n\"\"\"\ndef averageRainfall():\n    averageRain= 0\n    term= 0\n    while (True):\n        try:\n            userInput = int(input(\"Input rain fall: \"))\n            if (userInput<0):\n                if (userInput==-999):\n                    if (term>0): # avoid dividing by zero when no valid entries were made\n                        averageRain/=term\n                    break\n                print(\"STOP PUTTING NEGATIVE NUMBERS\")\n                # invalid entries are ignored; they must not change the running count\n            else: \n                averageRain+=userInput\n                term+=1 \n        except ValueError:\n            print(\"ENTER A NUMBER!!!!\")\n    print(\"Average rainfall: \" + str(averageRain))\n    return\n    \n    \n\n\n### Problem 2: Interactive Graphics ###\n\n\"\"\"\nProgram an interactive Sudoku game using Tkinter graphics. If you have never\nplayed Sudoku before, try it out here: http://www.logicgamesonline.com/sudoku/\n\nThis problem comes in three parts. We strongly recommend you complete them in \norder, to make your life easier!\n\nFor additional clarification, you may wish to view an online writeup of the \nproblem here: http://www.krivers.net/15112-f18/notes/hw5.html\n\nSTEP 1\nBuild an interactive Sudoku board using the framework below the # GRAPHICS CODE \nline. You can design your game as you wish, but it should meet the following \nrequirements:\n\n - The game must start by displaying the full 9x9 grid (in the format of a \n standard Sudoku board) and filling in the numbers already included in the \n starter board. We include two starter boards below for testing. Note that\n in our starter boards, a 0 represents a blank space.\n \nboard1 = [\n    [ 5, 3, 0, 0, 7, 0, 0, 0, 0 ],\n    [ 6, 0, 0, 1, 9, 5, 0, 0, 0 ],\n    [ 0, 9, 8, 0, 0, 0, 0, 6, 0 ],\n    [ 8, 0, 0, 0, 6, 0, 0, 0, 3 ],\n    [ 4, 0, 0, 8, 0, 3, 0, 0, 1 ],\n    [ 7, 0, 0, 0, 2, 0, 0, 0, 6 ],\n    [ 0, 6, 0, 0, 0, 0, 2, 8, 0 ],\n    [ 0, 0, 0, 4, 1, 9, 0, 0, 5 ],\n    [ 0, 0, 0, 0, 8, 0, 0, 7, 9 ]\n]\n\nboard2 = [\n    [ 1, 2, 3, 4, 5, 6, 7, 8, 9],\n    [ 5, 0, 8, 1, 3, 9, 6, 2, 4],\n    [ 4, 9, 6, 8, 7, 2, 1, 5, 3],\n    [ 9, 5, 2, 3, 8, 1, 4, 6, 7],\n    [ 6, 4, 1, 2, 9, 7, 8, 3, 5],\n    [ 3, 8, 7, 5, 6, 4, 0, 9, 1],\n    [ 7, 1, 9, 6, 2, 3, 5, 4, 8],\n    [ 8, 6, 4, 9, 1, 5, 3, 7, 2],\n    [ 2, 3, 5, 7, 4, 8, 9, 1, 6]\n]\n\n - At all times, a single cell on the board is highlighted (using either a \n different color or different outline than the rest of the cells). The player \n can change the highlighted cell by clicking on a new cell, or by moving from \n the current cell with the up, down, left, and right arrows.\n\n - To make a move, the player can press a single digit key to insert a number \n into an empty square. 
The player can also clear the number from the \n highlighted square by pressing the backspace key.\n \n - Initial numbers (squares that were filled in before game play began) should \n be a different color than numbers added by a player. In addition, the player \n cannot modify initial numbers.\n\n\nSTEP 2\nFill in the five functions listed below (areLegalValues, isLegalRow, isLegalCol,\nisLegalBlock, and isLegalSudoku) to test whether a given Sudoku board is legal.\nEach function has its own set of requirements listed above the function.\n\nHint: you should use areLegalValues in isLegalRow/isLegalCol/isLegalBlock,\nand you should use those three isLegals in isLegalSudoku!\n\n\nSTEP 3\nUpdate your interactive graphics code to use your Sudoku legality checking code,\nwhich will make the game playable. You'll need to add the following features:\n\n - The user should only be allowed to enter a number if it will still result in \n a valid board, as determined by isLegalSudoku.\n\n - If, after a move, the player has properly filled in the entire board and won \n the game, a message should be displayed congratulating the player. After \n this, all further keypresses and mouse presses should be ignored.\n\nGet creative, and have fun!\n\"\"\"\n\n\n\"\"\"\nThis function takes a 1D list of values. These values may be extracted from any \ngiven row, column, or block in a Sudoku board. The function returns True if the \nvalues are legal: that is, if every value is an integer between 0 and 9, \ninclusive, and if each integer from 1 to 9 occurs at most once in the given \nlist (0 may be repeated, of course).\n\"\"\"\ndef areLegalValues(values):\n for i in range(len(values)):\n if (values[i]>=10 or values[i]<0):\n return False\n if (type(values[i])!=int):\n return False\n for k in range(1,10):\n if (values.count(k)>1):\n return False\n return True\n\"\"\"\nThis function takes a Sudoku board and a row number. The function returns True \nif the given row in the given board is legal (where row 0 is the top row and \nrow 8 is the bottom row), and False otherwise.\n\"\"\"\ndef isLegalRow(board, row):\n return areLegalValues(board[row])\n\n\"\"\"\nThis function takes a Sudoku board and a column number. The function returns \nTrue if the given column in the given board is legal (where col 0 is the left \ncolumn and col 8 is the right column), and False otherwise.\n\"\"\"\ndef isLegalCol(board, col):\n coldata= []\n for i in range(len(board)):\n coldata.append(board[i][col])\n return areLegalValues(coldata)\n\n\"\"\"\nThis function works just like isLegalRow and isLegalCol, only for blocks. In a\nSudoku board, we'll label the blocks as follows, where each block is a 3x3 grid:\n\n0 1 2\n3 4 5\n6 7 8\n\nSo Block 0 is the top-left block, and block numbers proceed across, then down.\nFor additional clarification, see this writeup:\nhttp://www.krivers.net/15112-f18/notes/hw5.html\n\nHint: use div and mod to find the starting row and col for each block!\n\"\"\"\ndef isLegalBlock(board, block):\n startRow= (block//3)*3\n startCol= (block%3)*3\n blockdata= []\n for k in range(startRow, startRow+3):\n for i in range(startCol, startCol+3):\n blockdata.append(board[k][i])\n return areLegalValues(blockdata)\n\n\"\"\"\nThis function takes a Sudoku board (which you may assume is a 9x9 2D list of \nintegers), and returns True if the board is legal. 
A Sudoku board is legal if\nevery row, column, and block in the board is legal.\n\"\"\"\ndef isLegalSudoku(board):\n    # every row, column, and block must be legal, so fail as soon as any index is illegal;\n    # returning inside the first iteration would check only index 0 and skip the other eight\n    for i in range(9):\n        if (not (isLegalBlock(board,i) and isLegalCol(board,i) and isLegalRow(board,i))):\n            return False\n    return True\n    \n    \n\n######################################################################\n# GRAPHICS CODE\n# ignore_rest: The autograder will ignore all code below here\n######################################################################\n\nfrom tkinter import *\n\ndef init(data):\n    data.grid= [\n    [ 5, 3, 0, 0, 7, 0, 0, 0, 0 ],\n    [ 6, 0, 0, 1, 9, 5, 0, 0, 0 ],\n    [ 0, 9, 8, 0, 0, 0, 0, 6, 0 ],\n    [ 8, 0, 0, 0, 6, 0, 0, 0, 3 ],\n    [ 4, 0, 0, 8, 0, 3, 0, 0, 1 ],\n    [ 7, 0, 0, 0, 2, 0, 0, 0, 6 ],\n    [ 0, 6, 0, 0, 0, 0, 2, 8, 0 ],\n    [ 0, 0, 0, 4, 1, 9, 0, 0, 5 ],\n    [ 0, 0, 0, 0, 8, 0, 0, 7, 9 ]\n    ]\n    data.colors= [\n    [ \"red\", \"red\", \"red\", \"green\", \"green\", \"green\", \"blue\", \"blue\", \"blue\" ],\n    [ \"red\", \"red\", \"red\", \"green\", \"green\", \"green\", \"blue\", \"blue\", \"blue\" ],\n    [ \"red\", \"red\", \"red\", \"green\", \"green\", \"green\", \"blue\", \"blue\", \"blue\" ],\n    [ \"yellow\", \"yellow\", \"yellow\", \"purple\", \"purple\", \"purple\", \"orange\", \"orange\", \"orange\" ],\n    [ \"yellow\", \"yellow\", \"yellow\", \"purple\", \"purple\", \"purple\", \"orange\", \"orange\", \"orange\" ],\n    [ \"yellow\", \"yellow\", \"yellow\", \"purple\", \"purple\", \"purple\", \"orange\", \"orange\", \"orange\" ],\n    [ \"white\", \"white\", \"white\", \"gray\", \"gray\", \"gray\", \"brown\", \"brown\", \"brown\" ],\n    [ \"white\", \"white\", \"white\", \"gray\", \"gray\", \"gray\", \"brown\", \"brown\", \"brown\" ],\n    [ \"white\", \"white\", \"white\", \"gray\", \"gray\", \"gray\", \"brown\", \"brown\", \"brown\" ]\n    ]\n    \n    data.buttonX, data.buttonY = data.width/9, data.height/9\n    data.buttonClicked = False\n    data.buttonSize= 81\n    data.prevColor=''\n    \ndef mousePressed(event, data):\n    for row in range(9):\n        for col in range(9):\n            if data.colors[row][col]==\"aqua\":\n                data.colors[row][col]=data.prevColor\n    col= event.x//(data.width//9)\n    row= event.y//(data.height//9)\n    data.prevColor= data.colors[row][col]\n    data.colors[row][col]= \"aqua\"\n    \n    \ndef keyPressed(event, data):\n    pass\ndef redrawAll(canvas, data):\n    fillNumber=0\n    for row in range(9):\n        for col in range(9):\n            fillNumber+=1\n            newHeight = row*(data.width/9) #Takes the value of the row and multiplies it\n            newWidth = col*(data.width/9) #Takes the value of the column and multiplies it\n            canvas.create_rectangle(newWidth, newHeight, newWidth + (data.width/9), newHeight + (data.width/9), fill= data.colors[row][col])\n            if (data.grid[row][col]!=0):\n                canvas.create_text(((2*newWidth+(data.width/9))/2), ((2*newHeight+(data.height/9))/2), text=data.grid[row][col], font=\"Arial 20 bold\")\ndef runSudoku(width=300, height=300):\n    def redrawAllWrapper(canvas, data):\n        canvas.delete(ALL)\n        canvas.create_rectangle(0, 0, data.width, data.height,\n                                fill='white', width=0) \n        redrawAll(canvas, data)\n        canvas.update() \n\n    def mousePressedWrapper(event, canvas, data):\n        mousePressed(event, data) # handle the click first so the new highlight shows up on this redraw\n        redrawAllWrapper(canvas, data)\n\n    def keyPressedWrapper(event, canvas, data):\n        keyPressed(event, data)\n        redrawAllWrapper(canvas, data)\n\n    # Set up data and call init\n    class Struct(object): pass\n    data = Struct()\n    data.width = width\n    data.height = height\n    root = Tk()\n    root.resizable(width=False, height=False) # prevents resizing window\n    init(data)\n    # create the root and the 
canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.configure(bd=0, highlightthickness=0)\n canvas.pack()\n # set up events\n root.bind(\"\", lambda event:\n mousePressedWrapper(event, canvas, data))\n root.bind(\"\", lambda event:\n keyPressedWrapper(event, canvas, data))\n redrawAll(canvas, data)\n # and launch the app\n root.mainloop() # blocks until window is closed\n\n\n######################################################################\n# TEST CODE\n######################################################################\n\ndef testAverageRainfall():\n print(\"Testing averageRainfall()...\", end=\"\")\n averageRainfall()\n print(\"Done with these calculations\")\ndef getBasicBoard():\n return [\n [ 5, 3, 0, 0, 7, 0, 0, 0, 0 ],\n [ 6, 0, 0, 1, 9, 5, 0, 0, 0 ],\n [ 0, 9, 8, 0, 0, 0, 0, 6, 0 ],\n [ 8, 0, 0, 0, 6, 0, 0, 0, 3 ],\n [ 4, 0, 0, 8, 0, 3, 0, 0, 1 ],\n [ 7, 0, 0, 0, 2, 0, 0, 0, 6 ],\n [ 0, 6, 0, 0, 0, 0, 2, 8, 0 ],\n [ 0, 0, 0, 4, 1, 9, 0, 0, 5 ],\n [ 0, 0, 0, 0, 8, 0, 0, 7, 9 ]\n ]\ndef getNewBoard():\n return [\n [ 1, 1, 1, 0, 0, 9, 4, 0, 0 ],\n [ 1, 1, 1, 20, 10, 9, 4, 0, 0 ],\n [ -5, 6, 7, -20, 10, 9, 4, 6, 0],\n [ 5, 90, 5, '', 10, 9, 4, 0, 3 ],\n [ 4, 0, 0, 8, 0, 3, 0, 0, 1 ],\n [ 7, 0, 0, 0, 2, 0, 0, 0, 6 ],\n [ 0, 6, 0, 0, 0, 0, 2, 8, 0 ],\n [ 0, 0, 0, 4, 1, 9, 0, 0, 5 ],\n [ 0, 0, 0, 0, 8, 0, 0, 7, 9 ]\n ]\n\ndef testAreLegalValues():\n print(\"Testing areLegalValues()...\", end=\"\")\n for i in range(len(getBasicBoard())):\n print(i)\n assert(areLegalValues(getBasicBoard()[i]) == True)\n print(\"Done testing basic board\")\n for i in range(4):\n print(i)\n assert(areLegalValues(getNewBoard()[i]) == False)\n print(\"WROTE THEM CASES for areLegalValues!\")\n print()\n \n\ndef testIsLegalRow():\n print(\"Testing isLegalRow()...\", end=\"\")\n for i in range(len(getBasicBoard())):\n print(i)\n assert(isLegalRow(getBasicBoard(), i) == True)\n print(\"Done testing basic board\")\n for i in range(4):\n print(i)\n assert(isLegalRow(getNewBoard(), i) == False)\n print(\"HAHA TESTED EVERYTHING!!!! for those ROWS\")\n print()\n\ndef testIsLegalCol():\n print(\"Testing isLegalCol()...\", end=\"\")\n for i in range(len(getBasicBoard())):\n print(i)\n assert(isLegalCol(getBasicBoard(), i) == True)\n print(\"Done testing basic board\")\n for i in range(6):\n print(i)\n assert(isLegalCol(getNewBoard(), i) == False)\n print(\"COLUMNS ARE NO PROBLEM SINCE I PASSED ALL THEM TESTS\")\n print()\n\ndef testIsLegalBlock():\n print(\"Testing isLegalBlock()...\", end=\"\")\n for i in range(len(getBasicBoard())):\n print(i)\n assert(isLegalBlock(getBasicBoard(), i) == True)\n print(\"Done testing basic board\")\n for i in range(3):\n print(i)\n assert(isLegalBlock(getNewBoard(), i) == False)\n print(\"Passed the block tests. Sudoku tests are incoming\")\n print()\n\ndef testIsLegalSudoku():\n print(\"Testing isLegalSudoku()...\", end=\"\")\n assert(isLegalSudoku(getBasicBoard()) == True)\n assert(isLegalSudoku(getNewBoard())==False)\n print(\"Sudoku tests were passed. 
It was a joint effort from helper functions\")\ndef testSudokuAnimation():\n print(\"Running Sudoku Animation...\", end=\"\")\n # Feel free to change the width and height!\n width = 540\n height = 540\n runSudoku(width, height)\n print(\"Done.\")\n\ndef testAll():\n # 1\n testAverageRainfall()\n # 2\n testSudokuAnimation()\n testAreLegalValues()\n testIsLegalRow()\n testIsLegalCol()\n testIsLegalBlock()\n testIsLegalSudoku()\n\ntestAll()","repo_name":"muhaiminsarker/SAMS-Programming","sub_path":"Muhaimin's Homework 4.py","file_name":"Muhaimin's Homework 4.py","file_ext":"py","file_size_in_byte":14963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10539725933","text":"\nimport os\nimport tempfile\nimport logging as log\nfrom func_setups import FuncSetups\nfrom eth_proxy import EthProxyHttp, EthNodeSigner\nfrom eth_proxy import EthContract\nfrom eth_proxy.utils import bytes_to_str\nimport re\nimport os\n\n# End-to-end test using EthContract abstraction\n# in synchronous mode (for quickie development)\n\n\n#\nlib_src = \\\n '''\npragma solidity ^0.4.0; \n\nlibrary TestLib\n{\n struct InnerStruct {\n int32 x;\n int32 y;\n }\n \n struct DataBlock {\n int16 a;\n string str;\n int16 b;\n InnerStruct inner;\n }\n \n function TestLib()\n {\n \n } \n \n function init(DataBlock storage db)\n {\n db.a = -1;\n db.b = -2;\n db.str = \"The Data\";\n db.inner.x = -100;\n db.inner.y = -200; \n }\n \n function getInner(DataBlock storage db) internal returns(InnerStruct storage inner)\n {\n return db.inner;\n }\n \n function setA(DataBlock storage db, int16 val) returns(int16)\n {\n db.a = val;\n return val;\n }\n\n function setB(DataBlock storage db, int16 val) \n {\n db.b = val;\n }\n\n function setB2(DataBlock storage db, int16 val) \n {\n db.b = val;\n }\n \n function setB3(DataBlock storage db, int16 val) \n {\n db.b = val;\n }\n \n function setB4(DataBlock storage db, int16 val) \n {\n db.b = val;\n }\n \n function setB5(DataBlock storage db, int16 val) \n {\n db.b = val;\n }\n \n function setB6(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n\n function setB7(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n \n function setB8(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n \n function setB9(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n \n function setB10(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n \n function setB11(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n \n function setB12(DataBlock storage db, int16 val) \n {\n db.b = val;\n } \n} \n '''\n\nlib2_src = \\\n '''\npragma solidity ^0.4.0; \n\nlibrary AnotherLib\n{ \n struct Data{\n int x;\n int y;\n }\n\n function dataSum(Data storage db) \n public\n returns(int)\n {\n return db.x + db.y;\n }\n \n} \n ''' \n \n \ncaller_src = \\\n '''\npragma solidity ^0.4.0; \n\nimport \"{{libpath}}\";\nimport \"{{lib2path}}\";\n \ncontract TestCaller\n{\n\n TestLib.DataBlock db;\n AnotherLib.Data alData;\n \n //using AnotherLib for AnotherLib.Data;\n \n int16 x; \n int16 y;\n\n function TestCaller( int16 aVal, int16 yVal)\n {\n TestLib.init(db);\n TestLib.setA(db, aVal);\n y = yVal;\n \n alData.x = 123;\n alData.y = 999;\n }\n\n function getX() returns (int16)\n {\n return x;\n }\n\n function getY() returns (int16)\n {\n return y;\n }\n\n function _getInner() internal returns (TestLib.InnerStruct storage)\n {\n return TestLib.getInner(db);\n }\n \n function getInnerX() returns (int32)\n {\n TestLib.InnerStruct storage i = 
_getInner();\n return i.x;\n }\n \n function getDbA() returns (int16)\n {\n return db.a;\n }\n\n function getAnotherSum()\n public\n returns (int)\n {\n return AnotherLib.dataSum(alData);\n }\n\n function libSetA(int16 val)\n {\n TestLib.setA(db,val); \n TestLib.setB(db,val); \n TestLib.setA(db,val); \n TestLib.setB2(db,val); \n TestLib.setA(db,val); \n TestLib.setB3(db,val); \n TestLib.setA(db,val); \n TestLib.setB4(db,val);\n TestLib.setA(db,val); \n TestLib.setB5(db,val); \n TestLib.setA(db,val); \n TestLib.setB6(db,val); \n TestLib.setA(db,val); \n TestLib.setB7(db,val); \n TestLib.setA(db,val); \n TestLib.setB8(db,val);\n TestLib.setA(db,val); \n TestLib.setB9(db,val);\n TestLib.setA(db,val); \n TestLib.setB10(db,val);\n \n TestLib.setA(db,val); \n TestLib.setB(db,val); \n TestLib.setA(db,val); \n TestLib.setB2(db,val); \n TestLib.setA(db,val); \n TestLib.setB3(db,val); \n TestLib.setA(db,val); \n TestLib.setB4(db,val);\n TestLib.setA(db,val); \n TestLib.setB5(db,val); \n TestLib.setA(db,val); \n TestLib.setB6(db,val); \n TestLib.setA(db,val); \n TestLib.setB7(db,val); \n TestLib.setA(db,val); \n TestLib.setB8(db,val);\n TestLib.setA(db,val); \n TestLib.setB9(db,val);\n TestLib.setA(db,val); \n TestLib.setB10(db,val); \n \n TestLib.setA(db,val); \n TestLib.setB(db,val); \n TestLib.setA(db,val); \n TestLib.setB2(db,val); \n TestLib.setA(db,val); \n TestLib.setB3(db,val); \n TestLib.setA(db,val); \n TestLib.setB4(db,val);\n TestLib.setA(db,val); \n TestLib.setB5(db,val); \n TestLib.setA(db,val); \n TestLib.setB6(db,val); \n TestLib.setA(db,val); \n TestLib.setB7(db,val); \n TestLib.setA(db,val); \n TestLib.setB8(db,val);\n TestLib.setA(db,val); \n TestLib.setB9(db,val);\n TestLib.setA(db,val); \n TestLib.setB10(db,val);\n }\n\n function libSetB(int16 val)\n {\n TestLib.setB(db,val); \n } \n\n} \n ''' \n \n#\n# Use to check EthContract.link_library()\n#\ndef insert_library_address(bytecode, libspec, address):\n '''\n libscpe is :\n ie\" ./base/contract/bob.sol:BobContract\"\n ''' \n print(\"\\n\\n{0}\".format(bytecode))\n pat = '__({0})__+'.format(libspec)\n newcode = re.sub(pat,address,bytecode,0)\n print(\"\\n\\n{0}\".format(newcode))\n return newcode\n \n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\nfs = FuncSetups()\n\nlib_path = fs.write_temp_contract(\"test-lib.sol\", lib_src) \nlib2_path = fs.write_temp_contract(\"test-lib2.sol\", lib2_src) \nlib_folder = os.path.dirname(lib_path)\n\n\nos.chdir(lib_folder)\nlib_path = \"./test-lib.sol\"\nlib2_path = \"./test-lib2.sol\"\n\n# insert actual library source path,\n# needs to happen because files are written to temp\ncaller_src = str.replace(caller_src, '{{libpath}}', lib_path)\ncaller_src = str.replace(caller_src, '{{lib2path}}', lib2_path)\ncaller_path = fs.write_temp_contract(\"test_caller.sol\", caller_src)\n\n#eth = fs.create_proxy()\neth = fs.create_proxy('https://infuranet.infura.io:443')\nassert(eth)\n\nkeystore = fs.create_keystore()\nassert(keystore)\n\naccount = fs.get_account(keystore, 0)\n\neth.set_eth_signer(keystore)\n \nether = eth.eth_getBalance(account)\nlog.info(\"Account: {0} Ether balance: {1}\".format(account, ether))\nif ether == 0:\n raise RuntimeError(\"Account has no ether.\")\n \n# install library\nlib_con = EthContract(None, eth, account) # No description path\nlib_con.new_source(lib_path,'TestLib')\ntxdata = lib_con.install_sync() # sync mode\nif not lib_con.installed():\n raise RuntimeError(\"library creation failed\")\nprint(\"TestLib Address: 
{0}\".format(lib_con.address()))\n\n# install other library\nlib2_con = EthContract(None, eth, account) # No description path\nlib2_con.new_source(lib2_path, 'AnotherLib')\ntxdata = lib2_con.install_sync() # sync mode\nif not lib2_con.installed():\n raise RuntimeError(\"library creation failed\")\nprint(\"AnotherLib Address: {0}\".format(lib2_con.address())) \n \n# create caller\ncall_con = EthContract( None, eth, account) # No description path\ncall_con.new_source(caller_path, \"TestCaller\")\n\nlibs_needed = call_con.library_stubs()\nprint( \"Libs that need to be linked: {0}\".format(libs_needed))\n\n\nUSE_EXTERNAL_LINK = False\nif USE_EXTERNAL_LINK:\n print(' >> Linking: **** Using External regex replacement ***') \n bcode = call_con._hex_bytedata\n newbc = insert_library_address(bcode,'{0}:TestLib'.format(lib_path),lib_con.address()[2:])\n newbc2 = insert_library_address(newbc,'{0}:AnotherLib'.format(lib2_path),lib2_con.address()[2:]) \n call_con._hex_bytedata = newbc2 \nelse:\n print(' >> Linking: Using EthContract.link()')\n call_con.link_library('{0}:TestLib'.format(lib_path),lib_con.address()) \n call_con.link_library('{0}:AnotherLib'.format(lib2_path),lib2_con.address())\n \n\n\n\ntxdata = call_con.install_sync([123, 234], gas=3000000) # sync mode\nif not call_con.installed():\n raise RuntimeError(\"Caller creation failed\") \n\n\n#\n[x_val] = call_con.call('getX') \nprint(\"X: {0}\".format(x_val))\n\n[y_val] = call_con.call('getY') \nprint(\"y: {0}\".format(y_val))\n\n[a_val] = call_con.call('getDbA') \nprint(\"db.a: {0}\".format(a_val))\n\n[val] = call_con.call('getInnerX') \nprint(\"db.inner.x: {0}\".format(val))\n\n\n[val] = call_con.call('getAnotherSum') \nprint(\"getAnotherSum(): {0}\".format(val)) \nassert( val == 123+999) \n \nexit() \n\n\n[b_msg] = contract.call('checkArray', [1])\nmsg = bytes_to_str(b_msg) \nprint(\"struct[1] msg: {0}\".format(msg))\n \n[b_msg] = contract.call('checkMap', [1])\nmsg = bytes_to_str(b_msg) \nprint(\"map[1] msg: {0}\".format(msg)) \n \n[b_msg] = contract.call('checkArray', [2])\nmsg = bytes_to_str(b_msg) \nprint(\"struct[2] msg: {0}\".format(msg))\n \n \n[b_msg] = contract.call('checkMap', [7])\nmsg = bytes_to_str(b_msg) \nprint(\"map[7] msg: {0}\".format(msg)) \n \n[s] = contract.call('return_string')\nmsg = bytes_to_str(s) \nprint(\"return_string(): {0}\".format(msg)) \n \n \n \n \n \n# Send a tx\nprint(\"Sending function TX to contract\")\nmsg = contract.transaction_sync('SetTheInt', [863])\nif msg['err']:\n raise RuntimeError(\"Contract TX failed: {0}\".format(msg['errmsg'])) \n \nrcpt = eth.eth_getTransactionReceipt(msg['tx_hash'])\nprint(\"Rcpt: {0}\".format(rcpt))\n\nlogs = eth.get_transaction_logs(msg['tx_hash'])\nprint(\"Logs: {0}\".format(logs))\n \n \n# check the tx worked\nprint(\"Calling contract function\") \n[result] = contract.call( 'aPublicInt')\nprint(\"Result {0}\".format(result)) \n \nlog.info(\"Done EthContract test...\\n\")\n\n\n","repo_name":"jimkberry/eth_proxy.py","sub_path":"func_tests/test_library.py","file_name":"test_library.py","file_ext":"py","file_size_in_byte":11462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"26530231870","text":"from botocore.vendored import requests\nimport json\nimport os\nsessionAttributes = {}\ndef lambda_handler(event, context):\n key = os.environ['oauthkey']\n baseurl = 'https://console.cloud.vmware.com/csp/gateway'\n uri = '/am/api/auth/api-tokens/authorize'\n headers = {'Content-Type':'application/json'}\n payload = 
{'refresh_token': key}\n    r = requests.post(f'{baseurl}{uri}', headers = headers, params = payload)\n    if r.status_code != 200:\n        print(f'Unsuccessful Login Attempt. Error code {r.status_code}')\n    else:\n        print('Login successful. ') \n    auth_header = r.json()['access_token']\n    finalHeader = {'Content-Type':'application/json','csp-auth-token':auth_header}\n    req = requests.get('https://vmc.vmware.com/vmc/api/orgs', headers = finalHeader)\n    myorgs = req.json()\n\n    arr = []\n    for i, b in enumerate(myorgs):\n        arr.append(b['display_name'])\n    arr.sort()\n    \n    newarray = []\n    for a, c in enumerate(arr):\n        newarray.append('*'+str(a)+'*'+' - '+c+'\\n')\n    strlist = ''.join(newarray)\n    \n    response = {\n        \"sessionAttributes\": { \n            \"key1\": \"Brian\",\n            \"key2\": \"Graf\"\n        },\n        \"dialogAction\":\n        {\n            \"fulfillmentState\":\"Fulfilled\",\n            \"type\":\"Close\",\"message\":\n            {\n                \"contentType\":\"PlainText\",\n                \"content\": strlist\n            }\n        }\n    }\n    return response\n","repo_name":"TheBrianGraf/vmclex","sub_path":"Lambda Python Code/getUserOrgs.py","file_name":"getUserOrgs.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"72206601846","text":"#sourcecode\nl=list(map(int,input().split()))\nfor i in l:\n    i=i-2*i\n    print(i,end=' ')\n#invert the signs\n\n \n'''\nSample IP:\n1 2 -3\n\nSample OP:\n-1 -2 3\n'''\n","repo_name":"ThejeshwarAB/daily-programs","sub_path":"python/numbers_211019.py","file_name":"numbers_211019.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"44699600075","text":"import os\nimport datetime\n\npath = '.'\n# Get the list of entries under the path\nfiles = os.listdir(path)\n\n# Add the entry to the list if it is not a hidden file and is a directory\nfiles_dir = [file for file in files if not file.startswith('.') and os.path.isdir(os.path.join(path, file))]\n\n\nfor dir_name in files_dir:\n    \n    files = os.listdir(os.path.join(path, dir_name))\n    for file in files:\n\n        # Split the string on '.' and store the parts in a list\n        # File name\n        file_name = file.split('.')[0]\n        # Extension\n        ext = file.split('.')[1]\n\n        # Build the file path\n        file_path = os.path.join(path, dir_name, file)\n\n        # Get the file's creation time\n        created_time = os.stat(file_path).st_birthtime\n\n        # Convert the UNIX time to a datetime\n        date = datetime.datetime.fromtimestamp(created_time)\n        # Format the datetime as a string: YYYYMMDD\n        date = date.strftime('%Y%m%d')\n\n        # New file name\n        new_file = f\"{date}_{file_name}.{ext}\"\n\n        # Old file name: file_path\n        # New file name: os.path.join(path, dir_name, new_file)\n        # Rename the file\n        os.rename(file_path, os.path.join(path, dir_name, new_file))","repo_name":"sc30gsw/PythonLearning","sub_path":"automation/ch_filename.py","file_name":"ch_filename.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"5326640364","text":"from typing import List\n\n\nclass Solution:\n    def distributeCandies(self, candies: int, n: int) -> List[int]:\n        ans = [0 for _ in range(n)]\n        i = 0\n        while candies > 0:\n            ans[i % n] += min(candies, i + 1)\n            candies -= i + 1\n            i += 1\n\n        return ans\n","repo_name":"srajsonu/LeetCode-Solutions-Python","sub_path":"Maths/1103. 
Distribute Candies to People.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17883197208","text":"from machine import Pin, ADC\nimport time\n\n# set up the ADC pin\nadc = ADC(Pin(2))\n\nwhile True:\n # read the analog voltage from the ADC pin\n temp = adc.read()\n\n print(\"Temperatura: \" + str(temp))\n\n # wait for some time before taking another reading\n time.sleep(0.5)\n","repo_name":"IssacCloudVII/Inmotica","sub_path":"Codigos/Termopar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36554054115","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef raspar(url, tag, cls):\n headers = {'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}\n page = requests.get(url, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n lista = soup.find_all(tag, {\"class\": cls})\n return lista\n\n\ndef buscar(dados, tag, cls):\n return dados.find_all(tag, {\"class\": cls})\n\n\ndef extensao(nome):\n index = nome.rfind('.')\n return nome[index:]\n\n\ndef baixar(url, endereco):\n url = str(url)\n resposta = requests.get(url)\n if resposta.status_code == requests.codes.OK:\n with open(endereco, 'wb') as novo:\n novo.write(resposta.content)\n print(f\"Imagem salva em: {endereco}\")\n else:\n resposta.raise_for_status()\n\n\ndef atleta(url):\n html = raspar(url, 'div', 'row py-3 magic-row')[0]\n\n cls0 = 'text-danger d-flex justify-content-center align-items-center font-weight-bold shirt-number'\n cls1 = 'text-white text-uppercase'\n cls2 = 'img-fluid d-none d-md-inline-block img-persona'\n\n numero = buscar(html, 'span', cls0)[0].get_text().strip()\n nome = buscar(html, 'h2', cls1)[0].get_text().strip()\n nome_completo = buscar(html, 'p', cls1)[0].get_text().strip()\n posicao = buscar(html, 'li', cls1)[0].get_text().strip()\n nascimento = buscar(html, 'li', cls1)[1].get_text().strip()\n cidade = buscar(html, 'li', cls1)[2].get_text().strip()\n link_img = buscar(html, 'img', cls2)[0]['src']\n\n posicao = posicao.replace(\"Posição: \", \"\")\n nascimento = nascimento.replace(\"Nascimento: \", \"\")[0:10]\n cidade = cidade.replace(\"Cidade: \", \"\")\n\n ext = extensao(link_img)\n imagem = f\"img/jogadores/flamengo_{numero}{ext}\"\n\n dicionario = {\n \"nome\": nome,\n \"nome_completo\": nome_completo,\n \"posicao\": posicao,\n \"nascimento\": nascimento,\n \"cidade\": cidade,\n \"imagem\": imagem,\n \"link_img\": link_img\n }\n\n print(f\"{numero}: {nome}, {nome_completo}, {posicao}, {nascimento}, {cidade}\")\n baixar(link_img, imagem)\n return [str(numero), dicionario]\n\n\ndef elenco(url, cls='elenco-atleta'):\n dicionario = {}\n elencoHTML = raspar(url, 'div', cls)\n\n for html in elencoHTML:\n link = html.a['href']\n dados = atleta(link)\n dicionario[dados[0]] = dados[1]\n\n return dicionario\n\n\ndef flamengo():\n url = \"https://www.flamengo.com.br/elencos/elenco-profissional\"\n colunas = ['nome', 'nome_completo', 'posicao',\n 'nascimento', 'cidade', 'imagem', 'link_img']\n df = pd.DataFrame(elenco(url))\n df.to_json('assets/json/flamengo.json')\n\n\ndef palmeiras():\n url = \"https://www.palmeiras.com.br/elenco/\"\n elencoHTML = raspar(url, \"div\", \"box-elenco\")\n\n dicionario = {}\n for jogadorHTML in elencoHTML:\n jogador = jogadorHTML.a\n 
link_jogador = jogador[\"href\"]\n link_img = jogador.img[\"src\"]\n numero = jogador.h4.get_text().strip()\n nome = jogador.div.h6.get_text().strip()\n posicao = jogador.div.p.get_text().strip()\n if numero == \"\":\n numero = 33\n\n dadosJogadorHTML = raspar(link_jogador, \"div\", \"dados-jogador\")\n for dadoHTML in dadosJogadorHTML:\n conteudoHTML = buscar(dadoHTML, \"div\", \"row quadro-content\")[0]\n colunaHTML = conteudoHTML.find_all(\"span\")\n nome_completo = colunaHTML[0].get_text().strip()\n nascimento = colunaHTML[1].get_text().strip()\n cidade = colunaHTML[2].get_text().strip()\n\n ext = extensao(link_img)\n imagem = f\"img/jogadores/palmeiras_{numero}{ext}\"\n\n dict_num = {\n \"nome\": nome,\n \"nome_completo\": nome_completo,\n \"posicao\": posicao,\n \"nascimento\": nascimento,\n \"cidade\": cidade,\n \"imagem\": imagem,\n \"link_img\": link_img\n }\n\n print(\n f\"{numero}: {nome}, {nome_completo}, {posicao}, {nascimento}, {cidade}\")\n baixar(link_img, imagem)\n\n dicionario[str(numero)] = dict_num\n\n df = pd.DataFrame(dicionario)\n df.to_json('assets/json/palmeiras.json')\n print(\"json salvo em: 'assets/json/palmeiras.json'\")\n\n\n# palmeiras()\n","repo_name":"tiaonazario/futeflix","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11430274571","text":"from __future__ import absolute_import, print_function\n\nimport itertools\nfrom mercurial import pycompat\nfrom hgext import absorb\n\nclass simplefctx(object):\n def __init__(self, content):\n self.content = content\n\n def data(self):\n return self.content\n\ndef insertreturns(x):\n # insert \"\\n\"s after each single char\n if isinstance(x, bytes):\n return b''.join(ch + b'\\n' for ch in pycompat.bytestr(x))\n else:\n return pycompat.maplist(insertreturns, x)\n\ndef removereturns(x):\n # the revert of \"insertreturns\"\n if isinstance(x, bytes):\n return x.replace(b'\\n', b'')\n else:\n return pycompat.maplist(removereturns, x)\n\ndef assertlistequal(lhs, rhs, decorator=lambda x: x):\n if lhs != rhs:\n raise RuntimeError('mismatch:\\n actual: %r\\n expected: %r'\n % tuple(map(decorator, [lhs, rhs])))\n\ndef testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None):\n \"\"\"([str], str, [str], [(rev, a1, a2, b1, b2)]?) 
-> None\n\n workingcopy is a string, of which every character denotes a single line.\n\n oldcontents, expectedcontents are lists of strings, every character of\n every string denots a single line.\n\n if fixups is not None, it's the expected fixups list and will be checked.\n \"\"\"\n expectedcontents = insertreturns(expectedcontents)\n oldcontents = insertreturns(oldcontents)\n workingcopy = insertreturns(workingcopy)\n state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents),\n 'path')\n state.diffwith(simplefctx(workingcopy))\n if fixups is not None:\n assertlistequal(state.fixups, fixups)\n state.apply()\n assertlistequal(state.finalcontents, expectedcontents, removereturns)\n\ndef buildcontents(linesrevs):\n # linesrevs: [(linecontent : str, revs : [int])]\n revs = set(itertools.chain(*[revs for line, revs in linesrevs]))\n return [b''] + [\n b''.join([l for l, rs in linesrevs if r in rs])\n for r in sorted(revs)\n ]\n\n# input case 0: one single commit\ncase0 = [b'', b'11']\n\n# replace a single chunk\ntestfilefixup(case0, b'', [b'', b''])\ntestfilefixup(case0, b'2', [b'', b'2'])\ntestfilefixup(case0, b'22', [b'', b'22'])\ntestfilefixup(case0, b'222', [b'', b'222'])\n\n# input case 1: 3 lines, each commit adds one line\ncase1 = buildcontents([\n (b'1', [1, 2, 3]),\n (b'2', [ 2, 3]),\n (b'3', [ 3]),\n])\n\n# 1:1 line mapping\ntestfilefixup(case1, b'123', case1)\ntestfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c'])\ntestfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3'])\ntestfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc'])\ntestfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23'])\ntestfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c'])\ntestfilefixup(case1, b'ab3', [b'', b'a', b'ab', b'ab3'])\ntestfilefixup(case1, b'abc', [b'', b'a', b'ab', b'abc'])\n\n# non 1:1 edits\ntestfilefixup(case1, b'abcd', case1)\ntestfilefixup(case1, b'ab', case1)\n\n# deletion\ntestfilefixup(case1, b'', [b'', b'', b'', b''])\ntestfilefixup(case1, b'1', [b'', b'1', b'1', b'1'])\ntestfilefixup(case1, b'2', [b'', b'', b'2', b'2'])\ntestfilefixup(case1, b'3', [b'', b'', b'', b'3'])\ntestfilefixup(case1, b'13', [b'', b'1', b'1', b'13'])\n\n# replaces\ntestfilefixup(case1, b'1bb3', [b'', b'1', b'1bb', b'1bb3'])\n\n# (confusing) replaces\ntestfilefixup(case1, b'1bbb', case1)\ntestfilefixup(case1, b'bbbb', case1)\ntestfilefixup(case1, b'bbb3', case1)\ntestfilefixup(case1, b'1b', case1)\ntestfilefixup(case1, b'bb', case1)\ntestfilefixup(case1, b'b3', case1)\n\n# insertions at the beginning and the end\ntestfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c'])\ntestfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123'])\n\n# (confusing) insertions\ntestfilefixup(case1, b'1a23', case1)\ntestfilefixup(case1, b'12b3', case1)\n\n# input case 2: delete in the middle\ncase2 = buildcontents([\n (b'11', [1, 2]),\n (b'22', [1 ]),\n (b'33', [1, 2]),\n])\n\n# deletion (optimize code should make it 2 chunks)\ntestfilefixup(case2, b'', [b'', b'22', b''],\n fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)])\n\n# 1:1 line mapping\ntestfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])\n\n# non 1:1 edits\n# note: unlike case0, the chunk is not \"continuous\" and no edit allowed\ntestfilefixup(case2, b'aaa', case2)\n\n# input case 3: rev 3 reverts rev 2\ncase3 = buildcontents([\n (b'1', [1, 2, 3]),\n (b'2', [ 2 ]),\n (b'3', [1, 2, 3]),\n])\n\n# 1:1 line mapping\ntestfilefixup(case3, b'13', case3)\ntestfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b'])\ntestfilefixup(case3, b'a3', [b'', b'a3', b'a23', 
b'a3'])\ntestfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab'])\n\n# non 1:1 edits\ntestfilefixup(case3, b'a', case3)\ntestfilefixup(case3, b'abc', case3)\n\n# deletion\ntestfilefixup(case3, b'', [b'', b'', b'2', b''])\n\n# insertion\ntestfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])\n\n# input case 4: a slightly complex case\ncase4 = buildcontents([\n    (b'1', [1, 2, 3]),\n    (b'2', [ 2, 3]),\n    (b'3', [1, 2, ]),\n    (b'4', [1, 3]),\n    (b'5', [ 3]),\n    (b'6', [ 2, 3]),\n    (b'7', [ 2 ]),\n    (b'8', [ 2, 3]),\n    (b'9', [ 3]),\n])\n\ntestfilefixup(case4, b'1245689', case4)\ntestfilefixup(case4, b'1a2456bbb', case4)\ntestfilefixup(case4, b'1abc5689', case4)\ntestfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])\ntestfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])\ntestfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee'])\ntestfilefixup(case4, b'aaaaaa', case4)\ntestfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])\ntestfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])\ntestfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689'])\ntestfilefixup(case4, b'28', [b'', b'34', b'2378', b'28'])\ntestfilefixup(case4, b'', [b'', b'34', b'37', b''])\n\n# input case 5: replace a small chunk which is near a deleted line\ncase5 = buildcontents([\n    (b'12', [1, 2]),\n    (b'3',  [1]),\n    (b'4',  [1, 2]),\n])\n\ntestfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])\n\n# input case 6: base \"changeset\" is immutable\ncase6 = [b'1357', b'0125678']\n\ntestfilefixup(case6, b'0125678', case6)\ntestfilefixup(case6, b'0a25678', case6)\ntestfilefixup(case6, b'0a256b8', case6)\ntestfilefixup(case6, b'abcdefg', [b'1357', b'a1c5e7g'])\ntestfilefixup(case6, b'abcdef', case6)\ntestfilefixup(case6, b'', [b'1357', b'157'])\ntestfilefixup(case6, b'0123456789', [b'1357', b'0123456789'])\n\n# input case 7: change an empty file\ncase7 = [b'']\n\ntestfilefixup(case7, b'1', case7)\n","repo_name":"indygreg/hg","sub_path":"tests/test-absorb-filefixupstate.py","file_name":"test-absorb-filefixupstate.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
{"seq_id":"32510553859","text":"import os\nimport sys\nimport numpy as np \nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# Delete all files under the src path\nlog_dir = 'log'\ndef delete_file_folder(src):\n    '''delete files and folders'''\n    if os.path.isfile(src):\n        try:\n            os.remove(src)\n        except:\n            pass\n    elif os.path.isdir(src):\n        for item in os.listdir(src):\n            itemsrc=os.path.join(src,item)\n            delete_file_folder(itemsrc) \n        try:\n            os.rmdir(src)\n        except:\n            pass\n\n# Load the dataset\nmnist = input_data.read_data_sets(\"mnistdata\",one_hot=True)\n# Define some constants\nlearn_rate = tf.Variable(0.01,dtype=tf.float32) \nepoch_step = 51\ndisplay_step = 1\n#loss_mode = [\"square_mean\",\"cross_entory\"]\ntrain_mode = [\"Grad\",\"Adam\",\"Moment\",\"RMSProp\"]\n\n# Number of neurons in the two hidden layers\nlayer1_dim = 150\nlayer2_dim = 50\n# Size of each batch\nbatch_size = 100\n# Compute how many batches there are in total\nn_batch = mnist.train.num_examples // batch_size\n# Define keep_prob\nkeep_prob = tf.placeholder(tf.float32)\n# Define the two placeholders\nwith tf.name_scope('input'):\n    x = tf.placeholder(tf.float32,[None,784],name='x_input')\n    y = tf.placeholder(tf.float32,[None,10],name='y_input')\n\n# Define weights and biases\ndef weights_variable(shape):\n    w = tf.Variable(tf.truncated_normal(shape))\n    return w\ndef biases_variable(shape):\n    b = tf.Variable(tf.zeros(shape)+0.1)\n    return b\n# 
Compute summary statistics for a parameter and record them with tf.summary.scalar\ndef variable_summaries(var):\n    \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n    with tf.name_scope('summaries'):\n        mean = tf.reduce_mean(var)\n        tf.summary.scalar('mean', mean)\n        with tf.name_scope('stddev'):\n            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n        tf.summary.scalar('stddev', stddev)# standard deviation\n        tf.summary.scalar('max', tf.reduce_max(var))# maximum\n        tf.summary.scalar('min', tf.reduce_min(var))# minimum\n        tf.summary.histogram('histogram', var)# histogram\n\n# Define the neural network\ndef network(x_input,input_dim,output_dim,layer_name,act_style):\n    act = {\n        \"tanh\":tf.nn.tanh,\n        \"relu\":tf.nn.relu,\n        \"sigmoid\":tf.nn.sigmoid,\n        \"softmax\":tf.nn.softmax,\n        \"elu\":tf.nn.elu,\n        \"relu6\":tf.nn.relu6\n    }\n    with tf.name_scope(layer_name):\n        # Initialize the weights w and biases b with the helpers above, and record their summaries\n        with tf.name_scope('weights'): \n            weights = weights_variable([input_dim, output_dim])\n            variable_summaries(weights)\n        with tf.name_scope('biases'):\n            biases = biases_variable([output_dim])\n            variable_summaries(biases)\n        # Perform the linear computation wx+b and record it as a histogram\n        with tf.name_scope('linear_compute'):\n            preactivate = tf.matmul(x_input, weights) + biases\n            tf.summary.histogram('linear', preactivate)\n        # Return the final output of the activation layer\n        with tf.name_scope('activations'): \n            activations = act[act_style](preactivate, name='activation')\n            tf.summary.histogram('activations', activations)\n        with tf.name_scope('dropout'):\n            activations_drop = tf.nn.dropout(activations,keep_prob) \n        return activations_drop\n\n# Define the loss function\ndef lossfunction(pred,y_input):\n    with tf.name_scope('loss'):\n        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels= y_input,logits= pred))\n        tf.summary.scalar('loss',loss)\n    return loss\n# Define the training function\ndef trainfunction(train_style,learnrate,cost): \n\n    train_mode = {\"Grad\":tf.train.GradientDescentOptimizer,\n                  \"Adam\":tf.train.AdamOptimizer,\n                  \"Moment\":tf.train.MomentumOptimizer,\n                  \"RMSProp\":tf.train.RMSPropOptimizer\n                  }\n    with tf.name_scope('train'):\n        train_step = train_mode[train_style](learning_rate= learnrate).minimize(cost)\n\n    return train_step\n# Delete previously generated logs\nif os.path.exists(log_dir + '/train'):\n    delete_file_folder(log_dir + '/train') \n# Call the functions above to build the graph\nlayer1 = network(x,784,layer1_dim,'layer1','tanh')\nlayer2 = network(layer1,layer1_dim,layer2_dim,'layer2','tanh')\nout = network(layer2,layer2_dim,10,'output','tanh')\nloss = lossfunction(out,y)\n\ntrain_step = trainfunction(train_mode[1],learn_rate,loss)\n\n# Initialize the variables\ninit = tf.global_variables_initializer()\n\nwith tf.name_scope('accuracy'): \n    with tf.name_scope('correct_prediction'): \n        # Store the results in a boolean list\n        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(out,1))# argmax returns the index of the largest value in a 1-D tensor\n    with tf.name_scope('accuracy'):\n        # Compute the accuracy\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n        tf.summary.scalar('accuracy',accuracy)\n# Merge all the summaries\nmerged = tf.summary.merge_all()\n\n#saver = tf.train.Saver()\n\nprint(\";layercombin:\"+str(layer1_dim)+\",\"+str(layer2_dim))\nwith tf.Session() as sess:\n    sess.run(init)\n    train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)\n    test_writer = tf.summary.FileWriter(log_dir + '/test', sess.graph)\n    for epoch in range(epoch_step):\n        sess.run(tf.assign(learn_rate,0.01*(0.97**epoch)))\n        #sess.run(tf.assign(learn_rate,0.01/np.sqrt(epoch+1)))\n        #r = -2*np.random.rand()-2\n        #lr = 10**r\n        #sess.run(tf.assign(learn_rate,lr))\n        for batch in range(n_batch):\n            batch_xs,batch_ys = mnist.train.next_batch(batch_size)\n            summary_train,_ = sess.run([merged, 
train_step],feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.85})\n train_writer.add_summary(summary_train,epoch)\n if epoch % display_step == 0: \n summary_test,acc = sess.run([merged, accuracy],feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})\n print(\"Iter \" + str(epoch) + \",Testing Accuracy \" + str(acc))\n test_writer.add_summary(summary_test,epoch)\n #saver.save(sess,'net/my_net.ckpt')\n train_writer.close()\n test_writer.close()","repo_name":"oo7lfc/mnist_nn","sub_path":"mnistlearnpro.py","file_name":"mnistlearnpro.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73510606326","text":"# y = wx+b\nimport numpy as np\n\n\nclass linear_r:\n def __init__(self, lr=0.001, n_iters=1000):\n self.lr = lr\n self.n_iters = n_iters\n self.w = None\n self.b = None\n\n def fit(self, X, y):\n n_samples, n_features = X.shape\n self.w = np.zeros(n_features)\n self.b = 0\n\n for _ in range(self.n_iters):\n y_predicted = np.dot(X, self.w) + self.b\n # Apply Gradient Decent\n dw = (1/n_samples) * np.dot(X.T, (y_predicted - y))\n db = (1/n_samples) * np.sum(y_predicted - y)\n\n self.w -= self.lr * dw\n self.b -= self.lr * db\n pass\n\n def predict(self, X):\n y_predicted = np.dot(X, self.w) + self.b\n return y_predicted","repo_name":"anmol6536/project_ideas","sub_path":"all_ideas/ml_algo/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"5404827974","text":"# Giovanni Medrano\n\n\nimport sys\n\nclass the_Node(object):\n\n def __init__(self, data):\n empty = None\n self.right_c = empty\n self.left_c = empty\n self.data = data\n\nclass Tree(object):\n # the init() function creates the binary search tree with the\n # encryption string. If the encryption string contains any\n # character other than the characters 'a' through 'z' or the\n # space character drop that character.\n def __init__(self, encrypt_str):\n path = len(encrypt_str)\n path2 = encrypt_str[0]\n self.root = the_Node(path2)\n for x in range(1, path):\n if (ord(encrypt_str[x]) == 32 or (97 <= ord(encrypt_str[x]) <= 122)):\n self.insert(encrypt_str[x])\n\n\n # the insert() function adds a node containing a character in\n # the binary search tree. If the character already exists, it\n # does not add that character. There are no duplicate characters\n # in the binary search tree.\n def insert(self, char):\n space = ''\n if self.search(char) == space:\n node_new = the_Node(char)\n empty = None\n curr = self.root\n prev = self.root\n while curr is not empty:\n if ord(curr.data) > ord(char):\n prev = curr\n curr = curr.left_c\n else:\n prev = curr\n curr = curr.right_c\n if ord(prev.data) > ord(char):\n prev.left_c = node_new\n else:\n prev.right_c = node_new\n\n # the search() function will search for a character in the binary\n # search tree and return a string containing a series of lefts\n # (<) and rights (>) needed to reach that character. 
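For reference, regression.py above implements batch gradient descent for y = wx + b. Below is a self-contained numpy check of the same update rule on synthetic data; all values (the true w = 3.0, b = 0.5, the learning rate, and the iteration count) are made up for illustration and are not part of the original repo.

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 1))
y = 3.0 * X[:, 0] + 0.5 + rng.normal(scale=0.1, size=200)  # true w = 3.0, b = 0.5

w, b, lr = np.zeros(1), 0.0, 0.05
for _ in range(2000):
    y_pred = X @ w + b                 # forward pass: y = Xw + b
    dw = X.T @ (y_pred - y) / len(y)   # same (1/n_samples) gradient convention as linear_r.fit
    db = np.mean(y_pred - y)
    w -= lr * dw
    b -= lr * db

print(w, b)  # converges near w = [3.0], b = 0.5
```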
It will\n # return a blank string if the character does not exist in the tree.\n # It will return * if the character is the root of the tree.\n\n def search(self, char):\n curr = self.root\n left = '<'\n right = '>'\n if char == curr.data:\n star = '*'\n return star\n l_n_r = []\n while curr is not None:\n if ord(char) == ord(curr.data):\n return l_n_r\n elif ord(curr.data) > ord(char):\n l_n_r.append(left)\n curr = curr.left_c\n else:\n l_n_r.append(right)\n curr = curr.right_c\n space = ''\n return space\n\n\n # the traverse() function will take string composed of a series of\n # lefts (<) and rights (>) and return the corresponding\n # character in the binary search tree. It will return an empty string\n # if the input parameter does not lead to a valid character in the tree.\n def traverse(self, string):\n curr = self.root\n empty = None\n for char in string:\n left = '<'\n right = '>'\n star = '*'\n if char == right:\n curr = curr.right_c\n elif char == left:\n curr = curr.left_c\n elif char == star:\n curr = self.root\n\n if curr is empty or curr.data is empty:\n return ''\n else:\n return curr.data\n\n\n # the encrypt() function will take a string as input parameter, convert\n # it to lower case, and return the encrypted string. It will ignore\n # all digits, punctuation marks, and special characters.\n def encrypt(self, string):\n\n encrypt = ''\n string = string.lower()\n path = string[:-1]\n for char in path:\n string1 = ''\n exclemation = '!'\n encrypt = encrypt + string1.join(self.search(char))\n if string1.join(self.search(char)) != string1:\n encrypt = encrypt + exclemation\n string2 = ''\n encrypt = encrypt + string2.join(self.search(string[-1]))\n return encrypt\n\n # the decrypt() function will take a string as input parameter, and\n # return the decrypted string.\n def decrypt(self, string):\n\n decrypt = ''\n exclemation = '!'\n string = string.split(exclemation)\n\n for x in string:\n decrypt = decrypt + self.traverse(x)\n return decrypt\n\n\ndef main():\n # read encrypt string\n line = sys.stdin.readline()\n encrypt_str = line.strip()\n\n # create a Tree object\n the_tree = Tree(encrypt_str)\n\n # read string to be encrypted\n line = sys.stdin.readline()\n str_to_encode = line.strip()\n\n # print the encryption\n print(the_tree.encrypt(str_to_encode))\n\n # read the string to be decrypted\n line = sys.stdin.readline()\n str_to_decode = line.strip()\n\n # print the decryption\n print(the_tree.decrypt(str_to_decode))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"medrano123/PythonSolutions","sub_path":"HW21/BST_Cipher.py","file_name":"BST_Cipher.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"991086731","text":"import math\nfrom typing import List\n\nfrom bearlibterminal import terminal\nimport tcod\nimport tcod.path\n\nfrom components.ai import BasicMonster, ConfusedMonster\nfrom components.equipment import Equipment\nfrom components.equippable import Equippable\nfrom components.fighter import Fighter\nfrom components.inventory import Inventory\nfrom components.item import Item\n\nfrom render_functions import RenderOrder\n\n\nclass Entity:\n \"\"\"\n A generic object to represent players, enemies, items, etc.\n \"\"\"\n def __init__(self, x: int, y: int, char: str, color, name: str, blocks: bool = False,\n render_order: RenderOrder = RenderOrder.CORPSE, ai=None, equipment=None, equippable=None, fighter=None,\n inventory=None, item=None):\n self.x: int = 
x\n self.y: int = y\n self.char: str = char\n self.color = color\n self.name: str = name\n self.blocks: bool = blocks\n self.render_order = render_order\n\n self.ai = ai\n self.equipment = equipment\n self.equippable = equippable\n self.fighter = fighter\n self.inventory = inventory\n self.item = item\n\n if self.ai:\n self.ai.owner = self\n\n if self.equipment:\n self.equipment.owner = self\n\n if self.fighter:\n self.fighter.owner = self\n\n if self.inventory:\n self.inventory.owner = self\n\n if self.item:\n self.item.owner = self\n\n if self.equippable:\n self.equippable.owner = self\n\n if not self.item:\n item = Item()\n self.item = item\n self.item.owner = self\n\n def to_json(self):\n json_data = {\n 'x': self.x,\n 'y': self.y,\n 'char': self.char,\n 'color': self.color,\n 'name': self.name,\n 'blocks': self.blocks,\n 'render_order': self.render_order.value\n }\n\n if self.ai:\n json_data['ai'] = self.ai.to_json()\n\n if self.equipment:\n json_data['equipment'] = self.equipment.to_json()\n\n if self.equippable:\n json_data['equippable'] = self.equippable.to_json()\n\n if self.fighter:\n json_data['fighter'] = self.fighter.to_json()\n\n if self.inventory:\n json_data['inventory'] = self.inventory.to_json()\n\n if self.item:\n json_data['item'] = self.item.to_json()\n\n return json_data\n\n @classmethod\n def from_json(cls, json_data):\n x = json_data.get('x')\n y = json_data.get('y')\n char = json_data.get('char')\n color = json_data.get('color')\n name = json_data.get('name')\n blocks = json_data.get('blocks')\n render_order_value = json_data.get('render_order')\n\n ai_json = json_data.get('ai')\n equipment_json = json_data.get('equipment')\n equippable_json = json_data.get('equippable')\n fighter_json = json_data.get('fighter')\n inventory_json = json_data.get('inventory')\n item_json = json_data.get('item')\n\n if equipment_json:\n equipment = Equipment.from_json(json_data=equipment_json)\n else:\n equipment = None\n\n if equippable_json:\n equippable = Equippable.from_json(json_data=equippable_json)\n else:\n equippable = None\n\n if fighter_json:\n fighter = Fighter.from_json(json_data=fighter_json)\n else:\n fighter = None\n\n if inventory_json:\n inventory = Inventory.from_json(json_data=inventory_json)\n else:\n inventory = None\n\n if item_json:\n item = Item.from_json(json_data=item_json)\n else:\n item = None\n\n entity = cls(\n x=x,\n y=y,\n char=char,\n color=color,\n name=name,\n blocks=blocks,\n render_order=RenderOrder(render_order_value),\n ai=None,\n equipment=equipment,\n equippable=equippable,\n fighter=fighter,\n inventory=inventory,\n item=item\n )\n\n if ai_json:\n name = ai_json.get('name')\n\n if name == BasicMonster.__name__:\n ai = BasicMonster.from_json(json_data=ai_json, owner=entity)\n elif name == ConfusedMonster.__name__:\n ai = ConfusedMonster.from_json(json_data=ai_json['ai_data'], owner=entity)\n else:\n ai = None\n\n entity.ai = ai\n\n return entity\n\n def distance(self, target_x, target_y):\n return math.sqrt((target_x - self.x) ** 2 + (target_y - self.y) ** 2)\n\n def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n\n return math.sqrt(dx ** 2 + dy ** 2)\n\n def draw(self):\n \"\"\"\n Draw the entity to the terminal\n \"\"\"\n terminal.printf(x=self.x, y=self.y, s=f'[color={self.color}]{self.char}[/color]')\n\n def move(self, dx, dy):\n \"\"\"\n Move the entity by a given amount\n \"\"\"\n self.x += dx\n self.y += dy\n\n def move_astar(self, target, entities, game_map):\n # Create a FOV map that has the dimensions of 
the map\n fov = tcod.map_new(game_map.width, game_map.height)\n\n # Scan the current map each turn and set all the walls as unwalkable\n for y1 in range(game_map.height):\n for x1 in range(game_map.width):\n tcod.map_set_properties(fov, x1, y1, not game_map.tiles[x1][y1].block_sight, not game_map.tiles[x1][y1].blocked)\n\n # Scan all the objects to see if there are objects that must be navigated around\n # Check also that the object isn't self or the target (so that the start and the end points are free)\n # The AI class handles the situation if self is next to the target so it will not use this A* function anyway\n for entity in entities:\n if entity.blocks and entity != self and entity != target:\n # Set the tile as a wall so it must be navigated around\n tcod.map_set_properties(fov, entity.x, entity.y, True, False)\n\n # Allocate a A* path\n # The 1.41 is the normal diagonal cost of moving, it can be set as 0.0 if diagonal moves are prohibited\n my_path = tcod.path_new_using_map(fov, 1.41)\n\n # Compute the path between self's coordinates and the target's coordinates\n tcod.path_compute(my_path, self.x, self.y, target.x, target.y)\n\n # Check if the path exists, and in this case, also the path is shorter than 25 tiles\n # The path size matters if you want the monster to use alternative longer paths (for example through other rooms) if for example the player is in a corridor\n # It makes sense to keep path size relatively low to keep the monsters from running around the map if there's an alternative path really far away\n if not tcod.path_is_empty(my_path) and tcod.path_size(my_path) < 25:\n # Find the next coordinates in the computed full path\n x, y = tcod.path_walk(my_path, True)\n if x or y:\n # Set self's coordinates to the next path tile\n self.x = x\n self.y = y\n else:\n # Keep the old move function as a backup so that if there are no paths (for example another monster blocks a corridor)\n # it will still try to move towards the player (closer to the corridor opening)\n self.move_towards(target.x, target.y, game_map, entities)\n\n # Delete the path to free memory\n tcod.path_delete(my_path)\n\n def move_towards(self, target_x, target_y, game_map, entities):\n astar = tcod.path.AStar(game_map.current_floor.walkable, diagonal=1.41)\n path = astar.get_path(self.x, self.y, target_x, target_y)\n\n if path:\n dx = path[0][0] - self.x\n dy = path[0][1] - self.y\n\n if game_map.current_floor.walkable[path[0][0], path[0][1]] and not get_blocking_entities_at_location(\n entities,\n self.x + dx,\n self.y + dy):\n self.move(dx, dy)\n\n\ndef get_blocking_entities_at_location(entities: List[Entity], destination_x: int, destination_y: int) -> [Entity, None]:\n for entity in entities:\n if entity.blocks and entity.x == destination_x and entity.y == destination_y:\n return entity\n\n return None\n","repo_name":"TStand90/roguelike-tutorial-2019","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"26909293224","text":"#!/usr/bin/env python3\n\nimport sys\n\npattern = sys.argv[1]\ns = input()\nl = []\nwhile s != \"end\":\n i = 0\n while i < len(s):\n if s[i:len(pattern) + i] == pattern:\n l.append(s)\n i = i + 1\n s = input()\n\ni = 0\nwhile i < len(l):\n print(l[i])\n i += 
1\n","repo_name":"conallkavanagh/ca116","sub_path":"lab6a/grep-lines.py","file_name":"grep-lines.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30574197567","text":"#!/usr/bin/env python3\n\nfrom math import radians, sin, cos, sqrt\nimport numpy\nimport rospy\nfrom duckietown_msgs.msg import WheelEncoderStamped, Pose2DStamped\n\nclass OdomNode:\n def __init__(self):\n self.pose = Pose2DStamped()\n self.R = rospy.Publisher(\"pose\", Pose2DStamped, queue_size=10)\n self.x = 0\n self.y = 0\n self.theta = 0\n self.s_l = 0\n self.s_r = 0\n self.delta_s_r = 0\n self.delta_s_l = 0\n self.delta_theta = 0\n radius = .065/2\n self.circumference = radius*2*numpy.pi\n self.rev_per_tick = 135\n self.dist_wheel_left = 0\n self.dist_wheel_right = 0 \n self.left_first = True\n self.right_first = True\n self.left_flag_new = False\n self.right_flag_new = False\n self.left_tick = rospy.Subscriber(\"left_wheel_encoder_node/tick\", WheelEncoderStamped, self.Left_Wheel)\n self.right_tick = rospy.Subscriber(\"right_wheel_encoder_node/tick\", WheelEncoderStamped, self.Right_Wheel)\n \n \n def Left_Wheel(self, msg):\n #number of revolutions\n revs_left = msg.data/self.rev_per_tick\n #calculation of distance based on above\n dist_left = self.circumference*revs_left\n self.delta_s_l = dist_left-self.dist_wheel_left\n self.dist_wheel_left = dist_left\n if self.left_first == True:\n self.left_first = False\n else:\n self.left_flag_new = True\n \n \n def Right_Wheel(self, msg):\n #number of revolutions\n revs_right = msg.data/self.rev_per_tick\n #calculation of distance based on above\n dist_right = self.circumference*revs_right\n self.delta_s_r = dist_right-self.dist_wheel_right\n self.dist_wheel_right = dist_right\n if self.right_first == True:\n self.right_first = False\n else:\n self.right_flag_new = True\n \n def callback_function(self):\n if self.right_flag_new == True and self.left_flag_new == True: \n L = .05\n delta_s_r = self.delta_s_r\n delta_s_l = self.delta_s_l\n delta_s = (delta_s_r+delta_s_l)/2\n self.delta_theta = (delta_s_r-delta_s_l)/(2*L)\n delta_x = delta_s*cos(self.theta+self.delta_theta/2)\n delta_y = delta_s*sin(self.theta+self.delta_theta/2)\n self.x = self.x+delta_x\n self.y = self.y+delta_y\n self.theta = self.theta+self.delta_theta \n self.pose.x = self.x\n self.pose.y = self.y\n self.pose.theta = self.theta \n self.R.publish(self.pose)\n self.right_flag_new = False\n self.left_flag_new = False \n \nif __name__=='__main__':\n rospy.init_node ('odom_node', anonymous=True)\n O = OdomNode()\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n O.callback_function()\n rate.sleep()\n \n","repo_name":"angelynn408/treasure_trove","sub_path":"packages/lab_2/src/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41104167676","text":"import torch\nimport os\n\ndef load_checkpoint(pytorch_model_path):\n model = torch.load(pytorch_model_path)\n state_dict = model[\"model\"]\n for key in state_dict.keys():\n value = state_dict[key]\n if \"layers.0\" in key and not \"min\" in key and not \"max\" in key and (\"scaling\" in key or \"integer\" in key):\n print(key, value.shape)\n #if \"scaling\" in key or \"min\" in key or \"max\" in key:\n # print(key, value)\n #if \"integer\" in key:\n # print(state_dict[key])\n # break\n return state_dict\nmodel_dir = 
\"/home/user/shared_docker/I-BERT/outputs/symmetric/QQP-base/wd0.1_ad0.1_d0.1_lr1e-06/0919-202918_ckpt\"\nmodel_name = \"checkpoint_best.pt\"\nmodel_path = os.path.join(model_dir, model_name)\nload_checkpoint(model_path)","repo_name":"iCAS-Lab/TransformerAccelerator","sub_path":"debug/pt_file_reader.py","file_name":"pt_file_reader.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73933228084","text":"from setuptools import setup, find_packages\nfrom glob import glob\n\nimport sys\nimport os\nfrom setuptools.command.test import test as TestCommand\n\nverstr = \"none\"\ntry:\n import subprocess\n verstr = subprocess.check_output(\n ['git', 'describe', '--long']).decode('utf-8').strip()\nexcept EnvironmentError:\n pass\nexcept Exception as err:\n print(err)\n verstr = 'v0.0.0-???'\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == 'True'\n\nrequirements = ['uproot>=2.8.13', 'colorlog', 'PyYAML>=3.13', 'pyparsing>=2.1.5',\n 'pystan==2.17.1.0', 'dnspython==1.12.0',\n 'pbr==0.10.8', 'cycler==0.10.0', 'lz4', 'six', 'asteval', 'awkward']\n\n#everything = set()\n#for deps in extras_require.values():\n# everything.update(deps)\n#extras_require['all'] = everything\n\nsetup(\n name='morpho',\n version=verstr,\n description=\"A python interface with Stan/PyStan Markov Chain Monte Carlo package\",\n packages=find_packages(),\n scripts=[\"bin/morpho\"],\n install_requires=requirements,\n# extras_require=extras_require,\n url='http://www.github.com/project8/morpho',\n author=\"J. Formaggio, J. Johnston (MIT), T. Weiss (Yale), M. Guigue (Sorbonne Université), B. LaRoque, N. Oblath (PNNL)\",\n maintainer=\"T. Weiss\",\n maintainer_email=\"talia.weiss@yale.edu\"\n)\n","repo_name":"morphoorg/morpho","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"6225923574","text":"import pygame\nfrom aster import asteroid\npygame.init()\n\nwin = pygame.display.set_mode((400,500))\npygame.display.set_caption(\"Space Game\")\n\nwalkRight = [pygame.image.load('right_1.png'),\npygame.image.load('right_2.png'),pygame.image.load('right_3.png'),\npygame.image.load('right_4.png')]\n\nwalkLeft = [pygame.image.load('left_1.png'),\npygame.image.load('left_2.png'),pygame.image.load('left_3.png'),\npygame.image.load('left_4.png')]\n\nplayerStand = [pygame.image.load('stand_1.png'),pygame.image.load('stand_2.png'),\npygame.image.load('stand_3.png')]\nbg = pygame.image.load('bg.jpg')\n\nclock = pygame.time.Clock()\nx = 250\ny = 400\nwidth = 60\nhight = 84\nspeed = 5\n\nisJump = False\njumpCount = 10\n\nleft = False\nright = False\nanimCount = 0\n\nasteroid_y = 0\nlastMove = \"right\"\n\n\n\nclass shot():\n def __init__(self,x,y,radius,color,facing):\n self.x = x\n self.y = y\n self.radius = radius\n self.color = color\n self.facing = facing\n self.vel = 8 * facing\n\n def draw(self,win):\n pygame.draw.circle(win,self.color,(self.x,self.y),self.radius)\n\ndef drawWindow():\n global animCount\n global asteroid_y\n win.blit(bg,(0,0))\n if animCount + 1 >= 30:\n animCount = 0\n if left:\n win.blit(walkLeft[animCount % 4],(x,y))\n animCount += 1\n elif right:\n win.blit(walkRight[animCount % 4],(x,y))\n animCount += 1\n else:\n win.blit(playerStand[animCount % 3],(x,y))\n animCount += 1\n\n for bullet in bullets:\n bullet.draw(win)\n if abs(bullet.y - asteroid_y) < 5:\n asteroid_y = 0\n 
bullets.pop(bullets.index(bullet))\n if asteroid_y < 500:\n asteroid_y += 1 * speed\n else:\n asteroid_y = 0\n aster = asteroid(x,asteroid_y)\n aster.draw(win)\n if abs(y - asteroid_y) < 8:\n pygame.draw.circle(win,(255,0,0),(round(x),round(y)),40)\n asteroid_y = 0\n\n pygame.display.update()\nrun = True\nbullets = []\nasteroids = []\nwhile run:\n clock.tick(30)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n for bullet in bullets:\n if bullet.y < 500 and bullet.y > 0:\n bullet.y -= bullet.vel\n else:\n bullets.pop(bullets.index(bullet))\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_f]:\n if len(bullets) > 0:\n if bullets[-1].y < 350:\n bullets.append(shot(round(x + width // 2),\n round (y),5,(255,0,0),1))\n else:\n bullets.append(shot(round(x + width // 2),\n round (y),5,(255,0,0),1))\n\n if keys[pygame.K_r]:\n bullets.append(shot(round(x + width // 2),\n round (y),5,(0,0,255),1))\n\n if keys[pygame.K_LEFT] and x > 5:\n x -= speed\n left = True\n right = False\n lastMove = \"left\"\n\n elif keys[pygame.K_RIGHT] and x < 395 - width:\n x += speed\n left = False\n right = True\n lastMove = \"right\"\n else:\n left = False\n right = False\n #animCount = 0\n\n if not(isJump):\n if keys[pygame.K_SPACE]:\n isJump = True\n else:\n if jumpCount >= -10:\n if jumpCount < 0:\n y += (jumpCount **2 )/2\n else:\n y -= (jumpCount **2 )/2\n jumpCount -= 1\n\n else:\n isJump = False\n jumpCount = 10\n drawWindow()\n\n\n\npygame.quit()\n","repo_name":"dojdlivaia/pygame_example","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26191052386","text":"#!/usr/bin/env python3\n\nimport os\nimport crossval, features, estimators, estpicker, bootstrap\n\ndef Vecuum():\n \n print('\\nGROUP CHOICES (automated order: 2,1,0,4,3,5,11,10,12)')\n print('\\nSymptom Severity:\\n')\n print(' 0= control/mild\\n 1= control/severe\\n 2= control/very severe\\n 3= mild/severe\\n 4= mild/very severe\\n 5= severe/very severe\\n 6= control/all patients\\n')\n \n print('Treatment Response:\\n')\n print(' 7= control/non-responder\\n 8= control/all responder\\n 9= control/remitter only\\n 10= non-responder/all responder\\n 11= non-responder/remitter only\\n 12= responder vs remitter\\n 13= control/all patients\\n')\n\n groups= [2,1,0,\n 4,3,5,\n 11,10,12]\n runs=5\n\n for group in groups:\n run=1\n for i in range(runs): \n print('BEGINNING RUN {}/{}'.format(run, runs))\n crossval.OuterCV(group)\n crossval.InnerCV() \n features.SelKBest()\n features.SelKBestOuter()\n estimators.InnerFolds(group, run)\n bestest= estpicker.Best(group, run)\n estimators.OuterFolds(group, run, bestest)\n bootstrap.Bill(group, run)\n run= run + 1\n print('RUN COMPLETE') \n\n os.system('spd-say -r -50 -p -50 -t female3 \"your groups have finished running. 
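For reference, drawWindow and the main loop in game.py above call bullets.pop(...) while iterating over bullets, which skips the element that slides into the removed slot. A dependency-free sketch of the usual fix is below: move everything first, then rebuild the list. The Bullet class and the numbers are hypothetical stand-ins, not part of the original game.

```python
class Bullet:
    def __init__(self, y, vel):
        self.y, self.vel = y, vel

def step(bullets, screen_h=500):
    for b in bullets:
        b.y -= b.vel                                       # move every bullet first
    return [b for b in bullets if 0 < b.y < screen_h]      # then keep only on-screen ones

bullets = [Bullet(400, 8), Bullet(5, 8), Bullet(8, 8)]
bullets = step(bullets)
print(len(bullets))  # 1 -- both bullets that crossed the top edge are dropped
```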
To run more groups, you must construct additional pylons.\"')\n \n return\n\n","repo_name":"jrabenoit/shopvec","sub_path":"shopvec.py","file_name":"shopvec.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6937087014","text":"\"\"\"\nSetup function for creating an exe\n\"\"\"\n\nimport sys\nfrom cx_Freeze import setup, Executable\n\n# Dependencies are automatically detected, but it might need fine tuning.\nbuild_exe_options = {\n \"packages\": [\"Tkinter\", \"PIL\", \"functions\", \"threading\", \"time\"],\n \"include_files\": ['floor.PNG']\n }\n\n# GUI applications require a different base on Windows (the default is for a\n# console application).\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nsetup( name = \"ProdFloorTool\",\n version = \"3.0\",\n description = \"Wincore floor tool created by Shane Hinzman\",\n options = {\"build_exe\": build_exe_options},\n executables = [Executable(\"ProdFloorTool.py\", base=base, icon=\"icon.ico\")])\n","repo_name":"shanehinzmanwincore/Production-Floor-Tool-Wincore","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3258054572","text":"\nimport move_media\n\ntoy_story = move_media.Movie(\"Toy Stroy\",\n \"A story of a boy and his toys that come to life\", \n \"http://bing.com\",\n \"http://baidu.com\")\n\nprint (toy_story.title)\nprint (toy_story.storyline)\n\navatar = move_media.Movie(\"Avatar\",\n\"A marine on an alien planet\",\n\"http://upload.wikimedia.org/wikipedia/id/b/b0/Avatar-Teaser-Poster.jpg\",\n\"http://www.youtube.com/watch?v=-9ceBgWV8io\")\n\nprint (avatar.storyline)","repo_name":"chengbindai1984/python_learning","sub_path":"entertainment_centery.py","file_name":"entertainment_centery.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24736216999","text":"#!/usr/bin/python3\n\"\"\"\n0. Count it! mandatory\n\nWrite a recursive function that queries the Reddit API,\nparses the title of all hot articles, and prints a\nsorted count of given keywords (case-insensitive,\ndelimited by spaces. Javascript should count as\njavascript, but java should not).\n\nRequirements:\n Prototype: def count_words(subreddit, word_list)\n Note: You may change the prototype, but it must be\n able to be called with just a subreddit supplied\n and a list of keywords. AKA you can add a counter\n or anything else, but the function must work without\n supplying a starting value in the main.\n If word_list contains the same word (case-insensitive),\n the final count should be the sum of each duplicate\n (example below with java)\n Results should be printed in descending order,\n by the count, and if the count is the same for\n separate keywords, they should then be sorted\n alphabetically (ascending, from A to Z). Words with\n no matches should be skipped and not printed.\n Words must be printed in lowercase.\n Results are based on the number of times a keyword\n appears, not titles it appears in. java java\n java counts as 3 separate occurrences of java.\n To make life easier, java. or\n java! or java_ should not count as java\n If no posts match or the subreddit is invalid,\n print a newline.\n NOTE: Invalid subreddits may return a redirect to\n search results. 
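For reference, the docstring of 0-count.py pins down the counting rules: case-insensitive matching, exact tokens only ("java." does not count as "java"), duplicate keywords merged, and results sorted by descending count then ascending alphabetical order with zero counts skipped. Here is a sketch of just those rules on made-up titles, independent of the Reddit API:

```python
def count_keywords(titles, word_list):
    counts = {w.lower(): 0 for w in word_list}   # duplicate keywords merge case-insensitively
    for title in titles:
        for token in title.lower().split():
            if token in counts:                  # exact token only: "java." != "java"
                counts[token] += 1
    # descending by count, alphabetical for ties, zero counts skipped
    for word in sorted((w for w in counts if counts[w] > 0),
                       key=lambda w: (-counts[w], w)):
        print('{}: {}'.format(word, counts[word]))

count_keywords(['Java java java.', 'I love python', 'python Python!'],
               ['java', 'Java', 'python'])
# prints:
# java: 2
# python: 2
```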
Ensure that you are NOT\n    following redirects.\n\nYour code will NOT pass if you are using a loop and not\nrecursively calling the function! This /can/ be done\nwith a loop but the point is to use a\nrecursive function. :)\n\nDisclaimer: the numbers presented in this example cannot\nbe accurate now - Reddit's hot\narticles are always changing.\n\"\"\"\nimport requests\nimport pprint\nimport re\n\n\ndef count_words(subreddit, word_list, hot=None, after=None):\n    \"\"\"\n    recursive function that queries the Reddit API,\n    https://www.reddit.com/dev/api\n    \"\"\"\n    if hot is None:  # avoid a mutable default argument\n        hot = []\n    meta = {'User-agent': 'Unix:0-subs:v1'}\n    query = {'limit': 100}\n\n    if isinstance(after, str):\n        if after != \"STOP\":\n            query['after'] = after\n        else:\n            return show_results(word_list, hot)\n\n    response = requests.get(\n        'https://www.reddit.com/r/{}/hot.json'.format(\n            subreddit),\n        headers=meta, params=query,\n        allow_redirects=False)  # do NOT follow redirects, per the spec above\n    if response.status_code != 200:\n        print()  # invalid subreddit or no posts: print a newline, per the spec above\n        return None\n    data = response.json().get('data', {})\n    after = data.get('after', 'STOP')\n    if not after:\n        after = \"STOP\"\n    hot = hot + [post.get('data', {})\n                 .get('title') for post in data.get('children', [])]\n    return count_words(subreddit, word_list, hot, after)\n\n\ndef show_results(word_list, hot):\n    \"\"\"\n    show results\n    \"\"\"\n    i = {}\n    for item in word_list:\n        i[item] = 0\n    for title in hot:\n        for item in word_list:\n            for tw in title.lower().split():\n                if tw == item.lower():\n                    i[item] += 1\n\n    i = {k: v for k, v in i.items() if v > 0}\n    items = list(i.keys())\n    # descending by count, then alphabetically for ties, as the spec requires\n    for item in sorted(items, key=lambda k: (-i[k], k)):\n        print(\"{}: {}\".format(item, i[item]))\n","repo_name":"facu2279/holbertonschool-interview","sub_path":"0x13-count_it/0-count.py","file_name":"0-count.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"5005903659","text":"import pathlib\n\nDATABASE = {\n    \"name\": \"serverName\",\n    \"address\": \"127.0.0.1:3000\",\n    \"autoStart\": False,\n    \"hosts\": [{\n        \"name\": \"Guuner Host\",\n        \"path\": \"D:/save\",\n        \"writable\": False,\n        \"public\": False,\n        \"admin\": {\n            \"name\": \"souravgain605\",\n            \"writable\": True,\n            \"sharedUsers\": [\"botai69\"]\n        },\n        \"validUsers\": [\"souravgain605\", \"botai69\"]\n    }]\n}\n\n\n\ndef isValidPath(req, onlyDir = True):\n    is_technically_valid = pathlib.Path(req.get('path')).exists() and (onlyDir or pathlib.Path(req.get('path')).is_file())\n\n    if not is_technically_valid : \n        return False\n\n    if req.get('path').find(DATABASE.get('hosts')[0].get('path')) == 0:\n        return True\n    else:\n        return False\n","repo_name":"project-ncloud/tempServer","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"25574932100","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Note: newUin and readinU are trajectory-loading helpers assumed to be defined elsewhere in the project\ndef Hbonds(molecule,zval,atomid1,atomid2,inout,twotb=False,save=False,plotting=False):\n    \"\"\"\n    Get internal H bonds between atomid1 = bynum of polar h, atomid2 = bynum of heavy atom\n    \"\"\"\n\n    #Get the trajectory into universe object\n    if twotb == True:\n        u = newUin(molecule, inout, zval)\n    if twotb == False:\n        u = readinU(\"/home/kselvon/Downloads/directedruns/\", molecule, zval, inout)\n\n    freqstore = np.zeros(69)\n    count = 0\n    pos_array1 = np.zeros((len(u.trajectory), 3))\n    pos_array2 = np.zeros((len(u.trajectory), 3))\n    r = np.zeros((len(u.trajectory), 1))\n    atom1 = u.select_atoms(atomid1)\n    atom2 = u.select_atoms(atomid2)\n    print('getting atom positions')\n    for n, ts in enumerate(u.trajectory):\n\n        pos_array1[n] = atom1.get_positions()\n        pos_array2[n] = atom2.get_positions()\n\n    xyzdiff = pos_array1 - pos_array2\n    xyzdiff = xyzdiff**2\n\n    for i in range(len(u.trajectory)):\n        r[i] = sum(xyzdiff[i])\n    r = r**0.5\n    freq, bin_edges = np.histogram(r, bins=np.arange(0,30.25,0.25))\n    #normalise by traj length \n    freq = freq/float(len(u.trajectory))\n    #count number of bonds within the H bond length 3.25 \n    bondval = sum(r <= 3.25)/float(len(u.trajectory))\n    return bondval\n\ndef graph_Hbonds(molecule,id1,id2,inout,zrange,twotb=False,saverdf=False,save=False,plot=False):\n    \"\"\" plot number of H bonds as function of Z \"\"\"\n    bvalstore = []\n    for i in zrange:\n        print('on z val ',i)\n        bvalstore.append(Hbonds(molecule, i, id1, id2, inout, twotb, saverdf, plotting=plot))\n    plt.figure()\n    plt.plot(bvalstore)\n    plt.ylabel('Intramolecular hydrogen bonds')\n    plt.xlabel('z Angstrom')\n    plt.title('Internal H bonding between '+id1+' and '+id2)\n    if save == True:\n        plt.savefig('/media/kselvon/D20492AA049290D9/Iridis-runs/E-decomposed/'+molecule+'/InternalHbonds_'+id1+'_'+id2)\n    return bvalstore\n\ndef get_hvy_indicies(molecule, typelist, inout, twotb, zval=0):\n    \"\"\"Get the heavy atom indices for Hbonding functions\"\"\" \n    if twotb == True:\n        u = newUin(molecule, inout, zval)\n    if twotb == False:\n        u = readinU(\"/home/kselvon/Downloads/directedruns/\", molecule , zval, inout)\n\n    solute = u.select_atoms('resid 129')\n    indlist = [i for i, x in enumerate(solute.names) if x in typelist]\n\n    hvylist = []\n    namelist = []\n    for i in indlist:\n        hvylist.append(solute.indices[i]+1) # the plus 1 is needed to get correct indexing; solute.indices returns a number 1 too small\n        namelist.append(solute.names[i])\n    return hvylist, namelist\n\ndef internal_hydrogen_bonding(molecule, inout, dnr, polarh, custnames=False, indexs=[], twotb=False, savehbonds=False):\n    \"\"\"Wrapper function to call all necessary functions for intH bonding \"\"\"\n    if custnames == False:\n        hatomlist, names = get_hvy_indicies(molecule, dnr, inout, twotb, zval=0)\n    if custnames == True:\n        hatomlist = indexs \n    mydict = {}\n    for n, j in enumerate(polarh):\n        for m, i in enumerate(hatomlist):\n            print('on ', n, ' of ', len(polarh), 'polarh, on ', m,' of ', len(hatomlist),' hatoms' )\n            mydict['atoms '+str(i)+' '+str(j)] = graph_Hbonds(molecule, 'bynum '+str(i), 'bynum '+str(j), inout, range(31), twotb, saverdf=False, save=savehbonds, plot=False)\n    return mydict\n\ndef intH_inout_compare_plotter(molecule, dictin, dictout, typedict, save=False):\n    \"\"\" plot intH for each heavy atom, on separate graphs comparing in and out\"\"\"\n    inkeys = dictin.keys()\n    outkeys = dictout.keys()\n    assert (inkeys == outkeys), 'keys from in and out dictionaries differ'\n    for key in inkeys:\n        title = 'Int H bonding'+' '+molecule+' for: '+str(typedict[key[:10]]+' h'+key[11:15])\n        plt.figure()\n        plt.plot(dictin[key],label='in')\n        plt.plot(dictout[key],label='out')\n        plt.title(title)\n        plt.legend(loc='best')\n        plt.xlabel('z depth Angstrom')\n        plt.ylabel('H bonds')\n        if save == True:\n            savedir = '/home/kselvon/Downloads/directedruns/'+molecule+'/intH'\n            plt.savefig(savedir+'/'+title)\n","repo_name":"KRSelvon/Scripts-","sub_path":"Analysis scripts/intH/intH.py","file_name":"intH.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"74285124086","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as 
plt\r\nimport seaborn as sns\r\nimport urllib\r\nfrom fred_api import fred_api\r\nimport time\r\nimport requests\r\nimport json \r\nfrom scipy import stats\r\n\r\napi = fred_api\r\nendpoint = r'https://api.stlouisfed.org/fred/releases?&api_key={}&file_type=json'.format(api)\r\n \r\ncontent = requests.get(url = endpoint)\r\n\r\ndata = content.json()\r\n\r\n\r\nliabilities_endpoint = r'https://api.stlouisfed.org/fred/series/observations?series_id=NCBCEL&api_key={}&file_type=json'.format(api)\r\n \r\ncontent = requests.get(url = liabilities_endpoint)\r\n\r\ndata = content.json()\r\n#data = dict(data)\r\ndate = []\r\nvalue = []\r\nfor i in data['observations']:\r\n date.append(i['date'])\r\n value.append(i['value'])\r\n \r\nd = {'date':date, 'liabilities_value':value}\r\nliabilities = pd.DataFrame(d)\r\n\r\nnetworth_endoint = r'https://api.stlouisfed.org/fred/series/observations?series_id=TNWMVBSNNCB&api_key={}&file_type=json'.format(api)\r\n \r\ncontent = requests.get(url = networth_endoint)\r\n\r\ndata = content.json()\r\n\r\nnw_date = []\r\nnw_value = []\r\nfor i in data['observations']:\r\n nw_date.append(i['date'])\r\n nw_value.append(i['value'])\r\n \r\n#make dict then dataframe\r\n\r\nnw_d = {'date':nw_date, 'networth_value':nw_value}\r\nnetworth = pd.DataFrame(nw_d)\r\n\r\nms_index = liabilities.merge(networth, left_on='date', right_on='date', how = 'inner')\r\ncondition_liabilities = [ms_index['liabilities_value'] == \".\"]\r\ncondition_networth = [ms_index['networth_value'] == '.']\r\nreplace = [0]\r\nms_index['liabilities_value'] = np.select(condition_liabilities, replace, default=ms_index['liabilities_value'])\r\nms_index['networth_value'] = np.select(condition_networth, replace, default = ms_index['networth_value'])\r\n\r\nms_index['liabilities_value'] = ms_index['liabilities_value'].astype(float)\r\nms_index['networth_value'] = ms_index['networth_value'].astype(float)\r\nms_index['ms_index_ratio'] = round(((ms_index['liabilities_value']*1000000)/(ms_index['networth_value']*1000000)), 2)\r\n\r\ng_mean = stats.gmean(ms_index.iloc[24:,]['ms_index_ratio'], axis = 0)\r\nms_index['ratio_normal'] = (ms_index['ms_index_ratio'] / g_mean)\r\n\r\nfig_dims = (20, 8)\r\nfig, ax = plt.subplots(figsize=fig_dims)\r\nplt.xticks(\r\n rotation=90, \r\n horizontalalignment='right',\r\n fontweight='light',\r\n fontsize='small' \r\n)\r\ng = sns.pointplot(x = 'date', y = 'ratio_normal', data = ms_index.iloc[24:,])\r\nax1 = g.axes\r\n\r\nax1.axhline(g_mean, ls='--', c ='r')\r\n\r\nplt.show()\r\n\r\n","repo_name":"jbh1128d1/Q-Ratio-Check","sub_path":"Q_Ratio_Check.py","file_name":"Q_Ratio_Check.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27665524782","text":"# But how can we easily convert integer values into binary numbers? \n# The answer is an algorithm called “Divide by 2” that uses a stack to keep track of the digits for the binary result.\n\n# The Divide by 2 algorithm assumes that we start with an integer greater than 0. \n# A simple iteration then continually divides the decimal number by 2 and keeps track of the remainder. \n# The first division by 2 gives information as to whether the value is even or odd. \n# An even value will have a remainder of 0. It will have the digit 0 in the ones place. \n# An odd value will have a remainder of 1 and will have the digit 1 in the ones place. 
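For reference, Q_Ratio_Check.py above normalizes the ratio series by its geometric mean via scipy.stats.gmean. The geometric mean is just the exponential of the mean of the logs, so a numpy-only equivalent looks like the sketch below; the ratio values here are made up for illustration.

```python
import numpy as np

ratios = np.array([0.8, 1.1, 1.4, 0.9, 1.2])   # made-up ratio series
g_mean = np.exp(np.mean(np.log(ratios)))        # geometric mean = exp(mean(log))
normalized = ratios / g_mean
print(round(float(g_mean), 4), normalized)
# by construction the normalized series fluctuates around 1.0
```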
\n# We think about building our binary number as a sequence of digits; the first remainder we compute will actually be the last digit in the sequence. \n\nfrom pythonds.basic.stack import Stack\n\ndef divideBy2(decNumber):\n remstack = Stack()\n\n while decNumber > 0:\n rem = decNumber % 2\n remstack.push(rem)\n decNumber = decNumber // 2\n\n binString = \"\"\n while not remstack.isEmpty():\n binString = binString + str(remstack.pop())\n\n return binString\n\nprint(divideBy2(42))\n\n# The algorithm for binary conversion can easily be extended to perform the conversion for any base. \n# The most common of these are binary, octal (base 8), and hexadecimal (base 16).\n\ndef baseConverter(decNumber,base):\n digits = \"0123456789ABCDEF\"\n\n remstack = Stack()\n\n while decNumber > 0:\n rem = decNumber % base\n remstack.push(rem)\n decNumber = decNumber // base\n\n newString = \"\"\n while not remstack.isEmpty():\n newString = newString + digits[remstack.pop()]\n\n return newString\n\nprint(baseConverter(25,2))\nprint(baseConverter(25,16))","repo_name":"sxw031/Problem-Solving-with-Algorithms-and-Data-Structures","sub_path":"stack/baseConverter.py","file_name":"baseConverter.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41205010322","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n@Author: wjm\n@Date: 2020-02-17 22:19:38\nLastEditTime: 2021-08-20 23:44:53\n@Description: file content\n'''\nfrom solver.basesolver import BaseSolver\nimport os, torch, time, cv2, importlib\nimport torch.backends.cudnn as cudnn\nfrom data.data import *\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable \nimport numpy as np\nimport matplotlib.pyplot as plt\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nclass Testsolver(BaseSolver):\n def __init__(self, cfg):\n super(Testsolver, self).__init__(cfg)\n \n net_name = self.cfg['algorithm'].lower()\n lib = importlib.import_module('model.' 
+ net_name)\n net = lib.Net\n \n self.model = net(\n args = self.cfg\n )\n self.fmap_block = list()\n self.input_block = list()\n \n ## define hook\n def forward_hook(self, module, data_input, data_output):\n self.fmap_block.append(data_output)\n self.input_block.append(data_input)\n \n def check(self):\n self.cuda = self.cfg['gpu_mode']\n torch.manual_seed(self.cfg['seed'])\n if self.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n if self.cuda:\n torch.cuda.manual_seed(self.cfg['seed'])\n cudnn.benchmark = True\n \n gups_list = self.cfg['gpus']\n self.gpu_ids = []\n for str_id in gups_list:\n gid = int(str_id)\n if gid >=0:\n self.gpu_ids.append(gid)\n torch.cuda.set_device(self.gpu_ids[0]) \n \n self.model_path = os.path.join(self.cfg['checkpoint'], self.cfg['test']['model'])\n\n self.model = self.model.cuda(self.gpu_ids[0])\n self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)\n self.model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage)['net'])\n\n def test(self):\n self.model.eval()\n avg_time= []\n for batch in self.data_loader: \n input, target, bicubic, name = Variable(batch[0]), Variable(batch[1]), Variable(batch[2]), batch[3]\n if self.cuda:\n input = input.cuda(self.gpu_ids[0])\n target = target.cuda(self.gpu_ids[0])\n bicubic = bicubic.cuda(self.gpu_ids[0])\n\n if self.cfg['algorithm'] == 'VDSR' or self.cfg['algorithm'] == 'SRCNN':\n input = bicubic\n \n ## hook\n # if self.cuda:\n # hadle_hook = self.model.module.res_b1.register_forward_hook(self.forward_hook)\n # else:\n # hadle_hook = self.model.res_b1.register_forward_hook(self.forward_hook)\n\n t0 = time.time()\n with torch.no_grad():\n prediction = self.model(input)\n t1 = time.time()\n\n if self.cfg['data']['normalize'] :\n target = (target+1) /2\n prediction = (prediction+1) /2\n bicubic = (bicubic+1) /2\n\n ## remove hook, save feature maps\n # hadle_hook.remove()\n # self.fmap_block = self.fmap_block[0].squeeze().detach().cpu()\n # self.fmap_block = (self.fmap_block*255).numpy().astype(np.uint8)\n # for i in range(0, self.fmap_block[0].shape[1]-1):\n # plt.imsave('./1/{}.png'.format(str(i)), self.fmap_block[i,:,:], cmap = plt.cm.jet)\n # self.fmap_block = list()\n # self.input_block = list()\n\n print(\"===> Processing: %s || Timer: %.4f sec.\" % (name[0], (t1 - t0)))\n avg_time.append(t1 - t0)\n self.save_img(bicubic.cpu().data, name[0][0:-4]+'_bic.png')\n self.save_img(target.cpu().data, name[0][0:-4]+'_gt.png')\n self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')\n print(\"===> AVG Timer: %.4f sec.\" % (np.mean(avg_time)))\n \n def eval(self):\n self.model.eval()\n avg_time= []\n for batch in self.data_loader:\n \n input, bicubic, name = Variable(batch[0]), Variable(batch[1]), batch[2]\n if self.cuda:\n input = input.cuda(self.gpu_ids[0])\n bicubic = bicubic.cuda(self.gpu_ids[0])\n\n t0 = time.time()\n with torch.no_grad(): \n prediction = self.model(input)\n t1 = time.time()\n print(\"===> Processing: %s || Timer: %.4f sec.\" % (name[0], (t1 - t0)))\n avg_time.append(t1 - t0)\n self.save_img(bicubic.cpu().data, name[0][0:-4]+'_Bic.png')\n self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')\n print(\"===> AVG Timer: %.4f sec.\" % (np.mean(avg_time)))\n\n def save_img(self, img, img_name):\n save_img = img.squeeze().clamp(0, 1).numpy().transpose(1,2,0)\n # save img\n save_dir=os.path.join('results/',self.cfg['test']['type'])\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n 
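For reference, save_img in testsolver.py above turns a CHW float tensor in [0, 1] into an image on disk. Below is a minimal numpy/OpenCV sketch of the same conversion; it assumes OpenCV is installed, casts to uint8 explicitly before the color-order swap, and the random array and output file name are hypothetical stand-ins.

```python
import numpy as np
import cv2  # assumes OpenCV is installed, as in the solver above

rgb = np.random.rand(64, 64, 3).astype(np.float32)  # stand-in "prediction": HWC, RGB, in [0, 1]
bgr = cv2.cvtColor((rgb * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)  # OpenCV expects BGR order
cv2.imwrite('demo.png', bgr, [cv2.IMWRITE_PNG_COMPRESSION, 0])       # hypothetical output name
```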
save_fn = save_dir +'/'+ img_name\n cv2.imwrite(save_fn, cv2.cvtColor(save_img*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])\n \n def run(self):\n self.check()\n if self.cfg['test']['type'] == 'test': \n self.dataset = get_test_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])\n self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,\n num_workers=self.cfg['threads'])\n self.test()\n elif self.cfg['test']['type'] == 'eval': \n self.dataset = get_eval_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])\n self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,\n num_workers=self.cfg['threads'])\n self.eval()\n else:\n raise ValueError('Mode error!')","repo_name":"jiaming-wang/N_SR","sub_path":"solver/testsolver.py","file_name":"testsolver.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"76"} +{"seq_id":"26251230380","text":"import pprint as pp\n# Count the frequency of character in String\n\ntext = 'Count the frequency of character in String'\nletters = {}\n\nfor i in text.lower():\n letters.setdefault(i, 0)\n letters[i] = letters[i] + 1\n\n# pp.pprint(letters) \n\n# Password simulation / password protected simulation\n\n'''\nuser_list = {'user1@mail.com': 123, 'user2@mail.com': 456, 'user3@mail.com': 789}\n\ntries_left = 0\nuser_found = False\n\nwhile(tries_left < 5):\n user_name = input('Enter Your Username: ')\n if user_name in user_list:\n for i in range(0, 3):\n password = int(input('Enter Your Password: '))\n if password in user_list.values():\n user_found = True\n print('Welcome, '+ str(user_name)+'!')\n break\n else:\n print('Incorrect Password! Try Again, you have ' + str(2 - i)+' tries left.')\n else:\n print('Username Not Found! Try Again: ')\n\n tries_left = tries_left + 1 \n\n if user_found:\n break\n''' \n\ncontacts = {'john': 1122, 'doe': 2122, 'smith': 9090, 'samsi': 4567}\nflag = 0\n\nwhile flag < 5:\n name = input('Enter The Name(Press ENTER to exit.): ')\n if name == '':\n break\n if name in contacts:\n print(name + '-' + str(contacts[name]))\n break\n else:\n print('No Such Contact in the list. Add this to Contact? 
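For reference, the setdefault loop at the top of pp16.py above builds a character-frequency dict by hand; the standard library's collections.Counter does the same in one call:

```python
from collections import Counter

text = 'Count the frequency of character in String'
letters = Counter(text.lower())   # same result as the setdefault loop above
print(letters['t'])               # 4
```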
Y/N ')\n        reply = input()\n        if reply == 'Y':\n            phone = int(input('Enter Phone Number: '))\n            contacts[name] = phone\n            print('Contact Added!')\n            break\n        else:\n            print('Search Again')\n    flag = flag + 1 \n\n# print(contacts)\n","repo_name":"HelloKowshik/99-python","sub_path":"python-practice/basic/pp16.py","file_name":"pp16.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"6986599086","text":"import vk_api\nfrom vk_api.utils import get_random_id\nimport os\nimport logging\nimport re\nimport requests\nfrom PIL import Image\nimport numpy as np\nfrom vk_api import Captcha\nfrom io import BytesIO\nimport onnxruntime as rt\nimport sys\nimport random\n\n\ndef fix_relative_path(relative_path: str) -> str:\n    \"\"\"\n    Fix relative paths for PyInstaller bundles\n    \"\"\"\n    application_path = ''\n    if getattr(sys, 'frozen', False):\n        application_path = os.path.dirname(os.path.abspath(sys.executable))\n    elif __file__:\n        application_path = os.path.dirname(os.path.abspath(__file__))\n    return os.path.abspath(os.path.join(application_path, relative_path))\n\n\ndef captcha_handler(captcha: Captcha):\n    \"\"\"\n    Handler for processing captchas from VK\n    \"\"\"\n    captcha_url = captcha.get_url()\n    captcha_params = re.match(r\"https://api\\.vk\\.com/captcha\\.php\\?sid=(\\d+)&s=(\\d+)\", captcha_url)\n    if captcha_params is not None:\n        logging.info(\"A captcha appeared, trying to solve it automatically...\")\n        key = solve_captcha(sid=int(captcha_params.group(1)), s=int(captcha_params.group(2)))\n        logging.info(\"Captcha text recognized, submitting the solution...\")\n    else:\n        key = input(\"\\n\\n[!] To continue, enter the captcha from the image {0} here:\\n> \".format(captcha.get_url())).strip()\n    return captcha.try_again(key)\n\n\ndef solve_captcha(sid, s):\n    \"\"\"\n    Captcha solver based on machine vision\n    \"\"\"\n    response = requests.get(f'https://api.vk.com/captcha.php?sid={sid}&s={s}')\n    img = Image.open(BytesIO(response.content)).resize((128, 64)).convert('RGB')\n    x = np.array(img).reshape(1, -1)\n    x = np.expand_dims(x, axis=0)\n    x = x / np.float32(255.)\n    session = rt.InferenceSession(fix_relative_path('models/captcha_model.onnx'))\n    session2 = rt.InferenceSession(fix_relative_path('models/ctc_model.onnx'))\n    out = session.run(None, dict([(inp.name, x[n]) for n, inp in enumerate(session.get_inputs())]))\n    out = session2.run(None, dict([(inp.name, np.float32(out[n])) for n, inp in enumerate(session2.get_inputs())]))\n    char_map = ' 24578acdehkmnpqsuvxyz'\n    captcha = ''.join([char_map[c] for c in np.uint8(out[-1][out[0] > 0])])\n    return captcha\n\n\ntoken = \"\"#token got from website\nsession = vk_api.VkApi(token=token, captcha_handler=captcha_handler)\nvk = session.get_api()\nd = vk.friends.get(order='hints')\nfriends = d['items'][:]\nfriends = list(reversed(friends))\npozdr = ['С Новым годом!!!', 'С НОВЫМ ГОДОМ!!!', \"Поздравляю с Новым 2024 годом!!!\",\n         \"Поздравляю с Новым 2023 годом!!!!\", \"С Новым 2023 годом!!!\", \"Поздравляю с Новым годом!\",\n         \"ПОЗДРАВЛЯЮ С НОВЫМ ГОДОМ!!!\"]\nfriends=friends[20:]\nprint(friends)\n# for i in range(len(friends)):\n#     vk.messages.send(user_id=friends[i], message=random.choice(pozdr), random_id=get_random_id())","repo_name":"StrVlad/NewYear_Sendler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}{"seq_id":"18765627641","text":"# Problem Set 4B\n# Name: \n# 
Collaborators:\n# Time Spent: x:xx\n\nimport string\n\n### HELPER CODE ###\ndef load_words(file_name):\n    '''\n    file_name (string): the name of the file containing \n    the list of words to load \n    \n    Returns: a list of valid words. Words are strings of lowercase letters.\n    \n    Depending on the size of the word list, this function may\n    take a while to finish.\n    '''\n    print(\"Loading word list from file...\")\n    # inFile: file\n    inFile = open(file_name, 'r')\n    # wordlist: list of strings\n    wordlist = []\n    for line in inFile:\n        wordlist.extend([word.lower() for word in line.split(' ')])\n    print(\"  \", len(wordlist), \"words loaded.\")\n    return wordlist\n\ndef is_word(word_list, word):\n    '''\n    Determines if word is a valid word, ignoring\n    capitalization and punctuation\n\n    word_list (list): list of words in the dictionary.\n    word (string): a possible word.\n    \n    Returns: True if word is in word_list, False otherwise\n\n    Example:\n    >>> is_word(word_list, 'bat') returns\n    True\n    >>> is_word(word_list, 'asdf') returns\n    False\n    '''\n    word = word.lower()\n    word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n    return word in word_list\n\ndef get_story_string():\n    \"\"\"\n    Returns: a story in encrypted text.\n    \"\"\"\n    f = open(\"story.txt\", \"r\")\n    story = str(f.read())\n    f.close()\n    return story\n\n### END HELPER CODE ###\n\nWORDLIST_FILENAME = 'words.txt'\n\nclass Message(object):\n    def __init__(self, text):\n        '''\n        Initializes a Message object\n        \n        text (string): the message's text\n\n        a Message object has two attributes:\n            self.message_text (string, determined by input text)\n            self.valid_words (list, determined using helper function load_words)\n        '''\n        \n        self.message_text = text\n        self.valid_words = load_words(WORDLIST_FILENAME)[:]\n\n    def get_message_text(self):\n        '''\n        Used to safely access self.message_text outside of the class\n        \n        Returns: self.message_text\n        '''\n        return self.message_text[:]\n\n    def get_valid_words(self):\n        '''\n        Used to safely access a copy of self.valid_words outside of the class.\n        This helps you avoid accidentally mutating class attributes.\n        \n        Returns: a COPY of self.valid_words\n        '''\n        return self.valid_words[:]\n\n    def build_shift_dict(self, shift):\n        '''\n        Creates a dictionary that can be used to apply a cipher to a letter.\n        The dictionary maps every uppercase and lowercase letter to a\n        character shifted down the alphabet by the input shift. The dictionary\n        should have 52 keys of all the uppercase letters and all the lowercase\n        letters only. \n        \n        shift (integer): the amount by which to shift every letter of the \n        alphabet. 0 <= shift < 26\n\n        Returns: a dictionary mapping a letter (string) to \n                 another letter (string). 
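For reference, the 52-entry mapping that build_shift_dict describes can also be expressed with the standard library's str.maketrans. The sketch below is an illustration of the same idea, not the problem set's intended solution:

```python
import string

def shift_table(shift):
    # rotate both alphabets by `shift` and map originals onto the rotations
    upper = string.ascii_uppercase
    lower = string.ascii_lowercase
    return str.maketrans(upper + lower,
                         upper[shift:] + upper[:shift] +
                         lower[shift:] + lower[:shift])

print('hello'.translate(shift_table(2)))   # jgnnq
print('jgnnq'.translate(shift_table(24)))  # hello (26 - 2 = 24 undoes the shift)
```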
\n '''\n # initiate shift_dict\n shift_dict = self.get_normal_letter_dict(65).copy()\n assert 0 <= shift and shift <26, 'Range for shift is 0<= shift < 26.'\n # Uppercase\n for i in range(65, 91): # for unit in get_normal_letter_dict in range 65~90\n if shift_dict[chr(i)] + shift >= 91: # if unit value + shift > 91\n shift_dict[chr(i)] = shift_dict[chr(i)] + shift -26 # unit value in shift_dict = value in get_normal_letter_dict + shift -26\n else: # else\n shift_dict[chr(i)] = shift_dict[chr(i)] + shift # value = value + shift \n # Lowercase\n for i in range(97, 123): # for unit in get_normal_letter_dict in range 97~122\n if shift_dict[chr(i)] + shift >= 123 : # if unit value + shift > 122\n shift_dict[chr(i)] = shift_dict[chr(i)] + shift -26 # unit value in shift_dict = value in get_normal_letter_dict + shift -26\n else: # else\n shift_dict[chr(i)] = shift_dict[chr(i)] + shift # value = value + shift \n return shift_dict.copy()\n \n def get_normal_letter_dict(self, cha_num): \n \"\"\" Assumes cha is a number :65\n originally be called by self.get_normal_letter_dict(65)\n Returns a diction contains a~z:97~122 and A~Z:65~90 by recursion\"\"\"\n whole_letter = {}\n \n if chr(cha_num) == 'Z': # recursion stop at Z 90\n return {'Z':90, 'z':122}\n else: \n whole_letter[chr(cha_num)] = cha_num # whole_letter assign chr 65 and chr 97\n whole_letter[chr(cha_num+32)] = cha_num+32 \n whole_letter.update(self.get_normal_letter_dict(cha_num+1)) # whole_letter.append()\n return whole_letter.copy() #return the dict\n\n def apply_shift(self, shift):\n '''\n Applies the Caesar Cipher to self.message_text with the input shift.\n Creates a new string that is self.message_text shifted down the\n alphabet by some number of characters determined by the input shift \n shift (integer): the shift with which to encrypt the message.\n 0 <= shift < 26\n\n Returns: the message text (string) in which every character is shifted\n down the alphabet by the input shift\n '''\n #initialize\n shifted_message = ''\n shift_dict = self.build_shift_dict(shift).copy()\n \n for letter in self.message_text: # every letter in the message_text\n if letter.isalpha() is False: # change letter one by one according to shifted dict\n shifted_message += letter\n else: \n shifted_message += chr(shift_dict[letter])\n return shifted_message\n \nclass PlaintextMessage(Message):\n def __init__(self, text, shift):\n '''\n Initializes a PlaintextMessage object \n \n text (string): the message's text\n shift (integer): the shift associated with this message\n\n A PlaintextMessage object inherits from Message and has five attributes:\n self.message_text (string, determined by input text)\n self.valid_words (list, determined using helper function load_words)\n self.shift (integer, determined by input shift)\n self.encryption_dict (dictionary, built using shift)\n self.message_text_encrypted (string, created using shift)\n\n '''\n Message.__init__(self, text)\n self.shift = shift\n self.encryption_dict = Message.build_shift_dict(self, shift)\n self.message_text_encrypted = Message.apply_shift(self, shift)\n \n\n def get_shift(self):\n '''\n Used to safely access self.shift outside of the class\n \n Returns: self.shift\n '''\n #return input('Enter a favor number for shift: ')\n return self.shift\n\n def get_encryption_dict(self):\n '''\n Used to safely access a copy self.encryption_dict outside of the class\n \n Returns: a COPY of self.encryption_dict\n '''\n return self.encryption_dict.copy()\n \n\n def get_message_text_encrypted(self):\n '''\n Used 
to safely access self.message_text_encrypted outside of the class\n        \n        Returns: self.message_text_encrypted\n        '''\n        return self.message_text_encrypted\n\n    def change_shift(self, shift):\n        '''\n        Changes self.shift of the PlaintextMessage and updates other \n        attributes determined by shift.        \n        \n        shift (integer): the new shift that should be associated with this message.\n        0 <= shift < 26\n\n        Returns: nothing\n        '''\n        self.shift = shift\n        self.encryption_dict = Message.build_shift_dict(self, shift)\n        self.message_text_encrypted = Message.apply_shift(self, shift)\n\n\nclass CiphertextMessage(Message):\n    def __init__(self, text):\n        '''\n        Initializes a CiphertextMessage object\n                \n        text (string): the message's text\n\n        a CiphertextMessage object has two attributes:\n            self.message_text (string, determined by input text)\n            self.valid_words (list, determined using helper function load_words)\n        '''\n        Message.__init__(self, text)\n        \n\n    def decrypt_message(self):\n        '''\n        Decrypt self.message_text by trying every possible shift value\n        and find the \"best\" one. We will define \"best\" as the shift that\n        creates the maximum number of real words when we use apply_shift(shift)\n        on the message text. If s is the original shift value used to encrypt\n        the message, then we would expect 26 - s to be the best shift value \n        for decrypting it.\n\n        Note: if multiple shifts are equally good such that they all create \n        the maximum number of valid words, you may choose any of those shifts \n        (and their corresponding decrypted messages) to return\n\n        Returns: a tuple of the best shift value used to decrypt the message\n        and the decrypted message text using that shift value\n        '''\n        \n        num_matched = 0\n        rank = {}\n        sample = []\n        best = 0\n\n        for i in range(1, 26):  # score each of the 25 candidate shifts\n            sample.extend(Message.apply_shift(self, i).split(' '))\n            for word in sample:  # count how many tokens are real words\n                if is_word(Message.get_valid_words(self), word):\n                    num_matched += 1\n            \n            rank[i] = num_matched\n            num_matched = 0\n            sample = []\n        for i in range(1, 26):  # find the best\n            if rank[i] == max(rank.values()):\n                best = int(i)\n                break\n        return (best, Message.apply_shift(self, best))\n    \n    def get_exerpt_from_Ciphertext(self, text):\n        \"\"\" \n        Assumes text is a string that may include punctuation and blank space\n        Returns a list of words taken from the middle of the text, with\n        punctuation stripped and possibly-partial edge words dropped\n        \"\"\"\n        text = text.lower()\n        # keep only letters and spaces so the excerpt splits cleanly into words\n        text = ''.join(ch for ch in text if ch.isalpha() or ch == ' ')\n        mid = len(text) // 2\n        text_list = text[mid:mid + 100].split(' ')\n        # drop the first and the last word since they may be incomplete\n        if len(text_list) > 2:\n            text_list = text_list[1:-1]\n        return text_list\n    \nif __name__ == '__main__':\n\n    #Example test case (PlaintextMessage)\n    plaintext = PlaintextMessage('hello', 2)\n    print('Expected Output: jgnnq')\n    print('Actual Output:', plaintext.get_message_text_encrypted())\n    print('-----------')\n    print('')\n\n    #Example test case (CiphertextMessage)\n    ciphertext = CiphertextMessage('jgnnq')\n    print('Expected Output:', (24, 'hello'))\n    print('Actual Output:', ciphertext.decrypt_message())\n    print('-----------')\n    print('')\n\n    #TODO: WRITE YOUR TEST CASES HERE\n    plaintext = PlaintextMessage('Tell me. \nWhat kind of the kind person you are?', 4)\n    print('Expected Output: Xipp qi. 
\\nAlex omrh sj xli omrh tivwsr csy evi?')\n print('Actual Output:', plaintext.get_message_text_encrypted())\n print('-----------')\n print('')\n \n #TODO: best shift value and unencrypted story \n ciphertext = CiphertextMessage('Xipp qi. \\nAlex omrh sj xli omrh tivwsr csy evi?')\n print('Expected Output:', (22, 'Tell me. \\nWhat kind of the kind person you are?'))\n print('Actual Output:', ciphertext.decrypt_message())\n print('-----------')\n print('')\n \n","repo_name":"lemonwater600ml/60001","sub_path":"ps4/ps4b.py","file_name":"ps4b.py","file_ext":"py","file_size_in_byte":12463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"8370413157","text":"# lista = []\n# for cont in range(5):\n# n = int(input(\"Digite um valor: \"))\n# lista.append(n)\n\n# for pos in range(4, -1, -1):\n# print(lista[pos])\n\n####################################################################\n\n\n# lista = []\n# n = int(input(\"Digite um valor: \"))\n# while n != 0:\n# lista.append(n)\n# n = int(input(\"Digite um valor: \"))\n\n# for pos in range(4, -1, -1):\n\n######################################################################\n\n\nalgI = []\nnome = input(\"Digite o nome do aluno: \")\nnome = nome.upper()\nwhile nome != \"FIM\":\n algI.append(nome)\n algI.append(float(input(\"Digite a 1ª nota do aluno: \")))\n algI.append(float(input(\"Digite a 2ª nota do aluno: \")))\n algI.append(float(input(\"Digite a 3ª nota do aluno: \")))\n nome = input(\"Digite o nome do aluno: \")\n nome = nome.upper()\n\npos = 0\nwhile pos < len(algI):\n print(algI[pos], end=\"\\t\")\n media = (algI[pos + 1] + algI[pos + 2] + algI[pos + 3]) / 3\n print(f\"{media:.2f}\", end=\"\\t\")\n if media >= 6:\n print(\"Aprovado\")\n else:\n print(\"Reprovado\")\n pos += 4\n\nprint(*algI)\n","repo_name":"RodDu/Python-e-algor-timos","sub_path":"lista_1.py","file_name":"lista_1.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36634387","text":"print('-' *22)\nprint('SEQUÊNCIA DE FIBONACCI')\nprint('-' *22)\ntermo = int(input('Quantos termos você quer mostrar? 
'))\ncont = 3\nt1 = 0\nt2 = 1\nprint(f'{t1} {t2} ', end= '')\nwhile cont <= termo:\n t3 = t1 + t2\n print(t3, end= ' ')\n t1 = t2\n t2 = t3\n cont += 1\nprint('FIM')","repo_name":"fndalemao/Python","sub_path":"Desafios/desafio063.py","file_name":"desafio063.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4236765413","text":"class Solution:\n # count straightforward.\n # # O(N^2) Time O(N) Space.\n def smallerNumbersThanCurrent(self, nums: [int]) -> [int]:\n count = dict()\n for x in nums:\n if x in count:\n count[x] += 1\n else:\n count[x] = 1\n\n res = [0] * len(nums)\n for i, x in enumerate(nums):\n for k in count:\n if k < x:\n res[i] += count[k]\n\n return res\n","repo_name":"b1ueskydragon/PythonGround","sub_path":"leetcode/p1365/smaller_numbers_than_current.py","file_name":"smaller_numbers_than_current.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2716842783","text":"numbers = [2,4,6,7, 5, 84,43,3]\r\nnumbers.append(56)\r\nnumbers.insert(1,45)\r\nnumbers.remove(45)\r\nnumbers.pop()\r\nprint(numbers)\r\nprint(56 in numbers)\r\nprint(numbers.count(5))\r\nnumbers.sort()\r\nprint(numbers)\r\nnumbers.reverse()\r\nprint(numbers)\r\nnumbers2 = numbers.copy()\r\nnumbers.append(65)\r\nprint(f'{numbers}, {numbers2}')\r\n\r\n\r\n\r\nnumbers3 = [3,4,4,53,2,2,4,546]\r\nunique = []\r\nfor duplicates in numbers3:\r\n if duplicates not in unique:\r\n unique.append(duplicates)\r\nprint(f'The unique List is {unique}')\r\n","repo_name":"Burhan1310/Python_beginning","sub_path":"list_methods.py","file_name":"list_methods.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44172144882","text":"import os\nimport torch\nimport argparse\n\nfrom cail.buffer import Buffer, SerializedBuffer\nfrom cail.env import make_env\n\n\ndef mix_demo(folder: str, env_id: str):\n \"\"\"\n Create a mixture of demonstrations based on demonstrations in the folder\n\n Parameters\n ----------\n folder: str\n folder containing demos to be mixed\n env_id: str\n name of the environment\n \"\"\"\n size = []\n buffer_name = []\n files = os.listdir(folder)\n for file in sorted(files):\n buffer_name.append(os.path.join(folder, file))\n size.append(int(file.split('_')[0].split('size')[1]))\n\n device = torch.device(\"cpu\")\n env = make_env(env_id)\n\n output_buffer = Buffer(\n buffer_size=sum(size),\n state_shape=env.observation_space.shape,\n action_shape=env.action_space.shape,\n device=device\n )\n\n buffers = []\n for i_buffer, name in enumerate(buffer_name):\n buffers.append(\n SerializedBuffer(\n path=name,\n device=device\n )\n )\n states, actions, rewards, dones, next_states = buffers[i_buffer].get()\n for i_demo in range(size[i_buffer]):\n output_buffer.append(\n states[i_demo].numpy(),\n actions[i_demo].numpy(),\n rewards[i_demo].numpy(),\n dones[i_demo].numpy(),\n next_states[i_demo].numpy()\n )\n\n rewards_name = ''\n for name in buffer_name:\n mean_reward = name.split('reward')[1].split('.pth')[0]\n rewards_name = rewards_name + '_' + mean_reward\n\n if os.path.exists(os.path.join(\n 'buffers',\n env_id,\n f'size{sum(size)}_reward{rewards_name}.pth'\n )):\n print('Error: demonstrations with the same reward exists')\n else:\n output_buffer.save(os.path.join(\n 'buffers',\n env_id,\n 
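# the filename encodes the merged demo count and each source buffer's mean reward\n            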
f'size{sum(size)}_reward{rewards_name}.pth'\n        ))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n\n    # required\n    parser.add_argument('--folder', type=str, required=True,\n                        help='folder containing demos to be mixed')\n    parser.add_argument('--env-id', type=str, required=True,\n                        help='name of the environment')\n\n    args = parser.parse_args()\n    mix_demo(folder=args.folder, env_id=args.env_id)\n","repo_name":"syzhang092218-source/Confidence-Aware-Imitation-Learning","sub_path":"mix_demo.py","file_name":"mix_demo.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} {"seq_id":"37482176748","text":"# patWalk.py\n\n\"\"\"\nThis module is an integral part of the program\nMMA - Musical Midi Accompaniment.\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nBob van der Poel \n\n\"\"\"\n\n\nimport random\n\n\nimport MMA.harmony\nimport MMA.notelen\n\nfrom . import gbl\nfrom MMA.common import *\nfrom MMA.pat import PC, Pgroup\n\n\nclass Walk(PC):\n    \"\"\" Pattern class for a walking bass track. \"\"\"\n\n    vtype = 'WALK'\n    walkChoice = 0\n\n    def getPgroup(self, ev):\n        \"\"\" Get group for walking bass pattern.\n\n            Fields - start, length, volume\n        \"\"\"\n\n        if len(ev) != 3:\n            error(\"There must be exactly 3 items in each group in \"\n                  \"a Walking Bass definition, not <%s>\" % ' '.join(ev))\n\n        a = Pgroup()\n\n        a.offset = self.setBarOffset(ev[0])\n        a.duration = MMA.notelen.getNoteLen(ev[1])\n        a.vol = stoi(ev[2], \"Type error in Walking Bass definition\")\n\n        return a\n\n    def restart(self):\n        self.ssvoice = -1\n        self.walkChoice = 0\n\n    def trackBar(self, pattern, ctable):\n        \"\"\" Do a walking bass bar.\n\n            Called from self.bar()\n\n        \"\"\"\n\n        sc = self.seq\n        dir = self.direction[sc]\n\n        for p in pattern:\n\n            tb = self.getChordInPos(p.offset, ctable)\n\n            if tb.walkZ:\n                continue\n\n            # Create a note list from the current scale. We do\n            # this for each beat, but it's pretty fast. The note\n            # list is simply notes 0..5 of the scale PLUS notes\n            # 1..4 reversed. So, a Cmajor chord would result in\n            # the note list (0,2,4,5,7,9,7,5,4,2). We never use\n            # scale notes past this point. So in a C chord we\n            # are using notes C, D, E, F, G and A. This is one\n            # reason we ignore the RANGE setting ... there would\n            # be a big gap between the 'A' and the next 'C'.\n            #\n            # Note that we deliberately skip the 7th. Too often\n            # the chord is a Major but the melody note will be\n            # the dom. 7th and the M7 will sound off. 
So, just\n            #   err on the side of caution.\n            #\n            # If DIR is UP or DOWN we don't append the 2nd half\n            #   of the scale.\n            #\n            # If DIR is DOWN we reverse the order as well.\n\n            wNotes = list(tb.chord.scaleList[0:6])\n\n            if dir not in ('UP', 'DOWN'):\n                b = list(tb.chord.scaleList[1:5])\n                b.reverse()\n                wNotes += b\n\n            if dir == 'DOWN':\n                wNotes.reverse()\n\n            # Ensure that the offset is in range.\n\n            if self.walkChoice >= len(wNotes) or self.walkChoice < 0:\n                self.walkChoice = 0\n\n            \"\"\" Even with a walking bass it's nice to have the chord root on\n                beat 1 ... not all the time, but most. This bit of code ensures\n                that more than 50% of beat ones will have the root.\n            \"\"\"\n\n            if p.offset == 0 and random.choice((0, 1)):\n                self.walkChoice = 0\n\n            note = wNotes[self.walkChoice]\n\n            \"\"\" Adjust offset for NEXT TIME. If the direction is\n                up/down we just increment the pointer. If we have\n                direction set to RANDOM then we select either -1,\n                0 or 1 with equal chance for moving up, down or\n                not-at-all. With BOTH we have a preference to move up.\n            \"\"\"\n\n            if dir in ('UP', 'DOWN'):\n                self.walkChoice += 1\n            elif dir == 'RANDOM':\n                self.walkChoice += random.choice((0, 1, -1))\n            else: # BOTH\n                self.walkChoice += random.choice((-1, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1))\n\n            if not self.harmonyOnly[sc]:\n                notelist = [(note, p.vol)]\n            else:\n                notelist = []\n\n            if self.harmony[sc]:\n                ch = self.getChordInPos(p.offset, ctable).chord.noteList\n                h = MMA.harmony.harmonize(self.harmony[sc], note, ch)\n                vol = p.vol * self.harmonyVolume[sc]\n                harmlist = list(zip(h, [vol] * len(h)))\n            else:\n                harmlist = []\n\n            offset = p.offset\n            if self.ornaments['type']:\n                offset = MMA.ornament.doOrnament(self, notelist,\n                     self.getChordInPos(offset, ctable).chord.scaleList, p)\n                notelist = []\n\n            self.sendChord(notelist + harmlist, p.duration, offset)\n","repo_name":"infojunkie/mma","sub_path":"MMA/patWalk.py","file_name":"patWalk.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} {"seq_id":"35660156501","text":" #\n# @lc app=leetcode id=461 lang=python3\n#\n# [461] Hamming Distance\n#\n\n# @lc code=start\nclass Solution:\n    def hammingDistance(self, x: int, y: int) -> int:\n        x_xor_y = x ^ y\n        mask = 1\n        count = 0\n        while x_xor_y >= mask:\n            if x_xor_y & mask != 0:\n                count += 1\n            mask = mask << 1\n        \n        return count\n\n# @lc code=end","repo_name":"gincheong/leetcode","sub_path":"Top Interview Questions/src/461.hamming-distance.py","file_name":"461.hamming-distance.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"547782870","text":"import cv2\nimport json\nimport numpy as np\nimport os\n'''\nUse this script by changing img_idx, or by setting img_file to the desired image filename\n\nmasked : blue\nmaintable : yellow\nstamp : green\neverything else : red\n'''\n\n# path settings\nnew_img_path = r'C:\\Users\\hyungu_lee\\PycharmProjects\\ocr\\data\\medical\\img/stamp2.jpg'\nnew_json_path = r'C:\\Users\\hyungu_lee\\PycharmProjects\\ocr\\data\\medical\\ufo/stamp2.json'\nwith open(\"./data/medical/ufo/train.json\", encoding=\"utf-8\") as file:\n    ann = json.load(file)\n#'train' 'test'\nmode = 'train'\nimg_idx = 69\n\n\ndef onmouse(event, x, y, flags, param):\n    global isDragging, x0, y0, image,row_image, img_file, words, offsetx, offsety, new_img, w, h, nw,nh, new_key\n    if event == cv2.EVENT_LBUTTONDOWN:\n        isDragging = True\n        x0 = x\n        y0 = y\n    elif event == cv2.EVENT_MOUSEMOVE:\n        if isDragging:\n            img_draw = 
image.copy()\n            cv2.rectangle(img_draw, (x0, y0), (x, y), blue, 2)\n            cv2.imshow(img_file, img_draw)\n    elif event == cv2.EVENT_LBUTTONUP:\n        if isDragging:\n            isDragging = False\n            w_ = x - x0\n            h_ = y - y0\n            if w_ > 0 and h_ > 0:\n                img_draw = image.copy()\n                cv2.rectangle(img_draw, (x0, y0), (x, y), red, 2)\n                roi = row_image[y0:y0 + h_, x0:x0 + w_]\n                if (w_ >= 1024) or (h_ >= 1200):\n                    print('too big')\n                    pass\n                else:\n                    if (offsetx + w_ <= 1024) and (offsety + h_ <= 1200):\n                        # both x and y fit within the offset limits\n                        # paste the crop into the new image\n                        new_img[offsety:offsety + h_,offsetx:offsetx + w_ ] = roi\n                        keys = words.keys()\n                        for k in keys:\n                            word = words[k]\n                            pts = np.array(word['points']).astype(np.int32)\n                            ptx = pts[:,0]*(nw/w)\n                            pty = pts[:,1]*(nh/h)\n                            ptx_b = (x0 <= ptx).all() and (ptx <= x0+w_).all()\n                            pty_b = (y0 <= pty).all() and (pty <= y0+h_).all()\n                            if pty_b and ptx_b:\n                                nptx = (ptx - x0) + offsetx\n                                npty = (pty - y0) + offsety\n                                npt = np.vstack((nptx, npty)).transpose()\n                                new_json['images'][new_img_base]['words']['{0:04d}'.format(new_key)]=word\n                                new_json['images'][new_img_base]['words']['{0:04d}'.format(new_key)]['points']=npt.tolist()\n                                new_key +=1\n                        print(new_json)\n                        offsety += h_\n                    else:\n                        offsety = 0\n                    cv2.imshow('newimg',new_img)\n                    cv2.imwrite(new_img_path,new_img)\n                    with open(new_json_path, 'w', encoding='utf-8') as f:\n                        json.dump(new_json,f,indent=4, ensure_ascii=False)\n            else:\n                cv2.imshow(img_file, image)\n                print('drag should start from left-top side')\n\n#set global var\nisDragging = False\nx0, y0, w, h = -1, -1, -1, -1\nblue, red = (255, 0, 0), (0, 0, 255)\nnew_img = np.full((1200,1024,3),170, np.uint8)\noffsetx, offsety = 500, 0\nnew_key = 1\n#init json\nnew_img_base = str(os.path.basename(new_img_path))\nnew_json = {'images':{new_img_base:{\"paragraphs\": {},\"words\": {}}}}\nprint(new_json)\nwhile 1:\n    print(img_idx)\n    img_file = list(ann.get(\"images\").keys())[img_idx]\n    img = ann.get(\"images\")[img_file]\n    words = img[\"words\"]\n    # image load\n    image = cv2.imread(\"./data/medical/img/train/\"+img_file)\n    row_image = image\n    canvas = np.zeros_like(image)\n    print(img_file)\n    # box color\n    colors = {\"default\" : (0,0,255), \"masked\" : (255,0,0), \"maintable\" : (0,50,50), \"stamp\" : (0,255,0)}\n\n    # draw boxes\n    keys = words.keys()\n    for k in keys:\n        word = words[k]\n        pts = np.array(word['points']).astype(np.int32)\n        # select color\n        tags = word['tags']\n        color = colors['default']\n        if \"masked\" in tags:\n            color = colors[\"masked\"]\n        if \"maintable\" in tags:\n            #continue\n            color = colors[\"maintable\"]\n        if \"stamp\" in tags:\n            color = colors[\"stamp\"]\n\n        mask_canvas = np.zeros_like(image)\n        cv2.fillPoly(mask_canvas, [pts], color)\n        canvas = cv2.addWeighted(canvas, 1, mask_canvas, 1, 0)\n\n    image = cv2.addWeighted(image, 0.5, canvas, 0.5, 0)\n\n    # resize\n\n    h, w, _ = image.shape\n    ratio = w/h\n    nh = 1200\n    nw = nh * ratio\n    image = cv2.resize(image, (int(nw), nh))\n    row_image = cv2.resize(row_image, (int(nw), nh))\n    # show image\n    cv2.imshow(img_file, image)\n    cv2.setMouseCallback(img_file, onmouse, image)\n    cvkey = cv2.waitKeyEx(0)\n\n    if cvkey == 0x270000: # right arrow key\n        img_idx +=1\n    elif cvkey == 0x250000: # left arrow key\n        img_idx -=1\n    cv2.destroyAllWindows()","repo_name":"boostcampaitech5/level2_cv_datacentric-cv-06","sub_path":"hyungu_lee/img_json_maker.py","file_name":"img_json_maker.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"35969170109","text":"#!/usr/bin/python3\n# _*_ coding=utf-8 
_*_\n\nimport os\n\n# create the test folder and test files\nos.mkdir('test')\nos.chdir('test')\nqytang1 = open('qytang1', 'w')\nqytang1.write('test file\\n')\nqytang1.write('this is qytang\\n')\nqytang1.close()\nqytang2 = open('qytang2', 'w')\nqytang2.write('test file\\n')\nqytang2.write('qytang python\\n')\nqytang2.close()\nqytang3 = open('qytang3', 'w')\nqytang3.write('test file\\n')\nqytang3.write('this is python\\n')\nqytang3.close()\nos.mkdir('qytang4')\nos.mkdir('qytang5')\n\nprint('文件中包含\"qytang\"关键字的文件为:')\nprint('方案一:')\n\nfor file_or_dir in os.listdir(os.getcwd()):\n    if os.path.isfile(file_or_dir):\n        for line in open(file_or_dir):\n            if 'qytang' in line:\n                print(file_or_dir)\n                break\n\nprint('方案二:')\n# this is the more optimized recursive approach; topdown controls the walk order: True visits the top directory's entries first, False visits the subdirectories of top (here the test directory) first\nos.chdir('/root/remote')\nfor root, dirs, files in os.walk('test', topdown=False):\n    if files != []:\n        for file_name in files:\n            for line in open(os.path.join(root, file_name)):\n                if 'qytang' in line:\n                    print(file_name)\n                    break\n\n\n\n# finish the cleanup\nos.chdir('/root/remote')\nfor root, dirs, files in os.walk('test', topdown=False):\n    for name in files:\n        os.remove(os.path.join(root, name))\n    for name in dirs:\n        os.rmdir(os.path.join(root, name))\nos.removedirs('test')\n\n\n\n\n\n\nif __name__ == '__main__':\n    pass\n    ","repo_name":"mrtiansir/python","sub_path":"file_1_2.py","file_name":"file_1_2.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"16255096304","text":"is_debug = True\n\n# a simple function\ndef power(nombre,puissance):\n    i = 0\n    resultat = 1\n    while i < puissance:\n        resultat = resultat*nombre\n        i = i+1\n    return resultat\n\n# a function that returns several results\ndef decomposer(nombre,divise_par):\n    \"\"\"Returns the result of integer division\n    as well as the remainder.\n    Yes, this is useless.\"\"\"\n    reste = nombre % divise_par\n    resultat_entier = nombre // divise_par\n    return resultat_entier, reste\n    \n# another simple function that uses a variable created outside of it\ndef debug(message):\n    if is_debug:\n        print(\"DEBUG::: \" + str(message))\n\n# a lambda function\naddition_lambda = lambda a,b: a+b\n# is exactly the same thing as:\ndef addition(a,b):\n    return a+b\n\nmon_resultat = power(2,3)\ndebug(mon_resultat)\nprint(mon_resultat)\n\nle_resultat_entier, le_reste = decomposer(12,5)\ndebug(le_reste)\nprint(le_resultat_entier)\n\nl_addition_SVP = addition(3,2)\nprint(l_addition_SVP)\nl_addition_SVP_lambda = addition_lambda(6,3)\nprint(l_addition_SVP_lambda)\n","repo_name":"Nico-Duduf/Formation_Python","sub_path":"CheatSheets/cheatsheet fonction.py","file_name":"cheatsheet fonction.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} {"seq_id":"72021350324","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nfrom torch.optim import Adam,SGD,Adagrad\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.svm import LinearSVC\nimport xgboost as xgb\nimport os\nprint(os.listdir(\"../input\"))\ntrain_data = pd.read_csv('../input/train.csv')\n\ndef prep_test():\n test_csv = pd.read_csv('../input/test.csv')\n temp_data = train_data\n ####################### Test data #############################################\n test_csv['HF1'] = test_csv['Horizontal_Distance_To_Hydrology'] + test_csv['Horizontal_Distance_To_Fire_Points']\n test_csv['HF2'] = abs(test_csv['Horizontal_Distance_To_Hydrology']-test_csv['Horizontal_Distance_To_Fire_Points'])\n test_csv['HR1'] = abs(test_csv['Horizontal_Distance_To_Hydrology']+test_csv['Horizontal_Distance_To_Roadways'])\n test_csv['HR2'] = abs(test_csv['Horizontal_Distance_To_Hydrology']-test_csv['Horizontal_Distance_To_Roadways'])\n test_csv['FR1'] = abs(test_csv['Horizontal_Distance_To_Fire_Points']+test_csv['Horizontal_Distance_To_Roadways'])\n test_csv['FR2'] = abs(test_csv['Horizontal_Distance_To_Fire_Points']-test_csv['Horizontal_Distance_To_Roadways'])\n test_csv['ele_vert'] = test_csv.Elevation - test_csv.Vertical_Distance_To_Hydrology\n\n test_csv['slope_hyd'] = (test_csv['Horizontal_Distance_To_Hydrology']**2+test_csv['Vertical_Distance_To_Hydrology']**2)**0.5\n test_csv.slope_hyd=test_csv.slope_hyd.map(lambda x: 0 if np.isinf(x) else x) # remove infinite value if any\n\n #Mean distance to Amenities \n test_csv['Mean_Amenities']=(test_csv.Horizontal_Distance_To_Fire_Points + test_csv.Horizontal_Distance_To_Hydrology + test_csv.Horizontal_Distance_To_Roadways) / 3 \n #Mean Distance to Fire and Water \n test_csv['Mean_Fire_Hyd']=(test_csv.Horizontal_Distance_To_Fire_Points + test_csv.Horizontal_Distance_To_Hydrology) / 2\n test_csv.drop(['Soil_Type15' , \"Soil_Type7\"], inplace = True, axis = 1 )\n for x in to_normalize:\n mean = temp_data[x].mean()\n std = temp_data[x].std()\n test_csv[x]= test_csv[x].apply(lambda y: (y-mean) / std)\n return test_csv\n\n\n\n####################### Train data #############################################\ntrain_data['HF1'] = train_data['Horizontal_Distance_To_Hydrology']+train_data['Horizontal_Distance_To_Fire_Points']\ntrain_data['HF2'] = abs(train_data['Horizontal_Distance_To_Hydrology']-train_data['Horizontal_Distance_To_Fire_Points'])\ntrain_data['HR1'] = abs(train_data['Horizontal_Distance_To_Hydrology']+train_data['Horizontal_Distance_To_Roadways'])\ntrain_data['HR2'] = abs(train_data['Horizontal_Distance_To_Hydrology']-train_data['Horizontal_Distance_To_Roadways'])\ntrain_data['FR1'] = abs(train_data['Horizontal_Distance_To_Fire_Points']+train_data['Horizontal_Distance_To_Roadways'])\ntrain_data['FR2'] = abs(train_data['Horizontal_Distance_To_Fire_Points']-train_data['Horizontal_Distance_To_Roadways'])\ntrain_data['ele_vert'] = train_data.Elevation-train_data.Vertical_Distance_To_Hydrology\n\ntrain_data['slope_hyd'] = 
(train_data['Horizontal_Distance_To_Hydrology']**2+train_data['Vertical_Distance_To_Hydrology']**2)**0.5\ntrain_data.slope_hyd=train_data.slope_hyd.map(lambda x: 0 if np.isinf(x) else x) # remove infinite value if any\n\n#Mean distance to Amenities \ntrain_data['Mean_Amenities']=(train_data.Horizontal_Distance_To_Fire_Points + train_data.Horizontal_Distance_To_Hydrology + train_data.Horizontal_Distance_To_Roadways) / 3 \n#Mean Distance to Fire and Water \ntrain_data['Mean_Fire_Hyd']=(train_data.Horizontal_Distance_To_Fire_Points + train_data.Horizontal_Distance_To_Hydrology) / 2 \ntrain_data.drop(['Soil_Type15' , \"Soil_Type7\"], inplace = True, axis = 1 )\nto_normalize = list(train_data.drop([\"Id\", \"Cover_Type\"], inplace=False, axis=1).columns)\n\n\ntest_csv = prep_test()\ntrain_data.head(5)\ntest_csv.head(5)\n\nfor x in to_normalize:\n mean = train_data[x].mean()\n std = train_data[x].std()\n train_data[x]= train_data[x].apply(lambda y: (y-mean)/ std)\n \ntrain_data.head(5)\ndef print_acc(acc,model_name):\n print(\"{} validation accuracy is {:.4f}%\".format(model_name, acc))\nX_train, X_test, y_train, y_test = train_test_split(train_data.drop([\"Id\", \"Cover_Type\"], inplace=False, axis=1).as_matrix(), list(train_data[\"Cover_Type\"].values), test_size=0.2)\nneigh = KNeighborsClassifier(n_neighbors=10, weights='distance', p=1)\net = ExtraTreesClassifier(n_estimators=100, max_depth=None, min_samples_split=2, random_state=0)\nclf2 = RandomForestClassifier(n_estimators=250,random_state=1)\nclf3 = GaussianNB(var_smoothing=True)\nclf4 = LinearSVC(random_state=5)\ngbm = xgb.XGBClassifier(max_depth=5, n_estimators=250, learning_rate=0.5)\nestms = [('rf', clf2), ('xgb', gbm), ('neigh', neigh), ('et', et)]\neclf1 = VotingClassifier(estimators=estms, voting='hard')\n#for tag, voter in estms:\n# voter = voter.fit(X_train, y_train)\n# print_acc(float(np.array(voter.predict(X_test) == y_test, dtype=np.int).sum() * 100) / len(y_test), tag)\neclf1 = eclf1.fit(X_train, y_train)\nprint_acc(float(np.array(eclf1.predict(X_test) == y_test, dtype=np.int).sum() * 100) / len(y_test), \"Voting\")\npreds = eclf1.predict(test_csv.drop([\"Id\"], axis=1).as_matrix())\nresult = pd.DataFrame(data={'Id': test_csv['Id'], 'Cover_Type': preds})\nresult.to_csv(path_or_buf='soft_voting_submittion.csv', index = False, header = True)","repo_name":"aorursy/new-nb-7.2","sub_path":"syncush_neural-forest-and-voting-79-accuracy.py","file_name":"syncush_neural-forest-and-voting-79-accuracy.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32860525413","text":"import unittest\nfrom services.tool_services.LtpService import ltpService\nfrom settings import BASE_DIR\nimport os\n\n\nclass TestLtp(unittest.TestCase):\n def test_seg(self):\n word = \"我来自北京青岛,我是中国人,我爱中国\"\n words = ltpService.segment(word)\n tags = ltpService.postag(words)\n parses = ltpService.parse(words, tags)\n entitys = ltpService.recognize(words, tags)\n labels = ltpService.label(words, tags, parses)\n word_tags = zip(words, tags)\n for wrod, tag in word_tags:\n print(\"w{0}:{1}\".format(wrod, tag))\n\n print(\"words:\\n\")\n for word in words:\n print(word)\n print(\"tags: \\n\")\n for tag in tags:\n print(tag)\n\n for parse in parses:\n print(parse)\n\n for entity in entitys:\n print(entity)\n\n for label in labels:\n print(label)\n\n\nif __name__ == '__main__':\n 
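# run the LTP segment/postag/parse smoke test when this file is executed directly\n    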
unittest.main()\n","repo_name":"Will-Holden/kb_demo","sub_path":"offline_processor/unit_test/TestLtp.py","file_name":"TestLtp.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72043827125","text":"# This script allows to create, start, stop and delete a VM on AWS EC2\r\n\r\nfrom libcloud.compute.types import Provider\r\nfrom libcloud.compute.providers import get_driver\r\nfrom libcloud.compute.base import NodeImage\r\nimport yaml\r\n\r\n# read access key, id and region from config file\r\nconfig = yaml.load(open('config.yml'))\r\nACCESS_ID = config['ACCESS_ID']\r\nSECRET_KEY = config['SECRET_KEY']\r\nREGION = config['REGION']\r\nAMI_ID = config['AMI_ID']\r\nKEYPAIR_NAME = config['KEYPAIR_NAME']\r\n\r\n# EC2 Size\r\nSIZE_ID = 't2.micro'\r\n# A list of security groups you want this node to be added to\r\nSECURITY_GROUP_NAMES = ['default']\r\n#EC2 node name\r\nNODE_NAME = 'test-node-1'\r\n\r\n\r\ncls = get_driver(Provider.EC2)\r\nCONNECTION = cls(ACCESS_ID, SECRET_KEY, region=REGION)\r\n\r\n\r\n# function to create node\r\ndef create_node(node_name, connection, size_id, ami_id, keypair_name, security_group_names):\r\n print('Creating node : ' + node_name)\r\n sizes = connection.list_sizes()\r\n size = [s for s in sizes if s.id == size_id][0]\r\n image = NodeImage(id=ami_id, name=None, driver=connection)\r\n print('Selected images :')\r\n print(image)\r\n print('Selected size : ')\r\n print(size)\r\n node = connection.create_node(name=node_name, image=image, size=size,\r\n ex_keyname=keypair_name,\r\n ex_securitygroup=security_group_names)\r\n print('Created !!')\r\n\r\n\r\n# Function to stop a node\r\ndef stop_node(node_name, connection):\r\n node = [n for n in connection.list_nodes() if n.name == node_name][0]\r\n print('Stopping node : '+ node_name)\r\n connection.ex_stop_node(node=node)\r\n print('Stopped !!')\r\n\r\n\r\n# Function to start the node\r\ndef start_node(node_name, connection):\r\n node = [n for n in connection.list_nodes() if n.name == node_name][0]\r\n print('Starting node : '+ node_name)\r\n connection.ex_start_node(node=node)\r\n print('Started !!')\r\n\r\n\r\ndef delete_node(node_name, connection):\r\n node = [n for n in connection.list_nodes() if n.name == node_name][0]\r\n print('Deleting node : '+ node_name)\r\n node.destroy()\r\n print('Deleted !!')\r\n\r\n\r\n\r\n#Uncomment the respective call for various steps as needed after adding entries in config.yml for the below values\r\n# ACCESS_ID\r\n# SECRET_KEY\r\n# REGION\r\n# AMI_ID\r\n# KEYPAIR_NAME\r\n\r\n#create_node(NODE_NAME, CONNECTION, SIZE_ID, AMI_ID, KEYPAIR_NAME, SECURITY_GROUP_NAMES)\r\n\r\n#stop_node(NODE_NAME, CONNECTION)\r\n\r\n#start_node(NODE_NAME, CONNECTION)\r\n\r\n#delete_node(NODE_NAME, CONNECTION)\r\n","repo_name":"cloudmesh-community/sp19-516-128","sub_path":"comprehension/LibCloud_VM_AWS_EC2_Script.py","file_name":"LibCloud_VM_AWS_EC2_Script.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33818969790","text":"from django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom .models import CustomUser, Profile\n\n\nclass RegisterForm(auth_forms.UserCreationForm):\n \n class Meta:\n model = CustomUser\n fields = (\"email\", \"username\",)\n\n\n\nclass CustomUserChangeForm(auth_forms.UserChangeForm):\n\n class Meta:\n model = CustomUser\n fields = (\"email\", 
\"username\",)\n\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = Profile\n fields = \"__all__\"\n exclude = (\"user\", )\n\n","repo_name":"mohamedAbdelaleem/On-this-day","sub_path":"src/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"30869306781","text":"import os\nimport subprocess\nimport time\nimport unittest\n\nfrom oasislmf.model_preparation import (\n oed\n)\n\ncwd = os.path.dirname(os.path.realpath(__file__))\ninput_dir = os.path.join(cwd, 'examples')\n\nclass TestReinsurance(unittest.TestCase):\n\n def test_validate_oed_direct_inly(self):\n\n case_dir = os.path.join(input_dir, \"direct_only\")\n\n (\n ri_info_df,\n ri_scope_df, \n do_reinsurance\n ) = oed.load_oed_dfs(case_dir)\n\n self.assertFalse(do_reinsurance)\n\n def test_validate_oed_single_cxl(self):\n\n case_dir = os.path.join(input_dir, \"single_cxl\")\n\n (\n ri_info_df,\n ri_scope_df, \n do_reinsurance\n ) = oed.load_oed_dfs(case_dir)\n\n self.assertTrue(do_reinsurance)\n\n\n","repo_name":"Simplitium/OasisLMF_SQL","sub_path":"tests/model_preparation/test_oed.py","file_name":"test_oed.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1860685724","text":"from gensim.models.doc2vec import TaggedDocument, Doc2Vec\nimport numpy as np\n\nmin_count = 1\nwindow = 10\nvector_size = 100\nnegative = 5\nworkers = 2\nepochs = 50\ndbow_words = 0\n\n\nclass Doc_vectors(object):\n def __init__(self):\n self.min_count = min_count\n self.window = window\n self.vector_size = vector_size\n self.negative = negative\n self.workers = workers\n self.epochs = epochs\n self.dbow_words=dbow_words\n \n #把文本数据转成doc2vec要求的格式,即生成words\n def list2tag(self, corpus):\n documents = []\n for i, text in enumerate(list(corpus)):\n words_list = text.split(' ')\n documents.append(TaggedDocument(words=words_list, tags=[i]))\n return documents\n\n #建立doc2vec模型,并获取doc_vectors \n def build_doc_model(self, documents, dm):\n model = Doc2Vec(min_count=self.min_count, window=self.window, vector_size=self.vector_size, negative=self.negative, workers=self.workers, epochs=self.epochs, dm=dm, dbow_words=self.dbow_words)\n #建立词典\n model.build_vocab(documents)\n #训练模型\n model.train(documents, total_examples=model.corpus_count, epochs=model.epochs)\n return model\n\n #获取doc_vectors\n def getVecs(self, model, documents):\n vecs = [np.array(model.docvecs[z.tags[0]]).reshape((1, self.vector_size)) for z in documents]\n return np.concatenate(vecs)","repo_name":"middle-plat-ai/text-similarity","sub_path":"FeatureEngineer/My_doc2vec.py","file_name":"My_doc2vec.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"2698854823","text":"#돈 적게 쓰는 게 핵심\nN, M = map(int, input().split())\n\nanswer = 0\nprice_list = []\n\nfor _ in range(M):\n #패키지 가격, 낱개 가격\n price = tuple(map(int, input().split()))\n price_list.append(price)\n\n#6개 패키지 묶음과 낱개 가격이 가장 싼 순서대로 정렬\npkg_list = sorted(price_list, key=lambda x : x[0])\none_list = sorted(price_list, key=lambda x : x[1])\n\nif pkg_list[0][0] <= one_list[0][1] * 6: #패키지가 낱개*6보다 싸면\n #묶음으로 여러개 사고 남은 만큼 낱개로 구매\n answer = pkg_list[0][0] * (N // 6) + one_list[0][1] * (N % 6)\n # 이 때, 남은 만큼 낱개로 구매하는 가격보다 묶음으로 사고 남기는 게 더 싸면\n if pkg_list[0][0] < one_list[0][1] * (N % 6):\n answer = 
pkg_list[0][0] * (N//6 + 1) # buy one extra pack\nelse: # if 6 singles are cheaper than one pack \n    answer = one_list[0][1] * N # buy everything at the single-unit price\n\nprint(answer)","repo_name":"ahh1214/Algorithm","sub_path":"BaekJoon/1049.py","file_name":"1049.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"74255424244","text":"# Creating Field Files from Input String: \n# : it takes in the sequence string (professor name + professor fields)\n#\tand creates files for each field, and the content being all the \n# \tprofessor names that are associated with that field. \n\nimport sys \nimport os\nimport re\n\ndef createFieldFiles(inputStr, fieldDir):\n\tres = set()\n\ttry: \n\t\tos.mkdir('fields')\n\texcept OSError: \n\t\tpass\n\t\n\ttempStr = re.sub(r'\\d+\\.\\d+|\\d+', '', inputStr)\n\tstrList = tempStr.split()\n\tprofName = []\n\tfor word in strList: \n\t\tif word[0].islower(): \n\t\t\tif len(profName) > 3: \n\t\t\t\tif '.' in profName[-4]:\n\t\t\t\t\tprofName = profName[-2:]\n\t\t\t\telse: \n\t\t\t\t\tprofName = profName[-3:]\n\n\t\t\tif ',' in word: \n\t\t\t\tprint(profName)\n\t\t\t\tfieldList = word.split(',')\n\t\t\t\tfor field in fieldList:\n\t\t\t\t\t# //\n\t\t\t\t\tfind = ''\n\t\t\t\t\tfor Area, subArea in fieldDir.items():\n\t\t\t\t\t\tif field in subArea:\n\t\t\t\t\t\t\tfind = Area\n\t\t\t\t\tif find == '':\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\toutput = fieldFile_helper('fields/'+find+'/'+field)\n\t\t\t\t\ttempName = ' '.join(profName)\n\t\t\t\t\tres.add(tempName)\n\n\t\t\t\t\tfor name in profName: \t\n\t\t\t\t\t\toutput.write(name + ' ')\n\t\t\t\t\toutput.write('\\n')\n\t\t\t\t\toutput.close()\n\t\t\t\tprofName = []\n\t\t\telse:\n\t\t\t\t# //\n\t\t\t\tfind = ''\n\t\t\t\tfor Area, subArea in fieldDir.items():\n\t\t\t\t\tif word in subArea:\n\t\t\t\t\t\tfind = Area\n\t\t\t\tif find == '':\n\t\t\t\t\tbreak\n\n\t\t\t\toutput = fieldFile_helper('fields/'+find+'/'+word)\n\n\t\t\t\tfor name in profName: \t\n\t\t\t\t\toutput.write(name + ' ')\n\t\t\t\ttempName = ' '.join(profName)\n\t\t\t\tres.add(tempName)\n\t\t\t\toutput.write('\\n')\n\t\t\t\toutput.close()\n\t\t\t\tprofName = []\n\t\telse: \n\t\t\tprofName.append(word)\n\treturn res\n\n# Helper Function for createFieldFiles, returns correct file object, \n# either for creating new files or appending data to existing files\t\ndef fieldFile_helper(fname):\n\tif os.path.isfile(fname): \n\t\treturn open(fname, 'a')\n\telse: \n\t\ttemp = open(fname, 'w')\n\t\ttemp.close()\n\t\treturn open(fname, 'a')\n\n\n\nif __name__ == \"__main__\":\n\tname = ' Carla P. Gomes ai Bart Selman ai,ml Joseph Y. Halpern ai Daniel D. Lee robotics Rafael Pass theory,crypto Kilian Q. Weinberger ml Tanzeem Choudhury hci Siddhartha Banerjee ml,metrics Arpita Ghosh ecom,web+ir Robert D. Kleinberg theory,ecom Volodymyr Kuleshov ml,ai,ecom Jon M. Kleinberg web+ir,ml,ecom David B. 
Shmoys Karthik Sridharan ml 8 '\n\tcreateFieldFiles(name, {}) # placeholder fieldDir mapping (area -> sub-fields); supply a real mapping to generate files\n","repo_name":"felixwqp/ProfPedia","sub_path":"string_function.py","file_name":"string_function.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} {"seq_id":"12923711604","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras import datasets, layers, models, losses\nfrom tensorflow.keras.optimizers import Adam\nimport numpy as np\nimport os\n\n(x_train,y_train),(x_test,y_test) = datasets.mnist.load_data()\n\nx_train = (x_train)/255\nx_test = (x_test)/255\n\ny_tmp = np.zeros((y_train.size, y_train.max() + 1), dtype=int)\ny_tmp[np.arange(y_train.size), y_train] = 1\ny_train = y_tmp\ny_tmp = np.zeros((y_test.size, y_test.max() + 1), dtype=int)\ny_tmp[np.arange(y_test.size), y_test] = 1\ny_test = y_tmp\n\n\nx_train = tf.expand_dims(x_train, axis=3, name=None)\nx_test = tf.expand_dims(x_test, axis=3, name=None)\n\nx_train = 1 - x_train\nx_test = 1 - x_test\n# x_train = x_train.numpy()\n# x_test = x_test.numpy()\n# print(type(x_train))\n# print(x_train.shape)\n# for ll in ((x_train[0]).reshape(28,28)):\n#     for lll in ll:\n#         print(\"{:.4f}\".format(lll), end=\" \")\n#     print()\n\n# print(\"######################\")\n\n# x_train = np.load(os.path.join('data', 'X_split_train.npy'), mmap_mode='r')\n# y_train = np.load(os.path.join('data', 'Y_split_train.npy'))\n# x_test = np.load(os.path.join('data', 'X_split_test.npy'), mmap_mode='r')\n# y_test = np.load(os.path.join('data', 'Y_split_test.npy'))\n\n# x_train = np.asarray(x_train)\n# x_test = np.asarray(x_test)\n\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\nvalidation_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n\nBATCH_SIZE = 64\ntrain_dataset_batch = train_dataset.batch(BATCH_SIZE, drop_remainder=False)\nvalidation_dataset_batch = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)\n\nopt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)\n\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(6, 5, activation='tanh', input_shape=x_train.shape[1:]))\nmodel.add(layers.AveragePooling2D(2))\nmodel.add(layers.Activation('sigmoid'))\nmodel.add(layers.Conv2D(16, 5, activation='tanh'))\nmodel.add(layers.AveragePooling2D(2))\nmodel.add(layers.Activation('sigmoid'))\nmodel.add(layers.Conv2D(120, 4, activation='tanh'))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(84, activation='tanh'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\nmodel.compile(optimizer=opt, \n              loss=losses.categorical_crossentropy, \n              metrics=['accuracy'])\n\nhistory = model.fit(train_dataset_batch, \n                    batch_size=BATCH_SIZE, \n                    epochs=10, \n                    validation_data=validation_dataset_batch)\n\nmodel.evaluate(x_test, y_test)\n","repo_name":"xinyew/EdgeImpulse_keras_LeNet","sub_path":"train_minst.py","file_name":"train_minst.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"15668291559","text":"from nelsnmp.hostinfo.collect import get_device_version\nfrom nelsnmp.oids import GeneralOids\nfrom nelsnmp.snmp import SnmpHandler\nfrom nelsnmp.vendors.mappings import vendor_map\no = GeneralOids()\n\nclass HostInfo(object):\n\n    def __init__(self, Snmp, contact=None, description=None, location=None,\n                 os=None, vendor=None, sysobjectid=None):\n\n        if not isinstance(Snmp, SnmpHandler):\n            raise ValueError('Must pass a Nelmon SnmpHandler')\n\n        
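# basic system attributes; any left as None are filled in lazily by the get_* methods below\n        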
self.contact = contact\n        self.location = location\n        self.sysobjectid = sysobjectid\n        self.description = description\n        self.os = os\n        self.vendor = vendor\n        self.version = None\n        self.uptime = None\n        self._snmp = Snmp\n\n    def _parse_data(self, data):\n        for oid, value in data:\n            if o.sysContact in oid:\n                self.contact = value\n            if o.sysDescr in oid:\n                self.description = value\n            if o.sysLocation in oid:\n                self.location = value\n            if o.sysObjectId in oid:\n                self.sysobjectid = value\n            if o.sysUpTime in oid:\n                self.uptime = value\n\n    def get_all(self):\n        oids = [\n            o.sysContact + '.0',\n            o.sysDescr + '.0',\n            o.sysLocation + '.0',\n            o.sysObjectId + '.0',\n            o.sysUpTime + '.0',\n        ]\n        data = self._snmp.get(*oids)\n        self._parse_data(data)\n        self.get_vendor()\n        self.get_version()\n\n    def get_description(self):\n        data = self._snmp.get(o.sysDescr + '.0')\n        self._parse_data(data)\n\n    def get_contact(self):\n        data = self._snmp.get(o.sysContact + '.0')\n        self._parse_data(data)\n\n    def get_location(self):\n        data = self._snmp.get(o.sysLocation + '.0')\n        self._parse_data(data)\n\n    def get_vendor(self):\n        if self.sysobjectid is None:\n            self.get_sysobjectid()\n        try:\n            enterprise_id = self.sysobjectid.split('.')[6]\n            self.vendor = vendor_map[enterprise_id]\n        except Exception:\n            self.vendor = 'UNKNOWN'\n\n    def get_version(self):\n        if self.vendor is None:\n            self.get_vendor()\n        if self.description is None:\n            self.get_description()\n        version_info = get_device_version(\n            sysobjectid=self.sysobjectid,\n            description=self.description,\n            vendor=self.vendor,\n            snmp=self._snmp\n        )\n        if self.vendor != version_info.vendor:\n            self.vendor = version_info.vendor\n        self.os = version_info.os\n        self.version = version_info.version\n\n    def get_sysobjectid(self):\n        data = self._snmp.get(o.sysObjectId + '.0')\n        self._parse_data(data)\n\n\nclass Hostinfo(HostInfo):\n    '''Deprecated, marked for removal; use HostInfo instead'''\n    pass\n","repo_name":"networklore/nelsnmp","sub_path":"lib/nelsnmp/hostinfo/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} {"seq_id":"23712187936","text":"from django.db import models\nfrom Customer.models import Customer_class\nfrom order.models import order_class\n# Create your models here.\n\n\n\n\nclass Product_Class(models.Model):\n    Product_Id = models.IntegerField(blank=True, null=True)\n    Order_No = models.IntegerField(blank=True,null=True)\n    Product_details=models.CharField(max_length=20)\n    Rate = models.IntegerField(blank=True,null=True)\n    Catagories = models.CharField(max_length=50,\n                              choices=(('Cloth', 'Cloth'),\n                                       ('Shoes', 'Shoes'),\n                                       ('Cosmetics', 'Cosmetics')),\n                              default='Cloth')\n\n    Product_image = models.ImageField(null=True)\n\n    def __str__(self):\n        return str(self.Product_Id) + \" : \" + self.Product_details\n\n\n\nclass order_product(models.Model):\n    Customer=models.ForeignKey(Customer_class, on_delete=models.SET_NULL, null=True, default=1)\n    Product= models.ForeignKey(Product_Class, on_delete=models.SET_NULL, null=True, default=1)\n    order=models.ForeignKey(order_class, on_delete=models.SET_NULL, null=True, default=1)","repo_name":"Amy12343/FashionBlog","sub_path":"Product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"23273822848","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.test 
import Client, TestCase\nfrom django.urls import reverse\n\nfrom ..models import Group, Post\n\nUser = get_user_model()\n\n\nclass PaginatorViewsTest(TestCase):\n\n    TEST_OF_POST = 13\n\n    def setUp(self):\n        self.guest_client = Client()\n        self.user = User.objects.create(username='Author')\n        self.group = Group.objects.create(\n            title='Тестовая группа',\n            slug='test_group'\n        )\n        bulk_post: list = []\n        for number in range(self.TEST_OF_POST):\n            bulk_post.append(Post(text=f'Тестовый текст {number}',\n                                  group=self.group,\n                                  author=self.user))\n        Post.objects.bulk_create(bulk_post)\n\n    def test_paginator_correct(self):\n        \"\"\"The paginator works correctly.\"\"\"\n        urls = (reverse('posts:index'),\n                reverse('posts:profile',\n                        kwargs={'username': f'{self.user.username}'}),\n                reverse('posts:group_list',\n                        kwargs={'slug': f'{self.group.slug}'}))\n        for url in urls:\n            page_float = self.TEST_OF_POST % settings.PAGES\n            page = self.TEST_OF_POST // settings.PAGES\n            if page_float > 0:\n                response_second_post = self.guest_client.get(\n                    f'{url}?page={page+1}'\n                )\n            else:\n                response_second_post = self.guest_client.get(\n                    f'{url}?page={page}'\n                )\n            response_first_post = self.guest_client.get(url)\n            count_posts_first = len(response_first_post.context['page_obj'])\n            count_posts_second = len(response_second_post.context['page_obj'])\n            self.assertEqual(count_posts_first,\n                             settings.PAGES)\n            if page_float > 0:\n                self.assertEqual(count_posts_second, page_float)\n            else:\n                self.assertEqual(count_posts_second, settings.PAGES)\n","repo_name":"doberman-ghost/hw04_tests","sub_path":"yatube/posts/tests/test_paginator.py","file_name":"test_paginator.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"6832370554","text":"\"\"\"\n\n95. 
Unique Binary Search Trees II\n\nGiven an integer n, generate and return all structurally different binary search trees that consist of n nodes with distinct values from 1 to n. The answer may be returned in any order.\n\nExample 1:\nInput: n = 3\nOutput: [[1,null,2,null,3],[1,null,3,2],[2,1,3],[3,1,null,null,2],[3,2,null,1]]\n\nExample 2:\nInput: n = 1\nOutput: [[1]]\n\nApproach: take every value in turn as the root, collect all possible left subtrees and right subtrees for that root, and then combine each pair into one candidate tree.\n\"\"\"\n\n# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def generating(self, start, end):\n        allTrees = []\n        if start > end:\n            allTrees.append(None)\n            return allTrees\n        for i in range(start, end + 1):\n            leftTrees = self.generating(start, i - 1)\n\n            rightTrees = self.generating(i + 1, end)\n\n            for left in leftTrees:\n                for right in rightTrees:\n                    currNode = TreeNode(i)\n                    currNode.left = left\n                    currNode.right = right\n                    allTrees.append(currNode)\n\n        return allTrees\n\n    def generateTrees(self, n: int) -> List[TreeNode]:\n        if n == 0:\n            return []\n        return self.generating(1, n)\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    trees = solution.generateTrees(3)\n","repo_name":"geek-Xie/PY_Myleetcode","sub_path":"95GenerateTrees(Med)/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} {"seq_id":"27626207890","text":"\"\"\"\nDemo of flipping an image\n\"\"\"\n\nimport cv2\n\nfrom utils.helpers import flip\n\n# Read the image input\nimage = cv2.imread('./images/dog.jpg')\n\n# Flip the image\nflipped_img = flip(image, random_flip=True)\n\n# Display the original and flipped image\nwind_name_orig = cv2.namedWindow('Original image', cv2.WINDOW_NORMAL)\ncv2.imshow('Original image', image)\n\nwind_name_flip = cv2.namedWindow('Flipped image', cv2.WINDOW_NORMAL)\ncv2.imshow('Flipped image', flipped_img)\n\nif cv2.waitKey(0) == ord('s'):\n    cv2.imwrite('outputs/flipped_image.jpg', flipped_img)\ncv2.destroyAllWindows()\n","repo_name":"sthanhng/OCVLibs","sub_path":"flip_image.py","file_name":"flip_image.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"6059162407","text":"import pygame, sys, os\nfrom player import Agent, Action, Street\nfrom handeval import fasteval, gethand\nfrom threading import Thread\nfrom hugame import HUGame\nfrom time import sleep\n\nPLAYER1 = Agent()\nPLAYER2 = Agent()\n\nclass GameThread(Thread):\n    def __init__(self, GUI, observeOnly):\n        super().__init__(daemon = True)\n        self.GUI = GUI\n        p1 = PLAYER1 if observeOnly else GUI\n        self.game = HUGame(p1, PLAYER2, fullSpeed = False) \n\n    def run(self):\n        self.game.start(observer = self.GUI)\n\nclass GUI(Agent):\n    black = 30, 30, 30\n    lightGrey = 180, 180, 180\n    white = 255, 255, 255\n    yellow = 255, 255, 0\n    \n    ranks = {0: \"Two\", 1: \"Three\", 2: \"Four\",\n             3: \"Five\", 4: \"Six\", 5: \"Seven\",\n             6: \"Eight\", 7: \"Nine\", 8: \"Ten\",\n             9: \"Jack\", 10: \"Queen\", 11: \"King\", 12: \"Ace\"}\n    \n    shortRanks = {0: \"2\", 1: \"3\", 2: \"4\",\n                  3: \"5\", 4: \"6\", 5: \"7\",\n                  6: \"8\", 7: \"9\", 8: \"T\",\n                  9: \"J\", 10: \"Q\", 11: \"K\", 12: \"A\"}\n    \n    suits = {0: \"hearts\", 1: \"spades\", 2: \"diamonds\", 3: \"clubs\"}\n    \n    evals = {0: \"High card\", 1: \"Pair\", 2: \"Two Pair\", 3: \"Three of a kind\",\n             4: \"Straight\", 5: \"Flush\", 6: \"Full House\", 7: \"Four of a kind\",\n             8: \"Straight Flush\"}\n    \n    def __init__(self, observeOnly = False):\n        super().__init__()\n        self.observeOnly = 
observeOnly\n self.gameThread = GameThread(self, observeOnly)\n \n def get_file_name(self, card):\n r = self.ranks[card[0]] if card[0] >= 9 else card[0] + 2\n return str.lower(str(r)) + \"_of_\" + self.suits[card[1]] + \".png\"\n \n def get_eval_string(self, evl):\n category = evl[0]\n name1 = self.evals[evl[0]]\n if category == 0 or category == 5:\n name2 = \"-\".join((self.shortRanks[x] for x in evl[1:]))\n elif category == 1 or category == 3:\n name2 = self.ranks[evl[1]] + \"s + \" + \"-\".join(self.shortRanks[x] for x in evl[2:])\n elif category == 2:\n name2 = self.ranks[evl[1]] + \"s and \" + self.ranks[evl[2]] + \"s + \" + self.shortRanks[evl[3]]\n elif category == 6:\n name2 = self.ranks[evl[1]] + \"s full of \" + self.ranks[evl[2]] + \"s\"\n elif category == 7:\n name2 = self.ranks[evl[1]] + \"s + \" + self.shortRanks[evl[2]]\n else:\n name2 = self.ranks[evl[1]] + \" high\"\n \n return name1, name2.replace(\"Sixs\", \"Sixes\")\n \n \n def update_cards(self, street):\n self.pocketImages = []\n self.boardImages = []\n self.enemyImages = [] \n \n for card, rect in zip(self.state.hand, self.pocketRects):\n self.pocketImages.append(self.get_scaled_image(self.get_file_name(card), rect))\n \n for card, rect in zip(self.state.boardCards[:min(street.value, 5)], self.boardRects): \n self.boardImages.append(self.get_scaled_image(self.get_file_name(card), rect))\n \n for card, rect in zip(self.enemyState.hand, self.enemyRects):\n img = self.get_file_name(card) if street == Street.SHOWDOWN or self.observeOnly else \"back.png\"\n self.enemyImages.append(self.get_scaled_image(img, rect))\n \n if street.value >= 3:\n evl = fasteval(self.state.hand + self.state.boardCards[:min(street.value, 5)], min(street.value + 2, 7))\n n1, n2 = self.get_eval_string(evl)\n if street == Street.SHOWDOWN or self.observeOnly:\n evl = fasteval(self.enemyState.hand + self.state.boardCards[:min(street.value, 5)], min(street.value + 2, 7))\n n3, n4 = self.get_eval_string(evl)\n else:\n n3 = n4 = \"\"\n else:\n n1 = n2 = n3 = n4 = \"\"\n \n self.texts[\"eval1\"][0] = self.myfont.render(n1, True, (0, 160, 0))\n self.texts[\"eval2\"][0] = self.myfont.render(n2, True, (0, 160, 0))\n self.texts[\"eval3\"][0] = self.myfont.render(n3, True, (0, 160, 0))\n self.texts[\"eval4\"][0] = self.myfont.render(n4, True, (0, 160, 0))\n \n def get_scaled_image(self, image, destination):\n img = pygame.image.load(os.path.join('png', image))\n return pygame.transform.scale(img, (destination.width, destination.height))\n \n def update(self, street, pot): \n self.update_cards(street)\n self.texts[\"pot\"][0] = self.myfont.render('{0:.2f}'.format(pot / 100), True, (0, 0, 0))\n self.texts[\"bet\"][0] = self.myfont.render('{0:.2f}'.format(self.state.betSize / 100), True, (0, 0, 0))\n self.texts[\"enemyBet\"][0] = self.myfont.render('{0:.2f}'.format(self.enemyState.betSize / 100), True, (0, 0, 0))\n self.texts[\"stack\"][0] = self.myfont.render('{0:.2f}'.format(self.state.stack / 100), True, (0, 0, 0))\n self.texts[\"enemyStack\"][0] = self.myfont.render('{0:.2f}'.format(self.enemyState.stack / 100), True, (0, 0, 0))\n \n def resolve_key_press(self, key_string):\n stripped = key_string.replace(\"[\", \"\").replace(\"]\", \"\")\n try:\n if key_string == \"enter\":\n self.action = (Action.BETRAISE, int(float(self.betSizeStr) * 100))\n self.awaitingAction = False\n self.betSizeStr = \"0\"\n elif key_string == \"backspace\":\n if len(self.betSizeStr) > 1:\n self.betSizeStr = self.betSizeStr[:-1]\n else:\n self.betSizeStr = \"0\"\n elif key_string == 
\"space\":\n self.action = (Action.CHECKCALL, 0)\n self.awaitingAction = False\n self.betSizeStr = \"0\"\n elif key_string == \"escape\":\n self.action = (Action.CHECKFOLD, 0)\n self.awaitingAction = False\n self.betSizeStr = \"0\"\n elif stripped == \".\" and (not \".\" in self.betSizeStr):\n self.betSizeStr += \".\"\n else:\n num = str(int(stripped))\n if self.betSizeStr == \"0\":\n self.betSizeStr = num\n else:\n self.betSizeStr += num\n \n self.texts[\"betSize\"][0] = self.myfont.render(self.betSizeStr, True, (0, 0, 0)) \n except ValueError:\n pass\n \n def start_gui(self,):\n pygame.init()\n self.myfont = pygame.font.SysFont('System', 30)\n self.betSizeStr = \"0\"\n self.eval1 = \"\"\n self.eval2 = \"\"\n self.eval3 = \"\"\n self.eval4 = \"\"\n \n size = 800, 600\n self.screen = pygame.display.set_mode(size)\n \n self.rects = {\"betButton\": (pygame.Rect(493, 525, 125, 50), self.lightGrey),\n \"checkButton\": (pygame.Rect(338, 525, 125, 50), self.lightGrey),\n \"foldButton\": (pygame.Rect(183, 525, 125, 50), self.lightGrey),\n \"betSize\": (pygame.Rect(503, 490, 105, 25), self.white),\n \"pot\": (pygame.Rect(20, 200, 100, 60), self.white),\n \"bet\": (pygame.Rect(515, 375, 120, 60), self.white),\n \"enemyBet\": (pygame.Rect(515, 65, 120, 60), self.white),\n \"stack\": (pygame.Rect(650, 500, 120, 60), self.white),\n \"enemyStack\": (pygame.Rect(650, 15, 120, 60), self.white),\n \"dealer\": (pygame.Rect(225, 440, 50, 50), self.black),\n \"enemyDealer\": (pygame.Rect(225, 20, 50, 50), self.black),\n \"eval1\": (pygame.Rect(10, 375, 100, 25), self.black),\n \"eval2\": (pygame.Rect(10, 400, 100, 25), self.black),\n \"eval3\": (pygame.Rect(10, 60, 100, 25), self.black),\n \"eval4\": (pygame.Rect(10, 85, 100, 25), self.black)}\n \n self.backGrounds = [pygame.Rect(488, 520, 135, 60),\n pygame.Rect(333, 520, 135, 60),\n pygame.Rect(178, 520, 135, 60)]\n \n self.pocketRects = [pygame.Rect(310, 350, 82, 120),\n pygame.Rect(408, 350, 82, 120)]\n \n self.enemyRects = [pygame.Rect(310, 10, 82, 120),\n pygame.Rect(408, 10, 82, 120)]\n self.enemyImages = [self.get_scaled_image('back.png', x) for x in self.enemyRects]\n \n self.boardRects = [pygame.Rect(163, 180, 82, 120),\n pygame.Rect(261, 180, 82, 120),\n pygame.Rect(359, 180, 82, 120),\n pygame.Rect(457, 180, 82, 120),\n pygame.Rect(555, 180, 82, 120)]\n \n self.texts = {\"foldButton\": [self.myfont.render('Fold', True, (0, 0, 0)), (42, 16)],\n \"checkButton\": [self.myfont.render('Check/Call', True, (0, 0, 0)), (10, 16)],\n \"betButton\": [self.myfont.render('Bet/Raise', True, (0, 0, 0)), (15, 16)],\n \"pot\": [self.myfont.render(\"0\", True, (0, 0, 0)), (12, 19)],\n \"bet\": [self.myfont.render(\"0\", True, (0, 0, 0)), (12, 19)],\n \"enemyBet\": [self.myfont.render(\"0\", True, (0, 0, 0)), (12, 19)],\n \"stack\": [self.myfont.render(\"0\", True, (0, 0, 0)), (12, 19)],\n \"enemyStack\": [self.myfont.render(\"0\", True, (0, 0, 0)), (12, 19)],\n \"betSize\": [self.myfont.render(self.betSizeStr, True, (0, 0, 0)), (8, 3)],\n \"eval1\":[self.myfont.render(\"\", True, (0, 0, 0)), (8, 3)],\n \"eval2\":[self.myfont.render(\"\", True, (0, 0, 0)), (8, 3)],\n \"eval3\":[self.myfont.render(\"\", True, (0, 0, 0)), (8, 3)],\n \"eval4\":[self.myfont.render(\"\", True, (0, 0, 0)), (8, 3)]}\n \n self.pocketImages = []\n self.boardImages = []\n self.enemyImages = []\n \n self.awaitingAction = False \n self.gameThread.start()\n \n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n if event.type == pygame.KEYDOWN:\n 
self.resolve_key_press(pygame.key.name(event.key)) \n elif event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n if self.rects[\"foldButton\"][0].collidepoint(pos):\n self.resolve_key_press(\"escape\")\n elif self.rects[\"checkButton\"][0].collidepoint(pos):\n self.resolve_key_press(\"space\")\n elif self.rects[\"betButton\"][0].collidepoint(pos):\n self.resolve_key_press(\"enter\")\n\n self.screen.fill(self.black)\n if self.awaitingAction:\n for r in self.backGrounds:\n pygame.draw.rect(self.screen, self.yellow, r)\n \n for _, (r, c) in self.rects.items():\n pygame.draw.rect(self.screen, c, r)\n \n for x, y in zip(self.pocketImages, self.pocketRects):\n pygame.draw.rect(self.screen, self.white, y)\n self.screen.blit(x, y)\n \n for x, y in zip(self.enemyImages, self.enemyRects):\n pygame.draw.rect(self.screen, self.white, y)\n self.screen.blit(x, y)\n \n for x, y in zip(self.boardImages, self.boardRects):\n pygame.draw.rect(self.screen, self.white, y)\n self.screen.blit(x, y)\n \n for k, (text, offSet) in self.texts.items():\n parent = self.rects[k][0]\n self.screen.blit(text, (parent.left + offSet[0], parent.top + offSet[1]))\n \n if self.state.hasButton:\n self.screen.blit(self.get_scaled_image(\"dealer.png\", self.rects[\"dealer\"][0]), self.rects[\"dealer\"][0])\n else:\n self.screen.blit(self.get_scaled_image(\"dealer.png\", self.rects[\"enemyDealer\"][0]), self.rects[\"enemyDealer\"][0]) \n \n pygame.display.flip()\n pygame.time.wait(50)\n \n def get_action(self):\n self.awaitingAction = True\n while True: \n if not self.awaitingAction:\n return self.action\n sleep(0.01)\n \nif __name__ == \"__main__\":\n gui = GUI(observeOnly = True)\n gui.start_gui()\n ","repo_name":"tterava/PokerTrainingFramework","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12512,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"19676265208","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 09:49:20 2018\n@author: karips\nSolves the \"Quality-Adjusted Life-Year\" Kattis problem\n\"\"\"\niters = int(input())\nres=0\nfor i in range(iters):\n x=input().split(' ') \n nums = [float(i) for i in x] \n res+=nums[0]*nums[-1]\nprint(\"%.3f\" % res)\n","repo_name":"dimkary/Python-tests","sub_path":"Kattis/qaly.py","file_name":"qaly.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40686367711","text":"def prime_num_generator():\r\n yield 2\r\n prime_nums = [2]\r\n num = 3\r\n while True:\r\n is_prime = True\r\n for prime_num in prime_nums:\r\n if num % prime_num == 0:\r\n is_prime = False\r\n break\r\n if prime_num**2 > num:\r\n break\r\n if is_prime:\r\n prime_nums.append(num)\r\n yield num\r\n num += 2\r\n\r\nprime_gen = prime_num_generator()\r\nprint(next(prime_gen))\r\nprint(next(prime_gen))\r\n","repo_name":"Jeka2004/KPZ-Practice-1","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17595851561","text":"\"\"\"\n\n438. 
Find all anagrams in a string\nMedium\n\nGiven a string s and a non-empty string p, find all the start indices of p's anagrams in s.\n\nStrings consists of lowercase English letters only and the length of both strings s and \np will not be larger than 20,100.\n\nThe order of output does not matter.\n\nExample 1:\n\nInput:\ns: \"cbaebabacd\" p: \"abc\"\n\nOutput:\n[0, 6]\n\nExplanation:\nThe substring with start index = 0 is \"cba\", which is an anagram of \"abc\".\nThe substring with start index = 6 is \"bac\", which is an anagram of \"abc\".\nExample 2:\n\nInput:\ns: \"abab\" p: \"ab\"\n\nOutput:\n[0, 1, 2]\n\nExplanation:\nThe substring with start index = 0 is \"ab\", which is an anagram of \"ab\".\nThe substring with start index = 1 is \"ba\", which is an anagram of \"ab\".\nThe substring with start index = 2 is \"ab\", which is an anagram of \"ab\".\n\n\"\"\"\n\nfrom collections import Counter\nfrom typing import List\n\n\nclass Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n\n cp, np, ns, cs = Counter(p), len(p), len(s), Counter()\n if ns < np:\n return []\n\n ans = []\n for i, c in enumerate(s):\n if i < np:\n cs[c] += 1\n else:\n if cs == cp:\n ans.append(i - np)\n if cs[s[i-np]] == 1:\n cs.pop(s[i - np])\n else:\n cs[s[i - np]] -= 1\n cs[c] += 1\n\n if cp == cs:\n ans.append(ns-np)\n return ans\n \n def findAnagrams2(self, s: str, p: str) -> List[int]:\n \"\"\"\n \n Sliding window.\n 2020.02.23: Fixed window length. Window state maintained by a hashtable\n \"\"\"\n cp = Counter(p)\n k, n = len(p), len(s)\n \n \n if k > len(s):\n return []\n \n ans = []\n curr = Counter(s[:k])\n pos = k\n \n # a window of length k\n while pos < n:\n # print(pos, curr)\n if curr == cp:\n ans.append(pos-k)\n \n # element leaving the window\n if curr[s[pos-k]] == 1:\n curr.pop(s[pos-k])\n else:\n curr[s[pos-k]] -= 1\n\n # element entering the window\n if s[pos] in curr:\n curr[s[pos]] += 1\n else:\n curr[s[pos]] = 1\n pos += 1\n\n # check the last window\n if curr == cp:\n ans.append(pos-k)\n return ans\n\n\n\nif __name__=='__main__':\n\n sol = Solution()\n method = sol.findAnagrams\n\n cases = [\n (method, (\"cbaebabacd\", \"abc\"), [0,6]),\n (method, (\"baa\", \"aa\"), [1]),\n\n ]\n\n for i, (func, case, expected) in enumerate(cases):\n ans = func(*case)\n if ans == expected:\n print(\"Case {:d} Passed\".format(i + 1))\n else:\n print(\"Case {:d} Failed; Expected {:s} != {:s}\".format(i+1, str(expected), str(ans)))","repo_name":"xys234/coding-problems","sub_path":"algo/hashtable/find_anagram.py","file_name":"find_anagram.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71019556410","text":"from typing import Iterable\nfrom uuid import UUID\n\nfrom flask import Flask, request, abort\nfrom flask_restx import Namespace, Resource\n\n\ndef get_current_app() -> Flask:\n from server import app\n return app\n\n\ndef any_non_nones(iterable: Iterable) -> bool:\n for i in iterable:\n if i is not None:\n return True\n return False\n\n\napi = Namespace(\"\")\n\n\nclass OptionsResource(Resource):\n @api.hide\n def options(self):\n return None, 200\n\n\ndef get_uuid(param_name, allow_empty: bool = False) -> str:\n if isinstance(request, str):\n value = request\n else:\n value = request.args.get(param_name, '')\n if not value and allow_empty:\n return value\n try:\n UUID(value)\n except ValueError:\n abort(400, f\"Incorrect '{param_name}' parameter (must match UUID v4)\")\n except 
TypeError:\n        abort(400, f\"Cannot find '{param_name}' parameter of correct type (must appear once in query)\")\n    return value\n","repo_name":"kvirikroma/qfinder","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"49979365496","text":"import os\nimport pickle\nimport pandas as pd\nimport settings\nimport json\nfrom sklearn.ensemble import RandomForestRegressor\n\ndef get_data():\n    crop_production_train = pd.read_csv(os.path.join(settings.PROCESSED_DIR, settings.CROP_PRODUCTION_TRAIN_DATA), sep=',')\n    crop_production_test = pd.read_csv(os.path.join(settings.PROCESSED_DIR, settings.CROP_PRODUCTION_TEST_DATA), sep=',')\n    return [crop_production_train, crop_production_test]\n\nif __name__ == '__main__':\n    [crop_production_train, crop_production_test] = get_data()\n\n    x_train = crop_production_train.drop(labels=['Production'], axis=1)\n    x_test = crop_production_test.drop(labels=['Production'], axis=1)\n\n    y_train = crop_production_train['Production']\n    y_test = crop_production_test['Production']\n\n    # Random Forest Regressor model\n    rfr_model = RandomForestRegressor(n_estimators=70, max_depth=12)\n    rfr_model.fit(x_train, y_train)\n    rfr_model.feature_names = list(x_train.columns.values)\n\n    # storing crop production column names\n    columns = {\n        'data_columns': [col for col in x_train.columns]\n    }\n    with open(os.path.join(settings.BACKEND_DIR, 'yield_columns.json'), 'w') as f:\n        f.write(json.dumps(columns))\n\n    with open(os.path.join(settings.BACKEND_DIR, 'crop_yield_prediction_rfr_model.pickle'), 'wb') as f:\n        pickle.dump(rfr_model, f)\n","repo_name":"sagarraskar/crop_yield_prediction_and_recommendation","sub_path":"crop_production.py","file_name":"crop_production.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"43733548602","text":"# pick indices ib < jb with jb - ib > k maximizing a[ib] + a[jb]; print them 1-based\nstr1 = input().split(\" \")\na = input().split(\" \")\n\nn = int(str1[0])\nk = int(str1[1])\nim = 0\nib, jb = 0, (1 + k)\n\nfor i in range(k + 1, n):\n    if int(a[im]) < int(a[i - k - 1]):\n        im = i - k - 1\n    if int(a[i]) + int(a[im]) > int(a[ib]) + int(a[jb]):\n        ib, jb = im, i\n\nprint(f\"{ib + 1} {jb + 1}\")","repo_name":"SaGiMan6/olympiad-preparation","sub_path":"2022-2023/lin_112735_c.py","file_name":"lin_112735_c.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41126712642","text":"import csv\nfrom functools import partial\n\nfrom typing import Callable\nfrom ..pool import Pool\n\n\ndef machines_pool_reader(\n    iterable,\n    resource_name_mapping: dict = {  # noqa: B006\n        \"cores\": \"CPUs_per_node\",\n        \"memory\": \"RAM_per_node_in_KB\",\n    },\n    unit_conversion_mapping={  # noqa: B006\n        \"CPUs_per_node\": 1,\n        \"RAM_per_node_in_KB\": 1000,\n    },\n    pool_type: Callable = Pool,\n    make_drone: Callable = None,\n):\n    \"\"\"\n    Load a pool configuration that was exported via htcondor from files or\n    iterables\n\n    :param make_drone: The callable to create the drone\n    :param iterable: an iterable yielding lines of CSV, such as an open file\n    :param resource_name_mapping: Mapping from given header names to well-defined\n        resources in simulation\n    :param unit_conversion_mapping: Mapping from header names to factors that\n        convert the raw column values into the simulation's base units\n    :param pool_type: The type of pool to be yielded\n    :return: Yields the :py:class:`StaticPool`s found in the given iterable\n    \"\"\"\n    assert make_drone\n    
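# each CSV row describes one machine class; the columns used below are\n    # 'number_of_nodes', 'cluster_name' and the headers named in resource_name_mapping\n    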
reader = csv.DictReader(iterable, delimiter=\" \", skipinitialspace=True)\n for row in reader:\n yield pool_type(\n capacity=int(row[\"number_of_nodes\"]),\n make_drone=partial(\n make_drone,\n {\n key: int(float(row[value]) * unit_conversion_mapping.get(value, 1))\n for key, value in resource_name_mapping.items()\n },\n ),\n name=row[\"cluster_name\"],\n )\n","repo_name":"MatterMiners/lapis","sub_path":"lapis/pool_io/machines.py","file_name":"machines.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73795704568","text":"from __future__ import absolute_import\n\nfrom mock import Mock\n\nfrom sentry.api.bases.organization import OrganizationPermission\nfrom sentry.models import ApiKey, ProjectKey\nfrom sentry.testutils import TestCase\n\n\nclass OrganizationPermissionBase(TestCase):\n def setUp(self):\n self.org = self.create_organization()\n super(OrganizationPermissionBase, self).setUp()\n\n def has_object_perm(self, method, obj, auth=None, user=None, is_superuser=None):\n perm = OrganizationPermission()\n request = Mock()\n request.auth = auth\n request.user = user\n request.method = method\n request.is_superuser = lambda: is_superuser if is_superuser is not None else user.is_superuser\n return perm.has_object_permission(request, None, obj)\n\n\nclass OrganizationPermissionTest(OrganizationPermissionBase):\n def test_regular_user(self):\n user = self.create_user()\n assert not self.has_object_perm('GET', self.org, user=user)\n\n def test_superuser(self):\n user = self.create_user(is_superuser=True)\n assert self.has_object_perm('GET', self.org, user=user)\n\n def test_org_member(self):\n user = self.create_user()\n self.create_member(\n user=user,\n organization=self.org,\n role='member',\n )\n assert self.has_object_perm('GET', self.org, user=user)\n assert not self.has_object_perm('POST', self.org, user=user)\n\n def test_project_key(self):\n key = ProjectKey.objects.create(\n project=self.create_project(\n team=self.create_team(organization=self.org),\n ),\n )\n assert not self.has_object_perm('GET', self.org, auth=key)\n\n def test_api_key_with_org_access(self):\n key = ApiKey.objects.create(\n organization=self.org,\n )\n assert self.has_object_perm('GET', self.org, auth=key)\n\n def test_api_key_without_org_access(self):\n key = ApiKey.objects.create(\n organization=self.create_organization(),\n )\n assert not self.has_object_perm('GET', self.org, auth=key)\n","repo_name":"NetEaseGame/Sentry","sub_path":"tests/sentry/api/bases/test_organization.py","file_name":"test_organization.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"77"} +{"seq_id":"9302740206","text":"import random\n\nclass RandomPick():\n\tdef __init__(self, total_play_times, levers) -> None:\n\t\tself.total_play_times = total_play_times\n\t\tself.levers = levers\n\t\tself.amount_of_levers = len(self.levers.get_levers())\n\n\tdef _choice(self):\n\t\treturn random.randint(0, self.amount_of_levers-1)\n\t\n\tdef play(self):\n\t\tself.picks = [0] * self.amount_of_levers\n\t\tgain = 0\n\t\tfor _ in range(0, self.total_play_times):\n\t\t\tchoice = self._choice()\n\t\t\tself.picks[choice] += 1\n\t\t\tgain += self.levers.pick_lever(choice)\n\t\treturn (self.picks, 
gain)\n","repo_name":"dorinm17/connect_four_statistics","sub_path":"part3/random_pick.py","file_name":"random_pick.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71910068730","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\nif __name__ == '__main__':\n n = int(input())\n bno = bin(n).replace(\"0b\", \"\") #in built function\n c = list(bno)\n count = 0\n temp = 0\n for i in c:\n if(i == '1' ):\n temp +=1\n if temp > count:\n count = temp\n else:\n temp = 0 \n \n print(count) \n","repo_name":"christorejisam/30-Days-of-Code-Hackerrank-Solution","sub_path":"Day-10-Binary Numbers.py","file_name":"Day-10-Binary Numbers.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"48336364493","text":"from typing import List\n\nimport numpy as np\nfrom beanmachine.ppl.diagnostics.tools.marginal1d import typing\nfrom beanmachine.ppl.diagnostics.tools.utils import plotting_utils\nfrom bokeh.models.annotations import Band, LabelSet\nfrom bokeh.models.glyphs import Circle, Line\nfrom bokeh.models.layouts import Column, Row\nfrom bokeh.models.sources import ColumnDataSource\nfrom bokeh.models.tools import HoverTool\nfrom bokeh.models.widgets.inputs import Select\nfrom bokeh.models.widgets.markups import Div\nfrom bokeh.models.widgets.panels import Panel, Tabs\nfrom bokeh.models.widgets.sliders import Slider\nfrom bokeh.plotting.figure import figure\n\n\nPLOT_WIDTH = 500\nPLOT_HEIGHT = 500\nFIGURE_NAMES = [\"marginal\", \"cumulative\"]\n# Define what the empty data object looks like in order to make the browser handle all\n# computations.\nEMPTY_DATA = {\n \"marginal\": {\n \"distribution\": {\"x\": [], \"y\": [], \"bandwidth\": np.NaN},\n \"hdi\": {\"base\": [], \"lower\": [], \"upper\": []},\n \"stats\": {\"x\": [], \"y\": [], \"text\": []},\n \"labels\": {\n \"x\": [],\n \"y\": [],\n \"text\": [],\n \"text_align\": [],\n \"x_offset\": [],\n \"y_offset\": [],\n },\n },\n \"cumulative\": {\n \"distribution\": {\"x\": [], \"y\": [], \"bandwidth\": np.NaN},\n \"hdi\": {\"base\": [], \"lower\": [], \"upper\": []},\n \"stats\": {\"x\": [], \"y\": [], \"text\": []},\n \"labels\": {\n \"x\": [],\n \"y\": [],\n \"text\": [],\n \"text_align\": [],\n \"x_offset\": [],\n \"y_offset\": [],\n },\n },\n}\nSIZING = {\n \"sizing_mode\": \"scale_both\",\n \"max_height\": PLOT_HEIGHT + 250, # drop down menus and tabs\n \"max_width\": 2 * PLOT_WIDTH + 30, # tool bars\n}\n\n\ndef create_sources() -> typing.Sources:\n \"\"\"Create Bokeh sources from the given data that will be bound to glyphs.\n\n Returns\n -------\n typing.Sources\n A dictionary of Bokeh ColumnDataSource objects.\n \"\"\"\n output = {}\n for figure_name, figure_data in EMPTY_DATA.items():\n output[figure_name] = {}\n for glyph_name, glyph_data in figure_data.items():\n if \"bandwidth\" in list(glyph_data.keys()):\n glyph_data.pop(\"bandwidth\")\n output[figure_name][glyph_name] = ColumnDataSource(data=glyph_data)\n return output\n\n\ndef create_figures(rv_name: str) -> typing.Figures:\n \"\"\"Create the Bokeh figures used for the tool.\n\n Parameters\n ----------\n rv_name : str\n The string representation of the random variable data.\n\n Returns\n -------\n typing.Figures\n A dictionary of Bokeh Figure objects.\n \"\"\"\n output = {}\n for figure_name in FIGURE_NAMES:\n fig = figure(\n max_width=PLOT_WIDTH,\n 
max_height=PLOT_HEIGHT,\n outline_line_color=\"black\",\n title=f\"{figure_name} distribution\",\n x_axis_label=rv_name,\n y_axis_label=None,\n sizing_mode=\"scale_both\",\n )\n fig.yaxis.visible = False\n plotting_utils.style_figure(fig)\n output[figure_name] = fig\n output[FIGURE_NAMES[0]].x_range = output[FIGURE_NAMES[1]].x_range\n output[FIGURE_NAMES[0]].y_range = output[FIGURE_NAMES[1]].y_range\n return output\n\n\ndef create_glyphs() -> typing.Glyphs:\n \"\"\"Create the glyphs used for the figures of the tool.\n\n Returns\n -------\n typing.Glyphs\n A dictionary of Bokeh Glyphs objects.\n \"\"\"\n palette = plotting_utils.choose_palette(num_colors=2)\n output = {}\n for figure_name, figure_data in EMPTY_DATA.items():\n output[figure_name] = {}\n for glyph_name, _ in figure_data.items():\n if glyph_name in [\"distribution\", \"stats\"]:\n if glyph_name == \"distribution\":\n output[figure_name][glyph_name] = {\n \"glyph\": Line(\n x=\"x\",\n y=\"y\",\n line_color=palette[0],\n line_alpha=0.7,\n line_width=2.0,\n name=f\"{figure_name}DistributionGlyph\",\n ),\n \"hover_glyph\": Line(\n x=\"x\",\n y=\"y\",\n line_color=palette[1],\n line_alpha=1.0,\n line_width=2.0,\n name=f\"{figure_name}DistributionHoverGlyph\",\n ),\n }\n if glyph_name == \"stats\":\n output[figure_name][glyph_name] = {\n \"glyph\": Circle(\n x=\"x\",\n y=\"y\",\n size=10,\n fill_color=palette[0],\n line_color=\"white\",\n fill_alpha=1.0,\n name=f\"{figure_name}StatsGlyph\",\n ),\n \"hover_glyph\": Circle(\n x=\"x\",\n y=\"y\",\n size=10,\n fill_color=palette[1],\n line_color=\"black\",\n fill_alpha=1.0,\n name=f\"{figure_name}StatsHoverGlyph\",\n ),\n }\n return output\n\n\ndef add_glyphs(\n figures: typing.Figures,\n glyphs: typing.Glyphs,\n sources: typing.Sources,\n) -> None:\n \"\"\"Bind source data to glyphs and add the glyphs to the given figures.\n\n Parameters\n ----------\n figures : typing.Figures\n A dictionary of Bokeh Figure objects.\n glyphs : typing.Glyphs\n A dictionary of Bokeh Glyphs objects.\n sources : typing.Sources\n A dictionary of Bokeh ColumnDataSource objects.\n\n Returns\n -------\n None\n Adds data bound glyphs to the given figures directly.\n \"\"\"\n for figure_name, figure_glyphs in glyphs.items():\n fig = figures[figure_name]\n figure_sources = sources[figure_name]\n for glyph_name, glyphs in figure_glyphs.items():\n glyph_source = figure_sources[glyph_name]\n fig.add_glyph(\n source_or_glyph=glyph_source,\n glyph=glyphs[\"glyph\"],\n hover_glyph=glyphs[\"hover_glyph\"],\n name=glyphs[\"glyph\"].name,\n )\n\n\ndef create_annotations(sources: typing.Sources) -> typing.Annotations:\n \"\"\"Create any annotations for the figures of the tool.\n\n Parameters\n ----------\n source : typing.Sources\n A dictionary of Bokeh ColumnDataSource objects.\n\n Returns\n -------\n typing.Annotations\n A dictionary of Bokeh Annotation objects.\n \"\"\"\n palette = plotting_utils.choose_palette(num_colors=1)\n output = {}\n for figure_name, figure_sources in sources.items():\n output[figure_name] = {}\n for glyph_name, glyph_source in figure_sources.items():\n if glyph_name == \"hdi\":\n output[figure_name][glyph_name] = Band(\n base=\"base\",\n lower=\"lower\",\n upper=\"upper\",\n source=glyph_source,\n level=\"underlay\",\n fill_color=palette[0],\n fill_alpha=0.2,\n line_width=1.0,\n line_color=\"white\",\n name=f\"{figure_name}HdiAnnotation\",\n )\n elif glyph_name == \"labels\":\n output[figure_name][glyph_name] = LabelSet(\n x=\"x\",\n y=\"y\",\n text=\"text\",\n x_offset=\"x_offset\",\n 
y_offset=\"y_offset\",\n text_align=\"text_align\",\n source=glyph_source,\n background_fill_color=\"white\",\n background_fill_alpha=0.8,\n name=f\"{figure_name}LabelAnnotation\",\n )\n return output\n\n\ndef add_annotations(figures: typing.Figures, annotations: typing.Annotations) -> None:\n \"\"\"Add the given annotations to the given figures of the tool.\n\n Parameters\n ----------\n figures : typing.Figures\n A dictionary of Bokeh Figure objects.\n annotations : typing.Annotations\n A dictionary of Bokeh Annotation objects.\n\n Returns\n -------\n None\n Adds annotations directly to the given figures.\n \"\"\"\n for figure_name, annotation_sources in annotations.items():\n fig = figures[figure_name]\n for _, annotation in annotation_sources.items():\n fig.add_layout(annotation)\n\n\ndef create_tooltips(rv_name: str, figures: typing.Figures) -> typing.Tooltips:\n \"\"\"Create hover tools for the glyphs used in the figures of the tool.\n\n Parameters\n ----------\n rv_name : str\n The string representation of the random variable data.\n figures : typing.Figures\n A dictionary of Bokeh Figure objects.\n\n Returns\n -------\n typing.Tooltips\n A dictionary of Bokeh HoverTools objects.\n \"\"\"\n output = {}\n for figure_name, fig in figures.items():\n output[figure_name] = {\n \"distribution\": HoverTool(\n renderers=plotting_utils.filter_renderers(\n figure=fig,\n search=\"DistributionGlyph\",\n glyph_type=\"GlyphRenderer\",\n substring=True,\n ),\n tooltips=[(rv_name, \"@x\")],\n ),\n \"stats\": HoverTool(\n renderers=plotting_utils.filter_renderers(\n figure=fig,\n search=\"StatsGlyph\",\n glyph_type=\"GlyphRenderer\",\n substring=True,\n ),\n tooltips=[(\"\", \"@text\")],\n ),\n }\n return output\n\n\ndef add_tooltips(figures: typing.Figures, tooltips: typing.Tooltips) -> None:\n \"\"\"Add the given tools to the figures.\n\n Parameters\n ----------\n figures : typing.Figures\n A dictionary of Bokeh Figure objects.\n tooltips : typing.Tooltips\n A dictionary of Bokeh HoverTools objects.\n\n Returns\n -------\n None\n Adds the tooltips directly to the given figures.\n \"\"\"\n for figure_name, figure_tooltips in tooltips.items():\n fig = figures[figure_name]\n for _, tooltip in figure_tooltips.items():\n fig.add_tools(tooltip)\n\n\ndef create_widgets(\n rv_name: str,\n rv_names: List[str],\n bw_factor: float,\n bandwidth: float,\n) -> typing.Widgets:\n \"\"\"Create the widgets used in the tool.\n\n Parameters\n ----------\n rv_name : str\n The string representation of the random variable data.\n rv_names : List[str]\n A list of all available random variable names.\n bw_factor : float\n Multiplicative factor used when calculating the kernel density estimate.\n bandwidth : float\n The bandwidth used to calculate the KDE.\n\n Returns\n -------\n typing.Widgets\n A dictionary of Bokeh widget objects.\n \"\"\"\n return {\n \"rv_select\": Select(value=rv_name, options=rv_names, title=\"Query\"),\n \"bw_factor_slider\": Slider(\n title=\"Bandwidth factor\",\n start=0.01,\n end=2.00,\n value=1.00,\n step=0.01,\n ),\n \"bw_div\": Div(text=f\"Bandwidth: {bw_factor * bandwidth}\"),\n \"hdi_slider\": Slider(start=1, end=99, step=1, value=89, title=\"HDI\"),\n }\n\n\ndef help_page() -> Div:\n \"\"\"Help tab for the tool.\n\n Returns\n -------\n Div\n Bokeh Div widget containing the help tab information.\n \"\"\"\n text = \"\"\"\n

<h2>\n    Highest density interval\n    </h2>\n
    <p style=\"margin-bottom: 10px\">\n
    The highest density interval region is not equal tailed like a typical\n    equal tailed interval of 2.5%. Thus it will include the mode(s) of the\n    posterior distribution.\n
    </p>\n
    <p style=\"margin-bottom: 10px\">\n
    There is nothing particularly specific about having a default HDI of 89%.\n    In fact, the only remarkable thing about defaulting to 89% is that it is\n    the highest prime number that does not exceed the unstable 95% threshold.\n    See the link to McElreath's book below for further discussion.\n
    </p>\n
    <ul>\n
      <li>\n        McElreath R (2020)\n        <b>\n        Statistical Rethinking: A Bayesian Course with Examples in R and Stan\n        2nd edition.\n        </b>\n        Chapman and Hall/CRC\n        <a href=\"https://dx.doi.org/10.1201/9780429029608\">\n        doi: 10.1201/9780429029608\n        </a>.\n      </li>\n
    </ul>
\n    \"\"\"\n    return Div(text=text, disable_math=False, min_width=PLOT_WIDTH)\n\n\ndef create_figure_grid(figures: typing.Figures) -> Row:\n    \"\"\"Layout the given figures in a grid, and make one toolbar.\n\n    Parameters\n    ----------\n    figures : typing.Figures\n        A dictionary of Bokeh Figure objects.\n\n    Returns\n    -------\n    Row\n        A Bokeh layout object.\n    \"\"\"\n    toolbar = plotting_utils.create_toolbar(figures=list(figures.values()))\n    return Row(children=[*list(figures.values()), toolbar], css_classes=[\"bk-loading\"])\n\n\ndef create_view(widgets: typing.Widgets, figures: typing.Figures) -> Tabs:\n    \"\"\"Create the tool view.\n\n    Parameters\n    ----------\n    widgets : typing.Widgets\n        A dictionary of Bokeh widget objects.\n    figures : typing.Figures\n        A dictionary of Bokeh Figure objects.\n\n    Returns\n    -------\n    Tabs\n        Bokeh Tabs objects.\n    \"\"\"\n    help_panel = Panel(child=help_page(), title=\"Help\", name=\"helpPanel\")\n    fig_child = Column(\n        children=[\n            create_figure_grid(figures),\n            widgets[\"bw_factor_slider\"],\n            widgets[\"bw_div\"],\n            widgets[\"hdi_slider\"],\n        ],\n        css_classes=[\"bm-tool-loading\", \"arcs\"],\n    )\n    fig_child.update_from_json(SIZING)\n    tool_child = Column(children=[widgets[\"rv_select\"], fig_child])\n    tool_child.update_from_json(SIZING)\n    tool_panel = Panel(\n        child=tool_child,\n        title=\"Marginal 1D\",\n        name=\"toolPanel\",\n    )\n    tabs = Tabs(tabs=[tool_panel, help_panel])\n    tabs.update_from_json(SIZING)\n    return tabs\n","repo_name":"facebookresearch/beanmachine","sub_path":"src/beanmachine/ppl/diagnostics/tools/marginal1d/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14452,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"77"}
{"seq_id":"41875154078","text":"#!/usr/bin/python3\n\"\"\"define Student Class\"\"\"\n\n\nclass Student:\n    def __init__(self, first_name, last_name, age):\n        \"\"\"adding new student\n        params:\n        first_name = student first name\n        last_name = student last name\n        age = student age\n        \"\"\"\n        self.first_name = first_name\n        self.last_name = last_name\n        self.age = age\n\n    def to_json(self, attrs=None):\n        \"\"\"retrieve a dictionary representation of the student;\n        if attrs is a list of strings, only those attributes are included\n        \"\"\"\n        if attrs is None:\n            return self.__dict__\n        selector = {}\n        for attrib in attrs:\n            if isinstance(attrib, str) and hasattr(self, attrib):\n                selector[attrib] = getattr(self, attrib)\n        return selector\n","repo_name":"houssam980/alx-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"33873590247","text":"# coding=utf-8\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render_to_response\nfrom django.db.models.deletion import ProtectedError\n\nfrom experiment.models import Experiment, QuestionnaireConfiguration, Subject, TimeUnit, \\\n    QuestionnaireResponse, SubjectOfExperiment\nfrom experiment.forms import ExperimentForm, QuestionnaireConfigurationForm, QuestionnaireResponseForm, FileForm\n\nfrom quiz.models import Patient\nfrom quiz.abc_search_engine import Questionnaires\n\nfrom django.conf import settings\n\nfrom functools import partial\n\nimport re\n\nimport datetime\n\npermission_required = partial(permission_required, raise_exception=True)\n\n# 
pylint: disable=E1101\n# pylint: disable=E1103\n\n@login_required\n@permission_required('experiment.view_experiment')\ndef experiment_list(request, template_name=\"experiment/experiment_list.html\"):\n experiments = Experiment.objects.order_by('title')\n\n context = {\"experiments\": experiments}\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.add_experiment')\ndef experiment_create(request, template_name=\"experiment/experiment_register.html\"):\n experiment_form = ExperimentForm(request.POST or None)\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"save\":\n\n if experiment_form.is_valid():\n experiment_added = experiment_form.save()\n\n messages.success(request, 'Experimento criado com sucesso.')\n\n redirect_url = reverse(\"experiment_edit\", args=(experiment_added.id,))\n return HttpResponseRedirect(redirect_url)\n\n context = {\n \"experiment_form\": experiment_form,\n \"creating\": True}\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.view_experiment')\ndef experiment_update(request, experiment_id, template_name=\"experiment/experiment_register.html\"):\n experiment = get_object_or_404(Experiment, pk=experiment_id)\n\n if experiment:\n\n questionnaires_configuration_list = QuestionnaireConfiguration.objects.filter(experiment=experiment)\n\n surveys = Questionnaires()\n\n\n limesurvey_available = check_limesurvey_access(request, surveys)\n\n questionnaires_configuration_list = [\n {\"survey_title\": surveys.get_survey_title(questionnaire_configuration.lime_survey_id),\n \"number_of_fills\": questionnaire_configuration.number_of_fills,\n \"interval_between_fills_value\": questionnaire_configuration.interval_between_fills_value,\n \"interval_between_fills_unit\": questionnaire_configuration.interval_between_fills_unit,\n \"id\": questionnaire_configuration.id}\n for questionnaire_configuration in questionnaires_configuration_list]\n surveys.release_session_key()\n\n experiment_form = ExperimentForm(request.POST or None, instance=experiment)\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"save\":\n\n if experiment_form.is_valid():\n if experiment_form.has_changed():\n experiment_form.save()\n\n redirect_url = reverse(\"experiment_edit\", args=(experiment_id,))\n return HttpResponseRedirect(redirect_url)\n\n else:\n if request.POST['action'] == \"remove\":\n try:\n experiment.delete()\n except ProtectedError:\n messages.error(request, \"Não foi possível excluir o experimento, pois há questões associadas\")\n redirect_url = reverse(\"experiment_edit\", args=(experiment.id,))\n return HttpResponseRedirect(redirect_url)\n return redirect('experiment_list')\n\n context = {\n \"experiment_form\": experiment_form,\n \"creating\": False,\n \"questionnaires_configuration_list\": questionnaires_configuration_list,\n \"experiment\": experiment,\n \"limesurvey_available\": limesurvey_available}\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.add_questionnaireconfiguration')\ndef questionnaire_create(request, experiment_id, template_name=\"experiment/questionnaire_register.html\"):\n\n experiment = get_object_or_404(Experiment, pk=experiment_id)\n\n questionnaire_form = QuestionnaireConfigurationForm(\n request.POST or None,\n initial={'number_of_fills': 1, 'interval_between_fills_value': None})\n\n if request.method == \"GET\":\n\n questionnaires_of_experiment = 
QuestionnaireConfiguration.objects.filter(experiment=experiment)\n\n if not questionnaires_of_experiment:\n questionnaires_list = Questionnaires().find_all_active_questionnaires()\n else:\n active_questionnaires_list = Questionnaires().find_all_active_questionnaires()\n for questionnaire in questionnaires_of_experiment:\n for active_questionnaire in active_questionnaires_list:\n if active_questionnaire['sid'] == questionnaire.lime_survey_id:\n active_questionnaires_list.remove(active_questionnaire)\n questionnaires_list = active_questionnaires_list\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"save\":\n\n if questionnaire_form.is_valid():\n\n lime_survey_id = request.POST['questionnaire_selected']\n\n questionnaire = QuestionnaireConfiguration()\n questionnaire.lime_survey_id = lime_survey_id\n questionnaire.experiment = experiment\n\n if \"number_of_fills\" in request.POST:\n questionnaire.number_of_fills = request.POST['number_of_fills']\n\n if \"interval_between_fills_value\" in request.POST:\n questionnaire.interval_between_fills_value = request.POST['interval_between_fills_value']\n\n if \"interval_between_fills_unit\" in request.POST:\n questionnaire.interval_between_fills_unit = \\\n get_object_or_404(TimeUnit, pk=request.POST['interval_between_fills_unit'])\n\n questionnaire.save()\n\n messages.success(request, 'Questionário incluído com sucesso.')\n\n redirect_url = reverse(\"experiment_edit\", args=(experiment_id,))\n return HttpResponseRedirect(redirect_url)\n\n context = {\n \"questionnaire_form\": questionnaire_form,\n \"creating\": True,\n \"updating\": False,\n \"experiment\": experiment,\n \"questionnaires_list\": questionnaires_list}\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.change_questionnaireconfiguration')\ndef questionnaire_update(request, questionnaire_configuration_id,\n template_name=\"experiment/questionnaire_register.html\"):\n questionnaire_configuration = get_object_or_404(QuestionnaireConfiguration, pk=questionnaire_configuration_id)\n experiment = get_object_or_404(Experiment, pk=questionnaire_configuration.experiment.id)\n questionnaire_form = QuestionnaireConfigurationForm(request.POST or None, instance=questionnaire_configuration)\n\n surveys = Questionnaires()\n questionnaire_title = surveys.get_survey_title(questionnaire_configuration.lime_survey_id)\n surveys.release_session_key()\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"save\":\n if questionnaire_form.is_valid():\n\n if \"number_of_fills\" in request.POST:\n questionnaire_configuration.number_of_fills = request.POST['number_of_fills']\n\n if \"interval_between_fills_value\" in request.POST:\n questionnaire_configuration.interval_between_fills_value = \\\n request.POST['interval_between_fills_value']\n\n if \"interval_between_fills_unit\" in request.POST:\n questionnaire_configuration.interval_between_fills_unit = \\\n get_object_or_404(TimeUnit, pk=request.POST['interval_between_fills_unit'])\n\n questionnaire_configuration.save()\n\n messages.success(request, 'Questionário atualizado com sucesso.')\n\n redirect_url = reverse(\"experiment_edit\", args=(experiment.id,))\n return HttpResponseRedirect(redirect_url)\n else:\n if request.POST['action'] == \"remove\":\n try:\n questionnaire_configuration.delete()\n except ProtectedError:\n messages.error(request, \"Não foi possível excluir o questionário, pois há respostas associadas\")\n redirect_url = reverse(\"questionnaire_edit\", 
args=(questionnaire_configuration_id,))\n return HttpResponseRedirect(redirect_url)\n\n redirect_url = reverse(\"experiment_edit\", args=(experiment.id,))\n return HttpResponseRedirect(redirect_url)\n\n context = {\n \"questionnaire_form\": questionnaire_form,\n \"creating\": False,\n \"updating\": True,\n \"experiment\": experiment,\n \"questionnaire_title\": questionnaire_title,\n \"questionnaire_id\": questionnaire_configuration.lime_survey_id}\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.add_subject')\ndef subjects(request, experiment_id, template_name=\"experiment/subjects.html\"):\n experiment = get_object_or_404(Experiment, id=experiment_id)\n\n subject_of_experiment_list = SubjectOfExperiment.objects.all().filter(experiment=experiment)\n\n subject_list_with_status = []\n\n questionnaires_configuration_list = QuestionnaireConfiguration.objects.filter(experiment=experiment)\n\n surveys = Questionnaires()\n\n limesurvey_available = check_limesurvey_access(request, surveys)\n\n for subject_of_experiment in subject_of_experiment_list:\n\n number_of_questionnaires_filled = 0\n\n for questionnaire_configuration in questionnaires_configuration_list:\n\n subject_responses = QuestionnaireResponse.objects. \\\n filter(subject_of_experiment=subject_of_experiment). \\\n filter(questionnaire_configuration=questionnaire_configuration)\n\n if subject_responses:\n if (questionnaire_configuration.number_of_fills is None and subject_responses.count() > 0) or \\\n (questionnaire_configuration.number_of_fills is not None and\n questionnaire_configuration.number_of_fills == subject_responses.count()):\n\n number_of_questionnaires_completed = 0\n\n for subject_response in subject_responses:\n\n response_result = surveys.get_participant_properties(questionnaire_configuration.lime_survey_id,\n subject_response.token_id, \"completed\")\n\n if response_result == \"N\" or response_result == \"\":\n break\n else:\n number_of_questionnaires_completed += 1\n\n if (questionnaire_configuration.number_of_fills is None and\n number_of_questionnaires_completed >= subject_responses.count()) or \\\n (questionnaire_configuration.number_of_fills is not None and\n number_of_questionnaires_completed >= questionnaire_configuration.number_of_fills):\n number_of_questionnaires_filled += 1\n\n percentage = 0\n\n if questionnaires_configuration_list.count() > 0:\n percentage = 100 * number_of_questionnaires_filled / questionnaires_configuration_list.count()\n\n subject_list_with_status.append(\n {'subject': subject_of_experiment.subject,\n 'number_of_questionnaires_filled': number_of_questionnaires_filled,\n 'total_of_questionnaires': questionnaires_configuration_list.count(),\n 'percentage': percentage,\n 'consent': subject_of_experiment.consent_form})\n\n context = {\n 'experiment_id': experiment_id,\n 'subject_list': subject_list_with_status,\n 'experiment_title': experiment.title,\n \"limesurvey_available\": limesurvey_available\n }\n\n surveys.release_session_key()\n\n return render(request, template_name, context)\n\n\ndef subject_questionnaire_response_start_fill_questionnaire(request, subject_id, questionnaire_id):\n questionnaire_response_form = QuestionnaireResponseForm(request.POST)\n\n if questionnaire_response_form.is_valid():\n\n questionnaire_response = questionnaire_response_form.save(commit=False)\n\n questionnaire_config = get_object_or_404(QuestionnaireConfiguration, id=questionnaire_id)\n\n questionnaire_lime_survey = Questionnaires()\n\n subject = 
get_object_or_404(Subject, pk=subject_id)\n patient = subject.patient\n\n subject_of_experiment = get_object_or_404(SubjectOfExperiment, subject=subject,\n experiment=questionnaire_config.experiment)\n\n if not questionnaire_lime_survey.survey_has_token_table(questionnaire_config.lime_survey_id):\n messages.warning(request,\n 'Preenchimento não disponível - Tabela de tokens não iniciada')\n return None, None\n\n if questionnaire_lime_survey.get_survey_properties(questionnaire_config.lime_survey_id, 'active') == 'N':\n messages.warning(request,\n 'Preenchimento não disponível - Questionário não está ativo')\n return None, None\n\n if not check_required_fields(questionnaire_lime_survey, questionnaire_config.lime_survey_id):\n messages.warning(request,\n 'Preenchimento não disponível - Questionário não contém campos padronizados')\n return None, None\n\n result = questionnaire_lime_survey.add_participant(questionnaire_config.lime_survey_id, patient.name, '',\n patient.email)\n\n questionnaire_lime_survey.release_session_key()\n\n if not result:\n messages.warning(request,\n 'Falha ao gerar token para responder questionário. Verifique se o questionário está ativo')\n return None, None\n\n questionnaire_response.subject_of_experiment = subject_of_experiment\n questionnaire_response.questionnaire_configuration = questionnaire_config\n questionnaire_response.token_id = result['token_id']\n questionnaire_response.date = datetime.datetime.strptime(request.POST['date'], '%d/%m/%Y')\n questionnaire_response.questionnaire_responsible = request.user\n questionnaire_response.save()\n\n redirect_url = get_limesurvey_response_url(questionnaire_response)\n\n return redirect_url, questionnaire_response.pk\n else:\n return None, None\n\n\ndef get_limesurvey_response_url(questionnaire_response):\n questionnaire_lime_survey = Questionnaires()\n token = questionnaire_lime_survey.get_participant_properties(\n questionnaire_response.questionnaire_configuration.lime_survey_id,\n questionnaire_response.token_id, \"token\")\n questionnaire_lime_survey.release_session_key()\n\n redirect_url = \\\n '%s/index.php/%s/token/%s/responsibleid/%s/acquisitiondate/%s/subjectid/%s/newtest/Y' % (\n settings.LIMESURVEY['URL_WEB'],\n questionnaire_response.questionnaire_configuration.lime_survey_id,\n token,\n str(questionnaire_response.questionnaire_responsible.id),\n questionnaire_response.date.strftime('%d-%m-%Y'),\n str(questionnaire_response.subject_of_experiment.subject.id))\n\n # versao com os nomes dos campos em portugues\n #\n # redirect_url = \\\n # '%s/index.php/%s/token/%s/idavaliador/%s/datdataaquisicao/%s/idparticipante/%s/newtest/Y' % (\n # settings.LIMESURVEY['URL'],\n # questionnaire_response.questionnaire_configuration.lime_survey_id,\n # token,\n # str(questionnaire_response.questionnaire_responsible.id),\n # questionnaire_response.date.strftime('%d-%m-%Y'),\n # str(questionnaire_response.subject_of_experiment.subject.id))\n\n return redirect_url\n\n\n@login_required\n@permission_required('experiment.add_questionnaireresponse')\ndef subject_questionnaire_response_create(request, experiment_id, subject_id, questionnaire_id,\n template_name=\"experiment/subject_questionnaire_response_form.html\"):\n questionnaire_config = get_object_or_404(QuestionnaireConfiguration, id=questionnaire_id)\n\n surveys = Questionnaires()\n survey_title = surveys.get_survey_title(questionnaire_config.lime_survey_id)\n survey_active = surveys.get_survey_properties(questionnaire_config.lime_survey_id, 'active')\n survey_admin = 
surveys.get_survey_properties(questionnaire_config.lime_survey_id, 'admin')\n surveys.release_session_key()\n\n questionnaire_responsible = request.user.get_full_name()\n subject = get_object_or_404(Subject, pk=subject_id)\n\n if request.method == \"GET\":\n questionnaire_response_form = QuestionnaireResponseForm(request.POST or None)\n fail = None\n redirect_url = None\n questionnaire_response_id = None\n\n if request.method == \"POST\":\n questionnaire_response_form = QuestionnaireResponseForm(request.POST)\n\n if request.POST['action'] == \"save\":\n redirect_url, questionnaire_response_id = subject_questionnaire_response_start_fill_questionnaire(request, subject_id,\n questionnaire_id)\n if not redirect_url:\n fail = False\n else:\n fail = True\n messages.info(request, 'Você será redirecionado para o questionário. Aguarde.')\n\n context = {\n \"FAIL\": fail,\n \"URL\": redirect_url,\n \"questionnaire_response_id\": questionnaire_response_id,\n \"questionnaire_response_form\": questionnaire_response_form,\n \"questionnaire_configuration\": questionnaire_config,\n \"survey_title\": survey_title,\n \"survey_admin\": survey_admin,\n \"survey_active\": survey_active,\n \"questionnaire_responsible\": questionnaire_responsible,\n \"creating\": True,\n \"subject\": subject\n }\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.change_questionnaireresponse')\ndef questionnaire_response_update(request, questionnaire_response_id,\n template_name=\"experiment/subject_questionnaire_response_form.html\"):\n questionnaire_response = get_object_or_404(QuestionnaireResponse, id=questionnaire_response_id)\n\n questionnaire_configuration = questionnaire_response.questionnaire_configuration\n\n surveys = Questionnaires()\n survey_title = surveys.get_survey_title(questionnaire_configuration.lime_survey_id)\n survey_active = surveys.get_survey_properties(questionnaire_configuration.lime_survey_id, 'active')\n survey_admin = surveys.get_survey_properties(questionnaire_configuration.lime_survey_id, 'admin')\n survey_completed = (surveys.get_participant_properties(questionnaire_configuration.lime_survey_id,\n questionnaire_response.token_id,\n \"completed\") != \"N\")\n surveys.release_session_key()\n\n questionnaire_responsible = questionnaire_response.questionnaire_responsible\n subject = questionnaire_response.subject_of_experiment.subject\n\n questionnaire_response_form = QuestionnaireResponseForm(None, instance=questionnaire_response)\n\n if request.method == \"GET\":\n fail = None\n redirect_url = None\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"save\":\n\n redirect_url = get_limesurvey_response_url(questionnaire_response)\n\n if not redirect_url:\n fail = False\n else:\n fail = True\n messages.info(request, 'Você será redirecionado para o questionário. 
Aguarde.')\n\n        else:\n            if request.POST['action'] == \"remove\":\n                surveys = Questionnaires()\n                result = surveys.delete_participant(\n                    questionnaire_configuration.lime_survey_id,\n                    questionnaire_response.token_id)\n                surveys.release_session_key()\n\n                can_delete = False\n\n                if str(questionnaire_response.token_id) in result:\n                    result = result[str(questionnaire_response.token_id)]\n                    if result == 'Deleted' or result == 'Invalid token ID':\n                        can_delete = True\n                else:\n                    if 'status' in result and result['status'] == u'Error: Invalid survey ID':\n                        can_delete = True\n\n                if can_delete:\n                    questionnaire_response.delete()\n                    messages.success(request, 'Preenchimento removido com sucesso')\n                else:\n                    messages.error(request, \"Erro ao deletar o preenchimento\")\n                redirect_url = reverse(\"subject_questionnaire\",\n                                       args=(questionnaire_configuration.experiment.id, subject.id,))\n                return HttpResponseRedirect(redirect_url)\n\n    context = {\n        \"FAIL\": fail,\n        \"URL\": redirect_url,\n        \"questionnaire_response_form\": questionnaire_response_form,\n        \"questionnaire_configuration\": questionnaire_configuration,\n        \"survey_title\": survey_title,\n        \"survey_admin\": survey_admin,\n        \"survey_active\": survey_active,\n        \"questionnaire_response_id\": questionnaire_response_id,\n        \"questionnaire_responsible\": questionnaire_responsible,\n        \"creating\": False,\n        \"subject\": subject,\n        \"completed\": survey_completed\n    }\n\n    return render(request, template_name, context)\n\n\n# check whether the questionnaire has the correct identification questions and whether their types are also correct\ndef check_required_fields(surveys, lime_survey_id):\n\n    fields_to_validate = {\n        'responsibleid': {'type': 'N', 'found': False},\n        'acquisitiondate': {'type': 'D', 'found': False},\n        'subjectid': {'type': 'N', 'found': False},\n    }\n\n    validated_quantity = 0\n    error = False\n\n    groups = surveys.list_groups(lime_survey_id)\n\n    for group in groups:\n        if 'id' in group:\n            question_list = surveys.list_questions(lime_survey_id, group['id'])\n            for question in question_list:\n                question_properties = surveys.get_question_properties(question)\n                if question_properties['title'] in fields_to_validate:\n                    field = fields_to_validate[question_properties['title']]\n                    if not field['found']:\n                        field['found'] = True\n                        if field['type'] == question_properties['type']:\n                            validated_quantity += 1\n                        else:\n                            error = True\n                if error or validated_quantity == len(fields_to_validate):\n                    break\n            if error or validated_quantity == len(fields_to_validate):\n                break\n\n    return validated_quantity == len(fields_to_validate)\n\n@login_required\n@permission_required('experiment.view_questionnaireresponse')\ndef questionnaire_response_view(request, questionnaire_response_id,\n                                template_name=\"experiment/subject_questionnaire_response_view.html\"):\n    questionnaire_response = get_object_or_404(QuestionnaireResponse, id=questionnaire_response_id)\n    questionnaire_configuration = questionnaire_response.questionnaire_configuration\n    surveys = Questionnaires()\n    survey_title = surveys.get_survey_title(questionnaire_configuration.lime_survey_id)\n    token = surveys.get_participant_properties(questionnaire_configuration.lime_survey_id,\n                                               questionnaire_response.token_id, \"token\")\n\n    question_properties = []\n    groups = surveys.list_groups(questionnaire_configuration.lime_survey_id)\n    questionnaire_responses = []\n\n    if not isinstance(groups, dict):\n        for group in groups:\n            if 'id' in group:\n                question_list = surveys.list_questions(questionnaire_configuration.lime_survey_id, group['id'])\n                
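# sort the questions so their properties are collected in a stable order\n                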
question_list = sorted(question_list)\n for question in question_list:\n properties = surveys.get_question_properties(question)\n if ('{int' not in properties['question']) and ('{(' not in properties['question'])\\\n and ('{if' not in properties['question']) and ('{pont' not in properties['question']):\n properties['question'] = re.sub('<.*?>', '', properties['question'])\n\n if isinstance(properties['subquestions'], dict):\n question_properties.append({\n 'question': properties['question'],\n 'question_id': properties['title'],\n 'answer_options': 'super_question',\n 'type': properties['type']\n })\n for key, value in sorted(properties['subquestions'].iteritems()):\n question_properties.append({\n 'question': value['question'],\n 'question_id': properties['title'] + '[' + value['title'] + ']',\n 'answer_options': properties['answeroptions'],\n 'type': properties['type']\n })\n else:\n question_properties.append({\n 'question': properties['question'],\n 'question_id': properties['title'],\n 'answer_options': properties['answeroptions'],\n 'type': properties['type']\n })\n\n responses_list = surveys.get_responses_by_token(questionnaire_configuration.lime_survey_id, token)\n responses_list = responses_list.replace('\\\"', '')\n responses_list = responses_list.split('\\n')\n responses_list[0] = responses_list[0].split(\",\")\n responses_list[1] = responses_list[1].split(\",\")\n\n for question in question_properties:\n\n if isinstance(question['answer_options'], basestring) and question['answer_options'] == \"super_question\":\n\n if question['question'] != '':\n questionnaire_responses.append({\n 'question': question['question'],\n 'answer': '',\n 'type': question['type']\n })\n else:\n\n answer = ''\n\n if question['question_id'] in responses_list[0]:\n\n index = responses_list[0].index(question['question_id'])\n\n answer_options = question['answer_options']\n\n if isinstance(answer_options, dict):\n\n if responses_list[1][index] in answer_options:\n answer_option = answer_options[responses_list[1][index]]\n answer = answer_option['answer']\n else:\n answer = 'Sem resposta'\n else:\n if question['type'] == 'D':\n if responses_list[1][index]:\n answer = datetime.datetime.strptime(responses_list[1][index], '%Y-%m-%d %H:%M:%S')\n else:\n answer = ''\n else:\n answer = responses_list[1][index]\n\n questionnaire_responses.append({\n 'question': question['question'],\n 'answer': answer,\n 'type': question['type']\n })\n\n surveys.release_session_key()\n\n context = {\n \"questionnaire_responses\": questionnaire_responses,\n \"survey_title\": survey_title,\n \"questionnaire_response\": questionnaire_response\n }\n\n return render(request, template_name, context)\n\n\ndef check_limesurvey_access(request, surveys):\n limesurvey_available = True\n if not surveys.session_key:\n limesurvey_available = False\n messages.warning(request, \"LimeSurvey indisponível. 
Sistema funcionando parcialmente.\")\n\n return limesurvey_available\n\n\n@login_required\n@permission_required('experiment.view_questionnaireresponse')\ndef subject_questionnaire_view(request, experiment_id, subject_id,\n template_name=\"experiment/subject_questionnaire_response_list.html\"):\n experiment = get_object_or_404(Experiment, id=experiment_id)\n subject = get_object_or_404(Subject, id=subject_id)\n\n questionnaires_configuration_list = QuestionnaireConfiguration.objects.filter(experiment=experiment)\n\n subject_questionnaires = []\n can_remove = True\n\n surveys = Questionnaires()\n\n limesurvey_available = check_limesurvey_access(request, surveys)\n\n for questionnaire_configuration in questionnaires_configuration_list:\n\n subject_of_experiment = get_object_or_404(SubjectOfExperiment, experiment=experiment, subject=subject)\n\n questionnaire_responses = QuestionnaireResponse.objects. \\\n filter(subject_of_experiment=subject_of_experiment). \\\n filter(questionnaire_configuration=questionnaire_configuration)\n\n questionnaire_responses_with_status = []\n\n if questionnaire_responses:\n can_remove = False\n\n for questionnaire_response in questionnaire_responses:\n response_result = surveys.get_participant_properties(questionnaire_configuration.lime_survey_id,\n questionnaire_response.token_id,\n \"completed\")\n questionnaire_responses_with_status.append(\n {'questionnaire_response': questionnaire_response,\n 'completed': None if response_result is None else response_result != \"N\" and response_result != \"\"}\n )\n\n subject_questionnaires.append(\n {'questionnaire_configuration': questionnaire_configuration,\n 'title': surveys.get_survey_title(questionnaire_configuration.lime_survey_id),\n 'questionnaire_responses': questionnaire_responses_with_status}\n )\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"remove\":\n if can_remove:\n subject_of_experiment = get_object_or_404(SubjectOfExperiment, experiment=experiment, subject=subject)\n subject_of_experiment.delete()\n\n messages.info(request, 'Participante removido do experimento.')\n redirect_url = reverse(\"subjects\", args=(experiment_id,))\n return HttpResponseRedirect(redirect_url)\n else:\n messages.error(request, \"Não foi possível excluir o paciente, pois há respostas associadas\")\n redirect_url = reverse(\"subject_questionnaire\", args=(experiment_id, subject_id,))\n return HttpResponseRedirect(redirect_url)\n\n context = {\n 'subject': subject,\n 'experiment': experiment,\n 'subject_questionnaires': subject_questionnaires,\n 'limesurvey_available': limesurvey_available\n }\n\n surveys.release_session_key()\n\n return render(request, template_name, context)\n\n\n@login_required\n@permission_required('experiment.add_subject')\ndef subjects_insert(request, experiment_id, patient_id):\n patient = get_object_or_404(Patient, pk=patient_id)\n\n subject = Subject()\n\n try:\n subject = Subject.objects.get(patient=patient)\n except subject.DoesNotExist:\n subject.patient = patient\n subject.save()\n\n experiment = get_object_or_404(Experiment, id=experiment_id)\n\n if not SubjectOfExperiment.objects.all().filter(experiment=experiment, subject=subject):\n SubjectOfExperiment(subject=subject, experiment=experiment).save()\n else:\n messages.warning(request, 'Participante já inserido para este experimento.')\n\n redirect_url = reverse(\"subjects\", args=(experiment_id,))\n return HttpResponseRedirect(redirect_url)\n\n\n@login_required\n@permission_required('experiment.add_subject')\ndef 
search_patients_ajax(request):\n patient_list = ''\n if request.method == \"POST\":\n search_text = request.POST['search_text']\n experiment_id = request.POST['experiment_id']\n if search_text:\n if re.match('[a-zA-Z ]+', search_text):\n patient_list = Patient.objects.filter(name__icontains=search_text).exclude(removed=True)\n else:\n patient_list = Patient.objects.filter(cpf__icontains=search_text).exclude(removed=True)\n\n return render_to_response('experiment/ajax_search_patients.html',\n {'patients': patient_list, 'experiment_id': experiment_id})\n\n\ndef upload_file(request, subject_id, experiment_id, template_name=\"experiment/upload_consent_form.html\"):\n subject = get_object_or_404(Subject, pk=subject_id)\n experiment = get_object_or_404(Experiment, pk=experiment_id)\n subject_of_experiment = get_object_or_404(SubjectOfExperiment, subject=subject, experiment=experiment)\n\n if request.method == \"POST\":\n\n if request.POST['action'] == \"upload\":\n file_form = FileForm(request.POST, request.FILES, instance=subject_of_experiment)\n if 'consent_form' in request.FILES:\n if file_form.is_valid():\n file_form.save()\n messages.success(request, 'Termo salvo com sucesso.')\n\n redirect_url = reverse(\"subjects\", args=(experiment_id, ))\n return HttpResponseRedirect(redirect_url)\n else:\n messages.error(request, 'Não existem anexos para salvar')\n else:\n if request.POST['action'] == \"remove\":\n # subject_of_experiment.consent_form = ''\n subject_of_experiment.consent_form.delete()\n subject_of_experiment.save()\n messages.success(request, 'Anexo removido com sucesso.')\n\n redirect_url = reverse(\"subjects\", args=(experiment_id,))\n return HttpResponseRedirect(redirect_url)\n\n else:\n file_form = FileForm(request.POST or None)\n\n context = {\n 'subject': subject,\n 'experiment': experiment,\n 'file_form': file_form,\n 'file_list': subject_of_experiment.consent_form\n }\n return render(request, template_name, context)\n","repo_name":"CULTSCIENCE-2033/nes","sub_path":"patientregistrationsystem/qdc/experiment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"8565513487","text":"from typing import List, Dict, Tuple, Optional\n\nfrom dataclasses import dataclass\nfrom enum import Enum, IntEnum\nfrom copy import deepcopy\n\nimport argparse\n\ndef load_input(fn):\n with open(fn, 'r') as fin:\n for line in fin:\n if line[-1] == '\\n':\n line = line[:-1]\n yield line\n\nclass PointType(Enum):\n EMPTY = 0,\n PATH = 1,\n WALL = 2,\n\n def __repr__(self) -> str:\n if self == PointType.EMPTY:\n return ' '\n if self == PointType.WALL:\n return '#'\n if self == PointType.PATH:\n return '.'\n\n raise ValueError('unsupported point type \"{self}\"')\n\n @staticmethod\n def parse(s) -> 'PointType':\n if s == '.':\n return PointType.PATH\n elif s == '#':\n return PointType.WALL\n elif s == ' ':\n return PointType.EMPTY\n else:\n raise ValueError(f'unsupported map symbol \"{s}\"')\n\n@dataclass\nclass Point:\n y: int\n x: int\n t: PointType\n plane: int\n\n\nclass Turn(Enum):\n CLOCK = 0,\n COUNTER = 1,\n STILL = 2,\n\n @staticmethod\n def parse(symbol) -> 'Turn':\n if symbol == 'R':\n return Turn.CLOCK\n elif symbol == 'L':\n return Turn.COUNTER\n else:\n raise ValueError(f'unsupported turn symbol \"{symbol}\"')\n\n@dataclass\nclass Move:\n num_steps: int\n turn: Turn\n\nclass Direction(IntEnum):\n RIGHT = 0,\n DOWN = 1,\n LEFT = 2,\n UP = 3,\n\n def __repr__(self) -> str:\n 
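# index the arrow string by the member's int value: RIGHT=0 '>', DOWN=1 'v', LEFT=2 '<', UP=3 '^'\n        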
return '>v<^'[int(self)]\n\n def __str__(self) -> str:\n return self.__repr__()\n\nclass Facing:\n def __init__(self, initial: Direction):\n self.dir = initial\n\n def turn(self, turn: Turn):\n if turn == Turn.CLOCK:\n self.dir = Direction((self.dir + 1) % 4)\n elif turn == Turn.COUNTER:\n self.dir = Direction((self.dir - 1) % 4)\n elif turn == Turn.STILL:\n pass\n else:\n raise ValueError(f'unsupported turn \"{turn}\"')\n\nclass FlatPlane:\n def __init__(self, size: int, start_y: int, start_x: int):\n self.size = size\n self.start_x = start_x\n self.start_y = start_y\n\n def __repr__(self) -> str:\n return f'[flat_y: [{self.start_y}, {self.start_y+self.size}], flat_x: [{self.start_x}, {self.start_x+self.size}]]'\n\n def point_in(self, pos: Point) -> bool:\n if pos.x < self.start_x or pos.y < self.start_y:\n return False\n if pos.x >= self.start_x + self.size or pos.y >= self.start_y + self.size:\n return False\n\n return True\n\n def step(self, pos: Point, direction: Direction) -> Point:\n if not self.point_in(pos):\n raise ValueError(f'initial position {pos} does not belong to this plane {self}')\n\n pos = deepcopy(pos)\n\n if direction == Direction.UP:\n pos.y -= 1\n elif direction == Direction.DOWN:\n pos.y += 1\n elif direction == Direction.RIGHT:\n pos.x += 1\n elif direction == Direction.LEFT:\n pos.x -= 1\n else:\n raise ValueError(f'invalid direction \"{direction}\"')\n\n return pos\n\nclass CubeFold1:\n def __init__(self, cube_size: int):\n self.cube_size = cube_size\n self.planes = {\n 1: FlatPlane(size=self.cube_size, start_y=1, start_x=9),\n 2: FlatPlane(size=self.cube_size, start_y=5, start_x=1),\n 3: FlatPlane(size=self.cube_size, start_y=5, start_x=5),\n 4: FlatPlane(size=self.cube_size, start_y=5, start_x=9),\n 5: FlatPlane(size=self.cube_size, start_y=9, start_x=9),\n 6: FlatPlane(size=self.cube_size, start_y=9, start_x=13),\n }\n self.transitions = {\n 1: {\n Direction.UP: (2, 'MX-MY', Direction.DOWN),\n Direction.LEFT: (3, 'MX-C', Direction.DOWN),\n Direction.DOWN: (4, '', Direction.DOWN),\n Direction.RIGHT: (6, 'MX-MY', Direction.LEFT),\n },\n 2: {\n Direction.UP: (1, 'MX-MY', Direction.DOWN),\n Direction.LEFT: (6, 'MY-C', Direction.UP),\n Direction.DOWN: (5, 'MX-MY', Direction.UP),\n Direction.RIGHT: (3, '', Direction.RIGHT),\n },\n 3: {\n Direction.UP: (1, 'MY-C', Direction.RIGHT),\n Direction.LEFT: (2, '', Direction.LEFT),\n Direction.DOWN: (5, 'MX-C', Direction.RIGHT),\n Direction.RIGHT: (4, '', Direction.RIGHT),\n },\n 4: {\n Direction.UP: (1, '', Direction.UP),\n Direction.LEFT: (3, '', Direction.LEFT),\n Direction.DOWN: (5, '', Direction.DOWN),\n Direction.RIGHT: (6, 'MY-C', Direction.DOWN),\n },\n 5: {\n Direction.UP: (4, '', Direction.UP),\n Direction.LEFT: (3, 'MY-C', Direction.UP),\n Direction.DOWN: (2, 'MY-MX', Direction.UP),\n Direction.RIGHT: (6, '', Direction.RIGHT),\n },\n 6: {\n Direction.UP: (4, 'MX-C', Direction.LEFT),\n Direction.LEFT: (5, '', Direction.LEFT),\n Direction.DOWN: (2, 'MX-C', Direction.RIGHT),\n Direction.RIGHT: (1, 'MY-MX', Direction.LEFT),\n },\n }\n\n def relative_transform(self, p: Point, transform: str) -> Point:\n x = p.x\n y = p.y\n for op in transform.split('-'):\n if op == 'MX':\n x = self.cube_size - x + 1\n elif op == 'MY':\n y = self.cube_size - y + 1\n elif op == 'C':\n tmp = x\n x = y\n y = tmp\n\n return Point(y=y, x=x, t=PointType.WALL, plane=0)\n\n\n def flat_point_to_plane(self, point: Point):\n for plane_id, plane in self.planes.items():\n if plane.point_in(point):\n point.plane = plane_id\n return\n\nclass 
CubeFold2:\n def __init__(self, cube_size: int):\n self.cube_size = cube_size\n self.planes = {\n 1: FlatPlane(size=self.cube_size, start_y=1, start_x=51),\n 2: FlatPlane(size=self.cube_size, start_y=1, start_x=101),\n 3: FlatPlane(size=self.cube_size, start_y=51, start_x=51),\n 4: FlatPlane(size=self.cube_size, start_y=101, start_x=1),\n 5: FlatPlane(size=self.cube_size, start_y=101, start_x=51),\n 6: FlatPlane(size=self.cube_size, start_y=151, start_x=1),\n }\n self.transitions = {\n 1: {\n Direction.UP: (6, 'MY-C', Direction.RIGHT),\n Direction.LEFT: (4, 'MX-MY', Direction.RIGHT),\n Direction.DOWN: (3, '', Direction.DOWN),\n Direction.RIGHT: (2, '', Direction.RIGHT),\n },\n 2: {\n Direction.UP: (6, '', Direction.UP),\n Direction.LEFT: (1, '', Direction.LEFT),\n Direction.DOWN: (3, 'MY-C', Direction.LEFT),\n Direction.RIGHT: (5, 'MY-MX', Direction.LEFT),\n },\n 3: {\n Direction.UP: (1, '', Direction.UP),\n Direction.LEFT: (4, 'MX-C', Direction.DOWN),\n Direction.DOWN: (5, '', Direction.DOWN),\n Direction.RIGHT: (2, 'MX-C', Direction.UP),\n },\n 4: {\n Direction.UP: (3, 'MY-C', Direction.RIGHT),\n Direction.LEFT: (1, 'MX-MY', Direction.RIGHT),\n Direction.DOWN: (6, '', Direction.DOWN),\n Direction.RIGHT: (5, '', Direction.RIGHT),\n },\n 5: {\n Direction.UP: (3, '', Direction.UP),\n Direction.LEFT: (4, '', Direction.LEFT),\n Direction.DOWN: (6, 'MY-C', Direction.LEFT),\n Direction.RIGHT: (2, 'MX-MY', Direction.LEFT),\n },\n 6: {\n Direction.UP: (4, '', Direction.UP),\n Direction.LEFT: (1, 'MX-C', Direction.DOWN),\n Direction.DOWN: (2, '', Direction.DOWN),\n Direction.RIGHT: (5, 'MX-C', Direction.UP),\n },\n }\n\n def relative_transform(self, p: Point, transform: str) -> Point:\n x = p.x\n y = p.y\n for op in transform.split('-'):\n if op == 'MX':\n x = self.cube_size - x + 1\n elif op == 'MY':\n y = self.cube_size - y + 1\n elif op == 'C':\n tmp = x\n x = y\n y = tmp\n\n return Point(y=y, x=x, t=PointType.WALL, plane=0)\n\n\n def flat_point_to_plane(self, point: Point):\n for plane_id, plane in self.planes.items():\n if plane.point_in(point):\n point.plane = plane_id\n return\n\nclass Map:\n def __init__(self, is_small=True):\n self.map = []\n self.width = 0\n self.height = 0\n\n if is_small:\n self.cube_size = 4\n self.fold = CubeFold1(self.cube_size)\n else:\n self.cube_size = 50\n self.fold = CubeFold2(self.cube_size)\n\n\n def add_point(self, y, x, s):\n point = Point(y, x, PointType.parse(s), 0)\n self.fold.flat_point_to_plane(point)\n if point.x > self.width:\n self.width = point.x\n if point.y > self.height:\n self.height = point.y\n\n if len(self.map) == 0:\n self.map.append([point])\n return\n\n last_row = self.map[-1]\n last_y = last_row[0].y\n if last_y == point.y:\n last_row.append(point)\n else:\n self.map.append([point])\n\n def wrap(self):\n for y, line in enumerate(self.map):\n for x in range(len(line), self.width):\n line.append(Point(y+1, x, PointType.EMPTY, 0))\n\n def visualize(self, steps: List[Tuple[Point, Direction]], facing: Direction):\n map = []\n for y, line in enumerate(self.map):\n line_str = [p.t.__repr__() for p in line]\n map.append(line_str)\n\n for point, direction in steps:\n map[point.y-1][point.x-1] = direction.__repr__()\n\n map_str = []\n for line in map:\n line_str = ''.join(line)\n map_str.append(line_str)\n map_str = '\\n'.join(map_str)\n print(map_str)\n\n def flat_step(self, pos: Point, direction: Direction, num_steps: int) -> Point:\n for _ in range(num_steps):\n y = pos.y\n x = pos.x\n\n while True:\n if direction == Direction.UP:\n y -= 1\n 
elif direction == Direction.DOWN:\n y += 1\n elif direction == Direction.RIGHT:\n x += 1\n elif direction == Direction.LEFT:\n x -= 1\n else:\n raise ValueError(f'invalid direction \"{direction}\"')\n\n if y == self.height+1:\n y = 1\n if y == 0:\n y = self.height\n\n if x == self.width+1:\n x = 1\n if x == 0:\n x = self.width\n\n point = self.map[y-1][x-1]\n if point.t == PointType.PATH:\n pos.y = y\n pos.x = x\n break\n\n if point.t == PointType.WALL:\n return pos\n\n # continue wrapping around over empty space\n\n return pos\n\n def wrap_coords(self, pos):\n pos = deepcopy(pos)\n\n if pos.y == self.cube_size + 1:\n pos.y = 1\n if pos.y == 0:\n pos.y = self.cube_size\n\n if pos.x == self.cube_size + 1:\n pos.x = 1\n if pos.x == 0:\n pos.x = self.cube_size\n\n return pos\n\n def single_cube_step(self, pos: Point, direction: Direction) -> Tuple[Point, Direction]:\n current_plane = self.fold.planes[pos.plane]\n next_pos = current_plane.step(pos, direction)\n if current_plane.point_in(next_pos):\n return next_pos, direction\n\n next_plane_id, relative_coord_transform, next_direction = self.fold.transitions[pos.plane][direction]\n\n relative_next_pos = Point(y=next_pos.y-current_plane.start_y+1,\n x=next_pos.x-current_plane.start_x+1,\n t=next_pos.t,\n plane=next_pos.plane)\n relative_next_pos = self.wrap_coords(relative_next_pos)\n #print(f'transition1: {pos} {next_direction} -> {next_pos} {next_direction}, current_plane: {current_plane}, relative_next_pos: {relative_next_pos}')\n\n relative_next_pos = self.fold.relative_transform(relative_next_pos, relative_coord_transform)\n\n next_plane = self.fold.planes[next_plane_id]\n next_y = relative_next_pos.y + next_plane.start_y - 1\n next_x = relative_next_pos.x + next_plane.start_x - 1\n #print(f'transition2: {pos} {direction} -> plane: {next_plane_id} {next_plane}, y: {next_y}, x: {next_x}, relative_next_pos: {relative_next_pos}')\n next_point_type = self.map[next_y-1][next_x-1].t\n next_pos = Point(next_y, next_x, next_point_type, next_plane_id)\n\n #print(f'transition3: {pos} {direction} -> {next_pos} {next_direction}')\n return next_pos, next_direction\n\n def cube_step(self, pos: Point, direction: Direction, num_steps: int) -> List[Tuple[Point, Direction]]:\n steps = []\n for _ in range(num_steps):\n while True:\n next_pos, next_direction = self.single_cube_step(pos, direction)\n\n point = self.map[next_pos.y-1][next_pos.x-1]\n #print(f'cube: {pos} {next_direction} -> {next_pos} {next_direction}, map: {point}')\n if point.t == PointType.PATH:\n pos = next_pos\n direction = next_direction\n steps.append((pos, direction))\n break\n\n if point.t == PointType.WALL:\n #print(f'cube: ret: {pos}')\n steps.append((pos, direction))\n return steps\n\n # continue wrapping around over empty space\n\n #print(f'cube: ret: {pos}')\n steps.append((pos, direction))\n return steps\n\nclass Solution:\n def __init__(self, line_generator, is_small=False):\n self.map = Map(is_small)\n\n for y, line in enumerate(line_generator):\n if len(line) == 0:\n break\n\n for x, s in enumerate(line):\n self.map.add_point(y+1, x+1, s)\n\n self.map.wrap()\n\n self.route = self.parse_route(next(line_generator))\n self.steps_made = []\n\n for pos in self.map.map[0]:\n if pos.t == PointType.PATH:\n self.orig_pos = pos\n break\n\n self.orig_facing = Facing(Direction.RIGHT)\n self.reset()\n print(f'map: w: {self.map.width}/{len(self.map.map[0])}, h: {self.map.height}/{len(self.map.map)}, route: {len(self.route)} turns, pos: {self.pos}')\n\n def reset(self):\n self.pos = 
deepcopy(self.orig_pos)\n self.facing = deepcopy(self.orig_facing)\n\n def parse_route(self, line) -> List[Move]:\n steps = []\n\n start_index = 0\n for end_index, s in enumerate(line):\n if not s.isdigit():\n num_steps = int(line[start_index:end_index])\n turn = Turn.parse(s)\n\n steps.append(Move(num_steps, turn))\n start_index = end_index + 1\n\n num_steps = int(line[start_index:])\n steps.append(Move(num_steps, Turn.STILL))\n return steps\n\n def part1(self):\n self.reset()\n\n for step in self.route:\n self.pos = self.map.flat_step(self.pos, self.facing.dir, step.num_steps)\n self.facing.turn(step.turn)\n\n return self.pos.y * 1000 + self.pos.x * 4 + int(self.facing.dir)\n\n def part2(self):\n self.reset()\n\n for step in self.route:\n #print(f'pos: {self.pos}, dir: {self.facing.dir}, turn: {step.turn}, num_steps: {step.num_steps}')\n steps = self.map.cube_step(self.pos, self.facing.dir, step.num_steps)\n self.pos, self.facing.dir = steps[-1]\n self.facing.turn(step.turn)\n\n self.steps_made += steps\n #self.map.visualize(self.steps_made, self.facing.dir)\n\n return self.pos.y * 1000 + self.pos.x * 4 + int(self.facing.dir)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True, type=str, help='Input file')\n parser.add_argument('--is_small', action='store_true', help='Whether this is a small example with a particular folding')\n FLAGS = parser.parse_args()\n\n line_generator = load_input(FLAGS.input)\n solution = Solution(line_generator, FLAGS.is_small)\n\n part1 = solution.part1()\n print(f'part1: {part1}')\n\n part2 = solution.part2()\n print(f'part2: {part2}')\n\nif __name__ == '__main__':\n main()\n","repo_name":"bioothod/adventofcode","sub_path":"2022/day22/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":17228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"14927592070","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom babel_python.rabbitmq import *\n\n__author__ = \"nebula\"\n\n\nclass TestRabbitmq:\n\n def setup_method(self, method):\n self.queue = \"test\"\n self.exchange = \"test\"\n self.exchange_type = \"direct\"\n self.routing_key = \"test\"\n self.max_send = 100\n self.max_recv = 10\n self.sender = PikaQueueSender(\"sh\", self.exchange, self.exchange_type, exchange_durable=False,\n queue_durable=False, max_queue_size=self.max_send, lazy_limit=True, self_delete=True)\n self.recver = PikaQueueReceiver(\"sh\", self.exchange, self.exchange_type, self.queue, self.routing_key, exchange_durable=False,\n queue_durable=False, max_cache_size=self.max_recv, self_delete=True)\n self.recver.start_consuming()\n\n def teardown_method(self, method):\n self.recver.stop_consuming()\n self.sender.close()\n self.recver.close()\n\n def test_simple_send_recv(self):\n msg = \"test_message\"\n self.sender.put(msg, self.routing_key)\n assert msg == self.recver.get(True, 5)\n\n def test_multiple_recv(self):\n msg = \"test_message\"\n for i in xrange(self.max_send):\n self.sender.put(msg, self.routing_key)\n\n import time\n time.sleep(2)\n\n # assert self.recver.get_errors_due_to_queue_full() == self.max_send - self.max_recv\n\n\n\n","repo_name":"threathunterX/python_lib","sub_path":"babel_python/test/testrabbitmq.py","file_name":"testrabbitmq.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"117915543","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 
@Date : 2015-12-13 14:37:40\n# @Author : Alex Tang (1174779123@qq.com)\n# @Link : http://t1174779123.iteye.com\n\n'''\n\tdescription:\n'''\n\nclass Solution(object):\n    def uniquePaths(self, m, n):\n        \"\"\"\n        :type m: int\n        :type n: int\n        :rtype: int\n        \"\"\"\n        result = 1\n        for i in range(n-1):\n        \tresult *= m + n - 2 - i\n        for i in range(n-1):\n        \tresult //= i + 1  # divide by (n-1)! one factor at a time; i + 1 avoids dividing by zero\n        return result\n        \n\n","repo_name":"PrivateVictory/Leetcode","sub_path":"src/ziyi/Array/UniquePaths.py","file_name":"UniquePaths.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
+{"seq_id":"40750647949","text":"from typing import List\n\n\nclass Solution0944:\n    def minDeletionSize(self, strs: List[str]) -> int:\n        row, col = len(strs), len(strs[0])\n        res = 0\n        for c in range(col):\n            for r in range(1,row):\n                if ord(strs[r][c]) < ord(strs[r-1][c]):\n                    res += 1\n                    break\n        return res\n\nsol = Solution0944()\nstrs = [\"cba\",\"daf\",\"ghi\"]\nres = sol.minDeletionSize(strs)\nprint('input :', strs)\nprint('output:', res)","repo_name":"zt5rice/LC-archive","sub_path":"0944. Delete Columns to Make Sorted/Solution0944.py","file_name":"Solution0944.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"44028367261","text":"def fact(x):\n    if x==0 or x==1:\n        return 1\n    else:\n        return x*fact(x-1)\ndef sin(x,n):\n    power=1\n    sign=1\n    s=0\n    for i in range(n):\n        s=s+(sign*(x**power)/fact(power))\n        power=power+2\n        sign=sign*-1\n    return s\nprint(\"This program will calculate value of sinx using taylor series upto n terms: \")\nx=float(input(\"Enter the value of x: \"))\nn=int(input(\"Enter the value of n: \"))\nprint(\"Calculating...\")\nprint(\"Value of sinx upto n-terms using taylor series is: \",sin(x,n))\n","repo_name":"raviprakashdev/simplePythonProgram","sub_path":"SineTaylorSeries.py","file_name":"SineTaylorSeries.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"}
+{"seq_id":"22256357187","text":"import logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\ncheck = False\nec2 = boto3.resource('ec2', region_name='eu-west-1')\nec2c = boto3.client('ec2', region_name='eu-west-1')\nfor rds_security_group in ec2c.describe_security_groups()['SecurityGroups']:\n    if rds_security_group['GroupName'] == 'SSH-HTTP-ONLY': \n        check = check | True\n    else:\n        check = check | False\nif check == False:\n    try:\n        securitygroup = ec2.create_security_group(GroupName='SSH-HTTP-ONLY', Description='only allow SSH and HTTP traffic')\n        securitygroup.authorize_ingress(CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=22, ToPort=22)\n        securitygroup.authorize_ingress(CidrIp='0.0.0.0/0', IpProtocol='tcp', FromPort=80, ToPort=80)\n    except ClientError as e:\n        logging.error(e)\n","repo_name":"alexwork13/python_aws","sub_path":"security_group.py","file_name":"security_group.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"50539566066","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom staticpages.views import PageView, HomeView\nfrom userspace.views import UserActivityView\nfrom places_core.views import FileServeView\nadmin.autodiscover()\n# include action hooks globally\nfrom 
places_core import actstreams\n\n# djangorestframework\nfrom rest_framework import routers\nfrom rest import views\nrouter = routers.DefaultRouter()\n\n# Api Views\n#router.register(r'news_add', views.SimpleNewsViewSet, base_name=\"news_add\")\nrouter.register(r'current_user', views.CurrentUserViewSet, base_name='current_user')\n\n# Site Views\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'categories', views.CategoryViewSet)\nrouter.register(r'comments', views.CommentsViewSet, base_name=r'comment')\nrouter.register(r'votes', views.CommentVoteViewSet, base_name=r'vote')\nrouter.register(r'tags', views.TagViewSet, base_name=r'tag')\nrouter.register(r'news', views.NewsViewSet, base_name=r'news')\nrouter.register(r'discussion', views.ForumCategoryViewSet, base_name=r'discussion')\nrouter.register(r'topics', views.ForumViewSet, base_name=r'topics')\nrouter.register(r'replies', views.DiscussionRepliesViewSet, base_name=r'replies')\nrouter.register(r'reports', views.AbuseReportViewSet, base_name=r'reports')\nrouter.register(r'idea_categories', views.IdeaCategoryViewSet, base_name=r'idea_categories')\nrouter.register(r'badges', views.BadgeViewSet, base_name=r'badges')\nrouter.register(r'galleries', views.GalleryViewSet, base_name=r'galleries')\nrouter.register(r'ideas', views.IdeaListViewSet, base_name=r'ideas')\nrouter.register(r'idea_votes', views.IdeaVoteCounterViewSet, base_name=r'idea_votes')\nrouter.register(r'usermedia', views.MediaViewSet, base_name=r'usermedia')\nrouter.register(r'my_actions', views.UserActionsRestViewSet, base_name=r'my_actions')\nrouter.register(r'polls', views.PollListViewSet, base_name=r'polls')\nrouter.register(r'locationlist', views.LocationBasicViewSet, base_name=r'locationlist')\n# django sitemaps framework\nimport places_core.sitemaps as sitemaps\nsitemaps = {\n 'locations': sitemaps.LocationSitemap,\n 'ideas' : sitemaps.IdeaSitemap,\n 'news' : sitemaps.NewsSitemap,\n 'polls' : sitemaps.PollsSitemap,\n 'discussions': sitemaps.DiscussionSitemap,\n 'projects': sitemaps.ProjectsSitemap,\n 'articles': sitemaps.ArticleSitemap,\n 'documents': sitemaps.EtherpadSitemap,\n}\n# Javascript translations catalog\njs_info_dict = {\n 'packages': (\n 'comments',\n 'blog',\n 'gallery',\n 'locations',\n 'maps',\n 'topics',\n 'userspace',\n ),\n}\n\n# Django Rest Framework\n# ------------------------------------------------------------------------------\nfrom locations.urls import router as location_router\nfrom ideas.urls import router as idea_router\nfrom topics.urls import router as discussion_router\nfrom blog.urls import router as blog_router\nfrom maps.urls import router as map_router\nfrom userspace.urls import router as user_router\nfrom places_core.urls import router as core_router\nfrom gallery.urls import router as gallery_router\nfrom notifications.urls import router as notification_router\nfrom activities.urls import router as activity_router\nurlpatterns = patterns('',\n url(r'^api-ideas/', include(idea_router.urls)),\n url(r'^api-locations/', include(location_router.urls)),\n url(r'^api-discussions/', include(discussion_router.urls)),\n url(r'^api-blog/', include(blog_router.urls)),\n url(r'^api-maps/', include(map_router.urls)),\n url(r'^api-userspace/', include(user_router.urls)),\n url(r'^api-core/', include(core_router.urls)),\n url(r'^api-gallery/', include(gallery_router.urls)),\n url(r'^api-notifications/', include(notification_router.urls)),\n url(r'^api-activities/', include(activity_router.urls)),\n)\n\nfrom civmail.views import 
InviteFriendsView\nurlpatterns += patterns('',\n    url(r'^invite-friends/', InviteFriendsView.as_view(), name=\"invite_friends\"),\n)\n\nfrom places_core.views import set_language\nurlpatterns += patterns('',\n    url(r'^', include('hitcounter.urls')),\n    url(r'^organizations/', include('organizations.urls', namespace=\"organizations\")),\n    # user account\n    url(r'^user/', include('userspace.urls', namespace='user')),\n    url(r'^users/', include('userspace.urls', namespace='user')),\n    url(r'^bookmarks/', include('bookmarks.urls')),\n    # Email app\n    url(r'^civmail/', include('civmail.urls', namespace='civmail')),\n    # Maps\n    url(r'^maps/', include('maps.urls', namespace='maps')),\n    # blog\n    url(r'^news/', include('blog.urls', namespace='blog')),\n    # ideas\n    url(r'^ideas/', include('ideas.urls', namespace='ideas')),\n    # django-activity-stream\n    url(r'^activity/', UserActivityView.as_view()),\n    # social auth\n    url('', include('social.apps.django_app.urls', namespace='social')),\n    # django-discussions (e.g. user messages)\n    # disabled because of lack of South integration\n    #url('^messages/', include('discussions.urls', namespace='messages')),\n    # Basic project views - the rest is within locations\n    url('^projects/', include('projects.urls', namespace='projects')),\n    # Discussions (e.g. forum)\n    url('^discussion/', include('topics.urls', namespace='discussion')),\n    # comments\n    url('^comments/', include('comments.urls', namespace='comments')),\n    # admin panel\n    url(r'^fuck-off-i-am-awesome/', include(admin.site.urls)),\n    # Abuse reports (static)\n    url(r'^report/', include('places_core.urls', namespace='reports')),\n    # User media\n    url(r'^gallery/', include('gallery.urls', namespace='gallery')),\n    # Polls app\n    url(r'^polls/', include('polls.urls', namespace='polls')),\n    # Static pages\n    url(r'^pages/', include('staticpages.urls', namespace='pages')),\n    # media\n    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n        'document_root': settings.MEDIA_ROOT,\n    }),\n    # REST server\n    url(r'^rest/', include(router.urls, namespace='rest')),\n    # Haystack - search engine\n    url(r'^search/', include('haystack.urls', namespace='search')),\n    # django-messages: messages between users\n    url(r'^messages/', include('postman.urls')),\n    # Language support\n    url(r'^i18n/setlang', set_language, name='set_language'),\n    url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),\n    # For robots - indexers :)\n    url(r'^robots.txt$', FileServeView.as_view(filename='robots.txt')),\n    # Captcha\n    url(r'^captcha/', include('captcha.urls')),\n    # Static Pages\n    # Definitions of static pages go here, you can add new ones by\n    # copy-paste method.\n    url(r'^home/', PageView.as_view(page='home')),\n    url(r'^home-b/', PageView.as_view(page='home-b')),\n    #url(r'^about/', PageView.as_view(page='about')),\n    #url(r'^privacy/', PageView.as_view(page='privacy')),\n    #url(r'^terms/', PageView.as_view(page='terms')),\n    #url(r'^cookies/', PageView.as_view(page='cookies')),\n    #url(r'^contact/', PageView.as_view(page='contact')),\n    #url(r'^jobs/', PageView.as_view(page='jobs')),\n    #url(r'^press/', PageView.as_view(page='press')),\n    #url(r'^mission/', PageView.as_view(page='mission')),\n    #url(r'^team/', PageView.as_view(page='team')),\n    #url(r'^values/', PageView.as_view(page='values')),\n    #url(r'^creed/', PageView.as_view(page='creed')),\n    #url(r'^support/', PageView.as_view(page='support')),\n    url(r'^feature/', PageView.as_view(page='feature')),\n\n    # Etherpad - live collaboration tool\n    url(r'^', include('etherpad.urls')),\n\n    # Notifications for 
users - mainly views for testing.\n    url(r'^', include('notifications.urls')),\n\n    # Default URL - Do not add anything below!!!\n    #url(r'^$', PageView.as_view(page='home')),\n    url(r'^$', HomeView.as_view()),\n\n    #url(r'^$', staticpages.views.HomeView.as_view()),\n    url(r'^', include('articles.urls', namespace='articles')),\n    url(r'^', include('locations.urls', namespace='locations')),\n)\n\nurlpatterns += patterns('django.contrib.sitemaps.views',\n    (r'^sitemap\\.xml$', 'index', {'sitemaps': sitemaps}),\n    (r'^sitemap-(?P<section>
.+)\\.xml$', 'sitemap', {'sitemaps': sitemaps}),\n)\n","repo_name":"14mmm/CivilHub","sub_path":"places/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"73106891449","text":"\"\"\"\nscatter plot and line plot samples\n\"\"\"\n\n#character color\n#b blue\n#g green\n#r red\n#c cyan\n#m magenta\n#y yellow\n#k black\n#w white\n\n#line style\n#plot(x,y, '--')\n#Other linestyles you can use can be found on the Matplotlib webpage http://\n#matplotlib.sourceforge.net/api/pyplot api.html#matplotlib.pyplot.plot.\n\n#marker types:\n#'s' square marker\n#'p' pentagon marker\n#'*' star marker\n#'h' hexagon1 marker\n#'H' hexagon2 marker\n#'+' plus marker\n#'x' x marker\n#'D' diamond marker\n#'d' thin diamond marker\n#'o' circle marker\n\n#****************************************************\n# scatterplot.py\nimport numpy as np\nimport pylab as pl\n# Make an array of x values\nx = [1, 2, 3, 4, 5]\n# Make an array of y values for each x value\ny = [1, 4, 9, 16, 25]\n# use pylab to plot x and y as \"r\" red \"D\" diamonds \"--\" dashed line\npl.plot(x, y, 'rD--')\n#plot limits\npl.xlim(-2, 10)\npl.ylim(0, 27)\n\n# show the plot on the screen\npl.xlabel('x-axis')\npl.ylabel('y-axis')\npl.title('Title Plot')\npl.show()\n\n#****************************************************\n\n# lineplotFigLegend.py\nimport numpy as np\nimport pylab as pl\n# Make x, y arrays for each graph\nx1 = [1, 2, 3, 4, 5]\ny1 = [1, 4, 9, 16, 25]\nx2 = [1, 2, 4, 6, 8]\ny2 = [2, 4, 8, 12, 16]\n# use pylab to plot x and y : Give your plots names\nplot1 = pl.plot(x1, y1, 'r')\nplot2 = pl.plot(x2, y2, 'go')\n# give plot a title\npl.title('Plot of y vs. x')\n# make axis labels\npl.xlabel('x axis')\npl.ylabel('y axis')\n# set axis limits\npl.xlim(0.0, 9.0)\npl.ylim(0.0, 30.)\n# make legend\npl.legend([plot1, plot2], ('red line', 'green circles'), 'best', numpoints=1)\n# show the plot on the screen\npl.show()\n","repo_name":"lawrencl/PythonPlotting","sub_path":"scatter_line_plot.py","file_name":"scatter_line_plot.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37284989182","text":"def solution(s) :\n    answer = len(s)\n    length = len(s) # store the string length \n    count = 0 # repeat counter\n    equ = '' # chunk to compare against\n    string = '' # buffer for the compressed string\n    for i in range(1,int(length/2)+1,1) : # chunk size can be at most half the string length\n        string=''\n        for j in range(0,length,i) : # walk the string in chunks of size i\n            if j==0 :\n                equ = s[j:j+i] # first chunk\n            if s[j:j+i] == equ : # same chunk: increment the count\n                count+=1\n            else : # if the chunk repeated, prepend the count; otherwise append the chunk as-is\n                if count>1 :\n                    string+=str(count)+equ\n                else :\n                    string+=equ\n                equ=s[j:j+i]\n                count=1\n        if count>1 :\n            string+=str(count)+equ\n        else :\n            string+=equ\n        count=0\n        if answer>len(string) :\n            answer=len(string)\n    print(answer)\n    \nsolution(\"abcabcabcabcdededededede\")\n","repo_name":"supungbab/algorithm","sub_path":"13_Week/StringZip.py","file_name":"StringZip.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"33659173043","text":"# -*- coding: utf-8 -*-\n\nimport socket, random\n\n# random number to simulate oximeter readings\nrnd = random.randint(80,98)\n\n#18.188.134.59\ndef client(host = 'localhost', port=8080): \n    # Create a TCP/IP socket \n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    # Connect the socket to the server \n    server_address = (host, port) 
\n    print (\"Conectando ao endereco %s:%s\" % server_address) \n    sock.connect(server_address)\n    \n    # Send data \n    try: \n        while True:\n            # Send data \n            message = \"Saturacao sanguinea =\"\n            i = 0\n            if i == 0:\n                value = rnd\n                dif = random.randint(-1,1)\n                value = value + dif\n                print (\"Enviando o dado: %s\" %message, value, \"%\")\n                message = \"Cliente1,Saturacao Sanguinea,\"+str(value)\n                sock.sendall(message.encode('utf-8')) \n                # Look for the response \n                # amount_received = 0 \n                # amount_expected = len(message) \n                # while amount_received < amount_expected: \n                data = sock.recv(254) \n                # amount_received += len(data) \n                # print (\"Received: %s\" %data)\n                i+=1\n                if not data:\n                    break\n            \n    except socket.error as e: \n        print (\"Socket error: %s\" %str(e)) \n    \n    except Exception as e: \n        print (\"Other exception: %s\" %str(e)) \n    \n    finally: \n        sock.close() \n        print (\"Fechando a conexao com o server...\")\n    \nclient()","repo_name":"ramondcsilva/COVID-19-Monitoring-System","sub_path":"Server/TcpCliente1.py","file_name":"TcpCliente1.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"14652612266","text":"\"\"\"Flask app for Cupcakes\"\"\"\nfrom flask import Flask, request, jsonify, render_template\nfrom models import db, connect_db, Cupcake\nfrom serializer import serialize_cupcake\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgres:///cupcakes\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\nconnect_db(app)\ndb.create_all()\n\n\napp.config['SECRET_KEY'] = 'apoisdhfpaiosdhfpioasd'\n\n\n@app.route('/api/cupcakes')\ndef get_cupcakes():\n    cupcakes = Cupcake.query.all()\n    serialized = [Cupcake.serialize(c) for c in cupcakes]\n\n    return (jsonify(cupcakes=serialized), 200)\n\n\n@app.route('/api/cupcakes/<int:id>')\ndef get_cupcake(id):\n    cupcake = Cupcake.query.get(id)\n\n    if cupcake:\n        serialized = Cupcake.serialize(cupcake)\n        return (jsonify(cupcake=serialized), 200)\n    else:\n        return (jsonify(error=\"Cupcake doesn't exist.\"), 404)\n\n\n@app.route('/api/cupcakes', methods=['POST'])\ndef add_cupcake():\n    print(\"****************** IN POST ********\", request.json)\n    flavor = request.json.get('flavor')\n    size = request.json.get('size')\n    rating = request.json.get('rating')\n    image = request.json.get('image') or None\n\n\n    new_cupcake = Cupcake(\n        flavor=flavor,\n        size=size,\n        rating=rating,\n        image=image)\n\n    db.session.add(new_cupcake)\n    db.session.commit()\n\n    serialized = Cupcake.serialize(new_cupcake)\n\n    return (jsonify(cupcake=serialized), 201)\n\n\n@app.route('/api/cupcakes/<int:id>', methods=[\"PATCH\"])\ndef update_cupcake(id):\n    cupcake = Cupcake.query.get_or_404(id)\n    cupcake.flavor = request.json.get('flavor')\n    cupcake.size = request.json.get('size')\n    cupcake.rating = request.json.get('rating')\n    cupcake.image = request.json.get('image') or \"https://tinyurl.com/demo-cupcake\"\n\n    db.session.commit()\n\n    serialized = Cupcake.serialize(cupcake)\n\n    return (jsonify(cupcake=serialized), 200)\n\n\n@app.route('/api/cupcakes/<int:id>', methods=[\"DELETE\"])\ndef delete_cupcake(id):\n    cupcake = Cupcake.query.get_or_404(id)\n    db.session.delete(cupcake)\n    db.session.commit()\n\n    return (jsonify(message=\"Deleted\"), 200)\n\n\n@app.route('/')\ndef get_home_html():\n    return 
render_template(\"index.html\")\n","repo_name":"GroverW/flask-cupcakes","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42100702388","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom constants import Constants\nfrom overrides import overrides\nfrom indicatorLightWidget import IndicatorLightWidget\n\nimport pyqtgraph as pg\n\nimport math\nimport time\n\n\"\"\"\nThis file contains the class to make pyQtGraph Bearable to work with\n\"\"\"\n\n\nclass PlotWidgetWrapper(pg.PlotWidget):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n\n super().__init__()\n\n self.plot_widget = self\n self.plot_item = self.plot_widget.getPlotItem()\n self.left_view_box = self.plot_item.getViewBox()\n\n self.left_view_box.name = \"Left\"\n self.left_view_box.sigYRangeChanged.connect(self.YaxisChanged)\n\n self.right_view_box = None\n self.curves = {}\n self.curves_label_alias = {}\n\n self.left_view_box.sigResized.connect(self.updateViews)\n\n def setBackgroundColor(self, r:int, g:int, b:int):\n self.left_view_box.setBackgroundColor((r, g, b))\n\n # Fixes some bug in pyqtgraph that makes things not show\n self.plot_item.getAxis(\"right\").setZValue(0)\n self.plot_item.getAxis(\"bottom\").setZValue(1)\n self.plot_item.getAxis(\"left\").setZValue(1)\n self.plot_item.getAxis(\"top\").setZValue(1)\n self.left_view_box.setZValue(1)\n\n def showGrid(self, showXGrid: bool, showYGrid: bool, alpha: float = 1):\n self.plot_item.showGrid(showXGrid, showYGrid, alpha)\n\n def setMouseEnabled(self, xEnabled:bool, yEnabled:bool):\n self.left_view_box.setMouseEnabled(xEnabled, yEnabled)\n\n if self.right_view_box is not None:\n self.right_view_box.setMouseEnabled(xEnabled, yEnabled)\n\n def setTitleSize(self, pointSize: str):\n self.plot_item.titleLabel.setAttr(\"size\", pointSize)\n\n def setTitleColor(self, color: str):\n self.plot_item.titleLabel.setAttr(\"color\", color)\n\n def setClipToView(self, clip_to_view):\n self.plot_item.setClipToView(clip_to_view)\n\n def setAxisLabel(self, target_axis: str, label: str):\n self.plot_item.setLabel(target_axis, label)\n\n def setAxisLabelColor(self, target_axis: str, color: str):\n args = {\"color\": color}\n self.plot_item.getAxis(target_axis).setLabel(text = self.plot_item.getAxis(target_axis).labelText, units = self.plot_item.getAxis(target_axis).labelUnits, unitPrefix = self.plot_item.getAxis(target_axis).labelUnitPrefix, **args)\n\n def setAxisTickFont(self, target_axis: str, font):\n self.plot_item.getAxis(target_axis).setTickFont(font)\n\n # def setLegendLabels(self, curves, labels):\n #\n # if self.plot_item.legend is None:\n # self.plot_item.addLegend()\n #\n # self.plot_item.legend.clear()\n # for i, curve in enumerate(curves):\n # self.plot_item.legend.addItem(curve, labels[i])\n\n def showLegend(self):\n\n if self.plot_item.legend is None:\n self.plot_item.addLegend()\n\n self.plot_item.legend.clear()\n for curve_label in self.curves:\n if type(self.curves[curve_label]) is not pg.InfiniteLine:\n self.plot_item.legend.addItem(self.curves[curve_label], self.curves_label_alias.get(curve_label, curve_label))\n\n self.plot_item.legend.setOffset((5, 5))\n\n def removeLegend(self):\n self.plot_item.legend.clear()\n\n def setLegendColors(self, backgroundRGBA: [int], borderRGBA: [int]):\n if self.plot_item.legend is None:\n self.plot_item.addLegend()\n\n self.plot_item.legend.setBrush(\n 
pg.mkBrush(backgroundRGBA[0], backgroundRGBA[1], backgroundRGBA[2], backgroundRGBA[3]))\n\n        self.plot_item.legend.setPen(\n            pg.mkPen(borderRGBA[0], borderRGBA[1], borderRGBA[2], borderRGBA[3]))\n\n    def setLegendFontColor(self, r:int, g:int, b:int):\n        if self.plot_item.legend is None:\n            self.plot_item.addLegend()\n\n        self.plot_item.legend.setLabelTextColor((r,g,b))\n\n    def setLegendTextSize(self, size):\n        if self.plot_item.legend is None:\n            self.plot_item.addLegend()\n\n        # This is so stupid I cant with this library\n        self.plot_item.legend.setLabelTextSize(str(size) + \"pt\")\n\n    def addRightAxis(self):\n        self.right_view_box = pg.ViewBox()\n        self.right_view_box.name = \"Right\"\n\n        self.right_view_box.sigYRangeChanged.connect(self.YaxisChanged)\n        self.plot_item.showAxis(\"right\")\n        self.setAxisLabel(\"right\", \" \")\n        self.plot_item.scene().addItem(self.right_view_box)\n        self.plot_item.getAxis('right').linkToView(self.right_view_box)\n        self.right_view_box.setZValue(0)\n        self.right_view_box.setMouseEnabled(False, False)\n\n        self.right_view_box.setGeometry(self.left_view_box.sceneBoundingRect())\n        self.right_view_box.linkedViewChanged(self.left_view_box, self.right_view_box.XAxis)\n\n    def hideRightAxis(self):\n\n        # TODO: Make sure that the signal was disconnected when deleted\n        #self.right_view_box.sigYRangeChanged.disco\n\n        self.setAxisLabel(\"right\", \"\")\n        self.plot_item.hideAxis(\"right\")\n        self.plot_item.scene().removeItem(self.right_view_box)\n        self.plot_item.getAxis(\"right\").unlinkFromView()\n\n        self.right_view_box.deleteLater()\n        del self.right_view_box\n\n        self.right_view_box = None\n\n    def addCurve(self, label: str, color: QColor, width: int = 4, axis: str = \"left\"):\n        self.curves[label] = pg.PlotCurveItem()\n\n        self.curves[label].setPen(pg.mkPen(color, width = width))\n\n        if axis == \"left\":\n            self.plot_item.addItem(self.curves[label])\n        elif axis == \"right\":\n            self.right_view_box.addItem(self.curves[label])\n\n        return self.curves[label]\n\n    def addInfiniteLineCurve(self, label: str, color:QColor , val, angle, width:int = 4, axis: str = \"left\"):\n        self.curves[label] = pg.InfiniteLine(pos=val, angle=angle)\n\n        self.curves[label].setPen(pg.mkPen(color, width = width))\n\n        if axis == \"left\":\n            self.plot_item.addItem(self.curves[label])\n        elif axis == \"right\":\n            self.right_view_box.addItem(self.curves[label])\n\n        return self.curves[label]\n\n    def addCurveLabelAlias(self, curve_label:str, alias: str):\n\n        if curve_label in self.curves:\n            self.curves_label_alias[curve_label] = alias\n        else:\n            print(\"Curve label not found! 
Alias not set\")\n\n def updateViews(self):\n if self.right_view_box is not None:\n self.right_view_box.setGeometry(self.left_view_box.sceneBoundingRect())\n self.right_view_box.linkedViewChanged(self.left_view_box, self.right_view_box.XAxis)\n\n def clearLegend(self):\n self.plot_item.legend.clear()\n\n def removeAllCurves(self):\n\n for child in self.left_view_box.allChildren():\n if type(child) == pg.PlotCurveItem or type(child) == pg.InfiniteLine:\n self.left_view_box.removeItem(child)\n\n if self.right_view_box is not None:\n for child in self.right_view_box.allChildren():\n if type(child) == pg.PlotCurveItem or type(child) == pg.InfiniteLine:\n self.right_view_box.removeItem(child)\n\n self.clearLegend()\n self.curves = {}\n self.curves_label_alias = {}\n\n @pyqtSlot(object, object)\n def YaxisChanged(self, viewbox, bounds):\n #print(\"Called\")\n # print(a.name)\n # print(b[1])\n # print(a.state['viewRange'][1])\n pass\n\n \"\"\"\n I Hope it is well know that I fucking hate pyqtgraph. Hours have been spent doing the stupidest simplest shit. Down\n here shall lie tons of comments on randoms things that seems helpful to do that can be implemented at a later date\n \"\"\"\n\n \"\"\"\n prevent scrolling\n \"\"\"\n #self.right_view_box.installEventFilter(self)\n\n # def eventFilter(self, watched, event):\n # if event.type() == QEvent.GraphicsSceneWheel:\n # return True\n # return super().eventFilter(watched, event)\n\n \"\"\"\n Add event action to right click menu\n \"\"\"\n # self.testAct = QAction(\"Test\", self.plot2.left_view_box.menu)\n # self.testAct.triggered.connect(self.pleaseWorkOmg)\n # self.plot2.left_view_box.menu.addAction(self.testAct)\n\n \"\"\"\n Add in button on plot window\n \"\"\"\n # button2 = pg.ButtonItem(imageFile=pg.icons.getGraphPixmap('auto'), width= 14, parentItem=self.plot2.plot_item, pixmap=None)\n\n \"\"\"\n How to add a curve properly\n \"\"\"\n\n #curve = pg.PlotCurveItem()\n # self.left.addItem(curve)\n # curve.setData(x = np.array([0, 1]), y = np.array([10, 20]))\n # curve.setPen(\n # pg.mkPen(color='r',width = 2))\n #\n # curve2 = pg.PlotCurveItem()\n # #self.left.addItem(curve)\n # curve2.setData(x=np.array([0, 1]), y=np.array([10, 35]))\n # curve2.setPen(\n # pg.mkPen(color='r', width=2))\n # #self.left.legend.addItem(curve, 'teset')\n #\n # self.plot2.addCurve(\"Garbage\")\n # self.plot2.addCurveLabelAlias(\"Garbage\", \"Yo Boss\")\n # self.plot2.curves[\"Garbage\"].setData(x = np.array([0, 1]), y = np.array([0, 15]))\n # self.plot2.curves[\"Garbage\"].setPen(pg.mkPen(color=QtGui.QColor(self.colors[0].color()),width = 2))\n # #self.plot2.right_view_box.addItem(curve)\n # #self.plot2.right_view_box.addItem(curve2)\n","repo_name":"masa-umich/Ground-Support-Software","sub_path":"Python/plotWidgetWrapper.py","file_name":"plotWidgetWrapper.py","file_ext":"py","file_size_in_byte":9325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15271468339","text":"import pickle\nfrom itertools import islice\n\nimport boto3\nfrom PIL import Image, ExifTags\nimport logging\nimport numpy as np\nimport cv2\nimport json\nfrom mtcnn.mtcnn import MTCNN\nfrom numpy import asarray\n\n\ndef set_logger(log_path):\n \"\"\"Sets the logger to log info in terminal and file `log_path`.\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. 
Here we save it to `model_dir/train.log`.\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path, mode='a')\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n\n\ndef rotate_image(img):\n try:\n image = Image.open(img)\n for orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n break\n exif = dict(image._getexif().items())\n\n if exif[orientation] == 3:\n image = image.rotate(180, expand=True)\n elif exif[orientation] == 6:\n image = image.rotate(270, expand=True)\n elif exif[orientation] == 8:\n image = image.rotate(90, expand=True)\n return image\n except (AttributeError, KeyError, IndexError):\n image = Image.open(img)\n return image\n\n\ndef get_partition_label():\n with open('data/label.json') as f:\n label = json.load(f)\n\n with open('data/partition.json') as f:\n partition = json.load(f)\n return partition, label\n\n\ndef data_generation(user_id_list_temp, label, args):\n s3 = boto3.client('s3')\n # Generates data containing batch_size samples\n data_len = len(user_id_list_temp)\n x = np.empty((data_len, args.frame_size, args.image_size, args.image_size, 3))\n y = np.empty((data_len, 2), dtype=int)\n resized_img = np.empty((args.frame_size, args.image_size, args.image_size, 3))\n mask = np.empty((data_len, args.frame_size), dtype=int)\n\n # Generate data\n for i, user_id in enumerate(user_id_list_temp):\n # rename error\n y[i] = label[str(user_id)]\n user_id = str(user_id)\n if user_id[:3] == '999' and not (user_id == '9993' or user_id == '9996'):\n user_id = '#' + user_id[3:]\n\n response = s3.get_object(Bucket='cureskin-dataset', Key='new_data/image_{}.pkl'.format(user_id))\n body = response['Body'].read()\n img_frame = pickle.loads(body)\n\n for j, img in enumerate(img_frame):\n resized_img[j, ] = cv2.resize(img, dsize=(args.image_size, args.image_size), interpolation=cv2.INTER_LINEAR)\n del img_frame\n resized_img /= 255\n # (batch, frame, size, size, channel)\n x[i, ] = resized_img\n img_mask = np.all((resized_img == 0), axis=1)\n img_mask = np.all((img_mask == True), axis=1)\n img_mask = np.all((img_mask == True), axis=1)\n img_mask = np.logical_not(img_mask)\n mask[i, ] = img_mask\n\n for x, mask, y in zip(x, mask, y):\n yield {'img': x, 'mask': mask}, y\n\n\ndef pre_process(img_frame, args):\n x = np.empty((1, args.frame_size, args.image_size, args.image_size, 3))\n mask = np.empty((1, args.frame_size), dtype=int)\n\n x[0, ] = img_frame\n img_mask = np.all((img_frame == 0), axis=1)\n img_mask = np.all((img_mask == True), axis=1)\n img_mask = np.all((img_mask == True), axis=1)\n img_mask = np.logical_not(img_mask)\n mask[0, ] = img_mask\n\n return x, mask\n\n\ndef pre_process_post(img_frame, args):\n face_img_frame = np.empty((args.frame_size, args.image_size, args.image_size, 3))\n x = np.empty((1, args.frame_size, args.image_size, args.image_size, 3))\n mask = np.empty((1, args.frame_size), dtype=int)\n detector = MTCNN()\n\n for i, img in islice(enumerate(img_frame), 0, args.frame_size):\n img = Image.fromarray((img * 255).astype(np.uint8))\n img = img.convert('RGB')\n img = 
asarray(img)\n        results = detector.detect_faces(img)\n\n        if len(results) == 0:\n            face_img_frame[i, ] = np.zeros((args.image_size, args.image_size, 3))\n        else:\n            x1, y1, width, height = results[0]['box']\n            x1, y1 = abs(x1), abs(y1)\n            x2, y2 = x1 + width, y1 + height\n            face = img[y1:y2, x1:x2]\n            face = Image.fromarray(face)\n            face = face.resize((args.image_size, args.image_size))\n            print(face.size)  # PIL images expose .size, not .shape\n            face_img_frame[i, ] = face\n    face_img_frame /= 255\n\n    x[0, ] = face_img_frame\n    img_mask = np.all((face_img_frame == 0), axis=1)\n    img_mask = np.all((img_mask == True), axis=1)\n    img_mask = np.all((img_mask == True), axis=1)\n    img_mask = np.logical_not(img_mask)\n    mask[0, ] = img_mask\n\n    return x, mask\n\n\ndef detect_face(img, args):\n    detector = MTCNN()\n    img = Image.fromarray(img.astype(np.uint8))\n    img = img.convert('RGB')\n    img = asarray(img)\n    results = detector.detect_faces(img)\n    if len(results) == 0:\n        return np.zeros((args.image_size, args.image_size, 3))\n    else:\n        x1, y1, width, height = results[0]['box']\n        x1, y1 = abs(x1), abs(y1)\n        x2, y2 = x1 + width, y1 + height\n        face = img[y1:y2, x1:x2]\n        face = Image.fromarray(face)\n        face = face.resize((args.image_size, args.image_size))\n        return face\n","repo_name":"tedyap/SkinCredible","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36401316113","text":"import csv\nimport os\nimport sys\nimport tkinter as tk\nfrom tkinter import PhotoImage, filedialog, messagebox\nfrom tkinter import ttk\n\nimport psycopg2\n\nconn = psycopg2.connect(\n    host=\"localhost\",\n    database=\"Dhenusya\",\n    user=\"postgres\",\n    password=\"96528728\")\npgcur=conn.cursor()\ndef upload():\n    filenames = []\n    for filepath in filedialog.askopenfilenames():\n        filename = os.path.basename(filepath)\n        filenames.append(filename)\n        \n        with open(filepath, \"r\") as file:\n            reader = csv.reader(file)\n            headers = next(reader)\n\n            min_date=min([row[0] for row in reader])\n            # first_column_data = []\n            # for row in reader:\n            #     first_column_data.append(row[0])\n            # min_date = min(first_column_data)\n            \n            print(f\"date for where condition:{min_date}\")\n\n            print(filename)\n            # Add a new column to the dataframe with the filename\n            check_sql = f\"SELECT COUNT(*) FROM do_f_uploaded_csv_files WHERE filename = '{filename}';\"\n            pgcur.execute(check_sql)\n            count = pgcur.fetchone()[0]\n            if count > 0:\n                # messagebox.showerror(\"Error\", \"File has already been uploaded, Do you want to over ride.\")\n                choice = messagebox.askyesno(f\"Error\", \"File has already been uploaded, do you want to override?\\n{}\\nClick Yes To Continue!\".format(filename))\n                if choice == True:\n                    Delete_sql = f\"Delete from do_f_daily_milk_tracker where date ='{min_date}'\"\n                    print(Delete_sql) \n                    pgcur.execute(Delete_sql)\n                    messagebox.showwarning(f\"Success\",\"Previous CSV's file \\n {} Data Deleted\".format(filename))\n\n                    copy_sql = f\"COPY do_f_daily_milk_tracker FROM '{filepath}' DELIMITER ',' CSV HEADER;\"\n                    pgcur.execute(copy_sql)\n                    messagebox.showinfo(f\"Success\",\"CSV's Files Uploaded to DataBase\")\n\n                    insert_sql = f\"INSERT INTO do_f_uploaded_csv_files (filename) VALUES ('{filename}');\"\n                    pgcur.execute(insert_sql)\n                    conn.commit()\n                    messagebox.showinfo(f\"Success\",\"Filenames Are inserted\")\n                else:\n                    continue\n            else:\n                copy_sql = f\"COPY do_f_daily_milk_tracker FROM '{filepath}' DELIMITER ',' CSV HEADER;\"\n                pgcur.execute(copy_sql)\n                
messagebox.showinfo(f\"Success\",\"CSV's Files Uploaded to DataBase\")\n                insert_sql = f\"INSERT INTO do_f_uploaded_csv_files (filename) VALUES ('{filename}');\"\n                pgcur.execute(insert_sql)\n                conn.commit()\n                messagebox.showinfo(f\"Success\",\"Filenames Are inserted\") \n    pgcur.execute(\"SELECT column_name FROM information_schema.columns WHERE table_name = 'do_f_daily_milk_tracker' ORDER BY ordinal_position\")\n    columns = [col[0] for col in pgcur.fetchall()]\n\n    if set(headers) != set(columns):\n        print(\"The columns in the CSV file do not match the columns in the database.\")\n    else:\n        print(\"The columns match.\")\n    # Print the count and columns of the CSV\n    print(\"CSV file columns: {} ({} columns)\".format(headers, len(headers)))\n\n    # Print the count and columns of the database\n    print(\"Database columns: {} ({} columns)\".format(columns, len(columns)))\n    \n    return \n# Create the Tkinter window and buttons\nroot = tk.Tk()\nroot.geometry(\"600x800\") \nroot.title(\"Dhenusya Organics\") \n\nlabel = tk.Label(root, text=\"Welcome to Dhenusya Organics UI design!\")\nlabel.pack()\n\nbackground_image = PhotoImage(file=\"D:/Python/Dhenusya.png\")\nimg =background_image.zoom(int(round(2)), int(round(2)))\nbackground_label = tk.Label(root, image=img)\n\n# Create a label to display the image\nlabel = ttk.Label(root, image=img)\nbackground_label.pack(fill=\"both\", expand=True, side=tk.BOTTOM, anchor=\"sw\")\n\n\n# upload csv button\nbutton1 = tk.Button(root, command=upload, text=\"Daily CSV Upload!!\", bg=\"Deep Sky Blue\")\nbutton1.place(x=50, y=30)\nroot.mainloop()","repo_name":"TeluguSuresh/DhenusyaOrganics-UI_Design-","sub_path":"Upload_CSV.py","file_name":"Upload_CSV.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"46825058046","text":"import streamlit as st\r\nimport pandas as pd\r\n\r\n# Custom layout\r\ndf = pd.read_csv('hurto_a_persona_transporte_publico.csv')\r\n\r\n#st.table(df.loc[:, \"estado_civil\"].unique())\r\n\r\nestadoCivil = df.loc[:,\"cantidad\"].unique()\r\n\r\noption = st.selectbox('Fecha Hecho',estadoCivil)\r\n\r\nst.header(option)\r\n\r\nfiltroEstadoCivil = df[df[\"cantidad\"] == option]\r\n\r\nst.write(filtroEstadoCivil.describe())","repo_name":"JDCO123/Proyecto_Integrador","sub_path":"pages/2_Aplicación 1.py","file_name":"2_Aplicación 1.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72720612409","text":"class ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n        list1 = []\n        list2 = []\n        while l1:\n            list1.append(l1.val)\n            l1 = l1.next\n        while l2:\n            list2.append(l2.val)\n            l2 = l2.next\n\n        merged_list = list1 + list2\n        merged_list = sorted(merged_list)\n        result = self.list_to_link(merged_list)\n        return result\n\n    @staticmethod\n    def list_to_link(source_list):\n        head = None\n        point = None\n        for item in source_list:\n            if not head:\n                head = ListNode(item)\n                point = head\n            else:\n                n = ListNode(item)\n                point.next = n\n                point = n\n\n        return head\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    l1 = solution.list_to_link([1, 2, 4])\n    l2 = solution.list_to_link([1, 3, 4])\n    ml = solution.mergeTwoLists(l1, l2)\n    while ml:\n        print(ml.val,)\n        ml = 
ml.next\n","repo_name":"xiezhedaima9591/leetcode_answer","sub_path":"easy/merge_two_sorted_linked_list.py","file_name":"merge_two_sorted_linked_list.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31825470050","text":"from setuptools import setup\n\nversion = '0.1.0'\n\nwith open('README.md') as readme:\n    long_desc = readme.read()\n\nsetup(\n    name='pytorch-unet',\n    description='PyTorch Implementation of U-Net',\n    author='Shuo Han',\n    author_email='shan50@jhu.edu',\n    version=version,\n    packages=['pytorch_unet'],\n    license='GPLv3',\n    python_requires='>=3.7.10',\n    long_description=long_desc,\n    install_requires=[\n        'torch>=1.8.1'\n    ],\n    long_description_content_type='text/markdown',\n    url='https://github.com/shuohan/pytorch-unet.git',\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n        'Operating System :: OS Independent'\n    ]\n)\n","repo_name":"shuohan/pytorch-unet","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"17221602622","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# ---------------------------------------------------------------------------\n# Author: LiaoChenchen\n# Created on: 2020/4/21 15:18\n# Reference:\n\"\"\"\nDescription:\nUsage:\n\"\"\"\n# ---------------------------------------------------------------------------\nimport cmath\nimport math\nimport sys\n\n\ndef get_float(msg, allow_zero):\n\tx = None\n\twhile x is None:\n\t\ttry:\n\t\t\tx = float(raw_input(msg))\n\t\t\tif not allow_zero and abs(x) < sys.float_info.epsilon:\n\t\t\t# Python floats are double precision, so exact comparison is unreliable; compare against sys.float_info.epsilon instead\n\t\t\t# sys.float_info.epsilon is effectively zero: the smallest difference between two floats the machine can distinguish\n\t\t\t\tprint(u'不允许为0')\n\t\t\t\tx = None\n\t\texcept ValueError as err:\n\t\t\tprint(err)\n\treturn x\n\ndef main():\n\ta = get_float('enter a: ',False)\n\tb = get_float('enter b: ',True)\n\tc = get_float('enter c: ',True)\n\t\n\tx1 = None\n\tx2 = None\n\tdiscriminant = (b**2)-(4*a*c)\n\tif discriminant == 0:\n\t\tx1 = -(b/(2*a))\n\telse:\n\t\tif discriminant >0:\n\t\t\troot = math.sqrt(discriminant)\n\t\telse:\n\t\t\troot = cmath.sqrt(discriminant)\n\t\tx1 = (-b+root)/(2*a)\n\t\tx2 = (-b-root)/(2*a)\n\tequation = (\"{0}x+{1}x+{2}=0\"\n\t\t\t\t\" x={3}\").format(a,b,c,x1)\n\t# \\N{RIGHTWARDS ARROW} renders an arrow marker (→)\n\t# \\N{SUPERSCRIPT TWO}\n\t\n\tif x2 is not None:\n\t\tequation +=' or x={0}'.format(x2)\n\tprint(equation)\n\t\nmain()\n# print \"{:.2f}\".format(3.1415926)","repo_name":"hygnic/Gispot","sub_path":"gispot/test/解二元一次方程.py","file_name":"解二元一次方程.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1433268945","text":"import pypyodbc\nimport os\nif os.path.isfile(\"D:\\workplace_python\\FinnerIDNRHest\\Hestmangleridnr.txt\"):\n    print(\"D:\\workplace_python\\FinnerIDNRHest\\Hestmangleridnr.txt\",\"blir slettet\")\n    os.remove(\"D:\\workplace_python\\FinnerIDNRHest\\Hestmangleridnr.txt\")\ncon = pypyodbc.connect('Driver={SQL Server};' 'Server=DESKTOP-0CMQN56;''Database=DWtrav;''user=Gunnar;' 'Pwd=gunnar')\nif con:\n    print(\"ja vi har forbindelse Hovedprorgam Hester mangler idnr\")\n# SQL = '''\\ \n# SELECT Heste_navn, Navn, Id_nr\n# FROM [Trav].[dbo].[StartLister]\n# where Heste_navn <> 
Navn\n# '''\nSQL = '''\\\nSELECT Heste_navn,Bane, Dato\n FROM [Trav].[dbo].[StartLister]\n Where id_nr IS NULL\n''' \ncursor = con.cursor()\ncursor.execute(SQL)\nfor a in cursor:\n Hestenavn = a[0]\n Bane = a[1] \n Dato = a[2]\n with open('Hestmangleridnr.txt', 'a') as f:\n f.write(\"%s;%s;%s\" % \\\n (Hestenavn,Bane,Dato))\n f.write('\\n')\ncursor.close() ","repo_name":"andy99/FinnBoligV3.py","sub_path":"FinnerIDNRHest/HovedprogrammidnrHest.py","file_name":"HovedprogrammidnrHest.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12959163053","text":"from django.contrib import admin\nfrom . import models, views\nfrom django.urls import path, include\n\nurlpatterns = [\n path('', views.index, name = \"index\"),\n path('home',views.home, name= 'home'),\n path('blog', views.blog, name = 'blog'),\n path('register', views.register , name = 'register'),\n path('notes', views.addNote, name= 'note'),\n path('subject', views.addSubject, name= 'subject'),\n path('level', views.addLevel, name= 'level'),\n path('ajanx/load-subject/', views.load_subject, name='ajax_load_subject')\n # path('^activate/(?p[0-9A-Za-z_\\-]+)/(?P[0-9A-Za-z]{1,13}''-[0-9A-Za-z]{1,20}))/$', views.activate, name = 'activate')\n\n]","repo_name":"Casper94/SampleBlog","sub_path":"SampleBlog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25805936047","text":"import requests\nimport csv\nimport json\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util.retry import Retry\n\ndef get_page_text(url :str) -> str:\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n response = session.get(url)\n return response.text\n\ndef get_book_page_link(title: str) -> str:\n request_link = f'https://www.goodreads.com/search?q={title}'\n soup = BeautifulSoup(get_page_text(request_link), features=\"html.parser\")\n table = soup.find('table', class_ = 'tableList')\n while(table == None):\n print(\"failed to find book link, will retry\")\n soup = BeautifulSoup(get_page_text(request_link), features=\"html.parser\")\n table = soup.find('table', class_ = 'tableList')\n sleep(1)\n book_title = table.find('a', class_=\"bookTitle\")\n link = 'https://www.goodreads.com' + book_title['href']\n return link\n\ndef get_book_quotes_link(ref: str) -> str:\n text = get_page_text(ref)\n soup = BeautifulSoup(text, features=\"html.parser\")\n discussion_card = (soup.find('a', class_ = 'DiscussionCard'))\n try_number = 0\n while(discussion_card == None):\n print(\"failed to find quote link, will retry\")\n text = get_page_text(ref)\n soup = BeautifulSoup(text, features=\"html.parser\")\n discussion_card = (soup.find('a', class_ = 'DiscussionCard'))\n sleep(1)\n link = discussion_card['href']\n return link\n\ndef get_book_id(ref: str) -> str:\n index = ref.find('show/')\n index += len('show/')\n dot_index = ref.find('.', index, len(ref) - 1)\n return ref[index:dot_index]\n\ndef read_book_list(filename :str):\n books = []\n with open(filename, newline='',encoding=\"utf8\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n book = []\n book.append(row['book_title'])\n book.append(row['author'])\n 
books.append(book)\n\n    return books\n\ndef get_book_quotes(ref :str, max_num_of_quotes = 1) -> list[str]:\n    soup = BeautifulSoup(get_page_text(ref), features=\"html.parser\")\n    current_page_quotes = soup.findAll('div', class_ = 'quotes')\n    quotes = []\n    # for some reason the quotes page has two 'quotes' containers:\n    # the first holds the first quote, the second holds the rest\n    if len(current_page_quotes) <= 1:\n        return quotes\n\n    first_quote = current_page_quotes[0].find('div', class_= 'quoteText')\n    quotes.append(first_quote.text.strip())\n    other_quotes = current_page_quotes[1].findAll('div', class_= 'quoteText')\n    for quote in other_quotes:\n        raw_text = str(quote.text.strip())\n        quote_text = raw_text.split('\\n')[0]\n        quotes.append(quote_text)\n    \n    return quotes\n\ndef get_book_info(book_name :str, author_name :str):\n    book_page_link = get_book_page_link(book_name)\n    book_quotes_link = get_book_quotes_link(book_page_link)\n    print(book_page_link, \" \", book_quotes_link)\n    book_id = get_book_id(book_page_link)\n    book_quotes = get_book_quotes(book_quotes_link)\n    dictionary = {\n        \"book_name\" : book_name,\n        \"author_name\" : author_name,\n        \"book_id\" : book_id,\n        \"goodreads_link\" : book_page_link,\n        \"goodreads_guotes_link\" : book_quotes_link,\n        \"book_quotes\" : book_quotes\n    }\n    return dictionary\n\ndef main():\n\n    books_list = read_book_list(\"mybooks.csv\")\n    outfile = open(\"books.json\", \"w\") \n    books = []\n    for book in books_list:\n        book_info = get_book_info(book[0], book[1])\n        json_object = json.dumps(book_info, indent=4)\n        books.append(json_object)\n\n    books_dict = {\n        \"books\" : books\n    }\n    json_object = json.dumps(books_dict, indent=4)\n    outfile.write(json_object)\n    outfile.close()\n    return\n\nif __name__ == '__main__':\n    main()\n\n# DiscussionCard - class of the ref to the quotes page\n# https://www.goodreads.com/search?q={insert here name of the book from the list} - link for the search","repo_name":"DaurKas/goodreads_quote_scrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"10294411504","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 09.07.2010\r\n\r\n@author: Foobar\r\n'''\r\nimport threading,time\r\n\r\nclass Scheduler(threading.Thread):\r\n    def __init__(self,cycle_length):\r\n        threading.Thread.__init__(self)\r\n        self.taskLock = threading.Lock() \r\n        self.resultLock = threading.Lock()\r\n        self.cycle_length = cycle_length \r\n        self._tasks = {}\r\n        self._taskfreq = {}\r\n        self._tasksresult = {}\r\n        self._taskcount = {}\r\n    def AddTask(self,task,getter,freq=3):\r\n        with self.taskLock:\r\n            try:\r\n                self._taskcount[task] = self._taskcount[task] + 1\r\n                return\r\n            except:\r\n                self._taskcount[task] = 1\r\n            self._tasks[task] = getter\r\n            self._tasksresult[task] = \"\"\r\n            self._taskfreq[task] = (freq-1,freq)\r\n    def GetFreq(self,task):\r\n        with self.taskLock:\r\n            try:\r\n                res = self._taskfreq[task][1]\r\n            except:\r\n                res = 1\r\n        return res\r\n    def RemoveTask(self,task):\r\n        with self.taskLock:\r\n            if task in self._tasks:\r\n                if self._taskcount[task] == 1:\r\n                    with self.resultLock:\r\n                        try:\r\n                            del self._tasksresult[task]\r\n                        except:\r\n                            pass\r\n                    del self._tasks[task]\r\n                    del self._taskfreq[task]\r\n                else:\r\n                    self._taskcount[task] = self._taskcount[task]-1\r\n    def TotallyRemove(self,task):\r\n        with self.taskLock:\r\n            if task in self._tasks:\r\n                with self.resultLock:\r\n                    try:\r\n                        del self._tasksresult[task]\r\n                    except:\r\n                        pass\r\n                del 
self._tasks[task]\r\n del self._taskfreq[task]\r\n def GetResult(self,task):\r\n with self.resultLock:\r\n if task in self._tasksresult:\r\n result = self._tasksresult[task]\r\n else:\r\n result =\"404\"\r\n return result\r\n def update_time(self):\r\n with self.taskLock:\r\n for k in self._taskfreq.keys():\r\n (t,f) = self._taskfreq[k]\r\n if t==0:\r\n t = f-1\r\n else:\r\n t = t-1\r\n self._taskfreq[k] = (t,f)\r\n \r\n def run(self):\r\n time_step = -1\r\n tasklist = []\r\n while True:\r\n if time_step < 0:\r\n with self.taskLock:\r\n lt = len([True for t in self._tasks if self._taskfreq[t][0]==0])\r\n if not lt:\r\n time_step = -1\r\n self.update_time()\r\n time.sleep(self.cycle_length)\r\n continue\r\n else:\r\n time_step = self.cycle_length / lt\r\n with self.taskLock:\r\n for (k,v) in self._tasks.iteritems():\r\n if self._taskfreq[k][0]==0:\r\n tasklist.append((k,v))\r\n self.update_time()\r\n else:\r\n r = 0\r\n (u,l) = tasklist[-1]\r\n tasklist = tasklist[:-1]\r\n with self.taskLock:\r\n if u in self._tasks:\r\n self._tasks[u](u,self.resultLock,self._tasksresult).start()\r\n r = 1\r\n if r==1:\r\n time.sleep(time_step)\r\n if not tasklist:\r\n time_step = -1","repo_name":"hashme33/BoardToJabber","sub_path":"src/main/Scheduler.py","file_name":"Scheduler.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73858744887","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2018/12/17 17:10\n# @Author : zsj\n# @File : array_test2.py\n# @Description:\nimport numpy as np\n\nn1 = np.arange(20).reshape(5,4)\nn4 = n1[[1,3]]\nprint(n4)\nn2 = np.arange(20,24).reshape(1,4)\nn3 = np.concatenate((n1, n2),axis=0)\nprint(n3)\n\n","repo_name":"zsjwish/AIOps","sub_path":"test/array_test2.py","file_name":"array_test2.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25052655301","text":"from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nSQLALCHEMY_DATABASE_URL = \"postgresql+asyncpg://financepal:financepal@db:5432/financepal\"\n\nengine = create_async_engine(\n SQLALCHEMY_DATABASE_URL,\n future=True\n)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine, class_=AsyncSession, future=True)\n\nBase = declarative_base()\n\nasync def get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n await db.close()","repo_name":"justdodo27/financepal","sub_path":"backend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15078767492","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nimport time\n\n\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"detach\", True)\ndriver_service = Service(executable_path=ChromeDriverManager().install())\ndriver = webdriver.Chrome(options=options, service=driver_service)\ndriver.maximize_window()\ndriver.get(\"https://rahulshettyacademy.com/dropdownsPractise/\")\n\ndriver.find_element(By.ID, \"autosuggest\").send_keys(\"ind\")\ntime.sleep(2)\n\ncountries = driver.find_elements(By.CSS_SELECTOR, 
\"li[class='ui-menu-item'] a\")\nprint(len(countries))\n\nfor country in countries:\n if country.text == \"India\":\n country.click()\n break\n\n#print(driver.find_element(By.ID, \"autosuggest\").text)\nprint(driver.find_element(By.ID, \"autosuggest\").get_attribute(\"value\"))\nassert driver.find_element(By.ID, \"autosuggest\").get_attribute(\"value\") == \"India\"\n","repo_name":"Aishwarya0224/Selenium_Python_Basics_Practice","sub_path":"Selenium With Python/findElementsTest.py","file_name":"findElementsTest.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43691923541","text":"from AccessControl.SecurityInfo import ClassSecurityInformation\nfrom ftw.publisher.core import getLogger\nfrom ftw.publisher.core.interfaces import IDataCollector\nfrom ftw.publisher.core.utils import IS_PLONE_5\nfrom OFS.Image import File\nfrom Products.Archetypes.Field import ComputedField\nfrom Products.Archetypes.Field import DateTimeField\nfrom Products.Archetypes.Field import FileField\nfrom zope.interface import implements\nimport base64\nimport pkg_resources\nimport StringIO\n\n\nif IS_PLONE_5:\n from plone.app.collection.field import QueryField\nelse:\n from archetypes.querywidget.field import QueryField\n\ntry:\n pkg_resources.get_distribution('plone.app.blob')\n\nexcept pkg_resources.DistributionNotFound:\n HAS_BLOBS = False\n\nelse:\n HAS_BLOBS = True\n from plone.app.blob.interfaces import IBlobWrapper\n\n\nclass FieldData(object):\n \"\"\"returns all field data\n \"\"\"\n\n implements(IDataCollector)\n logger = getLogger()\n security = ClassSecurityInformation()\n\n def __init__(self, object):\n self.object = object\n\n security.declarePrivate('getData')\n def getData(self):\n \"\"\"returns all important data\"\"\"\n return self.getFieldData()\n\n security.declarePrivate('getFieldData')\n def getFieldData(self):\n \"\"\"\n Extracts data from the object fields and creates / returns a dictionary\n with the data. 
Objects are converted to string.\n @return: dictionary with extracted data\n @rtype: dict\n \"\"\"\n data = {}\n\n fields = self.object.Schema().fields()\n\n for field in fields:\n # don't serialize AT ComputedFields\n if isinstance(field, ComputedField):\n continue\n name = field.getName()\n\n value = field.getRaw(self.object)\n value = self.fieldSerialization(field, value)\n data[name] = value\n\n return data\n\n security.declarePrivate('fieldSerialization')\n def fieldSerialization(self, field, value):\n \"\"\"\n Custom serialization for fields which provide field values that are\n incompatible with json / JSON-standard.\n @param field: Field-Object from Schema\n @type field: Field\n @param value: Return-Value of the Raw-Accessor of the Field on the\n current context\n @type value: string or stream\n @return: JSON-optimized value\n @rtype: string\n \"\"\"\n\n if isinstance(field, DateTimeField) and value:\n value = str(value)\n\n elif HAS_BLOBS and IBlobWrapper.providedBy(value):\n file_ = value.getBlob().open()\n value = {'filename': value.getFilename(),\n 'data': base64.encodestring(file_.read()),\n 'type': 'blob'}\n file_.close()\n\n elif isinstance(field, FileField) and isinstance(value, File):\n tmp = StringIO.StringIO(value.data)\n tmp.seek(0)\n value = {'filename': value.filename,\n 'data': base64.encodestring(tmp.read())}\n\n elif isinstance(field, QueryField):\n query = field.getRaw(self.object)\n # Cast \"ZPublisher.HTTPRequest.record\" instance to dict\n value = [dict(item) for item in query]\n\n return value\n\n security.declarePrivate('setData')\n def setData(self, fielddata, metadata):\n \"\"\"sets all important field data\n \"\"\"\n\n # update with new values\n self.logger.info('Updating object values (UID %s)' %\n metadata['UID'])\n fields = self.object.Schema().fields()\n\n for field in fields:\n fieldname = field.getName()\n\n # do not update \"id\" field\n if fieldname == 'id':\n continue\n\n if fieldname in fielddata.keys():\n field_value = fielddata[fieldname]\n\n if field.mode == 'r':\n continue\n\n field.getMutator(self.object)(field_value)\n","repo_name":"4teamwork/ftw.publisher.core","sub_path":"ftw/publisher/core/adapters/field_data.py","file_name":"field_data.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"957464434","text":"from collections import defaultdict\n\nfrom odoo import api, fields, models\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n taxes_updeatable_from_category = fields.Boolean(default=True)\n\n @api.onchange(\"categ_id\")\n def _onchange_categ_id_set_taxes(self):\n if self.categ_id:\n self.taxes_id = [(6, 0, self.categ_id.taxes_id.ids)]\n self.supplier_taxes_id = [(6, 0, self.categ_id.supplier_taxes_id.ids)]\n\n def set_tax_from_category(self):\n records_by_categ = defaultdict(lambda: self.browse())\n for rec in self:\n records_by_categ[rec.categ_id] += rec\n for categ, records in records_by_categ.items():\n records.write(\n {\n \"taxes_id\": [(6, 0, categ.taxes_id.ids)],\n \"supplier_taxes_id\": [(6, 0, categ.supplier_taxes_id.ids)],\n }\n )\n return True\n\n @api.model_create_multi\n def create(self, vals_list):\n for vals in vals_list:\n if vals.get(\"categ_id\"):\n if \"taxes_id\" not in vals:\n categ = self.env[\"product.category\"].browse(vals[\"categ_id\"])\n vals[\"taxes_id\"] = [(6, 0, categ.taxes_id.ids)]\n if \"supplier_taxes_id\" not in vals:\n categ = 
self.env[\"product.category\"].browse(vals[\"categ_id\"])\n vals[\"supplier_taxes_id\"] = [(6, 0, categ.supplier_taxes_id.ids)]\n return super().create(vals_list)\n","repo_name":"xcgd/account-financial-tools","sub_path":"product_category_tax/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"26513962872","text":"# INFO\n# Jamie Churchouse, 20007137\n# Massey University, Palmerston North, NZ\n# 282772 Industrial Systems Design and Integration\n# Machine Learning Project, 2023-10-06 1800\n# \n# This file handles creating, verifying, and handling directories\n\n\n# LIBRARIES\nimport os\n\n\n# DIRECTORIES & FILEPATHS\ncwd = os.getcwd()\n\nraw = os.path.join(cwd,\"master_images\")\ncateg = os.path.join(cwd,\"categorised\")\n\ntrain = os.path.join(cwd,\"training_data\")\nmodel = os.path.join(cwd,\"models\")\ntboard = os.path.join(cwd,\"tensorboard\")\noutput = os.path.join(cwd,\"classified_images\")\n\n# Lists of required directories\ndirs_required = [categ,raw]\ndirs_required_messages = [\n f\"Direcotry required \\\"{os.path.split(categ)[1]}\\\" must contain sub-directories named with the categories and respective training images inside\",\n f\"Direcotry required \\\"{os.path.split(raw )[1]}\\\" must contain all of the images provided without any organisation\"\n ]\n\ndirs_to_create = [model,train,tboard,output]\n\n\nname_data = \"data\"\nname_labels = \"labels\"\nname_categs = \"categs\"\n\n\n# FUNCTIONS\n\n# Function to verify directory integrities\ndef Verify():\n\n fail = False\n\n for msg in dirs_required_messages:\n print(msg)\n\n for dir in dirs_to_create:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print(\"Created output directory:\\n%s\\n\" %dir)\n \n for dir in dirs_required:\n if not os.path.exists(dir):\n print(\"Missing input directory:\\n%s\\n\" %dir)\n fail = True\n\n if fail: raise Exception(\"Unable to continue - one or more required input directories are missing\")\n\n return","repo_name":"JPChurchouse/282772ISDI_MachineLearning","sub_path":"Directories.py","file_name":"Directories.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1965517839","text":"from typing import Optional, Type\n\nimport lightweaver as lw\nimport lightweaver.wittmann as witt\nimport numpy as np\n\nfrom .bc_provider import DynamicContextPromBcProvider\nfrom .compute_bc import compute_falc_bc_ctx\nfrom .j_prom_bc import UniformJPromBc\nfrom .prom_bc import PromBc\nfrom .prom_model import PromModel\nfrom .utils import default_atomic_models\n\n\nclass IsoPromModel(PromModel):\n \"\"\"\n Class for \"Iso\" prominence simulations. Iso implies isothermal and isobaric.\n\n Parameters\n ----------\n projection : str\n Whether the object is to be treated as a \"filament\" or a \"prominence\".\n temperature : float\n The temperature of the prominence [K].\n pressure : float\n The pressure of the prominence [Pa].\n thickness : float\n The thickness of the prominence [m].\n vturb : float\n The microturbulent velocity inside the prominence [m/s].\n altitude : float\n The altitude of the prominence above the solar surface [m].\n active_atoms : list of str\n The element names to make \"active\" i.e. 
consider in non-LTE.\n atomic_models : list of `lw.AtomicModels`, optional\n The atomic models to use, a default set will be chosen if none are\n specified.\n Nhalf_points : int, optional\n The number of points in half of the slab. Default: 45.\n Nrays : int, optional\n The number of Gauss-Legendre angular quadrature rays to use in the\n model. Default: 3. This number will need to be set higher (e.g. 10) if\n using the `ConePromBc`.\n Nthreads : int, optional\n The number of CPU threads to use when solving the radiative transfer\n equations. Default: 1.\n prd : bool, optional\n Whether to consider the effects of partial frequency redistribution.\n Default: False.\n vlos : float, optional\n The z-projected velocity to apply to the prominence model. Default:\n None, i.e. 0.\n vrad : float, optional\n The Doppler-dimming radial velocity to apply. Note that for filaments\n this is the same as `vlos` and that should be used instead. Not fully\n supported in boundary conditions yet, (i.e. you should interpolate the\n wavelength grid first). Default: None, i.e. 0.\n ctx_kwargs : dict, optional\n Extra kwargs to be passed when constructing the Context.\n BcType : Constructor for a type of PromBc, optional\n The base type to be used for constructing the boundary conditions.\n Default: UniformJPromBc.\n bc_kwargs : dict, optional\n Extra kwargs to be passed to the construction of the boundary conditions.\n bc_provider : PromBcProvider, optional\n The provider to use for computing the radiation in the boundary\n conditions. Default: `DynamicContextPromBcProvider` using an\n `lw.Context` configured to match the current model. Note that the\n default is note very performant, but is convenient for experimenting.\n When running a grid of models, consider creating a\n `TabulatedPromBcProvider` using `compute_falc_bc_ctx` and `tabulate_bc`,\n since the default performs quite a few extra RT calculations.\n \"\"\"\n def __init__(self, projection, temperature, pressure, thickness, vturb, altitude,\n active_atoms, atomic_models=None, Nhalf_points=45, Nrays=3, Nthreads=1, prd=False,\n vlos: Optional[float]=None, vrad: Optional[float]=None,\n ctx_kwargs=None, BcType: Optional[Type[PromBc]]=None, bc_kwargs=None, bc_provider=None):\n\n self.projection = projection\n if projection not in [\"prominence\", \"filament\"]:\n raise ValueError(f\"Expected projection ({projection}), to be 'prominence' or 'filament'\")\n\n self.temperature = temperature\n self.pressure = pressure\n self.pressure_cgs = pressure * 10\n self.thickness = thickness\n self.vturb = vturb\n self.altitude = altitude\n self.Nrays = Nrays\n self.prd = prd\n self.vlos = vlos\n self.vrad = vrad\n\n if projection == \"filament\" and vrad is not None and vlos is not None:\n raise ValueError(\"Cannot set both vrad and vlos for a filament. 
(Just set one of the two).\")\n\n if projection == \"filament\" and vrad is not None and vlos is None:\n vlos = vrad\n self.vlos = vrad\n\n if ctx_kwargs is None:\n ctx_kwargs = {}\n if BcType is None:\n BcType = UniformJPromBc\n if bc_kwargs is None:\n bc_kwargs = {}\n\n if atomic_models is None:\n atomic_models = default_atomic_models()\n\n if bc_provider is None:\n vz = None\n if projection == \"prominence\" and vrad is not None:\n vz = self.vrad\n\n ctx = compute_falc_bc_ctx(active_atoms=active_atoms, atomic_models=atomic_models,\n prd=self.prd, vz=vz, Nthreads=Nthreads)\n bc_provider = DynamicContextPromBcProvider(ctx)\n\n xmod = np.concatenate([(1e-30,), np.logspace(-8, 0, Nhalf_points)])\n z1 = thickness - xmod * 0.5 * thickness\n z2 = xmod[::-1] * 0.5 * thickness\n z = np.concatenate((z1, z2[1:]))\n temp = np.ones_like(z) * temperature\n\n # NOTE(cmo): CGS Starts Here\n eos = witt.Wittmann()\n rho = np.zeros_like(temp)\n ne = np.zeros_like(temp)\n for k in range(rho.shape[0]):\n rho[k] = eos.rho_from_pg(temperature, self.pressure_cgs)\n ne[k] = eos.pe_from_pg(temperature, self.pressure_cgs) / (witt.BK * temperature)\n nHTot = rho / (lw.CM_TO_M**3 / lw.G_TO_KG) / (lw.Amu * lw.DefaultAtomicAbundance.massPerH)\n ne /= lw.CM_TO_M**3\n # NOTE(cmo): CGS Ends Here\n\n lower_bc = BcType(projection, bc_provider, altitude, \"lower\", **bc_kwargs)\n if projection == \"prominence\":\n upper_bc : Optional[PromBc] = BcType(projection, bc_provider, altitude, \"upper\", **bc_kwargs)\n else:\n upper_bc = None\n\n vel = np.zeros_like(z) if vlos is None else np.ones_like(z) * vlos\n self.atmos = lw.Atmosphere.make_1d(lw.ScaleType.Geometric, depthScale=z,\n temperature=temp, vlos=vel,\n vturb=np.ones_like(z) * vturb,\n ne=ne,\n nHTot=nHTot,\n lowerBc=lower_bc,\n upperBc=upper_bc\n )\n self.atmos.quadrature(Nrays)\n\n self.rad_set = lw.RadiativeSet(atomic_models)\n self.rad_set.set_active(*active_atoms)\n self.eq_pops = self.rad_set.iterate_lte_ne_eq_pops(self.atmos)\n\n self.spect = self.rad_set.compute_wavelength_grid()\n self.Nthreads = Nthreads\n hprd = (self.prd and self.vlos is not None)\n if hprd and hprd not in ctx_kwargs:\n ctx_kwargs['hprd'] = hprd\n ctx = lw.Context(self.atmos, self.spect, self.eq_pops,\n Nthreads=Nthreads, conserveCharge=True,\n **ctx_kwargs\n )\n super().__init__(ctx)\n\n\n def iterate_se(self, *args, update_model_kwargs: Optional[dict]=None, **kwargs):\n if update_model_kwargs is None:\n update_model_kwargs = {}\n\n if self.prd and 'prd' not in kwargs:\n kwargs['prd'] = self.prd\n\n def update_model(self, printNow, **kwargs):\n # NOTE(cmo): Fix pressure throughout the atmosphere.\n N = (lw.DefaultAtomicAbundance.totalAbundance * self.atmos.nHTot + self.atmos.ne)\n NError = self.pressure / (lw.KBoltzmann * self.temperature) - N\n nHTotCorrection = NError / (lw.DefaultAtomicAbundance.totalAbundance\n + self.eq_pops['H'][-1] / self.atmos.nHTot)\n if printNow:\n print(f' nHTotError: {np.max(np.abs(nHTotCorrection / self.atmos.nHTot))}')\n self.atmos.ne[:] += nHTotCorrection * self.eq_pops['H'][-1] / self.atmos.nHTot\n prevnHTot = np.copy(self.atmos.nHTot)\n self.atmos.nHTot[:] += nHTotCorrection\n if np.any(self.atmos.nHTot < 0.0):\n raise lw.ConvergenceError(\"nHTot driven negative!\")\n nHTotRatio = self.atmos.nHTot / prevnHTot\n\n\n for atom in self.rad_set.activeAtoms:\n p = self.eq_pops[atom.element]\n p[...] *= nHTotRatio[None, :]\n\n # TODO(cmo): Add condition to not always re-evaluate the line profiles. 
Maybe on ne change?\n self.ctx.update_deps(vlos=False, background=False)\n\n return super().iterate_se(*args, update_model=update_model, update_model_kwargs=update_model_kwargs, **kwargs)\n","repo_name":"Goobley/Promweaver","sub_path":"promweaver/iso_prom.py","file_name":"iso_prom.py","file_ext":"py","file_size_in_byte":8879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"27589516218","text":"import configparser\nimport RPi.GPIO as GPIO\nfrom gpiozero import Button,LED\nimport LCD1602\nimport time\nfrom evenement import Evenement\nfrom exceptions.peripheriqueException import PeripheriqueException\nglobal buttonState\nbuttonState=0\n\n\nclass Peripherique:\n global buttonState\n buttonState=0\n\n def __init__(self) -> None:\n self._LEDs = list()\n self._boutons = list()\n self._evenements = list()\n self._buzzers= list()\n self._obstacles = list()\n self._LCD1602s = list()\n \n def setup(self, fichierConfig):\n #Lecture du fichier de configuration\n config_obj = configparser.ConfigParser()\n\n try:\n config_obj.read(fichierConfig)\n boutonsParam = config_obj[\"boutons\"]\n buzzersParam = config_obj[\"Buzzer\"]\n obstaclesParam = config_obj[\"Obstacle\"]\n LEDsParam = config_obj[\"LED\"]\n\n #récupère la config des boutons\n for bouton in boutonsParam:\n self._boutons.append(Button(boutonsParam[bouton]))\n\n #récupère la config des LED \n for led in LEDsParam:\n self._LEDs.append(LED(LEDsParam[led]))\n\n #configure le temps entre 2 clics de bouton\n for bouton in self._boutons:\n bouton.hold_time = 0.5\n \n global BuzzerPin\n BuzzerPin = int(buzzersParam['Buzzer'])\n global Rpin\n Rpin = int(LEDsParam['Rpin'])\n global Gpin\n Gpin = int(LEDsParam['Gpin'])\n global BtnPin\n BtnPin = int(boutonsParam['BtnPin'])\n \n LCD1602.init(0x27, 1) # init(slave address, background light)\n LCD1602.write(0, 0, 'Bienvenu :) !')\n LCD1602.write(1, 1, 'SECU MOYA 1.0')\n time.sleep(2)\n LCD1602.clear()\n time.sleep(2)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM) # Numbers GPIOs by physical location\n GPIO.setup(int(obstaclesParam['ObstaclePin']), GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n #GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n \n GPIO.setup(BuzzerPin, GPIO.OUT)\n GPIO.output(BuzzerPin, GPIO.HIGH)\n GPIO.setup(Rpin, GPIO.OUT)\n GPIO.setup(Gpin, GPIO.OUT)\n GPIO.setup(BtnPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n #GPIO.add_event_detect(BtnPin, GPIO.RISING, callback=detect, bouncetime=200)\n\n except Exception as err:\n raise PeripheriqueException(\"Échec de la configuration des périphériques\")\n \n \n '''def detect(chn):\n global buttonState\n Led(GPIO.input(BtnPin))\n if(buttonState == 0):\n LCD1602.clear()\n LCD1602.write(0, 0, 'Systeme:')\n LCD1602.write(1, 1, 'active')\n buttonState=1\n\n elif(buttonState == 1):\n LCD1602.clear()\n LCD1602.write(0, 0, 'Systeme:')\n LCD1602.write(1, 1, 'desactive')\n buttonState=0'''\n\n @property \n def LCD1602s(self):\n return self._LCD1602s\n \n @property \n def LEDs(self):\n return self._LEDs\n \n @property \n def buzzers(self):\n return self._buzzers\n \n @property \n def boutons(self):\n return self._boutons\n\n @property\n def obstacles(self):\n return self._obstacles\n\n @property\n def evenements(self):\n return self._evenements\n\n\n \n\n def allumer_LED(self, couleur):\n self._evenements.append(Evenement(self._LEDs[couleur].pin.number, time.time()))\n self._LEDs[couleur].on()\n\n \n \n\n def on():\n GPIO.output(BuzzerPin, GPIO.LOW)\n\n def off():\n GPIO.output(BuzzerPin, 
GPIO.HIGH)\r\n\r\n\r\n\r\n\r\n def beep(x):\r\n on()\r\n time.sleep(x)\r\n off()\r\n time.sleep(x)\r\n\r\n def light(x,y):\r\n GPIO.output(Rpin, GPIO.HIGH)\r\n time.sleep(x)\r\n GPIO.output(Rpin, GPIO.LOW)\r\n time.sleep(y)\r\n\r\n def Led(x):\r\n if x == 0:\r\n GPIO.output(Rpin, 1)\r\n GPIO.output(Gpin, 0)\r\n if x == 1:\r\n GPIO.output(Rpin, 0)\r\n GPIO.output(Gpin, 1)\r\n \r\n \r\n","repo_name":"EnissayG/AlarmRasberryPi","sub_path":"peripheriques.py","file_name":"peripheriques.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"33424153119","text":"import urllib.parse\r\nimport tabula\r\nfrom tabula.io import read_pdf\r\nimport csv\r\nimport pandas as pd\r\nimport zipfile\r\n\r\nlistaTabelas = tabula.read_pdf(\"componente_organizacional.pdf\", pages = \"114,115\")\r\ntabela1 = listaTabelas[0]\r\ntabela2 = listaTabelas[1]\r\n\r\n\r\ntabela1.columns = tabela1.iloc[0]\r\ntabela1 = tabela1['Código Descrição da categoria'].str.split(' ', expand=True, n=1)\r\ntabela1.columns = tabela1.iloc[0]\r\ntabela1 = tabela1.drop(tabela1.index[[0]])\r\ntabela2.columns = tabela2.iloc[0]\r\ntabela2 = tabela2.drop(tabela2.index[[0]])\r\n\r\n\r\n\r\ntabela1.to_csv(\"Tabela de Tipo de Demandante.csv\", index = False)\r\n\r\ntabela2.to_csv(\"Tabela de Categoria do Padrão TISS.csv\", index = False)\r\n\r\n\r\nlistaTabelas2 = tabula.read_pdf(\"componente_organizacional.pdf\", pages = \"120\", lattice = True)\r\n\r\ntabela3 = listaTabelas2[1]\r\n\r\n## usei o loop a baixo para descobrir quais eram as colunas da tabela3\r\n#for column in tabela3:\r\n# print(column)\r\n\r\ntabela3 = tabela3[['Tabela de Tipo de Solicitação','Unnamed: 0']]\r\n\r\ntabela3 = tabela3.loc[2:]\r\ntabela3.columns = tabela3.iloc[0]\r\ntabela3 = tabela3.loc[3:]\r\n\r\n\r\ntabela3.to_csv(\"Tabela de Tipo de Solicitação.csv\", index = False)\r\n\r\nz = zipfile.ZipFile('Teste_{Henrique_Franco}.zip', 'w', zipfile.ZIP_DEFLATED)\r\nz.write('Tabela de Tipo de Demandante.csv')\r\nz.write('componente_organizacional.pdf')\r\nz.write('Tabela de Tipo de Solicitação.csv')\r\nz.write('Tabela de Categoria do Padrão TISS.csv')\r\nz.close()\r\n","repo_name":"henriquedalga/Testes_para_estagio","sub_path":"teste2.py","file_name":"teste2.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"17001705809","text":"import os\r\nclass AkunBank:\r\n def __init__(self, no_pelanggan, nama_pelanggan, jumlah_saldo):\r\n self.nama_pelanggan = nama_pelanggan\r\n self.no_pelanggan = no_pelanggan\r\n self.__jumlah_saldo = jumlah_saldo #atribut privat\r\n \r\n def lihat_saldo(self):\r\n print(f\"saldo anda saat ini adalah : {self.__jumlah_saldo}\")\r\n\r\n def tarik_tunai(self, jumlah_uang):\r\n if jumlah_uang <= self.__jumlah_saldo:\r\n self.__jumlah_saldo -= jumlah_uang\r\n print(f\"tarik tunai senilai {jumlah_uang} berhasil\")\r\n print(f\"saldo anda saat ini adalah {self.__jumlah_saldo}\")\r\n else:\r\n print(f\"jumlah uang anda tidak mencukupi!\")\r\n\r\n def transfer(self, penerima, jumlah_uang):\r\n if jumlah_uang <= self.__jumlah_saldo:\r\n self.__jumlah_saldo -= jumlah_uang\r\n penerima.__jumlah_saldo += jumlah_uang #penambahan saldo ke rekening tujuan\r\n print(f\"transfer senilai {jumlah_uang} ke {penerima.nama_pelanggan} berhasil\")\r\n print(f\"saldo anda saat ini adalah {self.__jumlah_saldo}\")\r\n else:\r\n print(f\"jumlah uang anda tidak mencukupi!\")\r\n\r\nos.system(\"cls\")\r\nAkun1 = AkunBank(1234, 
\"fauzan alfa abhista\", 5000000000)\r\nAkun2 = AkunBank(2345, \"Ukraina\", 6666666666)\r\nAkun3 = AkunBank(3456, \"Elon Musk\", 9999999999)\r\nbase_akun = [Akun1, Akun2, Akun3] #list untuk data ke 3 akun\r\nprint(f\"\"\"Selamat datang di Bank Jago\r\nmohon untuk login terlebih dahulu!\"\"\")\r\nlogin_no = int(input(\"masukan no pelanggan : \")) #input no pelanggan\r\nlogin_nama = input(\"masukan nama pelanggan : \") #input nama pelanggan\r\n\r\nfor akun in base_akun:\r\n if akun.nama_pelanggan == login_nama and akun.no_pelanggan == login_no: #validasi login\r\n validasi = True\r\n while True:\r\n print(\"Selamat data di bank jago!\")\r\n print(f\"\"\"Halo {akun.nama_pelanggan}, ingin melakukan apa?\r\n1. Lihat saldo\r\n2. Tarik tunai\r\n3. Transfer saldo\r\n4. Keluar\"\"\") #menu\r\n masukan = int(input(\"Masukkan nomor input: \"))\r\n if (masukan == 1):\r\n akun.lihat_saldo()\r\n elif(masukan == 2):\r\n jumlah_tarik = int(input(\"masukan jumlah tarik tunai : \"))\r\n akun.tarik_tunai(jumlah_tarik)\r\n elif(masukan == 3):\r\n jumlah_transfer = int(input(\"masukan jumlah uang yang ingin ditranfer : \"))\r\n no_tujuan = int(input(\"masukan no pelanggan tujuan : \"))\r\n for akun_tujuan in base_akun: #perulangan for untuk mengetahui akun tujuan\r\n if akun_tujuan.no_pelanggan == no_tujuan:\r\n akun.transfer(akun_tujuan, jumlah_transfer)\r\n elif(masukan == 4):\r\n break\r\n break\r\n# else:\r\n# validasi = False\r\n ","repo_name":"FzanAlfa/prak-pbo","sub_path":"tugas3/121140217_Fauzan alfa abhista_Prak3_No2.py","file_name":"121140217_Fauzan alfa abhista_Prak3_No2.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24447555985","text":"\n#%%\nimport pandas as pd\nimport geopandas as gpd\nimport matplotlib as plt\n\n#%%\nparks = gpd.read_file(\"../../data/raw/Park Lands - Recreation and Parks Department.geojson\")\nparks.loc[:, ['x', 'y']] = parks.loc[:, ['x', 'y']].apply(lambda x: x.astype('double'))\nparks = parks[(parks['x'] < -122.0)]\n\n#%%\ntrees = pd.read_csv(\"../../data/raw/Street_Tree_List.csv\")\ntrees = gpd.GeoDataFrame(trees, geometry = gpd.points_from_xy(trees.Longitude, trees.Latitude))\ntrees = trees[(trees['Latitude'] > 37.65) & (trees['Latitude'] < 40)]\n\n#%%\nax = parks.plot(color = \"white\", edgecolor = \"black\")\ntrees.plot(ax = ax, color = \"green\")\n\n# %%\n","repo_name":"rtjohnson12/sf-trees","sub_path":"src/features/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36439021626","text":"import zipfile\n\n\ndef extract_archive(archive_path, dest_directory):\n with zipfile.ZipFile(archive_path, \"r\") as archive:\n archive.extractall(dest_directory)\n\n\nif __name__ == \"__main__\":\n extract_archive(\"/home/echeadle/Udemy-Learn-Py/Py_Mega_Course/app1/archive_extractor/compressed.zip\",\n \"/home/echeadle/Udemy-Learn-Py/Py_Mega_Course/app1/archive_extractor/files\")\n","repo_name":"echeadle/Py_Mega_course","sub_path":"app1/archive_extractor/modules/zip_extractor.py","file_name":"zip_extractor.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17303271001","text":"import os\nfrom datetime import datetime\n\nfrom api.db.db import Base, engine, session\nfrom api.db.models.common import SQLITE3_NAME\nfrom 
api.db.models.task import Task\nfrom api.db.models.user import User\n\nif __name__ == \"__main__\":\n path = SQLITE3_NAME\n if not os.path.isfile(path):\n print(\"Creating database...\")\n Base.metadata.create_all(engine)\n print(\"Database created!\")\n\n admin = User(\n username=\"admin\",\n password=\"admin\",\n email=\"hogehoge@example.com\",\n )\n session.add(admin)\n session.commit()\n\n task = Task(\n user_id=admin.id,\n content=\"test\",\n deadline=datetime(2023, 8, 31, 12, 00, 00),\n )\n print(task)\n session.add(task)\n session.commit()\n\n session.close()\n","repo_name":"HIR0-728/fastapi-my-test-app","sub_path":"backend/create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6171644812","text":"import re\n\npattern = re.compile(r'^(\\/.*;\\d{2}\\/\\d{2}\\/\\d{4});(\\d*);(\\d*);(\\d*;\\d*)$')\n\nwith open(\"ficheiro.txt\") as fp:\n for linha in fp:\n linha = linha.strip()\n g = pattern.match(linha)\n\n if g:\n perc = format(float(float(g.group(3)) / float(g.group(2))) * 100, '.0f')\n\n print(pattern.sub(r'\\1;\\2;' + perc + r';\\4', linha))\n","repo_name":"JessicaCoelho21/ASI","sub_path":"Exemplos_Exames/ER_2019_2020/Percentage_v2.py","file_name":"Percentage_v2.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70615885363","text":"from __future__ import annotations\r\n\r\nimport logging\r\nimport numpy as np\r\nfrom langchain.vectorstores import Pinecone\r\nfrom langchain.schema import Document\r\nfrom langchain.embeddings.base import Embeddings\r\nfrom typing import Optional, List, Any, Tuple\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n\r\nclass CustomPinecone(Pinecone):\r\n\r\n def similarity_search_with_score(self,\r\n dim: int = 1536,\r\n k: int = 4,\r\n filter: Optional[dict] = None,\r\n namespace: Optional[str] = None) -> List[Tuple[Document, float]]:\r\n if namespace is None:\r\n namespace = self._namespace\r\n query_obj = np.random.rand(dim).tolist()\r\n docs = []\r\n results = self._index.query(\r\n [query_obj],\r\n top_k=k,\r\n include_metadata=True,\r\n namespace=namespace,\r\n filter=filter,\r\n )\r\n for res in results[\"matches\"]:\r\n metadata = res[\"metadata\"]\r\n if self._text_key in metadata:\r\n text = metadata.pop(self._text_key)\r\n score = res[\"score\"]\r\n docs.append(\r\n (Document(page_content=text, metadata=metadata), score))\r\n else:\r\n logging.warning(\r\n f\"Found document with no `{self._text_key}` key. Skipping.\"\r\n )\r\n return docs\r\n \r\n def similarity_search(\r\n self,\r\n dim: int = 1536,\r\n k: int = 4,\r\n filter: Optional[dict] = None,\r\n namespace: Optional[str] = None,\r\n **kwargs: Any,\r\n ) -> List[Document]:\r\n \"\"\"Return pinecone documents most similar to query.\r\n\r\n Args:\r\n query: Text to look up documents similar to.\r\n k: Number of Documents to return. Defaults to 4.\r\n filter: Dictionary of argument(s) to filter on metadata\r\n namespace: Namespace to search in. 
Default will search in '' namespace.\r\n\r\n Returns:\r\n List of Documents most similar to the query and score for each\r\n \"\"\"\r\n docs_and_scores = self.similarity_search_with_score(\r\n dim=dim, k=k, filter=filter, namespace=namespace, **kwargs\r\n )\r\n return [doc for doc, _ in docs_and_scores]\r\n\r\n @classmethod\r\n def from_existing_index(\r\n cls,\r\n index_name: str,\r\n embedding: Embeddings,\r\n text_key: str = \"text\",\r\n namespace: Optional[str] = None,\r\n ) -> CustomPinecone:\r\n \"\"\"Load pinecone vectorstore from index name.\"\"\"\r\n try:\r\n import pinecone\r\n except ImportError:\r\n raise ValueError(\r\n \"Could not import pinecone python package. \"\r\n \"Please install it with `pip install pinecone-client`.\"\r\n )\r\n return cls(\r\n pinecone.Index(\r\n index_name), embedding.embed_query, text_key, namespace\r\n )\r\n","repo_name":"Heicarbook/customer-service-bot-pinecone","sub_path":"custom_pinecone.py","file_name":"custom_pinecone.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22999184451","text":"\n#! path/to/missing_migrants/bin/python\n\nfrom bokeh.io import show, output_file\nfrom bokeh.models import (\n ColumnDataSource,\n HoverTool,\n LogColorMapper\n)\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.embed import components\n\nfrom bokeh.sampledata.us_counties import data as counties\n\nimport pandas as pd\nfrom us_missing_migrants_preprocess import preprocess\n\ndef reduce_by_id(joined):\n results = {}\n for i in joined.itertuples():\n if i.id not in results:\n results[i.id] = i.toll\n else:\n results[i.id] += i.toll\n return results # we need the dictionary for the next part\n\n\ndef get_missing_migrant_data():\n joined = preprocess()\n return reduce_by_id(joined)\n\nmissing = get_missing_migrant_data()\n\n# Below is borrowed from Bokeh Documentation with modification\n\npalette.reverse()\n\nexclusion = [\"hi\", \"ak\"] # get rid of hawai and alaska for ease of projection\ninclusion = [\"az\", \"ca\", \"fl\", \"la\", \"nm\", \"tx\",\"ga\", \"al\", \"ms\"] # state we want to see\n\ncounties = { code: county for code,\n county in counties.items() if county[\"state\"] in inclusion}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['detailed name'] for county in counties.values()]\ncounty_rates = [missing[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\nsource = ColumnDataSource(data=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n))\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Missing Migrants in the US, 01-2014 to 03-2018\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None\n)\np.grid.grid_line_color = None\n\np.patches('x', 'y', source=source,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.9, line_color=\"white\", line_width=0.5)\n\nhover = p.select_one(HoverTool)\nhover.point_policy = \"follow_mouse\"\nhover.tooltips = [\n (\"Name\", \"@name\"),\n (\"# Fatalities/Disappearances\", \"@rate\"),\n]\noutput_file(\"missing_migrants.html\") # or output to an html\nshow(p)\n#script, div = components(p)\n#with open(\"missing_migrants_script.txt\", \"w\") as f:\n# f.write(script)\n#with open(\"missing_migrants_div.txt\", \"w\") as f:\n # 
f.write(div)\n\n\n","repo_name":"tamos/MissingMigrants","sub_path":"plot_missing_migrants_us.py","file_name":"plot_missing_migrants_us.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36733167898","text":"#!/usr/bin/python3\n\nfrom __future__ import print_function\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom auv_perception.fls import ARISFrameFile\nfrom auv_perception.sonar import fractional_polar_axes\n\nimport sys, os, argparse\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom skimage.io import imsave\n\n#ARIS Explorer 3000 horizontal field of view\nMIN_FOV = -15\nMAX_FOV = 15\n\ndef savePolarProjection(outputFileName, frame):\n fig = plt.figure()\n\n theta, r = np.mgrid[MIN_FOV:MAX_FOV:(frame.width() * 1j), frame.windowStart():frame.windowEnd():(frame.height() * 1j)]\n ax = fractional_polar_axes(fig, thlim = (MIN_FOV, MAX_FOV), rlim = (frame.windowStart(), frame.windowEnd()), ticklabels = False)\n im = ax.pcolormesh(theta, r, frame.numpyImage().T, cmap = cm.Greys_r, shading='flat', edgecolors=\"none\", vmin=0, vmax=255)\n\n fig.tight_layout()\n fig.savefig(outputFileName, bbox_inches = 'tight', facecolor = 'white', dpi=150, antialiased=False, pad_inches=0)\n plt.close(fig)\n\ndef saveRectangularProjection(outputFileName, frame):\n imsave(outputFileName, frame.data)\n\ndef progress(count, total, suffix=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', suffix))\n sys.stdout.flush()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"inputArisFile\", help = \"Input ARIS DDFv5 sonar file (.aris extension)\")\nparser.add_argument(\"outputFolder\", help = \"Output folder where to store the image frames as PNG files\")\nparser.add_argument(\"--startIndex\", help = \"Starting frame index\", type = int)\nparser.add_argument(\"--endIndex\", help = \"End frame index (inclusive)\", type = int)\n\npolarParser = parser.add_mutually_exclusive_group(required=False)\npolarParser.add_argument('--polar', dest='polar', action='store_true', help = \"Output polar projection images\")\npolarParser.add_argument('--rectangular', dest='polar', action='store_false', help = \"Output rectangular images\")\nparser.set_defaults(polar = True)\n\nargs = parser.parse_args()\nbaseFileName = os.path.splitext(os.path.basename(args.inputArisFile))[0]\narisFile = ARISFrameFile(args.inputArisFile)\n\nprojectAndSave = None\n\nif args.polar:\n projectAndSave = savePolarProjection\nelse:\n projectAndSave = saveRectangularProjection\n\ntry:\n os.mkdir(args.outputFolder)\nexcept FileExistsError:\n pass\n\nstartIdx = 0\nendIdx = arisFile.frameCount() - 1\n\nif args.startIndex is not None:\n startIdx = args.startIndex\n\nif args.endIndex is not None:\n endIdx = args.endIndex\n\nfor i in range(startIdx, endIdx + 1):\n frame = arisFile.frame(i)\n\n #print(\"Window start {} window end {}\".format(frame.windowStart(), frame.windowEnd()))\n\n outFileName = \"{}/{}-frame{:05d}.png\".format(args.outputFolder, baseFileName, i)\n\n progress(i, arisFile.frameCount(), \" Processing frame {}\".format(i))\n\n projectAndSave(outFileName, 
frame)\n\n#sys.exit(0)\n","repo_name":"mvaldenegro/auv-perception","sub_path":"scripts/aris2png.py","file_name":"aris2png.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"}
{"seq_id":"1775141093","text":"import pytest\nfrom pytest import approx\nimport pickle\nfrom streetlevel import mapy\nfrom streetlevel.dataclasses import Size\n\n\ndef mocked_getbest(lat, lon, radius, options=None):\n with open(\"mapy/data/getbest.pkl\", \"rb\") as f:\n return pickle.load(f)\n\n\nmapy.api.getbest = mocked_getbest\n\n\ndef test_find_panorama():\n pano = mapy.find_panorama(50.1265193, 17.3762701, 100.0)\n assert pano.id == 59418543\n assert pano.lat == approx(50.1265193, 0.001)\n assert pano.lon == approx(17.3762701, 0.001)\n assert pano.provider == \"stavinvex\"\n assert pano.num_tiles == [Size(x=1, y=1), Size(x=16, y=8)]\n assert pano.pitch == approx(0.01970877694733808, 0.001)\n assert pano.roll == approx(0.019988022665593075, 0.001)\n","repo_name":"sk-zk/streetlevel","sub_path":"tests/mapy/mapy_test.py","file_name":"mapy_test.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"75"}
{"seq_id":"43238930138","text":"# a=b*c+d\r\n# d=a-b*c\r\ndef gcd(a, b):\r\n if a == 0:\r\n return b\r\n diziA.append(b)\r\n diziB.append(int(b / a))\r\n diziC.append(a)\r\n diziD.append(int(b % a))\r\n return gcd(b % a, a)\r\n\r\n\r\ndiziA = [0]\r\ndiziB = [0]\r\ndiziC = [0]\r\ndiziD = [0]\r\n\r\na=24\r\nb=138\r\ngcds = gcd(a, b)\r\n\r\nfor i in range(len(diziD)):\r\n print(str(diziD[i]))\r\n\r\n\r\nprint(diziA)\r\nprint(diziB)\r\nprint(diziC)\r\nprint(diziD)\r\nprint(str(len(diziD))+str(diziD[3]))\r\n\r\n","repo_name":"vitta79/python_basic","sub_path":"oklid.py","file_name":"oklid.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"22514220644","text":"import os\nimport errno\nimport socket\nimport signal\nimport random\nimport selectors\nimport argparse\nfrom bidict import bidict\nfrom collections import deque\nfrom functools import partial\nfrom itertools import chain\nfrom rikka.config import Config, ConfigAttribute\nfrom rikka.exceptions import ConfigMissing\nfrom rikka.logger import logger, name2level\nfrom rikka.protocol import Protocol, PKGBuilder, BUF_SIZE, \\\n sentinel\nfrom rikka.utils import parse_netloc, set_non_blocking, \\\n format_addr, create_listening_sock\n\n\nfrom argparse import Namespace\nfrom socket import socket as socket_t\nfrom typing import List, Optional\n\nPOS = 0 # expose to tunnel\nNEG = 1 # tunnel to expose\n\n\nclass Server:\n\n tunnel_addr = ConfigAttribute('tunnel', parse_netloc)\n expose_addr = ConfigAttribute('bind', parse_netloc)\n\n def __init__(self, pkgbuilder: PKGBuilder, config: Config) -> None:\n self._ready: deque = deque() # task queue\n self._config = config\n self._stopping = False\n self._pkgbuilder = pkgbuilder\n self._sel = selectors.DefaultSelector()\n\n self.tunnel_pool: deque = deque()\n self.work_pool = bidict()\n\n # reinstall signal\n self.init_signal()\n # select timeout\n self.reset_timeout()\n\n self.start_listen()\n\n @property\n def config(self) -> Config:\n return self._config\n\n def start_listen(self) -> None:\n \"\"\"listen expose conn and tunnel conn\"\"\"\n self.expose_sock = create_listening_sock(self.expose_addr)\n self._sel.register(self.expose_sock, selectors.EVENT_READ,\n 
self.accept_expose)\n\n self.tunnel_sock = create_listening_sock(self.tunnel_addr)\n self._sel.register(self.tunnel_sock, selectors.EVENT_READ,\n self.accept_tunnel)\n\n self._listening_sock = [\n self.expose_sock,\n self.tunnel_sock,\n ]\n\n def next_timeout(self) -> None:\n \"\"\"binary exponential backoff\"\"\"\n self._timeout_count += 1\n upper_bound = (2 ** min(self._timeout_count, 7)) - 1\n self._timeout = random.randint(1, upper_bound)\n\n def reset_timeout(self) -> None:\n \"\"\"reset timeout to initial value\"\"\"\n self._timeout: int = 1 # mypy need this variable type\n self._timeout_count: int = 0\n\n def accept_expose(self, expose_sock: socket_t, mask: int) -> None:\n \"\"\"accept user connection\"\"\"\n conn, addr = expose_sock.accept()\n conn.setblocking(False)\n\n self._sel.register(conn, selectors.EVENT_READ, self.prepare_transfer)\n\n logger.info(f'accept user connection from {format_addr(addr)}')\n\n def accept_tunnel(self, tunnel_sock: socket_t, mask: int) -> None:\n \"\"\"accept tunnel connection\"\"\"\n conn, addr = tunnel_sock.accept()\n conn.setblocking(False)\n\n self.tunnel_pool.append(conn)\n\n logger.info(f'accept tunnel connection from {format_addr(addr)}, '\n f'poolsize is {len(self.tunnel_pool)}')\n\n def prepare_transfer(self, expose_conn: socket_t, mask: int) -> None:\n tunnel_conn = self.find_available_tunnel()\n if tunnel_conn is None: # non-available tunnel_conn\n self._sel.unregister(expose_conn)\n # delay\n self._ready.append(\n lambda: self._sel.register(\n expose_conn, selectors.EVENT_READ,\n self.prepare_transfer\n )\n )\n return\n self.work_pool[expose_conn] = tunnel_conn\n buf: List[deque] = [deque(), deque()]\n self._sel.register(tunnel_conn,\n selectors.EVENT_WRITE | selectors.EVENT_READ,\n partial(self.dispatch_tunnel, buf=buf))\n self._sel.modify(expose_conn,\n selectors.EVENT_WRITE | selectors.EVENT_READ,\n partial(self.dispatch_expose, buf=buf))\n\n def dispatch_tunnel(self, conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"schedule tunnel events\"\"\"\n if mask & selectors.EVENT_WRITE:\n self.send_to_tunnel(conn, mask, buf)\n if mask & selectors.EVENT_READ:\n self.transfer_from_tunnel(conn, mask, buf)\n\n def dispatch_expose(self, conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"schedule expose events\"\"\"\n if mask & selectors.EVENT_WRITE:\n self.send_to_expose(conn, mask, buf)\n if mask & selectors.EVENT_READ:\n self.transfer_from_expose(conn, mask, buf)\n\n def transfer_from_expose(self, r_conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"receive data from expose and store in buffer\"\"\"\n w_conn = self.work_pool.get(r_conn)\n if w_conn is None:\n self._sel.unregister(r_conn)\n r_conn.close()\n return\n\n data = b''\n need_close = False\n\n try:\n data = r_conn.recv(BUF_SIZE) # Connection may be close\n except ConnectionError:\n need_close = True\n\n if data == b'' or need_close:\n try:\n peer = r_conn.getpeername()\n logger.info(f'closing user connection from {format_addr(peer)}') # noqa\n except OSError as e:\n logger.warn(e)\n self._sel.unregister(r_conn)\n r_conn.close()\n buf[POS].append(sentinel)\n del self.work_pool[r_conn]\n return\n\n buf[POS].append(data)\n\n def transfer_from_tunnel(self, r_conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"receive data from tunnel and store in buffer\"\"\"\n w_conn = self.work_pool.inv.get(r_conn)\n if w_conn is None:\n self._sel.unregister(r_conn)\n r_conn.close()\n return\n\n data = b''\n need_close = False\n\n try:\n data = 
r_conn.recv(BUF_SIZE)\n except ConnectionError:\n need_close = True\n\n if data == b'' or need_close:\n try:\n peer = r_conn.getpeername()\n logger.info(f'closing tunnel connection from {format_addr(peer)}') # noqa\n except OSError as e:\n logger.warn(e)\n self._sel.unregister(r_conn)\n r_conn.close()\n buf[NEG].append(sentinel)\n del self.work_pool.inv[r_conn]\n return\n\n buf[NEG].append(data)\n\n def send_to_tunnel(self, w_conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"send buffer data to tunnel\"\"\"\n if not len(buf[POS]):\n return\n try:\n data = buf[POS].popleft()\n if data is sentinel:\n self._sel.unregister(w_conn)\n w_conn.close()\n return\n byte = w_conn.send(data)\n except socket.error as e:\n if e.args[0] == errno.EWOULDBLOCK:\n logger.info('EWOULDBLOCK occur in send to tunnel')\n buf[POS].appendleft(data[byte:])\n\n def send_to_expose(self, w_conn: socket_t,\n mask: int, buf: List[deque]) -> None:\n \"\"\"send buffer data to expose\"\"\"\n if not len(buf[NEG]):\n return\n try:\n data = buf[NEG].popleft()\n if data is sentinel:\n self._sel.unregister(w_conn)\n w_conn.close()\n return\n byte = w_conn.send(data)\n except socket.error as e:\n if e.args[0] == errno.EWOULDBLOCK:\n logger.info('EWOULDBLOCK occur in send to expose')\n buf[NEG].appendleft(data[byte:])\n\n def _handshake(self, conn_slaver: socket_t) -> bool:\n \"\"\"handshake\"\"\"\n conn_slaver.setblocking(True) # TODO use nonblocking IO\n conn_slaver.send(self._pkgbuilder.pbuild_hs_m2s())\n buff = conn_slaver.recv(self._pkgbuilder.PACKAGE_SIZE)\n conn_slaver.setblocking(False)\n if buff == b'': # empty response\n return False\n return self._pkgbuilder.decode_verify(buff,\n self._pkgbuilder.PTYPE_HS_S2M)\n\n def find_available_tunnel(self) -> Optional[socket_t]:\n while True:\n try:\n conn = self.tunnel_pool.popleft()\n except IndexError:\n # no available tunnel connection, just return\n # do not need to wait in a loop, because we work in LT Mode\n self.next_timeout()\n logger.info('no available tunnel connection, waiting')\n return None\n else:\n self.reset_timeout()\n\n if not self._handshake(conn): # handshake first\n conn.close()\n continue\n return conn\n\n def run_forever(self) -> None:\n \"\"\"main loop\"\"\"\n while not self._stopping:\n events = self._sel.select(timeout=self._timeout)\n for job in self._ready: # TODO heartbeat\n job()\n self._ready.clear()\n\n # from pprint import pprint\n # pprint(events)\n for key, mask in events:\n callback = key.data\n callback(key.fileobj, mask)\n logger.info('stopping now ...')\n self.exit()\n\n def exit(self) -> None:\n \"\"\"close all listening fds\"\"\"\n all_fds = chain(self._wake_fds, self.tunnel_pool,\n self.work_pool.keys(), self._listening_sock)\n for s in all_fds:\n s.close()\n\n def init_wake_fds(self) -> None:\n self._wake_fds = socket.socketpair()\n for p in self._wake_fds:\n set_non_blocking(p.fileno()) # epoll need non-blocking fd\n\n def init_signal(self) -> None:\n self.init_wake_fds()\n signal.signal(signal.SIGINT, lambda *args: None)\n signal.set_wakeup_fd(self._wake_fds[1].fileno())\n self._sel.register(self._wake_fds[0], selectors.EVENT_READ,\n self.handle_signal)\n\n def handle_signal(self, expose_sock: socket_t, mask: int) -> None:\n sig = self._wake_fds[0].recv(1)\n logger.info('recving signal {}'.format(sig))\n self._stopping = True\n\n\ndef parse_args() -> Namespace:\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument('-c', '--config', default=None, help='config 
path')\n parser.add_argument('-t', '--tunnel', metavar='host:port', help='')\n parser.add_argument('-b', '--bind', metavar='host:port', help='')\n parser.add_argument('-k', '--secretkey', default='secretkey', help='')\n parser.add_argument('-l', '--level', default='info', help='')\n # parser.add_argument('--ttl', default=300, type=int, dest='ttl', help='')\n\n return parser.parse_args()\n\n\ndef main() -> None:\n args = parse_args()\n config_path = args.config\n delattr(args, 'config')\n\n config = Config()\n if config_path is not None:\n config.load_file(config_path)\n config.from_object(args)\n try:\n config.validate([\n 'tunnel',\n 'bind',\n 'secretkey',\n ])\n except ConfigMissing as e:\n logger.error(e)\n exit()\n\n logger.setLevel(name2level(config.level))\n\n Protocol.set_secret_key(config.secretkey)\n Protocol.recalc_crc32()\n pkgbuilder = PKGBuilder(Protocol)\n\n master = Server(pkgbuilder, config)\n logger.debug('PID: {}'.format(os.getpid()))\n logger.info('init successful, running as master')\n master.run_forever()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Hanaasagi/rikka","sub_path":"rikka/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"26461313976","text":"#!/usr/bin/env python\n\nimport roshelper\nimport rospy\nfrom statemachine import TenderBotStateMachine\nfrom sensor_msgs.msg import Image as ImageMsg\nfrom std_msgs.msg import Int32\nfrom std_msgs.msg import Float32MultiArray\nfrom geometry_msgs.msg import Vector3\n\nfrom TenderbotArm.srv import add_point as add_point_srv\n\nfrom rospy.numpy_msg import numpy_msg\nfrom rospy_tutorials.msg import Floats\nimport numpy\nnode_name = \"Logic\"\n\nn = roshelper.Node(node_name, anonymous=False)\n\n# A class for TenderBot business logic\n@n.entry_point()\nclass TenderBotLogic(object):\n state_machine = None\n drink_choice = None\n drinks = [\n {\"ingredients\": [1,1,1] },\n {\"ingredients\": [1,1,1] },\n {\"ingredients\": [1,1,1] },\n ]\n ingredient_pos = [\n [25, 0, 5, 1],\n [25, 10, 5, 1],\n [25, 20, 5, 1]\n ]\n current_ingredient = None\n \n shapes = None\n effector_pos = None\n \n @n.subscriber(rospy.get_namespace() + \"Input/DrinkChoice\", Int32)\n def drink_choice(self, choice):\n self.drinkchoice = choice.data;\n\n def fault_occurred(self):\n self.state_machine.error_occurred()\n \n def fault_cleared(self):\n self.state_machine.error_cleared()\n \n def force_arm_pos(self, pos):\n return pos\n \n @n.subscriber(rospy.get_namespace() + \"Vision/shapes/red\", Float32MultiArray)\n def get_shapes(self, shapes):\n self.shapes = shapes.data\n \n @n.subscriber(rospy.get_namespace() + \"Arm/effector_pos\", Vector3)\n def get_effector_pos(self, pos):\n self.effector_pos = pos\n\n def mix_drink(self):\n \n self.drink_choice = 0 # TESTESTESTETSTEST\n if (self.shapes is not None):\n rospy.loginfo(self.shapes)\n self.add_arm_point(self.shapes[0], self.shapes[1], 30, 0) # Move over cup\n '''\n if(self.current_ingredient == None): # Start mixing\n #rospy.loginfo(\"Start mix\")\n self.current_ingredient = 0\n if(self.current_ingredient >= len(self.drinks[self.drink_choice][\"ingredients\"])): # Done mixing\n #rospy.loginfo(\"Done mix\")\n self.current_ingredient = None\n self.state_machine.drink_mixed()\n else: # Mixing\n #rospy.loginfo(\"Mix\")\n pos = self.ingredient_pos[self.current_ingredient]\n self.add_arm_point(pos[0], pos[1], 30, 0) # Move over cup\n self.add_arm_point(pos[0], 
pos[1], pos[2], pos[3]) # Move to cup\n self.current_ingredient = self.current_ingredient + 1\n '''\n \n \n # ctor\n def __init__(self): # (self, exp_a, exp_b, exp_c)\n add_point_path = rospy.get_namespace() + \"Arm/add_point_to_path\"\n rospy.wait_for_service(add_point_path)\n self.add_arm_point = rospy.ServiceProxy(add_point_path, add_point_srv)\n self.state_machine = TenderBotStateMachine(self)\n self.state_machine.start()\n\n @n.main_loop(frequency=30)\n def run(self):\n self.state_machine.update()\n\nif __name__ == \"__main__\":\n n.start(spin=True)\n\n\n \n \n \n","repo_name":"fennesz/ITROB-LAB1","sub_path":"tenderbot_logic/tenderbot_logic.py","file_name":"tenderbot_logic.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"7066201471","text":"import sys\nimport psutil\nimport signal\nimport argparse\nimport logging\n\nclass Memmon(object):\n def __init__(self):\n parser = argparse.ArgumentParser(description='Kill runaway processes')\n parser.add_argument('-w', '--whitelist', help='Comma separated list of processes to monitor, will only monitor these if defined')\n parser.add_argument('-b', '--blacklist', help='Comma separated list of processes to ignore')\n parser.add_argument('-m', '--max-memory', type=int, help='Max memory for a process in megabytes, default is 100mb')\n parser.add_argument('-s', '--signal', help='The signal to send to the process, default is SIGUSR1')\n parser.add_argument('-l', '--log-level', help='The amount of logging you want: INFO, DEBUG, or ERROR')\n parser.add_argument('-f', '--log-file', help='File to log to')\n parser.add_argument('-e', '--send-mail', action='store_true', help='Will attempt to notify via e-mail if this is set')\n parser.add_argument('--smtp-host', help='The smtp server address you want to send notifications through')\n parser.add_argument('--smtp-port', help='The port the smtp server listens on')\n parser.add_argument('--smtp-user', help='The username to authenticate as')\n parser.add_argument('--smtp-password', help='The password to use for the smtp server')\n parser.add_argument('--smtp-from', help='Who the e-mail is coming from')\n parser.add_argument('--smtp-recipients', help='The people to be notified, comma separated list')\n\n args = parser.parse_args(sys.argv[1:])\n\n # set sane defaults\n self.whitelist = None\n self.blacklist = ['mds']\n\n self.should_send_mail = False\n if args.send_mail:\n self.should_send_mail = True\n\n if not args.smtp_recipients:\n raise Exception('You must define who you want the notifications to go to with --smtp-recipients')\n else:\n self.smtp_recipients = args.smtp_recipients.strip().split(',')\n\n if not args.smtp_from:\n raise Exception('You must define who the notifications should come from with --smtp-from')\n else:\n self.smtp_from = args.smtp_from.strip()\n\n if args.max_memory:\n # convert arg to bytes\n self.max_memory = args.max_memory * (1024 * 1024)\n else:\n # default max memory to 100mb\n MAX_MEM = 100 * (1024 * 1024)\n self.max_memory = MAX_MEM\n\n if args.signal:\n self.sig = getattr(signal, args.signal.strip().upper(), signal.SIGUSR1)\n else:\n self.sig = signal.SIGUSR1\n\n if args.log_level:\n self.log_level = getattr(logging, args.log_level.strip().upper(), logging.ERROR)\n else:\n self.log_level = logging.ERROR\n\n self.LOG_FORMAT='%(asctime)s %(message)s'\n\n if args.log_file:\n logging.basicConfig(format=self.LOG_FORMAT, level=args.log_level, filename=args.log_file)\n else:\n 
logging.basicConfig(format=self.LOG_FORMAT, level=args.log_level)\n\n if args.whitelist:\n self.whitelist = args.whitelist.strip().split(',')\n\n if args.blacklist:\n self.blacklist += args.blacklist.strip().split(',')\n\n if args.smtp_host:\n self.smtp_host = args.smtp_host.strip()\n else:\n self.smtp_host = 'localhost'\n\n if args.smtp_port:\n try:\n self.smtp_port = int(args.smtp_port.strip())\n except:\n self.smtp_port = 25\n else:\n self.smtp_port = 25\n\n if args.smtp_user:\n self.smtp_user = args.smtp_user.strip()\n\n if args.smtp_password:\n self.smtp_password = args.smtp_password.strip()\n\n def check_processes(self):\n procs = psutil.get_process_list()\n\n for index, proc in enumerate(procs):\n try:\n # if a whitelist is defined, make sure the process is in it\n if self.whitelist:\n if not proc.name in self.whitelist:\n continue\n\n # if a blacklist is defined, make sure the process is not in it\n if self.blacklist:\n if proc.name in self.blacklist:\n continue\n\n mem = proc.get_memory_info()[0]\n\n if mem >= self.max_memory:\n logging.info('Killing %s at memory %s mb' % (proc.name, mem / (1024 * 1024)))\n name = proc.name\n # keep sending the signal until the process dies\n IS_ALIVE = True\n while IS_ALIVE:\n try:\n proc.send_signal(self.sig)\n except psutil.error.NoSuchProcess:\n logging.info('Process %s has been killed' % name)\n IS_ALIVE = False\n\n if not IS_ALIVE:\n if self.should_send_mail:\n self.send_mail(name, mem)\n\n except psutil.error.AccessDenied as exc:\n logging.error('Do not have access to %s' % exc.msg)\n pass\n\n def send_mail(self, name, mem):\n import smtplib\n from email.MIMEText import MIMEText\n\n msg = MIMEText('Had to kill %(process)s it was using %(memory)s megs of memory' % {\n 'process': name,\n 'memory': mem / (1024 * 1024),\n })\n\n msg['Subject'] = 'Killed a process' \n msg['From'] = self.smtp_from\n msg['To'] = ', '.join(self.smtp_recipients)\n\n mailServer = smtplib.SMTP(self.smtp_host, self.smtp_port)\n mailServer.ehlo()\n mailServer.starttls()\n mailServer.ehlo()\n\n if self.smtp_user and self.smtp_password:\n mailServer.login(self.smtp_user, self.smtp_password)\n\n mailServer.sendmail(self.smtp_user, self.smtp_recipients, msg.as_string())\n mailServer.close()\n\nif __name__ == '__main__':\n monitor = Memmon()\n monitor.check_processes()\n\n\n","repo_name":"sontek/pymemmon","sub_path":"pymemmon.py","file_name":"pymemmon.py","file_ext":"py","file_size_in_byte":6152,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"42675383206","text":"import numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\nplt.rcParams['font.sans-serif']=['simsun']\r\nplt.rcParams['axes.unicode_minus']=False\r\nimport xlrd\r\nimport xlwt\r\nimport pandas as pd\r\nplt.figure(figsize=(8,4))\r\nbook_1 = xlrd.open_workbook(\"../../DDPG_FCU/FCU_DDPG_1.xls\")\r\nsheet_1 = book_1.sheet_by_name(\"PfandPp\")\r\ntotal_reward_1= sheet_1.row_values(4392)\r\n\r\nbook_2 = xlrd.open_workbook(\"../../DDPG_FCU/FCU_DDPG_2.xls\")\r\nsheet_2 = book_2.sheet_by_name(\"PfandPp\")\r\ntotal_reward_2= sheet_2.row_values(4392)\r\n\r\nbook_3 = xlrd.open_workbook(\"../../DDPG_FCU/FCU_DDPG_3.xls\")\r\nsheet_3 = book_3.sheet_by_name(\"PfandPp\")\r\ntotal_reward_3= sheet_3.row_values(4392)\r\n\r\nbook_4 = xlrd.open_workbook(\"../../DDPG_FCU/FCU_DDPG_4.xls\")\r\nsheet_4 = book_4.sheet_by_name(\"PfandPp\")\r\ntotal_reward_4= sheet_4.row_values(4392)\r\n\r\nbook_5 = 
xlrd.open_workbook(\"../../DDPG_FCU/FCU_DDPG_5.xls\")\r\nsheet_5 = book_5.sheet_by_name(\"PfandPp\")\r\ntotal_reward_5= sheet_5.row_values(4392)\r\n\r\nbook_MAC_1 = xlrd.open_workbook(\"../../DDPG_FCU_KN/FCU_DDPG_PK_1.xls\")\r\nsheet_MAC_1 = book_MAC_1.sheet_by_name(\"PfandPp\")\r\ntotal_reward_MAC_1= sheet_MAC_1.row_values(4392)\r\n\r\nbook_MAC_2 = xlrd.open_workbook(\"../../DDPG_FCU_KN/FCU_DDPG_PK_2.xls\")\r\nsheet_MAC_2 = book_MAC_2.sheet_by_name(\"PfandPp\")\r\ntotal_reward_MAC_2= sheet_MAC_2.row_values(4392)\r\n\r\nbook_MAC_3 = xlrd.open_workbook(\"../../DDPG_FCU_KN/FCU_DDPG_PK_3.xls\")\r\nsheet_MAC_3 = book_MAC_3.sheet_by_name(\"PfandPp\")\r\ntotal_reward_MAC_3= sheet_MAC_3.row_values(4392)\r\n\r\nbook_MAC_4 = xlrd.open_workbook(\"../../DDPG_FCU_KN/FCU_DDPG_PK_4.xls\")\r\nsheet_MAC_4 = book_MAC_4.sheet_by_name(\"PfandPp\")\r\ntotal_reward_MAC_4= sheet_MAC_4.row_values(4392)\r\n\r\nbook_MAC_5 = xlrd.open_workbook(\"../../DDPG_FCU_KN/FCU_DDPG_PK_5.xls\")\r\nsheet_MAC_5 = book_MAC_5.sheet_by_name(\"PfandPp\")\r\ntotal_reward_MAC_5= sheet_MAC_5.row_values(4392)\r\n\r\nbook_bianlifa = xlrd.open_workbook(\"../../modelbasedcontrol.xls\")\r\nbook_baseline = xlrd.open_workbook(\"../../baseline.xls\")\r\nsheet_bianlifa = book_bianlifa.sheet_by_name(\"total_power\")\r\nbianlifa_power= sheet_bianlifa.row_values(4391)\r\nsheet_baseline = book_baseline.sheet_by_name(\"total_power\")\r\nbaseline_power= sheet_baseline.row_values(4391)\r\nprint(baseline_power)\r\nprint(bianlifa_power)\r\n#matplotlib.rcParams['text.usetex'] = True # 开启Latex风格\r\n#plt.figure(figsize=(10, 10), dpi=70) # 设置图像大小\r\n#style.use('ggplot') # 加载'ggplot'风格\r\n# f, ax = plt.subplots(1, 3) # 设置子图\r\n# plt.subplots_adjust(wspace=0.25)#子图很有可能左右靠的很近,调整一下左右距离\r\n\r\nX = ['1st','2nd','3th','4th','5th','6th','7th','8th','9th','10th','11st','12nd','13th','14th','15th','16th','17th','18th','19th','20th']\r\n# def fig0():\r\n# numoral_DDPG_reward={}\r\n# moreac_DDPG_reward={}\r\n# for i in range(0,20):\r\n# numoral_DDPG_reward[i]=(total_reward_1[i]+total_reward_2[i]+total_reward_3[i]+total_reward_4[i]+total_reward_5[i])/5\r\n# moreac_DDPG_reward[i]=(total_reward_MAC_1[i]+total_reward_MAC_2[i]+total_reward_MAC_3[i]+total_reward_MAC_4[i]+total_reward_MAC_5[i])/5\r\n# ax[0].plot(X,list(numoral_DDPG_reward.values()) ,label='DDPG' )#s点的大小\r\n# ax[0].plot(X,list(moreac_DDPG_reward.values()),label='DDPG_MAC' )\r\n# ax[0].plot(X, list(bianlifa_power), label='MBC')\r\n# ax[0].plot(X, list(baseline_power), label='RBC')\r\n# ax[0].legend(loc=\"best\")\r\n# #ax[0].set_xlabel('Number of training years')\r\n# ax[0].set_ylabel('power')\r\n# ax[0].set_title('(a) Average power')\r\n# #return list(numoral_DDPG_reward.values()),list(moreac_DDPG_reward.values()),list(bianlifa_power),list(baseline_power)\r\n# def fig1():#ddpg\r\n# ax[1].plot(X, total_reward_1, label='round1') # s点的大小\r\n# ax[1].plot(X, total_reward_2, label='round2')\r\n# ax[1].plot(X, total_reward_3, label='round3')\r\n# ax[1].plot(X, total_reward_4, label='round4')\r\n# ax[1].plot(X, total_reward_5, label='round5')\r\n# ax[1].legend(loc=\"lower left\")\r\n# #ax[1].set_xlabel('Number of training years')\r\n# ax[1].set_ylabel('power')\r\n# ax[1].set_title('(b) Five independent experiments of DDPG')\r\n# def fig2():#ddpg with split action space\r\n# ax[2].plot(X, total_reward_MAC_1, label='round1') # s点的大小\r\n# ax[2].plot(X, total_reward_MAC_2, label='round2')\r\n# ax[2].plot(X, total_reward_MAC_3, label='round3')\r\n# ax[2].plot(X, total_reward_MAC_4, label='round4')\r\n# ax[2].plot(X, 
total_reward_MAC_5, label='round5')\r\n# ax[2].legend(loc=\"lower left\")\r\n# #ax[2].set_xlabel('Number of training years')\r\n# ax[2].set_ylabel('power')\r\n# ax[2].set_title('(c) Five independent experiments of DDPG with MAC')\r\n#plt.title('Average cumulative rewards for five experiments ')\r\n\r\nnumoral_DDPG_reward={}\r\nmoreac_DDPG_reward={}\r\nfor i in range(0,20):\r\n numoral_DDPG_reward[i]=(total_reward_1[i]+total_reward_2[i]+total_reward_3[i]+total_reward_4[i]+total_reward_5[i])/5\r\n moreac_DDPG_reward[i]=(total_reward_MAC_1[i]+total_reward_MAC_2[i]+total_reward_MAC_3[i]+total_reward_MAC_4[i]+total_reward_MAC_5[i])/5\r\nplt.plot(X,list(numoral_DDPG_reward.values()) ,label='DDPG' )#s点的大小\r\nplt.plot(X,list(moreac_DDPG_reward.values()),label='DDPG PK', )\r\nplt.plot(X, list(bianlifa_power), label='MBC')\r\nplt.plot(X, list(baseline_power), label='RBC')\r\nplt.legend(loc=\"best\")\r\n\r\nworkbook = xlwt.Workbook(encoding = 'utf-8')\r\n# 创建一个worksheet\r\nworksheet_power = workbook.add_sheet('power')\r\nfor i in range(0,20):\r\n worksheet_power.write(i, 0, label=float(numoral_DDPG_reward[i]))\r\n worksheet_power.write(i, 1, label=float(moreac_DDPG_reward[i]))\r\n worksheet_power.write(i, 2, label=float((list(bianlifa_power))[i]))\r\n worksheet_power.write(i, 3, label=float(((baseline_power))[i]))\r\n#ax[0].set_xlabel('Number of training years')\r\n# plt.set_ylabel('Reward')\r\n# plt.set_title('(a) Average cumulative reward')\r\nworkbook.save('cal_power.xls')\r\n\r\n#plt.xlim(0, 10)\r\nplt.xlabel(\"训练的年数\")\r\nplt.ylabel(\"系统功耗(W)\")\r\n#plt.tight_layout()\r\n#plt.savefig(\"enerrgy for five experiments_chinese.jpg\",dpi=800)\r\nplt.show()\r\n\r\n# workbook = xlwt.Workbook(encoding = 'utf-8')\r\n# worksheet_power = workbook.add_sheet('power')\r\n\r\n\r\n\r\n","repo_name":"nuzuichifan/FCU","sub_path":"FCU/draw/energy/energy_chinese.py","file_name":"energy_chinese.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"22443083510","text":"import csv\nimport sys\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\nTEST_SIZE = 0.4\n\nMONTHS = {\n \"Jan\": 0, \"Feb\": 1, \"Mar\": 2,\n \"Apr\": 3, \"May\": 4, \"June\": 5,\n \"Jul\": 6, \"Aug\": 7, \"Sep\": 8,\n \"Oct\": 9, \"Nov\": 10, \"Dec\": 11\n}\n\n\ndef main():\n\n # Check command-line arguments\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python shopping.py data\")\n\n # Load data from spreadsheet and split into train and test sets\n evidence, labels = load_data(sys.argv[1])\n X_train, X_test, y_train, y_test = train_test_split(\n evidence, labels, test_size=TEST_SIZE\n )\n\n # Train model and make predictions\n model = train_model(X_train, y_train)\n predictions = model.predict(X_test)\n sensitivity, specificity = evaluate(y_test, predictions)\n\n # Print results\n print(f\"Correct: {(y_test == predictions).sum()}\")\n print(f\"Incorrect: {(y_test != predictions).sum()}\")\n print(f\"True Positive Rate: {100 * sensitivity:.2f}%\")\n print(f\"True Negative Rate: {100 * specificity:.2f}%\")\n\n\ndef load_data(filename):\n \"\"\"\n Load shopping data from a CSV file `filename` and convert into a list of\n evidence lists and a list of labels. 
Return a tuple (evidence, labels).\n\n evidence should be a list of lists, where each list contains the\n following values, in order:\n - Administrative, an integer\n - Administrative_Duration, a floating point number\n - Informational, an integer\n - Informational_Duration, a floating point number\n - ProductRelated, an integer\n - ProductRelated_Duration, a floating point number\n - BounceRates, a floating point number\n - ExitRates, a floating point number\n - PageValues, a floating point number\n - SpecialDay, a floating point number\n - Month, an index from 0 (January) to 11 (December)\n - OperatingSystems, an integer\n - Browser, an integer\n - Region, an integer\n - TrafficType, an integer\n - VisitorType, an integer 0 (not returning) or 1 (returning)\n - Weekend, an integer 0 (if false) or 1 (if true)\n\n labels should be the corresponding list of labels, where each label\n is 1 if Revenue is true, and 0 otherwise.\n \"\"\"\n evidence, labels = [], []\n with open(filename) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n input = []\n input.append(int(row[\"Administrative\"]))\n input.append(float(row[\"Administrative_Duration\"]))\n input.append(int(row[\"Informational\"]))\n input.append(float(row[\"Informational_Duration\"]))\n input.append(int(row[\"ProductRelated\"]))\n input.append(float(row[\"ProductRelated_Duration\"]))\n input.append(float(row[\"BounceRates\"]))\n input.append(float(row[\"ExitRates\"]))\n input.append(float(row[\"PageValues\"]))\n input.append(float(row[\"SpecialDay\"]))\n input.append(MONTHS[row[\"Month\"]])\n input.append(int(row[\"OperatingSystems\"]))\n input.append(int(row[\"Browser\"]))\n input.append(int(row[\"Region\"]))\n input.append(int(row[\"TrafficType\"]))\n input.append(1 if row[\"VisitorType\"] == \"Returning_Visitor\" else 0)\n input.append(1 if row[\"Weekend\"] == \"TRUE\" else 0)\n evidence.append(input)\n labels.append(1 if row[\"Revenue\"] == \"TRUE\" else 0)\n\n return (evidence, labels)\n\n\ndef train_model(evidence, labels):\n \"\"\"\n Given a list of evidence lists and a list of labels, return a\n fitted k-nearest neighbor model (k=1) trained on the data.\n \"\"\"\n model = KNeighborsClassifier(n_neighbors=1)\n #model = Perceptron()\n #model = GaussianNB()\n \n model.fit(evidence, labels)\n \n return model\n\n\ndef evaluate(labels, predictions):\n \"\"\"\n Given a list of actual labels and a list of predicted labels,\n return a tuple (sensitivity, specificity).\n\n Assume each label is either a 1 (positive) or 0 (negative).\n\n `sensitivity` should be a floating-point value from 0 to 1\n representing the \"true positive rate\": the proportion of\n actual positive labels that were accurately identified.\n\n `specificity` should be a floating-point value from 0 to 1\n representing the \"true negative rate\": the proportion of\n actual negative labels that were accurately identified.\n \"\"\"\n true_positive, true_negative = 0, 0\n sensitivity, specificity = float(0), float(0)\n for actual, predicted in zip(labels, predictions):\n if actual == 1:\n true_positive += 1\n if actual == predicted:\n sensitivity += 1\n else:\n true_negative += 1\n if actual == predicted:\n specificity += 1\n \n return (sensitivity/true_positive, specificity/true_negative)\n\n \nif __name__ == \"__main__\":\n 
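# usage sketch (filename is illustrative): python shopping.py shopping.csv -- prints Correct/Incorrect counts plus the true positive and true negative rates\n 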
main()\n","repo_name":"Dchoi0729/cs50ai","sub_path":"shopping/shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11612578693","text":"import socket\nimport sys\n\nsock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nsock.connect(('127.0.0.1',3004))\n\nwhile True:\n\ttry:\n\t\tprint(\"\\nEnter FileName To Open At Server: \")\n\t\tfileName = raw_input()\n\t\tsock.sendall(fileName)\n\t\tres = sock.recv(5)\n\t\tif(res == '1'):\n\t\t\tprint(\"\\n1. Open File\")\n\t\t\tn = input()\n\t\t\tif(n == 1):\n\t\t\t\tsock.sendall(\"get\")\n\t\t\t\tfileData = sock.recv(100)\n\t\t\t\tprint(fileName+\"\\n===================\")\n\t\t\t\tprint(fileData)\n\t\t\telse:\n\t\t\t\tbuff = sock.recv(20)\n\t\t\t\tprint(buff)\n\t\telse:\n\t\t\tprint(\"File NOT FOUND \"+str(res))\n\t\t\tprint(\"\\n2. Send a File\")\n\t\t\tn = input()\n\t\t\tif(n == 2):\n\t\t\t\tsock.sendall(\"put\")\n\t\t\t\tcontent = \"\"\n\t\t\t\twith open(fileName,'r') as f:\n\t\t\t\t\tcontent = f.read()\n\t\t\t\tsock.sendall(content)\n\t\t\t\tresp = sock.recv(1)\n\t\t\t\tprint(\"Successfully Tranferred File\")\n\t\t\telse:\n\t\t\t\tcontinue\n\tfinally:\n\t\tpass\n","repo_name":"amrithm98/NP-Lab","sub_path":"Expt14_FileServer/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"18813087299","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 7 15:16:17 2017\nUpdated on 10/11/2019\n\n@author: josh\n\n@purpose: Exo-clutter GMTI processing.\n\"\"\"\nfrom numpy import *\nfrom numpy.fft import *\nfrom numpy.linalg import *\n# from scipy.signal import chebwin, hanning, hamming, nuttall, parzen, triang\nfrom STAP_helper import getDopplerLine, window_taylor, \\\n getWindowedFFTRefPulseUpdated, rangeCompressData, getRotationOffsetMatrix\nfrom SlimSDRDebugDataGMTIParserModule import SlimSDRGMTIDataParser\n# import SDRParsing\n# from TrackBeforeDetectModule import DwellTrackManager\nfrom ClutterCompensatedExoClutterGMTIModule import computeExoMDV, \\\n detectExoClutterMoversRVMap, getExoClutterDetectedMoversRV\nfrom MoverTruthDataModule import MoverTruthData\nfrom ExoConfigParserModule import ExoConfiguration\nfrom DTEDManagerModule import DTEDManager\n# import ctypes\nimport time\nfrom stanaggenerator import StanagGenerator\nimport csv\nfrom matplotlib.pyplot import *\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport plotly.io as pio\n\n# pio.renderers.default = 'svg'\npio.renderers.default = 'browser'\n\n# Constants\nc0 = 299792458.0\nkb = 1.3806503e-23\nT0 = 290.0\n\n# Conversions\nDTR = pi / 180\n\n# load in the user defined processing parameters\nconfig = ExoConfiguration()\nprint(config)\n\n# grab the config parameters\nNcpi = config.Ncpi\nnearRange_partial_pulse_percent = config.nearRangePartialPulsePercent\npartial_pulse_percent = config.farRangePartialPulsePercent\nrange_interp_factor = config.rangeInterpFactor\nDop_interp_factor = config.dopInterpFactor\nKexo = config.exoBroadeningFactor\nPfa = config.FAFactor\nfDelay = config.fDelay\nnumChan = 2\nTRUTH_EXISTS = config.truthExists\n\n# processing presum value\nPresum = 1\n\n# %%\n\"\"\"Generate the filenames we will use\"\"\"\nrawDirectory = config.rawDir\ndebugDirectory = config.debugDir\nstanagDirectory = config.stanagDir\nvideoDirectory = config.videoDir\ntruthDirectory = 
config.truthDir\nmonth = config.month\nday = config.day\nyear = config.year\ndateString = config.dateString\nsarName = config.sarFilename\n\n# generate the name of the file with SAR data in the ARTEMIS format\nxmlfilename = '%s/%s/%s.xml' % (rawDirectory, dateString, sarName)\nbasename = '%s/%s/%s' % (debugDirectory, dateString, sarName)\nstanagName = '%s/%s/%s' % (stanagDirectory, dateString, sarName)\ntruthName = '%s/%d/%s/GroundMoversTruthGPS_%s.dat' % (\n truthDirectory, year, dateString, dateString)\ntruthCollectionFilename = \\\n '%s/%s/%s_DetectionTruthData_crap.csv' \\\n % (rawDirectory, dateString, sarName)\nif not TRUTH_EXISTS:\n truthName = ''\n\n# %%\n\"\"\" Open up our data files for reading\"\"\"\n# open the moverInjectedData file for reading binary data\nradar = SlimSDRGMTIDataParser(basename, xmlfilename, numChan, Ncpi)\n\n# print radar info\nprint(radar)\n\n# open the Truth data if it exists\nif TRUTH_EXISTS:\n truthData = MoverTruthData(\n truthName, radar.gpsData[0].latConv, radar.gpsData[0].lonConv,\n radar.xmlData.channel.wavelengthM, radar.xmlData.PRF)\n print(truthData)\n\n# initialize the DTED manager, which will then be passed around to everywhere\n# that needs to look-up DTED data\ndtedManager = DTEDManager(config.dtedDir)\n\nsg = StanagGenerator()\nsg.openFileForWriting(stanagName)\n# [mission_plan, flight_plan, platform_type, ]\nflightPlan = '%02d%02d%02d%06d' % (month, day, year - 2000, config.colTime)\nmissionData = \\\n ['SlimSARLCMCX', flightPlan, 255, 'WBBellyPod', year, month, day]\nsg.writeMissionSegment(missionData)\n\n# %%\n\"\"\"Calculate some parameters for processing\"\"\"\n# define the rotation offsets from the IMU to the mounted gimbal frame\nrotationOffset = getRotationOffsetMatrix(\n radar.xmlData.gimbalSettings.rollOffsetD * DTR,\n radar.xmlData.gimbalSettings.pitchOffsetD * DTR,\n radar.xmlData.gimbalSettings.yawOffsetD * DTR)\n\n# Grab the latitude and longitude conversion factors\nlatConv = radar.gpsData[0].latConv\nlonConv = radar.gpsData[0].lonConv\n\n# calculate near range and some other items\nhAgl = radar.xmlData.configAlt\nPRF = radar.xmlData.PRF\nlamda = radar.xmlData.channel.wavelengthM\nsrate = radar.xmlData.channel.srateHz\ndt = 1 / srate\ntp = radar.xmlData.channel.pulseLengthS\nNchan = radar.TxNchan * radar.RxNchan\n# get this from the noise figure we use for the radar\nF = 10.0 ** (3.0 / 10.0)\nN0 = kb * T0 * F\nNoisePow = N0 * srate\nsigma_n = sqrt(NoisePow)\nMPP = c0 / srate / range_interp_factor\n\n# determine near and far range information\nRxOnS = radar.xmlData.channel.RxOnTAC / radar.xmlData.TAC\nRxOffS = radar.xmlData.channel.RxOffTAC / radar.xmlData.TAC\nTxOnS = radar.xmlData.channel.TxOnTAC / radar.xmlData.TAC\nTxOffS = radar.xmlData.channel.TxOffTAC / radar.xmlData.TAC\n\nconfigNearRange = (RxOnS - TxOnS - fDelay / radar.xmlData.TAC) * c0 / 2.0\ndesiredNearRange = (\n RxOnS - TxOnS - fDelay / radar.xmlData.TAC - tp * nearRange_partial_pulse_percent) * c0 / 2.0\nconfigFarRange = (RxOffS - TxOnS - tp) * c0 / 2.0\ndesiredFarRange = (\n RxOffS - TxOnS - tp * (1.0 - partial_pulse_percent)) * c0 / 2.0\n\n# the length of the total convolution output\nconvOutputLength = radar.Nsam + radar.pulseLengthN - 1\n# this is the number of bins with a full convolution\nNFullConvBins = radar.Nsam - radar.pulseLengthN + 1\n\n# calculate the bin index for the desired near range\nrxPulseLeadingZeroPadLength = int(tp * nearRange_partial_pulse_percent / dt)\n\n# calculate the number of bins from the desiredNearRangeOffsetInd\nnumRangeSamples = 
int((\n RxOffS - RxOnS - tp * (1.0 - partial_pulse_percent)\n + tp * nearRange_partial_pulse_percent) / dt) + 1\n\n# let's calculate the last valid range index\n# Nrv = NFullConvBins * range_interp_factor\nNrv = numRangeSamples * range_interp_factor\n\n# we will get the FastTimeFFTlength from the matched filter\nFastTimeFFTlength = radar.ftFFTLength\n# and inverse FFT length to get the desired range interpolation factor\nFastTimeIFFTlength = FastTimeFFTlength * range_interp_factor\n# and the Doppler FFT length\nDopFFTlength = int(2 ** (ceil(log2(Dop_interp_factor * Ncpi))))\ndopStep = PRF / DopFFTlength\nvelStep = dopStep * lamda / 2.0\n# Nrv = FastTimeIFFTlength\n\n# Nrv = FastTimeIFFTlength\n# make an array of the range bins we want to look at\nmyRanges = \\\n (desiredNearRange * 2.0 + arange(Nrv, dtype='float32') * MPP) / 2.0\n# re-assign the far range\nactualFarRange = myRanges[-1]\n# compute the middle range\nmidRange = (myRanges[-1] + myRanges[0]) / 2.0\n\nradar.setProcParameters(desiredNearRange, actualFarRange)\n\n# compute the CPI sample time\nTcpi = radar.CPILength * radar.xmlData.PRI\n\n# %%\n\"\"\"\nPre-compute the Doppler window used to reduce sidelobes in the Doppler spectrum\n\"\"\"\n# myDopWindow = zeros(Ncpi, dtype='float32', order='C')\nmyDopWindow = window_taylor(Ncpi, 11, -70)\nslowTimeWindow = myDopWindow.reshape((Ncpi, 1)).dot(ones((1, Nrv)))\n\n# %%\n\"\"\"Loop over the CPI's looking for detections\"\"\"\n\n# allocate memory for recording all of the detection results\ntruthRanges = []\ntruthDops = []\ntruthPositions = []\ntruthVelocities = []\ntruthRadVels = []\ntruthAzs = []\nnoisePowers = []\nnoisePowerLPFs = []\nnoisePowerVarLPFs = []\nONCE = 40\n\n# Open a file for writing the data to a CSV file\nDO_HEADER = True\ncsvID = open(truthCollectionFilename, 'w')\ncsvWriter = csv.writer(csvID)\ncsvHeader = [\n 'Time', 'AirLat', 'AirLon', 'AirEle', 'Yaw', 'Pitch', 'Roll', 'Pan', 'Tilt',\n 'TruthLat', 'TruthLon', 'TruthEle', 'TruthR', 'TruthRDot', 'LatHat',\n 'LonHat', 'EleHat', 'RHat', 'RDotHat', 'AzimuthDOA']\nif DO_HEADER:\n csvWriter.writerow(csvHeader)\n\n# Instantiate our DwellTrackManager\n# dtManager = DwellTrackManager(radar)\n\n# Compute the separation between antenna phase centers\nantSep = abs(\n radar.xmlData.CCS.antSettings[0].portSettings[1].xOffsetM \\\n - radar.xmlData.CCS.antSettings[0].portSettings[2].xOffsetM)\nantSep = 19.6e-2\n# antSep = 1.0\ndetectionCount = 0\nONCE = 397\n# ONCE = 390\nONCE = 1\n\n# Initialize a low-pass filter version of the noisePowerLPF\nLPFCutoffFreqHz = 0.25\nLPFTimeConstant = 1 / (2 * pi * LPFCutoffFreqHz)\nLPFDecayFactor = 1 - exp(-radar.CPITime / LPFTimeConstant)\nnoisePowerLPF = 0\nnoisePowerVarLPF = 0.02\ntimeSinceValidCPI = 0\nRFICounter = 0\n\nstartTime = time.time()\n# while radar.nextCPIExists():\nwhile( ONCE ):\n timeSinceValidCPI += radar.CPITime\n # grab the data for the current CPI\n # rawdata, nextCPI = radar.getNextCPIData()\n rawdata = radar.getCPIData( ONCE )\n nextCPI = 0\n ONCE = 0\n # ONCE -= 1\n # ONCE += 1\n\n # Create an empty array to which we will append the data for a row\n cpiData = []\n\n # Get the position of the airlane and calculate the boresight Vec\n antPos, antVel = radar.getPlatformPosVel()\n # antPos, antVel = radar.getAntennaPosVel()\n boreSightVec, effGrazeI, effAzI = radar.getCPIBoresightVector()\n headingI = arctan2(antVel.item(0), antVel.item(1))\n\n \"\"\" Detection ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\"\"\"\n # Detect the movers in the exo-clutter. 
In the exo-clutter, in the absence \n # of a moving target, the radar returns are expected to be noise. As we \n # have a pretty good idea of the noise figure of the radar, we can use \n # this to set a threshold based on a false alarm rate.\n # Remember that we added in extra noise to hide the effects of the Hilbert \n # Transform\n\n dopCenLine, dopUpLine, dopDownLine, grazeOverRanges = getDopplerLine(\n effAzI, myRanges, antVel, antPos, radar, dtedManager, lamda,\n radar.azBeamwidth / 2.0, PRF)\n radVelErrors = abs(dopUpLine - dopDownLine) * lamda / 2.0\n # The high-fidelity Doppler line is based on the high-fidelity grazing \n # angle computation. Which should probably be reserved probably for when \n # doing either GMTI Backprojection or when better precision is needed for \n # the slow-time phase compensation.\n # (dopCenLine, dopUpLine, dopDownLine, grazeOverRanges,\n # surfaceHeights, numIterations) = getDopplerLineHiFi( effAzI, myRanges,\n # antVel, antPos, radar, dtedName, dtedCorrection, lamda,\n # azNullBeamwidthHalf, PRF)\n\n # check to make sure that we did not have an error\n if not any(rawdata[0]):\n print(\"We had a problem getting the CPI data for CPI #%d\" % nextCPI)\n break\n\n if (nextCPI - 1) % 10 == 0:\n endTime = time.time()\n print(\"endTime - startTime = %0.9f\" % (endTime - startTime))\n startTime = time.time()\n print(\"CPI #%d of %d\" % (nextCPI - 1, radar.NumberOfCPIs))\n\n \"\"\"Range compress the data from each channel for the CPI\"\"\"\n slowtimes = arange(Ncpi).reshape((Ncpi, 1)) / PRF\n slowtimePhases = \\\n exp(1j * 2 * pi * slowtimes.dot(-dopCenLine.reshape(1, Nrv)))\n # slowtimePhases = 1.0\n\n # Python call for range compression of the CPI data\n rcdopdata = []\n for ci in range(Nchan):\n # zero out the DC frequencies in the matched filter\n myMatchedFilter = radar.matchedFilter[ci] + 0.0\n myMatchedFilter[:3] = 0\n myMatchedFilter[-3:] = 0\n cpidataP = rangeCompressData(\n rawdata[ci], myMatchedFilter, FastTimeFFTlength,\n FastTimeIFFTlength, Nrv, Ncpi)\n # Doppler FFT\n rcdopdata.append(fft(\n cpidataP * slowTimeWindow * slowtimePhases, DopFFTlength, 0).T)\n\n # Now compute the sum of the channels and the phase\n chanSum = rcdopdata[0] + rcdopdata[1]\n magData = abs(chanSum)\n antAz = \\\n arcsin(angle(rcdopdata[0].conj() * rcdopdata[1]) \\\n * lamda / (2 * pi * antSep))\n # antAz = zeros_like( antAz )\n # monoPhase = angle( rcdopdata[ 0 ].conj() * rcdopdata[ 1 ] )\n\n # Compute the upper and lower MDV along with the wrap velocity\n wrapVel = PRF * lamda / 4.0\n MDV, MDVapproach, MDVrecede = computeExoMDV(\n antVel, radar.azBeamwidth, grazeOverRanges[-1], effAzI,\n headingI, Kexo)\n threshVel = max(MDV, radar.radVelRes)\n (detMap, noisePower, thresh) = detectExoClutterMoversRVMap(\n magData, threshVel, -threshVel, Pfa, radar)\n # Store the noise powers\n noisePowers.append(noisePower)\n if not noisePowerLPF:\n noisePowerLPF = 10 * log10(noisePower)\n noisePowerVarLPF = 0.02\n timeSinceValidCPI = 0\n else:\n noisePowerdB = 10 * log10(noisePower)\n # Only update the LPF if a valid CPI was encountered\n if nextCPI < 100000:\n if noisePowerdB < noisePowerLPF + 1.0:\n LPFDecayFactor = 1 - exp(-timeSinceValidCPI / LPFTimeConstant)\n deltaNoisePower = noisePowerdB - noisePowerLPF\n # noisePowerLPF = ( 1 - LPFDecayFactor ) * noisePowerLPF \\\n # + LPFDecayFactor * noisePowerdB\n noisePowerLPF = noisePowerLPF + LPFDecayFactor * deltaNoisePower\n noisePowerVarLPF = (1 - LPFDecayFactor) \\\n * (noisePowerVarLPF + LPFDecayFactor * deltaNoisePower ** 2)\n # 
noisePowerVarLPF = ( 1 - LPFDecayFactor ) * noisePowerVarLPF \\\n # + LPFDecayFactor * deltaNoisePower**2\n timeSinceValidCPI = 0\n else:\n print(\"CPI with likely RFI encountered.\")\n RFICounter += 1\n else:\n if (noisePowerdB < noisePowerLPF + 5.0 * sqrt(noisePowerVarLPF)):\n LPFDecayFactor = 1 - exp(-timeSinceValidCPI / LPFTimeConstant)\n deltaNoisePower = noisePowerdB - noisePowerLPF\n # noisePowerLPF = ( 1 - LPFDecayFactor ) * noisePowerLPF \\\n # + LPFDecayFactor * noisePowerdB\n noisePowerLPF = noisePowerLPF + LPFDecayFactor * deltaNoisePower\n noisePowerVarLPF = (1 - LPFDecayFactor) \\\n * (noisePowerVarLPF + LPFDecayFactor * deltaNoisePower ** 2)\n # noisePowerVarLPF = ( 1 - LPFDecayFactor ) * noisePowerVarLPF \\\n # + LPFDecayFactor * deltaNoisePower**2\n timeSinceValidCPI = 0\n else:\n print(\"CPI with likely RFI encountered.\")\n RFICounter += 1\n noisePowerLPFs.append(noisePowerLPF + 0.0)\n noisePowerVarLPFs.append(noisePowerVarLPF + 0.0)\n\n \"\"\" Segmentation |||||||||||||||||||||||||||||||||||||||||||||||||||||||\"\"\"\n targetList = getExoClutterDetectedMoversRV(\n detMap, magData, antAz, lamda, wrapVel, PRF, desiredNearRange, MPP,\n noisePower, radar, radVelErrors, velStep)\n # now have all of the detections go through and compute their position and \n # range rate\n\n # Add the detections we got to the dwell track manager\n # nonRedundantNumTrackUpdates, updateKeys = dtManager.addNewDetections(\n # targetList, radar)\n # numReportableTracks, reportableTrackIndices =\\\n # dtManager.getReportableTrackNumber()\n\n \"\"\" Parameter Estimation |||||||||||||||||||||||||||||||||||||||||||||||\"\"\"\n # First, compute the hAgl (to get closer to the actual hAgl, we should\n # lookup the DTED value around the center of the swath)\n cenSwathPointHat = antPos + boreSightVec * midRange\n cenlat = cenSwathPointHat.item(1) / latConv\n cenlon = cenSwathPointHat.item(0) / lonConv\n hAglHat = antPos.item(2) - dtedManager.getDTEDPoint(cenlat, cenlon)\n\n detectionCount += len(targetList)\n # begin the loop over the targets and pass along to them all of the \n # information they should need to compute their position and radial\n # velocity\n for i in range(len(targetList)):\n # parameters: hAgl, airPos_i, airVel_i, boresightVec, dtedName, \n # dtedCorrection\n tPosHat, tRangeHat, tRadVelHat, tAntAzR, tAzI = \\\n targetList[i].estimateParameters(\n hAglHat, antPos, antVel, boreSightVec, effAzI, dtedManager)\n\n # We need to generate the STANAG info and write it out to the stream. 
We get \n # the data from the targets themselves for the target reports, and from \n # the MoverPositionData object for the dwell\n dwellData = radar.getStanagDwellSegmentData(\n len(targetList), dtedManager)\n # dwellData = radar.getStanagDwellSegmentData(numReportableTracks)\n sg.writeDwellSegment(dwellData)\n # dtManager.generateStanagReports(sg, hAglHat, radar, dtedName,\n # dtedCorrection)\n tarNumber = 0\n for i in range(len(targetList)):\n targetReportData = \\\n targetList[i].getStanagTargetReportData(tarNumber)\n sg.writeTargetReport(targetReportData)\n tarNumber += 1\n\n # if (detectionCount):\n # break\n # let's grab the truth data's ranges and Dopplers\n if TRUTH_EXISTS:\n radarTime = radar.getRadarTime()\n truthR, truthDop, truthRadVel, truthTarRadVel, truthEle, truthAz = \\\n truthData.getRangeDopplerAntennaAnglesForTime(\n radarTime, antPos, antVel, effAzI, effGrazeI, boreSightVec)\n # truthTargetReports = truthData.getStanagTargetTruthReportData(\n # tarNumber, radarTime, antPos, radar)\n truthPos, truthVel = truthData.getPositionVelocityAtTime(radarTime)\n\n # save the truthData to arrays for plotting later\n truthRanges.append(truthR)\n truthDops.append(truthDop)\n truthAzs.append(truthAz)\n truthPositions.append(truthPos)\n truthVelocities.append(truthVel)\n truthRadVels.append(truthRadVel)\n # determine if the target was in the beam or not\n truthInBeam = False\n for i in range(truthData.numTracks):\n # sg.writeTargetReport( truthTargetReports[i] )\n if (abs(truthAz[i]) < radar.azBeamwidth\n and truthR[i] < myRanges[-1]\n and truthR[i] > myRanges[0]):\n truthInBeam = True\n if (truthInBeam):\n # print( \"Azimuth: {}, Range: {}\".format(\n # truthAz * 180 / pi, truthR ) )\n # rangeInd = int( around( ( truthR - myRanges[ 0 ] ) * 2 / MPP ) )\n # velInd = int( around( truthRadVel / velStep ) )\n # azEst = antAz[ rangeInd, velInd ] / DTR\n # print( \"velInd: %d, rangeInd: %d, estimated Az: %0.4f deg\" % (\n # velInd, rangeInd, azEst ) )\n\n # Need to loop through the detections and find the one that\n # corresponds to the truth data so that we can get the phase and\n # other information from the range-Doppler map and spit it out to\n # the CSV record. This is preferrable to simplying using the ind\n # based on the truth data to grab it from the range-Doppler map\n # because there could be errors in the truth data, and it also\n # would be a single pixel look up instead of an average for all\n # the pixels that would correspond to the target response there.\n rangeDeltaThresh = radar.rngRes * 7\n velDeltaThresh = radar.radVelRes * 1\n closeTargets = []\n if (len(targetList) > 0):\n for i in range(len(targetList)):\n # We know there is only one truth target and so we are going\n # to forgo an additional for-loop over the truth targets\n deltaRange = abs(truthR[0] - targetList[i].rangeM)\n # print( 'deltaRange: %0.2f, truthVel: %0.2f, estVel: %0.2f' \\\n # % ( deltaRange, truthRadVel[ 0 ],\n # targetList[ i ].radVelMPerS ) )\n deltaVel = \\\n abs(truthRadVel[0] - targetList[i].radVelMPerS)\n\n if (deltaRange < rangeDeltaThresh\n and deltaVel < velDeltaThresh):\n euclideanDist = sqrt(deltaRange ** 2 + deltaVel ** 2)\n closeTargets.append(\n (i, deltaRange, deltaVel, euclideanDist,\n targetList[i].maxMag))\n # Let's choose the target that is close and brightest\n\n if (not closeTargets):\n continue\n\n # Now, let's compute the euclidian distance. 
If any of them are\n # close, we will take the one with the largest magnitude\n minEuclideanDist = 0\n maxMagnitude = 0\n closestCloseTarget = 0\n brightestCloseTarget = 0\n for i in range(len(closeTargets)):\n if (i == 0):\n minEuclideanDist = closeTargets[i][3]\n closestCloseTarget = i\n maxMagnitude = closeTargets[i][4]\n brightestCloseTarget = i\n continue\n if (closeTargets[i][3] < minEuclideanDist):\n minEuclideanDist = closeTargets[i][3]\n closestCloseTarget = i\n if (closeTargets[i][4] > maxMagnitude):\n maxMagnitude = closeTargets[i][4]\n brightestCloseTarget = i\n if (closestCloseTarget != brightestCloseTarget):\n print(\"The closest target is not the brightest target!\")\n print(\"\\tClosest target is #%d, with deltaR:%0.2f m, deltaVel:%0.2f m/s, magnitude:%0.2f dB\" \\\n % (closeTargets[closestCloseTarget][0],\n closeTargets[closestCloseTarget][1],\n closeTargets[closestCloseTarget][2],\n 20 * log10(closeTargets[closestCloseTarget][4])))\n print(\"\\tBrightest target is #%d, with deltaR:%0.2f m, deltaVel:%0.2f m/s, magnitude:%0.2f dB\" \\\n % (closeTargets[brightestCloseTarget][0],\n closeTargets[brightestCloseTarget][1],\n closeTargets[brightestCloseTarget][2],\n 20 * log10(closeTargets[brightestCloseTarget][4])))\n print(\"\\tWe will be taking the brightest target.\")\n closestTarget = closeTargets[brightestCloseTarget][0]\n print(\"Detection #%d matches the truth data.\" \\\n % (closestTarget))\n # Append all of the data of record to the cpi data record\n # array\n yawR, pitchR, rollR = radar.getPlatformAttitude()\n panR, tiltR = radar.getGimbalPanTilt()\n cpiData.append(radarTime)\n cpiData.append(antPos.item(1) / latConv)\n cpiData.append(antPos.item(0) / lonConv)\n cpiData.append(antPos.item(2))\n cpiData.append(yawR[0])\n cpiData.append(pitchR[0])\n cpiData.append(rollR[0])\n cpiData.append(panR)\n cpiData.append(tiltR)\n cpiData.append(truthPos[0].item(1) / latConv)\n cpiData.append(truthPos[0].item(0) / lonConv)\n cpiData.append(truthPos[0].item(2))\n cpiData.append(truthR[0])\n cpiData.append(truthTarRadVel[0])\n cpiData.append(\n targetList[closestTarget].posI[1, 0] / latConv)\n cpiData.append(\n targetList[closestTarget].posI[0, 0] / lonConv)\n cpiData.append(\n targetList[closestTarget].posI[2, 0])\n cpiData.append(targetList[closestTarget].rangeM)\n cpiData.append(targetList[closestTarget].tarRadVelMPerS)\n # Note that in this case the antAzR is actually the phase\n # and not the antenna azimuth angle\n cpiData.append(targetList[closestTarget].antAzR)\n csvWriter.writerow(cpiData)\n # break\n\ncsvID.close()\n## close the stanag file\nsg.closeFileForWriting()\n\nnoisePowerdBs = 10 * log10(abs(array(noisePowers)))\nthresholds = array(noisePowerLPFs) + 1.0 # 5.0 * sqrt(array(noisePowerVarLPFs))\nprint(\"%d CPI's with RFI encountered.\" % (RFICounter))\n#\n#\nvelStep = radar.wrapVel * 2 / DopFFTlength\n# Plot some of the results\n# magData = fftshift(20*log10(abs(rcdopdata.T)), axes=1)\n# magdopdata = fftshift(abs(rcdopdata.T), axes=1)\nmaxMagdata = magData.max()\ndetInd = nonzero(detMap)\ndetRanges = detInd[0] * MPP / 2 + desiredNearRange\ndetVels = -detInd[1] * radar.wrapVel * 2 / DopFFTlength\n# magData -= maxlogdata\n# get the indices for any of the detections\n# detInd = nonzero(detMap)\n# detRanges = detInd[0] * MPP/2 + desiredNearRange\n# detDops = detInd[1] * (PRF / DopFFTlength) - PRF/2\n# figure();imshow( antAz / DTR, cmap='jet', origin='lower',\n# extent=[ 0.5 * velStep, -( radar.wrapVel * 2 - velStep / 2.0 ), \n# myRanges[ 0 ] - MPP / 4, myRanges[ -1 ] + 
MPP / 4 ] )\n# colorbar()\n# axis( 'tight' );xlabel( 'Doppler (Hz)' );ylabel( 'Range (m)' )\n# xlabel( 'Radial Velocity (m/s)' )\n\nfigure()\nimshow(20 * log10(abs(magData)), cmap='jet', origin='lower',\n extent=[0.5 * velStep, (radar.wrapVel * 2 - velStep / 2.0),\n myRanges[0] - MPP / 4, myRanges[-1] + MPP / 4])\n# plot the clutter boundary lines\naxvline(x=threshVel, color='red')\naxvline(x=radar.wrapVel * 2 - threshVel, color='red')\n# plot( dopCenLine * lamda / 2.0, myRanges,'k--', dopUpLine * lamda / 2.0,\n# myRanges, 'r-.', dopDownLine * lamda / 2.0, myRanges, 'r-.' )\n# plot( detVels, detRanges, 'y^' )\n# for i in range(len(targetList)):\n# detRange, detVel, maxLogVal = targetList[i].getRangeVelocity()\n# plot(detVel, detRange, 'ys')\n# for i in range(len(truthRanges)):\n# plot(truthDops[i][0], truthRanges[i][0], 'ro', markersize=15, \n# fillstyle='none')\nif (TRUTH_EXISTS):\n for i in range(truthData.numTracks):\n plot(truthRadVel[i], truthR[i], 'ro', markersize=15,\n fillstyle='none')\n title('Range-Dop Tar in beam:%d, ant Az:%0.2f deg, CPI #%d (pulses %d-%d)' \\\n % (truthInBeam, truthAz[0] * 180 / pi, radar.CPICounter - 1,\n (radar.CPICounter - 1) * Ncpi, radar.CPICounter * Ncpi - 1))\nelse:\n title('Range-Doppler CPI #%d (pulses %d-%d)' % (\n radar.CPICounter - 1, (radar.CPICounter - 1) * Ncpi,\n radar.CPICounter * Ncpi - 1))\ncolorbar()\naxis('tight')\nxlabel('Doppler (Hz)')\nylabel('Range (m)')\nxlabel('Radial Velocity (m/s)')\nclim([20 * log10(maxMagdata) - 60, 20 * log10(maxMagdata)])\n\nprint(\"We have finished all %d CPIs!!!\" % radar.CPICounter)\n","repo_name":"jsbudge/clutter_discretes","sub_path":"ExoClutterGMTIProcessing_SlimSDR_Monopulse_CollectData.py","file_name":"ExoClutterGMTIProcessing_SlimSDR_Monopulse_CollectData.py","file_ext":"py","file_size_in_byte":26669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29403870006","text":"import time\nimport click\nimport digitalocean\n\nfrom zbuilder.dns import dnsUpdate, dnsRemove\n\n\nSLEEP_TIME = 5\n\n\nclass vmProvider(object):\n def __init__(self, cfg):\n if cfg:\n self.cfg = cfg\n self.apikey = self.cfg[\"apikey\"]\n self.manager = digitalocean.Manager(token=self.apikey)\n\n def getDroplets(self, hosts):\n retValue = {}\n droplets = self.manager.get_all_droplets()\n for h, v in hosts.items():\n if hosts[h][\"enabled\"]:\n for droplet in droplets:\n if droplet.name == h:\n retValue[droplet.name] = droplet\n if h not in retValue:\n for curkey in self.manager.get_all_sshkeys():\n if curkey.name == v[\"sshkey\"]:\n sshkey = curkey\n continue\n droplet = digitalocean.Droplet(\n token=self.apikey,\n name=h,\n region=v[\"region\"],\n image=v[\"image\"],\n size_slug=v[\"size_slug\"],\n ssh_keys=[sshkey],\n monitoring=True,\n backups=False,\n )\n retValue[h] = droplet\n\n return retValue\n\n def waitStatus(self, hosts, status):\n allStatus = False\n while not allStatus:\n allStatus = True\n for k, d in self.getDroplets(hosts).items():\n if d.status != status and d.status is not None:\n allStatus = False\n break\n time.sleep(SLEEP_TIME)\n\n def build(self, hosts):\n ips = {}\n for k, d in self.getDroplets(hosts).items():\n if d.status is None:\n click.echo(\" - Creating host: {} \".format(d.name))\n d.create()\n ips[d.name] = None\n elif d.status == \"off\":\n click.echo(\" - Booting host: {} \".format(d.name))\n d.power_on()\n elif d.status == \"active\":\n click.echo(\" - Already up host: {} \".format(d.name))\n else:\n click.echo(\" - Status of host: {} is 
{}\".format(d.name, d.status))\n\n self.waitStatus(hosts, \"active\")\n for k, d in self.getDroplets(hosts).items():\n if d.ip_address:\n ips[d.name] = d.ip_address\n\n dnsUpdate(ips)\n\n def up(self, hosts):\n for k, d in self.getDroplets(hosts).items():\n if d.status is None:\n click.echo(\" - No such host: {} \".format(d.name))\n elif d.status == \"off\":\n click.echo(\" - Booting host: {} \".format(d.name))\n d.power_on()\n elif d.status == \"active\":\n click.echo(\" - Already up host: {} \".format(d.name))\n else:\n click.echo(\" - Status of host: {} is {}\".format(d.name, d.status))\n\n def halt(self, hosts):\n for k, d in self.getDroplets(hosts).items():\n if d.status == \"active\":\n click.echo(\" - Halting host: {} \".format(d.name))\n d.shutdown()\n else:\n click.echo(\" - Status of host: {} is {}\".format(d.name, d.status))\n\n self.waitStatus(hosts, \"off\")\n\n def destroy(self, hosts):\n updateHosts = {}\n for k, d in self.getDroplets(hosts).items():\n if d.status is not None:\n click.echo(\" - Destroying host: {} \".format(d.name))\n d.destroy()\n updateHosts[d.name] = {}\n else:\n click.echo(\" - Host does not exists : {}\".format(d.name))\n\n dnsRemove(updateHosts)\n\n def dnsupdate(self, hosts):\n ips = {}\n for k, d in self.getDroplets(hosts).items():\n if d.ip_address:\n ips[d.name] = d.ip_address\n dnsUpdate(ips)\n\n def dnsremove(self, hosts):\n ips = {}\n for h in hosts:\n if hosts[h][\"enabled\"]:\n ips[h] = None\n dnsRemove(hosts)\n\n def snapCreate(self, hosts):\n snapshots = self.manager.get_droplet_snapshots()\n snaps = [s.name for s in snapshots]\n for k, d in self.getDroplets(hosts).items():\n if d.status is not None:\n snapshot_name = \"zbuilder-{}\".format(d.name)\n if snapshot_name not in snaps:\n click.echo(\" - Taking snapshot: {}\".format(d.name))\n d.take_snapshot(snapshot_name)\n else:\n click.echo(\" - Snapshot already exists: {}\".format(d.name))\n\n def snapRestore(self, hosts):\n snapshots = self.manager.get_droplet_snapshots()\n snaps = {}\n for s in snapshots:\n snaps[s.name] = s\n\n click.echo(\" Halting\")\n self.halt(hosts)\n self.waitStatus(hosts, \"off\")\n click.echo(\" Restoring\")\n for k, d in self.getDroplets(hosts).items():\n if d.status == \"off\":\n snapshot_name = \"zbuilder-{}\".format(d.name)\n if snapshot_name in snaps.keys():\n click.echo(\" - Restoring from image: {} \".format(snapshot_name))\n d.restore(snaps[snapshot_name].id)\n else:\n click.echo(\" - No such snapshot: {}\".format(snapshot_name))\n click.echo(\" Booting up\")\n self.up(hosts)\n self.waitStatus(hosts, \"active\")\n\n def snapDelete(self, hosts):\n snapshots = self.manager.get_droplet_snapshots()\n snaps = {}\n for s in snapshots:\n snaps[s.name] = s\n for h in hosts:\n if hosts[h][\"enabled\"]:\n snapshot_name = \"zbuilder-{}\".format(h)\n if snapshot_name in snaps.keys():\n click.echo(\" - Deleting snapshot: {}\".format(snapshot_name))\n snaps[snapshot_name].destroy()\n else:\n click.echo(\" - No such snapshot: {}\".format(snapshot_name))\n\n def config(self):\n return \"apikey: {}...\".format(self.cfg[\"apikey\"][0:10])\n\n def status(self):\n return \"PASS\"\n\n def params(self, params):\n return {k: params.get(k, None) for k in [\"size_slug\", \"region\", \"image\"]}\n\n def enabled(self):\n return True\n","repo_name":"hasiotis/zbuilder","sub_path":"zbuilder/vm/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} 
+{"seq_id":"19883672334","text":"def lonelyinteger(a):\n \n numSet = set(a)\n\n for i in numSet:\n count = 0\n for j in a:\n if(i == j):\n count += 1\n if(count == 1):\n return i\n\n\nif __name__ == '__main__':\n\n n = int(input().strip())\n\n a = list(map(int, input().rstrip().split()))\n\n print(lonelyinteger(a))\n\n","repo_name":"Aravinthvimal/Python-beginner","sub_path":"lonelyInteger.py","file_name":"lonelyInteger.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10548331299","text":"from typing import List\nclass Solution:\n def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:\n diff = (sum(B)-sum(A))//2\n setA = set(A)\n setB = set(B)\n for each in setA:\n if each + diff in setB:\n return [each, each + diff]\n\ns = Solution()\nprint(s.fairCandySwap([1,1],[2,2]))","repo_name":"nikhilbommu/DS-PS-Algorithms","sub_path":"Leetcode/LeetCode Problems/FairCandySwap.py","file_name":"FairCandySwap.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2979776741","text":"import numpy as np\r\nimport scipy.stats as stats\r\n\r\n\r\ndef rand_exp(nums):\r\n return -np.log(1.0 - nums)\r\n\r\n\r\ndef rand_weibull(nums, scale, shape):\r\n return scale * np.power(rand_exp(nums), 1.0 / shape)\r\n\r\n\r\n# These tests verify that the exponential and weibull samplers in\r\n# the kernels produce the summary statistics expected when they\r\n# are not rounded to integers.\r\ndef test_exp_is_exp():\r\n n = 100000\r\n expected_rate = 1.0\r\n exps = rand_exp(np.random.rand(n))\r\n _, rate = stats.expon.fit(exps, floc=0.0)\r\n\r\n assert np.isclose(expected_rate, rate, atol=0.1)\r\n\r\n\r\ndef test_weibull_is_weibull():\r\n n = 100000\r\n expected_scale = 14.0\r\n expected_shape = 5.0\r\n weibulls = rand_weibull(np.random.rand(n), expected_scale, expected_shape)\r\n shape, _, scale = stats.weibull_min.fit(weibulls, floc=0.0)\r\n\r\n assert np.isclose(expected_scale, scale, atol=0.1)\r\n assert np.isclose(expected_shape, shape, atol=0.1)\r\n\r\n\r\ndef test_exposed_dist():\r\n n = 100000\r\n weibulls = rand_weibull(np.random.rand(n), 2.82, 3.93)\r\n mean = weibulls.mean()\r\n std = weibulls.std()\r\n\r\n expected_mean = 2.56\r\n expected_std = 0.72\r\n\r\n assert np.isclose(expected_mean, mean, atol=0.1)\r\n assert np.isclose(expected_std, std, atol=0.1)\r\n\r\n\r\ndef test_presymptomatic_dist():\r\n n = 100000\r\n weibulls = rand_weibull(np.random.rand(n), 2.45, 7.12)\r\n mean = weibulls.mean()\r\n std = weibulls.std()\r\n\r\n expected_mean = 2.3\r\n expected_std = 0.35\r\n\r\n assert np.isclose(expected_mean, mean, atol=0.1)\r\n assert np.isclose(expected_std, std, atol=0.1)\r\n","repo_name":"Urban-Analytics/RAMP-UA","sub_path":"tests/opencl/test_prngs.py","file_name":"test_prngs.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"43796238876","text":"# @Time : 2020/5/15 16:28 \n# @Author : tongyue\nimport yaml\n\n\n# 加载字典\ndict_data = {\"a\":[1,2]}\nprint(yaml.dump(dict_data))\n\nwith open('yaml_data/yaml_data_3.txt',mode='w') as f:\n 
yaml.dump(data=dict_data,stream=f)","repo_name":"tongyue2018/PythonAdvanced","sub_path":"advanced_method/file_read_write/yaml_usage/yaml_dump.py","file_name":"yaml_dump.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28991450044","text":"from oscar.apps.dashboard.catalogue.forms import ProductForm as CoreProductForm\n\nclass ProductForm(CoreProductForm):\n \n class Meta(CoreProductForm.Meta):\n fields = [\n 'title', 'upc', 'description', 'brand', 'activity',\n 'is_discountable', 'structure',]\n \n def delete_non_child_fields(self):\n \"\"\"\n Deletes any fields not needed for child products. Override this if\n you want to e.g. keep the description field.\n \"\"\"\n parent_only_fields = [\n 'description', 'is_discountable', 'brand', 'activity',]\n for field_name in parent_only_fields:\n if field_name in self.fields:\n del self.fields[field_name]\n\n ","repo_name":"halfnibble/demo-shop","sub_path":"apps/dashboard/catalogue/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17969522052","text":"import json\nimport csv\n\n# INDEXD = {\n# \"host\": \"http://localhost:8000\",\n# \"version\": \"v0\",\n# \"auth\": {\"username\": \"test\", \"password\": \"test\"},\n# }\n\nINDEXD = {\n \"host\": \"https://nci-crdc.datacommons.io/index/index\",\n \"version\": \"v0\",\n \"auth\": {\"username\": \"\", \"password\": \"\"},\n}\n\nGDC_TOKEN = \"\"\n\ntry:\n with open(\"/home/ubuntu/dcf_dataservice_credentials.json\", \"r\") as f:\n data = json.loads(f.read())\n INDEXD = data.get(\"INDEXD\", {})\n GDC_TOKEN = data.get(\"GDC_TOKEN\", \"\")\nexcept Exception as e:\n print(\"Can not read dcf_dataservice_credentials.json file. Detail {}\".format(e))\n\nPROJECT_ACL = {}\ntry:\n with open(\"./GDC_datasets_access_control.csv\", \"rt\") as f:\n csvReader = csv.DictReader(f, delimiter=\",\")\n for line in csvReader:\n PROJECT_ACL[line[\"project_id\"]] = line\nexcept Exception as e:\n print(\"Can not read GDC_datasets_access_control.csv file. 
Detail {}\".format(e))\n\n\nPROJECT_ACL = {\n \"TARGET\": {\"gs_bucket_prefix\": \"gdc-target-phs000218\"},\n \"TCGA\": {\"gs_bucket_prefix\": \"gdc-tcga-phs000178\"},\n \"VAREPOP\": {\"gs_bucket_prefix\": \"gdc-varepop-apollo-phs001374\"},\n \"FM\": {\"gs_bucket_prefix\": \"gdc-fm-ad-phs001179\"},\n \"NCICCR\": {\"gs_bucket_prefix\": \"gdc-nciccr-phs001444\"},\n \"CTSP\": {\"gs_bucket_prefix\": \"gdc-ctsp-phs001175\"},\n \"CCLE\": {\"gs_bucket_prefix\": \"gdc-ccle\"},\n}\n\n","repo_name":"uc-cdis/google-replicate","sub_path":"google_replicate/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42224832412","text":"def compression_rle(data):\n compression_data = ''\n count = 0\n compress_element = data[0]\n for i,s in enumerate(data):\n if compress_element == data[i] and count < 255:\n count += 1\n else:\n compression_data += chr(count) + compress_element\n compress_element = data[i]\n count = 1\n if (i + 1) == len(data):\n compression_data += chr(count) + compress_element\n return compression_data\n\ndef recovery_from_rle(data):\n decompression_data = ''\n for i in range(0,len(data),2):\n count = ord(data[i])\n value = data[i+1]\n decompression_data += value * count\n return decompression_data","repo_name":"Chesnokov-Mikhail/homeworkGB_Python","sub_path":"seminar6/RLE/rle_algoritm.py","file_name":"rle_algoritm.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37512482424","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import Tensor\nfrom typing import List\nimport numpy as np\n\nfrom cvae.dataman import DataManager\nfrom cvae.modeling import CVAEModel\n\nfrom tools import printProgressBar\nfrom tools import get_config_code_and_summary\n\nimport pickle\nimport os, shutil\n\n# Represents a state of a training process. 
Used by the training function to advance gradually the optimization search.\nclass TrainerState:\n def __init__(self, model, optimizer, scheduler):\n self.model = model\n self.optimizer = optimizer\n self.loss_history = []\n self.kl_history = []\n self.lrs = []\n self.scheduler = scheduler\n\n def getCurrentEpoch(self):\n return len(self.loss_history)\n\n def advance(self, data : DataManager, batch_size = 1024):\n '''\n Executes one epoch in the training\n '''\n self.model.train() # set in train mode\n\n loss_accum, KL_accum, batches = 0.0, 0.0, 0\n\n for c, x in data.get_batches(batch_size): # get data shuffled in batches\n self.optimizer.zero_grad()\n latent_mu, latent_logVar, z, x_mu, x_logVar = self.model(c, x)\n\n posterior_ll = torch.mean( CVAEModel.posterior_LogLikelihood(x, x_mu, x_logVar), dim = 0 ) # accumulate batch\n kl_div = torch.mean( CVAEModel.KL_divergence(latent_mu, latent_logVar), dim = 0 ) # accumulate batch\n elbo = posterior_ll - kl_div\n loss = -elbo\n \n loss_accum += loss.item()\n KL_accum += kl_div.item()\n batches += 1\n\n # perform optimization\n loss.backward()\n\n self.optimizer.step()\n\n self.lrs.append(self.optimizer.param_groups[0]['lr'])\n\n # self.scheduler.step(accLoss / numberOfBatches)\n self.scheduler.step()\n\n self.loss_history.append(loss_accum / batches)\n self.kl_history.append(KL_accum / batches)\n\n return loss_accum / batches # loss evaluation average\n\n def save(self, folder_name):\n '''\n Saves the current state of the training process at specific folder.\n Notice: use a different folder for each state.\n '''\n os.makedirs(folder_name, exist_ok=True)\n torch.save(self.model.state_dict(), folder_name + \"\\\\model.pt\")\n torch.save(self.optimizer.state_dict(), folder_name+\"\\\\optimizer.pt\")\n with open(folder_name+\"\\\\loss.bin\", 'wb') as fp:\n pickle.dump(self.loss_history, fp)\n with open(folder_name+\"\\\\klDiv.bin\", 'wb') as fp:\n pickle.dump(self.kl_history, fp)\n with open(folder_name+\"\\\\lr.bin\", 'wb') as fp:\n pickle.dump(self.lrs, fp)\n\n def load(self, folder_name):\n '''\n Loads the state of a training process from a specific folder.\n '''\n self.model.load_state_dict(torch.load(folder_name + \"\\\\model.pt\"))\n self.optimizer.load_state_dict(torch.load(folder_name+\"\\\\optimizer.pt\"))\n with open(folder_name+\"\\\\loss.bin\",'rb') as fp:\n self.loss_history = pickle.load(fp)\n with open(folder_name + \"\\\\klDiv.bin\", 'rb') as fp:\n self.kl_history = pickle.load(fp)\n with open(folder_name + \"\\\\lr.bin\", 'rb') as fp:\n self.lrs = pickle.load(fp)\n\nclass WavingScheduler:\n '''\n Defines a custom scheduler with a periodic wave and exponential reduction\n '''\n def __init__(self, optimizer, period, gamma):\n self.optimizer = optimizer\n self.period = period\n self.gamma = gamma\n self.initial_lr = self.optimizer.param_groups[0]['lr']\n self.epoch = 0\n \n def step(self):\n #self.optimizer.param_groups[0]['lr'] = self.initial_lr * (-np.cos(2*self.epoch * 3.14159 / self.period) + 2) * np.power(self.gamma, self.epoch) + 0.000001\n self.optimizer.param_groups[0]['lr'] = self.initial_lr * np.power(self.gamma, self.epoch) + 0.000001\n self.epoch += 1\n\n\ndef create_initial_state(model : CVAEModel, epochs = 400):\n '''\n Creates the initial training state for a specific model.\n The optimizer used is Adamax with starting lr of 0.001.\n The scheduler used is a custom waving scheduler that fall exponentially to a factor of 0.001 depending on the number of epochs.\n '''\n optimizer = torch.optim.Adamax(model.parameters(), 
lr = 0.01)\n gamma = np.exp(np.log(0.005)/epochs)\n scheduler = WavingScheduler(optimizer, 400, gamma=gamma)\n return TrainerState(model, optimizer, scheduler)\n\ndef train(model : CVAEModel, data : DataManager, folder_name, batch_size = 1024*16, epochs = 400, restart = True):\n '''\n Training of a specific model using data and specific batch size.\n The training process saves every 10 epochs in specified folder, within folder names state0, state1 ....\n If there is states already saved the training process will be resumed on the last saved state unless restart is True.\n '''\n\n state = create_initial_state(model, epochs)\n\n stateID = 0\n while os.path.exists(folder_name+\"\\\\state\"+str(stateID+1)):\n stateID += 1\n if restart:\n shutil.rmtree(folder_name+\"\\\\state\"+str(stateID))\n\n if restart:\n stateID = 0\n \n if stateID > 0:\n state.load(folder_name+\"\\\\state\"+str(stateID))\n else:\n state.save(folder_name+\"\\\\state0\")\n\n for e in range(epochs):\n loss = state.advance(data, batch_size=batch_size)\n if (state.getCurrentEpoch() % 10 == 0):\n printProgressBar((e+1)/epochs, prefix = 'Epoch '+str(state.getCurrentEpoch()), suffix = 'Loss: '+str(loss))\n stateID += 1\n state.save(folder_name+\"\\\\state\"+str(stateID))\n print()\n\ndef train_models (model_name, model_type, model_factory, configs, data : DataManager, output_folder, batch_size, epochs):\n '''\n Train different configurations for a model.\n model_factory is a method that will receive all named parametes from a specific setting.\n setting is a dictionary of each model parameter, the different values it can take.\n Model name refers to the specific model, e.g. lenGen, model type refers to the type of vae used, e.g. cvae.\n '''\n print(\"[INFO] Training configs: \" + str(len(configs)))\n\n for config in configs:\n code, summary = get_config_code_and_summary(config)\n print(\"[INFO] Training: \"+summary)\n model = model_factory(**config).to(torch.device('cuda:0'))\n train(\n model,\n data, \n output_folder+\"\\\\\"+model_name+\"\\\\\"+model_type+\"\\\\\"+code, \n batch_size = batch_size, \n epochs=epochs, \n restart=True\n )\n torch.cuda.empty_cache()\n\n print(\"[INFO] All models trained\")\n\ndef get_last_state(model_name, model_type, model_factory, config, folder, top_state = None):\n '''\n Returns a the final state of a trained configuration.\n If top_state is not None, this value tops the state returned. i.e. get the state 3 even if there are 100\n '''\n code, summary = get_config_code_and_summary(config)\n print(\"[INFO] Loading: \"+summary)\n folder_name = folder+\"\\\\\"+model_name+\"\\\\\"+model_type+\"\\\\\"+code\n model = model_factory(**config).to(torch.device('cuda:0'))\n state = create_initial_state(model)\n \n stateID = 0\n while os.path.exists(folder_name+\"\\\\state\"+str(stateID+1)):\n stateID += 1\n\n if top_state is not None:\n stateID = min(stateID, top_state)\n\n if stateID > 0:\n state.load(folder_name+\"\\\\state\"+str(stateID))\n\n return state\n\ndef get_last_states (model_name, model_type, model_factory, configs, output_folder, top_state = None):\n '''\n Returns a list with final states for different trained configurations.\n If top_state is not None, this value tops the state returned. i.e. 
get the state 3 even if there are 100\n '''\n states = []\n for config in configs:\n states.append(get_last_state(model_name, model_type, model_factory, config, output_folder, top_state))\n\n return states","repo_name":"lleonart1984/dx4xb","sub_path":"repo_Eurographics2021/subsurface_cvae_training/cvae/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28972127774","text":"from genes import *\nfrom random import randint, sample, choices, random\nfrom itertools import permutations\n\nclass GeneticAlgorithm:\n def __init__(self, length, population):\n self.length = length\n self.population = population\n self.solutionSet = [Tortoise(length) for _ in range(population)]\n self.bestSolution = [Tortoise(length), 0]\n # Eve\n for t in self.solutionSet[length:]:\n t.getSpace().makeEve()\n self.generation = 0\n # Additional parameters\n self.t = 0.90 # Tournament variable\n\n def fitness(self, tortoise):\n var = tortoise.variance()\n mean = tortoise.verticesSum(mean=True)\n fit = mean - (tortoise.nVertices()**.5) * (var**.5)\n\n if var == .0:\n fit += 1000\n\n if fit > self.bestSolution[1]:\n self.bestSolution[0].getSpace().setSpace(tortoise.getSpace().flatten())\n self.bestSolution[1] = fit\n return fit\n\n\n def mutate(self, tortoise):\n nVertices = tortoise.getSpace().nVertices()\n pos1 = randint(0, nVertices-1)\n pos2 = randint(0, nVertices-1)\n tortoise.getSpace().swapElement(pos1, pos2)\n return tortoise\n\n # Cycle\n def crossover(self, papa, mama):\n pa_vector = papa.getSpace().flatten()\n ma_vector = mama.getSpace().flatten()\n vector_size = len(pa_vector)\n origin = pa_vector\n toggle = lambda x: pa_vector if x == ma_vector else ma_vector\n\n child_vector = [0 for _ in range(vector_size)]\n left_idx = 0\n\n while left_idx < vector_size:\n ptr = left_idx\n while child_vector[ptr] == 0:\n child_vector[ptr] = origin[ptr]\n ptr = origin.index(toggle(origin)[ptr])\n\n while left_idx < vector_size and child_vector[left_idx] != 0:\n left_idx += 1\n origin = toggle(origin)\n\n if self.fitness(papa) > self.fitness(mama):\n mama.getSpace().setSpace(child_vector)\n else:\n papa.getSpace().setSpace(child_vector)\n\n def tournament(self, participants):\n length = len(participants)\n if length == 1:\n return (participants[0], self.fitness(participants[0]))\n left = self.tournament(participants[:length//2])\n right = self.tournament(participants[length//2:])\n if left[1] < right[1]:\n return right if random() < self.t else left\n else:\n return left if random() < self.t else right\n \n \n # Tournament\n def select(self):\n num = 8\n samples = sample(self.solutionSet, k=num)\n\n return self.tournament(samples)[0]\n\n def repair(self, tortoise, matured=False):\n nVertices = tortoise.getSpace().nVertices()\n intensity = choices(range(2, 6), weights = [2, 6, 3, 1], k=1)[0]\n \n space = tortoise.getSpace().flatten()\n\n target_indices = sample(range(nVertices), k=intensity)\n target_values = [space[i] for i in target_indices]\n\n comparison_tortoise = Tortoise(self.length)\n best_fitness = self.fitness(tortoise)\n for suggestion in permutations(target_values, intensity):\n for i, j in enumerate(target_indices):\n space[j] = suggestion[i]\n comparison_tortoise.getSpace().setSpace(space)\n \n if best_fitness < self.fitness(comparison_tortoise):\n # best_fitness = current_fitness\n tortoise.getSpace().setSpace(space)\n\n if matured and self.length > 2:\n cont_lv = 
tortoise.getSpace().contactLevels()\n target_indices = [sample(range(0, cont_lv[0]), k=3), \n sample(range(cont_lv[0], cont_lv[0]+cont_lv[1]), k=3),\n sample(range(cont_lv[0]+cont_lv[1], nVertices), k=3)]\n target_values = [[space[i] for i in lv] for lv in target_indices]\n \n for lv0_suggest in permutations(target_values[0], 3):\n for i, j in enumerate(target_indices[0]):\n space[j] = lv0_suggest[i]\n\n for lv1_suggest in permutations(target_values[1], 3):\n for i, j in enumerate(target_indices[1]):\n space[j] = lv1_suggest[i]\n \n for lv2_suggest in permutations(target_values[2], 3):\n for i, j in enumerate(target_indices[2]):\n space[j] = lv2_suggest[i]\n comparison_tortoise.getSpace().setSpace(space)\n current_fitness = self.fitness(comparison_tortoise)\n if best_fitness < current_fitness:\n best_fitness = current_fitness\n tortoise.getSpace().setSpace(space)\n\n\n def avg_fitness(self):\n fitness_list = [self.fitness(t) for t in self.solutionSet]\n return sum(fitness_list) / self.population\n\n def var_fitness(self):\n fitness_list = [self.fitness(t) for t in self.solutionSet]\n mean = sum(fitness_list) / self.population\n\n sqrdev = lambda x: (x-mean)**2\n \n return sum([sqrdev(x) for x in fitness_list]) / self.population\n\n def solve(self):\n step = 2000\n for i in range(step):\n # Mutation\n for tortoise in self.solutionSet:\n if random() < (1 - i*(10/step)):\n self.mutate(tortoise)\n\n\n # Crossover\n for _ in range(int(self.population*0.3)):\n self.crossover(self.select(), self.select())\n \n \n # Repair\n for tortoise in self.solutionSet:\n if random() < 0.7:\n self.repair(tortoise)\n if 0.9*step < i and random() < 0.1:\n self.repair(tortoise, matured=True)\n \n if i % 10 == 0:\n stddev = self.var_fitness()**(.5)\n print(f\"Maximum fitness in step {i}: {self.bestSolution[1]}, Average: {self.avg_fitness()}, \\\n Standard deviation: {stddev}\")\n if stddev < 0.2:\n break\n \n\np = GeneticAlgorithm(length=3, population=100)\n\np.solve()\nprint(p.bestSolution[1])\nprint(p.bestSolution[0].getSpace().space())\np.bestSolution[0].show()","repo_name":"hd132506/HTPGA","sub_path":"algorithm_skeleton.py","file_name":"algorithm_skeleton.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30842931239","text":"def reduce_by_steps(bin_func, input_list, start_value):\n \"\"\"Recursively applies a binary function to the elements of a list\n and an accumulator value.\n\n Args:\n bin_func (function): A binary function that takes two arguments.\n input_list (list): A list of elements to be reduced.\n start_value (any): The initial value of the accumulator.\n\n Returns:\n any: The final value of the accumulator after all elements\n in the list have been processed.\n\n Raises:\n TypeError: If not all elements in the input list are of the same type.\n \"\"\"\n if not all(isinstance(x, type(input_list[0])) for x in input_list):\n raise TypeError(\n \"All elements in the input list must be of the same type\")\n\n if len(input_list) == 0:\n return start_value\n else:\n if start_value == '':\n start_value = bin_func(start_value, input_list[0]).lstrip()\n input_list.pop(0)\n return reduce_by_steps(bin_func, input_list, start_value)\n else:\n start_value = bin_func(start_value, input_list[0])\n input_list.pop(0)\n return reduce_by_steps(bin_func, input_list, 
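# recurse on the shrunken list with the updated accumulator\n                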
start_value)\n","repo_name":"dipoleman/de-py-katas","sub_path":"src/reduce_by_steps/reduce_by_steps.py","file_name":"reduce_by_steps.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34981929327","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n# Load the Iris dataset\nfrom sklearn.datasets import load_iris\niris = load_iris()\n\ndata = pd.DataFrame(data=np.c_[iris['data']],\n columns=iris['feature_names'])\n\n# Predict Sepal Width based on Sepal Length\n\n\nsepal_length = data[['sepal length (cm)']]\nsepal_width = data[['sepal width (cm)']]\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(\n sepal_length, sepal_width, test_size=0.2, random_state=42)\n\n# Create a linear regression model for Sepal Width prediction\nmodel_sepal_width = LinearRegression()\n\nmodel_sepal_width.fit(X_train, y_train)\n# Make predictions on the test data\ny_pred_sepal_width = model_sepal_width.predict(X_test)\n# Evaluate the model for Sepal Width prediction\nr2_sepal_width = r2_score(y_test, y_pred_sepal_width)\nprint(\n f'R-squared for Sepal Width prediction based on sepal length: {r2_sepal_width:.2f}')\n\n\n# Predict Petal Length based on Sepal Length and Sepal Width\nsepal_length_width = data[['sepal length (cm)', 'sepal width (cm)']]\n\npetal_length = data['petal length (cm)']\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(\n sepal_length_width, petal_length, test_size=0.2, random_state=42)\n\n# Create a linear regression model for Petal Length prediction\nmodel_petal_length = LinearRegression()\nmodel_petal_length.fit(X_train, y_train)\n\n# Make predictions on the test data\ny_pred_petal_length = model_petal_length.predict(X_test)\n\n# Evaluate the model for Petal Length prediction\nr2_petal_length = r2_score(y_test, y_pred_petal_length)\nprint(f'R-squared for Petal Length prediction: {r2_petal_length:.2f}')\n\n\n# Predict Petal Width based on Sepal Length, Sepal Width\npetal_width = data['petal width (cm)']\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(\n sepal_length_width, petal_width, test_size=0.2, random_state=42)\n\n# Create a linear regression model for Petal Width prediction\nmodel_petal_width = LinearRegression()\nmodel_petal_width.fit(X_train, y_train)\n\n# Make predictions on the test data\ny_pred_petal_width = model_petal_width.predict(X_test)\n\n# Evaluate the model for Petal Width prediction\nr2_petal_width = r2_score(y_test, y_pred_petal_width)\nprint(f'R-squared for Petal Width prediction: {r2_petal_width:.2f}')\n","repo_name":"kodo-yousif/AI-Practical-Tasks","sub_path":"2023-2024/week-5/GROUP A - Regression model/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"30803611513","text":"import simplejson as json, uuid, os\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom socless import create_events\nfrom datetime import datetime\n\n\nEVENTS_TABLE = os.environ.get('SOCLESS_EVENTS_TABLE')\nPLAYBOOKS_TABLE = os.environ.get('SOCLESS_PLAYBOOKS_TABLE')\n\ndef lambda_handler(event, context):\n ingest = json.loads(event['body'])\n\n if 
isinstance(ingest['details'], str):\n ingest['details'] = json.loads(ingest['details'])\n\n event_data = {}\n event_data['event_type'] = ingest.get('name')\n event_data['data_types'] = ingest.get('data_types')\n event_data['event_meta'] = ingest.get('event_meta')\n event_data['playbook'] = ingest.get('playbook')\n event_data['dedup_keys'] = ingest.get('dedup_keys')\n event_data['details'] = ingest.get('details')\n resp = create_events(event_data, context)\n if not resp.get('status'):\n return {\"statusCode\": 200, \"body\": json.dumps(resp.get('message'))}\n return {\"statusCode\": 200}\n","repo_name":"twilio-labs/socless-sumologic","sub_path":"functions/event_endpoint/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72701443442","text":"\"\"\"Tests for the utils module.\"\"\"\nfrom os import remove\nfrom os.path import exists, join\n\nimport pytest\nfrom networkx import Graph, MultiDiGraph\nfrom numpy import array, array_equal, int32, int64, inf, nan, isnan\nfrom shapely import MultiPolygon, Polygon\n\nfrom superblockify.config import TEST_DATA_PATH\nfrom superblockify.utils import (\n load_graph_from_place,\n has_pairwise_overlap,\n compare_dicts,\n __edge_to_1d,\n __edges_to_1d,\n percentual_increase,\n compare_components_and_partitions,\n)\nfrom tests.conftest import mark_xfail_flaky_download\n\n\n@mark_xfail_flaky_download\n@pytest.mark.parametrize(\"only_cache\", [True, False])\n@pytest.mark.parametrize(\"max_nodes\", [None, 100])\ndef test_load_graph_from_place(only_cache, max_nodes):\n \"\"\"Test that the load_graph_from_place function works.\"\"\"\n\n graph = load_graph_from_place(\n join(TEST_DATA_PATH, \"cities\", \"Adliswil.graphml\"),\n \"Adliswil, Bezirk Horgen, Zürich, Switzerland\",\n add_population=True,\n network_type=\"drive\",\n only_cache=only_cache,\n max_nodes=max_nodes,\n )\n if only_cache:\n assert graph is None\n else:\n assert graph is not None\n assert len(graph) > 0\n assert graph.size() > 0\n assert isinstance(graph.graph[\"boundary\"], (MultiPolygon, Polygon))\n\n # check that every edge has the attribute'length', `speed_kph`,\n # and `travel_time`\n for _, _, data in graph.edges(data=True):\n assert \"length\" in data\n assert \"speed_kph\" in data\n assert \"travel_time\" in data\n\n\n@pytest.mark.parametrize(\n \"city,search_string\",\n [\n (\"CPH-str\", \"Københavns Kommune, Region Hovedstaden, Danmark\"),\n (\n \"CPH-list\",\n [\n \"Københavns Kommune, Region Hovedstaden, Danmark\",\n \"Frederiksberg Kommune, Denmark\",\n ],\n ),\n (\"CPH-osmid\", \"R2192363\"),\n (\"CPH-osmid-list\", [\"R2192363\", \"R2186660\"]),\n ],\n)\n@mark_xfail_flaky_download\ndef test_load_graph_from_place_search_str_types(city, search_string):\n \"\"\"Test that the load_graph_from_place function works with different search string\n types.\"\"\"\n graph = load_graph_from_place(\n save_as=join(TEST_DATA_PATH, \"cities\", f\"{city}_query_test.graphml\"),\n search_string=search_string,\n network_type=\"drive\",\n )\n assert graph is not None\n assert len(graph) > 0\n assert graph.size() > 0\n assert isinstance(graph.graph[\"boundary\"], (MultiPolygon, Polygon))\n\n\n@pytest.fixture(scope=\"module\")\ndef _delete_query_test_graphs():\n \"\"\"Delete the query test graphs.\"\"\"\n yield\n for city in [\"CPH-str\", \"CPH-list\", \"CPH-osmid\", \"CPH-osmid-list\"]:\n filepath = join(TEST_DATA_PATH, \"cities\", 
f\"{city}_query_test.graphml\")\n if exists(filepath):\n remove(filepath)\n\n\n@pytest.mark.parametrize(\n \"lists,expected\",\n [\n ([[]], array([[False]])),\n ([[1]], array([[True]])),\n ([[1, 2], [3, 4]], array([[True, False], [False, True]])),\n ([[1], [1]], array([[True, True], [True, True]])),\n ([[], []], array([[False, False], [False, False]])),\n (\n [[1, 2], [3, 4], [5, 6]],\n array([[True, False, False], [False, True, False], [False, False, True]]),\n ),\n (\n [[1], [1], [2]],\n array([[True, True, False], [True, True, False], [False, False, True]]),\n ),\n (\n [[1, 2], [3, 4], [5, 6], [1]],\n array(\n [\n [True, False, False, True],\n [False, True, False, False],\n [False, False, True, False],\n [True, False, False, True],\n ]\n ),\n ),\n # long list, range\n (\n [list(range(1000)), list(range(1000))],\n array([[True, True], [True, True]]),\n ),\n (\n [list(range(1000)), list(range(1000, 2000))],\n array([[True, False], [False, True]]),\n ),\n (\n [\n list(range(int(1e5))),\n list(range(int(1e5), int(2e5))),\n list(range(int(1.8e5), int(3e5))),\n ],\n array(\n [\n [True, False, False],\n [False, True, True],\n [False, True, True],\n ],\n ),\n ),\n ],\n)\n@pytest.mark.filterwarnings(\"ignore:invalid value encountered\")\ndef test_has_pairwise_overlap(lists, expected):\n \"\"\"Test `_has_pairwise_overlap` by design.\"\"\"\n # Check if ndarrays are equal\n # pylint: disable=protected-access\n assert array_equal(has_pairwise_overlap(lists), expected)\n\n\n@pytest.mark.parametrize(\n \"lists\",\n [\n [],\n False,\n True,\n 1,\n 1.0,\n \"a\",\n None,\n array([]),\n array([[]]),\n array([1]),\n [1],\n [1, 2],\n [[1, 2], [3, 4], [5, 6], 1],\n [[1, 2], [3, 4], [5, 6], \"a\"],\n ],\n)\ndef test_has_pairwise_overlap_exception(lists):\n \"\"\"Test `_has_pairwise_overlap` exception handling.\"\"\"\n with pytest.raises(ValueError):\n # pylint: disable=protected-access\n has_pairwise_overlap(lists)\n\n\n@pytest.mark.parametrize(\n \"dict1,dict2,expected\",\n [\n ({}, {}, True),\n ({}, {\"a\": 1}, False), # missing key\n ({\"a\": 1}, {}, False), # missing key\n ({\"a\": 1}, {\"a\": 1}, True),\n ({\"a\": 1}, {\"a\": 2}, False), # different value\n ({\"a\": 1}, {\"b\": 1}, False), # different key\n ({\"a\": 1, \"b\": 2}, {\"a\": 1, \"b\": 2}, True),\n ({\"a\": 1, \"b\": 2}, {\"a\": 1, \"b\": 3}, False), # different value\n ({\"a\": 1, \"b\": 2}, {\"a\": 1, \"c\": 2}, False), # different key\n ({\"a\": 1, \"b\": 2}, {\"a\": 1, \"b\": 2, \"c\": 3}, False), # missing key\n ({\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 1, \"b\": 2}, False), # missing key\n ({(1, 2): 1}, {(1, 2): 1}, True),\n ({(1, 2): 1}, {(1, 2): 2}, False), # different value\n ({\"a\": array([1, 2])}, {\"a\": array([1, 2])}, True),\n ({\"a\": array([1, 2])}, {\"a\": array([1, 3])}, False), # different value\n ({\"a\": array([])}, {\"a\": array([])}, True),\n ({\"a\": array([])}, {\"a\": array([1])}, False), # different value\n ({\"a\": array([1])}, {\"a\": [1]}, False), # different type\n ({\"a\": array([[1, 2], [3, 4]])}, {\"a\": array([[1, 2], [3, 4]])}, True),\n ({\"a\": array([[1, 2], [3, 4]])}, {\"a\": array([[1, 2], [3, 5]])}, False),\n ({\"a\": array([[1, 2], [3, 4]])}, {\"a\": array([[1, 2], [3, 4], [5, 6]])}, False),\n # differing types\n ({}, None, False), # None\n ({}, set(), False), # set\n ({}, [], False), # list\n ({}, array([]), False), # ndarray\n ({}, array([[]]), False), # ndarray\n ({}, tuple(), False), # tuple\n # nested dicts\n ({\"a\": {\"b\": 1}}, {\"a\": {\"b\": 1}}, True),\n ({\"a\": {\"b\": 1}}, {\"a\": 
{\"b\": 2}}, False),\n (\n {\"a\": {\"a\": [1]}, \"b\": {\"b\": array([1])}},\n {\"a\": {\"a\": [1]}, \"b\": {\"b\": array([1])}},\n True,\n ),\n (\n {\"a\": {\"a\": [1]}, \"b\": {\"b\": array([1])}, \"c\": {\"c\": 1}},\n {\"a\": {\"a\": [1]}, \"b\": {\"b\": array([1])}},\n False,\n ),\n (\n {\"a\": {\"a\": {\"a\": {\"a\": array([1])}}}},\n {\"a\": {\"a\": {\"a\": {\"a\": array([1])}}}},\n True,\n ),\n ],\n)\ndef test_compare_dicts(dict1, dict2, expected):\n \"\"\"Test `compare_dicts`.\"\"\"\n assert compare_dicts(dict1, dict2) == expected\n\n\n@pytest.mark.parametrize(\n \"u_idx,v_idx,max_len,expected\",\n [\n (0, 0, 1, \"00\"),\n (0, 1, 1, \"01\"),\n (1, 0, 1, \"10\"),\n (1, 1, 1, \"11\"),\n (0, 0, 2, \"000\"),\n (12, 34, 2, \"1234\"),\n (12, 34, 3, \"12034\"),\n (12, 34, 4, \"120034\"),\n (789, 12345, 5, \"0078912345\"),\n (50, 50, 0, \"100\"), # unintended use case\n (50, 50, 1, \"550\"), # unintended use case\n ],\n)\ndef test___edge_to_1d(u_idx, v_idx, max_len, expected):\n \"\"\"Test `_edge_to_1d`.\"\"\"\n assert __edge_to_1d(u_idx, v_idx, max_len) == int(expected)\n\n\n@pytest.mark.parametrize(\n \"u_idx,v_idx,max_len,expected\",\n [\n ([0], [0], 1, [0]),\n ([0], [1], 1, [1]),\n ([1, 1], [1, 0], 1, [11, 10]),\n ([12, 9, 8], [34, 7, 6], 2, [1234, 907, 806]),\n ([787, 789], [12345, 12345], 5, [78712345, 78912345]),\n ],\n)\ndef test___edges_to_1d(u_idx, v_idx, max_len, expected):\n \"\"\"Test `_edges_to_1d`.\"\"\"\n assert array_equal(\n __edges_to_1d(array(u_idx, dtype=int32), array(v_idx, dtype=int32), max_len),\n array(expected, dtype=int64),\n )\n\n\n@pytest.mark.parametrize(\n \"val_1,val_2,expected\",\n [\n (0, 0, 0),\n (0, 1, inf),\n (1, 0, -inf),\n (1, 1, 0),\n (1, 2, 1),\n (2, 1, -1 / 2),\n (2, 2, 0),\n (2, 3, 1 / 2),\n (3, 2, -1 / 3),\n (-1, 1, -2),\n (1, -1, -2),\n (-1, -1, 0),\n (30, 87, 87 / 30 - 1),\n (40, 60, 1 / 2),\n (0, inf, inf),\n (1, inf, inf),\n (-1, inf, -inf),\n (inf, 0, -inf),\n (inf, 1, -inf),\n (inf, -1, -inf),\n (inf, inf, 0),\n (0, -inf, -inf),\n (1, -inf, -inf),\n (-1, -inf, inf),\n (-inf, 0, -inf),\n (-inf, 1, -inf),\n (-inf, -1, -inf),\n (-inf, -inf, 0),\n (inf, -inf, nan),\n (-inf, inf, nan),\n ],\n)\ndef test_percentual_increase(val_1, val_2, expected):\n \"\"\"Test `percentual_increase` by design.\"\"\"\n\n if expected is nan:\n assert isnan(percentual_increase(val_1, val_2))\n else:\n assert pytest.approx(percentual_increase(val_1, val_2), 1e-6) == expected\n\n\n@pytest.mark.parametrize(\n \"list1,list2,expected\",\n [\n # equal\n ([], None, True),\n ([{}], None, True),\n ([{}, {}], None, True),\n ([{}, {}, {}], None, True),\n ([{\"a\": 1}], None, True),\n # graphs\n ([{\"a\": 1, \"graph\": Graph()}], None, True),\n ([{\"a\": 1, \"graph\": Graph([(1, 2)])}], None, True),\n # isomorphism\n (\n [{\"a\": 1, \"graph\": Graph([(1, 2), (2, 3), (3, 4)])}],\n [{\"a\": 1, \"graph\": Graph([(4, 3), (3, 2), (2, 1)])}],\n True,\n ),\n (\n [{\"a\": 1, \"graph\": Graph([(1, 2), (2, 3), (3, 4)])}],\n [{\"a\": 1, \"graph\": Graph(MultiDiGraph([(4, 3), (3, 2), (2, 1)]))}],\n True,\n ),\n # different keys\n ([{\"a\": 1}], [{\"b\": 1}], False),\n # different values\n ([{\"a\": 1}, {\"a\": 1}], [{\"a\": 1}, {\"a\": 2}], False),\n ([{\"b\": 1}, {\"a\": 1}], [{\"b\": 1}, {\"a\": 2}], False),\n # different length\n ([], [{}], False),\n # different graphs\n (\n [{\"a\": 1, \"graph\": Graph([(1, 2)])}],\n [{\"a\": 1, \"graph\": Graph([(1, 3), (3, 2)])}],\n False,\n ),\n ],\n)\ndef test_compare_components_and_partitions(list1, list2, expected):\n \"\"\"Test 
`compare_components_and_partitions`.\n Compares two lists of dicts, where the dicts need to have the same keys and\n especially checks if the values in the dict are equal or isomorphic if type is\n Graph.\n \"\"\"\n if list2 is None:\n assert compare_components_and_partitions(list1, list1) is True\n else:\n assert compare_components_and_partitions(list1, list2) == expected\n","repo_name":"cbueth/Superblockify","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":11477,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"41094158166","text":"# coding=utf-8\n#\n# created by kpe on 25.Jul.2019 at 12:23\n#\n\nfrom __future__ import absolute_import, division, print_function\n\n\n\nimport unittest\nimport math\n\nimport tensorflow as tf\nfrom tensorflow.python import keras\n\nfrom bert import BertModelLayer, loader\nfrom bert.loader import map_from_stock_variale_name, map_to_stock_variable_name, load_stock_weights\nfrom bert.loader import StockBertConfig, map_stock_config_to_params\nfrom bert.tokenization import FullTokenizer\n\n#tf.enable_eager_execution()\n#tf.disable_eager_execution()\n\n\ndef flatten_layers(root_layer):\n if isinstance(root_layer, keras.layers.Layer):\n yield root_layer\n for layer in root_layer._layers:\n for sub_layer in flatten_layers(layer):\n yield sub_layer\n\n\ndef freeze_bert_layers(l_bert):\n \"\"\"\n Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751.\n \"\"\"\n for layer in flatten_layers(l_bert):\n if layer.name in [\"LayerNorm\", \"adapter-down\", \"adapter-up\"]:\n layer.trainable = True\n elif len(layer._layers) == 0:\n layer.trainable = False\n l_bert.embeddings_layer.trainable = False\n\n\ndef create_learning_rate_scheduler(max_learn_rate=5e-5,\n end_learn_rate=1e-7,\n warmup_epoch_count=10,\n total_epoch_count=90):\n\n def lr_scheduler(epoch):\n if epoch < warmup_epoch_count:\n res = (max_learn_rate/warmup_epoch_count) * (epoch + 1)\n else:\n res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1))\n return float(res)\n learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)\n\n return learning_rate_scheduler\n\n\nclass TestWeightsLoading(unittest.TestCase):\n bert_ckpt_dir = \".models/uncased_L-12_H-768_A-12/\"\n bert_ckpt_file = bert_ckpt_dir + \"bert_model.ckpt\"\n bert_config_file = bert_ckpt_dir + \"bert_config.json\"\n\n def test_load_pretrained(self):\n print(\"Eager Execution:\", tf.executing_eagerly())\n\n bert_params = loader.params_from_pretrained_ckpt(self.bert_ckpt_dir)\n bert_params.adapter_size = 32\n bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n model = keras.models.Sequential([\n keras.layers.InputLayer(input_shape=(128,)),\n bert,\n keras.layers.Lambda(lambda x: x[:, 0, :]),\n keras.layers.Dense(2)\n ])\n\n # we need to freeze before build/compile - otherwise keras counts the params twice\n if bert_params.adapter_size is not None:\n freeze_bert_layers(bert)\n\n model.build(input_shape=(None, 128))\n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n\n load_stock_weights(bert, self.bert_ckpt_file)\n\n 
model.summary()\n\n\n","repo_name":"fakerbrother/copy-bert-for-tf2","sub_path":"tests/nonci/test_stock_weights.py","file_name":"test_stock_weights.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11552327917","text":"#function practice,Write a function that takes in two file names as parameters and writes every other line from one file to the other file.\n\n\n\n\n\ndef files (input_file,output_file):\n line_num = 1\n for line in input_file:\n line = line.strip()\n if line_num % 2 == 1:\n print(line, file = output_file)\n line_num += 1\n #no return since output is going to a file\n\nfile = open(\"file1.txt\",\"r\")\nfile2 = open(\"file2.txt\",\"w\")\n\nfiles(file, file2)\nfile.close()\nfile2.close()\n","repo_name":"aarhusdavid/Python-Files","sub_path":"file_prac.py","file_name":"file_prac.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14499605051","text":"# import traceback\nimport os\n\n\nclass FileScanner:\n def __init__(self, settings):\n self.show_warning_on_open_fail = settings.get('find_in_project_show_warning_on_open_failure', False)\n self.encodings = settings.get('find_in_project_encodings', [\"utf-8\"])\n self.skip_binary = settings.get('find_in_project_skip_binary_files', True)\n self.show_warning_binary_skip = settings.get('find_in_project_show_warning_on_binary_skip', False)\n self.max_file_size = settings.get('find_in_project_max_file_size_mb', 20)*1000000\n self.show_warning_size_skip = settings.get('find_in_project_show_warning_on_size_skip', False)\n\n exts_to_ignore = settings.get('find_in_project_ignore_extensions', [])\n self.exts_to_ignore = [x.lower() for x in exts_to_ignore]\n\n self.warnings = []\n\n def read_lines(self, filename):\n if self._should_include_file(filename):\n for enc in self.encodings:\n try:\n # Read file line by line\n with open(filename, \"r\", encoding=enc) as f:\n line_no = 0\n while True:\n line_no += 1\n line = f.readline()\n if not line:\n break\n\n if self.skip_binary and '\\0' in line:\n if self.show_warning_binary_skip:\n self.warnings.append(\n \"Skipped binary file.\")\n return\n else:\n yield (line_no, line)\n\n return\n\n except UnicodeDecodeError:\n # Probably using wrong encoding\n # traceback.print_exc()\n continue\n\n print(\"Unable to read file:\", filename)\n if self.show_warning_on_open_fail:\n self.warnings.append(\n \"Failed to open file. 
This could be due to unknown/unspecified encoding.\")\n\n def _should_include_file(self, filename):\n file_extension = os.path.splitext(filename)[1][1:]\n if file_extension.lower() in self.exts_to_ignore:\n print('Skipping file with ignored extension: ', filename)\n return False\n\n file_size = os.path.getsize(filename)\n if file_size > self.max_file_size:\n if self.show_warning_size_skip:\n self.warnings.append(\n \"Skipped file due to size (%.2f MB).\" % (file_size / 1000000.,))\n print('Skipping file with size larger than maximum: ', filename)\n return False\n\n return True\n\n\nclass DirScanner:\n def __init__(self, settings):\n self.follow_symlinks = settings.get('find_in_project_follow_sym_links', False)\n\n dirs_to_ignore = settings.get('find_in_project_ignore_dirs', [])\n self.dirs_to_ignore = [x.lower() for x in dirs_to_ignore]\n\n def list_tree(self, directory):\n for root, dirs, files in os.walk(directory, followlinks=self.follow_symlinks):\n dirs[:] = [d for d in dirs if self._should_include_dir(d)]\n yield root, dirs, files\n\n def _should_include_dir(self, directory):\n return directory.lower() not in self.dirs_to_ignore\n","repo_name":"dave-ellis/DocumentSearch","sub_path":"scanners.py","file_name":"scanners.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21642044482","text":"\r\nimport json\r\nimport network\r\nimport dhcp\r\n\r\ndef load_config():\r\n \"\"\"\r\n This function loads the configuration from the config.json file.\r\n \"\"\"\r\n try:\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n return config\r\n except Exception as e:\r\n print(f\"Error loading config: {e}\")\r\n return None\r\n\r\ndef troubleshoot_network():\r\n \"\"\"\r\n This function troubleshoots the network by sending a DHCP request and printing the results.\r\n \"\"\"\r\n try:\r\n # Load the config\r\n config = load_config()\r\n if config is None:\r\n return\r\n\r\n # Get the network interface\r\n interface = network.get_interface()\r\n if interface is None:\r\n return\r\n\r\n # Get the local IP\r\n local_ip = network.get_local_ip(interface)\r\n if local_ip is None:\r\n return\r\n\r\n # Get the MAC address\r\n mac_address = dhcp.get_mac_address(interface)\r\n if mac_address is None:\r\n return\r\n\r\n # Send a DHCP discover packet\r\n dhcp.send_dhcp_discover_packet(interface)\r\n\r\n # Print the results\r\n print(f\"Interface: {interface}\")\r\n print(f\"Local IP: {local_ip}\")\r\n print(f\"MAC Address: {mac_address}\")\r\n except Exception as e:\r\n print(f\"Error troubleshooting network: {e}\")\r\n\r\nif __name__ == \"__main__\":\r\n troubleshoot_network()\r\n\r\n","repo_name":"Shawn-Falconbury/simple_dhcp","sub_path":"troubleshoot.py","file_name":"troubleshoot.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5352028560","text":"import random\nimport numpy as np\n\n# A data structure to hold training steps for subsequent replay\n# holds the most recent self.max_size training steps\nclass ReplayMemory:\n\n def __init__(self, max_size=1000):\n # once ReplayMemory is full, self.i determines where to add new values\n self.i = 0\n self.ls = []\n self.max_size = max_size\n\n def __getitem__(self, slice_obj):\n return self.ls[slice_obj]\n\n def __len__(self):\n return len(self.ls)\n\n def add(self, value):\n if len(self.ls) < self.max_size: # ReplayMemory isn't full\n 
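# under capacity: simply append the new training step\n            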
self.ls.append(value)\n else:\n self.ls[self.i] = value\n self.i += 1\n if self.i == self.max_size:\n self.i = 0 # reset index back to 0 when it reaches max size\n\n# select an epsilon greedy action\ndef choose_epsilon_greedy_action(q, encoded_state, epsilon):\n if random.random() < epsilon:\n return random.choice(range(q.num_actions))\n else:\n action_values = q.propagate(encoded_state)[0]\n return np.argmax(action_values)\n\n# generate an episode using epsilon greedy actions\ndef generate_epsilon_greedy_episode(env, q, state_encoder, epsilon):\n episode = []\n state = env.reset()\n done = False\n while not done:\n encoded_state = state_encoder(env, state)\n action = choose_epsilon_greedy_action(q, encoded_state, epsilon)\n (next_state, reward, done, _) = env.step(action)\n episode_step = (state, action, reward, next_state)\n episode.append(episode_step)\n state = next_state\n return episode\n\n# determine post-training performance\ndef estimate_performance(env, q, state_encoder, epsilon=0.1, num_episodes=25):\n episode_lengths = [len(generate_epsilon_greedy_episode(env, q, state_encoder, epsilon)) \\\n for _ in range(num_episodes)]\n avg = sum(episode_lengths) / num_episodes\n print(\"average episode length: {}\".format(avg))\n","repo_name":"joepalermo/reinforcement-learning-experiments","sub_path":"dqn/dqn_utils.py","file_name":"dqn_utils.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"262567542","text":"from bot import bot\nfrom chat import gen_img, req\nfrom filters import IsAdmin , IsSubscribed\nfrom database import ins,get_all,get_count\nfrom vars import ADMINS\nfrom keyboards import Keyboards\nfrom telebot.types import Message,CallbackQuery\n\nkeyboards=Keyboards(bot)\n\n@bot.message_handler(content_types=['new_chat_members'])\ndef new_chat_members(m: Message):\n for i in m.new_chat_members:\n bot.send_chat_action(m.chat.id,'typing')\n bot.send_message(m.chat.id,f\"Assalomu alaykum {i.first_name}!\\nGuruhga xush kelibsiz.\")\n try:\n bot.delete_message(m.chat.id,m.id)\n except Exception as e:\n print(e)\n\n@bot.message_handler(is_subscribed=True,commands=['start'])\ndef start(m: Message):\n ins(m.from_user.id)\n bot.send_chat_action(m.chat.id,'typing')\n bot.send_message(\n m.chat.id,\n f\"Assalomu alaykum {m.from_user.first_name}! Botdan foydalanish uchun /help buyrug'idan foydalaning.\",\n reply_markup=keyboards.getMainButtons() if m.chat.type=='private' else None\n )\n\n@bot.message_handler(is_subscribed=True,commands=['help'])\ndef help(m: Message):\n bot.send_chat_action(m.chat.id,'typing')\n bot.reply_to(m,\"/ask — savollarga javob topishda va ko'plab boshqa muammolarni yechishda yordam beradi. Foydalanish uchun /ask buyrug'i bilan birgalikda xabar kiriting.\\nMasalan: ``` /ask Salom milliai!``` \\n/photo — rasmlarni osongina yaratish uchun yordam beradi. 
Foydalanish uchun /photo buyrug'i bilan birgalikda xabarni kiriting.\\nMasalan: ``` /photo offisda ishlayotgan mushuk ```\")\n\n@bot.message_handler(chat_types=['private'],content_types=['text'],func=keyboards.statsFilter)\ndef stats(m: Message): keyboards.statsFunc(m)\n\n@bot.message_handler(commands=['ad'])\ndef ad(m: Message):\n if str(m.from_user.id) in ADMINS:\n bot.send_chat_action(m.chat.id,'typing')\n msg=bot.reply_to(m,f\"Reklama uchun postni menga yuboring.\")\n bot.register_next_step_handler(msg,ad2)\n\ndef ad2(m: Message):\n for i in get_all():\n try:\n bot.copy_message(i[0],m.chat.id,m.id)\n except Exception as e:\n print(e)\n\n@bot.message_handler(chat_types=['private'],content_types=['text'],func=keyboards.contactFilter)\ndef contact(m: Message): keyboards.contactFunc(m)\n\n@bot.message_handler(is_subscribed=True,content_types=['text'],func=lambda m: m.text.startswith('/photo'),chat_types=['group','supergroup'])\ndef rasm(m: Message):\n keyboards.genFunc2(m)\n\n@bot.message_handler(is_subscribed=True,content_types=['text'],func=keyboards.genFilter)\ndef rasm_pr(m: Message): keyboards.genFunc(m)\n\n@bot.message_handler(is_subscribed=True,content_types=['text'],func=lambda m: m.text.startswith('/ask'),chat_types=['group','supergroup'])\ndef rec_gr(m: Message):\n bot.send_chat_action(m.chat.id,'typing')\n r=req(' '.join(m.text.split()[1:]))\n try:\n bot.reply_to(m,r)\n except:\n bot.send_message(m.chat.id,r)\n\n@bot.message_handler(is_subscribed=True,content_types=['text'],chat_types=['private'],func=keyboards.askFilter)\ndef rec_pr(m: Message): keyboards.askFunc(m)\n\n@bot.message_handler(content_types=['text'],chat_types=['private'],func=keyboards.requestFilter)\ndef reqhandler(m: Message): keyboards.requestFunc(m)\n\n@bot.message_handler(chat_types=['private'],func=keyboards.replyFilter)\ndef reply(m: Message): keyboards.reply(m)\n\n@bot.callback_query_handler(keyboards.checkFilter)\ndef check_subscription(cb: CallbackQuery): keyboards.checkFunc(cb)\n\n@bot.message_handler(func=lambda m: True,chat_types=['private'])\ndef check(m: Message):\n if not IsSubscribed.check(m):\n try:\n bot.delete_message(m.chat.id,m.id)\n except Exception as e:\n print(e)\n bot.send_chat_action(m.chat.id,'typing')\n bot.send_message(m.chat.id,f\"Assalomu alaykum {m.from_user.first_name}!\\n⚠️ @milliaibot dan foydalanishdan oldin bizning rasmiy telegram sahifamizga va homiy telegram kanaliga obuna bo'ling. 
Soʻng botni qayta boshlashni unutmang!\",reply_markup=keyboards.getChannelButton())\n\n\nbot.add_custom_filter(IsSubscribed())\nbot.add_custom_filter(IsAdmin())\n\nif __name__=='__main__':\n bot.infinity_polling()\n","repo_name":"ImEndie/milliaisbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"9428312141","text":"from weboob.tools.backend import Module, BackendConfig\nfrom weboob.capabilities.messages import CapMessages, CapMessagesPost, Thread, Message\nfrom weboob.tools.value import Value\n\nfrom .browser import GuerrillamailBrowser\n\n\n__all__ = ['GuerrillamailModule']\n\n\nclass GuerrillamailModule(Module, CapMessages, CapMessagesPost):\n NAME = 'guerrillamail'\n DESCRIPTION = u'GuerrillaMail temp mailbox'\n MAINTAINER = u'Vincent A'\n EMAIL = 'dev@indigo.re'\n LICENSE = 'AGPLv3+'\n VERSION = '2.1'\n\n BROWSER = GuerrillamailBrowser\n\n CONFIG = BackendConfig(Value('inbox', label='Inbox', default=''))\n\n def iter_threads(self):\n inbox = self.config['inbox'].get()\n if not inbox:\n raise NotImplementedError()\n else:\n return [self.get_thread(inbox)]\n\n def get_thread(self, _id):\n t = Thread(_id)\n t.title = 'Mail for %s' % _id\n t.flags = t.IS_DISCUSSION\n\n first = True\n for d in self.browser.get_mails(_id):\n m = self.make_message(d, t)\n\n if not m.content:\n m.content = self.browser.get_mail_content(m.id)\n\n if first:\n first = False\n t.root = m\n else:\n m.parent = t.root\n m.parent.children.append(m)\n\n return t\n\n def post_message(self, m):\n raise NotImplementedError()\n for receiver in m.receivers:\n self.browser.send_mail(m.sender, receiver, m.title, m.content)\n\n def make_message(self, d, thread):\n m = Message(thread, d['id'])\n m.children = []\n m.sender = d['from']\n m.flags = 0\n if not d.get('read', True):\n m.flags = m.IS_UNREAD\n m.title = d['subject']\n m.date = d['datetime']\n m.receivers = [d['to']]\n return m\n","repo_name":"laurentb/weboob","sub_path":"modules/guerrillamail/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"75"} +{"seq_id":"586704397","text":"import discord\nimport os\nimport random\nimport time\nimport csv\nimport pandas\nfrom replit import db\n\nintents = discord.Intents.default()\nintents.message_content = True\n\nclient = discord.Client(intents=intents)\n\ntest_List = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\ntroll_List = [\"Why don't you play with some friends\"]\n\ncsv = pandas.read_csv('GameItems.csv')\nitems = {'name': [], 'descriptions': []}\n\nfor index, item in csv.iterrows():\n items['name'].append(str(item[13]).lower())\n items['descriptions'].append(str(item[14]).lower())\n\n\n\n\n\ndef update_game_list(game_List):\n if \"games\" in db.keys():\n games = db[\"games\"]\n games.append(game_List)\n db[\"games\"] = games\n else:\n db[\"games\"] = [game_List]\n\ndef delete_game_list(index):\n games = db[\"games\"]\n print(games)\n index = int(index)\n if len(games) > index:\n del games[index]\n db[\"games\"] = games\n\n@client.event\nasync def on_ready():\n print('We have sucessfully logged in as {0.user}'.format(client))\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n \n if message.content.startswith('$hello'):\n start = time.time()\n await message.channel.send('Hello!')\n end = 
time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith(\"$help\"):\n start = time.time()\n await message.channel.send('To add a game to the list just say, $add followed by the name of the game you want to add. To delete a game off the list, just type $del followed by the name of the game in the list')\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith('$progress'):\n start = time.time()\n await message.channel.send('I am fully working!')\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith('palico tell me a joke'):\n start = time.time()\n await message.channel.send('Here is a joke from where I come from. What do you call a hunting squad full of hunting horns? An orcestra. XD')\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n \n if message.content.startswith('random number'):\n start = time.time()\n await message.channel.send(random.choice(test_List))\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n options = troll_List\n if \"games\" in db.keys():\n options = options + list(db[\"games\"])\n\n if message.content.startswith('palico tell me what to play'):\n start = time.time()\n await message.channel.send(random.choice(options))\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n \n if message.content.startswith('$add'):\n start = time.time()\n game_List = message.content.split('$add')[1].strip().upper()\n update_game_list(game_List)\n await message.channel.send('Game has been added to list')\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith('$del'):\n start = time.time()\n games = []\n if \"games\" in db.keys():\n game = str(message.content).split('$del')[1].strip().upper()\n index = db['games'].index(game)\n delete_game_list(int(index))\n index = db[\"games\"]\n await message.channel.send('Game has been deleted')\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith('$game list'):\n start = time.time()\n listOrder = 'Game list in order:\\n'\n for game in db['games']:\n listOrder = listOrder + f'{db[\"games\"].index(game)}: {game}\\n'\n await message.channel.send(listOrder)\n end = time.time()\n total = end - start\n print('%.2f' % total, 'Seconds')\n\n if message.content.startswith('$item info'):\n start = time.time()\n UserInput = message.content.split('$item info')[1].strip()\n try:\n index = items['name'].index(str(UserInput).lower())\n await message.channel.send(items['descriptions'] [index])\n except:\n suggest = f'Sorry I do not recognize \"{UserInput}\". 
Did you mean to say any of the following?'\n            row = 0\n            for item in items['name']:\n                if str(UserInput).lower() in str(item).lower():\n                    row += 1\n                    suggest += f'\\n{row}) {item}'\n            await message.channel.send(suggest)\n        end = time.time()\n        total = end - start\n        print('%.2f' % total, 'Seconds')\n\n    if message.content.startswith('$monster info '):\n\n        csv2 = pandas.read_csv('MonsterInfo.csv')\n        monsters = {\n            'mname': [], 'weakness': [], 'resistance':[]\n        }\n        \n        def element_status(order):\n            if order == 0:\n                return 'Fire'\n            elif order == 1:\n                return 'Water'\n            elif order == 2:\n                return 'Thunder'\n            elif order == 3:\n                return 'Ice'\n            elif order == 4:\n                return 'Dragon'\n            elif order == 5:\n                return 'Poison'\n            elif order == 6:\n                return 'Sleep'\n            elif order == 7:\n                return 'Paralysis'\n            elif order == 8:\n                return 'Blast'\n            elif order == 9:\n                return 'Stun'\n        \n        for index, row in csv2.iterrows():\n            \n            name = row[0]\n            form = row[2]\n            fire = row[3]\n            water = row[4]\n            thunder = row[5]\n            ice = row[6]\n            dragon = row[7]\n            poison = row[8]\n            sleep = row[9]\n            paralysis = row[10]\n            blast = row[11]\n            stun = row[12]\n            mlist = 0\n            \n            elements = [fire, water, thunder, ice, dragon, poison, sleep, paralysis, blast, stun]\n            \n            if len(str(form)) > 3:\n                mweak = f'Weak to \"{form}\":\\n'\n                mresistance = f'Resistant against \"{form}\":\\n'\n            else:\n                mweak = f'Weak to:\\n'\n                mresistance = f'Resistant to:\\n'\n            for element in elements:\n                try:\n                    if int(element) < 2:\n                        mresistance += f'- {element_status(mlist)}\\n'\n                    elif int(element) >= 2:\n                        mweak += f'- {element_status(mlist)}\\n'\n                    mlist += 1\n                except (ValueError, TypeError):\n                    mlist += 1\n            monsters['mname'].append(str(name).lower())\n            monsters['resistance'].append(mresistance)\n            monsters['weakness'].append(mweak)\n\n        \n        print(monsters['mname'])\n        UserInput2 = message.content.split('$monster info')[1].strip().lower()\n        \n\n        if UserInput2 in monsters['mname']:\n            idx = monsters['mname'].index(UserInput2)\n            await message.channel.send(monsters['weakness'][idx] + monsters['resistance'][idx])\n        else:\n            await message.channel.send(\"monster not found\")\n        \n\nmy_secret = os.environ['token'] \nclient.run(os.getenv(\"token\"))\n\n\n","repo_name":"Drios551/PalicoProject_DataStructuresFinal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3815581259","text":"# (c) 2023, Oliver Graebner, oliver.graebner@zohomail.eu\n\nfrom influxdb_client import InfluxDBClient, Point\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nimport string\nimport re\n\nimport log\nlogger = log.getGlobalLogger()\n\nclass influxDB:\n\n    def __init__(self, config: dict):\n        \"\"\"\n        Default constructor.\n        \"\"\"\n        self.url = config['url']\n        self.accessToken = config['access_token']\n        self.org = config['org']\n        self.bucketName = config['bucket_name']\n        self.bucketDescription = \"Stores values retrieved from Hargassner Web API\"\n        self.client = InfluxDBClient(url=self.url, token=self.accessToken, org=self.org)\n        self.bucket = None\n\n    \n    def initBucket(self):\n        \"\"\"\n        Creates the necessary bucket if it does not already exist.\n\n        Returns:\n            The bucket found / created, or raises an exception if something went wrong.\n        \"\"\"\n        bucketsApi = self.client.buckets_api()\n        b = bucketsApi.find_bucket_by_name(self.bucketName)\n        if b is None:\n            logger.info(f\"No bucket with name {self.bucketName} found\")\n            b = bucketsApi.create_bucket(bucket_name=self.bucketName, description=self.bucketDescription, org=self.org)\n            
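# log the id and retention policy of the freshly created bucket\n            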
logger.info(f'Created bucket \"{b.name}\" with id \"{b.id}\", retention policy \"{b.retention_rules}\"')\n        else:\n            logger.debug(f\"Found bucket '{b.name}' with id '{b.id}'\")\n        self.bucket = b\n        return b\n    \n    def __sanitizeString(self, str):\n        \"\"\"\n        Replaces special characters (commas, spaces,... ) in a string so that it can be used as measurement name or tag\n        \"\"\"\n        chars = re.escape(string.punctuation)\n        chars = chars + \" \"\n        newStr = re.sub('['+chars+']', '_', str)\n        if str != newStr:\n            logger.debug(f'Replaced string \"{str}\" with \"{newStr}\"')\n        return newStr\n\n\n    def writeData(self, installation, subsystemData, timestamp):\n        \"\"\"\n        Writes the given JSON data object to the bucket\n\n        Parameters:\n            installation    Dictionary with installation data\n            subsystemData   List of subsystems with their data (as dictionary) for the installation\n            timestamp       Seconds since epoch time.\n        \"\"\"\n        writeApi = self.client.write_api()\n\n        tags = installation.copy()\n        tags['name'] = self.__sanitizeString(tags['name'])\n        tags['id'] = str(tags['id'])\n        \n        for subsys in subsystemData:\n            influxMeasurement = dict()\n            influxMeasurement['measurement'] = self.__sanitizeString(subsys['name'])\n            influxMeasurement['tags'] = tags\n            influxMeasurement['fields'] = subsys['values']\n            influxMeasurement['time'] = timestamp\n            logger.debug(f'Data for writing: {influxMeasurement}')\n            writeApi.write(self.bucketName, self.org, influxMeasurement, write_precision = 'ms')\n            writeApi.flush()\n\n        writeApi.close()\n\n","repo_name":"ofg/hargassner-monitoring","sub_path":"hargassner_web_api_pull/influxdb.py","file_name":"influxdb.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5797479108","text":"from django.conf import settings\nfrom colorfield.fields import ColorField\nfrom django.core.validators import (MaxValueValidator,\n                                    MinValueValidator,\n                                    RegexValidator)\nfrom django.db import models\nfrom django.db.models import UniqueConstraint\n\nfrom users.models import User\n\n\nclass Tag(models.Model):\n    \"\"\"Tags model.\"\"\"\n    name = models.CharField(\n        max_length=settings.LENGTH_OF_FIELDS_RECIPES,\n        db_index=True,\n        unique=True,\n        verbose_name='Название тега'\n    )\n    color = ColorField(\n        max_length=settings.LENGTH_OF_FIELDS_RECIPES_2,\n        format='hex',\n        unique=True,\n        verbose_name='Цвет',\n        validators=[\n            RegexValidator(\n                regex=\"^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$\",\n                message='Проверьте вводимый формат',\n            )\n        ],\n    )\n    slug = models.SlugField(\n        max_length=settings.LENGTH_OF_FIELDS_RECIPES,\n        unique=True,\n        verbose_name='Адрес'\n    )\n\n    class Meta:\n        verbose_name = 'Тег'\n        verbose_name_plural = 'Теги'\n\n    def __str__(self):\n        return self.name\n\n\nclass Ingredient(models.Model):\n    \"\"\"Ingredients model.\"\"\"\n    name = models.CharField(\n        max_length=settings.LENGTH_OF_FIELDS_RECIPES,\n        db_index=True,\n        verbose_name='Название ингредиента'\n    )\n    measurement_unit = models.CharField(\n        max_length=settings.LENGTH_OF_FIELDS_RECIPES,\n        verbose_name='Единицы измерения'\n    )\n\n    class Meta:\n        verbose_name = 'Ингредиент'\n        verbose_name_plural = 'Ингредиенты'\n        constraints = [\n            models.UniqueConstraint(\n                fields=['name', 'measurement_unit'],\n                name='unique_name_measurement_unit'\n            )\n        ]\n\n    def __str__(self):\n        return f'{self.name}, {self.measurement_unit}'\n\n\nclass Recipe(models.Model):\n    \"\"\"Recipes model.\"\"\"\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='recipes',\n        verbose_name='Автор 
рецепта'\n )\n tags = models.ManyToManyField(\n Tag,\n verbose_name='Теги'\n )\n name = models.CharField(\n max_length=settings.LENGTH_OF_FIELDS_RECIPES,\n verbose_name='Название рецепта',\n validators=[\n RegexValidator(\n regex=\"^[а-яА-Я ]+$\",\n message='Символы в название запрещены.',\n )\n ]\n )\n image = models.ImageField(\n upload_to='recipes/image/',\n verbose_name='Изображение'\n )\n text = models.TextField(verbose_name='Рецепт')\n cooking_time = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(\n 1, message='Время приготовления не менее 1 минуты.'\n ), MaxValueValidator(\n 1441, message='Время приготовления не более 24 часов.'\n )]\n )\n ingredients = models.ManyToManyField(\n Ingredient,\n through='IngredientRecipe',\n verbose_name='Ингредиенты'\n )\n pub_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата создания рецепта'\n )\n\n class Meta:\n ordering = ('-pub_date',)\n verbose_name = 'Рецепт'\n verbose_name_plural = 'Рецепты'\n\n def __str__(self):\n return self.name\n\n\nclass IngredientRecipe(models.Model):\n \"\"\"Ingredients of recipe model.\"\"\"\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n related_name='ingredient_amount',\n verbose_name='Рецепт'\n )\n ingredient = models.ForeignKey(\n Ingredient,\n on_delete=models.CASCADE,\n verbose_name='Ингредиент'\n )\n amount = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(\n 1, message='Слишком мало ингредиентов.'\n ), MaxValueValidator(9999, message='Слишком много ингредиентов.')],\n verbose_name='Количество продукта'\n )\n\n class Meta:\n verbose_name = 'Количество ингредиента'\n verbose_name_plural = 'Количество ингредиентов'\n constraints = [\n models.UniqueConstraint(\n fields=('ingredient', 'recipe',),\n name='unique_ingredients_amount_for_recipe'\n )\n ]\n\n def __str__(self) -> str:\n return f'{self.ingredient.name}, {self.recipe.name}'\n\n\nclass FavoriteShoppingCart(models.Model):\n \"\"\"Linking model of shopping list and favorites.\"\"\"\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n verbose_name='Пользователь'\n )\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n verbose_name='Рецепт',\n )\n\n class Meta:\n abstract = True\n constraints = [\n UniqueConstraint(\n fields=('user', 'recipe'),\n name='%(app_label)s_%(class)s_unique'\n )\n ]\n\n def __str__(self):\n return f'{self.user} :: {self.recipe}'\n\n\nclass Favorite(FavoriteShoppingCart):\n \"\"\"Adding recipe to favourites model.\"\"\"\n\n class Meta:\n default_related_name = 'favorites'\n verbose_name = 'Избранное'\n verbose_name_plural = 'Избранное'\n\n\nclass ShoppingCart(FavoriteShoppingCart):\n \"\"\"Shopping cart model.\"\"\"\n\n class Meta(FavoriteShoppingCart.Meta):\n default_related_name = 'shopping_list'\n verbose_name = 'Корзина'\n verbose_name_plural = 'Корзина'\n","repo_name":"TonyxRazzor/foodgram-project-react","sub_path":"backend/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34317483193","text":"'''\nMerge sort, time complexity O(nlog(n))\n\nImplemented recursively: breaks the array into half, sort left half, sort right half, then merge.\nNumber of levels of recursions: log(n)\nAmount of work done per level: n\n\nhttps://www.geeksforgeeks.org/merge-sort/\n'''\n\ndef mergeSort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n L = arr[:mid]\n R = arr[mid:]\n\n mergeSort(L)\n 
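# sort the right half the same way; the merge step below combines the two sorted halves\n        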
mergeSort(R)\n\n # the following is the merge step\n # when mergeSort is called on the left and right sub-array, they came back sorted\n i = j = k = 0\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n arr[k] = L[i]\n i += 1\n else:\n arr[k] = R[j]\n j += 1\n k += 1\n\n while i < len(L):\n arr[k] = L[i]\n i += 1\n k += 1\n while j < len(R):\n arr[k] = R[j]\n j += 1\n k += 1\n\n\narr = [8, 4, 2, 7, 9, 10, 0, 1, 6, 3, 5]\nprint('Given array: \\n', arr)\nmergeSort(arr)\nprint('Sorted array: \\n', arr)\n","repo_name":"mingchang93/algorithm-and-data-structure","sub_path":"Algo-Merge_Sort.py","file_name":"Algo-Merge_Sort.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14179398211","text":"'''\napproach: DP\nTime: O(N^2)\nSpace: O(N)\n\nYou are here!\nYour runtime beats 13.34 % of python3 submissions.\nYou are here!\nYour memory usage beats 48.54 % of python3 submissions.\n'''\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n if not nums:\n return 0\n \n dp = []\n for i in range(len(nums)):\n dp.append(1)\n for j in range(i):\n if nums[i] > nums[j]:\n dp[i] = max(dp[i], dp[j] + 1)\n return max(dp)\n","repo_name":"lixiang2017/leetcode","sub_path":"explore/2021/july/Longest_Increasing_Subsequence.py","file_name":"Longest_Increasing_Subsequence.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23176264460","text":"import unittest\nfrom mock_data import valid_resources\nfrom srctrueview import Input\nfrom config import MAX_STR_LENGTH, PASSWORD_LENGTH, PROJECT_DESCRIPTION_MAX_LENGTH, PROJECT_VERSION_MAX_LENGTH, \\\n MAX_RESOURCES_ALLOWED\n\n\ndef get_input_object():\n return {\"resources\": valid_resources}\n\n\ndef get_valid_and_invalid_str_input(max_length):\n invalid = \"\".join([\"z\" for _ in range(max_length + 1)])\n valid = \"\".join([\"z\" for _ in range(max_length)])\n return invalid, valid\n\n\nclass UnitTest(unittest.TestCase):\n\n def test_resources_initial_validation(self):\n too_many_resources = []\n duplicated_resources = []\n duplicated_resources.extend(valid_resources)\n duplicated_resources.extend(valid_resources)\n\n for _ in range(round(MAX_RESOURCES_ALLOWED / 4)):\n too_many_resources.extend(valid_resources)\n\n # no resources\n with self.assertRaises(ValueError):\n Input()\n\n # too many resources\n with self.assertRaises(ValueError):\n Input(**{\"resources\": too_many_resources})\n\n # less than maximum resources, but contains duplicates\n with self.assertRaises(ValueError):\n Input(**{\"resources\": duplicated_resources})\n\n def test_relationship_to_nonexistent_table(self):\n data = get_input_object()\n relationships = [\n {\n \"type\": \"ONE-TO-MANY\",\n \"table\": \"nonexistent_table\",\n \"reference_field\": \"custid\"\n }\n ]\n\n data[\"resources\"][0][\"relationships\"] = relationships\n\n # relationship to non existent table\n with self.assertRaises(ValueError):\n Input(**data)\n\n def test_relationship_to_same_table(self):\n data = get_input_object()\n relationships = [\n {\n \"type\": \"ONE-TO-MANY\",\n \"table\": \"Customers\",\n \"reference_field\": \"custid\"\n }\n ]\n\n data[\"resources\"][0][\"relationships\"] = relationships\n\n # relationship from a resource to itself\n with self.assertRaises(ValueError):\n Input(**data)\n\n def test_relationship_nonexistent_referenced_field(self):\n data = get_input_object()\n relationships = [\n 
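# this reference_field does not exist on the resource, so validation must fail\n            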
{\n \"type\": \"ONE-TO-MANY\",\n \"table\": \"Ord\",\n \"reference_field\": \"nonexistent_field\"\n }\n ]\n data[\"resources\"][0][\"relationships\"] = relationships\n\n # indicated field does not exist in the resource\n with self.assertRaises(ValueError):\n Input(**data)\n\n def test_relationship_m2m_with_ref_field(self):\n data = get_input_object()\n relationships = [\n {\n \"type\": \"MANY-TO-MANY\",\n \"table\": \"Ord\",\n \"reference_field\": \"custid\"\n }\n ]\n data[\"resources\"][0][\"relationships\"] = relationships\n\n # many-to-many with referenced_field\n with self.assertRaises(ValueError):\n Input(**data)\n\n def test_relationship_o2m_with_no_ref_field(self):\n data = get_input_object()\n relationships = [\n {\n \"type\": \"ONE-TO-MANY\",\n \"table\": \"Ord\",\n }\n ]\n\n data[\"resources\"][0][\"relationships\"] = relationships\n\n # one-to-many/one-to-one with no referenced_field\n with self.assertRaises(ValueError):\n Input(**data)\n\n def test_invalid_port(self):\n data = get_input_object()\n options = {\n \"application_port\": -1\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"application_port\"] = 65536\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"application_port\"] = 65535\n\n self.assertIsInstance(Input(**data), Input)\n\n def test_db_type_validation(self):\n data = get_input_object()\n options = {\n \"database_options\": {\n \"db_type\": \"PostgreSQL\"\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"database_options\"][\"db_type\"] = \"MariaDB\"\n self.assertIsInstance(Input(**data), Input)\n\n data[\"options\"][\"database_options\"][\"db_type\"] = \"MongoDB\"\n self.assertIsInstance(Input(**data), Input)\n\n def test_db_username_validation(self):\n long_string, valid_string = get_valid_and_invalid_str_input(MAX_STR_LENGTH)\n data = get_input_object()\n options = {\n \"database_options\": {\n \"db_username\": long_string\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"database_options\"][\"db_username\"] = valid_string\n\n self.assertIsInstance(Input(**data), Input)\n\n def test_password_validation(self):\n long_password, valid_password = get_valid_and_invalid_str_input(PASSWORD_LENGTH)\n data = get_input_object()\n options = {\n \"database_options\": {\n \"db_password\": long_password\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"database_options\"][\"db_password\"] = valid_password\n\n self.assertIsInstance(Input(**data), Input)\n\n def test_project_title_validation(self):\n invalid_title, valid_title = get_valid_and_invalid_str_input(MAX_STR_LENGTH)\n data = get_input_object()\n options = {\n \"project_metadata\": {\n \"title\": invalid_title\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"project_metadata\"][\"title\"] = valid_title\n\n self.assertIsInstance(Input(**data), Input)\n\n def test_project_description_validation(self):\n invalid_description, valid_description = get_valid_and_invalid_str_input(PROJECT_DESCRIPTION_MAX_LENGTH)\n data = get_input_object()\n options = {\n \"project_metadata\": {\n \"description\": invalid_description\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n 
data[\"options\"][\"project_metadata\"][\"description\"] = valid_description\n\n self.assertIsInstance(Input(**data), Input)\n\n def test_project_version_validation(self):\n invalid_version, valid_version = get_valid_and_invalid_str_input(PROJECT_VERSION_MAX_LENGTH)\n data = get_input_object()\n options = {\n \"project_metadata\": {\n \"version\": invalid_version\n }\n }\n data[\"options\"] = options\n\n with self.assertRaises(ValueError):\n Input(**data)\n\n data[\"options\"][\"project_metadata\"][\"version\"] = valid_version\n\n self.assertIsInstance(Input(**data), Input)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"xeno-john/a-py-generator","sub_path":"src/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5683014382","text":"from math import fabs\nimport unittest\n\nfrom UserStories.us16 import male_last_names\nfrom ProjectUtils.parser import parse\nfrom write_errors import write_errors\n\nfrom ProjectUtils.config import GEDCOM_FILE\n\nUSER_STORY = \"US16\"\ntype = \"FAMILY\"\n\nindividuals, families = parse(GEDCOM_FILE)\n\nclass Test_male_last_names(unittest.TestCase):\n def test_male_last_names(self):\n \n for fam_id in families:\n \n husb_id = families[fam_id].get_husband()\n husb_name = individuals[husb_id].get_name()\n \n family_last_name = husb_name.split()[1]\n \n children = families[fam_id].get_children()\n \n male_children = [child_id for child_id in children if individuals[child_id].get_gender() == 'M']\n male_child_names = [individuals[child_id].get_name() for child_id in male_children]\n \n try:\n self.assertTrue(male_last_names(family_last_name, male_child_names))\n except Exception as e:\n write_errors(type = type, user_story = USER_STORY, id = fam_id, error = e)","repo_name":"shivangmedhekar/CS555WS_Fall2022_Team7","sub_path":"TestFiles/test_us16.py","file_name":"test_us16.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72840634801","text":"#这个代码��来示意遗传算法\nimport numpy as np\ndef schaffer(p):\n#这是自定义的函数(即优化的目标函数)\n '''\n This function has plenty of local minimum, with strong shocks\n global minimum at (0,0) with value 0\n '''\n x1, x2 = p\n x = np.square(x1) + np.square(x2) #f(x1,x2) =0.5 + (sin^2(x1^2 + x2^2)-0.5)/ ((1+0.001*x)^2)\n return 0.5 + (np.square(np.sin(x)) - 0.5) / np.square(1 + 0.001 * x)\nfrom sko.GA import GA\nga = GA(func=schaffer, n_dim=2, size_pop=50, max_iter=800, lb=[-1, -1], ub=[1, 1], precision=1e-7)\nbest_x, best_y = ga.run()\nprint('best_x:', best_x, '\\n', 'best_y:', best_y) #这里对函数的最优值进行了求解\n\n#绘图\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nY_history = pd.DataFrame(ga.all_history_Y)\nprint(Y_history)\nfig, ax = plt.subplots(2, 1)\nplt.rcParams['font.sans-serif']=['SimHei'] #指定默认字体 SimHei为黑体\nplt.rcParams['axes.unicode_minus']=False #用来正常显示负号\nprint(Y_history.index)\nax[0].plot(Y_history.index, Y_history.values, '.', color='red')\nax[0].set_title('目标函数值优化路线')\nax[1].plot(Y_history.index,Y_history.min(axis=1).cummin())\n# Y_history.min(axis=1).cummin().plot(kind='line')\nax[1].set_title('每代种群中的最优值')\nplt.subplots_adjust(hspace=1, wspace=1)\nplt.show() #第一张图片\n\n#例子二\ndef my_fun(vector):\n x1,x2 = vector\n value = np.square(x1)+np.square(x2)\n return value\n\nfig,ax = plt.subplots(2,1)\nga = GA(func=my_fun, n_dim=2, size_pop=50, max_iter=800, lb=[-1, -1], ub=[1, 1], 
precision=1e-7)\nbest_x, best_y = ga.run()\nprint('最优值自变量:', best_x, '\\n', '最优值因变量:', best_y) #这里对函数的最优值进行了求解\nY_history = pd.DataFrame(ga.all_history_Y) #历史的优化迭代值Y(类型为List)\n# plt.plot(Y_history.index,Y_history.values,'-.', color = 'blue')\nax[0].plot(Y_history.index, Y_history.values, '.', color='red')\nax[0].set_title('每次迭代各个种群的函数值')\nax[1].plot(Y_history.index,Y_history.min(axis=1).cummin())\nax[1].set_title('每次迭代各个种群的最优函数值')\nplt.subplots_adjust(hspace= 0.5)\n# Y_history.min(axis=1).cummin().plot(kind='line') #800代,每代50个体(800*50) 绘制每一代的最小的y值\nplt.show() #第二张图\n#Dataframe的方法:cummin()\n#计算累计最小值,例如,一个序列为3 4 5 5 4 3 那么生成的序列为 【3 3 3 3 3 3】,【inf 1 0 -1 2】对应【inf 1 0 -1 -1】\n#先计算每一代里面最优的个体的函数值,再累计绘制全部的最优个体\n# plt.show()\n# print(Y_history.values)\n''''''\n# figure = plt.figure(figsize= (30,30),dpi=30)\n# x = np.linspace(0,2*np.pi,100)\n# y1 = np.sin(x)\n# y2 = np.cos(x)\n# Y = np.array([y1,y2]).transpose()#Y:100*2 x:100*1\n# plt.plot(x,Y)\n# plt.show()\n''''''\n#上面的这段代码只用了一个X,\n","repo_name":"xp19991205/optimation","sub_path":"GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26304070034","text":"# Should be able to display each color (note: colors without 100% saturation\n# will not appear correctly due to physics).\n\n# Press the stop button to advance to the next color\n\nfrom pybricks.hubs import *\nfrom pybricks.parameters import Color\nfrom pybricks.tools import wait\n\n# sneaky way to get XyzHub class without knowning which hub\nhub = next(v for k, v in globals().items() if k.endswith(\"Hub\"))()\n\nfor c in Color:\n print(\"Color:\", c)\n hub.light.on(Color[c])\n try:\n while True:\n wait(10)\n except SystemExit:\n continue\n\nhub.light.off()\nprint(\"...done\")\n","repo_name":"ACKERMANNGUE/ACKERMANNGUE-AG_Dipl_Tech_2021_VoitureAssistee","sub_path":"code/Toolbox/Bluetooth/pybricks-micropython-master/tests/pup/hub/status_light.py","file_name":"status_light.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"37200641332","text":"import torch\nimport torch.utils.data as D\nimport cv2\nimport numpy as np\nimport torchvision.transforms.functional as TF\nfrom torchvision import transforms\nimport random\nimport os\nfrom PIL import Image\nfrom config import Configs\n\nclass Read_data(D.Dataset):\n \"\"\"\n The data loader class for 1 set (for example train)\n\n Args:\n base_dir (str): the data path\n file_label (list of str): the names of the data instances\n set (str): the set (train, valid or test)\n split_size (int): the image (patch) size\n augmentation (bool): whwther to apply augmentation\n flipped (bool): whether the data is flipped\n \"\"\"\n def __init__(self, base_dir, file_label,set, split_size, augmentation=True , flipped = False):\n self.base_dir = base_dir\n self.file_label = file_label\n self.set = set\n self.split_size = split_size\n self.augmentation = augmentation\n self.flipped = flipped\n \n def __getitem__(self, index):\n img_name = self.file_label[index]\n idx, deg_img, gt_img = self.readImages(img_name)\n return idx, deg_img, gt_img\n \n def __len__(self):\n return len(self.file_label)\n\n def readImages(self, file_name):\n \"\"\"\n Read a pair of images (degraded + clean gt)\n \n Args:\n file_name (str): the index (name) of the image pair\n Returns:\n file_name (str): the index (name) of the image pair\n out_deg_img (np.array): the 
degraded image\n            out_gt_img (np.array): the clean image\n\n        \"\"\"\n        url_deg = self.base_dir +'/'+ self.set+'/' + file_name\n        url_gt = self.base_dir +'/'+ self.set+'_gt/'+file_name\n        \n        deg_img = cv2.imread(url_deg)\n        gt_img = cv2.imread(url_gt)\n\n        if self.flipped:\n            deg_img = cv2.rotate(deg_img, cv2.ROTATE_180)\n            gt_img = cv2.rotate(gt_img, cv2.ROTATE_180)\n        try:\n            deg_img.any()\n        except:\n            print('###!Cannot find image: ' + url_deg)\n        try:\n            gt_img.any()\n        except:\n            print('###!Cannot find image: ' + url_gt)\n        \n        deg_img = Image.fromarray(np.uint8(deg_img))\n        gt_img = Image.fromarray(np.uint8(gt_img))\n\n        # apply data augmentation\n        if self.augmentation:\n            # random crop\n            i, j, h, w = transforms.RandomCrop.get_params(deg_img, output_size=(self.split_size, self.split_size))\n            deg_img = TF.crop(deg_img, i, j, h, w)\n            gt_img = TF.crop(gt_img, i, j, h, w)\n\n            # random horizontal flipping\n            if random.random() > 0.5:\n                deg_img = TF.hflip(deg_img)\n                gt_img = TF.hflip(gt_img)\n\n            # random vertical flipping\n            if random.random() > 0.5:\n                deg_img = TF.vflip(deg_img)\n                gt_img = TF.vflip(gt_img)\n\n        deg_img = (np.array(deg_img) /255).astype('float32')\n        gt_img = (np.array(gt_img) / 255).astype('float32')\n        \n        # normalize data\n        mean = [0.485, 0.456, 0.406]\n        std = [0.229, 0.224, 0.225]\n        out_deg_img = np.zeros([3, *deg_img.shape[:-1]])\n        out_gt_img = np.zeros([3, *gt_img.shape[:-1]])\n        for i in range(3):\n            out_deg_img[i] = (deg_img[:,:,i] - mean[i]) / std[i]\n            out_gt_img[i] = (gt_img[:,:,i] - mean[i]) / std[i]\n        \n        return file_name, out_deg_img, out_gt_img\n\n\ndef load_datasets(flipped=False):\n    \"\"\"\n    Create the 3 datasets (train/valid/test) to be used by the dataloaders.\n\n    Args:\n        flipped (bool): whether to flip the images of the val dataset (was used\n        in 1 experiment to check the effect of flipping)\n    Returns:\n        data_train (Dataset): train data\n        data_valid (Dataset): valid data\n        data_test (Dataset): test data\n    \"\"\"\n    cfg = Configs().parse() \n    base_dir = cfg.data_path\n    split_size = cfg.split_size\n    data_tr = os.listdir(cfg.data_path+'train')\n    np.random.shuffle(data_tr)\n    data_va = os.listdir(cfg.data_path+'valid')\n    np.random.shuffle(data_va)\n    data_te = os.listdir(cfg.data_path+'test')\n    np.random.shuffle(data_te)\n    \n    data_train = Read_data(base_dir, data_tr, 'train', split_size, augmentation=True)\n    data_valid = Read_data(base_dir, data_va, 'valid', split_size, augmentation=False, flipped = flipped)\n    data_test = Read_data(base_dir, data_te, 'test', split_size, augmentation=False)\n\n    return data_train, data_valid, data_test\n\ndef sort_batch(batch):\n    \"\"\"\n    Transform a batch of data to pytorch tensors\n\n    Args:\n        batch [str, np.array, np.array]: a batch of data\n    Returns:\n        data_index (tensor): the indexes of the source/target pair\n        data_in (tensor): the source images (degraded)\n        data_out (tensor): the target images (clean gt)\n    \"\"\"\n    n_batch = len(batch)\n    data_index = []\n    data_in = []\n    data_out = []\n    for i in range(n_batch):\n        idx, img, gt_img = batch[i]\n\n        data_index.append(idx)\n        data_in.append(img)\n        data_out.append(gt_img)\n\n    data_index = np.array(data_index)\n    data_in = np.array(data_in, dtype='float32')\n    data_out = np.array(data_out, dtype='float32')\n\n    data_in = torch.from_numpy(data_in)\n    data_out = torch.from_numpy(data_out)\n\n    return data_index, data_in, data_out\n\ndef all_data_loader(batch_size):\n    \"\"\"\n    Create the 3 data loaders\n\n    Args:\n        batch_size (int): the batch_size\n    Returns:\n        train_loader (dataloader): train data loader \n        valid_loader 
(dataloader): valid data loader\n        test_loader (dataloader): test data loader\n    \"\"\"\n    data_train, data_valid, data_test = load_datasets()\n    train_loader = torch.utils.data.DataLoader(data_train, collate_fn=sort_batch, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)\n    valid_loader = torch.utils.data.DataLoader(data_valid, collate_fn=sort_batch, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True)\n    test_loader = torch.utils.data.DataLoader(data_test, collate_fn=sort_batch, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True)\n\n    return train_loader, valid_loader, test_loader","repo_name":"dali92002/DocEnTR","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"75"}
{"seq_id":"349521718","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    File Name: 347. Top K Frequent Elements\n    Description :\n    Author : simon\n    date: 19-3-12\n\"\"\"\nimport collections\n\n\"\"\"\nBucket sort.\nPrior information exploited: each element's frequency is <= N.\nWith bucket sort the sorted result is obtained directly...\n\nThe main idea is to convert the number-->count table into a count-->number table.\n\n\"\"\"\nclass Solution:\n    def topKFrequent(self, nums, k):\n        bucket, res = [[] for _ in range(len(nums) + 1)], []\n        for a, b in collections.Counter(nums).items():\n            bucket[b].append(a)\n        for l in bucket[::-1]: # the sorted result is obtained directly...\n            if len(l): res += l # from high frequency to low: every non-empty bucket is appended to the result list\n            if len(res) >= k: return res[ : k]\n\n\nclass Solution0(object):\n    def topKFrequent(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        dic = {} # hand-written dict for counting\n        for n in nums:\n            if n not in dic:\n                dic[n] = 0\n            dic[n] += 1\n\n        bucket, res = [[] for _ in range(len(nums) + 1)], []\n        for n, cnt in dic.items():\n            bucket[cnt].append(n)\n        for l in bucket[::-1]:\n            if l: res += l\n            if len(res) > k: return res[:k]\n        return res\n\n\"\"\"\nConversion between the two tables\n\"\"\"\nclass Solution3(object):\n    def topKFrequent(self, nums, k):\n        hs = {}\n        frq = {}\n        for i in range(0, len(nums)):\n            if nums[i] not in hs:\n                hs[nums[i]] = 1\n            else:\n                hs[nums[i]] += 1\n\n        for z, v in hs.items():\n            if v not in frq:\n                frq[v] = [z]\n            else:\n                frq[v].append(z)\n\n        arr = []\n\n        for x in range(len(nums), 0, -1):\n            if x in frq:\n\n                for i in frq[x]:\n                    arr.append(i)\n\n        return arr[:k]\n\nif __name__ == '__main__':\n    test = [1,1,1,2,2,3]\n    solu = Solution3()\n    print(solu.topKFrequent(test, 2))","repo_name":"Simon717/sword-to-offer-python","sub_path":"leetcode/347. Top K Frequent Elements.py","file_name":"347. Top K Frequent Elements.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"5249114618","text":"from PIL import Image, ImageDraw\nfrom random import choice\n\nwidth = 300\nheight = 300\ncanvas = Image.new('RGBA', (width, height), \"white\")\ndraw = ImageDraw.Draw(canvas)\n\n# ### A Chaos game ###\n\ndef midpoint(p1, p2):\n    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2\n\n\ndef chaos(corners, start_point, steps=10000):\n\n    prev_point = start_point\n    for _ in range(steps):\n        # Select the next guider to move closer to\n        corner = choice(corners)\n        new_point = midpoint(prev_point, corner)\n        # print new_point\n        draw.point(new_point, fill='black')\n        prev_point = new_point\n\nstart_point = (150.0, 150.0)\n# corners = [(100.0, 100.0),\n#            (200.0, 100.0),\n#            (200.0, 200.0),\n#            (100.0, 200.0)]\ncorners = [(100.0, 100.0),\n           (200.0, 100.0),\n           (150.0, 250.0)]\nchaos(corners, start_point)\ncanvas.show()\n\n\n# ### B Feigenbaum ###\n\ndef feigenbaum_diagram():\n    xa = 2.9\n    xb = 4.0\n    maxit = 1000\n\n    for i in range(width):\n        r = xa + (xb - xa) * i / (width - 1)\n        x = 0.5\n        for j in range(maxit):\n            x = r * x * (1 - x)\n            if j > maxit / 2:\n                draw.point((i, int(x * height)), fill='blue')\n    canvas.show()\n","repo_name":"ciso112/MaP","sub_path":"w6.py","file_name":"w6.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"15529807806","text":"import locale\nimport sys\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\nfrom abstractions import AbstractScrapper\nfrom lxml.html import fromstring\nfrom services import HTTPResponse, MonthTitleReplace, RequestHeaders\n\n\n@dataclass\nclass UgraClassicEventItem:\n    name: str\n    description: str\n    source: str\n    datetime_list: list\n    event_venue: str = ''\n    poster: str = ''\n    categorys: str = '' # ManyToManyField\n    tags: str = '' # ManyToManyField\n\n\nclass UgraClassicEventScrapperToolsMixin:\n    def _clean_xpath_generated_list(self, data: list):\n        data = [x.strip() for x in data]\n        data = [x.replace('\\xa0', ' ') for x in data]\n        return list(filter(None, [x.strip() for x in data]))\n\n    def _clean_xpath_generated_list_for_description(self, data: list):\n        data = [x.strip('\\t ') for x in data]\n        data = [x.replace('\\n', ' ') for x in data]\n        data = [x.replace('\\xa0', ' ') for x in data]\n        return list(filter(None, [x.strip() for x in data]))\n\n    def _get_formated_datetime(self, non_format_string: str):\n        return datetime.strptime(\n            MonthTitleReplace.for_russian_words(non_format_string),\n            \"%d %B%Y%H:%M\"\n        )\n\n    def _get_event_datetime_list(self, string_date: str, string_time: str) -> list:\n        locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')\n        event_times = []\n        # print(f\"{string_date=} {string_time=}\")\n\n        # If the date is compound, it does not mean anything for this event source (the data is inconsistent)\n        # is_right_time = False means only the date should be used, since the time is very likely wrong\n        is_right_time = True\n        if '; ' in string_time or ', ' in string_time:\n            string_time = \"00:00\"\n            is_right_time = False\n        if ' и ' in string_date:\n            string_time = \"00:00\"\n            is_right_time = False\n            string_date_list = string_date.split(' и ')\n            month = string_date_list[-1].split(' ')[-1]\n            for i in range(len(string_date_list)):\n                event_times.append((\n                    self._get_formated_datetime(\n                        non_format_string=(string_date_list[i] if i + 1 == len(\n                            string_date_list) else string_date_list[i] + ' ' + month) + datetime.now().strftime(\n                            '%Y') + string_time\n                    ), is_right_time)\n                )\n                pass\n        else:\n            event_times.append((\n                self._get_formated_datetime(\n                    non_format_string=string_date + datetime.now().strftime('%Y') + string_time\n                ), is_right_time)\n            )\n        # print('--> ', event_times)\n        return event_times\n\n\nclass UgraClassicEventScrapper(AbstractScrapper, UgraClassicEventScrapperToolsMixin):\n    INDEX_URL = 'https://ugraclassic.ru'\n\n    BASE_URL = \"https://ugraclassic.ru/events/index.php\"\n\n    PAGINATOR_XPATH = \"//div[contains(@class, 'bx_pagination_page')]//li/a/@href\"\n\n    AGE_TAG_XPATH = \"//div[@class='age']//text()\"\n\n    EVENT_ITEMS_XPATH = \"//div[contains(@class, 'afisha__item')]\"\n    PART_EVENT_TITLE_XPATH = \"//div[contains(@class, 'afisha-desc')]/h3//text()\"\n    PART_EVENT_HREF_XPATH = \"/a/@href\"\n    PART_SCHEDULE_XPATH = \"//ul[contains(@class, 'afisha-w')]\"\n\n    EVENT_ADDITION_DATA_XPATH = \"//div[contains(@class, 'afisha-detail__text')]//text()\"\n\n    def __init__(self):\n        super().__init__()\n        locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')\n        self.parse_result: list = []\n\n    def __get_events_from_one_page(self, url: str):\n        \"\"\" Append all events to self.parse_result for the requested url. \"\"\"\n        dom = fromstring(HTTPResponse.get_response(url=url, headers=RequestHeaders().headers).text)\n\n        item_event_title = dom.xpath(self.EVENT_ITEMS_XPATH + self.PART_EVENT_TITLE_XPATH)\n        item_event_href = dom.xpath(self.EVENT_ITEMS_XPATH + self.PART_EVENT_HREF_XPATH)\n        schedule_item = dom.xpath(self.EVENT_ITEMS_XPATH + self.PART_SCHEDULE_XPATH)\n        schedule_mix_list = dom.xpath(\n            self.EVENT_ITEMS_XPATH + self.PART_SCHEDULE_XPATH + \"//li[contains(@class, 'afisha-w__item')]//text()\")\n        # strip whitespace from the element strings and drop empty entries\n        schedule_mix_list = self._clean_xpath_generated_list(schedule_mix_list)\n\n        for item in schedule_item:\n            temp_date = ''\n            temp_time = ''\n            temp_title = item_event_title.pop(0)\n            temp_href = self.INDEX_URL + item_event_href.pop(0)\n            for i in range(len(item)):\n                if i == 0:\n                    schedule_mix_list.pop(0)\n                    temp_date = schedule_mix_list.pop(0)\n                elif i == 1:\n                    schedule_mix_list.pop(0)\n                    temp_time = schedule_mix_list.pop(0)\n                else:\n                    schedule_mix_list.pop(0)\n                    schedule_mix_list.pop(0)\n\n            event_page = self.__get_event_page(temp_href)\n\n            event_description = event_page.xpath(self.EVENT_ADDITION_DATA_XPATH)\n            event_tag = event_page.xpath(self.AGE_TAG_XPATH)\n            event_description = ''.join(\n                self._clean_xpath_generated_list_for_description(event_description)).replace(\n                ' ', ' '\n            ).replace('\\n\\n', '\\n')\n\n            self.parse_result.append(\n                UgraClassicEventItem(\n                    name=temp_title,\n                    description=event_description,\n                    source=temp_href,\n                    datetime_list=self._get_event_datetime_list(temp_date, temp_time),\n                    tags=event_tag\n                )\n            )\n\n    def __get_event_page(self, url: str):\n        \"\"\" Send a request for the event url and get the event data. \"\"\"\n        with HTTPResponse.get_response(url, headers=RequestHeaders().headers) as get_request:\n            return fromstring(get_request.text)\n\n    def start_scraping(self):\n\n        dom = fromstring(HTTPResponse.get_response(url=self.BASE_URL, headers=RequestHeaders().headers).text)\n\n        pagination = dom.xpath(self.PAGINATOR_XPATH)\n        event_page_url_set = set()\n        for item in pagination:\n            event_page_url_set.add(self.INDEX_URL + item)\n\n        for page_url in event_page_url_set:\n            self.__get_events_from_one_page(page_url)\n\n        return self.parse_result\n\n\nif __name__ == \"__main__\":\n    print(f\"{__file__} must be imported as a module.\")\n    # print(sys.modules)\n    # print(vars().keys())\n    print(sys.path)\n    # result = UgraClassicEventScrapper().start_scraping()\n    # print(result[5])\n","repo_name":"Roman-R2/city-poster","sub_path":"event-parser/parsers/ugra_classic_event_scrapper.py","file_name":"ugra_classic_event_scrapper.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"19633572122","text":"from flask import Flask, render_template, request, redirect, url_for, flash\n\napp = Flask(__name__)\napp.secret_key = 'Adam'\n# Initialize a global dictionary to store user registrations\nregistered_users = {}\n\n# Define a list of valid organizations\nvalid_organizations = [\"Charlotte Hacks\", \"Code9\", \"Alpha Phi Alpha\", \"OAS\", \"NAACP\"]\n\n@app.route('/')\ndef home():\n    return render_template('index.html', organizations=valid_organizations)\n\n@app.route('/register', methods=['POST'])\ndef register():\n    name = request.form.get('name')\n    organization = request.form.get('organization')\n\n    if not name or not organization:\n        flash('Both name and organization are required', 'error')\n        return redirect(url_for('home'))\n\n    if organization not in valid_organizations:\n        flash('Invalid organization', 'error')\n        return redirect(url_for('home'))\n\n    registered_users[name] = organization\n    flash('Registration successful', 'success')\n    return redirect(url_for('registered_users_page'))\n\n@app.route('/registered_users')\ndef registered_users_page():\n    return render_template('registered_users.html', users=registered_users)\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"adaudaa/FlaskExercise","sub_path":"Flask/Exercise3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"14395518040","text":"import sys\nsys.path.append('..')\nfrom Constants.DataTypeNames import DataTypeNames\nfrom Constants.ServiceTypeNames import ServiceTypeNames\n\nclass DataWrapper():\n\n    # init data wrapper\n    def __init__(self):\n        self.cnames = DataTypeNames()\n        self.snames = ServiceTypeNames()\n\n    # wrap method.\n    # define a wrap method for specific usage and run here\n    def wrapInitData(self, service, data):\n        if service == self.snames.default:\n            return 
None\n","repo_name":"Ramonywangziyao/Gensis.py","sub_path":"master/Tools/DataWrapper.py","file_name":"DataWrapper.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20985918581","text":"#!/usr/bin/env python3\n\"\"\"\nFunctions involved in downloading birds from xeno-canto\nCan be used from the command line (see argparse entries\nat end of file). Or one can import XenoCantoCollection\nand XenoCantoRecording into an application.\n\nTwo classes:\n \n o XenoCantoRecording to hold metadata about available XC\n sound files\n o XenoCantoCollection to hold XenoCantoRecording instances\n\nA XenoCantoCollection instance acts like a dict whose keys are\nconcatenations of genus and species names, e.g. 'Tangaragyrola'\nEach value in the dict is a *list* of XenoCantoRecording instances \nfor that type of bird.\n\nSo the keys of a XenoCantoCollection instance might look like:\n\n{'Tangaragyrola' : [rec1, rec2, rec3]\n 'Amaziliadecora': [rec4, rec5]\n}\n\nA XenoCantoRecording also behaves like a dict:\n\nKeys are:\n        genus\n        species\n        phylo_name # 'Tangaragyrola'\n        full_name # Unique name like 'Tangaragyrola_xc3562156' \n        country\n        loc # Name of park/city/preserve \n        recording_date\n        length\n        encoding # ['mpeg', 'wav.vpn']\n        type # ['call', 'song']\n\n        _xeno_canto_id\n        _filename (if downloaded)\n        _url\n        \nA XenoCantoCollection instance can also act as an iterator:\n\n\t for recording in my_coll(one_per_bird_phylo=True):\n\t     print(recording.full_name)\n\nThe one_per_bird_phylo kwarg controls whether\nonly one of each species' recordings are included\nin the iteration, or whether the list of all recordings \nfor all species are served out.\n\nThe interaction model is to pull metadata for each\ndesired species by running this script from the command\nline either with or without downloading the actual sound \nfiles. That process creates a XenoCantoCollection, which\nis saved to disk as a pickle.\n\nOne can ask the XenoCantoCollection instance to\ndownload some or all recordings. Individual XenoCantoRecording\ninstances also know how to download.\n\nWorking with the collection:\n\nClients can import the module, rather than calling\nfrom the command line. The resource can therefore\nbe integrated into a workflow. \n\nAfter the sound files are downloaded, the following\nis a template for use without re-contacting the Xeno Canto\nserver:\n\nfrom birdsong.xeno_canto_manager import XenoCantoCollection, XenoCantoRecording\nsound_collection = XenoCantoCollection.load('>>\n\timport birdsong.xeno_canto_manager as xcm\n\tsound_collection = xcm.XenoCantoCollection.load('~/test_metadata.json')\n\tfor rec in sound_collection:\n\t    print(rec)\n\t\n\t# Get messages about downloading and saving# as it rattles through:\n\tfor rec in sound_collection:\n\t    dest_file = rec.download()\n\t\n\t# Download again: get warning about file\n\t# present. In this case, answer 'no' to the overwrite\n\t# question to avoid re-downloading any\n\t# of the already present files. \n\t\n\tfor rec in sound_collection:\n\t    dest_file = rec.download()\n\t\n\t# Download the collection all at once,\n\t# rather than in a loop. 
This won't\n\t# re-download, b/c it remembers your\n\t# 'no' answer from above:\n\t\n\tsound_collection.download()\n\t\n\t# Force re-downloading:\n\tsound_collection.download(overwrite_existing=True)\n\n'''\n# ---------------- Class XenoCantoCollection ----------\n\nclass XenoCantoCollection:\n    '''\n    Holds information about multiple Xeno Canto\n    recordings. Each holding is a XenoCantoRecording\n    instance, keyed by species name. For each species\n    it may hold multiple recording instances.\n    \n    Constructor takes a raw download from Xeno Canto\n    (see xeno_canto_get_bird_metadata()). Downloads are\n    of the form:\n    \n    {]+>', '', str(response.content))\n                self.log.err(somewhat_readable)\n                continue\n            return metadata\n\n    #------------------------------------\n    # __iter__ \n    #-------------------\n    \n    def __iter__(self):\n        \n        # Index into the list of this collection\n        # instance's keys. Each key's value is a list\n        # of recording instances:\n        self.curr_bird_phylo_indx = 0\n        \n        # Indx into one of the recording\n        # lists. Used in case of one_per_bird_phylo\n        # is False.\n        self.curr_recording_indx = -1\n        \n        self.phylo_name_list = list(self.keys())\n\n        return self\n    \n    #------------------------------------\n    # __next__ \n    #-------------------\n    \n    def __next__(self):\n        '''\n        Looks worse than it is. Recall:\n        'self' is a dict:\n            {\n            phylo_name1 : (recording_instance1, recording_instance2,...) \n            phylo_name2 : (recording_instance1, recording_instance2,...) \n            ...\n            }\n        \n        If iterator is to feed only one recording for each\n        phylo_name, track phylo_names with self.curr_bird_phylo_indx\n        into the list of phylo_name_n. The 'next' recording is\n        the first recording in the next phylo_name.\n        \n        If iterator is to feed every bird, two indices are needed:\n        one into phylo_names, and one into the current phylo_name\n        value's list of recordings: self.curr_recording_indx. For\n        each phylo_name we feed one recording after the other, until\n        the list is exhausted, then move on to the next phylo_name.\n        \n        :return: Xeno Canto recording instance\n        :rtype: XenoCantoRecording\n        '''\n        if not self.one_per_bird_phylo:\n            curr_phylo_name = self.phylo_name_list[self.curr_bird_phylo_indx]\n            curr_recording_list = self[curr_phylo_name]\n            # We are to feed all recordings\n            # of all phylos:\n            self.curr_recording_indx += 1\n            try:\n                return curr_recording_list[self.curr_recording_indx]\n            except IndexError:\n                # Fed out all recordings in current list\n                # Move on to the next phylo, first resetting\n                # the pointer into the list:\n                self.curr_recording_indx = 0\n                self.curr_bird_phylo_indx +=1\n            except Exception as e:\n                # Unexpected error\n                raise RuntimeError(f\"During next in collection: {repr(e)}\") from e\n        \n        # On to the next phylo entry:\n        \n        try:\n            # Try to get next phylo name. 
If \n # fails, we fed out everything:\n next_phylo_name = self.phylo_name_list[self.curr_bird_phylo_indx]\n nxt_rec_list = self[next_phylo_name]\n if self.one_per_bird_phylo:\n self.curr_bird_phylo_indx += 1\n except IndexError:\n # Have fed all records of all phylos:\n # Restore default for one_per_bird_phylo:\n self.one_per_bird_phylo = True\n raise StopIteration()\n\n # Have a new phylo entry's recording list:\n # Degenerate case: empty recordings list:\n if len(nxt_rec_list) == 0:\n # Recursively get next:\n return self.__next__()\n\n # Return first recording of list:\n return nxt_rec_list[0] \n\n\n #------------------------------------\n # __setitem__ \n #-------------------\n \n def __setitem__(self, key, val):\n self.data[key] = val\n\n #------------------------------------\n # __getitem__ \n #-------------------\n \n def __getitem__(self, key):\n return self.data[key]\n\n #------------------------------------\n # __delitem__ \n #-------------------\n \n def __delitem__(self, key):\n del self.data[key]\n \n #------------------------------------\n # num_recordings \n #---------------\n \n @property\n def num_recordings(self):\n '''\n Lazily evaluated num_recordings property.\n The quantity is the number of all recordings\n in this collection instance's values(). If\n those values are themselves collections, add\n up their number of recordings.\n\n\n '''\n if self._num_recordings is None:\n all_lengths = [len(recording_list) \n for recording_list \n in self.values()\n ]\n self._num_recordings = sum(all_lengths)\n return self._num_recordings \n\n #------------------------------------\n # keys \n #-------------------\n \n def keys(self):\n return self.data.keys()\n\n #------------------------------------\n # values\n #-------------------\n \n def values(self):\n return self.data.values()\n \n #------------------------------------\n # items\n #-------------------\n \n def items(self):\n return self.data.items()\n \n #------------------------------------\n # all_recordings \n #-------------------\n \n def all_recordings(self):\n '''\n Convenience method: returns a list\n of all XenoCantoRecording instances\n in this collection. I.e. not separated\n by species, just all recordings from all\n species appended together\n '''\n all_recs = np.concatenate(list(self.values())).tolist()\n return all_recs\n \n\n #------------------------------------\n # download \n #-------------------\n \n def download(self, \n birds_to_process=None,\n one_per_species=True, \n courtesy_delay=1.0,\n overwrite_existing=None,\n ):\n '''\n Download a given list of species, or\n all the species in the collection. \n \n If one_per_species is True, only one recording\n from the list of each species' recordings is\n downloaded; else all recordings of each requested\n species.\n \n Courtesy delay is time to wait between sound file\n downloads.\n \n :param birds_to_process: list of bird species names\n :type birds_to_process: [str]\n :param one_per_species: whether or not to download all \n recordings of each species, or just one\n :type one_per_species: bool\n :param courtesy_delay: time between requests to XC server\n :type courtesy_delay: {int | float}\n :param overwrite_existing: if True, already existing \n sound files will be overwritten without asking.\n If False, always ask once, then use answer as \n default going forward. 
If None: same as False.\n :type overwrite_existing: bool\n '''\n \n # If file overwrite behavior is specified,\n # make it the default for all the XenoCantoRecording\n # instances:\n if overwrite_existing is not None:\n XenoCantoRecording.always_overwrite = overwrite_existing\n \n if birds_to_process is None:\n birds_to_process = list(self.keys())\n\n # Get number of sound files to download:\n if one_per_species:\n num_to_download = len(birds_to_process)\n else:\n num_to_download = 0\n for species in birds_to_process:\n try:\n num_to_download += len(self[species])\n except KeyError:\n self.log.err(f\"Species {species} not represented in collection\")\n\n downloaded = 1\n for rec in self(one_per_bird_phylo=one_per_species):\n self.log.info(f\"{downloaded}/{num_to_download}\")\n rec.download()\n downloaded += 1\n time.sleep(courtesy_delay)\n\n #------------------------------------\n # __len__ \n #-------------------\n \n def __len__(self):\n '''\n Returns number of genus-species are in the\n collection. But does not sum up all the \n recordings for each genus-species. To get\n that total number, use len_all()\n '''\n return len(self.data)\n\n #------------------------------------\n # len_all \n #-------------------\n \n def len_all(self):\n num_recordings = 0\n for recs_list in self.data.values():\n num_recordings += len(recs_list)\n return num_recordings\n\n #------------------------------------\n # __eq__ \n #-------------------\n \n def __eq__(self, other_coll):\n \n for phylo_name, rec_obj_list in self.items():\n other_rec_obj_list = other_coll[phylo_name]\n for rec_obj_this, rec_obj_other in zip(rec_obj_list, other_rec_obj_list):\n if rec_obj_this != rec_obj_other:\n return False\n return True\n\n #------------------------------------\n # __repr__ \n #-------------------\n \n def __repr__(self):\n #return f\"\"\n return f\"\"\n \n #------------------------------------\n # __call__ \n #-------------------\n \n def __call__(self, one_per_bird_phylo=True):\n '''\n This is funky; sorry! Defining this method makes\n a XenoCantoCollection instance callable, in this\n case with a keyword arg. This enables the equivalent\n to being able to pass an argument to iter():\n \n class SomeClass:\n def __init__(self):\n self.i = 0\n def __iter__(self):\n return self\n def __next__(self):\n self.i += 1\n if self.i > 5:\n self.one_per_bird_phylo = False\n self.i = 0\n raise StopIteration\n return self.i\n def __call__(self, one_per_bird_phylo=False):\n self.one_per_bird_phylo=one_per_bird_phylo\n return self\n \n inst = SomeClass()\n for rec in inst(one_per_bird_phylo=True): # <-----------\n print (inst.i, inst.one_per_bird_phylo)\n\n Without this trick, iterators aren't callable. \n\n :param one_per_bird_phylo:\n :type one_per_bird_phylo:\n '''\n self.one_per_bird_phylo=one_per_bird_phylo\n return self\n\n #------------------------------------\n # save \n #-------------------\n \n def save(self, dest=None):\n '''\n Saves collection to given file. Only the\n metadata for the collection itself, and\n the enclosed XenoCantoRecording instances\n are saved, not the soundfiles themselves.\n They are normally in their own dir, with \n the XenoCantoRecording instances holding\n the file paths.\n \n If dest:\n \n o is an existing directory, a .json file name \n is created for the collection\n file.\n o is a file name as ascertained by the\n presence of an extension: \n If the dest file ends with '.json' \n the output format is JSON. 
If it \n ends with '.pkl' or '.pickle' the \n sound_collection is saved in pickle format.\n Any other extension is an error.\n o Is None: the destination directory\n will be a subdir 'xeno_canto_collections'\n under this script's dir.\n \n In all cases, non-existing directories\n will be created. \n \n Note: Pickle is fast to load, but very finicky \n about file names and directory structures\n being exactly what they were at saving\n time. So JSON is recommended.\n \n :param dest: destination directory or\n file path for the saved collection. \n Default: \n /xeno_canto_collections\n :type dest: str\n :return the full path of the output file\n :rtype str\n '''\n\n # To distinguish between dest\n # being a file name vs. at dir:\n is_file = False\n \n if dest is None:\n curr_dir = os.path.dirname(__file__)\n dest_dir = os.path.join(curr_dir, 'xeno_canto_collections')\n\n else:\n # Is dest a file or a dir?\n dest_p = Path(dest)\n suffix = dest_p.suffix\n if len(suffix) == 0:\n dest_dir = dest\n else:\n # Destination is a file name:\n is_file = True\n # Check for legal extension:\n if suffix not in ('.json', '.pkl', '.pickle'):\n raise ValueError(\"Collection files must end in .json, .pkl, or .pickle\")\n dest_dir = dest_p.parent\n\n if not os.path.exists(dest_dir):\n try:\n os.makedirs(dest_dir)\n except FileExistsError:\n pass\n\n # If dest is a directory, invent a filename:\n if not is_file:\n # Create a non-existing .JSON file name \n # in dest_dir\n dest = self.create_filename(dest_dir) \n \n # Dest is a file at this point.\n dest_p = Path(dest)\n dir_part = dest_p.parent\n file_part = dest_p.stem\n extension = dest_p.suffix\n uniquifier = 0\n orig_file_part = file_part\n \n while dest_p.exists():\n #answer = input(f\"File {dest_p.name} exists; overwrite? (y/N): \")\n #if answer not in ('y','Y','yes','Yes', ''):\n # ...\n # Keep adding numbers to end of file\n # till have a unique name. First, remove\n # a unifier we may have added in a prev.\n # run through this loop:\n if file_part.endswith(f\"_{uniquifier}\"):\n file_part = orig_file_part \n uniquifier += 1\n file_part += f\"_{uniquifier}\"\n dest_p = Path.joinpath(dir_part, file_part+extension)\n\n dest = str(dest_p)\n\n # At this point dest is a non-existing file,\n # which is what we need. Use pickle or json?\n if extension in ('.json', '.JSON'):\n self.to_json(dest, force=True)\n else:\n raise DeprecationWarning(\"Deprecated! Using pickle for saving is unreliable\")\n with open(dest, 'wb') as fd:\n pickle.dump(self, fd)\n\n return dest\n\n #------------------------------------\n # load\n #-------------------\n \n @classmethod\n def load(cls, src):\n '''\n Takes path to either a .json or \n a .pkl (or .pickle) file. 
Materializes\n        the XenoCantoCollection that is\n        encoded in the file.\n\n        :param src: full path to pickle or json file\n            of previously saved collection\n        :type src: str\n        '''\n\n        if not os.path.exists(src) or not(os.path.isfile(src)):\n            raise FileNotFoundError(f\"Recording collection file {src} does not exist\")\n\n        if Path(src).suffix not in ('.json', '.JSON', '.pkl', '.pickle'):\n            raise ValueError(f\"Saved collection must be a JSON or pickle file\")\n\n        # Allow use of tilde in fnames:\n        src = os.path.expanduser(src)\n        if src.endswith('.json'):\n            return cls.from_json(src=src)\n        else:\n            # Assume pickle file\n            with open(src, 'rb') as fd:\n                return pickle.load(fd)\n\n    #------------------------------------\n    # to_json \n    #-------------------\n    \n    def to_json(self, dest=None, force=False):\n        '''\n        Creates a JSON string from this \n        collection. If dest is a string,\n        it is assumed to be a destination\n        file where the json will be saved.\n        If None, the generated JSON string \n        is returned. It can be passed to \n        from_json() in the json_str\n        kwarg.\n        \n        If the file exists, the user is warned,\n        unless force is True.\n\n        :param dest: optional destination file\n        :type dest: {None | str}\n        :param force: set to True if OK to overwrite\n            dest file. Default: ask permission\n        :type force: bool\n        :return destination file name if written to\n            file, else the JSON string\n        '''\n\n        # self.values() are lists of XenoCantoRecording\n        # instances. The default specification \n        # tells orjson whom to call with one\n        # of those instances to get a json snippet.\n        \n        jstr = orjson.dumps(self.data,\n                            default=XenoCantoRecording._mk_json_serializable)\n\n        if dest is None:\n            return jstr\n        \n        if os.path.exists(dest) and not force:\n            answer = input(f\"File {os.path.basename(dest)} exists; overwrite? (y/N): \")\n            if answer not in ('y','Y','yes','Yes', ''):\n                self.log.info(\"Collection JSON save aborted on request\")\n                return None\n\n        # At this point dest is a non-existing file,\n        # or one that exists but ok to overwrite:\n        with open(dest, 'wb') as fd:\n            if type(jstr) != bytes:\n                jstr = str.encode(jstr)\n            fd.write(jstr)\n        \n        return dest\n\n    #------------------------------------\n    # from_json \n    #-------------------\n    \n    @classmethod\n    def from_json(cls, src):\n        '''\n        Load a collection either from a JSON string,\n        or from a file that contains JSON.\n        \n        :param src: either a json string to parse,\n            or the path to a file ending with\n            either .json or .JSON\n        :type src: str\n        :return: the loaded collection\n        :rtype: XenoCantoCollection\n        '''\n        if Path(src).suffix in ('.json', '.JSON'):\n            # Read JSON from file:\n            if not os.path.exists(src):\n                raise FileNotFoundError(f\"File {src} not found\")\n            with open(src, 'rb') as fd:\n                json_str = fd.read()\n        else:\n            # src is assumed to be a json string\n            json_str = src\n        \n        inst_vars = orjson.loads(json_str)\n        \n        # We now have a dict (the collection) whose\n        # values are lists of dicts. Each of these\n        # dicts is json code for one XenoCantoRecording.\n        \n        # First, get an empty XenoCantoCollection,\n        # but with care: \n        # This method (from_json()) may be\n        # called from __new__(). 
The following\n        # call will call __new__() recursively.\n        # But: passing None will make the __new__()\n        # method aware, and it will just create\n        # an empty instance, for which __init__()\n        # will have been called:\n        inst = XenoCantoCollection(None)\n\n        for phylo_nm, rec_dict_list in inst_vars.items():\n            recs = [XenoCantoRecording.from_json(rec_dict)\n                    for rec_dict\n                    in rec_dict_list\n                    ]\n            inst[phylo_nm] = recs\n        \n        return inst\n\n    #------------------------------------\n    # create_filename\n    #-------------------\n    \n    def create_filename(self, dest_dir, extension='.json'):\n        '''\n        Create a file name that is not in the \n        given directory.\n        \n        :param dest_dir:\n        :type dest_dir:\n        :return filename \n        :rtype str\n        '''\n        t = datetime.datetime.now()\n        orig_filename_root = t.isoformat().replace('-','_').replace(':','_')\n        uniquifier = 0\n        filename = orig_filename_root\n        full_path = os.path.join(dest_dir,filename)\n        while os.path.exists(full_path):\n            uniquifier += 1\n            filename = os.path.join(f\"{orig_filename_root}_{str(uniquifier)}\",\n                                    extension\n                                    )\n            full_path = os.path.join(dest_dir,filename)\n        full_path += extension\n        return full_path\n\n# --------------------------- Class XenoCantoRecording\n\nclass XenoCantoRecording:\n\n\n    # No decision made about whether\n    # to overwrite existing recording files\n    # when asked to download any of the\n    # instances. Will be updated or used\n    # during calls to download():\n\n    always_overwrite = None\n    \n    # Directory to use for download destination dir\n    # if a dir specified in the call to download()\n    # is protected (i.e. permissions). Once the user\n    # was asked for a replacement dir, the following\n    # class var will be set, and used throughout the \n    # lifetime of this instance:\n    \n    default_dest_dir = None \n    \n    #------------------------------------\n    # Constructor \n    #-------------------\n    \n    def __init__(self, \n                 recording_metadata, \n                 dest_dir=None, \n                 log=None\n                 ):\n        '''\n        Recording metadata must be a dict from the \n        'recordings' entry of a Xeno Canto download.\n        The dict contains much info, such as bird name, \n        recording location, length, sample rate, and\n        file download information. Create instance vars\n        from just some of them.\n\n        :param recording_metadata: a 'recordings' entry from a\n            XenoCanto metadata download of available recordings\n        :type recording_metadata: {str | {str : Any}}\n        :param dest_dir: directory where to store downloaded\n            sound files. 
If None, creates subdirectory of\n this script called: 'recordings'\n :type dest_dir: {None | str}\n :param log: logging service; if None, creates one\n :type log: {None | LoggingService}\n '''\n \n if log is None:\n self.log = LoggingService()\n else:\n self.log = log\n \n # 'Secret' entry: used when creating\n # from json string (see from_json()):\n \n if recording_metadata is None:\n # Have caller initialize the instance vars\n # Used when creating an instance\n # from JSON.\n return\n \n curr_dir = os.path.dirname(__file__)\n if dest_dir is None:\n self.dest_dir = os.path.join(curr_dir, 'recordings')\n if not os.path.exists(self.dest_dir):\n os.mkdir(self.dest_dir)\n else:\n self.dest_dir = dest_dir\n\n self._xeno_canto_id = recording_metadata['id']\n self.genus = recording_metadata['gen']\n self.species = recording_metadata['sp']\n self.phylo_name= f\"{self.genus}{self.species}\"\n self.full_name = f\"{self.phylo_name}_xc{self._xeno_canto_id}\"\n \n self.country = recording_metadata['cnt']\n self.loc = recording_metadata['loc']\n self.recording_date = recording_metadata['date']\n self.length = recording_metadata['length']\n # One of A-E, or 'no score'\n self.rating = recording_metadata['q']\n\n # '.mp', '.wav':\n self.encoding = Path(recording_metadata['file-name']).suffix\n \n # Whether 'call' or 'song'\n self.type = recording_metadata['type']\n \n # Like: '//www.xeno-canto.org/482431'\n self._url = f\"HTTP:{recording_metadata['url']}/download\"\n \n # Like: 'XC482431-R024 white ruffed manakin.mp3'\n self._filename = recording_metadata['file-name']\n\n #------------------------------------\n # download \n #-------------------\n \n def download(self, \n dest_dir=None, \n overwrite_existing=None,\n testing=False):\n \n if dest_dir is None:\n dest_dir = self.dest_dir\n \n if overwrite_existing is None:\n # Use the global default. False, unless changed\n # during __init__()\n overwrite_existing = self.always_overwrite\n \n while not os.path.exists(dest_dir):\n try:\n os.makedirs(dest_dir)\n except (PermissionError, OSError):\n \n # Maybe we asked user for a replacement\n # dir for an earlier recording. 
If so,\n # don't ask again, use their prior answer:\n \n if XenoCantoRecording.default_dest_dir is not None and \\\n os.path.exists(XenoCantoRecording.default_dest_dir):\n dest_dir = XenoCantoRecording.default_dest_dir\n continue\n \n # Make a short dir with ellipses if\n # dest_dir very long:\n short_dir = FileUtils.ellipsed_file_path(dest_dir)\n dest_dir = input(f\"No permission for {short_dir}; enter new dest folder(tilde OK): \")\n # Resolve '~' notation:\n dest_dir = os.path.expanduser(dest_dir)\n XenoCantoRecording.default_dest_dir = dest_dir\n else:\n if not os.path.isdir(dest_dir):\n raise ValueError(f\"Destination {dest_dir} is not a directory\")\n\n # Update the full path to the recording\n # file: if a saved recording is loaded\n # by a different user or on a different\n # machine: that path might not exist:\n \n if not self._has_vocalization_prefix(self._filename):\n self._filename = self._ensure_call_or_song_prefix(\n self._filename, \n self.type)\n\n self._filename = self._clean_filename(self._filename)\n self.full_name = os.path.join(dest_dir, self._filename)\n \n # Just to make log info msgs not exceed a terminal\n # line: create an fname with ellipses: '/foo/.../bar/fum.txt'\n \n fname_descr = FileUtils.ellipsed_file_path(self.full_name)\n\n # Dest directory exists, does the sound file\n # already exist?\n if os.path.exists(self.full_name):\n if overwrite_existing:\n go_ahead = True\n else:\n # Need to ask permission to overwrite:\n answer = input(f\"Recording {self._filename} exists; overwrite (y/N): \")\n go_ahead = answer in ('y','Y','yes','Yes')\n # Make answer the global default for recordings:\n self.__class__.always_overwrite = go_ahead\n else:\n go_ahead = True\n \n if testing:\n return go_ahead\n \n if not go_ahead:\n self.log.info(f\"Skipping {self._filename}: already downloaded.\")\n return self.full_name\n \n self.log.info(f\"Downloading {fname_descr}...\")\n try:\n response = requests.get(self._url)\n except Exception as e:\n raise IOError(f\"While downloading {self._url}: {repr(e)}\") from e\n \n self.log.info(f\"Done downloading {fname_descr}\")\n \n # Split \"audio/mpeg\" or \"audio/vdn.wav\"\n medium, self.encoding = response.headers['content-type'].split('/')\n \n if medium != 'audio' or self.encoding not in ('mpeg', 'vdn.wav'):\n msg = f\"Recording {self.full_name} is {medium}/{self.encoding}, not mpeg or vpn_wav\"\n self.log.err(msg)\n #raise ValueError(msg)\n else:\n self.log.info(f\"File encoding: {self.encoding}\")\n \n # Add 'CALL' or 'SONG' in front of filename\n self._filename = self._ensure_call_or_song_prefix(\n self._filename, self.type)\n \n self.file_name = os.path.join(dest_dir, self._filename)\n \n self.log.info(f\"Saving {fname_descr}...\")\n with open(self.full_name, 'wb') as f:\n f.write(response.content)\n self.log.info(f\"Done saving {fname_descr}\")\n return self.full_name\n\n #------------------------------------\n # _clean_filename\n #-------------------\n \n def _clean_filename(self, fname):\n '''\n People put the horriblest chars into \n filenames: spaces, parentheses, backslashes!\n Replace any of those with underscores.\n \n :param fname: original name\n :type fname: str\n :return: cleaned up, unix-safe name\n :rtype: str\n '''\n fname = fname.replace('/', '_')\n fname = fname.replace(' ', '_')\n fname = fname.replace('(', '_')\n fname = fname.replace(')', '_')\n \n return fname\n\n #------------------------------------\n # _ensure_call_or_song_prefix\n #-------------------\n \n def _ensure_call_or_song_prefix(self, path, 
vocalization_type):\n '''\n Given a path (just a file name or a \n path in a subdir), prefix the file name\n with CALL_ or SONG_, depending on the \n vocalization_type. The vocalization_type can\n actually be anything else to use as prefix.\n \n :param path: current path without the CALL or SONG\n :type path: str\n :param vocalization_type: usually 'CALL' or 'SONG'\n :type vocalization_type: str\n :return: new path with the prefix in place\n :rtype: str\n '''\n \n # Already has the prefix?\n if self._has_vocalization_prefix(path):\n return path\n p = Path(path)\n \n # Sometimes people put things like:\n # 'ADULT, SEX UNCERTAIN, SONG_XC5311'... or\n # 'ALARM CALL_20110913FORANAal.mp3' or\n # 'CALL, FEMALE, MALE_XC441963-FormicanalVZA38a.mp3'\n # Into the type field. See whether we\n # find 'CALL' or 'SONG'. If not, prefix\n # with UNKNOWN:\n \n if vocalization_type not in ('CALL', 'SONG', 'call', 'song'):\n voctype_lowcase = vocalization_type.lower()\n if voctype_lowcase.find('call') > -1:\n vocalization_type = 'CALL'\n elif voctype_lowcase.find('song') > -1:\n vocalization_type = 'SONG'\n else:\n vocalization_type = 'UNKNOWN'\n \n fname = f\"{vocalization_type.upper()}_{p.name}\"\n new_path = str(p.parent.joinpath(fname))\n return new_path\n\n #------------------------------------\n # _has_vocalization_prefix \n #-------------------\n \n def _has_vocalization_prefix(self, path):\n \n fname = os.path.basename(path)\n return fname.startswith('CALL') or fname.startswith('SONG') \n\n #------------------------------------\n # __repr__ \n #-------------------\n \n def __repr__(self):\n return f\"\"\n\n #------------------------------------\n # __str__ \n #-------------------\n \n def __str__(self):\n return self.__repr__()\n\n #------------------------------------\n # __eq__\n #-------------------\n \n def __eq__(self, other_recording):\n for inst_var_nm, inst_var_val in self.__dict__.items():\n if type(inst_var_val) == str:\n if other_recording.__dict__[inst_var_nm] != inst_var_val:\n return False\n elif type(inst_var_val) == LoggingService:\n if type(other_recording.__dict__[inst_var_nm]) != LoggingService:\n return False\n else:\n # Inst var of unexpected type:\n return False\n return True\n\n #------------------------------------\n # to_json \n #-------------------\n \n def to_json(self):\n \n return orjson.dumps(self, \n default=XenoCantoRecording._mk_json_serializable)\n\n #------------------------------------\n # _mk_json_serializable\n #-------------------\n \n def _mk_json_serializable(self):\n '''\n Called by orjson to json-serialize\n a XenoCantoRecording instance. The method\n extracts those instance variables of self\n that are strings, and returns the resulting\n dict. 
\n '''\n as_dict = {inst_var_nm : inst_var_value\n for inst_var_nm, inst_var_value\n in self.__dict__.items()\n if type(inst_var_value) == str\n }\n return as_dict\n\n #------------------------------------\n # from_json \n #-------------------\n \n @classmethod\n def from_json(cls, json_str_or_dict):\n if type(json_str_or_dict) in (str, bytes):\n # Get a dict if one wasn't passed in:\n inst_vars = orjson.loads(json_str_or_dict)\n else:\n inst_vars = json_str_or_dict\n \n inst = cls.from_dict(inst_vars)\n return inst \n\n #------------------------------------\n # from_dict \n #-------------------\n \n @classmethod\n def from_dict(cls, inst_var_dict):\n inst = XenoCantoRecording(None)\n try:\n inst.__dict__.update(inst_var_dict)\n except TypeError as e:\n print(f\"Err: {repr(e)}\")\n return inst\n\n# ------------------------ Main ------------\nif __name__ == '__main__':\n \n# birds = ['Tangara+gyrola', 'Amazilia+decora', 'Hylophilus+decurtatus', 'Arremon+aurantiirostris', \n# 'Dysithamnus+mentalis', 'Lophotriccus+pileatus', 'Euphonia+imitans', 'Tangara+icterocephala', \n# 'Catharus+ustulatus', 'Parula+pitiayumi', 'Henicorhina+leucosticta', 'Corapipo+altera', \n# 'Empidonax+flaviventris']\n \n parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),\n formatter_class=argparse.RawTextHelpFormatter,\n description=\"Download and process Xeno Canto bird sounds\"\n )\n \n parser.add_argument('-d', '--destdir',\n help='fully qualified directory for downloads. Default: /tmp',\n default='/tmp')\n parser.add_argument('-t', '--timedelay',\n type=float,\n help='time between downloads for politeness to XenoCanto server. Ex: 1.0',\n default=1.0)\n parser.add_argument('-c', '--collection',\n type=str,\n help='optionally path to existing, previously saved collection',\n default=None)\n # Things user wants to do:\n parser.add_argument('--collect_info',\n action='store_true',\n help=\"download metadata for the bird calls, but don't download sounds\",\n default=True\n )\n parser.add_argument('--download',\n action='store_true',\n help=\"download the (birds_to_process) sound files (implies --collect_info\",\n default=False\n \n )\n parser.add_argument('--all_recordings',\n action='store_true',\n help=\"download all recordings rather than one per species; default: one per.\",\n default=False\n )\n parser.add_argument('--overwrite',\n action='store_true',\n help=\"whether or not to overwrite already downloaded files w/o asking; Def: False\",\n default=False\n )\n parser.add_argument('birds_to_process',\n type=str,\n nargs='+',\n help='Repeatable: +. 
Ex: Tangara_gyrola')\n \n \n args = parser.parse_args()\n \n if args.collection:\n sound_collection = XenoCantoCollection.load(args.collection)\n else:\n sound_collection = None\n \n # For reporting to user: list\n # of actions to do by getting the actions\n # as strings from the args instance:\n \n todo = [action_name \n for action_name\n in ['collect_info', 'download']\n if args.__getattribute__(action_name)\n ]\n \n if len(todo) == 0:\n print(\"No action specified on command line; nothing done\")\n print(parser.print_help())\n sys.exit(0)\n \n # Fix bird names to make HTTP happy later.\n # B/c we allow -b and --bird in args, \n # argparse constructs a name:\n \n birds_to_process = args.birds_to_process\n \n # Replace underscores needed for\n # not confusing bash with the '+' signs\n # required in URLs:\n birds_to_process = [bird.replace('_', '+') \n for bird \n in birds_to_process]\n \n if sound_collection is None and \\\n (('collect_info' in todo) or\\\n ('download' in todo)\n ):\n # Request to download recordings or\n # metadata, and no already existing and\n # saved collection was specified: \n \n sound_collection = XenoCantoCollection(birds_to_process,\n dest_dir=args.destdir,\n always_overwrite=args.overwrite\n )\n saved_path = sound_collection.save()\n sound_collection.log.info(f\"Saved new collection to {saved_path}\")\n \n if 'download' in todo:\n one_per_species = not args.all_recordings\n sound_collection.download(birds_to_process,\n one_per_species=one_per_species,\n overwrite_existing=args.overwrite,\n courtesy_delay=args.timedelay)\n # Save the updated collection:\n saved_path = sound_collection.save(saved_path)\n sound_collection.log.info(f\"Saved updated collection to new files: {saved_path}\")\n \n sound_collection.log.info(f\"Done with {todo}\")\n\n# ------------------------ Testing Only ----------------\n # Testing (Should move to a unittest\n# sound_collection = XenoCantoCollection(['Tangara+gyrola', \n# 'Amazilia+decora'],\n# dest_dir='/tmp'\n# )\n# #rec = next(iter(sound_collection, one_per_bird_phylo=False))\n# for rec in sound_collection(one_per_bird_phylo=False):\n# print(rec.full_name)\n# for rec in sound_collection(one_per_bird_phylo=True):\n# print(rec.full_name)\n# \n# rec.download()\n# print(sound_collection)\n# \n# sound_collection = XenoCantoCollection.from_json(src='/Users/paepcke/EclipseWorkspacesNew/birds/test_metadata.json')\n# for rec in sound_collection:\n# rec.download()\n# print(rec)\n# \n# new = XenoCantoCollection.load('/Users/paepcke/tmp/test_metadata1.pkl')\n# sound_collection = XenoCantoCollection.load('/Users/paepcke/tmp/test_metadata1.json')\n# for rec in sound_collection:\n# print(rec)\n# \n# for rec in sound_collection:\n# rec.download()\n# \n# print(sound_collection)\n","repo_name":"paepcke/birds","sub_path":"src/birdsong/xeno_canto_manager.py","file_name":"xeno_canto_manager.py","file_ext":"py","file_size_in_byte":52571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"41861507560","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom .forms import ProductsForm,CreateUserForm,customerform\nfrom .models import Products,User,Cart,CartItem,Order,OrderItem\nfrom django.forms import inlineformset_factory\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.decorators import user_passes_test\nfrom rest_framework.views import 
APIView\nfrom rest_framework.response import Response\nfrom easyshop.serializers import ProductSerializer,UserSerializer,LoginSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import generics, status, views, permissions\nfrom django.contrib import auth\nfrom django.db.models import Q\n\n\n \n\ndef registerPage(request):\n if request.user.is_authenticated:\n return redirect('store')\n else:\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(request, 'Account was created for ' + user)\n\n return redirect('login')\n\n context = {'form': form}\n return render(request, 'easyshop/register.html', context)\n\n\ndef loginPage(request):\n if request.user.is_authenticated:\n return redirect('store')\n\n else:\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('store')\n else:\n messages.info(request, 'Username OR password is incorrect')\n \n context = {}\n return render(request, 'easyshop/login.html', context)\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('store')\n\n\n\ndef store(request):\n products = Products.objects.all()\n context = { 'products': products }\n return render(request, 'easyshop/store.html', context )\n\n@login_required(login_url='login')\ndef cart(request):\n cartproducts = Cart.objects.get(user=request.user)\n itemcount=cartproducts.items.count()\n if itemcount==0:\n return redirect('alert')\n else:\n context = {'products':cartproducts,'itemcount':itemcount} \n return render(request, 'easyshop/cart.html', context)\n\n\n@login_required(login_url='login')\ndef checkout(request):\n cartproducts = Cart.objects.get(user=request.user)\n itemcount=cartproducts.items.count()\n form=customerform()\n context = {'products':cartproducts,'itemcount':itemcount,'form':form}\n return render(request, 'easyshop/placeorder.html', context)\n \n \n \n \n \n\ndef upload(request, id=0):\n if request.method == 'POST':\n form = ProductsForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('store')\n else:\n form = ProductsForm()\n return render(request, 'easyshop/uploadprod.html', {\n 'form': form\n })\n\ndef updateOrder(request, pk):\n maintain = Products.objects.get(id=pk)\n form = ProductsForm(instance=maintain)\n\n if request.method == 'POST':\n form = ProductsForm(request.POST, instance=maintain)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context = {'form': form}\n return render(request, 'easyshop/uploadprod.html', context)\n\ndef maintain(request):\n products = Products.objects.all()\n context = { 'products': products }\n return render(request, 'easyshop/maintain.html', context)\n\n\ndef deleteOrder(request, pk):\n product = Products.objects.get(id=pk)\n if request.method == \"POST\":\n product.delete()\n return redirect('/')\n\n context = {'item': product}\n return render(request, 'easyshop/delete.html', context)\n\ndef show(request, pk):\n product = Products.objects.get(id=pk)\n \n context = {'item': product}\n return render(request, 'easyshop/view.html', context) \n\n \n@login_required(login_url='login')\ndef 
logoutpage(request):\n \n context = {}\n\n return render(request, 'easyshop/logout.html', context)\n\n\n\n@api_view([\"GET\",])\ndef show_list(request):\n if request.method == \"GET\":\n data=Products.objects.all()\n serializers=ProductSerializer(data,many=True)\n response = {\n \n 'error' : False,\n 'datas' : serializers.data\n }\n return Response(response)\n else:\n err={\n 'error' : True\n }\n return Response(err)\n\n \nclass RegisterView(APIView):\n def post(self,request,format=None):\n serializer=UserSerializer(data=request.data)\n data={}\n if serializer.is_valid():\n account=serializer.save()\n data['error']=False\n data['message']='registration success'\n data['username']=account.username\n data['email']=account.email\n token,create=Token.objects.get_or_create(user=account)\n data['token']=token.key\n data['userid']=token.user_id\n else:\n data['error']=True\n data['message']=serializer.errors\n \n return Response(data)\n\n\nclass LoginAPIView(generics.GenericAPIView):\n serializer_class = LoginSerializer\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n \n serializer.is_valid(raise_exception=True)\n data={}\n if serializer.is_valid():\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n data['error']=False\n data['message']='login success'\n data['username']=user.username\n data['email']=user.email\n data['userid']=token.user_id\n data['token']=token.key\n \n return Response(data)\n \n \n\n\n@login_required(login_url='login')\ndef addtocart(request, pk, product_qty=None):\n obj, created = Cart.objects.update_or_create(user=request.user)\n product = get_object_or_404(Products, id=pk)\n item, itemCreated = CartItem.objects.update_or_create(\n cart=obj, product=product)\n item.price = product.prod_price\n if not itemCreated:\n item.quantity = item.quantity+1\n # if item.quantity = request.GET['q']\n\n obj.items.add(item)\n item.save()\n obj.save()\n return redirect('dialog')\n\ndef dialog(request):\n context = {}\n return render(request, 'easyshop/dialog.html', context)\n \ndef cart_remove(request, pk):\n obj, created = Cart.objects.update_or_create(user=request.user)\n product = get_object_or_404(Products, id=pk)\n cartItems = CartItem.objects.filter(cart=obj, product=product)\n cartItems.delete()\n return redirect('cartremovedialog')\n\n\n\n\ndef cart_remove_alert(request):\n context={}\n return render(request,'easyshop/removedialog.html',context) \n \n\n\n\ndef cart_add_q(request, pk, product_qty=None):\n obj, created = Cart.objects.update_or_create(user=request.user)\n product = get_object_or_404(Products, id=pk)\n item, itemCreated = CartItem.objects.update_or_create(\n cart=obj, product=product)\n # item.price = product.price\n\n # if item.quantity = request.GET['q']\n item.quantity = request.GET['q']\n if request.GET['q'] == \"0\":\n item.delete()\n else:\n obj.items.add(item)\n item.save()\n obj.save()\n return redirect('cart')\n \n \ndef alert(request):\n context = {}\n return render(request, 'easyshop/alert.html', context)\n\ndef placeorder(request):\n order = Order.objects.create(user=request.user)\n # 'cart' above is a view function, not a model instance; fetch this user's cart:\n user_cart = Cart.objects.get(user=request.user)\n for item in user_cart.items.all():\n orderItem, created = OrderItem.objects.update_or_create(\n order=order, product=item.product, price=item.price, quantity=item.quantity)\n order.order_items.add(orderItem)\n form=customerform()\n if request.method=='POST':\n form = customerform(request.POST)\n if form.is_valid():\n form.save()\n return redirect('store')\n # fall back to re-rendering the order form for GET requests or invalid input:\n return render(request, 'easyshop/placeorder.html', {'form': form})\n\n\n 
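\n# Illustrative sketch, not part of the original app: one way to empty the\n# user's cart once an order has been placed, reusing the Cart/CartItem models\n# from the views above. The 'clear_cart' name and the redirect target are assumptions.\n@login_required(login_url='login')\ndef clear_cart(request):\n user_cart = Cart.objects.get(user=request.user)\n # deleting the CartItem rows also removes them from the cart's M2M relation\n user_cart.items.all().delete()\n return redirect('store')\n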
","repo_name":"012rashidkp/easyshop","sub_path":"shopping/easyshop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23928184094","text":"# coding: utf-8\n\n\"\"\"\nDefinition of categories.\n\"\"\"\n\nimport order as od\n\n\ndef add_categories(config: od.Config) -> None:\n \"\"\"\n Adds all categories to a *config*.\n \"\"\"\n config.add_category(\n name=\"incl\",\n id=1,\n selection=\"sel_incl\",\n label=\"inclusive\",\n )\n cat_e = config.add_category(\n name=\"1e\",\n id=100,\n selection=\"sel_1e\",\n label=\"1 electron\",\n )\n cat_e.add_category(\n name=\"1e_eq1b\",\n id=110,\n selection=\"sel_1e_eq1b\",\n label=\"1e, 1 b-tag\",\n )\n cat_e.add_category(\n name=\"1e_ge2b\",\n id=120,\n selection=\"sel_1e_ge2b\",\n label=r\"1e, $\\geq$ 2 b-tags\",\n )\n","repo_name":"uhh-cms/analysis_playground","sub_path":"ap/config/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40724102934","text":"import logging\nimport traceback\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.utils.log import AdminEmailHandler\nfrom django.views.debug import ExceptionReporter, get_exception_reporter_filter\n\n# Make sure a NullHandler is available\n# This was added in Python 2.7/3.2\ntry:\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\n# Make sure that dictConfig is available\n# This was added in Python 2.7/3.2\ntry:\n from logging.config import dictConfig\nexcept ImportError:\n from django.utils.dictconfig import dictConfig\n\ngetLogger = logging.getLogger\n\n# Ensure the creation of the Django logger\n# with a null handler. 
This ensures we don't get any\n# 'No handlers could be found for logger \"django\"' messages\nlogger = getLogger('django')\nif not logger.handlers:\n logger.addHandler(NullHandler())\n\n\nclass AlternativeAdminEmailHandler(AdminEmailHandler):\n def emit(self, record):\n try:\n request = record.request\n subject = '%s (%s IP): %s' % (\n record.levelname,\n (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS\n and 'internal' or 'EXTERNAL'),\n record.getMessage()\n )\n filter = get_exception_reporter_filter(request)\n request_repr = filter.get_request_repr(request)\n except Exception:\n subject = '%s: %s' % (\n record.levelname,\n record.getMessage()\n )\n request = None\n request_repr = \"Request repr() unavailable.\"\n subject = self.format_subject(subject)\n\n if record.exc_info:\n exc_info = record.exc_info\n stack_trace = '\\n'.join(traceback.format_exception(*record.exc_info))\n else:\n exc_info = (None, record.getMessage(), None)\n stack_trace = 'No stack trace available'\n\n message = \"%s\\n\\n%s\" % (stack_trace, request_repr)\n reporter = ExceptionReporter(request, is_email=True, *exc_info)\n html_message = self.include_html and reporter.get_traceback_html() or None\n\n # create new connection\n connection = mail.get_connection()\n connection.password = settings.ALTERNATE_EMAIL_HOST_PASSWORD\n connection.username = settings.ALTERNATE_EMAIL_HOST_USER\n connection.host = settings.ALTERNATE_EMAIL_HOST\n connection.port = settings.ALTERNATE_EMAIL_PORT\n connection.use_tls = settings.ALTERNATE_EMAIL_USE_TLS\n\n mail.mail_admins(subject, message, fail_silently=True, html_message=html_message, connection=connection)\n","repo_name":"PragmaticMates/django-pragmatic","sub_path":"pragmatic/loghandlers.py","file_name":"loghandlers.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"}
{"seq_id":"9198914321","text":"def merge_sort(the_list):\n # recursively halve the list, sort each half, then merge the results\n if len(the_list) <= 1:\n return the_list\n \n left, right = split(the_list)\n left = merge_sort(left)\n right = merge_sort(right)\n\n return merge(left, right)\n\ndef split(the_list):\n # split the list into two halves at the midpoint\n mid = len(the_list)//2\n return the_list[:mid], the_list[mid:]\n\ndef merge(left, right):\n # interleave two sorted lists into one sorted list\n l = []\n i = 0\n j = 0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n l.append(left[i])\n i += 1\n else:\n l.append(right[j])\n j += 1\n\n # append whatever remains of either half\n while i < len(left):\n l.append(left[i])\n i += 1\n while j < len(right):\n l.append(right[j])\n j += 1\n \n return l\n\n\n\n \n\n \nl = [1, 10, 31, 14]\n\nmerge_sort(l)","repo_name":"Onesco/algorithm-tutorials","sub_path":"merge_list.py","file_name":"merge_list.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"74700829042","text":"# Take a sequence of digits.\n# Write a program that prints the list of non-repeating elements of the original sequence.\n# Example:\n# 47756688399943 -> [5]\n# 1113384455229 -> [8,9]\n# 1115566773322 -> []\n\n\nfrom random import randint\n\nmy_list = \"\".join(list(map(str, [randint(0, 9) for i in range(15)])))\ndef list_no_repetitions(line):\n new_list = []\n for item in line:\n if line.count(item) == 1:\n new_list.append(int(item))\n return new_list\n\nprint(f\"Non-repeating elements of the list: {my_list} => {list_no_repetitions(my_list)}\")\n\n\n\n\n\n\n\n\n\n# from random import randint as rI\n# unique = {}\n# my_list = \"\".join(list(map(str, [rI(0, 9) for i in range(20)])))\n# 
print(my_list)\n#\n# for c in my_list:\n# if unique.get(c):\n# unique[c] = unique.get(c) + 1\n# else:\n# unique[c] = 1\n#\n# ulist = []\n#\n# for i in unique.items():\n# if i[1] == 1:\n# ulist.append(i[0])\n# print(ulist)\n\n\n\n\n\n\n\n\n\n","repo_name":"katerinapavlova13/Seminar4_dz_python","sub_path":"Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"72214307761","text":"class Pago():\r\n def __init__(self, id=None, cedula_estudiante=None, metodo_pago_id=None, monto_id=None,\r\n fecha_pago=None, referencia_transferencia=None, ciclo=None):\r\n self.id = id\r\n self.cedula_estudiante = cedula_estudiante\r\n self.metodo_pago_id = metodo_pago_id\r\n self.monto_id = monto_id\r\n self.fecha_pago = fecha_pago\r\n self.referencia_transferencia = referencia_transferencia\r\n self.ciclo = ciclo\r\n \r\n def to_JSON(self):\r\n return {\r\n \"id\": self.id,\r\n \"estudiante\": self.cedula_estudiante, \r\n \"metodo_pago\": self.metodo_pago_id.to_JSON(),\r\n \"monto\": self.monto_id.to_JSON(),\r\n \"fecha_pago\": self.fecha_pago,\r\n \"referencia_transferencia\": self.referencia_transferencia,\r\n \"ciclo\": self.ciclo\r\n }\r\n","repo_name":"elicuralli/ADMINISTRACION","sub_path":"src/models/entities/pagos.py","file_name":"pagos.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27793431567","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 12:22:27 2020\n\n@author: Logan Rowe\n\nA string is said to be beautiful if each letter in the string appears at most\n as many times as the previous letter in the alphabet within the string; ie:\n b occurs no more times than a; c occurs no more times than b; etc.\n\nGiven a string, check whether it is beautiful.\n\nExample\n\nFor inputString = \"bbbaacdafe\", the output should be \nisBeautifulString(inputString) = true;\n\nThis string contains 3 as, 3 bs, 1 c, 1 d, 1 e, and 1 f \n(and 0 of every other letter), so since there aren't any letters that appear \nmore frequently than the previous letter, this string qualifies as beautiful.\n\nFor inputString = \"aabbb\", the output should be \nisBeautifulString(inputString) = false;\n\nSince there are more bs than as, this string is not beautiful.\n\nFor inputString = \"bbc\", the output should be \nisBeautifulString(inputString) = false.\n\nAlthough there are more bs than cs, this string is not beautiful because there \nare no as, so therefore there are more bs than as.\n\nInput/Output\n\n[execution time limit] 4 seconds (py3)\n\n[input] string inputString\n\nA string of lowercase English letters.\n\nGuaranteed constraints:\n3 ≤ inputString.length ≤ 50.\n\n[output] boolean\n\nReturn true if the string is beautiful, false otherwise.\n\"\"\"\n\n'''\nthoughts...\n\nthis was a poorly phrased problem because it did not state whether the alphabet\nwas a continuous loop. i.e. 
whether 'aaz' is beautiful.\n\nthought process is annotated within the function below\n'''\n\nimport string\n\ndef isBeautifulString(inputString):\n #Use a dictionary to track how many times each letter is found in inputString\n letter_count={}\n for letter in string.ascii_lowercase:\n letter_count[letter]=0\n \n for letter in inputString:\n if letter_count[letter]==0:\n letter_count[letter]=inputString.count(letter)\n \n #Use another dictionary to see which letter precedes any given letter alphabetically\n alphabet=string.ascii_lowercase\n previous='z'+string.ascii_lowercase[:-1]\n previous_letter={alphabet[idx]:previous[idx] for idx in range(len(alphabet))}\n \n # fail if any letter occurs more times than its alphabetical predecessor \n for letter in set(inputString):\n if letter_count[letter]>letter_count[previous_letter[letter]] and letter!='a':\n return False\n \n return True\n \n\nif __name__=='__main__':\n inputStrings=[\"bbbaacdafe\",'bbc']\n for s in inputStrings: print(isBeautifulString(s))\n \n","repo_name":"LPRowe/coding-interview-practice","sub_path":"old_practice_problems/arcade/intro-problems/medium/isBeautifulString.py","file_name":"isBeautifulString.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"}
{"seq_id":"32818271022","text":"import os\nimport haoc\nimport re\nimport json\nfrom PySide2 import QtCore, QtWidgets, QtGui\nimport time\nimport datetime\n\nis_login = False\n\ndesktop_rect = QtGui.QGuiApplication.instance().desktop().availableGeometry()\n\n\ndef utc_to_local_stamp(t):\n\treturn time.mktime((t - datetime.timedelta(seconds=time.timezone)).timetuple())\n\n\nclass ObjectsHolder:\n\t# Protect objects from garbage collection\n\tobjects = []\n\n\t@classmethod\n\tdef add(cls, obj):\n\t\tcls.objects.append(obj)\n\n\t@classmethod\n\tdef remove(cls, obj):\n\t\tif obj in cls.objects:\n\t\t\tcls.objects.remove(obj)\n\n\tdef __init__(self):\n\t\tpass\n\n\ndef get_local_nodes_path():\n\treturn \"%s/data/%s/nodes\" % (get_root_path(), Config.get_ak())\n\n\ndef get_root_path():\n\treturn os.path.dirname(haoc.__file__).replace('\\\\', '/')\n\n\nclass Config:\n\tpath = get_root_path() + '/data/u_info.json'\n\taccess_key = None\n\n\tdef __init__(self, ak=\"\", sk=\"\", n=\"\"):\n\t\tself.ak = ak\n\t\tself.sk = sk\n\t\tself.name = n\n\n\t@staticmethod\n\tdef config_to_dict(config):\n\t\treturn {'ak': config.ak, 'sk': config.sk, 'name': config.name}\n\n\t@staticmethod\n\tdef dict_to_config(d):\n\t\tconfig = Config()\n\t\tconfig.ak = d['ak']\n\t\tconfig.sk = d['sk']\n\t\tconfig.name = d['name']\n\t\treturn config\n\n\tdef save(self):\n\t\twith open(Config.path, 'w') as f:\n\t\t\tjson.dump(self, f, default=Config.config_to_dict)\n\n\t@staticmethod\n\tdef read():\n\t\tif not os.path.exists(Config.path):\n\t\t\tif not os.path.exists(os.path.dirname(Config.path)):\n\t\t\t\tos.makedirs(os.path.dirname(Config.path))\n\t\t\tc = Config()\n\t\t\tc.save()\n\t\twith open(Config.path, 'r') as f:\n\t\t\tobj = json.load(f, object_hook=Config.dict_to_config)\n\t\t\treturn obj\n\n\t@staticmethod\n\tdef get_ak():\n\t\tif Config.access_key is None or Config.access_key == '':\n\t\t\tConfig.access_key = Config.read().ak\n\t\treturn Config.access_key\n\n\t@staticmethod\n\tdef get_sk():\n\t\treturn Config.read().sk\n\n\t@staticmethod\n\tdef get_name():\n\t\treturn Config.read().name\n\n\tdef __str__(self):\n\t\treturn \"name:%s, access_key:%s, secret_key:%s\" % (self.name, self.ak, 
self.sk)\n\n\ndef show_message_box(parent, message):\n\tmsg_box = QtWidgets.QMessageBox()\n\tmsg_box.setText(message)\n\tmsg_box.setParent(parent, QtCore.Qt.Window)\n\tmsg_box.exec_()\n\n\ndef show_question_box(parent, message):\n\tmsg_box = QtWidgets.QMessageBox(parent)\n\tmsg_box.setText(message)\n\tmsg_box.addButton(QtWidgets.QMessageBox.Yes)\n\tmsg_box.addButton(QtWidgets.QMessageBox.No)\n\tmsg_box.addButton(QtWidgets.QMessageBox.Cancel)\n\tmsg_box.setEscapeButton(QtWidgets.QMessageBox.Cancel)\n\treturn msg_box.exec_()\n\n\ndef get_local_file_list(lis, root):\n\tfor f in os.listdir(root):\n\t\tfull_path = os.path.join(root, f)\n\t\tif os.path.isdir(full_path):\n\t\t\t# lis.append(full_path.replace('\\\\', '/'))\n\t\t\tget_local_file_list(lis, full_path)\n\t\telse:\n\t\t\tif full_path[-4:] == '.nod':\n\t\t\t\tlis.append(full_path.replace('\\\\', '/')[len(get_local_nodes_path())+1:])\n\n\nclass TreeItem(QtGui.QStandardItem):\n\tdef __init__(self, text, is_downloaded=True):\n\t\tQtGui.QStandardItem.__init__(self, text)\n\t\tself.is_downloaded = is_downloaded\n\t\tself.set_downloaded(is_downloaded)\n\n\tdef set_downloaded(self, tof):\n\t\tself.is_downloaded = tof\n\t\tfont = self.font()\n\t\tfont.setItalic(not tof)\n\t\tself.setFont(font)\n\n\ndef setup_tree_model(parent, path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\tfor f in os.listdir(path):\n\t\tfull_path = os.path.join(path, f)\n\t\tif os.path.isdir(full_path):\n\t\t\titem = TreeItem(os.path.basename(full_path) + \"/\")\n\t\t\tparent.appendRow(item)\n\t\t\tsetup_tree_model(item, full_path)\n\t\telse:\n\t\t\tif full_path[-4:] == '.nod':\n\t\t\t\tis_d = os.path.getsize(full_path) != 0\n\t\t\t\titem = TreeItem(os.path.basename(full_path)[:-4], is_downloaded=is_d)\n\t\t\t\tparent.appendRow(item)\n\n\ndef check_name_ok(s):\n\tpattern = re.compile(\"^/|//+|[^a-zA-Z0-9/\\s_\\-+,'!@#$%^&`~()\\[\\]]+\")\n\tres = pattern.findall(s)\n\treturn \" \".join(res)\n\n","repo_name":"gonnaflyzhang/HAOC","sub_path":"haoc/HaocUtils.py","file_name":"HaocUtils.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"73789293681","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom math import sqrt\nimport matplotlib.pyplot as plt\n\n###\n\nfrom pandas.plotting import register_matplotlib_converters\n\nregister_matplotlib_converters()\n\n\nclass PFSE:\n \"\"\"\n\n \"\"\"\n\n def __init__(self, assets_hist, weights, ret_column):\n \"\"\"\n\n :param assets:\n :param weights:\n\n \"\"\"\n self.assets = assets_hist\n self.weights = weights\n self.ret_col = ret_column\n\n def offset_date(self, t, asset_n, return_counter=False):\n \"\"\"\n\n :param t: How many days we want to span from today (<0 means we are going back)\n :param return_counter: In some cases it will be useful to know how many days the function\n will span to get a valid date.\n When this parameter is set to True, the function will return exactly this value,\n permitting us to define differences between business days more easily.\n For example, if we input a Sunday and set return_counter=True, the function will\n return 2, i.e. 
the span between Sunday date and Friday date\n (if Friday is not a holiday).\n\n :return: business day (if return_counter=False)\n :return: difference between date in t and last valid business date (if return_counter=True)\n\n \"\"\"\n\n d = dt.date.today() + dt.timedelta(days=t)\n\n counter = 0\n while True:\n if d.weekday() < 5 and d in self.assets[asset_n].index:\n break\n else:\n d = d - dt.timedelta(days=1)\n if return_counter is True:\n counter += 1\n continue\n\n if return_counter is True:\n return counter\n else:\n return d\n\n def pf_single_return(self, t, asset_n):\n \"\"\"\n\n :param t:\n :return:\n\n \"\"\"\n ret_list = []\n for i in self.assets:\n ret_list.append(i.loc[self.offset_date(t, asset_n), self.ret_col])\n try:\n ret = np.dot(np.array(ret_list), np.array(self.weights))\n except TypeError:\n ret_fixed = [float(i) for i in ret_list]\n ret = np.dot(np.array(ret_fixed), np.array(self.weights))\n\n return ret\n\n def pf_single_volatility(self, t, asset_n):\n \"\"\"\n\n :param t:\n :param asset_n:\n :return:\n\n \"\"\"\n\n ret_list = []\n for i in self.assets:\n ret_list.append(float(i.loc[self.offset_date(t, asset_n), self.ret_col]) ** 2)\n\n w_prod = [2 * w_i * w_j for w_i in self.weights for w_j in self.weights\n if self.weights.index(w_i) != self.weights.index(w_j)]\n\n ret_prod = [r_i * r_j for r_i in self.weights for r_j in self.weights\n if self.weights.index(r_i) != self.weights.index(r_j)]\n\n var = np.dot(np.array(ret_list), np.array(self.weights)) + np.dot(np.array(ret_prod), np.array(w_prod))\n\n return sqrt(var)\n\n def pf_returns(self):\n \"\"\"\n\n :return:\n\n \"\"\"\n ret = {\"Date\": [], \"Log Returns\": []}\n for i in range(100, 3600):\n ret[\"Date\"].append(self.offset_date(-i, 0))\n ret[\"Log Returns\"].append(self.pf_single_return(-i, 0))\n\n df = pd.DataFrame(ret)\n df.set_index(\"Date\", inplace=True)\n\n return df\n\n def pf_volatility(self):\n \"\"\"\n\n :return:\n\n \"\"\"\n\n vol = {\"Date\": [], \"Volatility\": []}\n for i in range(100, 3600):\n vol[\"Date\"].append(self.offset_date(-i, 0))\n vol[\"Volatility\"].append(self.pf_single_volatility(-i, 0))\n\n df = pd.DataFrame(vol)\n df.set_index(\"Date\", inplace=True)\n\n return df\n\n# if __name__ == \"__main__\":\n# ads = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\Master venv\\Data\\Equities\\ADS.DE.csv\",\n# parse_dates = [\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# ads.dropna(inplace=True)\n# ads.set_index(\"Date\", inplace=True)\n#\n# alv = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\ALV.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# alv.dropna(inplace=True)\n# alv.set_index(\"Date\", inplace=True)\n#\n# bas = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\BAS.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# bas.dropna(inplace=True)\n# bas.set_index(\"Date\", inplace=True)\n#\n# bayn = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\BAYN.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# bayn.dropna(inplace=True)\n# bayn.set_index(\"Date\", inplace=True)\n#\n# bmw = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\BMW.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# bmw.dropna(inplace=True)\n# bmw.set_index(\"Date\", inplace=True)\n#\n# dai = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\DAI.DE.csv\",\n# 
parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# dai.dropna(inplace=True)\n# dai.set_index(\"Date\", inplace=True)\n#\n# dte = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\DTE.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# dte.dropna(inplace=True)\n# dte.set_index(\"Date\", inplace=True)\n#\n# sap = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\SAP.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# sap.dropna(inplace=True)\n# sap.set_index(\"Date\", inplace=True)\n#\n# sie = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\SIE.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# sie.dropna(inplace=True)\n# sie.set_index(\"Date\", inplace=True)\n#\n# vow3 = pd.read_csv(r\"C:\\Users\\alfa8\\Python Projects\\venv\\Data\\Equities\\VOW3.DE.csv\",\n# parse_dates=[\"Date\"])[[\"Date\", \"Close\", \"Log Returns\"]]\n# vow3.dropna(inplace=True)\n# vow3.set_index(\"Date\", inplace=True)\n#\n# TestBasket = PFSE([ads, alv, bas, bayn, bmw, dai, dte, sap, sie, vow3],\n# [1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10],\n# \"Log Returns\")\n#\n# print(TestBasket.pf_single_return(-50,0))\n# print(TestBasket.pf_single_volatility(-50,0))\n#\n# plt.figure(1)\n# plt.subplot(211)\n# plt.plot(TestBasket.pf_returns())\n# plt.subplot(212)\n# plt.plot(TestBasket.pf_volatility())\n#\n# plt.show()\n","repo_name":"emarepo/General-Volatility-Approaches","sub_path":"PortfolioSelectionEngine.py","file_name":"PortfolioSelectionEngine.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23126814345","text":"import torch\nimport numpy as np\nfrom mlproject.log import DevNullSummaryWriter\n\nfrom deploy.setup import Setup\nfrom mlproject import MLProject\nfrom tqdm import tqdm\nimport os\nfrom deploy.utils import *\nfrom tensorboardX import SummaryWriter\nfrom time import gmtime\nfrom time import strftime\n\nclass AttrProject(MLProject):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup = self.model\n self._init_writer(self.config)\n\n def _init_writer(self, config):\n \"\"\" set the writer \"\"\"\n tb_base = os.environ['TENSORBOARD_DIR'] if 'TENSORBOARD_DIR' in os.environ else None\n tb_dir = config.get(\"tensorboard_dir\", \"default\")\n if tb_base is None or tb_dir is None:\n self.writer = DevNullSummaryWriter()\n else:\n if tb_dir == \"default\":\n tb_dir = strftime(\"%m-%d/%H-%M-%S\", gmtime())\n self.writer = SummaryWriter(os.path.join(tb_base, tb_dir))\n\n def get_model(self, config: dict) -> Setup:\n \"\"\" really: build_setup \"\"\"\n return self.build_setup(config)\n\n def build_setup(self, config) -> Setup:\n raise NotImplementedError\n\n def get_setup(self) -> Setup:\n \"\"\" holder is a nicer name for self.model \"\"\"\n return self.model\n\n def train_single_sample(self, iters=100, idx=0):\n print(\"training on single data...\")\n data, label = self.single_datapoint(idx=idx)\n test = self.setup.test_batch((data, label))\n print(test)\n for i in tqdm(range(0,iters)):\n self.setup.optimizer.zero_grad()\n loss = self.setup.model.get_loss(data, label)\n loss.backward()\n self.setup.optimizer.step()\n test = self.setup.test_batch((data, label))\n print(test)\n\n def info_grad(self, layer: torch.nn.Module, input: torch.Tensor = None):\n if input is None:\n input, _ = 
next(self.dataset_factory.test_loader().__iter__())\n input = input.to(self.device)\n input = input[0].unsqueeze(0) # single element\n\n # forward + backward\n self.setup.model.zero_grad()\n out = self.setup.model(input)\n one_hot = torch.zeros(out.shape).to(self.device)\n one_hot[0, out.argmax(dim=1)] = 1\n out.backward(one_hot)\n\n # get info\n params = np.concatenate([to_np(p).flatten() for p in layer.parameters()])\n grads = np.concatenate([to_np(p.grad).flatten() for p in layer.parameters()])\n print(\"param dim\", params.shape)\n print(\"param std\", params.std())\n print(\"param mean\", params.mean())\n print(\"grad mean\", grads.mean())\n print(\"grad std\", grads.std())\n\n def test(self):\n print(\"testing...\")\n return super().test()\n\n def train(self):\n self.setup.loss.epoch = self.epoch\n return super().train()\n\n def visualize_info_dropout(self, layers):\n\n # pass thru anything\n self.data_example()\n\n for i, l in enumerate(layers):\n alpha = l.last_alpha\n hmap = alpha.mean(axis=(0,1))\n print(\"alpha: shape={}, max={}, min={}, std={:04f}, mean={}\".format(\n hmap.shape, hmap.max(), hmap.min(), hmap.std(), hmap.mean()))\n plt.imshow(hmap, cmap=\"Greys\")\n plt.title(\"alpha of {}\".format(i))\n plt.show()\n\n for i, l in enumerate(layers):\n kl = l.last_kls.mean(axis=(0,1))\n print(kl.shape)\n hmap = kl\n print(\"KL: shape={}, max={}, min={}, std={:04f}, mean={}\".format(\n hmap.shape, hmap.max(), hmap.min(), hmap.std(), hmap.mean()))\n plt.imshow(hmap, cmap=\"Greys\")\n plt.title(\"KL of {}\".format(i))\n plt.show()\n\n def single_batch(self, idx: int=0) -> (torch.Tensor, torch.Tensor):\n \"\"\" dim: BxCxHxW \"\"\"\n # TODO unneccessary iterating, jump would be better (however, itertools itslice doesnt work somehow)\n for i, (dat, labels) in enumerate(self.dataset_factory.test_loader().__iter__()):\n if i == idx:\n return dat.to(self.device), labels.to(self.device)\n raise RuntimeError\n\n def single_datapoint(self, idx: int=0) -> (torch.Tensor, torch.Tensor):\n \"\"\" dim: 1xCxHxW \"\"\"\n data, labels = self.single_batch(idx)\n return data[0].unsqueeze(0), labels[0].unsqueeze(0)\n\n def data_example(self, idx: int=0):\n data, labels = self.single_datapoint(idx)\n data = data.to(self.device)\n img = data[0].unsqueeze(0)\n show_img(img[0])\n self.setup.print_shapes = True\n out = self.setup.forward(img) # pass thru\n print(\"label: \", out.detach().cpu().numpy())\n self.model.model.print_shapes = False\n\n def data_info(self):\n print(\"train set\", len(self.dataset_factory.train_set()))\n print(\"train batch\", len(self.dataset_factory.train_loader()))\n print(\"test set\", len(self.dataset_factory.test_set()))\n print(\"test batch\", len(self.dataset_factory.test_loader()))\n\n def model_info(self):\n print(\"model family\", self.model.model.family())\n print(\"optimizer\", self.setup.optimizer)\n print(\"parameters\", len(list(self.setup.model.parameters())))\n","repo_name":"karl-schulz/attribution-experiments","sub_path":"deploy/attr_project.py","file_name":"attr_project.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27201803731","text":"import socket, sys, time\n\nTCP_IP = input(\"Connect to Local IP: \")\nTCP_PORT = int(input(\"Connect to Local Port: \"))\nBUFFER_SIZE = 1024\nrunning = True\n\ndef client(conn):\n while True:\n data = conn.recv(BUFFER_SIZE)\n if not data:\n break\n # broadcast\n for client in CLIENTS.values():\n client.send(data)\n\n # the 
connection is closed: unregister\n del CLIENTS[conn.fileno()]\n\nimport threading\n\n# registry of connected client sockets, keyed by socket file descriptor\nCLIENTS = {}\n\ndef listener():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((TCP_IP,TCP_PORT))\n s.listen(5)\n while True:\n conn, addr = s.accept()\n # register client\n CLIENTS[conn.fileno()] = conn\n threading.Thread(target=client, args=(conn,)).start()\n\nif __name__ == '__main__':\n listener()\n","repo_name":"ottowong/DnD-App","sub_path":"clientTest.py","file_name":"clientTest.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"38317704254","text":"import datetime\nimport fnmatch\nimport re\nfrom typing import List, Optional, Set\n\nimport pandas as pd\n\nfrom webviz_subsurface._abbreviations.reservoir_simulation import historical_vector\nfrom webviz_subsurface._providers import EnsembleSummaryProvider\nfrom webviz_subsurface._utils.simulation_timeseries import (\n set_simulation_line_shape_fallback,\n)\nfrom webviz_subsurface._utils.vector_selector import add_vector_to_vector_selector_data\n\n\nclass ProviderTimeSeriesDataModel:\n \"\"\"Class to process and visualize ensemble timeseries\"\"\"\n\n def __init__(self, provider_set: dict, column_keys: Optional[list] = None) -> None:\n self._provider_set = provider_set\n self.line_shape_fallback = set_simulation_line_shape_fallback(\"linear\")\n all_vector_names = self._create_union_of_vector_names_from_providers(\n list(provider_set.values())\n )\n self._vector_names = (\n self.filter_vectorlist_on_column_keys(column_keys, all_vector_names)\n if column_keys is not None\n else all_vector_names\n )\n if not self._vector_names:\n raise ValueError(\"No vectors match the selected 'column_keys' criteria\")\n\n self._dates = self.all_dates()\n\n # add vectors to vector selector\n self.vector_selector_data: list = []\n for vector in self.get_non_historical_vector_names():\n add_vector_to_vector_selector_data(self.vector_selector_data, vector)\n\n @property\n def vectors(self) -> List[str]:\n return self._vector_names\n\n @property\n def dates(self) -> List[datetime.datetime]:\n return self._dates\n\n def get_non_historical_vector_names(self) -> list:\n return [\n vector\n for vector in self._vector_names\n if historical_vector(vector, None, False) not in self._vector_names\n ]\n\n def all_dates(self) -> List[datetime.datetime]:\n \"\"\"List with the union of dates among providers\"\"\"\n # TODO: Adjust when providers are updated!\n dates_union: Set[datetime.datetime] = set()\n for provider in list(self._provider_set.values()):\n _dates = set(provider.dates(None))\n dates_union.update(_dates)\n return list(sorted(dates_union))\n\n @staticmethod\n def _create_union_of_vector_names_from_providers(\n providers: List[EnsembleSummaryProvider],\n ) -> List[str]:\n \"\"\"Create list with the union of vector names among providers\"\"\"\n vector_names = []\n for provider in providers:\n vector_names.extend(\n provider.vector_names_filtered_by_value(\n exclude_all_values_zero=True, exclude_constant_values=True\n )\n )\n vector_names = list(sorted(set(vector_names)))\n return vector_names\n\n @staticmethod\n def filter_vectorlist_on_column_keys(\n column_key_list: list, vectorlist: list\n ) -> list:\n \"\"\"Filter vectors using list of unix shell wildcards\"\"\"\n try:\n regex = re.compile(\n \"|\".join([fnmatch.translate(col) for col in column_key_list]),\n flags=re.IGNORECASE,\n )\n return [v for v in vectorlist if regex.fullmatch(v)]\n except re.error:\n return []\n\n def 
get_historical_vector_df(\n self, vector: str, ensemble: str\n ) -> Optional[pd.DataFrame]:\n hist_vecname = historical_vector(vector, smry_meta=None)\n\n if hist_vecname and hist_vecname in self.vectors:\n provider = self._provider_set[ensemble]\n return provider.get_vectors_df(\n [hist_vecname], None, realizations=provider.realizations()[:1]\n ).rename(columns={hist_vecname: vector})\n return None\n\n def get_vector_df(\n self,\n ensemble: str,\n realizations: List[int],\n vectors: List[str],\n ) -> pd.DataFrame:\n provider = self._provider_set[ensemble]\n ens_vectors = [vec for vec in vectors if vec in provider.vector_names()]\n return provider.get_vectors_df(ens_vectors, None, realizations)\n\n def get_last_date(self, ensemble: str) -> str:\n return max(self._provider_set[ensemble].dates(None))\n","repo_name":"equinor/webviz-subsurface","sub_path":"webviz_subsurface/plugins/_property_statistics/models/ensemble_timeseries_datamodel.py","file_name":"ensemble_timeseries_datamodel.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"75"} +{"seq_id":"23110935193","text":"from gnuradio import (\n gr,\n blocks,\n digital,\n dtl,\n fft,\n filter,\n pdu,\n)\nimport pmt\n\n\nclass ofdm_adaptive_feedback_rx(gr.hier_block2):\n \"\"\"Adaptive OFDM feedback RX\n \"\"\"\n\n def __init__(self, threshold):\n gr.hier_block2.__init__(self, \"ofdm_adaptive_feedback_rx\",\n gr.io_signature(1, 1, gr.sizeof_gr_complex),\n gr.io_signature(0, 0, 0))\n\n self.message_port_register_hier_out(\"feedback_out\")\n\n self.sps = 2 # samples per symbol\n self.eb = 0.35 # excess bw\n self.nfilts = 32\n self.taps = filter.firdes.root_raised_cosine(self.nfilts, self.nfilts,\n 1.0, self.eb, 11*self.sps*self.nfilts)\n self.format = dtl.ofdm_adaptive_feedback_format(\n digital.packet_utils.default_access_code, threshold)\n self.constellation = digital.constellation_calcdist(digital.psk_2()[0], digital.psk_2()[1],\n 2, 1, digital.constellation.AMPLITUDE_NORMALIZATION).base()\n self.rxmod = digital.generic_mod(\n self.constellation, False, self.sps, True, self.eb, False, False)\n # digital.packet_utils.default_access_code bytes\n self.preamble = [0xac, 0xdd, 0xa4, 0xe2, 0xf2, 0x8c, 0x20, 0xfc]\n self.mark_delays = [0, 0, 34, 56, 87, 119]\n self.modulated_sync_word = digital.modulate_vector_bc(\n self.rxmod.to_basic_block(), self.preamble, [1])\n self.mark_delay = self.mark_delays[self.sps]\n\n self.parser = digital.protocol_parser_b(self.format)\n\n self.clock_sync = digital.pfb_clock_sync_ccf(\n self.sps, 6.28/400.0, self.taps, self.nfilts, float(self.nfilts/2), 1.5, 1)\n\n self.costas_loop = digital.costas_loop_cc(\n (6.28/200.0), self.constellation.arity(), False)\n self.sync_word_correlator = digital.corr_est_cc(\n self.modulated_sync_word, self.sps, self.mark_delay, 0.99, digital.THRESHOLD_ABSOLUTE)\n self.constellation_decoder = digital.constellation_decoder_cb(\n self.constellation)\n self.amp_est_multiplier = blocks.multiply_by_tag_value_cc(\"amp_est\", 1)\n\n self.connect((self, 0), (self.sync_word_correlator, 0))\n self.connect((self.sync_word_correlator, 0),\n (self.amp_est_multiplier, 0))\n self.connect((self.amp_est_multiplier, 0), (self.clock_sync, 0))\n self.connect((self.clock_sync, 0), (self.costas_loop, 0))\n self.connect((self.costas_loop, 0), (self.constellation_decoder, 0))\n self.connect((self.constellation_decoder, 0), (self.parser, 0))\n self.msg_connect(self.parser, \"info\", self, \"feedback_out\")\n\n # # 
self.connect((self.sync_word_correlator, 0), blocks.tag_debug(gr.sizeof_gr_complex, \"corr_est\", \"corr_est\"))\n # self.connect((self.sync_word_correlator, 0), blocks.file_sink(\n # gr.sizeof_gr_complex, \"debug/feedback-rx-after-corr.dat\"))\n\n # src = blocks.vector_source_c(self.modulated_sync_word)\n # self.connect(src, blocks.file_sink(\n # gr.sizeof_gr_complex, \"debug/feedback-rx-sync-word.dat\"))\n","repo_name":"mihaipstef/gr-dtl","sub_path":"python/dtl/ofdm_adaptive_feedback_rx.py","file_name":"ofdm_adaptive_feedback_rx.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"3808385509","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom data_loader import add_data_args\n\nfrom models import add_model_args, build_model\nfrom optimizers import add_optimizer_args\nfrom schedulers import add_scheduler_args\nfrom test_tube import HyperOptArgumentParser\nfrom testing import run_testing, setup_testing\nfrom torchnlp.random import set_seed\nfrom training import add_trainer_specific_args, setup_training\nfrom utils import get_main_args_from_yaml, load_yaml_args\n\nlog = logging.getLogger(\"Shell\")\nlogging.basicConfig(level=logging.INFO)\n\n\ndef run_training_pipeline(parser):\n parser.add_argument(\n \"-f\", \"--config\", default=False, type=str, help=\"Path to a YAML config file.\"\n )\n parser.add_argument(\n \"--optimizer\",\n default=False,\n type=str,\n help=\"Optimizer to be used during training.\",\n )\n parser.add_argument(\n \"--scheduler\",\n default=False,\n type=str,\n help=\"LR scheduler to be used during training.\",\n )\n parser.add_argument(\n \"--model\",\n default=False,\n type=str,\n help=\"The estimator architecture we we wish to use.\",\n )\n args, _ = parser.parse_known_args()\n\n if not args.optimizer and not args.scheduler and not args.model:\n optimizer, scheduler, model = get_main_args_from_yaml(args)\n else:\n optimizer = args.optimizer\n scheduler = args.scheduler\n model = args.model\n\n parser = add_optimizer_args(parser, optimizer)\n parser = add_scheduler_args(parser, scheduler)\n parser = add_model_args(parser, model)\n parser = add_trainer_specific_args(parser)\n hparams = load_yaml_args(parser=parser, log=log)\n\n set_seed(hparams.seed)\n model = build_model(hparams)\n trainer = setup_training(hparams)\n\n if hparams.load_weights:\n model.load_weights(hparams.load_weights)\n\n log.info(f\"{model.__class__.__name__} train starting:\")\n trainer.fit(model)\n\n\ndef run_testing_pipeline(parser):\n parser = add_data_args(parser)\n parser.add_argument(\n \"--checkpoint\", default=None, help=\"Checkpoint file path.\",\n )\n hparams = parser.parse_args()\n run_testing(hparams)\n\n\nif __name__ == \"__main__\":\n parser = HyperOptArgumentParser(\n strategy=\"random_search\", description=\"CAPTION project\", add_help=True\n )\n parser.add_argument(\n \"pipeline\", choices=[\"train\", \"test\"], help=\"train a model or test.\",\n )\n args, _ = parser.parse_known_args()\n if args.pipeline == \"test\":\n run_testing_pipeline(parser)\n else:\n run_training_pipeline(parser)\n","repo_name":"Unbabel/caption","sub_path":"caption/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"18837289084","text":"# 
-------------------------------------------------------------------------------------------------------------------------------------\n# Following code curated for GCPNet (https://github.com/BioinfoMachineLearning/GCPNet):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\nimport os\nfrom pathlib import Path\n\nimport pytest\nimport torch\n\nfrom src.datamodules.atom3d_datamodule import ATOM3DDataModule\n\ntest_cases = [\n (\"LBA\", 32),\n (\"PSR\", 32)\n]\n\n\n@pytest.mark.parametrize(\"task,batch_size\", test_cases)\ndef test_atom3d_datamodule(task: str, batch_size: int):\n # note: append \"..\" to the front of paths when testing in the `tests` dir\n data_dir = os.path.join(\"data\", \"ATOM3D\")\n\n dm = ATOM3DDataModule(\n task=task,\n data_dir=data_dir,\n lba_split=30,\n edge_cutoff=4.5,\n max_neighbors=32,\n max_units=0,\n unit=\"edge\",\n batch_size=batch_size,\n num_workers=2\n )\n dm.prepare_data()\n\n assert not dm.data_train and not dm.data_val and not dm.data_test\n assert Path(data_dir, task).exists()\n\n dm.setup()\n assert dm.data_train and dm.data_val and dm.data_test\n assert dm.train_dataloader() and dm.val_dataloader() and dm.test_dataloader()\n\n num_datapoints = len(dm.data_train) + len(dm.data_val) + len(dm.data_test)\n if task == \"LBA\":\n assert num_datapoints == 4_463\n if task == \"PSR\":\n assert num_datapoints == 44_214\n\n batch = next(iter(dm.train_dataloader()))\n x, y = (batch, batch.num_graphs)\n assert len(x) == batch_size\n assert y == batch_size\n assert x.h.dtype == torch.int64\n assert all([f.dtype == torch.float32 for f in [x.chi, x.e, x.xi, x.x]])\n assert x.edge_index.dtype == torch.int64\n assert type(y) == int\n","repo_name":"BioinfoMachineLearning/GCPNet","sub_path":"tests/test_atom3d_datamodule.py","file_name":"test_atom3d_datamodule.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"75"}
{"seq_id":"72525368242","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom FourMomentaConstructor import Particle\n\n\n# calculate the angle between two particles' momentum vectors, given as (px, py, pz) components\ndef angle(p1, p2):\n angle = np.arccos((p1[0] * p2[0] + p1[1] * p2[1] + p1[2] * p2[2]) / (\n np.sqrt(p1[0] ** 2 + p1[1] ** 2 + p1[2] ** 2) * np.sqrt(p2[0] ** 2 + p2[1] ** 2 + p2[2] ** 2)))\n return angle\n\n\n# calculate one particle's cartesian momentum from pT, eta, phi and m\nclass CartesianParticle:\n def __init__(self, pT: float, eta: float, phi: float, m: float, E=None):\n self.px = pT * np.cos(phi)\n self.py = pT * np.sin(phi)\n self.pz = pT / np.tan(2 * np.arctan(np.exp(-eta)))\n self.p = np.sqrt(self.px ** 2 + self.py ** 2 + self.pz ** 2)\n self.E = np.sqrt(self.p ** 2 + m ** 2) if E is None else E\n\n\n# from the four-momenta of the two daughter particles, calculate their parent particle's four-momentum\ndef parent(p1: CartesianParticle, p2: CartesianParticle, m1: float, m2: float, m: float):\n # the parent four-momentum is the component-wise sum of the daughters'\n E = p1.E + p2.E\n px = p1.px + p2.px\n py = p1.py + p2.py\n pz = p1.pz + p2.pz\n # convert the summed momentum back to (pT, eta, phi) for the constructor\n pT = np.sqrt(px ** 2 + py ** 2)\n eta = np.arcsinh(pz / pT)\n phi = np.arctan2(py, px)\n return CartesianParticle(pT, eta, phi, m, 
E)\n\n","repo_name":"ivanfei-1/DelphesDataAlanysis","sub_path":"AngleObservableCalculator.py","file_name":"AngleObservableCalculator.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12273939324","text":"import typing as tp\nimport warnings\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom os import system\nfrom itertools import combinations\n\nimport autograd.numpy as np\nfrom tqdm.auto import tqdm\nfrom numpy.linalg import lstsq\n\nfrom .layer import (\n BaseLayer,\n Dense,\n FuzzyGMDHLayer,\n GMDHLayer,\n NeoFuzzyLayer,\n WeightsParser,\n Fuzzify,\n)\nfrom .functions import (\n GaussianMembership,\n GaussianRBF,\n ReLU,\n Tanh,\n Sigmoid,\n Linear,\n BellMembership,\n TriangularMembership,\n)\nfrom .optimisers import BaseOptimizer, SGDOptimizer\nfrom .utils import gen_batch, step_simplex, check_numerical_stability\n\n\nclass FFN(object):\n \"\"\"\n Feed Forward Network\n --------------------\n \"\"\"\n\n def __init__(\n self,\n input_shape: tp.Tuple[int],\n layer_specs: tp.List[BaseLayer],\n loss: tp.Callable[..., np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"\n Constructor method\n\n Parameters\n ----------\n\n :param input_shape: Input shape.\n :type input_shape: tuple\n\n :param layer_specs: List containing layers.\n :type layer_specs: list\n\n :param loss: Loss function.\n :type mode: callable\n \"\"\"\n self.parser = WeightsParser()\n self.regularization = kwargs.get(\"regularization\", \"l2\")\n self.reg_coef = kwargs.get(\"reg_coef\", 0)\n self.layer_specs = layer_specs\n cur_shape = input_shape\n W_vect = np.array([])\n for num, layer in enumerate(self.layer_specs):\n layer.number = num\n N_weights, cur_shape = layer.build_weights_dict(cur_shape)\n self.parser.add_weights(str(layer), (N_weights,))\n W_vect = np.append(W_vect, layer.initializer(size=(N_weights,)))\n self._loss = loss\n self.W_vect = 0.1 * W_vect\n\n def loss(\n self, W_vect: np.ndarray, X: np.ndarray, y: np.ndarray, omit_reg: bool = False\n ) -> np.ndarray:\n \"\"\"\n Loss function constructor\n\n Parameters\n ----------\n\n :W_vect: Network weights vector.\n :type W_vect: np.ndarray\n\n :X: Input vector.\n :type X: np.ndarray\n\n :y: Desired network response.\n :type y: np.ndarray\n\n :omit_reg: Omit regularization flag. 
Default is `False`\n :type omit_reg: bool\n \"\"\"\n if self.regularization == \"l2\" and not omit_reg:\n reg = np.power(np.linalg.norm(W_vect, 2), 2)\n elif self.regularization == \"l1\" and not omit_reg:\n reg = np.linalg.norm(W_vect, 1)\n else:\n reg = 0.0\n return self._loss(self._predict(W_vect, X), y) + self.reg_coef * reg\n\n def predict(self, inputs: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict method\n\n Parameters\n ----------\n\n :param inputs: Input vector.\n :type inputs: np.ndarray\n\n Returns\n -------\n\n :returns: Network response.\n :rtype: np.ndarray\n \"\"\"\n return self._predict(self.W_vect, inputs)\n\n def _predict(self, W_vect: np.ndarray, inputs: np.ndarray) -> np.ndarray:\n cur_units = inputs\n for layer in self.layer_specs:\n cur_weights = self.parser.get(W_vect, str(layer))\n cur_units = layer.forward(cur_units, cur_weights)\n return cur_units\n\n def eval(self, input: np.ndarray, output: np.ndarray) -> float:\n \"\"\"\n Evaluate network on given input\n\n Parameters\n ----------\n :param inputs: Input vector.\n :type inputs: np.ndarray\n\n :param output: Desired output\n :type output: np.ndarray\n\n Returns\n -------\n :returns: Loss value\n :rtype: float\n \"\"\"\n return self.loss(self.W_vect, input, output, omit_reg=True)\n\n def frac_err(self, X, T):\n return np.mean(\n np.argmax(T, axis=1) != np.argmax(self.predict(self.W_vect, X), axis=1)\n )\n\n def fit(\n self,\n optimiser: BaseOptimizer,\n train_sample: tp.Tuple[np.ndarray],\n validation_sample: tp.Tuple[np.ndarray],\n batch_size: int,\n epochs: tp.Optional[int] = None,\n verbose: tp.Optional[bool] = None,\n load_best_model_on_end: bool = True,\n minimize_metric: bool = True,\n ):\n \"\"\"\n Fit network on given input\n\n Parameters\n ----------\n :optimiser: Algorithm to use for minimuzing loss.\n :type optimiser: BaseOptimiser\n\n :param train_sample: Train pair (X, y).\n :type train_sample: tuple\n\n :param validation_sample: Validation pair (X, y).\n :type validation_sample: tuple\n\n :batch_size: Batch size.\n :type batch_size: int\n\n :epochs: Number of epochs.\n :type epochs: int\n\n Returns\n -------\n :returns: Tuple (trained_model, loss_history)\n :rtype: union[FFN, dict]\n \"\"\"\n self._optimiser = optimiser\n\n verbose = verbose if verbose else False\n epochs = epochs if epochs else 1\n\n inst = None\n best_inst = None\n best_score = np.inf if minimize_metric else -np.inf\n best_epoch = 0\n\n history = dict(epoch=[], train_loss=[], validation_loss=[])\n\n for i in tqdm(range(epochs), desc=\"Training \"):\n\n tr_accum_loss = []\n tr_loss = np.inf\n to_stop = False\n\n for (X, y) in gen_batch(train_sample, batch_size):\n to_stop, inst, tr_loss = self._optimiser.apply(\n self.loss, X, y, self.W_vect, verbose=verbose\n )\n self.W_vect = inst\n tr_accum_loss.append(tr_loss)\n\n tr_accum_loss = np.mean(tr_accum_loss)\n\n history[\"epoch\"].append(i)\n history[\"train_loss\"].append(tr_accum_loss)\n\n val_loss = self.eval(*validation_sample)[0]\n history[\"validation_loss\"].append(val_loss)\n\n if minimize_metric and val_loss < best_score:\n best_score = val_loss\n best_inst = deepcopy(self.W_vect)\n best_epoch = i\n elif (not minimize_metric) and val_loss > best_score:\n best_score = val_loss\n best_inst = deepcopy(self.W_vect)\n best_epoch = i\n else:\n pass\n\n if verbose:\n print(f\"validation loss - {val_loss}\")\n if to_stop:\n break\n\n if load_best_model_on_end:\n self.W_vect = best_inst\n if verbose:\n print(f\"best validation loss - {best_score}\")\n print(f\"best epoch - 
{best_epoch}\")\n\n return self, history\n\n\nclass GMDH(object):\n \"\"\"\n Group Method of Data Handling\n -----------------------------\n \"\"\"\n\n def __init__(\n self,\n method_type: str,\n loss: tp.Callable[..., np.ndarray],\n **kwargs,\n ) -> None:\n \"\"\"\n Constructor method\n\n Parameters\n ----------\n\n :param method_type: Type of algorithm, `fuzzy`, `neo_fuzzy` or `crisp`.\n :type method_type: str\n\n :param poli_type: Type of polinome, `linear`, 'quadratiic` or `partial_quadratic`.\n :type poli_type: str\n\n :param loss: Loss function.\n :type mode: callable\n \"\"\"\n self.parser = WeightsParser()\n self._method_type = method_type\n self._poli_type = kwargs.get(\"poli_type\", \"linear\")\n self._num_rules = kwargs.get(\"num_rules\", 3)\n self._confidence = kwargs.get(\"confidence\", 0.8)\n self._num_sgd_rounds = kwargs.get(\"num_sgd_rounds\", 50)\n self.layer_specs = self._construct_initial()\n cur_shape = 2\n W_vect = np.array([])\n for num, layer in enumerate(self.layer_specs):\n layer.number = num\n N_weights, cur_shape = layer.build_weights_dict(cur_shape)\n self.parser.add_weights(str(layer), (N_weights,))\n W_vect = np.append(W_vect, np.abs(layer.initializer(size=(N_weights,))))\n self._loss = loss\n self.W_vect = 0.1 * W_vect\n\n self.predict_history = dict(pairs=[], weights=[])\n\n def _construct_initial(self):\n if self._method_type == \"crisp\":\n return [GMDHLayer(poli_type=self._poli_type)]\n elif self._method_type == \"neo_fuzzy\":\n return [NeoFuzzyLayer(num_rules=self._num_rules, msf=TriangularMembership)]\n elif self._method_type == \"fuzzy\":\n return [\n FuzzyGMDHLayer(\n poli_type=self._poli_type,\n msf=BellMembership,\n confidence=self._confidence,\n return_defuzzify=True,\n )\n ]\n else:\n raise ValueError(f\"{self._method_type} is invalid value for `method_type`\")\n\n def predict_one(self, inputs: np.ndarray, return_ful: bool = False) -> np.ndarray:\n \"\"\"\n Predict one gmdh submodule method\n\n Parameters\n ----------\n\n :param inputs: Input vector.\n :type inputs: np.ndarray\n\n Returns\n -------\n\n :returns: Network response.\n :rtype: np.ndarray\n \"\"\"\n if return_ful:\n return self._predict(self.W_vect, inputs)\n return self._predict(self.W_vect, inputs)[0]\n\n def predict(self, inputs: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict all gmdh path\n\n Parameters\n ----------\n\n :param inputs: Input vector.\n :type inputs: np.ndarray\n\n Returns\n -------\n\n :returns: Network response.\n :rtype: np.ndarray\n \"\"\"\n n_layers = len(self.predict_history[\"weights\"])\n is_fuzzy = self._method_type == \"fuzzy\"\n\n current_inputs = deepcopy(inputs)\n for r in range(n_layers):\n n_pairs = len(self.predict_history[\"pairs\"][r])\n pairs = self.predict_history[\"pairs\"][r]\n weights = self.predict_history[\"weights\"][r]\n\n layer_preds = []\n for k in range(n_pairs):\n temp_pred = self._predict(weights[k], current_inputs[:, pairs[k]])\n if is_fuzzy:\n # Exclude believe inrevals\n layer_preds.append(temp_pred[0])\n else:\n layer_preds.append(temp_pred)\n\n current_inputs = np.concatenate(layer_preds, axis=-1)\n\n # Take best, which is first\n return current_inputs[:, 0][..., np.newaxis]\n\n def loss(self, W_vect: np.ndarray, X: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"\n Loss function constructor\n\n Parameters\n ----------\n\n :W_vect: Network weights vector.\n :type W_vect: np.ndarray\n\n :X: Input vector.\n :type X: np.ndarray\n\n :y: Desired network response.\n :type y: np.ndarray\n\n :omit_reg: Omit regularization flag. 
Default is `False`\n :type omit_reg: bool\n \"\"\"\n return self._loss(self.layer_specs[0].forward(X, W_vect), y)\n\n def _predict(self, W_vect: np.ndarray, inputs: np.ndarray) -> np.ndarray:\n cur_units = inputs\n for layer in self.layer_specs:\n cur_weights = self.parser.get(W_vect, str(layer))\n cur_units = layer.forward(cur_units, cur_weights)\n return cur_units\n\n def frac_err(self, X, T):\n return np.mean(\n np.argmax(T, axis=1) != np.argmax(self.predict(self.W_vect, X), axis=1)\n )\n\n def fit_simplex(self, train_sample: tp.Tuple[np.ndarray]):\n X_train, y_train = train_sample\n weights, margin, absolute, inputs = self.layer_specs[0].forward(\n X_train, self.W_vect, True\n )\n w = step_simplex(weights, margin, absolute, inputs, y_train)\n self.W_vect = np.array(w).reshape(self.W_vect.shape)\n return self, None\n\n def fit_lstsq(self, train_sample: tp.Tuple[np.ndarray]):\n X_train, y_train = train_sample\n # Get grouped X_train\n X_train_tr = self.layer_specs[0].forward(X_train, self.W_vect, True)\n # Add bias\n X_train_tr = np.concatenate(\n (X_train_tr[:, 0, :], np.ones((X_train_tr.shape[0], 1))), axis=-1\n )\n # Compute LSTSQ\n self.W_vect = lstsq(X_train_tr, y_train[:, 0])[0]\n return self, None\n\n def fit_sgd(self, train_sample: tp.Tuple[np.ndarray]):\n X_train, y_train = train_sample\n W_vect = np.random.default_rng(42).normal(size=self.W_vect.shape)\n optimiser = SGDOptimizer()\n for iter in range(self._num_sgd_rounds):\n to_stop, inst, _ = optimiser.apply(self.loss, X_train, y_train, W_vect)\n self.W_vect = inst\n if to_stop:\n break\n\n return self, None\n\n def one_fit(\n self,\n train_sample: tp.Tuple[np.ndarray],\n validation_sample: tp.Tuple[np.ndarray],\n pair: tp.Tuple[int, int],\n ):\n if self._method_type == \"fuzzy\":\n self.fit_simplex((train_sample[0][:, pair], train_sample[1]))\n elif self._method_type == \"neo_fuzzy\":\n self.fit_sgd((train_sample[0][:, pair], train_sample[1]))\n else:\n self.fit_lstsq((train_sample[0][:, pair], train_sample[1]))\n\n prediction_val = self.predict_one(\n validation_sample[0][:, pair], not (self._method_type == \"fuzzy\")\n )\n prediction_train = self.predict_one(\n train_sample[0][:, pair], not (self._method_type == \"fuzzy\")\n )\n\n metric_val = self._loss(prediction_val, validation_sample[1])[0]\n metric_train = self._loss(prediction_train, train_sample[1])[0]\n\n if check_numerical_stability(prediction_train) or check_numerical_stability(\n prediction_val\n ):\n return None, None, None, None, True\n else:\n return metric_val, metric_train, prediction_val, prediction_train, False\n\n def fit(\n self,\n train_sample: tp.Tuple[np.ndarray],\n validation_sample: tp.Tuple[np.ndarray],\n max_gmdh_layers: int,\n n_best_to_take: int,\n batch_size: tp.Optional[int] = None,\n minimize_metric: bool = True,\n verbose: tp.Optional[bool] = None,\n ):\n \"\"\"\n Fit network on given input\n\n Parameters\n ----------\n :param train_sample: Train pair (X, y).\n :type train_sample: tuple\n\n :param validation_sample: Validation pair (X, y).\n :type validation_sample: tuple\n\n :max_gmdh_layers: Maximum number of GMDH layers.\n :type max_gmdh_layers: int\n\n :n_best_to_take: Number of best GMDH outputs that go to the next layer.\n :type n_best_to_take: int\n\n :n_best_to_take: Number of best GMDH outputs that go to the next layer.\n :type n_best_to_take: int\n\n :batch_size: If we have `long` data we can optimize it by batches.\n :type batch_size: int\n\n :minimize_metric: Whether we minimize target metric.\n :type minimize_metric: bool\n\n 
:verbose: Whether we turn on verbosity.\n :type verbose: bool\n\n Returns\n -------\n :returns: Tuple (trained_model, loss_history, best_test_pred, best_train_pred)\n :rtype: union[FFN, dict, np.array, np.array]\n \"\"\"\n verbose = verbose if verbose else False\n\n all_possible_pairs = list(combinations(range(train_sample[0].shape[1]), 2))\n\n overall_best_metric = np.inf if minimize_metric else -np.inf\n best_test_pred = None\n best_train_pred = None\n\n history = dict(layer=[], train_loss=[], validation_loss=[])\n\n for r in tqdm(range(max_gmdh_layers), desc=\"Training \"):\n\n layer_metrics = []\n layer_metrics_train = []\n layer_val_preds = []\n layer_train_preds = []\n\n history_weights = []\n histoty_pairs = []\n\n for pair in tqdm(all_possible_pairs, desc=\"One fit\"):\n\n if batch_size is not None and train_sample[0].shape[1] < batch_size:\n\n for (X, y) in gen_batch(train_sample, batch_size):\n\n (\n metric_val,\n metric_train,\n prediction_val,\n prediction_train,\n stop_outer_loop,\n ) = self.one_fit(\n train_sample=(X, y),\n validation_sample=validation_sample,\n pair=pair,\n )\n\n if stop_outer_loop:\n break\n\n # Exclude not full batch\n if prediction_train.shape[0] != batch_size:\n break\n\n layer_metrics.append(metric_val)\n layer_metrics_train.append(metric_train)\n layer_val_preds.append(prediction_val)\n layer_train_preds.append(prediction_train)\n\n history_weights.append(self.W_vect)\n histoty_pairs.append(pair)\n\n if stop_outer_loop:\n break\n else:\n\n (\n metric_val,\n metric_train,\n prediction_val,\n prediction_train,\n stop_outer_loop,\n ) = self.one_fit(\n train_sample=train_sample,\n validation_sample=validation_sample,\n pair=pair,\n )\n\n if stop_outer_loop:\n break\n\n layer_metrics.append(metric_val)\n layer_metrics_train.append(metric_train)\n layer_val_preds.append(prediction_val)\n layer_train_preds.append(prediction_train)\n\n history_weights.append(self.W_vect)\n histoty_pairs.append(pair)\n\n if stop_outer_loop:\n warnings.warn(\"Something gone wrong in optimization\")\n break\n\n layer_metrics = np.array(layer_metrics)\n layer_metrics_train = np.array(layer_metrics_train)\n layer_val_preds = np.concatenate(layer_val_preds, axis=-1)\n layer_train_preds = np.concatenate(layer_train_preds, axis=-1)\n\n if minimize_metric:\n sorted_indices = np.argsort(layer_metrics)\n else:\n sorted_indices = np.argsort(-layer_metrics)\n\n best_metric = layer_metrics[sorted_indices[0]]\n history[\"validation_loss\"].append(best_metric)\n history[\"train_loss\"].append(layer_metrics_train[sorted_indices[0]])\n\n layer_val_preds = layer_val_preds[:, sorted_indices]\n validation_sample = (\n layer_val_preds[:, :n_best_to_take],\n validation_sample[1],\n )\n\n layer_train_preds = layer_train_preds[:, sorted_indices]\n train_sample = (layer_train_preds[:, :n_best_to_take], train_sample[1])\n\n all_possible_pairs = list(combinations(range(train_sample[0].shape[1]), 2))\n\n if verbose:\n print(f\"Layer: {r}. 
Metric: {best_metric}\")\n\n if minimize_metric and best_metric < overall_best_metric:\n overall_best_metric = best_metric\n best_test_pred = layer_val_preds[:, 0][..., np.newaxis]\n best_train_pred = layer_train_preds[:, 0][..., np.newaxis]\n\n self.predict_history[\"pairs\"].append(\n [histoty_pairs[i] for i in sorted_indices[:n_best_to_take]]\n )\n self.predict_history[\"weights\"].append(\n [history_weights[i] for i in sorted_indices[:n_best_to_take]]\n )\n elif (not minimize_metric) and best_metric > overall_best_metric:\n overall_best_metric = best_metric\n best_test_pred = layer_val_preds[:, 0][..., np.newaxis]\n best_train_pred = layer_train_preds[:, 0][..., np.newaxis]\n\n self.predict_history[\"pairs\"].append(\n [histoty_pairs[i] for i in sorted_indices[:n_best_to_take]]\n )\n self.predict_history[\"weights\"].append(\n [history_weights[i] for i in sorted_indices[:n_best_to_take]]\n )\n else:\n break\n\n return self, history\n","repo_name":"tupoylogin/Neural_Net_Genetic_Alg","sub_path":"citk/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"70743538162","text":"import random\r\n\r\nfrom panda3d.core import loadPrcFile\r\nfrom panda3d.core import *\r\nloadPrcFile(\"config/config.prc\")\r\nfrom direct.showbase.ShowBase import ShowBase\r\nfrom direct.task import Task\r\nimport os, sys\r\nimport math\r\n\r\nclass MyGame(ShowBase):\r\n\r\n def __init__(self):\r\n ShowBase.__init__(self)\r\n\r\n # Get the location of the 'py' file I'm running:\r\n self.mydir = os.path.abspath(sys.path[0])\r\n # convert to panda's specific notation\r\n self.mydir = Filename.fromOsSpecific(self.mydir).getFullpath()\r\n\r\n # load skull obj\r\n self.skullModel = self.loader.loadModel(self.mydir + \"/skull.obj\")\r\n self.skullModel.setPos(5, 0, -5)\r\n self.skullModel.reparentTo(self.render)\r\n\r\n # move camera\r\n self.disableMouse()\r\n self.camera.setPos(0, -30, 0)\r\n\r\n # scale down the skull\r\n self.skullModel.setScale(0.35, 0.35, 0.35)\r\n\r\n # add task to be executed\r\n self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\")\r\n\r\n # add ambient light\r\n self.alight = AmbientLight(\"alight\")\r\n self.alight.setColor((0.8, 0.8, 0.8, 1))\r\n self.alnp = self.render.attachNewNode(self.alight)\r\n self.render.setLight(self.alnp)\r\n\r\n # load panda obj\r\n self.pandaModel = self.loader.loadModel(\"models/panda\")\r\n self.pandaModel.setPos(0.0, 0.0, 30.0)\r\n self.pandaModel.setP(90)\r\n self.pandaModel.setScale(.6, .6, .6)\r\n self.pandaModel.reparentTo(self.skullModel)\r\n self.pandaOrientation = 0\r\n\r\n # addd task to rotate panda\r\n self.taskMgr.add(self.spinPandaTask, \"SpinPandaTask\")\r\n\r\n # add taskt o cahnge radius\r\n self.taskMgr.add(self.moveTask, \"MoveTask\")\r\n\r\n\r\n def spinCameraTask(self, task):\r\n self.cameraRadius = 30.0\r\n angleDegrees = task.time * 20.0\r\n angleRadians = angleDegrees * (math.pi / 180)\r\n\r\n self.camera.setPos(\r\n self.cameraRadius * math.sin(angleRadians),\r\n -self.cameraRadius * math.cos(angleRadians), \r\n 0)\r\n\r\n self.camera.lookAt(0.0, 0.0, 0.0)\r\n \r\n return Task.cont\r\n\r\n\r\n def spinPandaTask(self, task):\r\n self.pandaRadius = 10.0\r\n angleDegrees = task.time * 90.0\r\n angleRadians = angleDegrees * (math.pi / 180)\r\n\r\n self.pandaModel.setPos(\r\n self.pandaRadius * math.sin(angleRadians),\r\n self.pandaRadius * math.cos(angleRadians),\r\n 30.0\r\n )\r\n \r\n self.pandaOrientation 
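The `fit` loop in the GMDH record above implements the classic self-organizing scheme: every feature pair gets its own small model, candidates are ranked on a held-out split, and the `n_best_to_take` outputs become the next layer's inputs. A minimal self-contained sketch of one such selection round, assuming plain least-squares pair models (function and variable names here are illustrative, not the library's API):

```python
from itertools import combinations
import numpy as np

def gmdh_round(X_tr, y_tr, X_val, y_val, n_best):
    # Fit a linear model on every feature pair and score it on validation data.
    cands = []
    for i, j in combinations(range(X_tr.shape[1]), 2):
        A = np.c_[X_tr[:, [i, j]], np.ones(len(X_tr))]      # pair features + bias
        w, *_ = np.linalg.lstsq(A, y_tr, rcond=None)
        A_val = np.c_[X_val[:, [i, j]], np.ones(len(X_val))]
        err = np.mean((A_val @ w - y_val) ** 2)             # validation MSE
        cands.append((err, A @ w, A_val @ w))
    cands.sort(key=lambda c: c[0])                          # best candidates first
    keep = cands[:n_best]
    # Outputs of the surviving models feed the next GMDH layer.
    return (np.stack([c[1] for c in keep], axis=1),
            np.stack([c[2] for c in keep], axis=1))
```

Iterating this round until the best validation error stops improving mirrors the early-stopping behaviour of the `fit` method above.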
= angleDegrees - 45\r\n self.pandaModel.setH(-self.pandaOrientation)\r\n\r\n return Task.cont\r\n\r\n\r\n def moveTask(self, task):\r\n isDown = base.mouseWatcherNode.isButtonDown\r\n\r\n if isDown(KeyboardButton.asciiKey(\"+\")):\r\n self.cameraRadius += 1\r\n\r\n if isDown(KeyboardButton.asciiKey(\"-\")):\r\n self.cameraRadius -= 1\r\n\r\n return task.cont\r\n\r\n\r\n# create an object for the game and run it\r\nif __name__ == \"__main__\":\r\n game = MyGame()\r\n game.run()","repo_name":"D1scak3/CV_labs","sub_path":"lab5/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70449148722","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n i = 1\n for num in nums:\n if (target-num) in nums[i:]:\n if num == (target-num):\n index2 = nums[i:].index(target-num)+i\n else:\n index2 = nums.index(target-num)\n return [nums.index(num), index2]\n i = i + 1","repo_name":"zhaoqqi/leetcode","sub_path":"algorithms/python/twoSum/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10311398645","text":"from services.handlers.handlers_main import handle_search\n\ndef resolve_similarity_search(obj, info, useremail, apikey, input, tags, musthavememo, allowusersearch):\n print(\"resolve_similarity_search\")\n print(\"tags: \", tags)\n # auth\n # search\n result_set = handle_search(useremail, input, tags, musthavememo, allowusersearch) #problem_id, problem, user_search, has_memo, favorite, distance, [tag_id, tag_name, description], [link], sim\n payload = []\n for problem_id, problem, user_search, has_memo, favorite, distance, tags, links, sim in result_set:\n tags_arr = []\n links_arr = []\n for tag_id, tag_name, tag_description in tags:\n tag = {\n \"id\": tag_id,\n \"name\": tag_name,\n \"description\": tag_description\n }\n tags_arr.append(tag)\n\n equation = {\n \"id\": problem_id,\n \"latex\": problem,\n \"tags\": tags_arr,\n \"mathml\": \"String\",\n \"memolinks\": links,\n \"favorite\": favorite,\n \"issearch\": user_search\n }\n equation_similarity = {\n \"equation\": equation,\n \"similarity\": sim,\n \"distance\": distance\n }\n payload.append(equation_similarity)\n\n search_result = {\n \"success\": True,\n \"msg\": \"\",\n \"numberofresults\":len(result_set),\n \"equations\": payload\n }\n return search_result","repo_name":"COS301-SE-2022/MathU-Similarity-Index","sub_path":"server/api/queries/resolve_similarity_search.py","file_name":"resolve_similarity_search.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"17137626105","text":"import csv\nfrom django.core.management.base import BaseCommand, CommandError\nfrom learn.models import Answer_list ,Quiz\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('csv_file', nargs='+', type=str)\n\n def handle(self, *args, **options):\n Answer_list.objects.all().delete()\n for csv_file in options['csv_file']:\n dataReader = csv.reader(open(csv_file), delimiter=',', quotechar='\"')\n header = next(dataReader)\n for row in dataReader:\n print(row[0],row[1],row[2],row[3])\n Answer_list.objects.create(\n pk = row[0],\n question_id=Quiz.objects.get(pk=int(row[1])) 
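The `twoSum` record above re-scans `nums[i:]` with `in` for every element, which is quadratic overall. The standard one-pass dictionary version (the usual alternative, not the original author's code) is:

```python
def two_sum(nums, target):
    seen = {}                            # value -> index where it was first seen
    for i, num in enumerate(nums):
        if target - num in seen:         # complement was visited earlier
            return [seen[target - num], i]
        seen[num] = i
    return []

print(two_sum([2, 7, 11, 15], 9))        # [0, 1]
```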
,\n answer= row[2],\n definition=row[3],\n\n )\n # self.stdout.write(\n # 'Created answer {} {}'.format(Answer_list.pk,Answer_list.answer)\n # )\n","repo_name":"Alich13/Django_DLAD","sub_path":"learn/management/commands/import_answers_csv.py","file_name":"import_answers_csv.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14066219152","text":"# link: https://leetcode.com/problems/minimum-average-difference/\n\nclass Solution:\n def minimumAverageDifference(self, nums: List[int]) -> int:\n leftSum = 0\n rightSum = sum(nums)\n n = len(nums)\n minimum = float('inf')\n index = -1\n\n for i in range(n):\n leftSum += nums[i]\n rightSum -= nums[i]\n if i != n-1:\n diff = abs(leftSum // (i+1) - rightSum // (n-(i+1)))\n else:\n diff = abs(leftSum // (i+1))\n\n if diff < minimum:\n minimum = diff\n index = i\n\n\n return index\n\n","repo_name":"rbrn1999/leetcode-sol","sub_path":"problems/2256. Minimum Average Difference.py","file_name":"2256. Minimum Average Difference.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2025569329","text":"# Atividade\n# API da Sportmonks:\n# Configurar e extrair dados times\n# Banco de Dados:\n# Criar tabelas: Times (ID e nome)\n\nimport time\n\nimport openpyxl\nimport pandas as pd\nimport pyodbc\nimport requests\nfrom pandas import json_normalize\n\n# Parâmetros da conexão\nSERVER = 'DESKTOP-295VC9D'\nDATABASE = 'SPORTMONKS'\nUSERNAME = 'usuario'\nPASSWORD = 'senha'\n\n# Conexão com o SQL\nconnection_string = f'DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={SERVER};DATABASE={DATABASE};Trusted_Connection=yes'\n\nconn = pyodbc.connect(connection_string)\ncursor = conn.cursor()\n\n\n# Conexão e Extração dos dados da API\n\nURL = 'https://api.sportmonks.com/v3/football/teams'\nTOKEN = 'jJDE36NINPPXX54ZJJvL0H5YjgumgRCVjnUdTNwsW6VkrBcq2YQsiCHrax3v'\n\nURL_TOKEN = URL + '/?api_token=' + TOKEN\n\nheaders = {\n 'Authorization': TOKEN\n}\n\nresposta = requests.get(URL_TOKEN)\n\nif resposta.status_code == 200:\n dados = resposta.json()\n print(f\"Sucesso, Api conectada: {resposta.status_code}\")\nelse:\n print(f\"Erro ao acessar a API. 
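The running `leftSum`/`rightSum` bookkeeping in the `minimumAverageDifference` record above is an instance of the prefix-sum pattern; `itertools.accumulate` yields the same quantities directly. A small worked example:

```python
from itertools import accumulate

nums = [2, 5, 3, 9, 5, 3]
prefix = list(accumulate(nums))        # prefix[i] = nums[0] + ... + nums[i]
total = prefix[-1]
left_avg = prefix[2] // 3              # average of the first three elements
right_avg = (total - prefix[2]) // 3   # average of the remaining three
print(left_avg, right_avg)             # prints 3 5 for this input
```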
Status Code: {resposta.status_code}\")\n# print(resposta.text)\nNormalize_Json = json_normalize(dados['data'])\n\ntime.sleep(2)\n\n# Selecionando colunas Específicas\n\ndf_0 = Normalize_Json[['id', 'name']]\n\ndf_0 = df_0.rename(columns={\n 'id': 'TEAM_ID',\n 'name': 'TEAM_NAME'\n})\n\nprint('Organização do Data Frame finalizada!')\ntime.sleep(2)\n\ndf = pd.DataFrame(df_0)\n\nprint('Gerando um arquivo para avaliar os dados')\ntime.sleep(2)\nnome_do_arquivo = \"meus_dados.xlsx\"\ndf.to_excel(nome_do_arquivo, index=False, engine='openpyxl')\nprint(\"Tabela de times Disponível para criação\")\ntime.sleep(2)\nprint('Criando a tabela')\n\nwith open('CREATE_API_SPORTMONKS_TEAMS.SQL', 'r') as file:\n sql_create_tables = file.read()\n cursor.execute(sql_create_tables)\n\nconn.commit()\ntime.sleep(2)\nprint('Tebala Criada!')\ntime.sleep(2)\nprint('Inserindo os novos dados na tabela e atualizando os antigos.')\nwith open('MERGE_API_SPORTMONKS_TEAMS.SQL', 'r') as file:\n sql_merge = file.read()\n\nfor index, row in df.iterrows():\n cursor.execute(sql_merge, row['TEAM_ID'], row['TEAM_NAME'])\n\nconn.commit()\ntime.sleep(2)\nprint('Inserção concluída!')\ntime.sleep(2)\nprint('Checando volumetria de times inserida')\nsql_count = \"select count(distinct TEAM_ID) from API_SPORTMONKS_TEAMS with (nolock)\"\ncursor.execute(sql_count)\nnumber_of_teams = cursor.fetchone()[0]\n\nprint(f\"{number_of_teams} Times foram inseridos.\")\n","repo_name":"Checkmat196/API_To_SQL","sub_path":"Api_main.py","file_name":"Api_main.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35755815578","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author: Dominik Gresch \n# Date: 20.07.2016 17:32:38 CEST\n# File: test_log.py\n\nimport sys\nfrom io import StringIO\nfrom contextlib import contextmanager\n\nimport z2pack\nimport logging\n\n\nfrom hm_systems import simple_system, simple_surface, simple_line\n\nIGNORE_LINES = ['Calculation finished', 'starting at', 'Z2Pack version', ' at 0x']\n\ndef compare_lines(x, y):\n for xline, yline in zip(x.splitlines(), y.splitlines()):\n if any((part in xline) and (part in yline) for part in IGNORE_LINES):\n continue\n assert xline == yline\n return True\n\n@contextmanager\ndef CaptureLoggingOutput(compare_data):\n out = StringIO()\n handler = logging.StreamHandler(stream=out)\n handler.setFormatter(z2pack._logging_format.DefaultFormatter())\n logger = logging.getLogger('z2pack')\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n yield\n handler.flush()\n logger.removeHandler(handler)\n out.seek(0)\n res = out.read()\n compare_data(compare_lines, res)\n \n\ndef test_surface_report(compare_data, simple_system, simple_surface):\n with CaptureLoggingOutput(compare_data):\n result = z2pack.surface.run(system=simple_system, surface=simple_surface)\n\ndef test_line_report(compare_data, simple_system, simple_line):\n with CaptureLoggingOutput(compare_data):\n result = z2pack.line.run(system=simple_system, line=simple_line)\n \n","repo_name":"dlnguyen/Z2Pack","sub_path":"tests/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"534929962","text":"import logging\nimport threading\nimport time\nfrom dac.ad5734 import ad5734\nfrom adc.ltc1858 import ltc1858\nfrom mux.adg5409 import adg5409\n\nlogging.basicConfig()\n\nclass bbCommunicator():\n 
def __init__(self,\n spi_bus = 0,\n spi_client = 0,\n spi_freq = 1000000,\n spi_mode = 0b00,\n RD = \"P9_12\",\n SCLK_DAC = \"P8_11\",\n DIN_DAC = \"P8_12\",\n SYNC = \"P8_15\",\n LDAC = \"P8_16\"\n ):\n self.logger = logging.getLogger('bbCommunicator')\n self.logger.setLevel(logging.DEBUG)\n\n self.adc = ltc1858.ltc1858(spi_bus,spi_client,spi_freq,spi_mode)\n self.dac = ad5734.ad5734_chained(3,SCLK_DAC,DIN_DAC,SYNC,LDAC)\n self.mux = adg5409.adg5409()\n \n self.adc_event = threading.Event()\n self.mem_lock = threading.Lock()\n\n self.drain_sense_r = 2.0\n self.gate_sense_r = 100.0\n\n self.setup_dac()\n self.setup_adc()\n\n def __del__(self):\n self.adc_event.set()\n\n def setup_adc(self):\n #We will just monitor everything and see how\n #slow that is\n self.drainV = 0\n self.drainI = 1\n self.auxV = 2\n self.gndSense = 3\n self.gateaV = 4\n self.gateaI = 5\n self.gatebV = 6\n self.gatebI = 7\n\n self.adc.set_reg(self.drainV,True,\"+5V\")\n self.adc.set_reg(self.drainI,True,\"+-5V\")\n self.adc.set_reg(self.gndSense,True,\"+-5V\")\n self.adc.set_reg(self.auxV,True,\"+-5V\") \n self.adc.set_reg(self.gateaV,True,\"+-5V\") \n self.adc.set_reg(self.gateaI,True,\"+-5V\") \n self.adc.set_reg(self.gatebV,True,\"+-5V\")\n self.adc.set_reg(self.gatebI,True,\"+-5V\")\n\n def setup_dac(self):\n #We setup the dacs to the correct range\n #and place them in a list - It seems backwards\n #due to the way the chips work\n self.drain = []\n self.drain.append({\"Chip\" : 2, \"daq\" : \"DACA\", \"V\" : 0, \"I\" : 0})\n self.drain.append({\"Chip\" : 2, \"daq\" : \"DACD\", \"V\" : 0, \"I\" : 0})\n self.drain.append({\"Chip\" : 1, \"daq\" : \"DACC\", \"V\" : 0, \"I\" : 0})\n self.drain.append({\"Chip\" : 0, \"daq\" : \"DACB\", \"V\" : 0, \"I\" : 0})\n\n self.gatea = []\n self.gatea.append({\"Chip\" : 2, \"daq\" : \"DACB\", \"V\" : 0, \"I\" : 0})\n self.gatea.append({\"Chip\" : 1, \"daq\" : \"DACA\", \"V\" : 0, \"I\" : 0})\n self.gatea.append({\"Chip\" : 1, \"daq\" : \"DACD\", \"V\" : 0, \"I\" : 0})\n self.gatea.append({\"Chip\" : 0, \"daq\" : \"DACC\", \"V\" : 0, \"I\" : 0})\n\n self.gateb = []\n self.gateb.append({\"Chip\" : 2, \"daq\" : \"DACC\", \"V\" : 0, \"I\" : 0})\n self.gateb.append({\"Chip\" : 1, \"daq\" : \"DACB\", \"V\" : 0, \"I\" : 0})\n self.gateb.append({\"Chip\" : 0, \"daq\" : \"DACA\", \"V\" : 0, \"I\" : 0})\n self.gateb.append({\"Chip\" : 0, \"daq\" : \"DACD\", \"V\" : 0, \"I\" : 0})\n \n \n #And we set the daq ranges\n for i in xrange(4):\n tc = self.set_chip(self.drain[i][\"Chip\"])\n self.dac.set_range(tc, self.drain[i][\"daq\"], \"+10V\")\n\n tc = self.set_chip(self.gatea[i][\"Chip\"])\n self.dac.set_range(tc, self.gatea[i][\"daq\"], \"+-10V\")\n\n tc = self.set_chip(self.gateb[i][\"Chip\"])\n self.dac.set_range(tc, self.gateb[i][\"daq\"], \"+-10V\")\n\n def set_chip(self, chip):\n #Due to the way the ltc1858 works we need to\n #construct a list with the correct chip flag set to true\n tmp_chip = [False, False, False]\n tmp_chip[chip] = True\n return tmp_chip\n\n def set_dac(self,amp,tt,volts):\n #This sets the value on a particular amplifier\n if tt == \"drain\":\n tc = self.set_chip(self.drain[amp][\"Chip\"])\n self.dac.set_volts(tc,self.drain[amp][\"daq\"],volts)\n elif tt == \"gatea\":\n tc = self.set_chip(self.gatea[amp][\"Chip\"])\n self.dac.set_volts(tc,self.gatea[amp][\"daq\"],volts)\n elif tt == \"gateb\":\n tc = self.set_chip(self.gateb[amp][\"Chip\"])\n self.dac.set_volts(tc,self.gateb[amp][\"daq\"],volts)\n else:\n self.logger.error(\"BAD TYPE\")\n \n def latch(self):\n 
self.dac.chips[0].update_dacs()\n\n def set_power(self,amp,state):\n #This turns on or off an amplifier\n tc = self.set_chip(self.drain[amp][\"Chip\"])\n self.dac.set_power(tc,self.drain[amp][\"daq\"],state)\n\n tc = self.set_chip(self.gatea[amp][\"Chip\"])\n self.dac.set_power(tc,self.gatea[amp][\"daq\"],state)\n\n tc = self.set_chip(self.gateb[amp][\"Chip\"])\n self.dac.set_power(tc,self.gateb[amp][\"daq\"],state)\n\n\n def start_adc(self):\n self.adc_thread = threading.Thread(target=self.collect_adc_data)\n self.adc_thread.daemon = True\n self.adc_thread.start()\n\n def collect_adc_data(self):\n self.adc_event.clear()\n while not self.adc_event.isSet():\n self.collect_single_adc_data()\n\n time.sleep(0.25)\n\n def collect_single_adc_data(self):\n #set the mux position\n for i in xrange(4):\n self.mux.set_mux(i)\n self.mem_lock.acquire()\n self.adc.register_read()\n self.mem_lock.release()\n \n self.sense = self.adc.chip_reg[self.gndSense][\"V\"]\n\n self.drain[i][\"V\"] = self.calculate_voltage(self.adc.chip_reg[self.drainV][\"V\"])\n self.drain[i][\"I\"] = self.calculate_current(self.adc.chip_reg[self.drainI][\"V\"],\n self.adc.chip_reg[self.drainV][\"V\"],\n self.drain_sense_r,True)\n \n self.gatea[i][\"V\"] = self.calculate_voltage(self.adc.chip_reg[self.gateaV][\"V\"])\n self.gatea[i][\"I\"] = self.calculate_current(self.adc.chip_reg[self.gateaI][\"V\"],\n self.adc.chip_reg[self.gateaV][\"V\"],\n self.gate_sense_r)\n \n self.gateb[i][\"V\"] = self.calculate_voltage(self.adc.chip_reg[self.gatebV][\"V\"])\n self.gateb[i][\"I\"] = self.calculate_current(self.adc.chip_reg[self.gatebI][\"V\"],\n self.adc.chip_reg[self.gatebV][\"V\"],\n self.gate_sense_r)\n\n def calculate_voltage(self,v1):\n #Calculates the voltage - We use the class member\n #self.sense to remove GND sense voltage\n\n #return v1 - self.sense\n return v1\n\n def calculate_current(self,v1,v2,res_val,drain=False):\n #Calculates the current from the input voltages and resistances\n v1 = -v1\n #v1 = v1+self.sense # Invert because the board is crap\n #v2 = v2-self.sense\n if drain is False:\n return (v1-v2)*1000.0/res_val\n else:\n #Gain of 10 then milivolts\n return 100*v1/res_val\n","repo_name":"RossWilliamson/ccat_bitbang_chips","sub_path":"boards/ccat_bias_board/bbCommunicator.py","file_name":"bbCommunicator.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20765072371","text":"from binance.helpers import round_step_size\nfrom typing import Dict\nfrom binance import ThreadedWebsocketManager\nfrom telegram_alert import send_alert\nfrom binance.client import Client\nfrom binance.enums import *\nfrom binance.exceptions import BinanceAPIException, BinanceOrderException\nfrom datetime import datetime\nfrom termcolor import cprint\nimport os\nimport json\nfrom ws_message_handler import handle_ws_messages\n\n\nclass BinanceClient:\n def __init__(self):\n print(\"Starting Binance Client...\")\n testnet = os.environ.get('DEV') == 'True'\n\n self.RISK_REWARD_RATIO = float(os.environ.get('RISK_REWARD_RATIO'))\n\n if testnet:\n cprint(\"*** Using TestNet API ***\",\n 'green', attrs=['bold', 'underline'])\n\n api_key = os.environ.get('API_KEY_TESTNET')\n api_secret = os.environ.get('API_SECRET_TESTNET')\n else:\n api_key = os.environ.get('API_KEY')\n api_secret = os.environ.get('API_SECRET')\n\n self.client = Client(api_key, api_secret,\n testnet=testnet)\n self.twm = ThreadedWebsocketManager(\n api_key=api_key, 
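The `collect_adc_data` loop in the BeagleBone communicator above is the stock daemon-thread polling pattern, stopped through a `threading.Event`. Stripped of the hardware calls it reduces to:

```python
import threading
import time

stop = threading.Event()

def poll(period=0.25):
    while not stop.is_set():
        # ... read hardware registers / take a sample here ...
        time.sleep(period)

worker = threading.Thread(target=poll, daemon=True)
worker.start()
# later, from the controlling thread:
stop.set()
worker.join()
```

Using `stop.wait(period)` in place of `time.sleep(period)` would let the loop exit immediately on shutdown instead of finishing the current sleep.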
api_secret=api_secret, testnet=testnet)\n\n self.latest_transactions = []\n self.start_websocket()\n\n def start_websocket(self):\n self.twm.start()\n\n self.twm.start_user_socket(callback=handle_ws_messages)\n\n def get_open_orders(self, symbol=None):\n try:\n return self.client.get_open_orders(symbol=symbol)\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return []\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return []\n\n def cancel_order_by_id(self, orderId, symbol):\n return self.client.cancel_order(symbol=symbol, orderId=orderId)\n\n def fetch_data(self, symbol, interval=KLINE_INTERVAL_30MINUTE, limit=250, backtest=False):\n try:\n candles: Dict\n if not backtest:\n print(\n f\"Fetching new candles for {symbol} {datetime.now().isoformat()}\")\n candles = self.client.get_klines(\n symbol=symbol, interval=interval, limit=limit)\n else:\n print(\n f\"Fetching new historical candles for {symbol} {datetime.now().isoformat()}\")\n candles = self.client.get_historical_klines(\n symbol=symbol, interval=interval, limit=limit, start_str=\"1 Jan, 2021\")\n for line in candles:\n del line[5:]\n return candles\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return {}\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return {}\n\n def get_symbol_info(self, symbol):\n try:\n return self.client.get_symbol_info(symbol)\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n\n def get_balance(self, asset):\n try:\n return self.client.get_asset_balance(asset=asset)\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n\n def get_trade_fee(self, symbol):\n try:\n return self.client.get_trade_fee(symbol=symbol)\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return 15\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return 15\n\n def get_filter(self, filters, filter_type, filter_prop, get_float=False):\n for f in filters:\n if f['filterType'] == filter_type:\n result = (f[filter_prop])\n return float(result) if get_float else result\n\n def create_market_order(self, side, symbol_info, last_closed_price):\n try:\n symbol = symbol_info['symbol']\n quote_asset = symbol_info['quoteAsset']\n base_asset = symbol_info['baseAsset']\n quote_req_amount = float(os.environ.get('MAX_USDT_PRICE'))\n\n if os.environ.get('PLOT') == 'True':\n buy_balance = 5000\n else:\n buy_balance = float(self.get_balance(quote_asset)['free'])\n sell_balance = float(self.get_balance(base_asset)['free'])\n\n if side == SIDE_SELL:\n amount_of_crypto_in_quote = sell_balance * last_closed_price # ish\n if amount_of_crypto_in_quote >= quote_req_amount / 2:\n quote_amount = amount_of_crypto_in_quote * 0.9\n else:\n return\n else:\n if quote_req_amount > buy_balance:\n quote_amount = buy_balance\n else:\n quote_amount = quote_req_amount\n\n filters = symbol_info['filters']\n tick_size = self.get_filter(\n filters, 'PRICE_FILTER', 'tickSize', True)\n req_price = round_step_size(quote_amount, tick_size)\n\n if os.environ.get('PLOT') == 'True':\n 
with open('example_responses.json') as json_file:\n data = json.load(json_file)\n return data['market_order']\n\n print('Placing market order...')\n order = self.client.order_market(\n symbol=symbol, quoteOrderQty=req_price, side=side)\n\n return order\n\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n\n def create_oco_order(self, side, market_order, df, symbol_info):\n symbol = symbol_info['symbol']\n filters = symbol_info['filters']\n market_order_fills = market_order.get('fills')\n market_order_qty = float(market_order.get('executedQty'))\n\n purchase_price = sum(\n [float(f['price']) * (float(f['qty']) / market_order_qty) for f in market_order_fills])\n print('purchase_price: ', purchase_price)\n tick_size = self.get_filter(\n filters, 'PRICE_FILTER', 'tickSize', True)\n print('tick_size: ', tick_size)\n\n # stop signal at closest swing low\n lowest_of_last_10 = df.tail(11)['low'].min()\n print('lowest_of_last_10: ', lowest_of_last_10)\n stopPrice = round_step_size(lowest_of_last_10 - tick_size, tick_size)\n\n # *** Stop loss ***\n # Long: set below the pullback of the trend\n # Short: set above the pullback of the trend\n # actual sell price. To be more secure, set lower than stopPrice\n atr_at_lowest = 1\n for row in df.tail(11).itertuples():\n if(row.low == lowest_of_last_10 and row.atr > 0):\n atr_at_lowest = row.atr\n break\n print('atr_at_lowest', atr_at_lowest)\n stopLimitPrice = round_step_size(\n stopPrice - (tick_size * atr_at_lowest), tick_size)\n\n # *** Take profit (higher than bought price) ***\n # Set 1.5x the size of the stop loss\n req_price = 0\n if side == SIDE_SELL:\n req_price = purchase_price + \\\n ((purchase_price - stopPrice) * self.RISK_REWARD_RATIO)\n else:\n req_price = purchase_price + \\\n ((stopPrice - purchase_price) * self.RISK_REWARD_RATIO)\n take_profit = round_step_size(req_price, tick_size)\n\n if os.environ.get('PLOT') == 'True':\n return take_profit, stopPrice, stopLimitPrice\n\n # Price Restrictions:\n # SELL: Limit Price > Last Price > Stop Price\n # BUY: Limit Price < Last Price < Stop Price\n try:\n # creates two orders, one take profit and one stop-loss\n cprint(\n f\"Sending OCO order: take profit: {take_profit}, stopPrice: {stopPrice}, stopLimitPrice: {stopLimitPrice}, qty: {market_order_qty}\", 'green', attrs=['bold'])\n\n self.client.create_oco_order(\n symbol=symbol,\n side=side, # SELL/BUY\n quantity=market_order_qty,\n price=str(take_profit),\n stopPrice=str(stopPrice),\n stopLimitPrice=str(stopLimitPrice),\n stopLimitTimeInForce=TIME_IN_FORCE_GTC\n )\n\n return True\n except BinanceAPIException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n except BinanceOrderException as e:\n cprint(e, 'red', attrs=['bold'])\n send_alert(e, True)\n return False\n\n def validate(self, symbol, side, type, price, qty):\n order = self.client.create_test_order(\n symbol=symbol,\n side=side,\n type=type,\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=qty,\n price=price\n )\n print(order)\n\n def topup_bnb(self, min_balance=1.0, topup=2.5, symbol='BNBUSDT'):\n ''' Top up BNB balance if it drops below minimum specified balance '''\n\n bnb_balance = self.client.get_asset_balance(asset='BNB')\n bnb_balance = float(bnb_balance['free'])\n if bnb_balance < min_balance:\n qty = round(topup - bnb_balance, 5)\n print(f\"Quantity to topup: {qty}\")\n order = 
self.client.order_market_buy(symbol=symbol, quantity=qty)\n return order\n return False\n","repo_name":"christopherbengtsson/trading-bot","sub_path":"binance_client.py","file_name":"binance_client.py","file_ext":"py","file_size_in_byte":10208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40830347318","text":"from PIL import Image\n\n#Images preprocessed to be of same size 256X256\n\nfor i in range(1,16):\n im = Image.open(\"VanGogh/img\"+str(i)+\".jpg\")\n new_width = 256 \n new_height = 256\n im = im.resize((new_width, new_height), Image.ANTIALIAS)\n im.save(\"VanGogh/img\"+str(i)+\".jpg\")\n\n","repo_name":"cs60050/ArtificiallyArtistic","sub_path":"NaiveImplementation/ImageResize.py","file_name":"ImageResize.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"26954302381","text":"# Import libraries\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport PIL\nfrom tensorflow.keras.models import load_model\nfrom tabulate import tabulate\n\n\n# Apply initial preprocessing to sudoku image\ndef preprocess(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (3, 3), 6)\n threshold_img = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)\n return threshold_img\n\n\n# Get the main outline of the contour image\ndef main_outline(contour_img):\n biggest = np.array([])\n max_area = 0\n\n for i in contour_img:\n area = cv2.contourArea(i)\n if area > 100:\n peri = cv2.arcLength(i, True)\n approx = cv2.approxPolyDP(i, 0.02 * peri, True)\n\n if area > max_area and len(approx) == 4:\n biggest = approx\n max_area = area\n\n return biggest, max_area\n\n\n# Reframe the sudoku image so that sudoku occupies the whole image removing useless regions\ndef reframe(points):\n points = points.reshape((4, 2))\n points_new = np.zeros((4, 1, 2), dtype=np.int32)\n\n add = points.sum(1)\n points_new[0] = points[np.argmin(add)]\n points_new[3] = points[np.argmax(add)]\n\n diff = np.diff(points, axis=1)\n points_new[1] = points[np.argmin(diff)]\n points_new[2] = points[np.argmax(diff)]\n\n return points_new\n\n\n# Split the (9x9) matrix into 81 individual cells\ndef splitcells(img):\n rows = np.vsplit(img, 9)\n cells = []\n\n for r in rows:\n cols = np.hsplit(r, 9)\n for cell in cols:\n cells.append(cell)\n\n return cells\n\n\n# Crop each cell to keep only the digit in each cell\ndef CropCell(cells):\n cropped_cells = []\n\n for image in cells:\n img = np.array(image)\n img = img[6:46, 6:46]\n img = Image.fromarray(img)\n cropped_cells.append(img)\n\n return cropped_cells\n\n# Backtracking Algorithm to solve sudoku\ndef is_safe(grid, row, col, num):\n for c in range(9):\n if c!=col and grid[row][c] == num:\n return False\n\n for r in range(9):\n if r!=row and grid[r][col] == num:\n return False\n\n sr = row - row % 3\n sc = col - col % 3\n\n for i in range(3):\n for j in range(3):\n if i+sr!=row and j+sc!=col and grid[i + sr][j + sc] == num:\n return False\n\n return True\n\nn = 9\ndef solve_sudoku(grid, row, col):\n if row == n-1 and col==n:\n return True\n\n if col == n:\n row += 1\n col = 0\n\n if grid[row][col] > 0:\n return solve_sudoku(grid, row, col + 1)\n\n for num in range(1, n + 1, 1):\n if is_safe(grid, row, col, num):\n grid[row][col] = num\n if solve_sudoku(grid, row, col + 1):\n return True\n grid[row][col] = 0\n\n return False\n\n# Function to build sudoku matrix after 
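The `create_oco_order` method above derives the stop from the last swing low and places the target `RISK_REWARD_RATIO` times the risk above the entry. The arithmetic in isolation, reusing `round_step_size` from `python-binance` (the same helper the file imports; the prices and tick size below are made up for illustration):

```python
from binance.helpers import round_step_size

entry, swing_low, tick, rr = 100.00, 97.30, 0.01, 1.5

stop = round_step_size(swing_low - tick, tick)        # stop trigger below the swing low
stop_limit = round_step_size(stop - tick, tick)       # actual sell price, one tick lower
target = round_step_size(entry + (entry - stop) * rr, tick)  # rr times the risk above entry
print(stop, stop_limit, target)
```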
processing each image\ndef build_sudoku_matrix(filename):\n sudoku = cv2.imread(f'static\\{filename}')\n sudoku = cv2.resize(sudoku, (450, 450))\n thres = preprocess(sudoku)\n\n contour_1 = sudoku.copy()\n contour_2 = sudoku.copy()\n\n cont, hierarchy = cv2.findContours(thres, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(contour_1, cont, -1, (0, 255, 0), 3)\n\n # Draw main outline of sudoku in the image\n black_img = np.zeros((450, 450, 3), np.uint8)\n biggest, maxArea = main_outline(cont)\n\n if biggest.size != 0:\n biggest = reframe(biggest)\n cv2.drawContours(contour_2, biggest, -1, (0, 255, 0), 10)\n\n pts1 = np.float32(biggest)\n pts2 = np.float32([[0, 0], [450, 0], [0, 450], [450, 450]])\n\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imagewrap = cv2.warpPerspective(sudoku, matrix, (450, 450))\n imagewrap = cv2.cvtColor(imagewrap, cv2.COLOR_BGR2GRAY)\n\n # Apply binary thresholding\n (thresh, bw_img) = cv2.threshold(imagewrap, 150, 255, cv2.THRESH_BINARY)\n\n # Split cells and crop margins\n sudoku_cells = splitcells(bw_img)\n sudoku_cells_croped = CropCell(sudoku_cells)\n\n # Load Custom CNN Model for digits prediction\n model = load_model(\"Model.h5\")\n model.load_weights(\"Model_weights.h5\")\n\n # Build Sudoku Grid\n grid = [[0 for x in range(9)]for y in range(9)]\n\n # Get predictions and fill digits in sudoku grid\n itr = 0\n for i in range(9):\n for j in range(9):\n img = np.array(sudoku_cells_croped[itr])\n img = img / 255.0\n\n pred = model.predict(img.reshape(1, 40, 40, 1))\n\n if np.max(pred) > 0.95:\n grid[i][j] = np.argmax(pred)\n else:\n grid[i][j] = 0\n\n itr += 1\n\n return grid\n\n# Function to solve the sudoku grid\ndef sudoku_result(grid):\n for i in range(0,9):\n for j in range(0,9):\n if grid[i][j]!=0 and is_safe(grid, i, j, grid[i][j])==False:\n return -1\n\n if solve_sudoku(grid, 0, 0):\n return grid\n else:\n return -1","repo_name":"pranshu-09/Sudoku-Solver-Web-Application","sub_path":"solve_sudoku.py","file_name":"solve_sudoku.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9675934732","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\n\n\ndef get_score(i, name):\n return i * sum(list(map(lambda c: ord(c) - ord('A') + 1, name)))\n\n\ndef solve():\n with open('p022_names.txt', 'r') as name_file:\n reader = csv.reader(name_file)\n names = list(reader)[0]\n\n names.sort()\n\n scores = [get_score(i + 1, name) for i, name in enumerate(names)]\n return sum(scores)\n\n\ndef main():\n print(get_score(938, 'COLIN'))\n print(solve())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"masaponto/project-euler","sub_path":"p022/p022.py","file_name":"p022.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10646002573","text":"# filter\nfrom functools import reduce\nmy_list = [1, 4, 5, 6, 9, 13, 19, 21]\nodd = list(filter(lambda x: x % 2 != 0, my_list))\nprint(odd)\n\n# map\nmy_list_2 = [1, 2, 3, 4, 5]\nsquares = list(map(lambda x: x**2, my_list_2))\nprint(squares)\n\n# reduce\nmy_list_3 = [2, 2, 2, 2, 2]\nall_multiplied = reduce(lambda a, b: a * b, 
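In the sudoku record above, `is_safe` locates a cell's 3x3 box by snapping the coordinates to the box's top-left corner (`sr = row - row % 3`). A quick standalone check of that index arithmetic:

```python
for row, col in [(0, 0), (4, 7), (8, 3)]:
    sr, sc = row - row % 3, col - col % 3                 # top-left corner of the box
    box = [(sr + i, sc + j) for i in range(3) for j in range(3)]
    assert (row, col) in box                              # the cell lies in its own box
    print((row, col), "->", (sr, sc))
```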
my_list_3)\nprint(all_multiplied)\n","repo_name":"nelodev/intermediate-python","sub_path":"high_order_functions.py","file_name":"high_order_functions.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36799777743","text":"# valueIterationAgents.py\n# -----------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nimport mdp, util\n\nfrom learningAgents import ValueEstimationAgent\n\n#import numpy as np\n#import matplotlib.pyplot as plt\n#import csv\n\nclass ValueIterationAgent(ValueEstimationAgent):\n \"\"\"\n * Please read learningAgents.py before reading this.*\n\n A ValueIterationAgent takes a Markov decision process\n (see mdp.py) on initialization and runs value iteration\n for a given number of iterations using the supplied\n discount factor.\n \"\"\"\n def __init__(self, mdp, discount = 0.9, iterations = 100):\n \"\"\"\n Your value iteration agent should take an mdp on\n construction, run the indicated number of iterations\n and then act according to the resulting policy.\n\n Some useful mdp methods you will use:\n mdp.getStates()\n mdp.getPossibleActions(state)\n mdp.getTransitionStatesAndProbs(state, action)\n mdp.getReward(state, action, nextState)\n mdp.isTerminal(state)\n \"\"\"\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Write value iteration code here\n \"*** YOUR CODE HERE ***\"\n ## three loops: for iteration,for states,for actions\n j=0\n tt=0\n n=self.iterations\n sset=self.mdp.getStates()\n Vvalue=[]\n #print(sset)\n #print(len(sset))\n def largest(array,N):\n larg = array[0] # Initial value \n i=1 # compare every element with \n while i larg: \n larg = array[i] # current max\n i=i+1\n return larg\n while j larg: \n larg = array[i] # current max\n i=i+1\n #print (\"Largest in given array is\",maxm)\n return larg\n opt_policy= None\n if dt==False:# if it's not terminal state\n #acts=self.mdp.getPossibleActions(state)\n Q_value=[]\n #get all Qvalue\n sets=self.mdp.getPossibleActions(state)\n #print(len(sets))\n #print(sets[0])\n t1=0\n while t12:\n for i in range(self.n-1,1,-1) :\n R,M = self._get_RM(self.A[0:i,0:i],self.b[0:i],i)\n sh = M.shape\n self.MM.append(zeros(self.M.shape))\n self.MM[-1][0:sh[0],0:sh[1]] = M\n self.RR.append(zeros(self.R.shape))\n self.RR[-1][0:sh[0]] = R\n \n def fz(self,z) :\n z = reshape(array(z),[-1,1])\n if len(z) == self.n-1 :\n f = self.r*self.R+self.M@z\n if any(x<0 for x in f) : # invalid flow; start working thru fewer-link flows\n for R,M in zip(self.RR,self.MM) :\n# n = len(R)\n# print(M)\n# print(R)\n# print(z[0:(n-1)])\n fhere = self.r*R + M@z\n if not any(x<0 for x in fhere) :\n f = fhere\n break # implicit: else loop again\n # finally, we may not have fixed it:\n if any(x<0 for x in f):\n f = zeros([self.n,1])\n f[0] = self.r\n return f\n 
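The nested loops in the `ValueIterationAgent` record compute the standard Bellman backup, V_{k+1}(s) = max_a sum_{s'} T(s,a,s') [R(s,a,s') + gamma * V_k(s')], in a roundabout way. One sweep of the same update over a dictionary-encoded MDP (a toy interface for illustration, not Berkeley's `mdp` API):

```python
def value_iteration_sweep(states, actions, T, R, V, gamma=0.9):
    # T[(s, a)] is a list of (next_state, prob); R[(s, a, s2)] is the reward.
    new_V = {}
    for s in states:
        q_values = [sum(p * (R[(s, a, s2)] + gamma * V[s2]) for s2, p in T[(s, a)])
                    for a in actions(s)]
        new_V[s] = max(q_values) if q_values else 0.0   # terminal states stay at 0
    return new_V
```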
else :\n raise IndexError('z is the wrong length; needs to be n-1')\n \n\n \nclass ParallelFixed(Parallel) :\n def __init__(self,a,b) :\n self.A = diag(a)\n self.b = array(b).reshape([-1,1])\n self.n = len(a)\n \n def ft(self,t) :\n # t is n-vector of tolls (or n-list)\n # this should be extremely slow because it instantiates a new net, but it seems to work\n t = array(t)\n t = t.reshape([-1,1])\n newconst = t+self.b\n idx = newconst[:,0].argsort(axis=0)\n fakeconst = newconst[idx] # sorted b + t\n tempNet = Parallel(self.A.diagonal()[idx],fakeconst)\n tempNet.r = self.r\n unorderedflow = tempNet.fz([1]*(self.n-1))\n# print(unorderedflow)\n# print(idx)\n flow = zeros([self.n,1])\n flow[idx] = unorderedflow\n return flow\n \n \nclass twoPathGeneric(Network):\n # ae is length 3 list or array\n # This network:\n # e3 e1\n # 0-------0--------0\n # | |\n # | e2 |\n # |--------|\n # constructs A for this network\n\n def __init__(self,a,b):\n # ae is length 3 list or array\n # if len(b)=2, b is path-b's;\n # if len(b)=3, b is edge-b's\n self.A = diag(a[0:2])+a[2]*ones([2,2])\n if len(b) == 2: # case when b corresponds to path b's\n self.b = array(b)\n else : # b corresponds to edge b's\n self.b = array([b[0:2]])+b[2]*ones(2)\n self.populate()\n \nclass threePathGeneric(Network):\n # This network: (different numbers from 6/17/16 p.3, but same net)\n #\n # e1\n # | ----------------|\n # | |\n # e4 | e2 e5 |\n # 0-------0--------0--------0\n # | |\n # | e3 |\n # |----------------|\n\n def __init__(self,a,b,permutation=[0,1,2]):\n # ae is length 5 list or array\n # if len(b)=3, b is path-b's;\n # if len(b)=5, b is edge-b's\n m12 = zeros([3,3])\n m12[0:2,0:2] = ones([2,2])\n m23 = zeros([3,3])\n m23[1:3,1:3] = ones([2,2])\n permute = array(permutation)\n self.A = diag(a[0:3])+a[3]*m12+a[4]*m23\n self.A = self.A[permute,:][:,permute]\n if len(b) == 3: # case when b corresponds to path b's\n self.b = array(b)\n else : # b corresponds to edge b's\n self.b = array(b[0:3])+b[3]*m12[:0],+b[4]*m23[:1]\n self.b = self.b[permute]\n self.populate()\n \nclass fourPathGeneric1(Network):\n # This network: (different numbers from 6/17/16 p.6, but same net)\n #\n # e1\n # | --------------------------------|\n # | e2 |\n # | |---------------| |\n # e5 | e6 | e3 e7 | e8 |\n # 0-------0--------0-------0-------0--------0\n # | |\n # | e4 |\n # |------------------------|\n\n def __init__(self,a,b,permute=[0,1,2,3]):\n # ae is length 5 list or array\n # if len(b)=4, b is path-b's;\n # if len(b)=8, b is edge-b's\n # note: permute is confusing, but to see path labels on above diagram,\n # just execute reshape(permute[permute]+1,[4,1])\n Ae5 = zeros([4,4])\n Ae5[0:3,0:3] = ones([3,3])\n Ae6 = zeros([4,4])\n Ae6[1:3,1:3] = ones([2,2])\n Ae7 = zeros([4,4])\n Ae7[2:4,2:4] = ones([2,2])\n Ae8 = zeros([4,4])\n Ae8[1:4,1:4] = ones([3,3])\n self.A = diag(a[0:4])+a[4]*Ae5+a[5]*Ae6+a[6]*Ae7+a[7]*Ae8\n self.A = self.A[permute,:][:,permute]\n if len(b) == 4: # case when b corresponds to path b's\n self.b = array(b)\n else : # b corresponds to edge b's\n self.b = array(b[0:4])+b[4]*Ae5[:,0]+b[5]*Ae6[:,1]+b[6]*Ae7[:,2]+b[7]*Ae8[:,3]\n self.b = self.b[permute]\n self.populate()\n \n","repo_name":"biophil/routing-games","sub_path":"linear/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"19857560448","text":"import pickle\n\nfrom pyro.infer import MCMC, Predictive\nfrom scipy.constants import 
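The `Parallel` class above appears to compute equilibrium flows for parallel links with affine latencies a_i * f_i + b_i (the `f = r*R + M@z` closed form). Under that reading, a two-link equilibrium is defined by equal latencies plus flow conservation, which can be solved directly; a sketch with made-up link parameters:

```python
import numpy as np

a1, b1, a2, b2, r = 1.0, 0.0, 2.0, 1.0, 3.0
A = np.array([[a1, -a2], [1.0, 1.0]])    # latency equality, flow conservation
rhs = np.array([b2 - b1, r])
f1, f2 = np.linalg.solve(A, rhs)
print(f1, f2)                            # 7/3 and 2/3; both links see latency 7/3
```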
value\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport numpy as np\nimport os, sys\n\nfrom bnn_utils import *\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nfrom samplers.sghmc import SGHMC\n\n\ndef test_sghmc(model, posterior_samples, test_loader):\n predictive = pyro.infer.Predictive(model=model, posterior_samples=posterior_samples, return_sites=(\"_RETURN\", \"obs\"))\n\n acc = test_posterior(predictive, test_loader)\n\n print(f\"Test Accuracy: {acc:.3f}\")\n return acc\n\n\ndef sample_sghmc(model, train_loader, num_samples, num_burnin, step_size=0.1, num_steps=4, friction=0.1, resample_r_freq=1):\n pyro.clear_param_store()\n sghmc_kernel = SGHMC(model, step_size=step_size, num_steps=num_steps, friction=friction, resample_r_freq=resample_r_freq)\n mcmc = MCMC(sghmc_kernel, num_samples=num_samples, warmup_steps=num_burnin, disable_progbar=False)\n mcmc.run(train_loader)\n posterior_samples = mcmc.get_samples()\n return posterior_samples\n\n\ndef tune_sghmc_hyperparameters(model, num_burnin=50, num_samples=800):\n train_dataset = MNIST_50('./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), ]), length=50000)\n val_dataset = MNIST_50('./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), ]), length=10000, offset=50000)\n test_dataset = datasets.MNIST('./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), ]))\n\n train_loader = DataLoader(train_dataset, batch_size=500, shuffle=False)\n val_loader = DataLoader(val_dataset, batch_size=500, shuffle=False)\n test_loader = DataLoader(test_dataset, batch_size=500, shuffle=False)\n\n model = BNN(28 ** 2, 100, 10)\n\n print(\"Testing friction and resampling of r\")\n print(\"(step_size=5e-2, num_steps=4)\")\n\n friction = [0.1, 1, 10]\n resample_r_freq = [1, 5, 10, 25]\n\n best_acc = 0\n best_posterior_samples = None\n best_friction = None\n best_resample_r_freq = None\n\n results_friq_resample_r_freq = np.zeros((len(friction), len(resample_r_freq)))\n\n for i, f in enumerate(friction):\n for j, r in enumerate(resample_r_freq):\n posterior_samples = sample_sghmc(model, train_loader, step_size=0.05, num_steps=4, num_samples=num_samples, num_burnin=num_burnin, friction=f, resample_r_freq=r)\n predictive = Predictive(model, posterior_samples, return_sites=['_RETURN', 'OBS'])\n acc = test_posterior(predictive, val_loader)\n print(f\"Friction={f}, Resample_r_freq={r}, acc={acc:.3f}\")\n if acc >= best_acc:\n best_friction = f\n best_resample_r_freq = r\n best_acc = acc\n best_posterior_samples = posterior_samples\n results_friq_resample_r_freq[i, j] = acc\n\n print(f\"Best Friction={best_friction}, Best Resample_r_freq={best_resample_r_freq}\")\n\n np.savetxt(\"results/mnist_sghmc_friq_resample_r_freq.csv\", results_friq_resample_r_freq)\n\n print(\"Testing number of steps and step sizes\")\n\n num_steps = [2, 4, 8, 16]\n step_size = [0.0125, 0.025, 0.05, 0.1]\n\n best_acc = 0\n best_num_steps = None\n best_step_size = None\n\n results_num_steps_step_size = np.zeros((len(num_steps), len(step_size)))\n\n for i, num_steps_ in enumerate(num_steps):\n for j, step_size_ in enumerate(step_size):\n posterior_samples = sample_sghmc(model, train_loader, step_size=step_size_, num_steps=num_steps_, num_samples=num_samples,\n num_burnin=num_burnin, friction=best_friction, resample_r_freq=best_resample_r_freq)\n predictive = Predictive(model, 
posterior_samples, return_sites=['_RETURN', 'OBS'])\n acc = test_posterior(predictive, val_loader)\n print(f\"num_steps={num_steps_}, step_size={step_size_}, accuracy={acc:.3f}\")\n if acc >= best_acc:\n best_num_steps = num_steps_\n best_step_size = step_size_\n best_acc = acc\n best_posterior_samples = posterior_samples\n\n results_num_steps_step_size[i, j] = acc\n\n print(f\"Best num_steps={best_num_steps}, Best step_size={best_step_size}\")\n\n np.savetxt(\"results/mnist_sghmc_num_steps_step_size.csv\", results_num_steps_step_size)\n\n predictive = Predictive(model, best_posterior_samples, return_sites=['_RETURN', 'OBS'])\n test_acc = test_posterior(predictive, test_loader)\n print(f\"Final test acc={test_acc:.3f}\")\n\n with open(f\"results/posterior_samples_sghmc.pkl\", \"wb\") as f:\n pickle.dump(best_posterior_samples, file=f)\n\n return best_posterior_samples, best_friction, best_resample_r_freq, best_num_steps, best_step_size\n\n\nif __name__ == \"__main__\":\n print(\"Choose an experiment from above to run\")\n bnn = BNN(28 * 28, 100, 10)\n posterior_samples, friction, resample_r_freq, num_steps, step_size = tune_sghmc_hyperparameters(bnn, num_samples=800)\n","repo_name":"ATMLGroup1-2021/SGHMC","sub_path":"experiments/mnist_sghmc.py","file_name":"mnist_sghmc.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38759714321","text":"import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom data.binary_dataset import BinaryDataSet\nfrom models.ncf_model import NcfModel\nfrom options.train_option import TrainOption\nfrom options.util_option import log\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\nif __name__ == '__main__':\n opt = TrainOption().parse()\n\n dataset = BinaryDataSet(opt.train_filepath, opt.negative_num)\n test_dataset = BinaryDataSet(opt.test_filepath, opt.negative_num)\n opt.user_size = dataset.user_size\n opt.movie_size = dataset.movie_size\n log(opt)\n\n dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, collate_fn=dataset.collate_fn,\n num_workers=2)\n test_dataloader = DataLoader(test_dataset, batch_size=opt.test_batch_size, shuffle=False,\n collate_fn=dataset.collate_fn)\n model = NcfModel(opt)\n print(model.model)\n\n HR_max = 0\n NDCG_max = 0\n times = 0\n for epoch in range(opt.epoch):\n dataloader = tqdm(dataloader)\n dataloader.set_description(\n '[%s%04d/%04d %s=%f]' % ('Epoch:', epoch + 1, opt.epoch, 'lr', model.opt.learning_rate))\n\n # train\n model.model.train()\n loss_sum = 0\n for i, data in enumerate(dataloader, 0):\n model.set_input(data)\n model.optimize_parameters()\n loss_sum += model.loss.item()\n dataloader.set_postfix({'loss': loss_sum / (i + 1)})\n\n model.model.eval()\n HR, NDCG = model.test(test_dataloader)\n\n if HR > HR_max:\n # save the model\n HR_max = HR\n NDCG_max = NDCG\n times = epoch\n model.save_model(opt.checkpoint_root, f'NCF_epoch{epoch}')\n\n print(f'save the best HR model in epoch {times} with HR:{HR_max} and NDCG:{NDCG_max}')\n","repo_name":"JudgementH/NCF","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41861418936","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader as Dataloader\r\nfrom 
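The two nested hyperparameter loops in `tune_sghmc_hyperparameters` above are a hand-rolled grid search; the same sweep generalizes with `itertools.product`. Here `evaluate` is a purely illustrative stand-in for the sample-then-test step and is not part of the original code:

```python
from itertools import product

def evaluate(friction, resample_r_freq):
    # Stand-in for "run SGHMC, test the posterior"; illustrative only.
    return 1.0 / (friction + resample_r_freq)

grid = {"friction": [0.1, 1, 10], "resample_r_freq": [1, 5, 10, 25]}
combos = [dict(zip(grid, values)) for values in product(*grid.values())]
best = max(combos, key=lambda params: evaluate(**params))
print(best)   # {'friction': 0.1, 'resample_r_freq': 1} with this stub
```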
torchvision import models, transforms\r\nfrom torch.autograd import Variable\r\n# from torch.optim.lr_scheduler import *\r\n\r\nimport os\r\nimport math\r\nimport re\r\nimport glob\r\nimport pickle\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport sys\r\nimport resnet\r\nfrom modules import MFH, GatedTanh, CSF, CS\r\n\r\nstdModule = resnet.resnet152(True)\r\n#print(list(list(stdModule.layer4.children())[0:-1]))\r\n\r\n#CSFMODEL(args.l, args.s, args.g, len(train_set.codebook['itow']), len(train_set.codebook['itoa']) + 1, hidden_size=1024, emb_size=emb_size)\r\nclass CSFMODEL(nn.Module):\r\n def __init__(self, use_gru, layers, submodel, grad, num_words, num_ans, hidden_size=512, emb_size=300, inplanes=512 * 4, planes=512, stride=1):\r\n super(CSFMODEL, self).__init__()\r\n self.layers=layers\r\n self.use_gru=use_gru\r\n\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.avgpool = nn.AvgPool2d(7, stride=1)\r\n self.fc = nn.Linear(2048, 1024)\r\n\r\n # 一开始的input是B x S, 但是Embedding S x B -> S x B x I,所以要先转置成S x B\r\n self.we = nn.Embedding(num_words, emb_size, padding_idx=0)\r\n self.lstm = nn.LSTM(input_size=emb_size,\r\n hidden_size=hidden_size,\r\n num_layers=1,\r\n batch_first=True)\r\n\r\n self.gru = nn.GRU(input_size=emb_size,\r\n hidden_size=hidden_size,\r\n num_layers=1,\r\n batch_first=True)\r\n self.lstmdp = nn.Dropout(0.3)\r\n\r\n # CSF(img_size, h_size, latent_dim, output_size, block_count) img_size=[C,H,W]\r\n if submodel=='csf':\r\n self.csf1 = CSF((512, 7, 7), hidden_size, 4, 1024, 2)\r\n self.csf2 = CSF((512, 7, 7), hidden_size, 4, 1024, 2)\r\n self.csf3 = CSF((2048, 7, 7), hidden_size, 4, 1024, 2)\r\n else:\r\n #(self, img_size, h_size, k_size=512)\r\n self.csf1 = CS((512, 7, 7), hidden_size, k_size=512)\r\n self.csf2 = CS((512, 7, 7), hidden_size, k_size=512)\r\n self.csf3 = CS((2048, 7, 7), hidden_size, k_size=512)\r\n\r\n\r\n self.pred_mfh = MFH(x_size=1024, y_size=hidden_size, latent_dim=4, output_size=1024,block_count=2) # (batch_size,36,o) or (batch_size,o)\r\n # self.pred_net = nn.Sequential(\r\n # nn.Linear(2048, num_ans),\r\n # nn.Sigmoid(dim=1))\r\n self.pred_net=nn.Linear(2048, num_ans)\r\n\r\n # initialization\r\n # Returns an iterator over all modules in the network. Duplicate modules are returned only once.\r\n # 这一部分用来对模型中的conv随机初始化参数, 对batchnorm层初始化参数\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # kernel: H*W*C\r\n # Fills self tensor with elements samples from the normal distribution parameterized by mean and std.\r\n # normal_(mean=0, std=1, *, generator=None)\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n stdconv1 = list(stdModule.layer4.children())[2].conv1\r\n self.conv1.weight = nn.Parameter(list(stdconv1.parameters())[0].data.clone())\r\n\r\n stdbn1 = list(stdModule.layer4.children())[2].bn1\r\n self.bn1.weight = nn.Parameter(list(stdbn1.parameters())[0].data.clone())\r\n self.bn1.bias = nn.Parameter(list(stdbn1.parameters())[1].data.clone())\r\n\r\n if not grad :\r\n for param in list(self.conv1.parameters()):\r\n param.requires_gard = False\r\n for param in list(self.bn1.parameters()):\r\n param.requires_gard = False\r\n\r\n stdconv2 = list(stdModule.layer4.children())[2].conv2\r\n self.conv2.weight = nn.Parameter(list(stdconv2.parameters())[0].data.clone())\r\n\r\n stdbn2 = list(stdModule.layer4.children())[2].bn2\r\n self.bn2.weight = nn.Parameter(list(stdbn2.parameters())[0].data.clone())\r\n self.bn2.bias = nn.Parameter(list(stdbn2.parameters())[1].data.clone())\r\n\r\n if (not grad) or (grad and layers<3 ) :\r\n for param in list(self.conv2.parameters()):\r\n param.requires_gard = False\r\n for param in list(self.bn2.parameters()):\r\n param.requires_gard = False\r\n\r\n stdconv3 = list(stdModule.layer4.children())[2].conv3\r\n self.conv3.weight = nn.Parameter(list(stdconv3.parameters())[0].data.clone())\r\n\r\n stdbn3 = list(stdModule.layer4.children())[2].bn3\r\n self.bn3.weight = nn.Parameter(list(stdbn3.parameters())[0].data.clone())\r\n self.bn3.bias = nn.Parameter(list(stdbn3.parameters())[1].data.clone())\r\n\r\n if (not grad) or (grad and layers<2) :\r\n for param in list(self.conv3.parameters()):\r\n param.requires_gard = False\r\n for param in list(self.bn3.parameters()):\r\n param.requires_gard = False\r\n\r\n\r\n def forward(self, que, img): # img: [bs,2048,7,7] que: (bs,14)\r\n\r\n # process que\r\n # (bs,14) => (bs,14,300) question为14个word index, list 1d length 14, 每次forward都只对一个batch #2d tensor\r\n emb = F.tanh(self.we(que))\r\n # (bs, 14,300)->(1, bs, 512) question vector 只取最后的H (num_layers * num_directions, batch_size, hidden_size) 所以要squeeze(dim=0)\r\n\r\n if not self.use_gru:\r\n qouput, hn = self.lstm(emb)\r\n h,_=hn#(1, bs, 1024)\r\n else:\r\n _,h = self.gru(emb)#(1, bs, 1024)\r\n\r\n h = self.lstmdp(h).squeeze(dim=0) # (bs, 512)\r\n\r\n # process image tensor\r\n origin = img.clone()\r\n\r\n # first conv\r\n img = self.conv1(img) # [bs,512,7,7]\r\n img = self.bn1(img) # [bs,512,7,7]\r\n\r\n # first CSF\r\n # (bs,512,7,7) (bs,h_size) => (bs,512,7,7)\r\n if self.layers>=3 :\r\n img = self.csf1(img, h)\r\n\r\n # second conv\r\n img = self.conv2(img) # [bs,512,7,7]\r\n img = self.bn2(img) # [bs,512,7,7]\r\n\r\n # second CSF\r\n # (bs,512,7,7) (bs,h_size) => (bs,512,7,7)\r\n if self.layers>=2 :\r\n img = self.csf2(img, h)\r\n\r\n # third conv\r\n img = self.conv3(img) # [bs,2048,7,7]\r\n img = self.bn3(img) # [bs,2048,7,7]\r\n\r\n # third CSF\r\n # (bs,2048,7,7) (bs,h_size) => (bs,2048,7,7)\r\n if self.layers>=1 :\r\n img = self.csf3(img, h)\r\n\r\n img = img + origin # (bs,2048,7,7)\r\n img = self.relu(img) # (bs,2048,7,7)\r\n\r\n img_feature = self.avgpool(img) # (bs,2048,1,1)\r\n img_feature = img_feature.view(img_feature.size(0), -1) # (bs,2048)\r\n img_feature = self.fc(img_feature) # (bs,1024)\r\n\r\n fuse = self.pred_mfh(img_feature, h) # (bs,1024) (bs,512) => (bs,2048)\r\n score = self.pred_net(fuse)#(bs,3092)\r\n return 
score\r\n\r\n","repo_name":"kyocen/Graduation-Design-VQA-based-on-deep-learning","sub_path":"CSFMODEL.py","file_name":"CSFMODEL.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"6166590366","text":"from neon import NervanaObject\nfrom neon.util.argparser import NeonArgparser\nfrom neon.initializers import Constant, GlorotUniform, Xavier\nfrom neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine\nfrom neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule\nfrom neon.transforms import Rectlin, Softmax, CrossEntropyMulti\nfrom neon.models import Model\nfrom neon.data import ArrayIterator\nfrom neon.callbacks.callbacks import Callbacks\n\nimport numpy as np\nparser = NeonArgparser(__doc__)\nargs = parser.parse_args()\n\nNervanaObject.be.bsz = 16\nNervanaObject.be.enable_winograd = 4\n\n# setup data provider\nX_train = np.random.uniform(-1, 1, (16, 3 * 224 * 224))\ny_train = np.random.randint(0, 999, (16, 1000))\ntrain = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))\n\n# layers = [Conv((3, 3, 64), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Pooling(2, strides=2),\n# Conv((3, 3, 128), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Pooling(2, strides=2),\n# Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Pooling(2, strides=2),\n# Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Pooling(2, strides=2),\n# Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),\n# Pooling(2, strides=2),\n# Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),\n# Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),\n# Affine(nout=1000, init=Gaussian(scale=0.01), activation=Softmax())]\n# model = Model(layers=layers)\n\n\"\"\"\nModified from https://github.com/NervanaSystems/ModelZoo/blob/master/ImageClassification/ILSVRC2012/VGG/vgg_neon.py\n\"\"\"\n\ninit1 = Xavier(local=True)\ninitfc = GlorotUniform()\n\nrelu = Rectlin()\nconv_params = {'init': init1,\n 'strides': 1,\n 'padding': 1,\n 'bias': Constant(0),\n 'activation': relu}\n\n# Set up the model layers\nlayers = []\nfor nofm in [64, 128, 256, 512, 512]:\n layers.append(Conv((3, 3, nofm), **conv_params))\n layers.append(Conv((3, 3, nofm), **conv_params))\n if nofm > 128:\n layers.append(Conv((3, 3, nofm), **conv_params))\n # if args.vgg_version == 'E':\n # layers.append(Conv((3, 3, nofm), **conv_params))\n layers.append(Pooling(2, strides=2))\n\nlayers.append(Affine(nout=4096, init=initfc, bias=Constant(0), activation=relu))\nlayers.append(Dropout(keep=0.5))\nlayers.append(Affine(nout=4096, init=initfc, bias=Constant(0), activation=relu))\nlayers.append(Dropout(keep=0.5))\nlayers.append(Affine(nout=1000, init=initfc, bias=Constant(0), activation=Softmax()))\n\nmodel = Model(layers=layers)\n\nweight_sched = Schedule([22, 44, 65], (1 / 250.)**(1 / 3.))\nopt_gdm = GradientDescentMomentum(0.01, 0.0, wdecay=0.0005, schedule=weight_sched)\nopt = MultiOptimizer({'default': opt_gdm})\ncost = GeneralizedCost(costfunc=CrossEntropyMulti())\ncallbacks = Callbacks(model)\n\nimport time\n\nnum_epochs=100\nt0 = time.time()\n# 
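left commented out: an optional benchmark warm-up pass before the timed fit\n# 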
model.benchmark(train, cost=cost, optimizer=opt, niterations=100, nskip=1)\nmodel.fit( train, cost=cost, optimizer=opt, num_epochs=100, callbacks=callbacks )\nt1 = time.time()\n\nprint(\"Batch size: %d\" %(NervanaObject.be.bsz))\nprint(\"Iterations: %d\" %(num_epochs))\nprint(\"Time per iteration: %7.3f ms\" %((t1 - t0) *1000 / num_epochs))\n\n","repo_name":"aizvorski/vgg-benchmarks","sub_path":"benchmark_neon.py","file_name":"benchmark_neon.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"77"} +{"seq_id":"71154217848","text":"import random\nfrom enum import StrEnum, auto\n\nimport pygame\n\nfrom src.entity import Entity\nfrom src.utils import get_font\n\n\nclass CitizenType(StrEnum):\n GREEN = auto()\n BLUE = auto()\n RED = auto()\n YELLOW = auto()\n\n\nclass Citizen(Entity):\n IMAGES = {\n T: pygame.image.load(f\"assets/{T.value}.png\").convert_alpha()\n for T in CitizenType\n }\n ORIGINAL_SPEED = 50.0\n FONT = get_font(\"assets/fonts/regular.ttf\", 32)\n SPEED = ORIGINAL_SPEED\n\n def __init__(self, type_: CitizenType | None = None) -> None:\n if type_ is None:\n self.type_ = random.choice(tuple(CitizenType))\n else:\n self.type_ = type_\n super().__init__(self.IMAGES[self.type_], self.SPEED)\n self.vertical_diff = 0.0\n self.surf_speed = 150.0\n self.surf_alpha = 255.0\n self.alpha_reduction_speed = 400.0\n\n def on_death(self):\n super().on_death()\n\n if self.alive:\n return False\n\n if self.type_ == self.shared.sidebar.target:\n p = self.shared.target.score_gain * self.shared.target.score_multiplier\n color = \"green\"\n symbol = \"+\"\n else:\n p = self.shared.target.score_gain\n color = \"red\"\n symbol = \"-\"\n self.score_surf = self.FONT.render(f\"{symbol}{p:.0f}\", True, color)\n self.score_rect = self.score_surf.get_rect(center=self.rect.center)\n\n def update(self):\n super().update()\n if self.alive or self.surf_alpha <= 0.0:\n return\n\n self.vertical_diff -= self.surf_speed * self.shared.dt\n self.surf_alpha -= self.alpha_reduction_speed * self.shared.dt\n self.score_surf.set_alpha(self.surf_alpha)\n\n def draw(self):\n super().draw()\n if self.alive:\n return\n\n self.shared.game_surface.blit(\n self.score_surf, self.score_rect.move(0, self.vertical_diff)\n )\n","repo_name":"blankRiot96/DSTM","sub_path":"src/citizen.py","file_name":"citizen.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31975146964","text":"from functools import partial\nimport random\nimport sys\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom . import exceptions\nfrom . import log\nfrom .lockingqueue import LockingQueue\nfrom .lock import Lock\nfrom .getset import GetSet\n\n\ndef _run_async(func, *args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n\n\ndef _map_async(func, *iterables):\n tpe = ThreadPoolExecutor(sys.maxsize)\n futures = [tpe.submit(func, *args) for args in zip(*iterables)]\n return (f.result() for f in as_completed(futures))\n\n\nclass MajorityRedis(object):\n def __init__(self, clients, n_servers, lock_timeout=30, polling_interval=25,\n run_async=_run_async, map_async=_map_async,\n getset_history_prefix='', threadsafe=False):\n \"\"\"Initializes MajorityRedis connection to multiple independent\n non-replicated Redis Instances. 
This MajorityRedis client contains\n algorithms and operations based on majority vote of the redis servers.\n\n Please initialize it by passing the following parameters:\n\n `clients` - a list of redis.StrictRedis clients,\n each connected to a different Redis server\n `n_servers` - the number of Redis servers in your cluster\n (whether or not you have a client connected to it)\n This should be a universally constant number.\n n_servers // 2 + 1 == quorum, or the smallest possible majority.\n `lock_timeout` - for locks.\n number of seconds after which the lock is invalid.\n Increase if you have large socket_timeout in redis clients,\n long network delays or long periods where\n your python code is paused while running long-running C code.\n Should be longer than both polling_interval and socket_timeout.\n `polling_interval` - if using anything that polls in the background\n (ie Lock and LockingQueue), you should set the\n polling interval to some value larger than the largest\n socket_timeout on all your clients but smaller than lock_timeout\n `run_async` - a function that receives a function and its arguments\n and runs it in the background. run_async(func, *args, **kwargs)\n By default, uses Python's threading module.\n `map_async` - a function of form map(func, iterable) that maps func on\n iterable sequence. By default, uses Python's threading module.\n `getset_history_prefix` - a prefix for a key that majorityredis uses to\n store the history of reads and writes to redis keys.\n `threadsafe` (bool) This applies to instances of Lock and LockingQueue.\n By default, instances of a class share ownership of the\n values they can modify. For instance, if lock1 locks a key,\n lock2 can unlock that same key. If `threadsafe` is true, however,\n ownership is isolated to the instance, and lock2 cannot unlock\n lock1's locked keys.\n \"\"\"\n if len(clients) < n_servers // 2 + 1:\n raise exceptions.MajorityRedisException(\n \"Must connect to at least half of the redis servers to\"\n \" obtain majority\")\n _socket_timeout = max(\n c.connection_pool.connection_kwargs['socket_timeout']\n for c in clients)\n if not _socket_timeout < polling_interval:\n log.warn(\n \"Polling_interval was not greater than socket_timeout.\"\n \" If there is network contention, your locks will be lost.\",\n extra=dict(polling_interval=polling_interval,\n socket_timeout=_socket_timeout))\n if not polling_interval <= lock_timeout:\n raise exceptions.MajorityRedisException(\n \"It was not the case that\"\n \" polling_interval < lock_timeout.\"\n \" The socket_timeout is a config setting on your redis clients\")\n self._run_async = run_async\n self._client_id = random.randint(1, sys.maxsize)\n self._clients = clients\n self._clock_drift = 0 # TODO\n self._map_async = map_async\n self._n_servers = n_servers\n self._polling_interval = polling_interval\n self._lock_timeout = lock_timeout\n self._getset_history_prefix = getset_history_prefix\n self._threadsafe = threadsafe\n\n getset = GetSet(self)\n self.get = getset.get\n self.set = getset.set\n self.ttl = getset.ttl\n self.incrby = getset.incrby\n self.delete = getset.delete\n self.exists = getset.exists\n self.Lock = partial(Lock, self)\n self.LockingQueue = partial(LockingQueue, self)\n","repo_name":"adgaudio/MajorityRedis","sub_path":"majorityredis/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31801572089","text":"import uno\nimport 
os.path\nfrom com.sun.star.beans import PropertyValue\n\n# Open LibreOffice Calc\nlocalContext = uno.getComponentContext()\nresolver = localContext.ServiceManager.createInstanceWithContext(\"com.sun.star.bridge.UnoUrlResolver\", localContext)\ncontext = resolver.resolve(\"uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext\")\ndesktop = context.ServiceManager.createInstanceWithContext(\"com.sun.star.frame.Desktop\", context)\n\n# Replace \"/path/to/your/file.csv\" with the actual path to your CSV file\nfile_path = \"./output.csv\"\n\n# Check if the file exists\nif not os.path.isfile(file_path):\n print(\"Error: The specified file does not exist.\")\n exit()\n\n# Open the file for reading\nfile = open(file_path, \"r\")\n \n# Read the contents of the file into a 2D list\ncsv_data = []\nfor line in file:\n csv_data.append(line.strip().split(\",\"))\n\n# Close the file\nfile.close()\n\n# Get the current document\nmodel = desktop.getCurrentComponent()\n# Create a new sheet\nsheets = model.getSheets()\nsheet = sheets.getByName(\"Sheet1\")\nmodel.insertSheet(sheets.getCount())\n\n# Insert the data into the sheet\nfor i in range(len(csv_data)):\n for j in range(len(csv_data[i])):\n cell = sheet.getCellByPosition(j, i)\n cell.setValue(float(csv_data[i][j]))\n\n# Save the document\nprops = PropertyValue()\nprops.Name = \"FilterName\"\nprops.Value = \"calc8\"\n\nmodel.storeToURL(\"file:///./output.ods\", tuple([props]))\n\n# Close the document\nmodel.close(True)\n","repo_name":"metetrkn/coin_reporter","sub_path":"project/libre_parser.py","file_name":"libre_parser.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33096799016","text":"import numpy as np\nfrom pysimenv.core.simulator import Simulator\nfrom pysimenv.multicopter.base import MulticopterDyn\nfrom pysimenv.multicopter.model import QuadBase\nfrom pysimenv.multicopter.control import BSControl\n\n\ndef main():\n model_params = {'m': 1.023, 'J': np.diag([9.5, 9.5, 1.86])*1e-3}\n alpha = np.array([16., 14., 16., 14., 16., 14., 2.5, 0.5, 2.5, 0.5, 2.5, 0.5])\n\n dyn = MulticopterDyn(p_0=np.zeros(3), v_0=np.zeros(3), R_0=np.identity(3), omega_0=np.zeros(3), **model_params,\n name='dynamic model')\n control = BSControl(alpha=alpha, **model_params, name='controller')\n model = QuadBase(dyn=dyn, control=control, name='quad-rotor')\n\n simulator = Simulator(model)\n simulator.propagate(dt=0.01, time=10., save_history=True, sigma_d=np.array([2., 2., -2., np.deg2rad(15.)]))\n model.dyn.plot_path()\n model.dyn.plot_euler_angles()\n model.dyn.default_plot(show=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"minii93/pysimenv","sub_path":"pysimenv/multicopter/test_bs_control.py","file_name":"test_bs_control.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29265483172","text":"from roomStructure import Room, locations, objId\nimport shelve\nfrom items import itemlist, weapon, empty\nimport render\nfrom numpy import array\nfrom rooms import roomDat\nimport rooms\n\nOPTIONS = [\"exit\", \"h\", \"help\", \"?\", \"w\", \"a\", \"s\", \"d\", \" \", \"q\", \"e\", \"1\", \"2\", \"3\", \"4\", \"5\"]\nGAMEHELP = '''\n\"exit\" ------------------ Exit game\n\"h\"/\"help\"/\"?\" ---------- Open help menu\n\"w\", \"a\", \"s\", \"d\" ------ Move/Turn direction\n\"Space\" ----------------- Attack/Use Item\n\"e\" --------------------- 
Interact\r\n\"q\" --------------------- Drop Item\r\n\"1-5\" ------------------- Select Inventory\r\n'''\r\ndirections = {'w':'^', '^':array([0, -1]),\r\n'a':'<', '<':array([-1, 0]),\r\n's':'v', 'v':array([0, 1]),\r\n'd':'>', '>':array([1, 0]),}\r\n\r\nclass player:\r\n\tdef __init__(self):\r\n\t\tself.reset()\r\n\t\r\n\tdef reset(self):\r\n\t\tself.slot = 0\r\n\t\tself.heal = 1\r\n\t\tself.health = 100\r\n\t\tself.maxHealth = 100\r\n\t\tself.mapPos = array([0, 0, 0])\r\n\t\tself.roomPos = array([2, 1])\r\n\t\tself.dir = '^'\r\n\t\tself.save = ''\r\n\t\tself.play = True\r\n\t\tself.facing = ' '\r\n\t\tself.inventory = [itemlist[1], itemlist[2], itemlist[0], itemlist[0], itemlist[0]]\r\n\t\tself.score = 0\r\n\r\n\tdef getFacing(self):\r\n\t\troomDat = locations[str(playerDat.mapPos)].room_objects\r\n\t\tnewPos = self.roomPos + directions[self.dir]\r\n\t\tprint(roomDat[newPos[1]][newPos[0]])\r\n\t\tself.facing = roomDat[newPos[1]][newPos[0]]\r\n\r\nplayerDat = player()\r\n\r\n\r\ndef saveGame():\r\n\twith open(\"saves.txt\", \"r\") as file:\r\n\t\tsaves = []\r\n\t\tfor line in file:\r\n\t\t\tprint(line)\r\n\t\t\tsaves.append(line)\r\n\t\tprint(saves)\r\n\twith open(\"saves.txt\", \"a\") as file:\r\n\t\tif playerDat.save+'\\n' not in saves:\r\n\t\t\tfile.write(f\"{playerDat.save}\\n\")\r\n\ts = shelve.open(f'{playerDat.save}.dat')\r\n\ts['playerDat'] = [playerDat.slot, playerDat.health, playerDat.maxHealth, playerDat.mapPos, playerDat.roomPos, playerDat.dir, playerDat.save, playerDat.inventory, playerDat.score, None]\r\n\ts['roomDat'] = rooms.saveRooms()\r\n\ts.sync()\r\n\ts.close()\r\n\r\ndef move(dir):\r\n\tnewPos = playerDat.roomPos + directions[directions[dir]]\r\n\tif playerDat.dir == directions[dir]:\r\n\t\tobjId[playerDat.facing].runInto(newPos)\r\n\t\tnewPos = playerDat.roomPos + directions[directions[dir]]\r\n\t\tgameStep()\r\n\t\trender.render()\r\n\telse:\r\n\t\tplayerDat.dir = directions[dir]\r\n\t\tplayerDat.getFacing()\r\n\t\trender.render()\r\n\r\ndef attack():\r\n\tif playerDat.inventory[playerDat.slot].type == 'weapon':\r\n\t\tinput(playerDat.inventory[playerDat.slot].hitText[playerDat.facing])\r\n\t\tif playerDat.facing == 'e':\r\n\t\t\tattackPos = playerDat.roomPos + directions[playerDat.dir]\r\n\t\t\tlocations[str(playerDat.mapPos)].room_objects[attackPos[1]][attackPos[0]] = ' '\r\n\t\t\tplayerDat.score += 1\r\n\t\t\t\r\n\telse:\r\n\t\tinput(empty.hitText[playerDat.facing])\r\n\trender.render()\r\n\tgameStep()\r\n\r\ndef quit():\r\n\tsave = input(\"Would you like to save? y/n\\n\").lower()\r\n\tif save in [\"y\", \"yes\"]:\r\n\t\tsaveGame()\r\n\t\tplayerDat.play = False\r\n\telif save in [\"n\", \"no\"]:\r\n\t\tconfirm = input(\"Are you sure you want to quit without saving?\\n\")\r\n\t\tif confirm in [\"y\", \"yes\"]:\r\n\t\t\tprint(\"Goodbye\")\r\n\t\t\tplayerDat.play = False\r\n\t\telse:\r\n\t\t\tprint(\"Game not Quit\")\r\n\telse:\r\n\t\tprint(\"Invalid command\\nGame not Quit\\n\")\r\n\r\ndef gameStep():\r\n\tfrom enemy import getMonsters, spawnMonster\r\n\tplayerDat.health += playerDat.heal\r\n\tif playerDat.health > playerDat.maxHealth:\r\n\t\tplayerDat.health = playerDat.maxHealth\r\n\tmonsters = getMonsters()\r\n\tfor monster in monsters:\r\n\t\tmonster.enemyMove()\r\n\tspawnMonster()\r\n\tplayerDat.getFacing()\r\n\r\n\r\n\r\ndef main(name):\r\n\tplayerDat.save = name\r\n\tchoice = ''\r\n\trender.render()\r\n\tplayerDat.play = True\r\n\twhile playerDat.play:\r\n\t\twhile choice not in OPTIONS:\r\n\t\t\tchoice = input(\"What would you like to do?\\n\").lower()\r\n\t\t\tif choice not in OPTIONS:\r\n\t\t\t\tprint(\"\\nInvalid command, please try again.\\n? 
or help for list of commands\\n\")\n\t\n\t\t#Gameplay loop\n\t\tif choice in OPTIONS[1:4]: #Help\n\t\t\tprint(GAMEHELP)\n\t\t\tinput()\n\t\telif choice in OPTIONS[4:8]: #Move\n\t\t\tmove(choice)\n\t\telif choice in OPTIONS[8:9]: #attack\n\t\t\tattack()\n\t\telif choice in OPTIONS[11:16]: #Inventory\n\t\t\tplayerDat.slot = int(choice)-1\n\t\t\trender.render()\n\t\tif choice == 'exit': #exit Game\n\t\t\tquit()\n\t\telse:\n\t\t\tchoice = ''\n\t\tif playerDat.health <= 0:\n\t\t\tinput(\"Game over!\")\n\t\t\tplayerDat.play = False\n","repo_name":"MrLobot16/Text-Game","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6145325915","text":"import random\nfrom flask import Flask, request, jsonify\nimport threading\nimport os\n\nimport world\n\napp = Flask(__name__, static_url_path='', static_folder='static')\n\n@app.get(\"/maps\")\ndef get_map():\n\troom_id = int( request.args.get('id') )\n\treturn jsonify(world.get_room_map(room_id))\n\n@app.get(\"/tick_data\")\ndef get_tick_data():\n\treturn jsonify(world.get_tick_data())\n\n@app.get(\"/ship_code\")\ndef get_ship_code():\n\tship_id = ( request.args.get(\"id\") )\n\twith open(\"ship_script_\" + ship_id + \".py\", \"r\") as f:\n\t\tdata = f.read()\n\n\treturn (data)\n\n@app.post(\"/ship_code\")\ndef post_ship_code():\n\tship_id = ( request.args.get(\"id\") )\n\tcode = request.data.decode(\"utf-8\")\n\twith open(\"ship_script_\" + ship_id + \".py\", \"w\") as f:\n\t\tdata = f.write(code)\n\n\tworld.reload_ship(ship_id)\n\n\treturn \"ok\"\n\n\n\n\nworld_thread = threading.Thread(target=world.cycle)\nworld_thread.start()\n\n# Bind to PORT if defined, otherwise default to 5000.\nport = int( os.environ.get('PORT', 5000))\napp.run(host=\"0.0.0.0\", port=port, debug=True)","repo_name":"night-glider/diploma-cheaps","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13821918677","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\nfrom helpers import SqlQueries\n\n\ndefault_args = {\n 'owner': 'udacity',\n 'start_date': datetime(2019, 1, 12),\n 'depends_on_past': False,\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n 'catchup': False,\n 'email_on_retry': False,\n\n # to prevent execution of multiple DAGs\n 'max_active_runs': 1,\n\n # adding some common arguments for the tasks within the DAG\n 'redshift_conn_id': 'redshift',\n 'aws_credentials_id': 'aws_credentials',\n 'region': 'us-west-2'\n}\n\ndata_quality_checks=[\n {'sql': \"SELECT COUNT(*) FROM artists WHERE artistid is null\", 'expected_result': 0},\n {'sql': \"SELECT COUNT(*) FROM users WHERE userid is null\", 'expected_result': 0},\n {'sql': \"SELECT COUNT(*) FROM songs WHERE songid is null\", 'expected_result': 0}\n ]\n\nwith DAG(\"s3_to_redshift\",\n default_args=default_args,\n description='Load and transform S3 data in Redshift with Airflow',\n schedule_interval='0 * * * *') as dag:\n\n start_operator = DummyOperator(task_id='Begin_execution')\n\n stage_events_to_redshift = StageToRedshiftOperator(\n task_id='Stage_events',\n table_name=\"staging_events\",\n 
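# the json_format below presumably points COPY at a JSONPaths spec for the raw event logs\n 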
s3_path=\"s3://udacity-dend/log-data\",\n json_format=\"s3://udacity-dend/log_json_path.json\",\n provide_context=True\n )\n\n stage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n table_name=\"staging_songs\",\n s3_path=\"s3://udacity-dend/song_data\",\n provide_context=True\n )\n\n load_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n sql_query = SqlQueries.songplay_table_insert,\n table_name = 'songplays',\n mode=\"append\" # better to append to fact tables\n )\n\n load_user_dimension_table = LoadDimensionOperator(\n task_id='Load_user_dim_table',\n sql_query = SqlQueries.user_table_insert,\n table_name = 'users',\n mode=\"truncate-insert\"\n )\n\n load_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n sql_query = SqlQueries.song_table_insert,\n table_name = 'songs',\n mode=\"truncate-insert\"\n )\n\n load_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n sql_query = SqlQueries.artist_table_insert,\n table_name = 'artists',\n mode=\"truncate-insert\"\n )\n\n load_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n sql_query = SqlQueries.time_table_insert,\n table_name = 'time',\n mode=\"truncate-insert\"\n )\n\n run_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n data_checks=data_quality_checks\n )\n\n end_operator = DummyOperator(task_id='Stop_execution')\n\n\n# using list to set task dependecies\nstart_operator >> [stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table\nload_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks >> end_operator\n","repo_name":"osin-vladimir/udacity-data-engineer","sub_path":"5-data-pipelines-airflow/dags/udac_example_dag.py","file_name":"udac_example_dag.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"28715049884","text":"# python3.8 -m pyserini.index -collection JsonCollection -generator DefaultLuceneDocumentGenerator -threads 1 -input json -index index -storePositions -storeDocvectors -storeRaw\nimport json\nfrom pyserini.index import IndexReader\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport sys\nfrom subprocess import call\nimport tqdm\nfrom sentence_transformers import SentenceTransformer, util\nimport torch\nimport warnings\nimport time\nimport os\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef csv2json(tag_file, json_folder, json_file):\n # csvfile = pd.read_csv(\"tag_10_bert_1000.csv\")\n csvfile = pd.read_csv(tag_file)\n # jsonfile = open('json/tag_10_bert_1000.json', 'w')\n jsonfile = open(os.path.join(json_folder, json_file), 'w')\n for i in tqdm.tqdm(range(csvfile.shape[0])):\n dic = {\"id\": csvfile.loc[i,'Title'], \"contents\": csvfile.loc[i,'Title_and_tag']}\n json.dump(dic, jsonfile)\n jsonfile.write('\\n')\n \n\nclass Ranker(object):\n def __init__(self, index_reader):\n self.index_reader = index_reader\n\n def score(query): \n rank_score = 0\n return rank_score\n\n\nclass BM25Ranker(Ranker):\n def __init__(self, index_reader, doc_id_list, query_list):\n super(BM25Ranker, self).__init__(index_reader)\n \n self.avg_d_len = index_reader.stats()['total_terms'] / index_reader.stats()['documents'] # avg_dl\n self.N = index_reader.stats()['documents']\n self.doc_vector_dic = {}\n for doc_id in tqdm.tqdm(doc_id_list):\n 
self.doc_vector_dic[doc_id] = index_reader.get_document_vector(doc_id)\n \n self.analyze_query_list = []\n for query in tqdm.tqdm(query_list): \n self.analyze_query_list.append([index_reader.analyze(term)[0] if len(index_reader.analyze(term)) > 0 else \"\" for term in query.split(\" \")])\n\n def score(self, query, doc_id, k1=1.2, b=0.75, k3=1.2):\n rank_score = 0\n \n index_reader = self.index_reader\n avg_d_len = self.avg_d_len\n N = self.N\n analyzed_query = self.analyze_query_list[query]\n doc_vector = self.doc_vector_dic[str(doc_id)]\n d_len = sum(doc_vector.values()) # |d|\n \n # hand-tuned values; note that they override the defaults declared in the signature\n k1 = 0.5\n b = 1.5\n k3 = 1.2\n \n for analyzed_term in analyzed_query:\n if analyzed_term in doc_vector.keys():\n tf_q = analyzed_query.count(analyzed_term) # c(w,q) \n df, _ = index_reader.get_term_counts(analyzed_term, analyzer=None) # df(w) \n tf = doc_vector[analyzed_term] # c(w,d)\n term1 = np.log((N - df + 0.5) / (df + 0.5))\n term2 = (k1 + 1) * tf / (k1 * ((1 - b) + b * d_len / avg_d_len) + tf)\n term3 = (k3 + 1) * tf_q / (k3 + tf_q)\n rank_score += term1 * term2 * term3\n\n return rank_score\n\ndef bm25(input_corpus, input_query, output_file, res_doc_num = 10):\n json_folder = \"json\"\n index_folder = \"index\"\n csv2json(input_corpus, json_folder, input_corpus.split('.')[0]+'.json')\n call([\"python3.8\", \"-m\", \"pyserini.index\", \"-collection\", \"JsonCollection\", \"-generator\", \"DefaultLuceneDocumentGenerator\", \"-threads\", \"1\", \"-input\", json_folder, \"-index\", index_folder, \"-storePositions\", \"-storeDocvectors\", \"-storeRaw\"])\n \n index_reader = IndexReader(index_folder)\n f_doc = pd.read_csv(input_corpus)\n f_query = pd.read_csv(input_query)\n doc_id_list = list(f_doc['Title'])\n query_list = list(f_query['Query Description'])\n ranker = BM25Ranker(index_reader, doc_id_list, query_list)\n f_query_id = []\n f_query_content = []\n f_tag = []\n f_title = []\n f_link = []\n f_category = []\n f_author = []\n f_image = []\n \n tag_file = pd.read_csv(input_corpus)\n f_tag_dic = {}\n for i in range(tag_file.shape[0]):\n tmptmp = []\n tmptmp.append(tag_file.loc[i,'Tag'])\n tmptmp.append(tag_file.loc[i,'Link'])\n tmptmp.append(tag_file.loc[i,'Category'])\n tmptmp.append(tag_file.loc[i,'Author'])\n tmptmp.append(tag_file.loc[i,'Image'])\n f_tag_dic[tag_file.loc[i,'Title']] = tmptmp\n \n for q in range(len(query_list)):\n score = {}\n for doc_id in tqdm.tqdm(doc_id_list):\n s = ranker.score(q, doc_id)\n score[doc_id] = s\n score_s = sorted(score.items(), key=lambda x: x[1], reverse=True)\n j = 0\n j_uniq = 0\n while j <= res_doc_num:\n doc_id = score_s[j][0] # pick the j-th highest-scoring document before the duplicate check\n if doc_id not in f_title:\n f_query_id.append(str(q))\n f_query_content.append(query_list[q])\n f_title.append(doc_id)\n f_tag.append(f_tag_dic[doc_id][0])\n f_link.append(f_tag_dic[doc_id][1])\n f_category.append(f_tag_dic[doc_id][2])\n f_author.append(f_tag_dic[doc_id][3])\n f_image.append(f_tag_dic[doc_id][4])\n j_uniq += 1\n j += 1\n \n f_df = pd.DataFrame({'QueryId': f_query_id, 'QueryCtonten': f_query_content, 'Title': f_title, 'Tag': f_tag,\n 'Link': f_link, 'Category': f_category, 'Author': f_author, 'Image': f_image})\n f_df.to_csv(output_file, index=False)\n \n return \n\n\ndef bert(input_corpus, input_query, output_file, min_doc_num = 15, max_doc_num = 30, threshold = 0.25):\n embedder = SentenceTransformer('all-MiniLM-L6-v2')\n # embedder = SentenceTransformer('bert-base-nli-mean-tokens') #BERT BASE\n # embedder = SentenceTransformer('bert-large-nli-stsb-mean-tokens') # LARGE BERT\n f_corpus = 
pd.read_csv(input_corpus)\n corpus = list(f_corpus['Title_and_tag'])\n corpus_dic = {}\n for i in tqdm.tqdm(range(len(corpus))):\n corpus_dic[f_corpus.loc[i,'Title_and_tag']] = f_corpus.loc[i,'Title']\n corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)\n\n f_query = pd.read_csv(input_query)\n queries = list(f_query['Query Description'])\n\n f_query_id = []\n f_query_content = []\n f_tag = []\n f_title = []\n f_link = []\n f_category = []\n f_author = []\n f_image = []\n \n top_k = min(150, len(corpus))\n for q in tqdm.tqdm(range(len(queries))):\n query_embedding = embedder.encode(queries[q], convert_to_tensor=True)\n # We use cosine-similarity and torch.topk to find the highest 5 scores\n cos_scores = util.pytorch_cos_sim(query_embedding, corpus_embeddings)[0]\n top_results = torch.topk(cos_scores, k=top_k)\n\n min_doc_num_idx = 0\n max_doc_num_idx = 0\n uniq_j = 0\n f_title_tmp = []\n for score, idx in zip(top_results[0], top_results[1]):\n if (score >= threshold or min_doc_num_idx <= min_doc_num) and max_doc_num_idx < max_doc_num:\n if corpus_dic[corpus[idx]] not in f_title_tmp:\n # print(corpus[idx], \"(Score: {:.4f})\".format(score))\n f_query_id.append(str(q))\n f_query_content.append(queries[q])\n f_title.append(corpus_dic[corpus[idx]])\n idx = idx.item()\n f_tag.append(f_corpus.loc[idx, 'Tag'])\n f_link.append(f_corpus.loc[idx, 'Link'])\n f_category.append(f_corpus.loc[idx, 'Category'])\n f_author.append(f_corpus.loc[idx, 'Author'])\n f_image.append(f_corpus.loc[idx, 'Image'])\n uniq_j+=1\n f_title_tmp.append(corpus_dic[corpus[idx]])\n min_doc_num_idx += 1\n max_doc_num_idx += 1\n else:\n break\n print(\"===\",uniq_j)\n\n f_df = pd.DataFrame({'QueryId': f_query_id, 'QueryCtonten': f_query_content, 'Title': f_title, 'Tag': f_tag,\n 'Link': f_link, 'Category': f_category, 'Author': f_author, 'Image': f_image})\n f_df.to_csv(output_file, index=False)\n return \n \n \n \nif __name__ == '__main__':\n res_doc_num = 10\n ticks1 = time.time()\n # bm25('tag_10_bert_1000.csv', 'query_20.csv', 'a.csv', res_doc_num)\n # bert('tag_10_textrank_1000.csv', 'query_20.csv', 'out_10_textrank_bert_1000.csv', res_doc_num)\n bert('tag_10_textrank_2000.csv', 'query_20.csv', 'out_10_textrank_bert_2000.csv', 15, 30, 0.25)\n ticks2 = time.time()\n print(ticks2-ticks1)\n \n \n \n","repo_name":"yexinyinancy/SI650_project","sub_path":"IR.py","file_name":"IR.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16136257493","text":"#!/usr/local/bin/python\n#Created on 3/26/13\n\n__author__ = 'Juan A. 
Ugalde'\r\n\r\n#import sys\r\nfrom collections import defaultdict\r\nfrom SummarizeOrthoMCLResults import read_genome_list\r\n\r\n\r\ndef parse_annotation_folder(genome_jgi_list, annotation_folder):\r\n \"\"\"\r\n Takes the folder with annotation files, and parses each annotation\r\n Returns dictionaries with all the annotations\r\n \"\"\"\r\n genomes_cog_number = {}\r\n genomes_cog_category = {}\r\n genomes_product_name = {}\r\n genomes_pfam_number = {}\r\n\r\n description_cogs = {}\r\n description_pfams = {}\r\n\r\n for genome in genome_jgi_list:\r\n #This requires the file to have the extension .info.xls\r\n #This can be changed later\r\n genome_file = annotation_folder + \"/\" + genome + \".info.xls\"\r\n cog_number, cog_category, product_name, pfam_number, desc_cog, desc_pfam = parse_jgi_annotation(genome_file)\r\n\r\n genomes_cog_number.update(cog_number)\r\n genomes_cog_category.update(cog_category)\r\n genomes_product_name.update(product_name)\r\n genomes_pfam_number.update(pfam_number)\r\n description_cogs.update(desc_cog)\r\n description_pfams.update(desc_pfam)\r\n\r\n return genomes_cog_number, genomes_cog_category, genomes_product_name, genomes_pfam_number, \\\r\n description_cogs, description_pfams\r\n\r\n\r\ndef parse_jgi_annotation(jgi_file):\r\n \"\"\"\r\n Takes a jgi annotation file, and returns dictionaries with the annotations.\r\n \"\"\"\r\n\r\n import re\r\n\r\n cog_number = {}\r\n cog_category = {}\r\n product_name = {}\r\n pfam_number = {}\r\n\r\n description_cogs = {}\r\n description_pfams = {}\r\n\r\n input_file = open(jgi_file, 'r')\r\n\r\n for line in input_file:\r\n line = line.rstrip('\\n')\r\n if not line.startswith(\"gene_oid\"):\r\n\r\n gene_oid, locus_tag, source, cluster_information, gene_information, evalue = line.split(\"\\t\")\r\n\r\n search_cog_number = re.match('(COG\\d+)', source)\r\n\r\n if source.startswith(\"COG_category\"):\r\n cog_category[gene_oid] = cluster_information\r\n\r\n if search_cog_number:\r\n cog_number[gene_oid] = source\r\n description_cogs[source] = cluster_information\r\n\r\n if source.startswith(\"pfam\"):\r\n pfam_number[gene_oid] = source\r\n\r\n description_pfams[source] = cluster_information\r\n\r\n if source.startswith(\"Product_name\"):\r\n product_name[gene_oid] = gene_information\r\n\r\n return cog_number, cog_category, product_name, pfam_number, description_cogs, description_pfams\r\n\r\n\r\ndef get_cluster_information(input_cluster_file):\r\n cluster_file = open(input_cluster_file, 'r')\r\n\r\n clusters = {}\r\n\r\n for line in cluster_file:\r\n line = line.rstrip()\r\n\r\n cluster_id, gene_list = line.split(\"\\t\")\r\n clusters[cluster_id] = gene_list\r\n\r\n return clusters\r\n\r\n\r\ndef annotate_cluster(annotation, clusters):\r\n \"\"\"\r\n This function takes an annotation and the list of clusters and annotates each cluster based on\r\n the majority rule. 
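An annotation is assigned only when it accounts for more than half of the annotated members of a cluster. 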
The return is a dictionary with the annotation of each cluster, and a dictionary\n with the cluster with conflicts\n \"\"\"\n annotated_clusters = {}\n total_conflicts = defaultdict(lambda: defaultdict(int))\n unresolved_conflicts = defaultdict(lambda: defaultdict(int))\n\n for cluster in clusters:\n protein_id_list = [id_tag.split(\"|\")[1] for id_tag in clusters[cluster].split(\",\")]\n\n summary_annotation = defaultdict(int)\n\n for protein_id in protein_id_list:\n if not protein_id in annotation:\n continue\n else:\n protein_info = annotation[protein_id]\n summary_annotation[protein_info] += 1\n\n if len(summary_annotation) == 0:\n continue\n\n elif len(summary_annotation) == 1:\n annotated_clusters[cluster] = summary_annotation.keys()[0]\n\n else:\n total_conflicts[cluster] = summary_annotation\n #Get the total number of values\n total_annotations = sum(summary_annotation.itervalues())\n top_hit = None\n\n for hit in summary_annotation:\n if summary_annotation[hit] / float(total_annotations) > float(0.5):\n top_hit = hit\n\n #Check where no decision was made\n if top_hit is None:\n unresolved_conflicts[cluster] = summary_annotation\n else:\n annotated_clusters[cluster] = top_hit\n\n return annotated_clusters, total_conflicts, unresolved_conflicts\n\nif __name__ == '__main__':\n import os\n import argparse\n\n program_description = \"This script annotates a cluster generated by the SummarizeOrthoMCLResults script\"\n\n parser = argparse.ArgumentParser(description=program_description)\n\n #Arguments\n parser.add_argument(\"-l\", \"--genome_list_index\", type=str,\n help=\"File with the genome list. Format GenomeID, FullName, ShortName\", required=True)\n parser.add_argument(\"-a\", \"--annotation_folder\", type=str,\n help=\"Folder with the annotation files from JGI\", required=True)\n parser.add_argument(\"-c\", \"--cluster_file\", type=str,\n help=\"Cluster file\", required=True)\n parser.add_argument(\"-o\", \"--output_directory\", type=str,\n help=\"Output folder\", required=True)\n\n args = parser.parse_args()\n\n #Create the output directory\n if not os.path.exists(args.output_directory):\n os.makedirs(args.output_directory)\n\n #####Read the genome list\n genome_id_dictionary, genome_count = read_genome_list(args.genome_list_index)\n\n ##Read the annotation\n cog_number, cog_category, product_name, pfam_number, description_cogs, description_pfams \\\n = parse_annotation_folder(genome_id_dictionary.keys(), args.annotation_folder)\n\n ##Get the cluster information\n cluster_information = get_cluster_information(args.cluster_file)\n\n ###Consolidate the annotation for each cluster\n #Cog Number\n cog_number_annotated_clusters, cog_number_total_conflicts, cog_number_unresolved_conflicts\\\n = annotate_cluster(cog_number, cluster_information)\n\n #Cog category\n cog_category_annotated_clusters, cog_category_total_conflicts, cog_category_unresolved_conflicts = \\\n annotate_cluster(cog_category, cluster_information)\n\n #Product name\n product_annotated_clusters, product_total_conflicts, product_unresolved_conflicts = \\\n annotate_cluster(product_name, cluster_information)\n\n #Pfam number\n pfam_annotated_clusters, pfam_total_conflicts, pfam_unresolved_conflicts = \\\n annotate_cluster(pfam_number, cluster_information)\n\n ###Print the outputs\n #Open files\n output_cog_number = open(args.output_directory + \"/cog_number_clusters.txt\", 'w')\n output_cog_number_conflicts = open(args.output_directory + \"/cog_number_conflicts.txt\", 'w')\n output_cog_category = 
open(args.output_directory + \"/cog_category_clusters.txt\", 'w')\n output_cog_category_conflicts = open(args.output_directory + \"/cog_category_conflicts.txt\", 'w')\n output_pfam = open(args.output_directory + \"/pfam_clusters.txt\", 'w')\n output_pfam_conflicts = open(args.output_directory + \"/pfam_conflicts.txt\", 'w')\n output_product = open(args.output_directory + \"/product_name_clusters.txt\", 'w')\n output_product_conflicts = open(args.output_directory + \"/product_name_conflicts.txt\", 'w')\n\n ##Print log file\n logfile = open(args.output_directory + \"/logfile.txt\", 'w')\n\n ##Total number of clusters\n logfile.write(\"Total number of analyzed clusters: %d\" % len(cluster_information) + \"\\n\")\n\n ##Write output files\n #Cogs\n for cluster in cog_number_annotated_clusters:\n output_cog_number.write(cluster + \"\\t\" + cog_number_annotated_clusters[cluster] + \"\\t\" +\n description_cogs[cog_number_annotated_clusters[cluster]] + \"\\n\")\n\n for cluster in cog_number_unresolved_conflicts:\n print_list = []\n for cog in cog_number_total_conflicts[cluster]:\n cog_info = description_cogs[cog]\n print_list.append(cog)\n print_list.append(cog_info)\n print_list.append(str(cog_number_total_conflicts[cluster][cog]))\n\n output_cog_number_conflicts.write(cluster + \"\\t\" + \"\\t\".join(print_list) + \"\\n\")\n\n logfile.write(\"COG Number\" + \"\\n\")\n logfile.write(\"Total annotated cluster: %d\" % len(cog_number_annotated_clusters) + \"\\n\")\n logfile.write(\"Total number of unresolved conflicts: %d\" % len(cog_number_unresolved_conflicts) + \"\\n\" + \"\\n\")\n\n #Cog category\n for cluster in cog_category_annotated_clusters:\n output_cog_category.write(cluster + \"\\t\" + cog_category_annotated_clusters[cluster] + \"\\n\")\n\n for cluster in cog_category_unresolved_conflicts:\n print_list = []\n for cog_category in cog_category_unresolved_conflicts[cluster]:\n print_list.append(cog_category)\n print_list.append(str(cog_category_unresolved_conflicts[cluster][cog_category]))\n\n output_cog_category_conflicts.write(cluster + \"\\t\" + \"\\t\".join(print_list) + \"\\n\")\n\n logfile.write(\"COG Category\" + \"\\n\")\n logfile.write(\"Total annotated cluster: %d\" % len(cog_category_annotated_clusters) + \"\\n\")\n logfile.write(\"Total number of unresolved conflicts: %d\" % len(cog_category_unresolved_conflicts) + \"\\n\" + \"\\n\")\n\n #Pfam\n for cluster in pfam_annotated_clusters:\n output_pfam.write(cluster + \"\\t\" + pfam_annotated_clusters[cluster] + \"\\t\" +\n description_pfams[pfam_annotated_clusters[cluster]] + \"\\n\")\n\n for cluster in pfam_unresolved_conflicts:\n print_list = []\n for pfam in pfam_unresolved_conflicts[cluster]:\n pfam_info = description_pfams[pfam]\n print_list.append(pfam)\n print_list.append(pfam_info)\n print_list.append(str(pfam_unresolved_conflicts[cluster][pfam]))\n\n output_pfam_conflicts.write(cluster + \"\\t\" + \"\\t\".join(print_list) + \"\\n\")\n\n logfile.write(\"Pfam number\" + \"\\n\")\n logfile.write(\"Total annotated cluster: %d\" % len(pfam_annotated_clusters) + \"\\n\")\n logfile.write(\"Total number of unresolved conflicts: %d\" % len(pfam_unresolved_conflicts) + \"\\n\" + \"\\n\")\n\n #Product name\n for cluster in product_annotated_clusters:\n output_product.write(cluster + \"\\t\" + product_annotated_clusters[cluster] + \"\\n\")\n\n for cluster in product_unresolved_conflicts:\n print_list = []\n for product in product_unresolved_conflicts[cluster]:\n print_list.append(product)\n 
print_list.append(str(product_unresolved_conflicts[cluster][product]))\n\n output_product_conflicts.write(cluster + \"\\t\" + \"\\t\".join(print_list) + \"\\n\")\n\n logfile.write(\"Product name\" + \"\\n\")\n logfile.write(\"Total annotated cluster: %d\" % len(product_annotated_clusters) + \"\\n\")\n logfile.write(\"Total number of unresolved conflicts: %d\" % len(product_unresolved_conflicts) + \"\\n\" + \"\\n\")\n\n\n\n\n\n\n\n\n\n\n #Close files\n output_cog_number.close()\n output_cog_number_conflicts.close()\n output_cog_category.close()\n output_cog_category_conflicts.close()\n output_pfam.close()\n output_pfam_conflicts.close()\n output_product.close()\n output_product_conflicts.close()\n logfile.close()\n\n #print len(cog_number_annotated_clusters), len(cog_number_total_conflicts), len(cog_number_unresolved_conflicts)\n #print len(product_annotated_clusters), len(product_conflicts_total_conflicts), len(product_unresolved_conflicts)\n #print len(pfam_annotated_clusters), len(pfam_total_conflicts), len(pfam_unresolved_conflicts)\n #print len(cog_category_annotated_clusters), len(cog_category_total_conflicts), len(cog_category_unresolved_conflicts)\n #print cog_category_unresolved_conflicts\n\n","repo_name":"juanu/SequenceClusterScripts","sub_path":"AnnotateOrthoMCL_Clusters.py","file_name":"AnnotateOrthoMCL_Clusters.py","file_ext":"py","file_size_in_byte":11820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42822370212","text":"import sys\nimport yaml\nimport pprint\nimport math\nfrom zabbix_api import ZabbixAPI\n\n\nTRIGGERNAME_SEARCH_FORMAT = 'Interface {0}('\n\nLINKDOWN_TRIGGER_FILTER = [\n \": リンクダウン\"\n]\n\nPORTSTATE_TRIGGER_FILTER = [\n \": アウトバウンド\",\n \": インバウンド\"\n]\n\ndef filterLinkdownTrigger(description):\n for str in LINKDOWN_TRIGGER_FILTER:\n if str in description:\n return True\n else:\n continue\n\n return False\ndef filterPortStateTrigger(description):\n for str in PORTSTATE_TRIGGER_FILTER:\n if str in description:\n return True\n else:\n continue\n\n return False\n\ndef findTriggerFromTriggerList(triggerList, filter):\n items = {\n 'link_trigger': [],\n 'port_trigger': []\n } \n for item in triggerList:\n if filter in item['description'] and filterLinkdownTrigger(item['description']):\n items['link_trigger'].append({\n \"trigger\": item['description'],\n \"triggerid\": item['triggerid']\n })\n elif filter in item['description'] and filterPortStateTrigger(item['description']):\n items['port_trigger'].append({\n \"trigger\": item['description'],\n \"triggerid\": item['triggerid']\n })\n else:\n continue\n\n return items\n\ntemplatePath = sys.argv[1]\nwith open(templatePath, 'r') as f:\n ifTemplate = yaml.safe_load(f)\n\nconfigPath = './conf.yml'\nwith open(configPath, 'r') as f:\n conf = yaml.safe_load(f)\n\nzapi = ZabbixAPI(server=conf.get('zabbix_server'))\nzapi.login(conf.get('zabbix_user'), conf.get('zabbix_password'))\nhostdata = zapi.host.get({\n \"filter\": {\n \"host\": [\n ifTemplate.get('hostname')\n ]\n }})\nhostid = hostdata[0].get('hostid')\ndescriptionFilter = []\ndescriptionFilter.extend(LINKDOWN_TRIGGER_FILTER)\ndescriptionFilter.extend(PORTSTATE_TRIGGER_FILTER)\ntriggers = zapi.trigger.get({\n \"output\": \"extend\",\n \"hostids\": hostid,\n \"search\": {\n \"description\": descriptionFilter, \n },\n \"searchByAny\": True,\n \"sortfield\": \"description\"\n})\nimages = dict()\nimages_result = zapi.image.get({\n \"output\": \"extend\",\n \"search\": {\n \"name\": [\n 'port_normal',\n 
'port_disable',\n 'port_error',\n 'link_normal',\n 'link_disable',\n 'link_error',\n ], \n },\n \"searchByAny\": True,\n})\nfor item in images_result:\n images[item['name']] = item['imageid']\n\ninterfaces={}\nfor cardIndex in range(ifTemplate.get('line_card')):\n interfaces[str(cardIndex+1)] = {\n \"port\": [],\n \"uplink\": [],\n }\n\n for portIndex in range(ifTemplate.get('interface_port')):\n search_name = TRIGGERNAME_SEARCH_FORMAT.format(\n ifTemplate['ifname_format'].format(\n cardnumber=cardIndex+1,\n ifnumber=portIndex+1,\n )\n )\n item = findTriggerFromTriggerList(triggers, search_name)\n interfaces[str(cardIndex+1)]['port'].append(item)\n\n for ulPortIndex in range(ifTemplate.get('uplink_port')):\n search_name = TRIGGERNAME_SEARCH_FORMAT.format(\n ifTemplate['ifname_format'].format(\n cardnumber=cardIndex+1,\n ifnumber=ifTemplate.get('interface_port') + ulPortIndex+1,\n )\n )\n item = findTriggerFromTriggerList(triggers, search_name)\n interfaces[str(cardIndex+1)]['uplink'].append(item)\n\nmapSchema = {\n \"name\": 'PortMap: ' + ifTemplate.get('hostname'),\n \"width\": 1280,\n \"height\": 100 + (int(ifTemplate.get('line_card')) * 200),\n \"highlight\": 0,\n \"label_format\": 1,\n \"label_type_trigger\": 4,\n \"selements\": [],\n \"links\": [],\n \"shapes\": []\n}\n\nTOP_PORT_COORDINATE = {\n 'x': 74,\n 'y': 154 \n}\nTOP_LINK_COORDINATE = {\n 'x': 85,\n 'y': 125\n}\nTOP_LABEL_COORDINATE = {\n 'x': 80,\n 'y': 100\n}\nportCoordinate = TOP_PORT_COORDINATE\nlinkCoordinate = TOP_LINK_COORDINATE\nlabelCoordinate = TOP_LABEL_COORDINATE\ncardCoordinate = {\n 'x': 5,\n 'y': 175\n}\n\ntitleShape = {\n \"type\": 0,\n \"x\": 30,\n \"y\": 34,\n \"width\": 1000,\n \"height\": 32,\n \"text\": ifTemplate['hostname'] + ' Port Map',\n \"font_size\": 20,\n \"text_halign\": 1\n} \nsubtitle = \"device: {0}({1} ports, {2} uplinks)\".format(\n ifTemplate['devicename'],\n str(ifTemplate['interface_port']),\n str(ifTemplate['uplink_port']),\n)\nsubTitleShape = {\n \"type\": 0,\n \"x\": 30,\n \"y\": 59,\n \"width\": 1000,\n \"height\": 22,\n \"text\": subtitle,\n \"font_size\": 14,\n \"text_halign\": 1\n}\nmapSchema['shapes'].append(titleShape)\nmapSchema['shapes'].append(subTitleShape)\n\nfor cardIndex in interfaces.keys():\n cardShape = {\n \"type\": 0,\n \"x\": cardCoordinate['x'],\n \"y\": cardCoordinate['y'],\n \"width\": 50,\n \"height\": 30,\n \"text\": \"Card-\" + cardIndex\n } \n mapSchema['shapes'].append(cardShape)\n\n i = 0\n for port in interfaces[cardIndex]['port']:\n i += 1\n xOffcet = (math.ceil(i/2) - 1) * 40\n yPortOffcet = 40 if i % 2 == 0 else 0\n yLinkOffcet = 120 if i % 2 == 0 else 0\n yLabelOffcet = 160 if i % 2 == 0 else 0\n mapSchema['selements'].append({\n \"elements\": [{\"triggerid\": item[\"triggerid\"]} for item in port['port_trigger']],\n \"elementtype\": 2,\n \"iconid_off\": images['port_normal'],\n \"iconid_on\": images['port_error'],\n \"iconid_disabled\": images['port_disable'],\n \"label\": \"\",\n \"x\": portCoordinate['x'] + xOffcet,\n \"y\": portCoordinate['y'] + yPortOffcet,\n })\n mapSchema['selements'].append({\n \"elements\": [{\"triggerid\": item[\"triggerid\"]} for item in port['link_trigger']],\n \"elementtype\": 2,\n \"iconid_off\": images['link_normal'],\n \"iconid_on\": images['link_error'],\n \"iconid_disabled\": images['link_disable'],\n \"label\": \"\",\n \"x\": linkCoordinate['x'] + xOffcet,\n \"y\": linkCoordinate['y'] + yLinkOffcet,\n })\n mapSchema['shapes'].append({\n \"type\": 0,\n \"x\": labelCoordinate['x'] + xOffcet,\n \"y\": labelCoordinate['y'] 
+ yLabelOffcet,\n \"width\": 20,\n \"height\": 20,\n \"text\": str(i) \n })\n\n for port in interfaces[cardIndex]['uplink']:\n i += 1\n xOffcet = (math.ceil(i/2) - 1) * 40 + 20\n yPortOffcet = 40 if i % 2 == 0 else 0\n yLinkOffcet = 120 if i % 2 == 0 else 0\n yLabelOffcet = 160 if i % 2 == 0 else 0\n mapSchema['selements'].append({\n \"elements\": [{\"triggerid\": item[\"triggerid\"]} for item in port['port_trigger']],\n \"elementtype\": 2,\n \"iconid_off\": images['port_normal'],\n \"iconid_on\": images['port_error'],\n \"iconid_disabled\": images['port_disable'],\n \"label\": \"\",\n \"x\": portCoordinate['x'] + xOffcet,\n \"y\": portCoordinate['y'] + yPortOffcet,\n })\n mapSchema['selements'].append({\n \"elements\": [{\"triggerid\": item[\"triggerid\"]} for item in port['link_trigger']],\n \"elementtype\": 2,\n \"iconid_off\": images['link_normal'],\n \"iconid_on\": images['link_error'],\n \"iconid_disabled\": images['link_disable'],\n \"label\": \"\",\n \"x\": linkCoordinate['x'] + xOffcet,\n \"y\": linkCoordinate['y'] + yLinkOffcet,\n })\n mapSchema['shapes'].append({\n \"type\": 0,\n \"x\": labelCoordinate['x'] + xOffcet,\n \"y\": labelCoordinate['y'] + yLabelOffcet,\n \"width\": 20,\n \"height\": 20,\n \"text\": str(i) \n })\n\nzapi.map.create(mapSchema)","repo_name":"sy4may0/ZabbixPortMap","sub_path":"ZabbixIFPortMap.py","file_name":"ZabbixIFPortMap.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3670439720","text":"import pandas as pd\r\nimport inflection\r\nimport plotly.express as px\r\nimport streamlit as st\r\n\r\nfrom PIL import Image\r\n\r\n# Load dataset\r\ndef load_data(path):\r\n data = pd.read_csv(path)\r\n return data\r\n\r\n# Rename columns set underscore\r\ndef rename_columns(dataframe):\r\n df = dataframe.copy()\r\n title = lambda x: inflection.titleize(x)\r\n snakecase = lambda x: inflection.underscore(x)\r\n spaces = lambda x: x.replace(\" \", \"\")\r\n cols_old = list(df.columns)\r\n cols_old = list(map(title, cols_old))\r\n cols_old = list(map(spaces, cols_old))\r\n cols_new = list(map(snakecase, cols_old))\r\n df.columns = cols_new\r\n return df\r\n\r\n# Country names function\r\nCOUNTRIES = {\r\n1: \"India\",\r\n14: \"Australia\",\r\n30: \"Brazil\",\r\n37: \"Canada\",\r\n94: \"Indonesia\",\r\n148: \"New Zeland\",\r\n162: \"Philippines\",\r\n166: \"Qatar\",\r\n184: \"Singapure\",\r\n189: \"South Africa\",\r\n191: \"Sri Lanka\",\r\n208: \"Turkey\",\r\n214: \"United Arab Emirates\",\r\n215: \"England\",\r\n216: \"United States of America\",\r\n}\r\n\r\ndef country_name(country_id):\r\n return COUNTRIES[country_id]\r\n\r\n\r\n# Convert values to us dollar\r\nexchange_rates = {\r\n 'Botswana Pula(P)': 0.018,\r\n 'Brazilian Real(R$)': 0.20,\r\n 'Emirati Diram(AED)': 0.27,\r\n 'Indian Rupees(Rs.)': 0.012,\r\n 'Indonesian Rupiah(IDR)': 0.000067,\r\n 'NewZealand($)': 0.62,\r\n 'Pounds(£)': 1.24,\r\n 'Qatari Rial(QR)': 0.27,\r\n 'Rand(R)': 0.053,\r\n 'Sri Lankan Rupee(LKR)': 0.0033,\r\n 'Turkish Lira(TL)': 0.050,\r\n 'Dollar($)' : 1.0\r\n}\r\n\r\ndef convert_to_usd(amount, currency):\r\n if currency in exchange_rates:\r\n return amount * exchange_rates[currency]\r\n else:\r\n return None\r\n\r\n\r\n# Data cleaning\r\ndef data_transform(df):\r\n # drop rows with null values\r\n df = df.dropna()\r\n\r\n # drop duplicate values\r\n df = df.drop_duplicates().reset_index(drop=True)\r\n\r\n #simplify the cuisines column\r\n df[\"cuisines\"] = df.loc[:, 
\"cuisines\"].apply(lambda x: x.split(\",\")[0])\r\n\r\n # drop outliers\r\n filt = df['amount_usd'] != 25000017.0\r\n df = df.loc[filt, :]\r\n return df\r\n\r\ndef data_viz(df):\r\n # Set streamlit page\r\n st.set_page_config(layout='wide')\r\n\r\n # Sidebar configuration\r\n image = Image.open('logo.png')\r\n st.sidebar.image(image, width=150)\r\n st.sidebar.header('Food Zone')\r\n st.sidebar.subheader('Your food in your zone')\r\n st.sidebar.write(\"\"\"___\"\"\")\r\n\r\n # Country filter\r\n st.sidebar.markdown('# Filters')\r\n country_filter = st.sidebar.multiselect(label='Choose the countries',\r\n options=df['country_code'].unique(),\r\n default=df['country_code'].unique())\r\n\r\n # Resturant number filter\r\n number_filter = st.sidebar.slider(label='Number of Restaurants', max_value=20, min_value=1,\r\n value=10)\r\n\r\n # Cuisine type filter\r\n cuisine_filter = st.sidebar.multiselect(label='Cuisines', \r\n options=df['cuisines'].unique(),\r\n default=df['cuisines'].unique())\r\n \r\n # Country Filter functionality \r\n select_row = df['country_code'].isin(country_filter)\r\n df = df.loc[select_row, :]\r\n\r\n # Cuisine type filter functionality\r\n cuisine_row_filter = df['cuisines'].isin(cuisine_filter)\r\n df = df.loc[cuisine_row_filter, :]\r\n\r\n st.sidebar.write(\"\"\"___\"\"\")\r\n\r\n\r\n # Home Page\r\n #restaurant_by_cuisine = df[['restaurant_id', 'cuisines']].groupby('cuisines').count().sort_values('restaurant_id', ascending=False).reset_index()\r\n st.title('🍽️ Cuisines Vison')\r\n\r\n # Metrics\r\n with st.container():\r\n \r\n st.header('Best Restaurant of the Biggest Cuisines')\r\n col1, col2, col3, col4, col5 = st.columns(5)\r\n\r\n with col1: #North indian\r\n df1 = df[df['cuisines'] == 'Indian']\r\n df2 = df1[['restaurant_name', 'aggregate_rating', 'country_code', 'amount_usd', 'city' ]].groupby('restaurant_name').max().sort_values('aggregate_rating', ascending=False).reset_index()\r\n help1 = (f'Place: {df2.iloc[0,4]}/ {df2.iloc[0,2]} \\n\\n Price for Two: U${df2.iloc[0,3]} \\n\\n ' )\r\n st.metric(value=df2.iloc[0,1], \r\n label=(f'Indian: {df2.iloc[0,0]}'),\r\n help= help1)\r\n \r\n\r\n with col2: # American\r\n df1 = df[df['cuisines'] == 'American']\r\n df2 = df1[['restaurant_name', 'aggregate_rating', 'country_code', 'amount_usd', 'city']].groupby('restaurant_name').max().sort_values('aggregate_rating', ascending=False).reset_index()\r\n help1 = (f'Place: {df2.iloc[0,4]}/ {df2.iloc[0,2]} \\n\\n Price for Two: U${df2.iloc[0,3]} \\n\\n ' )\r\n st.metric(value=df2.iloc[0,1], \r\n label=(f'American: {df2.iloc[0,0]}'),\r\n help = help1)\r\n \r\n with col3: # Cafe\r\n df1 = df[df['cuisines'] == 'Cafe']\r\n df2 = df1[['restaurant_name', 'aggregate_rating', 'country_code', 'amount_usd', 'city']].groupby('restaurant_name').max().sort_values('aggregate_rating', ascending=False).reset_index()\r\n help1 = (f'Place: {df2.iloc[0,4]}/ {df2.iloc[0,2]} \\n\\n Price for Two: U${df2.iloc[0,3]} \\n\\n ' )\r\n st.metric(value=df2.iloc[0,1], \r\n label=(f'Cafe: {df2.iloc[0,0]}'),\r\n help= help1)\r\n\r\n with col4: # Italian\r\n df1 = df[df['cuisines'] == 'Italian']\r\n df2 = df1[['restaurant_name', 'aggregate_rating', 'country_code', 'amount_usd', 'city']].groupby('restaurant_name').max().sort_values('aggregate_rating', ascending=False).reset_index()\r\n help1 = (f'Place: {df2.iloc[0,4]}/ {df2.iloc[0,2]} \\n\\n Price for Two: U${df2.iloc[0,3]} \\n\\n ' )\r\n st.metric(value=df2.iloc[0,1], \r\n label=(f'Italian: {df2.iloc[0,0]}'),\r\n help=help1)\r\n \r\n with col5: # Pizza\r\n 
df1 = df[df['cuisines'] == 'Pizza']\r\n df2 = df1[['restaurant_name', 'aggregate_rating', 'country_code', 'amount_usd', 'city']].groupby('restaurant_name').max().sort_values('aggregate_rating', ascending=False).reset_index()\r\n help1 = (f'Place: {df2.iloc[0,4]}/ {df2.iloc[0,2]} \\n\\n Price for Two: U${df2.iloc[0,3]} \\n\\n ' )\r\n st.metric(value=df2.iloc[0,1], \r\n label=(f'Pizza: {df2.iloc[0,0]}'),\r\n help= help1)\r\n\r\n\r\n\r\n\r\n # Top N Restaurants table\r\n with st.container():\r\n st.title(f'Top {number_filter} Resturants')\r\n df1 = df.sort_values('aggregate_rating', ascending=False).head(number_filter).reset_index(drop=True)\r\n df2 = df1.loc[:, ['restaurant_id', 'restaurant_name', 'country_code', \r\n 'city', 'cuisines', 'amount_usd', 'aggregate_rating', 'rating_text']]\r\n st.dataframe(df2, use_container_width=True)\r\n\r\n\r\n # Best and Worst rated cuisines\r\n with st.container():\r\n col1, col2 = st.columns(2)\r\n\r\n # Top N best rated cuisines\r\n with col1:\r\n dff = df[df['votes'] >= 100]\r\n df1 = dff[['cuisines', 'aggregate_rating']].groupby('cuisines').mean().sort_values('aggregate_rating', ascending=False).reset_index()\r\n df2 = df1[df1['cuisines'] != 'Others']\r\n df3 = df2.iloc[:number_filter, :]\r\n fig = px.bar(\r\n df3, \r\n x='cuisines', \r\n y='aggregate_rating',\r\n title=(f' Top {number_filter} best rated cuisines'),\r\n text_auto=True\r\n )\r\n st.plotly_chart(fig, use_container_width=True)\r\n\r\n # Top N worst rated cuisines\r\n with col2:\r\n df1 = df[['cuisines', 'aggregate_rating']].groupby('cuisines').mean().sort_values('aggregate_rating', ascending=True).reset_index()\r\n df2 = df1[df1['aggregate_rating'] >= 1]\r\n df3 = df2.iloc[:number_filter, :]\r\n fig = px.bar(\r\n df3,\r\n x='cuisines',\r\n y='aggregate_rating',\r\n title=(f' Top {number_filter} worst rated cuisines'),\r\n text_auto=True\r\n )\r\n st.plotly_chart(fig, use_container_width=True)\r\n \r\n \r\ndef main(): \r\n # Load data\r\n data = load_data('zomato.csv')\r\n\r\n # Raname columns\r\n data = rename_columns(data)\r\n\r\n # Map country codes to name\r\n data['country_code'] = data['country_code'].map(country_name)\r\n\r\n # Convert avg cost to USD\r\n data[\"amount_usd\"] = data.apply(lambda row: convert_to_usd(row[\"average_cost_for_two\"], row[\"currency\"]), axis=1)\r\n \r\n # Apply data cleaning\r\n df = data_transform(data)\r\n\r\n # Perform data viz\r\n data_viz(df)\r\n\r\n# Run the main functon\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"natan-matos/foodzone","sub_path":"pages/04_🍽️Cuisines.py","file_name":"04_🍽️Cuisines.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22940354369","text":"def fib(n, memo={}):\r\n # recurse approach:\r\n # if n <= 2: return 1\r\n # return fib(n-1) + fib(n-2)\r\n # TC: bigO(2^n), Space complexity: bigO(n)\r\n\r\n # DP approach:\r\n if n in memo: return memo[n]\r\n if n <= 2: return 1\r\n memo[n] = fib(n-1, memo) + fib(n-2, memo)\r\n return memo[n]\r\n\r\n # TC: bigO(n), Space complexity: bigO(n)\r\n\r\nprint(fib(70))","repo_name":"RahimMahat/PythonProjects","sub_path":"DSA/DP/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73272018487","text":"#!/usr/bin/env Python3\n# จักรกฤษณ์ บุญเนตร\n# 600510533\n# Lab 06\n# Problem 2\n# 204111 Sec 01A\n\ndef main():\n n = int(input())\n ans = 
def longest_digit_run(n):\n    last = -1  # last digit seen (initialized to a value outside 0-9)\n    line = 1  # length of the current run\n    most = 1  # longest run found so far\n    while n != 0:\n        present = n % 10  # current digit, taken from the least-significant position (0-9)\n        if present == last :  # the current digit matches the previous one, so the run continues\n            line = line + 1 \n        else:  # otherwise the run restarts at length 1\n            line = 1\n        last = present  # remember the digit just processed\n        most = max(most, line)\n        n //= 10  # drop the processed digit and move to the next position\n\n    return most\n\nif __name__ == '__main__' :\n    main()","repo_name":"ChampionJakkrit/204111_FUNDAMENTALS_OF_PROGRAMMING","sub_path":"Laboratory 6/Lab06_5_600510533.py","file_name":"Lab06_5_600510533.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"35805830704","text":"import pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport datetime as dt\nimport streamlit as st\n\n\nst.set_page_config(page_title='EV Analysis')\nst.title(\"ELECTRIC VEHICLE ANALYSIS\")\nst.header(\"2017-2022\")\n\n# READ FILE WITH FUNCTION TO OPTIMIZE COMPUTATION\n@st.experimental_memo(ttl=60,max_entries=10)\n@st.cache(suppress_st_warning=True) # 👈 Added this\n\ndef read_masDat(): \n    return pd.read_csv(\"https://drive.google.com/uc?export=download&id=1FEodh3Mxo-tEG65rlQr9BNukNdvHJfEA\",\n                       sep=\";\")\nevMasDat=read_masDat()\n\nevMasDat=evMasDat.astype({\"Date\":\"datetime64[ns]\"})\nevMasDat['Year']=evMasDat['Date'].dt.year\nevMasDat['Month']=evMasDat['Date'].dt.month\n\n# DESCRIBE\nevDesc=evMasDat.describe()\n\n# SIDEBAR SETUP\nst.sidebar.title(\"CONTROL PAGE\")\nselectDashboard = st.sidebar.selectbox(\n    \"CHOOSE DASHBOARD :\",\n    (\"DESCRIPTIVE ANALYTICS\", \"PREDICTIVE ANALYTICS\",\"PROFILE\"))\n\nselectDate = st.sidebar.date_input(\n    \"CHOOSE DATE RANGE :\",\n    value=(dt.date(2017, 1, 31),dt.date(2022, 5, 31)))\nst.sidebar.write(\"Select Date temporarily not running for a while..\")\n\nselectValue = st.sidebar.radio(\n    \"SELECT VALUE :\",\n    ('PHEV-EV TOTAL', 'EV-NON EV TOTAL'))\n\nif selectValue == 'PHEV-EV TOTAL':\n    stVal,endVal=0,2\nelse:\n    stVal,endVal=2,4\n\n\n# LIST SELECTION\nlistVPU=evMasDat['Vehicle Primary Use'].sort_values(ascending=True).unique().tolist()\nlistState=evMasDat['State'].sort_values(ascending=True).unique().tolist() ; listState.insert(0,\"ALL\")\nlistCountry=evMasDat['County'].sort_values(ascending=True).unique().tolist(); listCountry.insert(0,\"ALL\")\n\n# DATA PREP\nlistValue=evMasDat[['Plug-In Hybrid Electric Vehicles (PHEVs)',\n                    'Battery Electric Vehicles (BEVs)',\n                    'Electric Vehicle (EV) Total',\n                    'Non-Electric Vehicle Total',\n                    'Total Vehicles']].columns.values.tolist()\n\n# WIDGET\nvpuSelection = st.multiselect('Vehicle Primary Use : ',\n                              listVPU,\n                              default=listVPU)\n\nstateSelection = st.selectbox('State : ',\n                              listState)\n\nif stateSelection != \"ALL\":\n    lsevMasDat=evMasDat[evMasDat['State']==stateSelection]\n    listCountry=lsevMasDat['County'].sort_values(ascending=True).unique().tolist()\n    countrySelection = st.selectbox('Country : ',\n                                    listCountry)\nelse: \n    countrySelection = st.selectbox('Country : ',\n                                    listCountry)\n\n# FILTER DATAFRAME\nif stateSelection == \"ALL\" and countrySelection == \"ALL\" :\n    evMasDatFil=evMasDat[(evMasDat['Vehicle Primary Use'].isin(vpuSelection))]\n\nelif stateSelection != \"ALL\" and countrySelection == \"ALL\" : \n    evMasDatFil=evMasDat[(evMasDat['Vehicle Primary Use'].isin(vpuSelection)) &\n                         (evMasDat['State']==stateSelection)& (evMasDat['County'].notnull())]\n 
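\n# The remaining branches narrow the data to a specific county, with or without a specific state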
\nelif stateSelection == \"ALL\" and countrySelection != \"ALL\" :\n    evMasDatFil=evMasDat[(evMasDat['Vehicle Primary Use'].isin(vpuSelection)) &\n                         (evMasDat['State'].notnull())& (evMasDat['County']==countrySelection)]\n\nelif stateSelection != \"ALL\" and countrySelection != \"ALL\" :\n    evMasDatFil=evMasDat[(evMasDat['Vehicle Primary Use'].isin(vpuSelection)) &\n                         (evMasDat['State']==stateSelection)& (evMasDat['County']==countrySelection)]\n\n    \n    \ngbVchTotal=evMasDatFil.groupby(['Year','Month'])[listValue].sum().reset_index()\n\n# PLOTTING TOTAL VEHICLE\nvhcTotal=px.bar(gbVchTotal,x=\"Year\",\n                y=listValue[4],\n                title=\"AMERICANS VEHICLE TRENDS 2017-2022\",\n                color=\"Month\",\n                height=500,\n                text_auto=\".3s\")\n\n# PLOTTING TOTAL EV\nvhcEVTotal=px.bar(gbVchTotal,x=\"Year\",\n                  y=listValue[stVal:endVal],\n                  title=\"AMERICANS ELECTRIC VEHICLE TRENDS 2017-2022 - \" + selectValue,\n                  height=500,\n                  barmode=\"group\")\n\n@st.cache(suppress_st_warning=True)\ndef stat_year(): \n    return evMasDat.groupby(['Year','State'])[listValue].sum().reset_index()\ngbStateTotal=stat_year()\n\nvhcStateYr=px.line(gbStateTotal,x=\"Year\",\n                   y=listValue[0:3],\n                   title=\"AMERICANS ELECTRIC VEHICLE TRENDS BY STATE 2017-2022\",\n                   height=500,\n                   color='State')\n\n# SHOW VISUALIZATION\nif selectDashboard == \"DESCRIPTIVE ANALYTICS\" :\n    st.header(\"DESCRIPTIVE ANALYTICS PAGES\")\n    st.plotly_chart(vhcTotal)\n    st.plotly_chart(vhcEVTotal)\n    st.write(\"DETAILS ELECTRIC VEHICLE 2017-2022\")\n    st.dataframe(evMasDatFil)\n    st.write(\"DESCRIBE ELECTRIC VEHICLE 2017-2022\")\n    st.dataframe(evDesc)\n    st.plotly_chart(vhcStateYr)\nelif selectDashboard == \"PREDICTIVE ANALYTICS\" :\n    st.header(\"PREDICTIVE ANALYTICS PAGES - IN PROGRESS MODELING\")\nelse:\n    st.header(\"PROFILE PAGES\")\n    st.write(\"visit my github repository at : https://github.com/dickysepta\")\n\n\n# CLEAR EXPERIMENTAL MEMOS\nst.experimental_memo.clear()\n","repo_name":"dickysepta/DataAnalytic","sub_path":"EV_Analysis.py","file_name":"EV_Analysis.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"27229301013","text":"\nimport numpy as np\nimport random\n\n# function that performs the players' moves\ndef player(field, count):\n    count += 1\n    if count % 2 != 0:\n        print(f\"Move #{count}: player 1 moves \\n\")\n        value = zero\n    elif count % 2 == 0:\n        value = crest\n        print(f\"Move #{count}: player 2 moves \\n\")\n    else:\n        print(\"Something went wrong\")\n        exit()\n    while True:\n        number_row = random.randint(0, a - 1 )\n        number_column = random.randint(0, b - 1)\n        number = (number_row,number_column)\n        if field[number] == 1:\n            field[number] = value\n            print(f\"{field}\\n\")\n            check_win(field, count)\n            player(field, count)\n\n\n# function that checks the win condition\ndef check_win(field, count):\n    # check columns\n    column_sum = (np.sum(field, axis=0))\n    for i in range(b):\n        if column_sum[i] == 0:\n            print(f\"Player 1 wins\")\n            exit()\n        elif column_sum[i] == crest_check_column:\n            print(f\"Player 2 wins\")\n            exit()\n\n    # check rows\n    row_sum = (np.sum(field, axis=1))\n    for i in range(a):\n        if row_sum[i] == 0:\n            print(f\"Player 1 wins\")\n            exit()\n        elif row_sum[i] == crest_check_row:\n            print(f\"Player 2 wins\")\n            exit()\n\n    # the diagonal win check runs only for square boards\n    if a == b:\n        diagonal_1 = np.diagonal(field)\n        diagonal_2 = np.fliplr(field).diagonal()\n\n        if np.sum(diagonal_1) == 0 or np.sum(diagonal_2) == 0:\n            print(f\"Player 1 wins\")\n            exit()\n        
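# a diagonal summing to crest_check_column (7 * a) means every cell on it is a cross\n        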
elif np.sum(diagonal_1) == crest_check_column or np.sum(diagonal_2) == crest_check_column:\n            print(f\"Player 2 wins\")\n            exit()\n\n    # draw after the final move\n    if count == max_hod:\n        print(\"Draw\")\n        exit()\n\n    return\n\n\na = int(input(\"Enter the number of rows:\"))\nb = int(input(\"Enter the number of columns:\"))\nprint(f\"The game board is {a}x{b}\")\nfield = np.ones((a,b), dtype = np.int32)\n\nzero = 0\n# the number 7 stands for X\ncrest = 7\ncrest_check_column = 7 * a\ncrest_check_row = 7 * b\n# maximum number of moves\nmax_hod= a * b\ncount = 0\n\nprint(\"----------------------------\")\nprint(\"The game has started\")\nplayer(field, count)\n","repo_name":"AnatoliiKropotov/perf","sub_path":"nedra/crestiki_noliki.py","file_name":"crestiki_noliki.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"18972921742","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom libft.activations import ReLU, Softmax\nfrom libft.layers import Dense, Input\nfrom libft.losses import BinaryCrossentropy\nfrom libft.metrics import BinaryAccuracy\nfrom libft.models import Sequential\nfrom libft.optimizers import SGD\nfrom libft.preprocessing import to_categorical\n\nnp.seterr(all='raise')\n\nX = np.random.rand(500, 30)\nX_std = np.copy(X)\nX_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\nX_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n\ny = to_categorical(np.random.randint(0, 2, 500))\n\nmodel = Sequential([\n    Input(input_shape=(30,)),\n    Dense(16),\n    ReLU(),\n    Dense(2),\n    Softmax(),\n])\n\nmodel.summary()\n\nmodel.compile(loss=BinaryCrossentropy(),\n              optimizer=SGD(learning_rate=1e-4),\n              metrics=BinaryAccuracy())\n\nepochs = 100\nhistory = model.fit(X_std, y,\n                    batch_size=128,\n                    epochs=epochs,\n                    verbose=1,\n                    validation_split=0.1)\n\nepoch_range = range(1, epochs + 1)\nplt.plot(epoch_range, model.losses['training'], c='blue')\nplt.plot(epoch_range, model.losses['validation'], c='red', ls='dashed')\nplt.xlabel('Epochs')\nplt.ylabel('Cost function')\nplt.show()\n\nplt.plot(epoch_range, model.metrics['training'], c='blue')\nplt.plot(epoch_range, model.metrics['validation'], c='red', ls='dashed')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.show()\n","repo_name":"kcosta42/Multilayer_Perceptron","sub_path":"tests/test_softmax.py","file_name":"test_softmax.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} {"seq_id":"665588738","text":"\"\"\"\n@file bucketsort.py\n@author qu-gg\n\nImplementation of BucketSort using InsertionSort underneath\n\"\"\"\n\n\ndef insertion_sort(array):\n    \"\"\"\n    Implementation of Insertion Sort, which puts each element of an array into its correct place before\n    continuing the iteration\n    :param array: array to sort\n    :return:\n    \"\"\"\n    counter = 1\n    while counter < len(array):\n        index = counter\n        while index > 0 and array[index - 1] > array[index]:\n            temp = array[index]\n            array[index] = array[index - 1]\n            array[index - 1] = temp\n            index -= 1\n        counter += 1\n    return array\n\n\ndef bucket_sort(array, radix):\n    \"\"\"\n    Implementation of Bucket Sort, which splits up values into different buckets and sorts those buckets\n    individually before concatenating them in order\n    :param array: array to sort\n    :param radix: number of buckets\n    :return: sorted array\n    \"\"\"\n    buckets = [[] for _ in range(radix)]\n    for num in array:\n        buckets[int(num * 
radix)].append(num)\n\n    for bucket in buckets:\n        if len(bucket) > 1:\n            insertion_sort(bucket)\n\n    sorted = []\n    for bucket in buckets:\n        sorted += bucket\n\n    return sorted\n","repo_name":"qu-gg/py-algs","sub_path":"Sorts/bucketsort.py","file_name":"bucketsort.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"11067073656","text":"# Test data generation\n\n# To install lorem package\n# python -m pip install lorem --user\nfrom classes import User, Tours, Ratings, Chat, Bookings\nimport math\nimport random\nimport lorem\nimport csv\nimport time\nimport datetime\n\n# Numbers to generate (CAN CHANGE)\nACCNUM = 500  # Accounts\nTOURSNUM = 200  # created tours\nBOOKINGSNUM = 10000  # no of bookings\nRATINGSNUM = 10000  # no of ratings\nCHATNUM = 10000  # no of dialogs sent\n\n# Filenames\nSINGLE_NAME_INPUT = \"singleName.txt\"\nUSER_ACC_SQL = \"userAccSQL.sql\"\nUSER_ACC_DATA = \"userAccData.csv\"\nTOUR_SQL = \"tourSQL.sql\"\nTOUR_DATA = \"tourData.csv\"\nBOOKING_SQL = \"bookingSQL.sql\"\nBOOKING_DATA = \"bookingData.csv\"\nRATING_SQL = \"ratingSQL.sql\"\nRATING_DATA = \"ratingData.csv\"\nCHAT_SQL = \"chatSQL.sql\"\nCHAT_DATA = \"chatData.csv\"\n\nALL_SQL = \"all_sql.sql\"\n\n# Stored memory\nuser_accounts = []\nsingle_name = {}\ntour_list = []\nbooking_list = []\nratings_list = []\nchat_list = []\n\n\n#############################################################################\n# create user accounts\n\n\ndef read_single_name():\n    count = 0\n\n    with open(SINGLE_NAME_INPUT) as names:\n        for line in names:\n            single_name[count] = line.strip()\n            count += 1\n\n    return count\n\n\ndef single_name_get(count):\n    return single_name.get(random.randint(0, count - 1))  # keys run from 0 to count - 1\n\n\ndef write_user_accounts():\n    # Write account into insert statements\n    user_account_SQL = open(USER_ACC_SQL, \"w+\")\n\n    for i in range(0, len(user_accounts)):\n        user_account_SQL.write(user_accounts[i].insert_statement() + \"\\n\")\n\n    user_account_SQL.close()\n\n    # Keep data in csv file\n    with open(USER_ACC_DATA, mode='w+', newline='') as user_account_manual:\n        fieldnames = user_accounts[0].data.keys()\n        writer = csv.DictWriter(user_account_manual, fieldnames=fieldnames)\n        writer.writeheader()\n\n        for i in range(0, ACCNUM):\n            writer.writerow(user_accounts[i].data)\n\n\ndef user_acc_generator():\n    count = read_single_name()\n\n    for i in range(0, ACCNUM):\n        # Make account and save to list for future code upgrade\n        user_accounts.append(\n            User(single_name_get(count), single_name_get(count)))\n\n    write_user_accounts()\n\n##############################################################################\n# Create tours\n# Pick 1k times\n\n\ndef pick_acc():\n    return random.choice(user_accounts)\n\n\ndef write_tour_list():\n    # Write account into insert statements\n    tour_sql_writer = open(TOUR_SQL, \"w+\")\n\n    for i in range(0, len(tour_list)):\n        tour_sql_writer.write(tour_list[i].insert_statement() + \"\\n\")\n\n    tour_sql_writer.close()\n\n    # Keep data in csv file\n    with open(TOUR_DATA, mode='w+', newline='') as tours_manual:\n        fieldnames = tour_list[0].data.keys()\n        writer = csv.DictWriter(tours_manual, fieldnames=fieldnames)\n        writer.writeheader()\n\n        for i in range(0, TOURSNUM):\n            writer.writerow(tour_list[i].data)\n\n\ndef tour_generator():\n    for i in range(0, TOURSNUM):\n        tourguide = pick_acc()\n        tour_list.append(Tours(tourguide.userID))\n\n    write_tour_list()\n\n##############################################################################\n# Create 
bookings\n# Repeatedly pick a random user and a random tour, checking that users never book their own tours\n\n\ndef pick_Tour():\n    return random.choice(tour_list)\n\n\ndef write_booking_list():\n    # Write account into insert statements\n    booking_sql_writer = open(BOOKING_SQL, \"w+\")\n\n    for i in range(0, len(booking_list)):\n        booking_sql_writer.write(booking_list[i].insert_statement() + \"\\n\")\n\n    booking_sql_writer.close()\n\n    # Keep data in csv file\n    with open(BOOKING_DATA, mode='w+', newline='') as booking_manual:\n        fieldnames = booking_list[0].data.keys()\n        writer = csv.DictWriter(booking_manual, fieldnames=fieldnames)\n        writer.writeheader()\n\n        for i in range(0, BOOKINGSNUM):\n            writer.writerow(booking_list[i].data)\n\n\ndef booking_generator():\n    for i in range(0, BOOKINGSNUM):\n        tourist = pick_acc()\n        while True:\n            tour = pick_Tour()\n            if tour.userID != tourist.userID:\n                break\n        booking_list.append(\n            Bookings(tourist.userID, tour.userID, tour.tourName))\n\n    write_booking_list()\n\n##############################################################################\n# Create ratings\n# Repeatedly pick two distinct users so one can rate the other; each rating must be unique\n\n\ndef write_ratings_list():\n    # Write account into insert statements\n    ratings_sql_writer = open(RATING_SQL, \"w+\")\n\n    for i in range(0, len(ratings_list)):\n        ratings_sql_writer.write(ratings_list[i].insert_statement() + \"\\n\")\n\n    ratings_sql_writer.close()\n\n    # Keep data in csv file\n    with open(RATING_DATA, mode='w+', newline='') as rating_manual:\n        fieldnames = ratings_list[0].data.keys()\n        writer = csv.DictWriter(rating_manual, fieldnames=fieldnames)\n        writer.writeheader()\n\n        for i in range(0, RATINGSNUM):\n            writer.writerow(ratings_list[i].data)\n\n\ndef setState():\n    if random.randint(0, 1) == 0:\n        return \"tourist\"\n    else:\n        return \"tourguide\"\n\n\ndef ratings_generator():\n    for i in range(0, RATINGSNUM):\n        while True:\n            userA, userB = pick_acc(), pick_acc()\n            if userA != userB:\n                rate = Ratings(userA.userID, userB.userID, setState())\n                check = False\n                for i in ratings_list:\n                    if rate == i:\n                        check = True\n                if not check:\n                    break\n\n        ratings_list.append(rate)\n\n    write_ratings_list()\n\n##############################################################################\n# Create chats\n\n\ndef write_chat_list():\n    # Write account into insert statements\n    chat_sql_writer = open(CHAT_SQL, \"w+\")\n\n    for i in range(0, len(chat_list)):\n        chat_sql_writer.write(chat_list[i].insert_statement() + \"\\n\")\n\n    chat_sql_writer.close()\n\n    # Keep data in csv file\n    with open(CHAT_DATA, mode='w+', newline='') as chat_manual:\n        fieldnames = chat_list[0].data.keys()\n        writer = csv.DictWriter(chat_manual, fieldnames=fieldnames)\n        writer.writeheader()\n\n        for i in range(0, CHATNUM):\n            writer.writerow(chat_list[i].data)\n\n\ndef chat_generator():\n    for i in range(0, CHATNUM):\n        while True:\n            userA, userB = pick_acc(), pick_acc()\n            if userA != userB:\n                break\n        chat_list.append(Chat(userA.userID, userB.userID))\n\n    write_chat_list()\n\n\n##############################################################################\n\ndef final_sql():\n    writer = open(ALL_SQL, \"w+\")\n    # writer .write(\n    #     \"DELETE FROM [dbo].[chat];\\nDELETE FROM [dbo].[ratings];\\nDELETE FROM [dbo].[bookings];\\nDELETE FROM [dbo].[tours];\\nDELETE FROM [dbo].[users];\\n\")\n    for i in range(0, len(user_accounts)):\n        writer.write(user_accounts[i].insert_statement() + \"\\n\")\n    for i in range(0, len(tour_list)):\n        writer.write(tour_list[i].insert_statement() + \"\\n\")\n    for i in range(0, len(booking_list)):\n        
writer.write(booking_list[i].insert_statement() + \"\\n\")\n    for i in range(0, len(ratings_list)):\n        writer.write(ratings_list[i].insert_statement() + \"\\n\")\n    for i in range(0, len(chat_list)):\n        writer.write(chat_list[i].insert_statement() + \"\\n\")\n    writer.close()\n\n\nif __name__ == '__main__':\n    user_acc_generator()\n    tour_generator()\n    booking_generator()\n    ratings_generator()\n    chat_generator()\n    final_sql()\n\n    print(\"Program Complete.\")\n","repo_name":"genes3e7/veto-tours","sub_path":"test data generator/gentestdata.py","file_name":"gentestdata.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} {"seq_id":"749796432","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nImplement streaming clustering on top of fixed text representations\n\"\"\"\nimport sys\nsys.path.append('/home/dell/GraduationProject/')\nfrom TextFiltering.stream import MONGO\nfrom Baseline.tf_idf import *\nfrom TextFiltering.twitter_preprocessor import TwitterPreprocessor\nfrom Baseline.cluster_function import *\n# initialize the tokenizer instance\nCut = TwitterPreprocessor()\n\n\ndef single_cluster(doc, eps_value, sample_value):\n    # extract words and entities\n    token_w = []\n    token_e = []\n    for c in doc:\n        words = Cut.get_token(c[\"text\"])\n        entity = Cut.entity_recognition(c[\"text\"])\n        token_w.append(' '.join(words))\n        token_e.append(' '.join(entity))\n    w_embeddings = feature_vector(token_w)\n    e_embeddings = feature_vector(token_e)\n    distance = w_embeddings + 2 * e_embeddings\n    # print(\"distance:\", str(len(distance)))\n    db = my_db(eps=eps_value, min_sample=sample_value, metric='precomputed', corpus_distance=distance)\n    # return the clusters together with each cluster's \"representative point count\"\n    cluster, cluster_point, noise = presentation_point(db, doc)\n    return cluster, cluster_point, noise\n\n\ndef our_method(res, eps=1.7, second_eps=1.7, const_sample=3):\n    \"\"\"\n    The incoming data is a JSON package\n    :param res:\n    :return:\n    \"\"\"\n    # first clustering pass over the raw tweets\n    clusters, core, noise_data = single_cluster(res, eps_value=eps, sample_value=const_sample)\n    # decide whether the clustered data needs deep clustering\n    event_queue = []\n    result = []\n    for i in range(len(clusters)):\n        if len(core[i]) > 15:  # if the reduced set of core points is still large\n            event_queue.append(clusters[i])\n        else:\n            result.append(clusters[i])\n    while event_queue:\n        # print(len(event_queue), flush=True)\n        new_clusters, new_core, new_noise = single_cluster(event_queue[0], eps_value=second_eps, sample_value=const_sample)\n        noise_data.extend(new_noise)\n        for j in range(len(new_clusters)):\n            if len(new_core[j]) > 15:  # if the reduced set of core points is still large\n                # print(len(new_clusters[j]), flush=True)\n                event_queue.append(new_clusters[j])\n            else:\n                result.append(new_clusters[j])\n        event_queue.pop(0)\n        if second_eps > 0.5:\n            second_eps -= 0.2\n            const_sample += 2\n    # run Event_merge on every event cluster\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    pass\n\n","repo_name":"lurenZJF/GraduationProject","sub_path":"Detect/merge_index.py","file_name":"merge_index.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} {"seq_id":"27229301013","text":"import logging\n\n\nclass MetadataEventEnricher:\n\n    def __init__(self, apache_atlas_facade):\n        self.__apache_atlas_facade = apache_atlas_facade\n\n    
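# Bulk-fetch the entity payloads for the given guids, then attach each entity's classifications.\n    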
def enrich_entities_attributes_and_classifications(self, guids):\n        entities_dict = self.__apache_atlas_facade.fetch_entities(guids)\n        for guid in guids:\n            fetched_entity_dict = entities_dict.get(guid)\n\n            if fetched_entity_dict:\n                fetched_entity_dict[\n                    'classifications'] = self.__apache_atlas_facade.\\\n                    fetch_entity_classifications(guid)\n        logging.info('Entities: %s scraped!', guids)\n        logging.info('')\n        return entities_dict\n\n    def enrich_entity_types_relationships(self, entities, entity_types_dict):\n        # We need to enrich entity types relationships because the event\n        # does not contain it.\n        self.__enrich_event_relationships(entities, entity_types_dict)\n\n    def __enrich_event_relationships(self, entities, entity_types_dict):\n        for guid, entity in entities.items():\n            data = entity['data']\n            attributes = data['attributes']\n\n            for key, attribute in attributes.items():\n                if isinstance(attribute, list):\n                    for item in attribute:\n                        self.__enrich_relationship_dict(\n                            item, entity_types_dict)\n                else:\n                    self.__enrich_relationship_dict(attribute,\n                                                    entity_types_dict)\n\n    def __enrich_relationship_dict(self, attribute_dict, entity_types_dict):\n        if isinstance(attribute_dict, dict):\n            type_name = attribute_dict.get('typeName')\n            guid = attribute_dict.get('guid')\n            data = attribute_dict.get('data')\n            # Verify if the attribute implements an entity type\n            # and if the attribute data is not fetched.\n            if type_name and guid and not data:\n                entity_type = entity_types_dict.get(type_name)\n                if entity_type:\n                    relationship_entities = entity_type['entities']\n                    fetched_entity = relationship_entities.get(guid)\n                    if not fetched_entity:\n                        entities_dict = \\\n                            self.\\\n                            enrich_entities_attributes_and_classifications(\n                                [guid])\n                        if entities_dict:\n                            fetched_entity_dict = entities_dict.get(guid)\n                            if fetched_entity_dict:\n                                relationship_entities[\n                                    guid] = fetched_entity_dict\n","repo_name":"GoogleCloudPlatform/datacatalog-connectors-hive","sub_path":"google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_event_enricher.py","file_name":"metadata_event_enricher.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"77"} {"seq_id":"29395969699","text":"def levenshtein(word1, word2):\n    m = len(word1)\n    n = len(word2)\n\n    # Create a distance matrix\n    dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n    # Initialize the first row and first column of the matrix\n    for i in range(m + 1):\n        dp[i][0] = i\n    for j in range(n + 1):\n        dp[0][j] = j\n\n    # Compute the distances\n    for i in range(1, m + 1):\n        for j in range(1, n + 1):\n            if word1[i - 1] == word2[j - 1]:\n                dp[i][j] = dp[i - 1][j - 1]\n            else:\n                dp[i][j] = min(dp[i - 1][j - 1], dp[i][j - 1], dp[i - 1][j]) + 1\n\n    # Get the distance between the two words\n    distance = dp[m][n]\n\n    # Determine the result based on the distance\n    if distance > 1:\n        return \"+1\"\n    elif distance == 1:\n        if m > n:\n            return \"IB\"\n        elif m < n:\n            return \"IB\"\n        else:\n            return \"1S\"\n    elif distance == 0:\n        return \"0D\"\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema10_ej2/tema10_ej2_ce0dd21327515de533938fbe6e51268e.py","file_name":"tema10_ej2_ce0dd21327515de533938fbe6e51268e.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"11157651339","text":"# Prime number checker\n\nnum = int(input(\"Please enter a number: \"))\n\ndef prime(num):\n    # Trial division: numbers below 2 are not prime, and any divisor\n    # up to sqrt(num) proves num is composite\n    if num < 2:\n        print(f\"{num} is not a prime\")\n        return\n    for c in range(2, int(num ** 0.5) + 1):\n        if num % c == 0:\n            print(f\"{num} is not a prime\")\n            return\n    print(f\"{num} is a 
prime\")\nprime(num)","repo_name":"XDCoder3289/hundred-days-of-python","sub_path":"prime_num.py","file_name":"prime_num.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38615837098","text":"import vtk\nimport sys\nimport os\nimport os.path\nfrom .iocallback import FileLoadCallback\nfrom .obj import OBJReader\n\nclass VtkIO:\n def get_reader(self, file_extension):\n '''Returns a reader that can read the file type having the provided extension. Returns None if no such reader.'''\n lower_file_ext = file_extension.lower()\n #if (lower_file_ext == \".tiff\" or lower_file_ext == \".tif\"):\n # return vtk.vtkTIFFReader()\n if (lower_file_ext == \".vtk\"):\n return vtk.vtkPolyDataReader()\n if (lower_file_ext == \".ply\"):\n return vtk.vtkPLYReader()\n if (lower_file_ext == \".obj\"):\n return OBJReader()\n \n return None\n\n\n def load_file(self, file_name):\n '''Loads and returns the provided file. Returns None uppon failure.'''\n # Get the right data reader depending on the file extension\n data_reader = self.get_reader(os.path.splitext(file_name)[1])\n if not data_reader:\n return None\n\n data_reader.SetFileName(file_name)\n data_reader.Update()\n return data_reader.GetOutput()\n\n\n def load_files(self, file_names, file_load_callback = None):\n \"\"\"Returns the loaded data (the ones that could be loaded) in a list of pairs (file name, data).\"\"\"\n # Make sure that 'file_load_callback' has the right type\n if file_load_callback and not isinstance(file_load_callback, FileLoadCallback):\n file_load_callback = None\n \n # Tell the callback that the loading begins\n if file_load_callback:\n file_load_callback.init_loading(len(file_names))\n\n file_name_data_pairs = list()\n counter = 0\n\n # Load the files\n for file_name in file_names:\n counter += 1\n # Make sure we have a file\n if os.path.isfile(file_name):\n data = self.load_file(file_name)\n if data:\n file_name_data_pairs.append((file_name, data))\n\n # Update the callback\n if file_load_callback:\n file_load_callback.file_loaded(counter)\n\n # Done with loading\n if file_load_callback:\n file_load_callback.loading_done()\n\n return file_name_data_pairs\n","repo_name":"papazov3d/invipy","sub_path":"inout/vtkio.py","file_name":"vtkio.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40674311060","text":"import logging\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\nclass Class2D(ABC):\n \"\"\"\n Base class for 2D Image Classification methods.\n \"\"\"\n\n def __init__(\n self,\n src,\n n_nbor=100,\n seed=None,\n dtype=None,\n ):\n \"\"\"\n Base constructor of an object for classifying 2D images.\n\n :param src: ImageSource or subclass, provides images.\n :param n_nbor: Number of nearest neighbors to compute.\n :param seed: Optional RNG seed to be passed to random methods, (example Random NN).\n :param dtype: Numpy dtype, defaults to `src.dtype`.\n \"\"\"\n self.src = src\n\n if dtype is not None:\n self.dtype = np.dtype(dtype)\n if self.dtype != self.src.dtype:\n logger.warning(\n f\"Class2D src.dtype {self.src.dtype} does not match self.dtype {self.dtype}.\"\n )\n else:\n self.dtype = self.src.dtype\n\n self.n_nbor = n_nbor\n self.seed = seed\n\n @abstractmethod\n def classify(self):\n \"\"\"\n Classify the images from Source into classes with similar viewing angles.\n\n 
Returns classes and associated metadata (classes, reflections, distances)\n        \"\"\"\n","repo_name":"ComputationalCryoEM/ASPIRE-Python","sub_path":"src/aspire/classification/class2d.py","file_name":"class2d.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"77"} {"seq_id":"37416261187","text":"import pandas as pd\r\nimport os\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\n# Defining the source path to read the original file and destination path to write the final .csv file\r\nsource_path = '/Users/soodk2/Documents/Roche_documents/deviation_en_all.xlsx'\r\ndestination_path = '/Users/soodk2/Documents/Extracted_files/Preprocessed_data/Filtered_deviation_records.csv'\r\n\r\n\r\n# This function filters the categories that have less than 20 records \r\ndef filtering_records(source_path, destination_path):\r\n    # Reading the original file\r\n    df=pd.read_excel(source_path)\r\n\r\n    ########################## REMOVING THE CATEGORIES WHICH HAVE LESS THAN 20 RECORDS ###########################\r\n    # Getting the frequency count of the event_classification column\r\n    print(df.groupby('event_classification').count())\r\n\r\n    # Filtering event classification which has number of records less than 20\r\n    df = df.groupby('event_classification').filter(lambda x: len(x) >= 20)\r\n    print(df.groupby('event_classification').count())\r\n\r\n    # Getting the frequency count of the event_details\r\n    print(df.groupby('event_details').count())\r\n\r\n    # Filtering event details which has number of records less than 20\r\n    df = df.groupby('event_details').filter(lambda x: len(x) >= 20)\r\n    print(df.groupby('event_details').count())\r\n\r\n    # Exporting the filtered data frame to a .csv file\r\n    df.to_csv(destination_path, header=True, index=False)\r\n\r\n\r\n# This function creates .txt files and also creates data frames, both according to the specified format\r\ndef extract_to_folder(df, path, label):\r\n    # Extracting the combined_text into .txt file with rec_id as the file name\r\n    # Creating a new data frame with the specific format\r\n    df_temp = pd.DataFrame(columns = ['Split_name', 'File_name', 'Category'])\r\n    row_indexer = 1\r\n    for i in range(df.shape[0]):\r\n        file_name = str(df.iloc[i,4]) + '.txt'\r\n        text = str(df.iloc[i,10])\r\n        # Creating two rows for each record\r\n        df_temp = df_temp.append([{'Split_name': label, 'File_name': str(os.path.join(path,file_name)), 'Category':'NULL'}]*2,\r\n                                 ignore_index=True)\r\n        # Defining categories list\r\n        categories = []\r\n        categories.append(str(df.iloc[i,6])) ; categories.append(str(df.iloc[i,7]))\r\n        # Creating two records for the single file\r\n        for j in range(2):\r\n            df_temp.iloc[row_indexer + j - 1, 2] = categories[j]\r\n        \r\n        with open(os.path.join(path,file_name), \"w\") as f:\r\n            if text:\r\n                f.write(text)\r\n        row_indexer += 2\r\n    return df_temp\r\n\r\n\r\n# Reading the original file and creating a .csv file with the categories containing at least 20 records\r\nfiltering_records(source_path, destination_path)\r\n\r\n# Importing the file containing the filtered records into a data frame\r\ndf = pd.read_csv('/Users/soodk2/Documents/Extracted_files/Preprocessed_data/Filtered_deviation_records.csv')\r\n\r\n\r\n# Getting the indices of the columns- rec_id and combined_text\r\nprint(df.columns.get_loc('rec_id')) # 4\r\nprint(df.columns.get_loc('combined_text')) # 10\r\nprint(df.columns.get_loc('event_classification')) # 6\r\nprint(df.columns.get_loc('event_details')) 
# 7\r\n\r\n# Splitting the data frame into train, validation and test data frames\r\ntrain, val_test = train_test_split(df,test_size = 0.3, random_state = 42)\r\nval, test = train_test_split(val_test, test_size = 0.15, random_state = 42)\r\n\r\n# Creating a list to store the train, test and validation data frames\r\ndf_list = [] ; df_list.append(train) ; df_list.append(val) ; df_list.append(test)\r\n\r\n# Printing the shapes of the train, validation and test data frames\r\nprint(train.shape) ; print(val.shape) ; print(test.shape)\r\n\r\n# Folder directory where we write the files\r\nfolder_path = '/Users/soodk2/Documents/Extracted_files/Record_text_files'\r\n\r\n# Writing the folder names to a list\r\nfolder_names = ['/Train', '/Validation', '/Test']\r\n\r\n# Defining labels to a list\r\nlabels = ['TRAIN', 'VALIDATION', 'TEST']\r\n\r\n# Creating a list to hold the new data frames created according to the specified format\r\ndf_created = []\r\n\r\n# Looping thrice- once for each train, validation and test sets\r\nfor i in range(3):\r\n actual_path = folder_path + folder_names[i]\r\n df_created.append(extract_to_folder(df_list[i], actual_path, labels[i]))\r\n\r\n# Exporting the new data frames created to .csv files\r\nfor i in range(3):\r\n actual_path = folder_path + folder_names[i]\r\n file_name = actual_path + '/' + labels[i] + '.csv'\r\n df_created[i].to_csv(file_name, header = True, index = True)\r\n\r\n\r\n","repo_name":"ksood123/Data_preprocessing_deviation_records","sub_path":"src/source_code.py","file_name":"source_code.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12946304854","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nfrom collections import defaultdict\nimport gzip\nimport logging\nfrom rich.logging import RichHandler # type: ignore\nimport sys\nfrom tqdm import tqdm # type: ignore\nfrom typing import DefaultDict, IO, List, Tuple\n\nversion = \"0.0.2\"\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(message)s\",\n handlers=[RichHandler(markup=True, rich_tracebacks=True)],\n)\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nGroup UMIs starting from chrom|pos|seq|qual UMI files, one per strand.\nShifts reads from positive strand based on cutsite length.\nInput files are expected to be sorted, this is currently not checked.\n\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n)\n\nparser.add_argument(\"plus\", type=str, help=\"Path to plus strand file.\")\nparser.add_argument(\"revs\", type=str, help=\"Path to rev strand file.\")\nparser.add_argument(\"output\", type=str, help=\"Path to output file.\")\n\nparser.add_argument(\"--len\", type=int, help=\"Cutsite length. Default: 0\", default=0)\nparser.add_argument(\n \"--sep\", type=str, help=\"Column separator. Default: TAB\", default=\"\\t\"\n)\n\nparser.add_argument(\n \"--compress-level\",\n type=int,\n default=0,\n help=\"\"\"GZip compression level. 
Default: 0 (i.e., no compression).\"\"\",\n)\n\nparser.add_argument(\n    \"--version\",\n    action=\"version\",\n    version=f\"{sys.argv[0]} v{version}\",\n)\n\nargs = parser.parse_args()\n\n\ndef get_ih(path: str) -> IO:\n    if path.endswith(\".gz\"):\n        return gzip.open(path, \"rt\")\n    else:\n        return open(path, \"r\")\n\n\ndef get_oh(path: str, compress_level: int = 0) -> IO:\n    if 0 == compress_level:\n        return open(path, \"w+\")\n    else:\n        if not path.endswith(\".gz\"):\n            path += \".gz\"\n        return gzip.open(path, \"wt+\", compress_level)\n\n\nUMIDict = DefaultDict[str, DefaultDict[int, Tuple[List[str], List[str]]]]\n\n\ndef populate_dict(\n    umi_dict: UMIDict, path: str, cs_len: int = 0, sep: str = \"\\t\"\n) -> UMIDict:\n    with get_ih(path) as IH:\n        for line in tqdm(IH, \"Record\"):\n            chrom, pos, seq, qual = line.strip().split(\"\\t\")\n            pos = int(pos) - cs_len\n            umi_dict[chrom][pos][0].append(seq)\n            umi_dict[chrom][pos][1].append(qual)\n    return umi_dict\n\n\numi_dict: UMIDict = defaultdict(lambda: defaultdict(lambda: ([], [])))\n\nlogging.info(f\"Processing plus strand ('{args.plus}'), shifting of {args.len} bases\")\numi_dict = populate_dict(umi_dict, args.plus, args.len, sep=args.sep)\n\nlogging.info(f\"Processing rev strand ('{args.revs}')\")\numi_dict = populate_dict(umi_dict, args.revs, sep=args.sep)\n\nwith get_oh(args.output, args.compress_level) as OH:\n    logging.info(f\"Writing output to: {OH.name}\")\n    if args.compress_level > 0:\n        logging.info(f\"Compression level: {args.compress_level}\")\n    for chrom, pos_dict in tqdm(umi_dict.items(), desc=\"Chromosome\"):\n        for pos, (seq, qual) in tqdm(pos_dict.items(), desc=\"Position\"):\n            OH.write(\n                args.sep.join([chrom, str(pos), \" \".join(seq), \" \".join(qual)]) + \"\\n\"\n            )\n","repo_name":"claireleblanc/GPSeq-Pipeline","sub_path":"GPSeq_processing-docker/scripts/group_umis.py","file_name":"group_umis.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} {"seq_id":"33623035577","text":"# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n# -*- coding: utf-8 -*-\nimport os\nimport shutil\nimport time\nimport pandas as pd\n\n\ndef treat_ep_files(filepath_ep, tracker):\n    df = pd.read_csv(filepath_ep, on_bad_lines='skip')\n    df['Time'] = pd.to_datetime(df['Time'], dayfirst=True)\n    df.set_index('Time', inplace=True)\n    df = df.tz_localize('Europe/Madrid')\n    df.reset_index(inplace=True)\n    df['Time'] = df['Time'].dt.tz_localize(None)\n\n    energy_cols = [col for col in df.columns if \"(Wh)\" in col]\n    df.dropna(subset=energy_cols, how='all', inplace=True)\n\n    path = os.path.join(path_output_files[tracker], os.path.basename(filepath_ep))\n    df.to_csv(path, index=False)\n    return path\n\n\ndef push_into_repo(filepath_to_push, tracker):\n    new_data = pd.read_csv(filepath_to_push)\n    database = pd.read_csv(paths_repos[tracker])\n\n    overlapping_records = database[database['Time'].isin(new_data['Time'])]\n    if overlapping_records.empty:\n        database = pd.concat([database, new_data])\n    else:\n        database.set_index('Time', inplace=True)\n        new_data.set_index('Time', inplace=True)\n        # database = database.combine_first(dfs)\n        database.update(new_data)\n        database.reset_index(inplace=True)\n\n    # Order the database by 'Time' in ascending order\n    database.sort_values(by='Time', inplace=True)\n    database.to_csv(paths_repos[tracker], index=False)\n\n\n
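# Move a file from source to destination; failures are printed rather than raised.\ndef 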
cut_and_paste_file(source_path, destination_path):\n try:\n shutil.move(source_path, destination_path)\n except FileNotFoundError:\n print(\"Source file not found.\")\n except PermissionError:\n print(\"Permission denied. Unable to cut and paste the file.\")\n except Exception as e:\n print(\"An error occurred while cutting and pasting the file:\", str(e))\n\n\ndef resample_24(filepath):\n df = pd.read_csv(filepath, parse_dates=['Time'], index_col='Time')\n df = df.resample('H').asfreq()\n # Create a date range from 1 AM of the first day to 12 AM of the next day\n start_date = df.index[0].replace(hour=1, minute=0, second=0)\n end_date = start_date + pd.DateOffset(days=1) - pd.DateOffset(hours=1)\n date_range = pd.date_range(start=start_date, end=end_date, freq='H')\n\n # Reindex the dataframe with the new date range\n df = df.reindex(date_range)\n df.index.name = 'Time'\n df = df.fillna(0)\n df.to_csv(filepath)\n\n\ndef delete_file(file_path):\n if os.path.exists(file_path):\n os.remove(file_path)\n print('File '+file_path+' removed correctly!')\n\n\ndef main(full_input_path, tracker_num):\n\n path_out = treat_ep_files(full_input_path, tracker_num)\n # To save on raw data parsed the file as it came\n out_total = os.path.join(paths_raw_parsed[tracker_num], os.path.basename(full_input_path))\n\n cut_and_paste_file(full_input_path, out_total)\n resample_24(path_out)\n push_into_repo(path_out, tracker_num)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n PATH_INPUT_FILES_T1 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\InputData\\InversorTracker1Gijon\"\n PATH_INPUT_FILES_T2 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\InputData\\InversorTracker2Gijon\"\n\n PATH_RAW_PARSED_T1 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\RawDataParsed\\Inversor_Tracker1_Gijon\"\n PATH_RAW_PARSED_T2 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\RawDataParsed\\Inversor_Tracker2_Gijon\"\n paths_raw_parsed = [\"\", PATH_RAW_PARSED_T1, PATH_RAW_PARSED_T2]\n\n PATH_OUTPUT_FILES_T1 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\TratarDatos\\DatosTratados\\DatosInversor_Tracker1_Gijon\"\n PATH_OUTPUT_FILES_T2 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\TratarDatos\\DatosTratados\\DatosInversor_Tracker2_Gijon\"\n path_output_files = [\"\", PATH_OUTPUT_FILES_T1, PATH_OUTPUT_FILES_T2]\n\n PATH_REPO_INVERTER_T1 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\TratarDatos\\DatosTratados\\InverterDataT1.csv\"\n\n PATH_REPO_INVERTER_T2 = r\"C:\\Users\\ServidorCEDS\\Documents\\MonitorizacionPV\\TratarDatos\\DatosTratados\\InverterDataT2.csv\"\n\n paths_repos = [\"\", PATH_REPO_INVERTER_T1, PATH_REPO_INVERTER_T2]\n while True:\n # Check the first path for CSV files\n for file in os.listdir(PATH_INPUT_FILES_T1):\n if file.lower().endswith('.csv'):\n file_inverter = os.path.join(PATH_INPUT_FILES_T1, file)\n main(file_inverter, 1)\n\n # Check the second path for CSV files\n for file in os.listdir(PATH_INPUT_FILES_T2):\n if file.lower().endswith('.csv'):\n file_inverter = os.path.join(PATH_INPUT_FILES_T2, file)\n main(file_inverter, 2)\n time.sleep(10)\n","repo_name":"GaremoP/InverterScript","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31894143109","text":"import numpy as np\nimport zmq\n\ndef openSocket(addr):\n\n ctx = zmq.Context()\n sock = 
ctx.socket(zmq.PUSH)\n    sock.bind(addr) \n    return sock\n\n\nsock = openSocket('tcp://*:15512')\n\n\nFs = 16*7400\n\nFc = 1000\nFc2 = 500\nFc3 = 800\nsigLen = 2\nsigLen2 = 1\nsigLen3 = 0.03\n\nlenS = int(Fs * sigLen)\nlenS2 = int(Fs * sigLen2)\nlenS3 = int(Fs * sigLen3)\n\nsig = np.exp(1j*2*np.pi*np.arange(lenS)*Fc/Fs)\nsig3 = np.exp(1j*2*np.pi*np.arange(lenS3)*Fc3/Fs)\nsig2 = np.exp(1j*2*np.pi*np.arange(lenS2)*Fc2/Fs)\nsig = np.r_[0.00001*sig2,sig,0.00001*sig2,sig3,sig2*0.0001,sig3,sig2*0.0001,sig3,sig2*0.0001,sig3,sig2*0.0001,sig3,sig2*0.0001]\nsock.send(sig.astype(np.complex64).tobytes())\n","repo_name":"epeters13/gr-receive_muter","sub_path":"examples/send_test_sig.py","file_name":"send_test_sig.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"3719099133","text":"from torch.utils.data import Dataset, DataLoader\nimport numpy as np\n\n\ndef load_data(load_path):\n    lines = []\n    with open(load_path, 'r') as f:\n        for line in f:\n            lines.append(line)\n    return lines\n\n\nclass WikiDataset(Dataset):\n    def __init__(self, train):\n        super(WikiDataset, self).__init__()\n        self.train = train\n\n    def __len__(self):\n        return len(self.train)\n\n    def __getitem__(self, item):\n        return self.train[item]\n\n\nclass T5_Collate(object):\n    def __init__(self, tokenizer, device='cuda'):\n        self.tokenizer = tokenizer\n        self.device = device\n\n    def __call__(self, batch):\n        X = []\n        y = []\n        for idx in range(len(batch)):\n            masked, mask = self.mask_span(batch[idx])\n            X.append(masked)\n            y.append(mask)\n        tokenized_X = self.tokenizer.batch_encode_plus(X, padding='max_length', truncation=True, max_length=512,\n                                                       return_tensors='pt')\n        tokenized_y = self.tokenizer.batch_encode_plus(y, padding='max_length', truncation=True, max_length=512,\n                                                       return_tensors='pt')\n        arg_dict = {\n            'input_ids': tokenized_X['input_ids'].to(self.device),\n            # 'decoder_input_ids': tokenized_y['input_ids'].to(self.device),\n            'labels':tokenized_y['input_ids'].to(self.device)\n        }\n        return arg_dict\n\n    def mask_span(self, paragraph):\n        mask = \"\"\n        split = paragraph.split()\n        cursor = 0\n        id = 0\n        while cursor < len(split):\n            if np.random.binomial(1, 0.3) == 1:\n                span_length = np.random.geometric(0.4)\n                id_token = self.extra_id(id) + \" \"\n                id += 1\n                mask += id_token\n                if span_length >= len(split) - cursor:\n                    mask += \" \".join(split[cursor:]) + \" \"\n                    split[cursor:] = [id_token]\n                    cursor += span_length + 1\n                else:\n                    mask += \" \".join(split[cursor:cursor + span_length]) + \" \"\n                    split[cursor:cursor + span_length] = [id_token]\n                    cursor += span_length + 1\n            else:\n                cursor += 1\n        return \" \".join(split), mask + \"</s>\"\n\n    def extra_id(self, id):\n        return f\"<extra_id_{id}>\"\n\n\ndef WikiDataloader(dataset, collate_fn, batch_size=100, shuffle=False):\n    return DataLoader(WikiDataset(dataset), batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn)\n","repo_name":"ciprutdavid/AMNLP_Project","sub_path":"project/pretraining/t5_baseline_pretrain_dataset.py","file_name":"t5_baseline_pretrain_dataset.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"26887509608","text":"from collections import defaultdict\nfrom typing import Optional\n\nfrom leetcode.tree.binary_tree_traversals import TreeNode\n\n\nclass Solution:\n    def pseudoPalindromicPaths(self, root: Optional[TreeNode]) -> int:\n        pseudo_pallindromic_path_count = 0\n        val_count_dict = defaultdict(int)\n\n        
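# Depth-first helper: tracks digit counts along the current root-to-leaf path;\n        # a path is pseudo-palindromic when at most one digit count is odd.\n        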
def helper(node):\n nonlocal pseudo_pallindromic_path_count\n if node is not None:\n val_count_dict[node.val] += 1\n if node.left is None and node.right is None:\n odd_count = 0\n for val in val_count_dict.values():\n if val % 2 != 0:\n odd_count += 1\n if odd_count > 1:\n break\n if odd_count <= 1:\n pseudo_pallindromic_path_count += 1\n else:\n if node.left is not None:\n helper(node.left)\n if node.right is not None:\n helper(node.right)\n val_count_dict[node.val] -= 1\n\n helper(root)\n return pseudo_pallindromic_path_count\n\n\nif __name__ == '__main__':\n root_node1 = TreeNode(2)\n root_node1.left = TreeNode(3)\n root_node1.right = TreeNode(1)\n root_node1.left.left = TreeNode(3)\n root_node1.left.right = TreeNode(1)\n root_node1.right.right = TreeNode(1)\n print(Solution().pseudoPalindromicPaths(root_node1))\n","repo_name":"pk0912/ProgrammingPractice","sub_path":"leetcode/tree/binary_tree_path_problems/pseudo_pallindromic_paths.py","file_name":"pseudo_pallindromic_paths.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9024661448","text":"# a= input(\"what is your name\\n\")\r\n# b= input(\"how much do u earn?\")\r\n#\r\n# if int(b)==0:\r\n# raise ZeroDivisionError(\"b is zero so stopping the program\")\r\n#\r\n# if a.isnumeric():\r\n# # pass\r\n# raise Exception(\"numbers are not allowed\")\r\n#\r\n# print(f\"hello {a}\")\r\n#1000 lines of code taking one hour\r\n\r\n\r\n\r\n\r\n\r\nc = input(\"enter ur name\\n\")\r\nc= c.lower()\r\ntry:\r\n print(a)\r\nexcept Exception as e:\r\n if c==\"harry\":\r\n raise ValueError(\"Harry is blocked He is not allowed\")\r\n\r\n print(\"exception handled\")","repo_name":"Juilee27/Python","sub_path":"PyCharm/raiseee.py","file_name":"raiseee.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43692405051","text":"from AccessControl import ClassSecurityInfo\nfrom ftw.calendarwidget.browser.widgets import FtwCalendarWidget\nfrom ftw.contentpage import _\nfrom ftw.contentpage.config import PROJECTNAME\nfrom ftw.contentpage.content.contentpage import ContentPage\nfrom ftw.contentpage.content.contentpage import ContentPageSchema\nfrom ftw.contentpage.content.textblock import image_schema\nfrom ftw.contentpage.interfaces import IEventPage\nfrom Products.Archetypes import atapi\nfrom Products.ATContentTypes.config import HAS_LINGUA_PLONE\nfrom Products.ATContentTypes.content.schemata import finalizeATCTSchema\nfrom zope.interface import implements\nfrom Products.ATContentTypes.lib.calendarsupport import CalendarSupportMixin\n\n\nif HAS_LINGUA_PLONE:\n from Products.LinguaPlone.public import registerType\nelse:\n from Products.Archetypes.atapi import registerType\n\n\nEventSchema = ContentPageSchema.copy() + atapi.Schema((\n atapi.DateTimeField(\n 'startDate',\n required=True,\n searchable=False,\n accessor='start',\n languageIndependent=True,\n widget=FtwCalendarWidget(\n helper_js=('++resource++ftw.contentpage.resources/start_end_date_helper.js',),\n label=_(u'label_event_start', default=u'Event Starts'),\n description=_(u'help_start',\n default=u\"Date and Time, when the event begins.\"),\n ),\n ),\n\n atapi.DateTimeField(\n 'endDate',\n required=True,\n searchable=False,\n accessor='end',\n languageIndependent=True,\n widget=FtwCalendarWidget(\n label=_(u'label_event_end', default=u'Event Ends'),\n description=_(u'help_end',\n default=u\"Date and 
Time, when the event ends.\"),\n        ),\n    ),\n\n    atapi.BooleanField(\n        'wholeDay',\n        default=False,\n        languageIndependent=True,\n        widget=atapi.BooleanWidget(\n            label=_(u'label_whole_day_event', u'Whole day event'),\n            description=_(u'help_whole_day', default=u\"Event lasts whole day\"),\n        ),\n    ),\n    atapi.StringField(\n        'location',\n        searchable=True,\n        widget=atapi.StringWidget(\n            label=_(u'label_event_location', default=u'Event Location'),\n            description=_(u'help_event_location', default=u\"\"),\n        ),\n    ),\n))\n\nfinalizeATCTSchema(EventSchema)\n# finalizeATCTSchema moves 'location' into 'categories', we move it back:\nEventSchema.changeSchemataForField('location', 'default')\n\n# Protect the teaser image with a specific permission\npermission = \"ftw.contentpage: Edit teaser image on EventPage\"\nfor name in image_schema.keys():\n    EventSchema[name].write_permission = permission\n\n\nclass EventPage(ContentPage, CalendarSupportMixin):\n    implements(IEventPage)\n\n    meta_type = \"EventPage\"\n    schema = EventSchema\n    security = ClassSecurityInfo()\n\n    security.declarePublic('show_description')\n    def show_description(self):\n        return False\n\n    security.declarePrivate('get_addressblock')\n    def get_addressblock(self):\n        blocks = self.getFolderContents(\n            contentFilter={'portal_type': ['AddressBlock']}, full_objects=True)\n        if not len(blocks) > 0:\n            return None\n        return blocks[0]\n\n    security.declareProtected(\"View\", 'contact_name')\n    def contact_name(self):\n        block = self.get_addressblock()\n        if block:\n            return block.getAddressTitle()\n        return ''\n\n    security.declareProtected(\"View\", 'contact_phone')\n    def contact_phone(self):\n        block = self.get_addressblock()\n        if block:\n            return block.getPhone()\n        return ''\n\n    security.declareProtected(\"View\", 'contact_email')\n    def contact_email(self):\n        block = self.get_addressblock()\n        if block:\n            return block.getEmail()\n        return ''\n\n    security.declareProtected(\"View\", 'getLocation')\n    def getLocation(self):\n        field_value = self.getField('location').get(self)\n        if not field_value:\n            block = self.get_addressblock()\n            complete_address = ''\n            if block:\n                street = block.getAddress()\n                if street:\n                    complete_address = complete_address + street + ','\n                zip = block.getZip()\n                if zip:\n                    complete_address = complete_address + ' ' + zip\n                city = block.getCity()\n                if city:\n                    complete_address = complete_address + ' ' + city\n                return complete_address.strip(',')\n            return ''\n        else:\n            return field_value\n\n    security.declareProtected(\"View\", 'event_url')\n    def event_url(self):\n        block = self.get_addressblock()\n        if block:\n            return block.getWww()\n        return ''\n\nregisterType(EventPage, PROJECTNAME)\n","repo_name":"4teamwork/ftw.contentpage","sub_path":"ftw/contentpage/content/eventpage.py","file_name":"eventpage.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} {"seq_id":"1663549192","text":"#!/usr/bin/env python3\n# -*-coding:utf-8 -*-\n\"\"\"\n@Time: 2020/9/17\n@IDEName: PyCharm\n@FileName:dataBase.py\n\"\"\"\nimport os\nimport sys\nimport pymysql\nimport traceback\nsys.path.append(os.path.abspath(os.path.dirname(__file__)).replace(\"Common\", \"\") + \"Config\")  # Config directory\nsys.path.append(os.path.abspath(os.path.dirname(__file__)))  # current directory\nfrom Common.log import MyLog\nfrom pymysql.err import MySQLError\nfrom Config.read_config import YamlHandler, yaml_conf_read\n\nlogger = MyLog()\ndb_config = YamlHandler(yaml_conf_read).read_config()[\"MySQL\"]\n\n\nclass MyDb(object):\n\n    
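\"\"\"Small pymysql wrapper with logged CRUD helpers.\n\n    Usage sketch (the table name 'user' here is illustrative):\n        db = MyDb()\n        rows = db.slt_all_data('user')\n        db.close_db()\n    \"\"\"\n\n    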
def __init__(self, host=db_config[\"host\"], port=db_config[\"port\"], user=db_config[\"user\"],\n                 password=db_config[\"password\"], db_name=db_config[\"db_name\"]):\n        try:\n            self.conn = pymysql.connect(host=host, port=port, user=user, password=password, db=db_name)\n            self.cursor = self.conn.cursor()\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Database connection error: \" + traceback.format_exc())\n        else:\n            print(\"Database connection OK\")\n            logger.info(\"Database connection OK\")\n\n    def slt_all_data(self, table):\n        \"\"\"Query all rows\"\"\"\n        try:\n            self.cursor.execute(f\"select * from {table}\")\n            all_data = self.cursor.fetchall()\n            logger.info(\"Query: \" + str(f\"select * from {table}\"))\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error executing SELECT statement: \" + traceback.format_exc())\n        else:\n            return all_data\n\n    def slt_one_data(self, table, condition):\n        \"\"\"Query a single row\"\"\"\n        try:\n            self.cursor.execute(f\"select * from {table} where {condition}\")\n            one_data = self.cursor.fetchone()\n            logger.info(\"Query: \" + str(f\"select * from {table} where {condition}\"))\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error executing SELECT statement: \" + traceback.format_exc())\n        else:\n            return one_data\n\n    def del_data(self, table, condition):\n        \"\"\"Delete a row\"\"\"\n        try:\n            self.cursor.execute(f\"delete from {table} where {condition}\")\n            self.conn.commit()\n            logger.info(\"Delete: \" + str(f\"delete from {table} where {condition}\"))\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error executing DELETE statement: \" + traceback.format_exc())\n            self.conn.rollback()  # roll back the current transaction\n\n    def upd_date(self, table, obj, condition):\n        \"\"\"Update a row\"\"\"\n        try:\n            self.cursor.execute(f\"update {table} set {obj} where {condition}\")\n            self.conn.commit()\n            logger.info(\"Update: \" + str(f\"update {table} set {obj} where {condition}\"))\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error executing UPDATE statement: \" + traceback.format_exc())\n            self.conn.rollback()\n\n    def ins_data(self, table, *args):\n        \"\"\"Insert a row\"\"\"\n        try:\n            self.cursor.execute(f\"insert into {table}{args} values {args}\")\n            self.conn.commit()\n            logger.info(\"Insert: \" + str(f\"insert into {table}{args} values {args}\"))\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error executing INSERT statement: \" + traceback.format_exc())\n            self.conn.rollback()\n\n    def close_db(self):\n        try:\n            self.cursor.close()\n            self.conn.close()\n        except MySQLError:\n            traceback.print_exc()\n            logger.error(\"Error closing the database connection: \" + traceback.format_exc())\n        else:\n            print(\"Database connection closed\")\n            logger.info(\"Database connection closed\")\n\n\nif __name__ == '__main__':\n    db = MyDb()\n    print(db_config)\n\n\n\n\n\n\n\n","repo_name":"wxfgithubone/ApiAutoTestPractice","sub_path":"Common/dataBase.py","file_name":"dataBase.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"6824411038","text":"\"\"\"\nSystems of Autonomous Ordinary Differential Equations\n\nUsed for analyzing direction fields of systems of autonomous ODEs,\ndetermining trajectories of initial conditions, and finding stability,\npoint classification, and locally linear systems of critical points.\n\"\"\"\n\nimport os\nimport re\nimport warnings\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\n\nfrom sympy.abc import *\nfrom sympy import solve, Derivative, Matrix, nsimplify\n\n\nclass StabilityAnalysis(object):\n    \"\"\"Wrapper class which displays an analysis of critical point stabilities.\"\"\"\n    def __init__(self, eigenvalues, 
jacobians):\n self._cp_types = {}\n self._stabilities = {}\n self._original_mapping = eigenvalues.copy()\n self._jacobians = jacobians.copy()\n for cp, eigenvalue in self._original_mapping.items():\n cp_type, stability = self._determine_stability(eigenvalue)\n self._cp_types[cp] = cp_type\n self._stabilities[cp] = stability\n\n def __repr__(self):\n ret = []\n fmt = \"{0:<19}{1:<26}{2:<24}{3}\"\n lsys_format = \"d/dt(x, y) = ({0}\"\n ret.append(fmt.format(\n 'Critical Point', 'Type of Critical Point',\n 'Stability', 'Linear System') + '\\n')\n ret.append('-' * (len(ret[0]) + 18) + '\\n')\n for cp in self._stabilities.keys():\n p_cp = f\"({nsimplify(f'{cp[0]}')}, {nsimplify(f'{cp[1]}')})\"\n if len(p_cp) > 19:\n p_cp = f\"({round(cp[0], 3)}, {round(cp[1], 3)})\"\n out = fmt.format(\n p_cp, self._cp_types[cp],\n self._stabilities[cp],\n lsys_format.format(\n [nsimplify(i) for i in self._jacobians[cp].tolist()[0]]))\n out += \"\\n\" + \" \" * out.index('[') + \\\n f\"{[nsimplify(i) for i in self._jacobians[cp].tolist()[1]]})(x, y)\\n\"\n ret.append(out)\n return ''.join(ret)\n\n def __getitem__(self, item):\n if item not in self._stabilities.keys():\n raise KeyError(\"Expected one of the critical points.\")\n return {'Critical Point Type': self._cp_types[item],\n 'Stability': self._stabilities[item]}\n\n @property\n def info(self):\n return {i: self[i] for i in self._stabilities.keys()}\n\n @staticmethod\n def _is_real(value):\n return isinstance(value, float)\n\n @staticmethod\n def _is_complex(value):\n return isinstance(value, complex)\n\n def _determine_stability(self, eigenvalues):\n e1, e2 = eigenvalues\n if self._is_real(e1) and self._is_real(e2):\n if e1 == e2: # repeated real eigenvalues\n node = 'Proper or Improper Node'\n if e1 > 0:\n stability = 'Unstable'\n else:\n stability = 'Asymptotically Stable'\n return node, stability\n else: # two distinct real eigenvalues\n if e1 > e2 >= 0 or e2 > e1 >= 0:\n return 'Node', 'Unstable'\n if e1 < e2 <= 0 or e2 < e1 <= 0:\n return 'Node', 'Asymptotically Stable'\n if e1 <= 0 <= e2 or e2 <= 0 <= e1:\n return 'Saddle Point', 'Unstable'\n else: # complex eigenvalues\n if not self._is_complex(e1) and self._is_complex(e2):\n raise ValueError(\n f\"Error in computing stability: got one real \"\n f\"and one complex eigenvalue: {e1, e2}.\")\n real_part = e1.real\n if real_part > 0:\n return 'Spiral Point', 'Unstable'\n elif real_part < 0:\n return 'Spiral Point', 'Asymptotically Stable'\n else:\n return 'Center', 'Stable'\n\n\nclass system(object):\n \"\"\"Generates a system of differential equations.\n\n You can instantiate a `system` object using a string expression:\n\n > sys = system('2x - 3, xy^2')\n\n This assumes that you are using a system dependent on two arbitrary\n variables (traditionally this would be `x` and `y`, but any two\n letters are supported as long as they are the only two letters used).\n\n The following conditions apply for typing:\n\n 1. Any numbers which happen to precede these two variables will be\n considered coefficients of the expression (e.g., `2x` would be\n expanded to `2 * x`, and `245xy` would be `245 * x * y`).\n 2. Exponents can be achieved using either the `**` or `^` symbol.\n If you want an entire expression to be squared, then either wrap\n it in parenthesis, brackets, or braces (e.g., `2x ** 2` would\n be expanded to `2 * x ** 2`, while `2 ** 2x` would be expanded\n to `2 ** (2 * x)`, and `(2x) ** 2` would be `(2 * x) ** 2`).\n 3. 
Different terms are distinguished by use of spaces or operators\n such as `+` or `-` (e.g., `x-y` would be `x - y`). If there are\n two digits next to each other they will be deemed as one single\n number (e.g., `21xy` will be `21 * x * y`).\n\n You can either pass two expressions or use a comma in a single string to\n indicate where the two systems are independent.\n\n Parameters:\n sys: The first equation, or a string containing the whole system.\n sys2: The second equation if passing independent strings.\n \"\"\"\n # Stores predefined SymPy symbols for variables.\n _symbol_cache = {}\n\n # Allowed letter phrases for trigonometry.\n _allowed_phrases = ('sin', 'cos', 'sec', 'csc', 'tan', 'cot')\n _allowed_phrases = _allowed_phrases + tuple(\n ['arc' + i for i in _allowed_phrases]) + ('e', 'sqrt')\n\n @classmethod\n def matrix(cls, mat):\n \"\"\"Creates a system from a 2x2 matrix of coefficients.\n\n If you want to make a system which follows the equation format\n of x' = Ax, then this does that from the matrix `A`.\n\n Parameters:\n mat: A 2x2 matrix (numpy array, list of lists), or a\n single 1-dimensional list with 4 values.\n \"\"\"\n mat = np.array(mat)\n if mat.ndim == 1:\n mat = mat.reshape((2, 2))\n return cls(\n f'{mat[0][0]}x + {mat[0][1]}y',\n f'{mat[1][0]}x + {mat[1][1]}y')\n\n @classmethod\n def polar(cls, sys, sys2):\n \"\"\"Creates a system from a set of polar equations.\n\n Polar equations should use the variables `r` and `theta`.\n\n Parameters:\n sys: A polar equation.\n sys2: A second polar equation.\n \"\"\"\n sys = sys.replace('r', 'x')\n sys = sys.replace('theta', 'y')\n sys2 = sys2.replace('r', 'x')\n sys2 = sys2.replace('theta', 'y')\n return cls(sys, sys2, polar=True)\n\n def __init__(self, sys, sys2=None, **kwargs):\n # Get the two independent systems.\n if sys2 is None:\n if ',' not in sys:\n raise ValueError(\n \"Got only one string, but it contains no comma. 
\"\n \"Expected two equations for a system.\")\n expr1, expr2 = sys.split(',')\n else:\n expr1, expr2 = sys, sys2\n\n # Pre-format the expressions for potential letters/phrases.\n expr1 = expr1.strip()\n expr2 = expr2.strip()\n expr1 = expr1.replace(' ', '')\n expr2 = expr2.replace(' ', '')\n expr1, fmt_1 = self._pre_alpha_format(expr1)\n expr2, fmt_2 = self._pre_alpha_format(expr2)\n\n # Parse and format the expressions, and get the\n # variables which are being used by the system.\n expr1, var1 = self._parse_expr(expr1)\n expr2, var2 = self._parse_expr(expr2)\n\n # Check the variables.\n all_vars = np.unique(var1 + var2)\n if len(all_vars) > 2:\n raise ValueError(\n f\"Got more than 2 independent variables: {all_vars}.\")\n self._vars = all_vars\n\n # Post-format the expression.\n expr1 = self._post_alpha_format(expr1, fmt_1)\n expr2 = self._post_alpha_format(expr2, fmt_2)\n\n # Set the printing version.\n self._pprint_exprs = (expr1, expr2)\n\n # Expand the coefficients to proper expressions and compile them\n # for evaluation, but store the originals as a property.\n self.expr1 = self._expand_coefficients(expr1, all_vars)\n self.expr2 = self._expand_coefficients(expr2, all_vars)\n self._expressions = (self.expr1, self.expr2)\n\n # Parse for a polar equation.\n self._polar = kwargs.get('polar', False)\n if self._polar:\n self._vars = np.array(['r', 'theta'])\n self.expr1 = self.expr1.replace('x', 'r')\n self.expr1 = self.expr1.replace('y', 'theta')\n self.expr2 = self.expr2.replace('x', 'r')\n self.expr2 = self.expr2.replace('y', 'theta')\n self._expressions = (self.expr1, self.expr2)\n\n # Find the critical points of the expression.\n cp = kwargs.get('cp', True)\n self._cps = None\n if cp:\n sympy_vars = self._gen_sympy_symbols(self._vars)\n try:\n cps = solve(self._expressions, *sympy_vars, set=True)[1]\n except IndexError:\n cps = None\n self._cps = self._parse_nums(cps)\n\n # Parse the expressions again for evaluation (`sin` -> `np.sin`).\n self.expr1 = self._parse_for_eval(self.expr1)\n self.expr2 = self._parse_for_eval(self.expr2)\n\n def __repr__(self):\n if self._polar:\n dx, dy = 'dr/dt', 'd0/dt'\n else:\n dx, dy = 'dx/dt', 'dy/dt'\n return (\"system(\"\n f\"{dx} = {self._repr_expr(self._pprint_exprs[0])}\"\n \", \"\n f\"{dy} = {self._repr_expr(self._pprint_exprs[1])}\"\n \")\")\n\n @property\n def expressions(self):\n return self._expressions\n\n @property\n def critical_points(self):\n return self._cps\n\n @property\n def real_critical_points(self):\n if self._cps is None:\n return None\n return [i for i in self._cps if not isinstance(i[0], complex)]\n\n def __call__(self, x, y):\n if self._polar:\n r, theta = (x ** 2 + y ** 2) ** (1/2), np.arctan2(y, x)\n ret1 = eval(self.expr1, globals(), locals())\n ret2 = eval(self.expr2, globals(), locals())\n res1 = (r + ret1) * np.cos(theta + ret2) - x\n res2 = (r + ret1) * np.cos(theta + ret2) - y\n else:\n res1 = eval(self.expr1, globals(), locals())\n res2 = eval(self.expr2, globals(), locals())\n return np.array([res1, res2])\n\n @staticmethod\n def _repr_expr(expr):\n expr = expr.replace('**', '~~') # for parsing multiplication.\n symbol_parse = lambda x, expr: f\" {x} \".join(expr.split(f'{x}'))\n for sym in ['+', '-', '**']:\n expr = symbol_parse(sym, expr)\n expr = expr.replace('~~', '**')\n expr = symbol_parse('**', expr)\n expr = expr.replace(' ', ' ').replace(' ', ' ')\n expr = expr.replace('( -', '(-')\n return expr\n\n def _pre_alpha_format(self, expr):\n fmt_list = []\n for phrase in self.__class__._allowed_phrases:\n if 
expr.find(phrase) != -1:\n fmt_list.append(phrase)\n expr = expr.replace(phrase, '~')\n return expr, fmt_list\n\n @staticmethod\n def _post_alpha_format(expr, fmt_dict):\n manip_offset = 0\n while True:\n occur = [m.start(0) for m in re.finditer('~', expr)]\n if len(occur) == 0:\n break\n expr = expr[:occur[0]] + fmt_dict[manip_offset] + \\\n expr[occur[0] + 1:] # noqa\n manip_offset += 1\n return expr\n\n @staticmethod\n def _parse_expr(expr):\n # Replace any `^` exponential with `**`.\n expr = expr.replace('^', '**')\n\n # Get the two independent variables.\n letters = \"\".join(re.findall(\"[a-zA-Z]+\", expr))\n\n # Replace all brackets and braces with parenthesis.\n expr = expr.replace(r'\\{', '(')\n expr = expr.replace('[', '(')\n expr = expr.replace(r'\\}', ')')\n expr = expr.replace(']', ')')\n\n # Return the system + variables.\n return expr, list(iter(letters))\n\n @staticmethod\n def _expand_coefficients(expr, var):\n def _regen_parser(expr_):\n return np.array(list(iter(expr_)))\n\n # Get the locations of the variables and find any coefficients before\n # variables (also track the expansion of the string since it is being\n # manipulated while we are parsing through the loop).\n for v in [*var, *['s', 'c', 't']]: # trigonometry\n manip_tracker = 0\n parser = _regen_parser(expr)\n loc = np.where(parser == v)[0]\n for idx in loc:\n if idx != 0:\n if parser[idx - 1].item().isdigit():\n num_indexes, start_pos = [idx - 1], 2\n try:\n while True:\n if idx - start_pos != 0:\n break\n if parser[idx - start_pos].item().isdigit():\n num_indexes.append(idx - start_pos)\n start_pos += 1\n else:\n break\n except IndexError:\n pass\n\n # Verify that it is a valid number and expand.\n if ''.join(parser[num_indexes]).isdigit():\n insert_index = max(num_indexes) + 1 + manip_tracker\n expr = expr[:insert_index] + '*' + expr[insert_index:]\n manip_tracker += 1\n\n if parser[idx - 1].item() in var:\n insert_index = idx + manip_tracker\n expr = expr[:insert_index] + '*' + expr[insert_index:]\n manip_tracker += 1\n\n # For any sets of multiplied parenthesis, add a `*`.\n manip_tracker = 0\n parser = _regen_parser(expr)\n paren_loc = np.where(parser == ')')[0]\n for loc in paren_loc:\n if loc + 1 + manip_tracker == len(expr):\n continue\n if (expr[loc + manip_tracker + 1] == '('\n or expr[loc + manip_tracker + 1].isalpha()\n or expr[loc + manip_tracker + 1].isdigit()):\n if (expr[loc + manip_tracker - 1].isalpha() and\n expr[loc + manip_tracker - 1] in ['n', 's', 'c', 't']):\n continue\n insert_index = loc + 1 + manip_tracker\n expr = expr[:insert_index] + '*' + expr[insert_index:]\n manip_tracker += 1\n\n # Do the same multiplication for the reverse case, namely\n # when there is a variable or other expression before a\n # parenthesis, like `2(x + y)` or `x(2 + x)`.\n manip_tracker = 0\n parser = _regen_parser(expr)\n paren_loc = np.where(parser == '(')[0]\n for loc in paren_loc:\n if loc + 1 + manip_tracker == len(expr):\n continue\n if (expr[loc + manip_tracker - 1] == '('\n or expr[loc + manip_tracker - 1].isalpha()\n or expr[loc + manip_tracker - 1].isdigit()):\n if (expr[loc + manip_tracker - 1].isalpha() and\n expr[loc + manip_tracker - 1] in ['n', 's', 'c', 't']):\n continue\n insert_index = loc + manip_tracker\n expr = expr[:insert_index] + '*' + expr[insert_index:]\n manip_tracker += 1\n\n # Return the parsed string expression.\n return expr\n\n def _parse_for_eval(self, expr):\n phrase_index = 0\n while True:\n try:\n phrase = self.__class__._allowed_phrases[phrase_index]\n except 
IndexError:\n break\n if expr.find(phrase) != -1:\n sub = 'np.' + phrase\n expr = expr[:expr.find(phrase)] + sub + \\\n expr[expr.find(phrase) + len(phrase):] # noqa\n phrase_index += 1\n return expr\n\n @classmethod\n def _gen_sympy_symbols(cls, sym):\n ret_symbols, exist_symbols = [0 for _ in sym], []\n for idx, var in enumerate(sym):\n if var in cls._symbol_cache.keys():\n ret_symbols[idx] = cls._symbol_cache[var]\n exist_symbols.append(ret_symbols[idx])\n non_symbols = np.where(np.array(ret_symbols) == 0)[0]\n new_symbols = np.array(sym)[non_symbols]\n if len(new_symbols) != 0:\n new_gen_symbols = symbols(' '.join(new_symbols))\n if not isinstance(new_gen_symbols, (list, tuple)):\n new_gen_symbols = (new_gen_symbols, )\n for name, symbol in zip(new_symbols, new_gen_symbols):\n cls._symbol_cache[name] = symbol\n return (*exist_symbols, ) + (*new_gen_symbols, )\n return tuple(exist_symbols)\n\n @staticmethod\n def _parse_nums(nums):\n if nums is None:\n return nums\n nums = tuple(nums)\n if len(nums) == 0:\n return None\n if not hasattr(nums[0], '__len__'):\n try:\n return [(float(nums[0]), float(nums[1]))]\n except TypeError:\n # complex numbers (or critical points).\n cp_complex = ([complex(i) for i in nums])\n return [tuple([\n c.real if c.imag == 0 else c for c in cp_complex])]\n try:\n return [\n tuple([float(i) for i in cp]) for cp in nums]\n except TypeError:\n try:\n # complex numbers (or critical points).\n cp_complex = [tuple([complex(i) for i in cp]) for cp in nums]\n return [\n tuple([c.real if c.imag == 0 else c for c in cp])\n for cp in cp_complex]\n except TypeError:\n return None\n\n def plot(self, x_range=None, y_range=None, grid_vecs=15,\n *, ic=None, cp=True, ba=False, t_range=(-50, 50),\n figsize=None, grid=True, plot_type='quiver',\n normalize_quiver=False, quiver_params=None,\n return_figure=False, save_figure=False) -> None:\n \"\"\"Plots an autonomous direction field of vectors.\n\n Parameters:\n x_range: A two-tuple with the min/max values for the x-axis.\n This is optional, if unprovided, then `cp` must be set\n to `True` and the plot will be generated based on the\n critical points. If there are no critical points or the\n only critical point is `(0, 0)`, then the range defaults\n to `(-5, 5)` (for both axes).\n y_range: A two-tuple with the min/max values for the y-axis.\n This is optional, see `x_range` for more information. If\n you want the same range for both the `x` and `y` axes,\n however, then pass it to `x_range` and leave this as `None`.\n grid_vecs: An optional tuple with the number of vectors\n to plot along the x- and y-axes. Defaults to (15, 15).\n ic: An optional tuple of initial conditions for the equation.\n This can also be up to 5 distinct initial conditions.\n cp: Whether to display the critical points of the system. Is\n set to `True` by default, but can be toggled off.\n ba: An optional tuple of x and y coordinates for which the\n method will attempt to find a basin of attraction for.\n t_range: An optional range of `t` values for initial values.\n Defaults to -10 to 10 if not provided and ICs are.\n figsize: An optional tuple containing the size of the figure.\n grid: Whether to keep the grid on or off.\n plot_type: Whether to use `plt.streamplot` or `plt.quiver`,\n basically a stream field versus constant arrows for a\n direction field. Set to `quiver` by default, change to\n `stream` for a stream field. 
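(An editor's hedged usage illustration, not from the original repo:\n\n sys = system('y, -x') # dx/dt = y, dy/dt = -x\n sys.plot((-3, 3), ic=(1, 0), plot_type='stream')\n\n plots closed orbits about the center at (0, 0).)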
Set to `stream` by default\n for polar coordinates, however.\n normalize_quiver: Whether to normalize the length of the\n quiver arrows, e.g. set them to a constant length.\n quiver_params: An optional dictionary with parameters for the\n `plt.quiver` method which plots the vectors.\n return_figure: If set to `True`, this method will return the\n figure instead of displaying it (for saving, etc).\n save_figure: If you want to save the figure, then pass a file\n path to this argument and it will save it to that path.\n \"\"\"\n # Determine the axis ranges.\n if x_range and not y_range:\n y_range = (x_range[0], x_range[1])\n if not all([i is None for i in [x_range, y_range]]):\n if not all([i is not None for i in [x_range, y_range]]):\n raise TypeError(\"Expected either both ranges or neither.\")\n else:\n if self.real_critical_points is None:\n x_range, y_range = (-5, 5), (-5, 5)\n else:\n c_p = np.array(self.real_critical_points)\n if len(c_p) == 1 and (c_p == np.array([[0, 0]])).all():\n x_range, y_range = (-5, 5), (-5, 5)\n else:\n def _round_fourth(x):\n return round(x * 4) / 4\n\n xs, ys = c_p[:, 0], c_p[:, 1]\n x_dist = xs.max() - xs.min()\n if x_dist == 0.0:\n x_dist = 5.0\n x_range = (_round_fourth(xs.min() - 0.3 * x_dist),\n _round_fourth(xs.max() + 0.3 * x_dist))\n y_dist = ys.max() - ys.min()\n if y_dist == 0.0:\n y_dist = 5.0\n y_range = (_round_fourth(ys.min() - 0.3 * y_dist),\n _round_fourth(ys.max() + 0.3 * y_dist))\n\n # Unpack the axis ranges and create the vector grid.\n x_min, x_max = x_range\n y_min, y_max = y_range\n if isinstance(grid_vecs, int):\n grid_vecs = (grid_vecs, grid_vecs)\n x = np.linspace(x_min, x_max, grid_vecs[0])\n y = np.linspace(y_min, y_max, grid_vecs[1])\n X, Y = np.meshgrid(x, y)\n\n # Calculate the derivatives from the system.\n dX, dY = self(X, Y)\n\n # Create the figure.\n if quiver_params is None:\n quiver_params = {}\n quiver_params.setdefault('scale', 15)\n quiver_params.setdefault('headwidth', 5)\n if figsize is None:\n figsize = (6, 6)\n plt.figure(figsize=figsize)\n\n # Normalize the arrows if requested to.\n if normalize_quiver:\n dX /= np.sqrt(dX ** 2 + dY ** 2) + 1e-6\n dY /= np.sqrt(dX ** 2 + dY ** 2) + 1e-6\n\n # Plot the direction field.\n if self._polar:\n plt.streamplot(X, Y, dX, dY, color='k', zorder=5)\n else:\n if plot_type == 'stream':\n plt.streamplot(X, Y, dX, dY, color='k', zorder=5)\n else:\n plt.quiver(X, Y, dX, dY, pivot='mid',\n color='k', zorder=5, **quiver_params)\n plt.xlim(x_range), plt.ylim(y_range)\n if grid:\n plt.grid('on', zorder=0)\n\n # Plot the critical points.\n if not isinstance(cp, bool) or cp is True:\n if hasattr(cp, '__len__'):\n if not hasattr(cp[0], '__len__'): # noqa\n cp = np.array([cp])\n else:\n if self.real_critical_points is not None:\n cp = self.real_critical_points\n else:\n cp = None\n if cp is not None:\n cp = np.array(self.real_critical_points)\n plt.scatter(cp[:, 0], cp[:, 1], c='b', s=10,\n edgecolors='b', alpha=0.8, linewidths=5, zorder=10)\n\n # If initial conditions are provided, plot a trajectory.\n if ic is not None:\n colors = ['red', 'green', 'yellow', 'orange', 'purple']\n if isinstance(ic[0], (int, float)):\n ic = [ic]\n assert len(ic) <= len(colors), \\\n f\"Cannot have more than {len(colors)} initial conditions.\"\n\n for idx, cond in enumerate(ic):\n # Modify the signature of `dv` for the solver.\n def dv_mod(s, t):\n return self(s[0], s[1])\n\n cond = np.array([cond[0], cond[1]])\n t = np.linspace(t_range[0], t_range[1], (t_range[1] - t_range[0]) * 10)\n s = odeint(dv_mod, 
cond, t)\n line = plt.plot(s[:, 0], s[:, 1], color=colors[idx], zorder=15)[0] # noqa\n\n # If a basin of attraction point is provided, try to find it.\n if ba:\n assert len(ba) == 2, \"Expected one critical point for basin of attraction.\" # noqa\n ba = tuple([float(b) for b in ba]) # noqa\n assert ba in self.real_critical_points, \"Expected a critical point.\"\n ba = list(ba)\n coords = list(zip(X.flat, Y.flat))\n\n # Find the trajectories for each coordinate, see if it falls in the basin.\n valid_trajectories, areas = {}, []\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n with tqdm(desc=\"Generating Basin of Attraction\", leave=False,\n total=len(coords)) as p_bar:\n for idx, cond in enumerate(coords):\n # Modify the signature of `dv` for the solver.\n def dv_mod(s, t):\n nonlocal self\n return self(s[0], s[1])\n\n cond = np.array([cond[0], cond[1]])\n t = np.linspace(t_range[0], t_range[1],\n (t_range[1] - t_range[0]) * 10)\n s = odeint(dv_mod, cond, t)\n s = np.array([[round(x, 3) for x in i] for i in s])\n s[s == -0.0] = 0.0\n s = s.tolist()\n if ba in s:\n # Calculate the areas of the trajectories.\n def _shoelace(co):\n co = np.array(co)\n x, y = co[:, 0], co[:, 1]\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1))\n - np.dot(y, np.roll(x, 1)))\n valid_trajectories[tuple(cond.tolist())] = s # noqa\n areas.append(_shoelace(s))\n p_bar.update(1)\n p_bar.close()\n\n # Filter through the trajectories for the outermost one.\n try:\n traj = valid_trajectories[list(valid_trajectories.keys())\n [np.argmax(areas)]] # noqa\n except ValueError: # no basin\n pass\n else:\n traj = np.array(traj) # noqa\n plt.fill(traj[:, 0], traj[:, 1], color='blue')\n\n # Save the figure.\n if save_figure:\n savefig = plt.gcf()\n savefig.savefig(os.path.expanduser(save_figure))\n\n # Display the figure.\n if return_figure:\n return plt.gcf()\n plt.show()\n\n def analyze_stability(self, cp=None):\n \"\"\"Analyzes the stability of the system's critical points.\n\n Parameters:\n cp: A specific critical point to analyze stability of.\n \"\"\"\n # Construct an array representing the Jacobian:\n # [[∂F/∂x, ∂F/∂y], [∂G/∂x, ∂G/∂y]], although with\n # arbitrary variables substituted for `x` and `y`.\n symbols = self._gen_sympy_symbols(self._vars)\n dv = lambda expr, symbol: Derivative(expr, symbol).doit()\n expr1 = self.expr1.replace('np.', '')\n expr2 = self.expr2.replace('np.', '')\n jacobian = [dv(expr1, symbols[0]), dv(expr1, symbols[1]),\n dv(expr2, symbols[0]), dv(expr2, symbols[1])]\n\n # Check if a specific critical point is provided. 
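Otherwise, every critical point is analyzed.\n #\n # [Editor's note] A hedged, self-contained illustration of this linearization\n # step for a made-up system x' = x - y, y' = x + y - 2xy at the critical\n # point (0, 0) (not from the original repo):\n #\n # import numpy as np\n # J = np.array([[1.0, -1.0], # [[dF/dx, dF/dy],\n # [1.0, 1.0]]) # [dG/dx, dG/dy]] evaluated at (0, 0)\n # np.linalg.eigvals(J) # -> [1.+1.j, 1.-1.j]\n # # complex with positive real part => unstable spiral point\n #\n # Check if a specific critical point is provided. 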
Otherwise,\n # determine the stability of all of the critical points.\n if cp is not None:\n if cp not in self.real_critical_points:\n if cp in self.critical_points:\n raise NotImplementedError(\n \"Cannot analyze non-real critical points.\")\n raise ValueError(f\"Invalid critical point {cp}, \"\n f\"should be in {self.real_critical_points}\")\n cp = [cp]\n else:\n cp = self.real_critical_points\n\n # Determine the eigenvalues for the points.\n eigenvalues, jacobians = {}, {}\n for point in cp:\n mat = []\n for expr in jacobian:\n expr = expr.subs(symbols[0], point[0])\n expr = expr.subs(symbols[1], point[1])\n mat.append(float(expr))\n mat = np.array(mat).reshape((2, 2))\n\n # Calculate eigenvalues.\n s_mat = Matrix(mat)\n values = s_mat.eigenvals(multiple=True)\n values = self._parse_nums(values)\n eigenvalues[point] = values[0]\n jacobians[point] = mat\n\n # Return the stability analysis.\n return StabilityAnalysis(eigenvalues, jacobians)\n\n def analyze(self, *args, **kwargs):\n \"\"\"Run an analysis of the provided system.\n\n Namely, this method is a wrapped for the methods `system.plot` and\n `system.analyze_stability`, which respectively create a plot of\n the system and return an analysis of the stability of the eigenvalues.\n\n See those methods for a list of valid parameters.\n \"\"\"\n cp = kwargs.pop('cp', None)\n self.plot(*args, **kwargs)\n return self.analyze_stability(cp)\n\n\n\n","repo_name":"amogh7joshi/mathematics","sub_path":"stability/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":30498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9807456895","text":"from __future__ import print_function\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\nimport functools\nimport inspect\nimport linecache\nimport optparse\nimport os\nimport sys\n\nfrom _line_profiler import LineProfiler as CLineProfiler\n\n# Python 2/3 compatibility utils\n# ===========================================================\nPY3 = sys.version_info[0] == 3\n\n# exec (from https://bitbucket.org/gutworth/six/):\nif PY3:\n import builtins\n exec_ = getattr(builtins, \"exec\")\n del builtins\nelse:\n def exec_(_code_, _globs_=None, _locs_=None):\n \"\"\"Execute code in a namespace.\"\"\"\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")\n\n# ============================================================\n\nCO_GENERATOR = 0x0020\ndef is_generator(f):\n \"\"\" Return True if a function is a generator.\n \"\"\"\n isgen = (f.__code__.co_flags & CO_GENERATOR) != 0\n return isgen\n\n\nclass LineProfiler(CLineProfiler):\n \"\"\" A profiler that records the execution times of individual lines.\n \"\"\"\n\n def __call__(self, func):\n \"\"\" Decorate a function to start the profiler on function entry and stop\n it on function exit.\n \"\"\"\n self.add_function(func)\n if is_generator(func):\n wrapper = self.wrap_generator(func)\n else:\n wrapper = self.wrap_function(func)\n return wrapper\n\n def wrap_generator(self, func):\n \"\"\" Wrap a generator to profile it.\n \"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n g = func(*args, **kwds)\n # The first iterate will not be a .send()\n self.enable_by_count()\n try:\n item = next(g)\n 
finally:\n self.disable_by_count()\n input = (yield item)\n # But any following one might be.\n while True:\n self.enable_by_count()\n try:\n item = g.send(input)\n finally:\n self.disable_by_count()\n input = (yield item)\n return wrapper\n\n def wrap_function(self, func):\n \"\"\" Wrap a function to profile it.\n \"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n self.enable_by_count()\n try:\n result = func(*args, **kwds)\n finally:\n self.disable_by_count()\n return result\n return wrapper\n\n def dump_stats(self, filename):\n \"\"\" Dump a representation of the data to a file as a pickled LineStats\n object from `get_stats()`.\n \"\"\"\n lstats = self.get_stats()\n with open(filename, 'wb') as f:\n pickle.dump(lstats, f, pickle.HIGHEST_PROTOCOL)\n\n def print_stats(self, stream=None, stripzeros=False):\n \"\"\" Show the gathered statistics.\n \"\"\"\n lstats = self.get_stats()\n show_text(lstats.timings, lstats.unit, stream=stream, stripzeros=stripzeros)\n\n def run(self, cmd):\n \"\"\" Profile a single executable statment in the main namespace.\n \"\"\"\n import __main__\n main_dict = __main__.__dict__\n return self.runctx(cmd, main_dict, main_dict)\n\n def runctx(self, cmd, globals, locals):\n \"\"\" Profile a single executable statement in the given namespaces.\n \"\"\"\n self.enable_by_count()\n try:\n exec_(cmd, globals, locals)\n finally:\n self.disable_by_count()\n return self\n\n def runcall(self, func, *args, **kw):\n \"\"\" Profile a single function call.\n \"\"\"\n self.enable_by_count()\n try:\n return func(*args, **kw)\n finally:\n self.disable_by_count()\n\n def add_module(self, mod):\n \"\"\" Add all the functions in a module and its classes.\n \"\"\"\n from inspect import isclass, isfunction\n\n nfuncsadded = 0\n for item in mod.__dict__.values():\n if isclass(item):\n for k, v in item.__dict__.items():\n if isfunction(v):\n self.add_function(v)\n nfuncsadded += 1\n elif isfunction(item):\n self.add_function(item)\n nfuncsadded += 1\n\n return nfuncsadded\n\n\ndef show_func(filename, start_lineno, func_name, timings, unit, stream=None, stripzeros=False):\n \"\"\" Show results for a single function.\n \"\"\"\n if stream is None:\n stream = sys.stdout\n\n template = '%6s %9s %12s %8s %8s %-s'\n d = {}\n total_time = 0.0\n linenos = []\n for lineno, nhits, time in timings:\n total_time += time\n linenos.append(lineno)\n\n if stripzeros and total_time == 0:\n return\n\n stream.write(\"Total time: %g s\\n\" % (total_time * unit))\n if os.path.exists(filename) or filename.startswith(\" 0:\n return book['book_id'].iloc[0]\n else:\n return None\n\ndef book_read(user_id):\n '''Take user_id and return list of book that user has read'''\n books_list = list(books['book_id'])\n book_read_list = list(ratings['book_id'][ratings['user_id'] == user_id])\n return books_list, book_read_list\n\ndef get_new_user_id(title_ratings):\n # Get book IDs for the given book titles\n book_ids = [get_book_id(title) for title in title_ratings.keys()]\n \n # Create a new row for the new user\n new_user_id = ratings['user_id'].max() + 1\n new_user_ratings = pd.DataFrame({\n 'user_id': [new_user_id] * len(book_ids),\n 'book_id': book_ids,\n 'rating': list(title_ratings.values())\n })\n \n # Append the new user's ratings to the existing ratings dataframe\n updated_ratings_df = pd.concat([ratings, new_user_ratings], ignore_index=True)\n \n # Return the ID of the new user and the updated ratings dataframe\n return new_user_id, updated_ratings_df\n\n\ndef get_recommendation_svd(user_id, n=5):\n 
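# [Editor's note] The svd_model.predict(...).est calls below assume a\n # pre-trained matrix-factorization model; a hedged sketch of training one\n # with the surprise library (names are illustrative, not from this repo):\n #\n # from surprise import SVD, Dataset, Reader\n # reader = Reader(rating_scale=(1, 5))\n # data = Dataset.load_from_df(ratings[['user_id', 'book_id', 'rating']], reader)\n # svd_model = SVD(n_factors=100, random_state=42)\n # svd_model.fit(data.build_full_trainset())\n # svd_model.predict(uid, iid).est # estimated rating for user uid, book iid\n 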
'''Give n recommendation to user_id'''\n \n all_books, user_books = book_read(user_id)\n next_books = [book for book in all_books if book not in user_books]\n \n if n <= len(next_books):\n ratings = []\n for book in next_books:\n est = svd_model.predict(user_id, book).est\n ratings.append((book, est))\n ratings = sorted(ratings, key=lambda x: x[1], reverse=True)\n book_ids = [id for id, rate in ratings[:n]]\n return books[books.book_id.isin(book_ids)][['book_id', 'title', 'authors', 'year', 'pages', 'description', 'genres', 'average_rating', 'small_image_url']]\n else:\n print('Please reduce your recommendation request')\n return\n \n\ndef simple_recommender(books, n=6):\n v = books['ratings_count']\n m = books['ratings_count'].quantile(0.95)\n R = books['average_rating']\n C = books['average_rating'].median()\n score = (v/(v+m) * R) + (m/(m+v) * C) \n books['score'] = score\n \n qualified = books.sort_values('score', ascending=False)\n recommended_books = qualified[['book_id', 'title', 'authors', 'year', 'genres',\n 'average_rating','small_image_url']].head(n)\n recommended_books_dict = recommended_books.to_dict(orient='index')\n return recommended_books_dict\n\n\nuser_ratings = {}\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef home():\n error = False\n error_message = \"\"\n if request.method == \"POST\":\n try:\n title = request.form[\"movie_input\"]\n rating = int(request.form[\"rating_input\"])\n if title and rating:\n user_ratings[title] = rating\n flash(f\"Successfully added [{title}]\", \"info\")\n else:\n error = True\n error_message = \"Title or rating is missing.\"\n except Exception as e:\n print(f\"EXCEPTION AT HOME: {e}\")\n error = True\n error_message = \"Invalid rating input.\"\n print(\"User ratings:\",user_ratings)\n get_recommendation(user_ratings)\n recommended_books = simple_recommender(books, 5)\n return render_template(\"home.html\", error=error, error_msg=error_message, recommended_books=recommended_books)\n\n\n@app.route(\"/recommend\",methods=[\"POST\"])\ndef get_recommendation(user_ratings):\n new_user_id, updated_ratings_df = get_new_user_id(user_ratings)\n print(new_user_id)\n recommended_books = get_recommendation_svd(123)\n print(recommended_books)\n\n@app.route(\"/genres/\", methods=[\"GET\"])\ndef genres(genre):\n genres_based_books = books[books.genres.str.contains(genre, case=False)].head(9)\n genres_based_dict = genres_based_books.to_dict(orient='index')\n # print(genres_based_dict)\n # genres_based_dict = json.loads(genres_based_dict)\n return render_template(\"genres.html\", genre_books=genres_based_dict, genre=genre)\n\n\nif __name__ == '__main__':\n app.run(debug = True)","repo_name":"Saurab-Shrestha/Book-recommendation-system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41964193077","text":"import requests, traceback, logging\nfrom timestamp_convertor import timestamp_convertor2\nfrom listing_details_crawler import get_listing_details_table\nfrom orders_crawler import get_orders_pdf\nimport urllib3\nurllib3.disable_warnings()\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')\nfile_handler = logging.FileHandler('tribunals.log')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nsession = requests.session()\n\ncase_details_table = []\napplicant_name_table = 
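[]\n\n# [Editor's note] The crawler below repeats a null-safe date-conversion pattern\n# many times; a hedged refactoring sketch (helper name is hypothetical, not in\n# the original repo):\n#\n# def safe_date(ts):\n# \"\"\"Convert a raw timestamp when present, else pass the falsy value through.\"\"\"\n# return timestamp_convertor2(ts) if ts else ts\n#\n# registration_date = safe_date(case_details_data.get('registration_date'))\n\napplicant_name_table = 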
[]\nrespondant_name_table = []\napplicant_legal_representative_table = []\nrespondent_legal_representative_table = []\nfirst_hearing_details_table = []\nlast_hearing_details_table = []\nnext_hearing_details_table = []\ncase_history_table = []\ncase_history_details_table = []\norder_history_table = []\nias_other_application_table = []\nconnected_cases_table = []\n\n# input_data = {\n# \"location\": \"delhi\",\n# \"filing_no\": \"9910110001532018\"\n# }\n\ndef get_tribunals_detail_data(input_data, token, xsrf_token, laravel_session):\n try:\n filing_no = input_data.get('filing_no')\n bench_name = input_data.get('location')\n\n final_page_payload = f\"search_type=view_details&filing_no={filing_no}&_token={token}&bench_name={bench_name}\"\n \n final_page_headers = {\n 'Accept': 'text/plain, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9,hi;q=0.8',\n 'Connection': 'keep-alive',\n 'Content-Length': f'{len(final_page_payload)}',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Cookie': f'_ga=GA1.3.1468851884.1678795714; _gid=GA1.3.347674307.1678795714; XSRF-TOKEN={xsrf_token}; laravel_session={laravel_session}',\n 'Host': 'nclat.nic.in',\n 'Origin': 'https://nclat.nic.in',\n 'Referer': 'https://nclat.nic.in/display-board/cases',\n 'sec-ch-ua': '\"Chromium\";v=\"110\", \"Not A(Brand\";v=\"24\", \"Google Chrome\";v=\"110\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"Windows\"',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n final_page_response = session.post(\"https://nclat.nic.in/display-board/view_details\", headers=final_page_headers, data=final_page_payload, verify=False)\n if final_page_response.status_code==200:\n logger.info('we have got final page response successfully')\n else:\n logger.info('we have not got proper final page response')\n\n detail_data_list = final_page_response.json().get('data')\n\n case_details_data_list = detail_data_list.get('case_details')\n if case_details_data_list == []:\n logger.info(f'No data found for {input_data}')\n return case_details_table, applicant_name_table, respondant_name_table, applicant_legal_representative_table, respondent_legal_representative_table, first_hearing_details_table, last_hearing_details_table, next_hearing_details_table, case_history_table, case_history_details_table, order_history_table, ias_other_application_table, connected_cases_table\n \n for case_details_data in case_details_data_list:\n filing_no = case_details_data.get('filing_no')\n date_of_filing_format = case_details_data.get('date_of_filing')\n if date_of_filing_format:\n date_of_filing = timestamp_convertor2(date_of_filing_format)\n else:\n date_of_filing = date_of_filing_format\n case_no = case_details_data.get('case_type') +'/'+ case_details_data.get('case_no') +'/'+ case_details_data.get('case_year')\n registration_date_format = case_details_data.get('registration_date')\n if registration_date_format:\n registration_date = timestamp_convertor2(registration_date_format)\n else:\n registration_date = registration_date_format\n if case_details_data.get('status')=='P':\n status = 'Pending'\n elif case_details_data.get('status')=='D':\n status = 'Disposed'\n data = {\n 'filing_no': filing_no,\n 'date_of_filing': date_of_filing,\n 'case_no': case_no,\n 'registration_date': 
registration_date,\n 'status': status,\n }\n case_details_table.append(data)\n logger.info('case_details_table save successfully')\n\n\n party_details_data_list = detail_data_list.get('party_details')\n applicant_name_data = party_details_data_list.get('applicant_name')\n applicant_data = {}\n applicant_data['applicant_name'] = [i.get('name').strip() for i in applicant_name_data]\n applicant_name_table.append(applicant_data)\n logger.info('applicant_name_table save successfully')\n\n respondant_name_data = party_details_data_list.get('respondant_name')\n respondant_data = {}\n respondant_data['respondant_name'] = [i.get('name').strip() for i in respondant_name_data]\n respondant_name_table.append(respondant_data)\n logger.info('respondant_name_table save successfully')\n\n\n legal_representative_data_list = detail_data_list.get('legal_representative')\n applicant_legal_representative_name_data = legal_representative_data_list.get('applicant_legal_representative_name')\n applicant_legal_representative_data = {}\n applicant_legal_representative_data['applicant_legal_representative_name'] = [i.strip() for i in applicant_legal_representative_name_data]\n applicant_legal_representative_table.append(applicant_legal_representative_data)\n logger.info('applicant_legal_representative_table save successfully')\n\n respondent_legal_representative_name_data = legal_representative_data_list.get('respondent_legal_representative_name')\n respondent_legal_representative_data = {}\n respondent_legal_representative_data['respondent_legal_representative_name'] = [i.strip() for i in respondent_legal_representative_name_data]\n respondent_legal_representative_table.append(respondent_legal_representative_data)\n logger.info('respondent_legal_representative_table save successfully')\n\n\n first_hearing_details_data = detail_data_list.get('first_hearing_details')\n if first_hearing_details_data!=[]:\n court_no = first_hearing_details_data.get('court_no')\n hearing_date_format = first_hearing_details_data.get('hearing_date')\n if hearing_date_format:\n hearing_date = timestamp_convertor2(hearing_date_format)\n else:\n hearing_date = hearing_date_format\n coram = first_hearing_details_data.get('coram')\n stage_of_case = first_hearing_details_data.get('stage_of_case')\n data = {\n 'court_no': court_no,\n 'hearing_date': hearing_date,\n 'coram': coram,\n 'stage_of_case': stage_of_case\n }\n first_hearing_details_table.append(data)\n logger.info('first_hearing_details_table save successfully')\n\n\n last_hearing_details_data = detail_data_list.get('last_hearing_details')\n if last_hearing_details_data!=[]:\n court_no = last_hearing_details_data.get('court_no')\n hearing_date_format = last_hearing_details_data.get('hearing_date')\n if hearing_date_format:\n hearing_date = timestamp_convertor2(hearing_date_format)\n else:\n hearing_date = hearing_date_format\n coram = last_hearing_details_data.get('coram')\n stage_of_case = last_hearing_details_data.get('stage_of_case')\n data = {\n 'court_no': court_no,\n 'hearing_date': hearing_date,\n 'coram': coram,\n 'stage_of_case': stage_of_case\n }\n last_hearing_details_table.append(data)\n logger.info('last_hearing_details_table save successfully')\n\n\n next_hearing_details_data = detail_data_list.get('next_hearing_details')\n if next_hearing_details_data!=[]:\n hearing_date_format = next_hearing_details_data.get('hearing_date')\n if hearing_date_format:\n hearing_date = timestamp_convertor2(hearing_date_format)\n else:\n hearing_date = hearing_date_format\n court_no = 
next_hearing_details_data.get('court_no')\n proceedings_summary = next_hearing_details_data.get('coram')\n stage_of_case = next_hearing_details_data.get('stage_of_case')\n data = {\n 'hearing_date': hearing_date,\n 'court_no': court_no,\n 'proceedings_summary': proceedings_summary,\n 'stage_of_case': stage_of_case\n }\n next_hearing_details_table.append(data)\n logger.info('next_hearing_details_table save successfully')\n\n\n case_history_data_list = detail_data_list.get('case_history')\n for count, case_history_data in enumerate(case_history_data_list, start=1):\n sr_no = count\n hearing_date_format = case_history_data.get('hearing_date')\n if hearing_date_format:\n hearing_date = timestamp_convertor2(hearing_date_format)\n else:\n hearing_date = hearing_date_format\n court_no = case_history_data.get('court_no')\n purpose = case_history_data.get('purpose')\n action = 'View'\n data = {\n 'sr_no': sr_no,\n 'hearing_date': hearing_date,\n 'court_no': court_no,\n 'purpose': purpose,\n 'action': action\n }\n case_history_table.append(data)\n case_history_details_table.append(get_listing_details_table(session, hearing_date_format, filing_no, bench_name, token, xsrf_token, laravel_session))\n logger.info('case_history_table save successfully')\n logger.info('case_history_details_table save successfully')\n\n\n order_history_data_list = detail_data_list.get('order_history')\n for count, order_history_data in enumerate(order_history_data_list, start=1):\n sr_no = count\n order_date_format = order_history_data.get('order_date')\n if order_date_format:\n order_date = timestamp_convertor2(order_date_format)\n else:\n order_date = order_date_format\n order_type_symbol = order_history_data.get('order_type')\n if order_type_symbol=='D':\n order_type = 'Daily Order'\n elif order_type_symbol=='J':\n order_type = 'Final Order / Judgement'\n view = 'Download'\n orders_filename = get_orders_pdf(session, bench_name, filing_no, order_date_format, order_type_symbol, token, xsrf_token, laravel_session)\n data = {\n 'sr_no': sr_no,\n 'order_date': order_date,\n 'order_type': order_type,\n 'view': view,\n 'orders_url': orders_filename\n }\n order_history_table.append(data)\n logger.info('order_history_table save successfully')\n\n\n ias_other_application_data_list = detail_data_list.get('ias_other_application')\n for count, ias_other_application_data in enumerate(ias_other_application_data_list, start=1):\n sr_no = count\n filing_no = ias_other_application_data.get('filing_no')\n case_no = ias_other_application_data.get('case_no')\n date_of_filing_format = ias_other_application_data.get('filing_date')\n if date_of_filing_format:\n date_of_filing = timestamp_convertor2(date_of_filing_format)\n else:\n date_of_filing = date_of_filing_format\n registration_date_format = ias_other_application_data.get('registration_date')\n if registration_date_format:\n registration_date = timestamp_convertor2(registration_date_format)\n else:\n registration_date = registration_date_format\n if ias_other_application_data.get('status')=='P':\n status = 'Pending'\n elif ias_other_application_data.get('status')=='D':\n status = 'Disposed'\n data = {\n 'sr_no': sr_no,\n 'filing_no': filing_no,\n 'case_no': case_no,\n 'date_of_filing': date_of_filing,\n 'registration_date': registration_date,\n 'status': status\n }\n ias_other_application_table.append(data)\n logger.info('ias_other_application_table save successfully')\n\n\n connected_cases_data_list = detail_data_list.get('connected_cases')\n for count, connected_cases_data in 
enumerate(connected_cases_data_list, start=1):\n sr_no = count\n filing_no = connected_cases_data.get('filing_no')\n case_no = connected_cases_data.get('case_no')\n date_of_filing_format = ias_other_application_data.get('filing_date')\n if date_of_filing_format:\n date_of_filing = timestamp_convertor2(date_of_filing_format)\n else:\n date_of_filing = date_of_filing_format\n registration_date_format = ias_other_application_data.get('registration_date')\n if registration_date_format:\n registration_date = timestamp_convertor2(registration_date_format)\n else:\n registration_date = registration_date_format\n if connected_cases_data.get('status')=='P':\n status = 'Pending'\n elif connected_cases_data.get('status')=='D':\n status = 'Disposed'\n data = {\n 'sr_no': sr_no,\n 'filing_no': filing_no,\n 'case_no': case_no,\n 'date_of_filing': date_of_filing,\n 'registration_date': registration_date,\n 'status': status\n }\n connected_cases_table.append(data)\n logger.info('connected_cases_table save successfully')\n\n return case_details_table, applicant_name_table, respondant_name_table, applicant_legal_representative_table, respondent_legal_representative_table, first_hearing_details_table, last_hearing_details_table, next_hearing_details_table, case_history_table, case_history_details_table, order_history_table, ias_other_application_table, connected_cases_table\n\n except Exception as e:\n logger.info(f\"Error in get_tribunals_detail_data :- {traceback.format_exc()}\")\n\n\n# print(get_tribunals_detail_data(input_data, \"U4txk3uhMdDVEbITXFdMuIppsTHrIuApCEmTNNb5\", \"eyJpdiI6Im9RMDNCL0VLTWdJMGNLSDhOd0d2SVE9PSIsInZhbHVlIjoiM2pSU3lMOXBwZ2lLNUZlSEp2NnJTQWx6M1Jyc3UyMHpoYmJ6bGFKNFdiRFNOb1JKME9ONDRUd3FxUVVqYStKRFRkTU1QVUFKa0tWclB1R0dDRzI2VDVhUzlyTGs5anFSRXRDMTZBMHpFREV5dE11QXc0YS9UUlFHK1NpRElnTmkiLCJtYWMiOiI1ZjdjNzM4ZmQ4ZWVjNTUzZGJhNTc1ZDAyNjhiYzFmZTEzMzEzODc5M2Q5MGIwNGI0NmJkMGQ4ZjZkOWRiYmMwIiwidGFnIjoiIn0%3D\", \"eyJpdiI6InZTY1gremZtaHJ4QUdYYVhUT3dJN3c9PSIsInZhbHVlIjoiUzJ5N1ZQOTFGbVpTa2ZSUEx5VCtpb1NLaHdEcHQ1cXRqM1ZMYW9EVWE2L3o3VzRHR1B0TTIwLzcyNk1IenRqaC9HTGdlUE1lVGgwZTk5bGtKb0huaXArek9WNjhhL0EwaGtiaTh6dkx0cTdXRWlCNm1pTFk1T3NNRENyQ2MvakgiLCJtYWMiOiIzMTM3NzFjNDMzMjEyMmMyNmM1OTk0N2MzMzg1ZWNlZWEzYjZkZmU4NDgwZTNhZjUwNjRjMDQ4OWMwMDFlZjVjIiwidGFnIjoiIn0%3D\"))\n\n","repo_name":"Shubham100718/dockerize_python_app","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":15896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44171588131","text":"from finance_complaint.constant.model import S3_MODEL_BUCKET_NAME, S3_MODEL_DIR_KEY\nfrom finance_complaint.constant.prediction_pipeline_config.file_config import S3_DATA_BUCKET_NAME, PYSPARK_S3_ROOT\nfrom finance_complaint.entity.config_entity import PredictionPipelineConfig\nfrom finance_complaint.entity.schema import FinanceDataSchema\nfrom pyspark.sql import DataFrame\nfrom finance_complaint.cloud_storage import SimpleStorageService\nfrom finance_complaint.exception import FinanceException\nimport os, sys\nfrom finance_complaint.config.spark_manager import spark_session\nfrom finance_complaint.logger import logger\nfrom typing import List, Tuple\nfrom finance_complaint.entity.estimator import S3FinanceEstimator\n\n\nclass PredictionPipeline:\n\n def __init__(self, pipeline_config=PredictionPipelineConfig()) -> None:\n self.__pyspark_s3_root = PYSPARK_S3_ROOT\n self.pipeline_config: PredictionPipelineConfig = pipeline_config\n self.s3_storage: SimpleStorageService = 
SimpleStorageService(s3_bucket_name=S3_DATA_BUCKET_NAME,\n region_name=pipeline_config.region_name)\n self.schema: FinanceDataSchema = FinanceDataSchema()\n\n def read_file(self, file_path: str) -> DataFrame:\n try:\n file_path = self.get_pyspark_s3_file_path(dir_path=file_path)\n df = spark_session.read.parquet(file_path)\n return df.limit(100)\n except Exception as e:\n raise FinanceException(e, sys)\n\n def write_file(self, dataframe: DataFrame, file_path: str) -> bool:\n try:\n\n if file_path.endswith(\"csv\"):\n file_path = os.path.dirname(file_path)\n\n file_path = self.get_pyspark_s3_file_path(dir_path=file_path)\n print(file_path)\n logger.info(f\"writing parquet file at : {file_path}\")\n dataframe.write.parquet(file_path,mode=\"overwrite\")\n return True\n except Exception as e:\n raise FinanceException(e, sys)\n\n def get_pyspark_s3_file_path(self, dir_path) -> str:\n return os.path.join(self.__pyspark_s3_root, dir_path)\n\n def is_valid_file(self, file_path) -> bool:\n \"\"\"\n file_path\n \"\"\"\n try:\n dataframe: DataFrame = self.read_file(file_path)\n columns = dataframe.columns\n missing_columns = []\n for column in self.schema.required_prediction_columns:\n if column not in columns:\n missing_columns.append(column)\n if len(missing_columns) > 0:\n logger.info(f\"Missing columns: {missing_columns}\")\n logger.info(f\"Existing columns: {columns}\")\n return False\n return True\n except Exception as e:\n raise FinanceException(e, sys)\n\n def get_valid_files(self, file_paths: List[str]) -> Tuple[List[str], List[str]]:\n \"\"\"\n Returns: Tuple containing two items\n item1: valid file name list\n item2: invalid file name list\n \"\"\"\n try:\n valid_file_paths: List[str] = []\n invalid_file_paths: List[str] = []\n for file_path in file_paths:\n is_valid = self.is_valid_file(file_path=file_path)\n if is_valid:\n valid_file_paths.append(file_path)\n else:\n invalid_file_paths.append(file_path)\n return valid_file_paths, invalid_file_paths\n\n except Exception as e:\n raise FinanceException(e, sys)\n\n def start_batch_prediction(self):\n try:\n input_dir = self.pipeline_config.input_dir\n files= [input_dir]\n logger.info(f\"Files: {files}\")\n valid_files, invalid_files = self.get_valid_files(file_paths=files)\n invalid_files = valid_files\n if len(invalid_files) > 0:\n logger.info(f\"{len(invalid_files)}: invalid file found:\")\n failed_dir = self.pipeline_config.failed_dir\n for invalid_file in invalid_files:\n logger.info(f\"Moving invalid file {invalid_file} to failed dir: {failed_dir}\")\n #self.s3_storage.move(source_key=invalid_file, destination_dir_key=failed_dir)\n \n\n if len(valid_files) == 0:\n logger.info(f\"No valid file found.\")\n return None\n\n estimator = S3FinanceEstimator(bucket_name=S3_MODEL_BUCKET_NAME, s3_key=S3_MODEL_DIR_KEY)\n for valid_file in valid_files:\n logger.info(\"Staring prediction of file: {valid_file}\")\n dataframe: DataFrame = self.read_file(valid_file)\n #dataframe = dataframe.drop(self.schema.col_consumer_disputed)\n transformed_dataframe = estimator.transform(dataframe=dataframe)\n required_columns = self.schema.required_prediction_columns + [self.schema.prediction_label_column_name]\n logger.info(f\"Saving required_columns: {required_columns}\")\n transformed_dataframe=transformed_dataframe.select(required_columns)\n transformed_dataframe.show()\n prediction_file_path = os.path.join(self.pipeline_config.prediction_dir, os.path.basename(valid_file))\n logger.info(f\"Writing prediction file at : [{self.pipeline_config.prediction_dir}] 
\")\n self.write_file(dataframe=transformed_dataframe, file_path=prediction_file_path)\n archive_file_path = os.path.join(self.pipeline_config.archive_dir, os.path.basename(valid_file))\n logger.info(f\"Arching valid input files at: [{archive_file_path}]\")\n self.write_file(dataframe=dataframe, file_path=archive_file_path)\n\n\n except Exception as e:\n raise FinanceException(e, sys)\n","repo_name":"Machine-Learning-01/finance-complaint","sub_path":"finance_complaint/pipeline/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} +{"seq_id":"1152107456","text":"import sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\n\ngraph = [[float('inf')]*(N+1) for _ in range(N+1)]\nfor i in range(1, N+1):\n graph[i][i] = 0\n\n\nfor _ in range(M):\n u, v, b = map(int, input().rstrip().split())\n if b == 1:\n graph[v][u] = 0\n graph[u][v] = 0\n else:\n graph[u][v] = 0\n graph[v][u] = 1\n\nfor k in range(1, N+1):\n for i in range(1, N+1):\n for j in range(1, N+1):\n graph[i][j] = min(graph[i][j], graph[i][k]+graph[k][j])\n\nS = int(input())\nans = [0] * S\nfor i in range(S):\n s, e = map(int, input().rstrip().split())\n ans[i] = graph[s][e]\n\nprint(*ans, sep='\\n')\n\n# print(*graph, sep='\\n')\n","repo_name":"hjyoon/baekjoon-answers","sub_path":"_11000/11562.py","file_name":"11562.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15195096435","text":"#!/usr/bin/python\n\nimport bluetooth, os, time, sys, threading\n\n# The in directory for new pcap files\nPCAP_DIR = \"/tmp/pcaps\"\nGPSPATH = '/tmp/gpsfifo'\nSERVICE_NAME = \"EyeOfTechnology\"\nLOGFILE = \"/var/log/iot.log\"\nis_running = True\n\n\ndef _curr_time():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef _format_log(logstring):\n return _curr_time() + \": \" + logstring + \"\\n\"\n\n\"\"\"\n def bt_loop(ld):\n '''\n Connects to a device and then transmits pcaps.\n '''\n ld.write(_format_log(\"Staring service\"))\n sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n ld.write(_format_log(\"Got bluetooth socket\"))\n\n # All the services with this name should be fine\n service_desc = get_connection(ld)\n\n # Getting service information \n port = service_desc['port']\n target_address = service_desc['host']\n\n # Connecting to the device\n sock.connect((target_address, port))\n ld.write(_format_log(\"Connected to android device\"))\n while True:\n # Loop through the in directory and send over files\n time.sleep(2)\n files = os.listdir(PCAP_DIR)\n for f in files:\n fd = open(PCAP_DIR + '/' + f, 'rb')\n temp = fd.read()\n sock.send(temp)\n ld.write(_format_log(\"Sending \" + f))\n fd.close()\n os.remove(PCAP_DIR + \"/\" + f)\n \"\"\"\n\n\n\"\"\"\n def receive_loop(ld):\n ld.write(_format_log(\"Staring service\"))\n sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n ld.write(_format_log(\"Got bluetooth socket\"))\n\n # All the services with this name should be fine\n service_desc = get_connection(ld)\n\n # Getting service information \n port = service_desc['port']\n target_address = service_desc['host']\n\n # Connecting to the device\n sock.connect((target_address, port))\n ld.write(_format_log(\"Connected to android device\"))\n while True:\n time.sleep(2)\n print \"Getting data\"\n data = sock.recv(1024)\n print \"Data: \" + data\n \"\"\"\n\n\ndef send_data(ld, sock):\n global 
is_running\n while is_running:\n try:\n # Loop through the in directory and over files\n time.sleep(2)\n files = os.listdir(PCAP_DIR)\n for f in files:\n fn, fe = os.path.splitext(f)\n if fe == \".pcap\":\n fd = open(PCAP_DIR + '/' + f, 'rb')\n temp = fd.read()\n sock.send(str(len(temp)).zfill(8))\n sock.sendall(temp)\n #ld.write(_format_log(\"Sending \" + f))\n fd.close()\n os.remove(PCAP_DIR + \"/\" + f)\n except Exception as e:\n is_running = False\n #ld.write(_format_log(str(e)))\n #ld.write(_format_log(\"Send thread stopped\"))\n\n\ndef receive_data(ld, sock):\n global is_running\n while is_running:\n try:\n time.sleep(7)\n data = sock.recv(200)\n with open (GPSPATH, 'w') as fd:\n fd.write(data + \";\\n\")\n except Exception as e:\n is_running = False\n #ld.write(_format_log(str(e)))\n #ld.write(_format_log(\"Receive thread stopped\"))\n\n\ndef connect_bluetooth(ld):\n socket = get_bluetooth_socket(ld)\n # any service with the name should be fine\n service = get_bluetooth_services(ld, SERVICE_NAME)[0]\n socket.connect((service['host'], service['port']))\n\n #ld.write(_format_log(\"Connected to android device\"))\n return socket\n\n\ndef get_bluetooth_services(ld, name):\n services = []\n while len(services) < 1:\n try:\n # Search for the service\n services = bluetooth.find_service(name=name)\n except bluetooth.btcommon.BluetoothError as e:\n error_msg = str(e)\n #if not error_msg == \"error accessing bluetooth device\":\n #ld.write(_format_log(str(e)))\n return services\n\n\ndef get_bluetooth_socket(ld):\n sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n #ld.write(_format_log(\"Got bluetooth socket\"))\n return sock\n\n\ndef setup_logs(path):\n if os.path.isfile(path):\n return open(path, 'a', 0)\n else:\n return open(path, 'w', 0)\n\n\ndef start_threads(ld, sock):\n sock.setblocking(True)\n s = threading.Thread(target=send_data, args=(ld, sock))\n r = threading.Thread(target=receive_data, args=(ld, sock))\n s.start()\n r.start()\n return s, r\n\n\ndef handle_exception(ld, e, sock):\n is_running = False\n if sock is not None:\n sock.close()\n\n #ld.write(_format_log(str(e)))\n #ld.write(_format_log(\"Out of send and receive threads\"))\n\n is_running = True\n #ld.write(_format_log(\"Restarting service\"))\n\n\nif __name__==\"__main__\":\n #ld = setup_logs(LOGFILE)\n #ld.write(_format_log(\"Starting service\"))\n ld = None\n\n while True:\n socket = None\n is_running = True\n try:\n socket = connect_bluetooth(ld)\n\n s, r = start_threads(ld, socket)\n s.join()\n r.join()\n except Exception as e:\n handle_exception(ld, e, socket)\n","repo_name":"praetorian-inc/rpi-setup","sub_path":"rpi/bt.py","file_name":"bt.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"24001271488","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.rst') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='python-spore-codec',\n version='0.3.1',\n description='Spore codec for CoreAPI specification',\n long_description=readme,\n author='Arnaud Grausem',\n author_email='arnaud.grausem@gmail.com',\n url='https://github.com/unistra/python-spore-codec',\n license='GPLv3',\n include_package_data=True,\n install_requires=['coreapi'],\n # extra_requires={\n # 'test': ['coverage', 'pytest', 'pytest-cov']\n # },\n packages=find_packages(exclude=('tests', 'docs')),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 
'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries',\n 'Environment :: Web Environment',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n keywords='coreapi spore api-description rest-api'\n)\n","repo_name":"agrausem/python-spore-codec","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71274324090","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing as skpp\nfrom sklearn.decomposition import PCA\n\nfrom enum import Enum\n\nimport util\nfrom constants import DataDir\nimport constants as gv\n\ndef window_stack(arr, width=16, stepsize=None):\n if stepsize is None:\n stepsize = int(width/2)\n\n nWindows = int(arr.shape[0] / stepsize -1)\n indexer = np.arange(width)[None, :] + stepsize*np.arange(nWindows)[:,None]\n arr = arr[indexer]\n return arr\n\ndef get_df():\n print(\"Reading data\")\n df = util.get_clean_dataframe_from_file(DataDir.all_tables)\n df = util.convert_input_column_type(df)\n return df\n\ndef get_min_max_data(df:pd.DataFrame = None):\n if df is None:\n df = get_df()\n\n data = gv.x_y(*util.get_input_output(df, class_type='binary'))\n scaler = skpp.MinMaxScaler()\n data.x = scaler.fit_transform(data.x)\n return data\n\n\nclass network_window:\n class window_type(Enum):\n ALL_BENIGN = 0\n ALL_HETERO = 1\n MIXED = 2\n\n def __init__(self, packetWindows, nMaliciousGivenWindow):\n self.windows = packetWindows\n self.nMalicious = nMaliciousGivenWindow\n self.windowType = self.get_window_type()\n\n def perform_pca(self):\n pca = PCA(0.99, random_state=42)\n shape = self.windows.shape\n pcaShape = (-1, self.windows.shape[-1])\n benign = self.get_n_malicious(0)\n benign = np.reshape(benign, pcaShape)\n self.windows = np.reshape(self.windows,pcaShape)\n pca.fit(benign)\n self.windows = pca.transform(self.windows)\n self.windows = np.reshape(self.windows, shape[:2] + (self.windows.shape[-1],))\n print(\"%d PCA components\" %(self.windows.shape[-1]))\n return\n\n def get_window_type(self):\n if self.nMalicious.sum(axis=-1) == 0:\n return network_window.window_type.ALL_BENIGN\n elif 0 in self.nMalicious:\n return network_window.window_type.MIXED\n else:\n return network_window.window_type.ALL_HETERO\n\n def get_at_least_n_malicious(self, nMalicious):\n assert self.windows.shape[0] == self.nMalicious.shape[0]\n return self.windows[self.nMalicious >= nMalicious]\n\n def get_n_malicious(self, nMalicious):\n assert self.windows.shape[0] == self.nMalicious.shape[0]\n return self.windows[self.nMalicious == nMalicious]\n\n def _get_homogeneous_benign_mask(self):\n assert self.windows.shape[0] == self.nMalicious.shape[0]\n return self.nMalicious == 0\n\n def get_homogeneous_benign(self):\n homoMask = self._get_homogeneous_benign_mask()\n nw = network_window(\n self.windows[homoMask], self.nMalicious[homoMask]\n )\n assert nw.windowType == network_window.window_type.ALL_BENIGN\n return nw\n\n def get_only_heterogeneous(self):\n heteroMask = ~self._get_homogeneous_benign_mask()\n nw = network_window(\n self.windows[heteroMask], self.nMalicious[heteroMask]\n )\n assert nw.windowType == network_window.window_type.ALL_HETERO\n return nw\n\n @staticmethod\n def get_window_data(nTimeSteps, firstN=None):\n data 
= get_min_max_data()\n        data.x = window_stack(data.x[:firstN], nTimeSteps)\n        data.y = window_stack(data.y.to_numpy()[:firstN], nTimeSteps)\n        data.y = data.y.sum(axis=-1)\n        nw = network_window(data.x, data.y)\n        return nw\n\n","repo_name":"kolxy/cyber-security-analysis","sub_path":"timeDataProcessing.py","file_name":"timeDataProcessing.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}{"seq_id":"40149438265","text":"\"\"\"\nGiven an array of nuts and an array of bolts, match the nuts to the bolts of the same size.\nAssume a 1:1 mapping between nuts and bolts.\nNuts cannot be compared against each other, i.e. no sorting them.\nBolts cannot be compared against each other, i.e. no sorting them.\nNuts can only be compared to bolts and vice versa.\n\nhttp://www.geeksforgeeks.org/nuts-bolts-problem-lock-key-problem/\n\"\"\"\n\n\ndef match(nuts_arr, bolts_arr):\n    \"\"\"\n    Given: nuts_arr and bolts_arr of equal length\n    :param nuts_arr:\n    :param bolts_arr:\n    :return: Returns a set of matched pairs of nuts and bolts\n    \"\"\"\n    _match(nuts_arr, bolts_arr, 0, len(nuts_arr) - 1)\n    # print nuts_arr, bolts_arr\n    # bolts_arr at this point is almost sorted but needs 1 rotation, e.g. [4, 1, 2, 3]\n    return set(zip(nuts_arr, bolts_arr[1:] + bolts_arr[:1]))\n\n\ndef _match(nuts_arr, bolts_arr, lo, hi):\n    \"\"\"\n    :param nuts_arr:\n    :param bolts_arr:\n    :param lo: start index, int\n    :param hi: end index, int\n    :return:\n    \"\"\"\n    # print nuts_arr, bolts_arr, lo, hi\n    if hi <= lo:\n        # for a 0-sized list, lo = 0, hi = len(list) = 0\n        return\n\n    # instead of bolts_arr[hi], you can choose the seed pivot as the median of 3 random\n    # elements drawn from bolts_arr\n    # choose the last element of the bolts array for the nuts partition\n    j = _partition(nuts_arr, lo, hi, bolts_arr[hi])\n    # print j\n\n    # now using the partition of nuts, choose that for the bolts partition\n    _partition(bolts_arr, lo, hi, j)\n\n    # recurse on subarrays but excluding the chosen partition j\n    _match(nuts_arr, bolts_arr, lo, j-1)\n    _match(nuts_arr, bolts_arr, j+1, hi)\n\n\ndef _partition(arr, lo, hi, pivot):\n    \"\"\"\n    Regular partition function of quicksort, but with an extra argument pivot\n    :param arr:\n    :param lo:\n    :param hi:\n    :param pivot:\n    :return:\n    \"\"\"\n    i = lo\n    j = hi\n    while True:\n        while arr[i] < pivot:\n            i += 1\n            if i == hi:\n                break\n\n        while arr[j] > pivot:\n            j -= 1\n            if j == lo:\n                break\n\n        if i >= j:\n            break\n        _swap(arr, i, j)\n\n    _swap(arr, lo, j)\n    return j\n\n\ndef _swap(a, index1, index2):\n    \"\"\"\n    Swap 2 elements in a list\n    :param a: a list\n    :param index1: index1 to be swapped, int\n    :param index2: index2 to be swapped, int\n    :return: input list a with elements at index1, index2 swapped\n    \"\"\"\n    a[index1], a[index2] = a[index2], a[index1]\n    return a\n
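\n\nif __name__ == '__main__':\n    # Illustrative usage (an added sketch, not part of the original module):\n    # with distinct sizes and a 1:1 nut/bolt mapping, every nut should come\n    # back paired with the equal-sized bolt.\n    print(match([4, 1, 3, 2], [3, 2, 4, 1]))  # ideally {(1, 1), (2, 2), (3, 3), (4, 4)}\n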
","repo_name":"sureshrmdec/algorithms","sub_path":"app/sorting/nuts_bolts_match.py","file_name":"nuts_bolts_match.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"28470496714","text":"def solution(s):\n    s = s.lower().capitalize()\n    answer = ''\n    for i in range(len(s)):\n        if i != 0 and s[i - 1] == \" \":\n            answer += s[i].upper()\n        else:\n            answer += s[i]\n    return answer\n\n\n# Test Cases\nprint(solution(\"3people unFollowed me\"))\nprint(solution(\"for the last week\"))\n\n\n\"\"\"\n# This problem has been revised. As a result, the function signature and test cases have changed, and old code may no longer work.\n# To apply the new function signature, press the [Reset Code] button. However, pressing the [Reset Code] button erases any code you are writing.\ndef Jaden_Case(s):\n    # Complete the function\n    return s.title()\n\n# The code below prints test output.\nprint(Jaden_Case(\"3people unFollowed me for the last week\"))\n\n\n?!\n
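# (Added note: str.title() also capitalizes a letter that follows a digit --\n# \"3people\".title() == \"3People\" -- while the accepted solution above keeps\n# \"3people\", which is probably why this legacy snippet is flagged \"?!\".)\n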
\"\"\"\n","repo_name":"909ma/Repository-for-Study","sub_path":"프로그래머스/Python_JadenCase 문자열 만들기.py","file_name":"Python_JadenCase 문자열 만들기.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"18065934489","text":"from kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock \nfrom random import randint\nfrom kivy.config import Config\nfrom kivy.core.audio import SoundLoader\nfrom kivy.core.window import Window\nfrom kivy.graphics import Color\n\n#icon\nConfig.set('kivy','window_icon','icon.png')\n\n#increase the speed of ball\ngame_level = 5\n\n#SFX\nbgm = SoundLoader.load('ball_hit.wav')\nmiss = SoundLoader.load('miss.wav')\n\n#BgColor (RGBA)\nWindow.clearcolor = (52/255.0, 111/255.0, 207/255.0, 1)\n\n\n\nclass PongPaddle(Widget):\n\tscore = NumericProperty(0)\n\n\tdef bounce_ball(self, ball):\n\n\t\t if self.collide_widget(ball):\n\t\t \tbgm.play()\n\t\t \tball.velocity_x *= -1 # comment this line out to use the commented line below instead\n# to increase the speed of the ball on each collision with the paddle, uncomment the line below\n\t\t\t#ball.velocity_x *= -1.1\n\n\nclass PongBall(Widget):\n\n\t# velocity of the ball on x and y axis\n    velocity_x = NumericProperty(0)\n    velocity_y = NumericProperty(0)\n\n    # referencelist property so we can use ball.velocity as\n    velocity = ReferenceListProperty(velocity_x, velocity_y)\n\n    # ``move`` function will move the ball one step. This\n    # will be called in equal intervals to animate the ball \n    def move(self):\n        self.pos = Vector(*self.velocity) + self.pos \n\n\nclass PongGame(Widget): #moving the ball by calling the move() and other objects\n\t\n\tball = ObjectProperty(None) \n\tleft_player = ObjectProperty(None)\n\tright_player = ObjectProperty(None)\n\n\tdef serve_ball(self):\n\t\tself.ball.velocity = Vector(game_level, 0).rotate(randint(0, 360))\n\n\tdef update(self, dt):\n\t\tself.ball.move()\n\n\t\t# bounce off at top/bottom\n\t\tif (self.ball.y < 0) or (self.ball.y > self.height - 15):\n\t\t\tself.ball.velocity_y *= -1\n\n\t\t# bounce off left\n\t\tif self.ball.x < 0:\n\t\t\tself.ball.velocity_x *= -1\n\t\t\tself.right_player.score += 1\n\t\t\tmiss.play()\n\n\t\t# bounce off right\n\t\tif self.ball.x > self.width - 15:\n\t\t\tself.ball.velocity_x *= -1\n\t\t\tself.left_player.score += 1\n\t\t\tmiss.play()\n\n\t\t#not sure if it works, but it's meant to prevent the ball looping along the y axis in a straight line\n\t\t#if self.ball.velocity == Vector(self.center_x, game_level):\n\t\t#\tPongApp().run()\n\n\t\t# making ball bounce on paddle\n\t\tself.left_player.bounce_ball(self.ball)\n\t\tself.right_player.bounce_ball(self.ball)\n\n# on_touch_move()-\tWhen we drag our finger on the screen\n\tdef on_touch_move(self, touch):\n\n\t\tif touch.x < self.width * 1/4:\n\t\t\tself.left_player.center_y = touch.y\n\n\t\tif touch.x > self.width * 3/4:\n\t\t\tself.right_player.center_y = touch.y\n\t\t\n\t\telse:\n\t\t\tpass\n\n\nclass PongApp(App):\t# Building the kivy App\n\tdef build(self):\n\n\t\tgame = PongGame()\n\n\t\tgame.serve_ball() \n\n\t\tClock.schedule_interval(game.update,1.0/60.0) # drawing 60 frames of the ball per second\n\n\t\treturn game\n\nPongApp().run()\n","repo_name":"Ritwikrajsingh/PingPong","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"6330357518","text":"import tensorflow as tf\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\n\r\n###########################################################################################\r\n# set gpu\r\n###########################################################################################\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\nif gpus:\r\n    # Restrict TensorFlow to only allocate xGB of memory on the first GPU\r\n    try:\r\n        tf.config.experimental.set_memory_growth(gpus[0], True)\r\n        logical_gpus = tf.config.experimental.list_logical_devices('GPU')\r\n        print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\r\n    except RuntimeError as e:\r\n        print(e)\r\n\r\n###########################################################################################\r\n# set dataset\r\n###########################################################################################\r\n\r\nlabel = ['fake', 'live']\r\npath = 'E:/Iris dataset/Warsaw_labeling_iris_data/'\r\n\r\ntrain_path = f'{path}/train_cyclegan'\r\nval_path = f'{path}/test_cyclegan/val'\r\ntest_path = f'{path}/test_cyclegan/test'\r\n\r\n# train_path = f'{path}/train_crop'\r\n# val_path = f'{path}/test_crop/known'\r\n# test_path = f'{path}/test_crop/unknown'\r\n\r\n\r\ntrain_data = os.listdir(train_path)\r\ntest_data = os.listdir(test_path)\r\n\r\n# original data\r\n# batchsz = 4\r\n# traincnt = 4513\r\n# testcnt = 4510\r\n# valcnt = 2990\r\n\r\n# gan fake data\r\nbatchsz = 4\r\ntraincnt = 3688\r\ntestcnt = 4700\r\nvalcnt = 1948\r\n\r\ntrain_datagen = ImageDataGenerator(rescale=1./255)\r\ntrain_generator = train_datagen.flow_from_directory(train_path, target_size=(224, 224), batch_size=batchsz)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\ntest_generator = test_datagen.flow_from_directory(test_path, target_size=(224, 224), batch_size=batchsz)\r\n\r\nval_datagen = ImageDataGenerator(rescale=1./255)\r\nval_generator = val_datagen.flow_from_directory(val_path, target_size=(224, 224), batch_size=batchsz)\r\n\r\ninput_shape = (224, 224, 3)\r\n\r\n###########################################################################################\r\n# model\r\n###########################################################################################\r\n\r\nmodel_d = tf.keras.applications.DenseNet121(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\r\n\r\nx = model_d.output\r\n\r\nx = tf.keras.layers.GlobalAveragePooling2D()(x)\r\n# x = tf.keras.layers.BatchNormalization()(x)\r\n# x = tf.keras.layers.Dropout(0.5)(x)\r\nx = tf.keras.layers.Dense(1024, activation='relu')(x)\r\nx = tf.keras.layers.Dense(512, activation='relu')(x)\r\n# x = tf.keras.layers.BatchNormalization()(x)\r\n# x = tf.keras.layers.Dropout(0.5)(x)\r\n\r\npreds = tf.keras.layers.Dense(2, activation='softmax')(x) #FC-layer\r\n\r\nmodel = tf.keras.Model(inputs=model_d.input, outputs=preds)\r\n\r\n
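# (Added note) Transfer learning setup: the ImageNet-pretrained backbone is kept\r\n# frozen and only the last 8 layers -- the classification head built above\r\n# (global pooling, the two Dense layers and the softmax) plus the tail of the\r\n# backbone -- are left trainable.\r\n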
for layer in model.layers[:-8]:\r\n    layer.trainable = False\r\n\r\nfor layer in model.layers[-8:]:\r\n    layer.trainable = True\r\n\r\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n###########################################################################################\r\n# set ckp\r\n###########################################################################################\r\n\r\nckp_path = '../training/densnet121/warsaw/gan_fake_war_2fold/ckp-{epoch:04d}.ckpt'\r\nckp_dir = os.path.dirname(ckp_path)\r\n\r\nlatest = tf.train.latest_checkpoint(ckp_dir)\r\nprint(ckp_dir)\r\nmodel.load_weights(latest)\r\n\r\n###########################################################################################\r\n# predict\r\n###########################################################################################\r\n\r\nmodel.evaluate(train_generator, verbose=1)","repo_name":"dmdm2002/Tensorflow_BasicModels","sub_path":"Dense121/dense121_predict.py","file_name":"dense121_predict.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}{"seq_id":"14288125382","text":"import cv2\nimport numpy as np\nimport os\nimport argparse\n\ndef test(path='../../images/diff/', basename=\"diff_\", count_max=120, rgb_threshold=[18, 18, 18], white_blob_size=2000, black_blob_size=2000):\n\n    ref_image = None\n    count = 0\n    while True:\n        img_path = path + basename + str(count) + '.bmp'\n        count += 1\n        if count == count_max:\n            count = 0\n\n        if not os.path.exists(img_path):\n            # print( img_path + ' not found')\n            continue\n\n        # the first image is taken as the reference image\n        if ref_image is None:\n            print('choosing the ref image: ' + img_path)\n            ref_image = cv2.imread(img_path)\n            print(ref_image.shape)\n            if len(ref_image.shape) == 2:\n                print(\"grayscale images are not supported\")\n                exit(1)\n            else:\n                num_ch = ref_image.shape[2]\n                img_width = ref_image.shape[1]\n                img_height = ref_image.shape[0]\n                print(\"number of color channels: \" + str(num_ch))\n                if num_ch != 3:\n                    print(\"should be 3!\")\n                    exit(1)\n            # back to the 
while loop to read another image\n            continue\n\n        # read the new image and display it along with ref\n        image = cv2.imread(img_path)\n        cv2.imshow('ref_image', ref_image)\n        cv2.imshow('image', image)\n\n        # create a new image that will show the diff mask\n        diff_image = cv2.imread(img_path)\n\n        # First part of the algorithm\n        # comparing each pixel of the current pic with the corresponding one on the ref pic\n        # if they are similar, set the pixel in resulting diff mask to [0,0,0] else [255,0,0]\n        for x in range(0, img_width):\n            for y in range(0, img_height):\n                val = [0, 0, 0]\n                color_val_sum = 0\n                for ch in range(0, 3):\n                    color_val = abs(int(image[y,x][ch]) - int(ref_image[y,x][ch]))\n                    color_val_sum += color_val\n                    if color_val > rgb_threshold[ch]:\n                        val = [255, 0, 0]\n                        break\n                if color_val_sum > 40:\n                    val = [255, 0, 0]\n                diff_image[y, x] = val\n\n        # Second part of the algorithm\n        # removing small blobs (white or black)\n        if True:\n            for x in range(0, img_width):\n                for y in range(0, img_height):\n\n                    # we use the second color channel of each pixel in the diff image, to know\n                    # if it has already been checked or not\n                    if diff_image[y, x][1] == 0: # not checked yet\n                        # we found a pixel that has not been checked yet\n                        # storing its value (0 or 255)\n                        # and adding its coordinates to the pix_list array that will group all the pixels within the same blob\n                        pix_val = int(diff_image[y, x][0])\n                        pix_list = [[x,y]]\n                        diff_image[y, x][1] = 255 # set this pixel as checked now\n                        # function that is going to check the neighbours of the current pixel\n                        # if they have the same value, they are added to the pix_list and their neighbours are then being checked (and so on)\n                        # if they don't have the same value, nothing is done\n                        find_close_pixels(diff_image, [x,y], pix_list, pix_val)\n\n                        if pix_val == 255: #white\n                            # if the white blob is too small, remove it (set all its pixels to black)\n                            if len(pix_list) < white_blob_size:\n                                for pix in pix_list:\n                                    diff_image[pix[1], pix[0]][0] = 0\n                        else:\n                            # if the black blob is too small, remove it (set all its pixels to white)\n                            if len(pix_list) < black_blob_size:\n                                for pix in pix_list:\n                                    diff_image[pix[1], pix[0]][0] = 255\n\n\n        cv2.imshow('diff_mask', diff_image)\n\n        # create a new image that will show the resulting image\n        resulting_image = cv2.imread(img_path)\n        for x in range(0, img_width):\n            for y in range(0, img_height):\n                # for each pixel, depending on the mask, we set the pixel value to black or to the original value\n                if diff_image[y, x][0] == 0:\n                    resulting_image[y,x] = [0, 0, 0]\n                else:\n                    resulting_image[y, x] = image[y,x]\n\n        cv2.imshow('resulting_image', resulting_image)\n        cv2.waitKey(1000)\n\n\n
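def diff_mask_fast(ref_image, image, channel_threshold=18, sum_threshold=40):\n    # Added sketch (not part of the original script): the \"first part\" of the\n    # algorithm above, vectorized with numpy instead of per-pixel loops.\n    # channel_threshold stands in for the per-channel rgb_threshold values.\n    diff = cv2.absdiff(image, ref_image).astype(np.int32)\n    mask = (diff > channel_threshold).any(axis=2) | (diff.sum(axis=2) > sum_threshold)\n    out = np.zeros_like(image)\n    out[mask] = [255, 0, 0]\n    return out\n\n\n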
def find_close_pixels(diff_image, coord, pix_list, pix_val):\n\n    check_neighbour_list = [coord]\n    # look into the neighbour list\n    while check_neighbour_list:\n        to_check = check_neighbour_list.pop(-1)\n        x = to_check[0]\n        y = to_check[1]\n        # check that the pixel is safely inside the image borders\n        if x > 1 and x < 320 - 1 and y > 1 and y < 240 - 1:\n            for i in range(x - 1, x + 2):\n                for j in range(y - 1, y + 2):\n                    # if it has the same value as the original one, mark it checked and add it to the list\n                    if diff_image[j, i][1] == 0 and diff_image[j, i][0] == pix_val:\n                        diff_image[j, i][1] = 255\n                        pix_list += [[i,j]]\n                        check_neighbour_list += [[i,j]]\n        else:\n            # be more careful about the image limits here but do exactly the same as before\n            for i in range(x - 1, x + 2):\n                for j in range(y - 1, y + 2):\n                    if i < 0 or i >= 320 or j < 0 or j >= 240:\n                        continue\n                    if diff_image[j, i][1] == 0 and diff_image[j, i][0] == pix_val:\n                        diff_image[j, i][1] = 255\n                        pix_list += [[i,j]]\n                        check_neighbour_list += [[i,j]]\n\ndef main(args=None):\n\n    p = argparse.ArgumentParser()\n    p.add_argument('-p', '--path', help='path to the images to diff, the first one is the ref', default='../../images/diff/')\n    p.add_argument('-bn', '--basename', help='basename of the images, they all should be in the format \"basename\" + index + \".bmp\"',\n                   default='diff_')\n    p.add_argument('-cm', '--count_max', help='image max count. Uses images with index 0 to count_max', default=120)\n    p.add_argument('-wbs', '--white_blob_size', help='minimum size for a white blob not to be removed', default=2000)\n    p.add_argument('-bbs', '--black_blob_size', help='minimum size for a black blob not to be removed', default=2000)\n    p.add_argument('-rgbt', '--rgb_threshold', help='threshold to determine if a pixel is comparable to the ref pixel or not', default=[18, 18, 18])\n\n    pargs = p.parse_args(args)\n\n    test(pargs.path, pargs.basename, int(pargs.count_max), pargs.rgb_threshold, int(pargs.white_blob_size), int(pargs.black_blob_size))\n\nif __name__ == '__main__':\n    main()\n\n\n","repo_name":"ARM-software/EndpointAI","sub_path":"ProofOfConcepts/Vision/OpenMvStereoVision/src/prestudies/diff_function.py","file_name":"diff_function.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"77"}{"seq_id":"23051752600","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport pandas as pd\r\nimport spacy\r\nimport heapq\r\nimport re\r\nimport nltk\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nfrom flask import Flask, Response, render_template,request\r\nimport plotly\r\nimport plotly.graph_objs as go\r\npd.options.plotting.backend = \"plotly\"\r\n\r\n \r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n\t\r\n    global df,dfcity\r\n    df = pd.DataFrame(columns = ['Date','Year','Time', 'City','URL', 'Headline','Type'])\r\n\r\n    dfcity = pd.DataFrame(columns=['City','CrimeRate'])\r\n\r\n    NoCrime = getNews('https://timesofindia.indiatimes.com/city/mumbai','Mumbai')\r\n    dfcity = dfcity.append({'City':'Mumbai','CrimeRate':NoCrime},ignore_index = True)\r\n\r\n    NoCrime = getNews('https://timesofindia.indiatimes.com/city/delhi','Delhi')\r\n    dfcity = dfcity.append({'City':'Delhi','CrimeRate':NoCrime},ignore_index = True)\r\n\r\n    NoCrime = getNews('https://timesofindia.indiatimes.com/city/bangalore','Bengaluru')\r\n    dfcity = dfcity.append({'City':'Bengaluru','CrimeRate':NoCrime},ignore_index = True)\r\n\r\n    NoCrime = getNews('https://timesofindia.indiatimes.com/city/kolkata','Kolkata')\r\n    dfcity = dfcity.append({'City':'Kolkata','CrimeRate':NoCrime},ignore_index = True)\r\n\r\n    NoCrime = getNews('https://timesofindia.indiatimes.com/city/amritsar','Amritsar')\r\n    dfcity = dfcity.append({'City':'Amritsar','CrimeRate':NoCrime},ignore_index = True)\r\n\r\n    values=dfcity['CrimeRate']\r\n    labels=dfcity['City']\r\n    legend=\"Daily Crime Rates\"\r\n\r\n\r\n    return render_template('index.html',values=values, labels = labels, legend=legend)\r\n\r\ndef prog_sent(news):\r\n\r\n    typeOfCrime=\"\"\r\n    pattern1 = [r'\\b(?i)'+'abduction'+r'\\b',r'\\b(?i)'+'assaulted'+r'\\b',r'\\b(?i)'+'rape'+r'\\b',r'\\b(?i)'+'abuse'+r'\\b',r'\\b(?i)'+'maltreatment'+r'\\b',r'\\b(?i)'+'rapine'+r'\\b']\r\n    pattern2 = 
[r'\\b(?i)'+'bribe'+r'\\b',r'\\b(?i)'+'fraud'+r'\\b',r'\\b(?i)'+'cheat'+r'\\b',r'\\b(?i)'+'property'+r'\\b',r'\\b(?i)'+'fraudster'+r'\\b',r'\\b(?i)'+'swindler'+r'\\b',r'\\b(?i)'+'cheater'+r'\\b',r'\\b(?i)'+'trickster'+r'\\b']\r\n pattern3 = [r'\\b(?i)'+'Hitting'+r'\\b',r'\\b(?i)'+'slapping'+r'\\b',r'\\b(?i)'+'biting'+r'\\b',r'\\b(?i)'+'choking'+r'\\b',r'\\b(?i)'+'Aggressive'+r'\\b']\r\n pattern4 = [r'\\b(?i)'+'addictive'+r'\\b',r'\\b(?i)'+'heroin'+r'\\b',r'\\b(?i)'+'weed'+r'\\b',r'\\b(?i)'+'narcotics'+r'\\b',r'\\b(?i)'+'dope'+r'\\b',r'\\b(?i)'+'anesthetic'+r'\\b']\r\n pattern5 = [r'\\b(?i)'+'smuggling'+r'\\b',r'\\b(?i)'+'gunrunning'+r'\\b',r'\\b(?i)'+'gun'+r'\\b',r'\\b(?i)'+'bomb'+r'\\b']\r\n pattern6 = [r'\\b(?i)'+'capture'+r'\\b',r'\\b(?i)'+'hijack'+r'\\b',r'\\b(?i)'+'seize'+r'\\b',r'\\b(?i)'+'snatch'+r'\\b',r'\\b(?i)'+'steal'+r'\\b',r'\\b(?i)'+'lure'+r'\\b']\r\n pattern7 = [r'\\b(?i)'+'bloodshed'+r'\\b',r'\\b(?i)'+'homicide'+r'\\b',r'\\b(?i)'+'shooting'+r'\\b',r'\\b(?i)'+'kill'+r'\\b',r'\\b(?i)'+'slay'+r'\\b']\r\n pattern8 = [r'\\b(?i)'+'burglary'+r'\\b',r'\\b(?i)'+'embezzlement'+r'\\b',r'\\b(?i)'+'heist'+r'\\b',r'\\b(?i)'+'theft'+r'\\b',r'\\b(?i)'+'wrongdoing'+r'\\b']\r\n \r\n output = []\r\n flag = 0\r\n for pat in pattern1:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Rape\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern2:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Financial Fraud\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern3:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Domestic violence\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern4:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Illegal drug trade\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern5:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Arms trafficking\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern6:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"kidnapping\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern7:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Murder\"\r\n output.append(typeOfCrime)\r\n break\r\n for pat in pattern8:\r\n if re.search(pat, news) != None:\r\n typeOfCrime=\"Robbery\"\r\n output.append(typeOfCrime)\r\n break\r\n if len(output)>=1:\r\n return output\r\n else:\r\n return \"\"\r\n\r\ndef getNews(URL,city):\r\n\r\n global df\r\n page = requests.get(URL)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n #list of crimes on indantimes \r\n results = soup.find_all(class_='list5 clearfix')\r\n noCrime=0\r\n for ul in results:\r\n for li in ul.find_all('li'):\r\n link = li.find('a',href=True)\r\n if link != None:\r\n\r\n URL1 = URL+link['href']\r\n page1 = requests.get(URL1)\r\n soup = BeautifulSoup(page1.content, 'html.parser')\r\n results1 = soup.find(\"div\",{'class':'_3Mkg- byline'})\r\n if results1 != None:\r\n l1=results1.text.split(\"|\")\r\n l2=l1[len(l1)-1].split(',')\r\n l2[0]=l2[0].replace('Updated:','')\r\n results2 = soup.find(\"div\",{'class':'ga-headlines'})\r\n news = results2.text\r\n \r\n if results2 != None:\r\n typeOfCrime = prog_sent(news)\r\n if len(typeOfCrime)>=1:\r\n typeOfCrime=typeOfCrime[0]\r\n noCrime+=1\r\n else:\r\n typeOfCrime=None\r\n df = df.append({'Date' :l2[0] ,'Year':l2[1] , 'Time':l2[1],'City':city,'URL' :URL1 , 'Headline' :link['title'] ,'Type':typeOfCrime},ignore_index = True)\r\n df=df.dropna(axis=0, how='any')\r\n print(noCrime)\r\n \r\n return noCrime\r\n\r\nif __name__ == 
'__main__':\r\n    app.run()\r\n","repo_name":"sayali1910/NewsBEProject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"43317612663","text":"import sys\nimport time\nimport operator\n\ndef is_range_overlapping(x1, x2, y1, y2):\n    return x1 <= y2 and y1 <= x2\n\ntest_cases = open(sys.argv[1], 'r')\nfor test in test_cases:\n    test = test.strip()\n    if len(test) == 0:\n        continue\n    pattern = '%b %Y'\n    ranges = []\n    for date_range in test.split(';'):\n        date1 = date_range.strip().split('-')[0]\n        date2 = date_range.strip().split('-')[1]\n        epoch1 = int(time.mktime(time.strptime(date1, pattern)))\n        epoch2 = int(time.mktime(time.strptime(date2, pattern)))\n        ranges.append([epoch1, True])\n        ranges.append([epoch2, False])\n    ranges = sorted(ranges, key=operator.itemgetter(0))\n    queue = [ranges[0][0]]\n    result = 0\n    for i in range(1, len(ranges)):\n        epoch = ranges[i][0]\n        starting = ranges[i][1]\n        if starting:\n            queue.append(epoch)\n        else:\n            if len(queue) == 1:\n                months_diff = round(((epoch - queue[0]) * 12 / 3.15569e7) + 1)\n                result += int(months_diff)\n            queue.pop()\n    print(int(result / 12))\n\ntest_cases.close()\n","repo_name":"daleysoftware/codeeval","sub_path":"0-easy/working-experience/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"77"}{"seq_id":"21240850024","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nimport json\nfrom base64 import b64decode\n\n\nclass PePosRecoverWizard(models.TransientModel):\n\t_name = \"pe.pos.recover.wizard\"\n\t_description = \"Recovery POS Order\"\n\n\tname = fields.Char(\"Number\")\n\tfname = fields.Char(\"File Name\")\n\tfdatas = fields.Binary(\"Json Invoice\")\n\tsession_id = fields.Many2one(\n\t\t'pos.session', string='Session', domain=\"[('state', '=', 'opened')]\")\n\tis_new = fields.Boolean(\"Is New\")\n\n\tdef check_invoice_number(self):\n\t\tself.ensure_one()\n\t\tif self.name:\n\t\t\tif self.env['account.move'].search([('name', '=', self.name)]):\n\t\t\t\traise ValidationError(_(\"An invoice with this number already exists\"))\n\t\treturn True\n\n\tdef get_fdatas(self):\n\t\tself.ensure_one()\n\t\tres = False\n\t\tif self.fdatas:\n\t\t\ttry:\n\t\t\t\tres = json.loads(str(b64decode(self.fdatas), 'utf-8'))\n\t\t\texcept Exception:\n\t\t\t\traise ValidationError(_(\"It is not a json file\"))\n\t\t# else:\n\t\t# raise ValidationError(_(\"There is no data to process\"))\n\t\treturn res\n\n\t@api.onchange('fdatas')\n\tdef onchange_fdatas(self):\n\t\tres = self.get_fdatas()\n\t\tif res:\n\t\t\tself.name = res.get('number', False)\n\t\t\tself.session_id = res.get(\"pos_session_id\", False)\n\t\t\tself.check_invoice_number()\n\n\tdef action_view_pos_order(self, order_id):\n\t\tself.ensure_one()\n\t\taction = self.env.ref('point_of_sale.action_pos_pos_form').read()[0]\n\t\tif order_id:\n\t\t\taction['views'] = [\n\t\t\t\t(self.env.ref('point_of_sale.view_pos_pos_form').id, 'form')]\n\t\t\taction['res_id'] = order_id\n\t\telse:\n\t\t\taction = {'type': 'ir.actions.act_window_close'}\n\t\treturn action\n\n\tdef _default_session(self):\n\t\tsession_id = self.env['pos.session'].search(\n\t\t\t[('state', '=', 'opened'), ('user_id', '=', self.env.uid)], limit=1)\n\t\tif not session_id:\n\t\t\tsession_id = self.env['pos.session'].search(\n\t\t\t\t[('state', '=', 
'opened')], limit=1)\n\t\treturn session_id\n\n\tdef create_order(self):\n\t\tself.ensure_one()\n\t\tres = self.get_fdatas()\n\t\tres['number'] = self.name\n\t\tif self.is_new:\n\t\t\tvals = {}\n\t\t\tif self.session_id.state == 'opened':\n\t\t\t\tres[\"pos_session_id\"] = self.session_id.id\n\t\t\telse:\n\t\t\t\tres[\"pos_session_id\"] = self._default_session().id\n\t\t\tself.check_invoice_number()\n\t\t\tvals = self.env['pos.order']._order_fields(res)\n\t\t\torder_id = self.env['pos.order'].create(vals).id\n\t\telse:\n\t\t\tres[\"pos_session_id\"] = self.session_id.id\n\t\t\tself.check_invoice_number()\n\t\t\torders = [{'id': res['uid'], 'data':res}]\n\t\t\torder_id = self.env['pos.order'].create_from_ui(orders)\n\t\t\torder_id = order_id and order_id[0].get('id') or False\n\t\treturn self.action_view_pos_order(order_id)\n","repo_name":"Lobonick/cens-test","sub_path":"solse_pe_cpe_pos/wizard/pos_recovery_wizard.py","file_name":"pos_recovery_wizard.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30489497739","text":"from xblock.fields import Scope, String, Float, Boolean\nfrom xblock_htmlacademy.settings import DefaultedDescriptor, CONFIGURATION\n\n\nclass HTMLAcademyXBlockFields(object):\n\n display_name = String(\n display_name=\"Display name\",\n default='HTMLAcademy Assignment',\n help=\"This name appears in the horizontal navigation at the top of the page.\",\n scope=Scope.settings\n )\n\n course_name = String(\n display_name=\"Course name\",\n scope=Scope.settings\n )\n\n iteration_id = String(\n display_name=\"Course short name\",\n scope=Scope.settings\n )\n\n course_element = String(\n display_name=\"Course element\",\n scope=Scope.settings\n )\n\n description = String(\n display_name=\"Description\",\n scope=Scope.settings\n )\n\n weight = Float(\n display_name=\"Max score\",\n scope=Scope.settings,\n default=0,\n )\n\n lab_url = DefaultedDescriptor(\n base_class=String,\n display_name=\"URL to open lab\",\n scope=Scope.settings,\n default=CONFIGURATION.get('LAB_URL')\n )\n\n api_url = DefaultedDescriptor(\n base_class=String,\n display_name=\"URL to get into API\",\n scope=Scope.settings,\n default=CONFIGURATION.get('API_URL')\n )\n\n secret_key = DefaultedDescriptor(\n base_class=String,\n display_name=\"Key for hashing API request\",\n scope=Scope.settings,\n default=CONFIGURATION.get('SECRET')\n )\n\n \"\"\"\n [{\"10.05.2015 10:00:00\" : 0.07}, {\"10.05.2015 10:05:00\" : 0.14}]\n \"\"\"\n history = String(\n scope=Scope.user_state,\n default=\"[]\"\n )\n\n \"\"\"\n Needs to create StudentModule when user press \"Start\" button\n \"\"\"\n started = Boolean(\n scope=Scope.user_state,\n default=False\n )\n","repo_name":"openeduITMO/ifmo-xblock-htmlacademy","sub_path":"xblock_htmlacademy/xblock_academy_fields.py","file_name":"xblock_academy_fields.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19627218556","text":"#!/usr/bin/env python\n\nimport wx\nimport operator\n\nNORMAL_TEXT_COLOUR = \"BLACK\"\nREFRESH_TEXT_COLOUR = \"DIM GREY\"\nERROR_TEXT_COLOUR = \"RED\"\nDOWNLOADED_TEXT_COLOUR = \"DIM GREY\"\n\nclass ChannelTree(wx.TreeCtrl):\n\n def clear(self):\n \"\"\" Remove all data from the tree \"\"\"\n self.DeleteAllItems()\n self.AddRoot(text=\"\")\n\n def add(self, channel):\n \"\"\" Add a new channel to the tree \"\"\"\n item = 
self.AppendItem(parent=self.GetRootItem(),\n                               text=channel.title,\n                               data=wx.TreeItemData(channel))\n        self.SetItemBold(item)\n        channel.view_item = item\n        self.update(channel)\n        self.Expand(item)\n\n    def update(self, channel):\n        \"\"\" Update the tree data for the specified channel \"\"\"\n        channel_item = channel.view_item\n        self.DeleteChildren(channel_item)\n        if channel.is_refreshing:\n            refresh_text = self.AppendItem(parent=channel_item,\n                                           text=\"Refreshing...\")\n            self.SetItemTextColour(refresh_text, \n                                   wx.NamedColour(REFRESH_TEXT_COLOUR))\n        elif channel.error_message is not None:\n            refresh_text = self.AppendItem(parent=channel_item,\n                                           text=\"ERROR: %s\" % \n                                           channel.error_message)\n            self.SetItemTextColour(refresh_text, \n                                   wx.NamedColour(ERROR_TEXT_COLOUR))\n        else:\n            programmes = channel.subscribed_programmes.values()\n            programmes.sort(key=operator.attrgetter('name'))\n            for programme in programmes:\n                item = self.AppendItem(parent=channel_item,\n                                       text=programme.name,\n                                       data=wx.TreeItemData(programme))\n                programme.view_item = item\n                self.update_programme_text_colour(programme)\n\n    def refresh_selected_programme(self):\n        self.update_programme_text_colour(self.get_selected_programme())\n\n    def update_programme_text_colour(self, programme):\n        \"\"\" Update the display colour of the specified programme \"\"\"\n        if programme is not None:\n            for episode in programme.episodes:\n                if not episode.downloaded:\n                    self.SetItemTextColour(programme.view_item,\n                                           wx.NamedColour(NORMAL_TEXT_COLOUR))\n                    return\n            # Programme contains only downloaded episodes\n            self.SetItemTextColour(programme.view_item,\n                                   wx.NamedColour(DOWNLOADED_TEXT_COLOUR))\n\n    def get_selected_programme(self):\n        item = self.GetSelection()\n        if not item.IsOk():\n            return None\n        \n        programme = self.GetItemData(item).GetData()\n        if hasattr(programme, \"episodes\"):\n            return programme\n        else:\n            return None\n\n    def get_selected_channel(self):\n        \"\"\" Return the channel of the currently selected item \"\"\"\n        item = self.GetSelection()\n        if not item.IsOk():\n            return None\n\n        parent = self.GetItemParent(item)\n        channel = self.GetItemData(parent).GetData()\n        return channel\n    \n\n    def delete_programme(self, programme):\n        # After deletion, go to the next item\n        next_item = self.GetNextSibling(programme.view_item)\n        # Or the previous item\n        if not next_item.IsOk():\n            next_item = self.GetPrevSibling(programme.view_item)\n        # Or the parent\n        if not next_item.IsOk():\n            next_item = self.GetItemParent(programme.view_item)\n\n        self.Delete(programme.view_item)\n        self.SelectItem(next_item)\n\n\n","repo_name":"sneakypete81/iplayer-get","sub_path":"idiotbox/view/ChannelTree.py","file_name":"ChannelTree.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"29475534473","text":"\"\"\"\nTest DAG for practice 1\n\"\"\"\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\nimport logging\n\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator\n\nDEFAULT_ARGS = {\n    'start_date': days_ago(5),\n    'owner': 'i-fahrutdinov',\n    'poke_interval': 600\n}\n\nwith DAG(\"i-fahrutdinov_practice1\",\n         schedule_interval='@daily',\n         default_args=DEFAULT_ARGS,\n         max_active_runs=1,\n         tags=['i-fahrutdinov']\n         ) as dag:\n\n    dummy = DummyOperator(task_id=\"start\")\n\n    echo_ds = BashOperator(\n        task_id='echo_ds',\n        bash_command='echo {{ ds_nodash }}',\n        dag=dag\n    )\n\n    
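# (Added note) {{ ds_nodash }} is an Airflow Jinja template variable: it is\n    # rendered at run time as the execution date without dashes, e.g. the\n    # 2021-01-01 run executes \"echo 20210101\".\n\n    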
def hello_world(**context):\n ed = context['execution_date']\n conf = context['conf']\n logging.info(f\"Hello World! \\nMy execution date is | {ed} | with config: {conf}.\")\n\n hello_world = PythonOperator(\n task_id='hello_world',\n python_callable=hello_world,\n dag=dag\n )\n\n dummy >> echo_ds >> hello_world\n","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/dags/i-fahrutdinov/i-fahrutdinov_practice1.py","file_name":"i-fahrutdinov_practice1.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25973509592","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author : MG\n@Time : 19-2-20 下午8:59\n@File : add_layer\n@contact : mmmaaaggg@163.com\n@desc : \n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef add_layer(input, in_size, out_size, activation_function=None):\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n Wx_plus_b = tf.matmul(input, Weights) + biases\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n\n return outputs\n\n\nx_data = np.linspace(-1, 1, 300)[:, np.newaxis]\nnoise = np.random.normal(0, 0.05, x_data.shape)\ny_data = np.square(x_data) - 0.5 + noise\n\nxs = tf.placeholder(tf.float32, [None, 1])\nys = tf.placeholder(tf.float32, [None, 1])\n\nl1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)\nprediction = add_layer(l1, 10, 1, activation_function=None)\n\nloss = tf.reduce_mean(\n tf.reduce_sum(\n tf.square(ys - prediction),\n reduction_indices=[1])\n)\n\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\ninit = tf.global_variables_initializer()\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.scatter(x_data, y_data)\n# plt.show()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(1000):\n sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\n if i % 50 == 0:\n print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))\n try:\n ax.lines.remove(lines[0])\n plt.pause(0.1)\n except Exception:\n pass\n\n prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n lines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n\n","repo_name":"mmmaaaggg/RefUtils","sub_path":"src/fh_tools/language_test/tensorflow_demo/base_demo/add_layer.py","file_name":"add_layer.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15938787393","text":"from flask_wtf import FlaskForm as Form\nfrom flask_wtf.file import FileField, FileRequired\nfrom wtforms import ValidationError\n\nCSV_SCHEMA = [\n 'id',\n 'name',\n 'speciality',\n 'address',\n 'city',\n 'state',\n 'zip',\n 'lat',\n 'lng',\n]\n\n\nclass FileSizeValidator(object):\n def __init__(self, message=None, max_size=5 * 1024 * 1024): # 5MB\n self.max_size = max_size\n if message is None:\n message = u'The file size exceeded max_size({})'.format(\n self.max_size)\n self.message = message\n\n def __call__(self, form, field):\n if field.data:\n file_size = len(field.data.stream.read())\n field.data.stream.seek(0) # Cleanup for the user\n if file_size > self.max_size:\n raise ValidationError(self.message)\n\n\nclass CSVFileValidator(object):\n def __init__(self, mime_message=None, header_message=None):\n if mime_message is None:\n mime_message = u'The provided file is not a 
CSV file.'\n        self.mime_message = mime_message\n        if header_message is None:\n            header_message = (\n                u'The provided CSV file does not conform to the database schema.'\n            )\n        self.header_message = header_message\n\n    def __call__(self, form, field):\n        if field.data:\n            if field.data.mimetype != 'text/csv':\n                raise ValidationError(self.mime_message)\n\n            # Check that the uploaded CSV matches our schema by reading the first line of the CSV file\n            file_csv_schema = (\n                field.data.stream.readline().decode('utf-8').strip().split(',')\n            )  # Creates list of schema\n            field.data.stream.seek(0)  # Cleanup for the user\n\n            # Check that the CSV file's schema at least contains what our database requires\n            for item in CSV_SCHEMA:\n                if item not in file_csv_schema:\n                    raise ValidationError(\n                        self.header_message +\n                        u' Missing column: {}'.format(item)\n                    )\n\n\nclass ProviderImportForm(Form):\n    file = FileField(\n        validators=[FileRequired(), FileSizeValidator(), CSVFileValidator()],\n        description=\"Import Provider data from CSV file.\",\n    )\n","repo_name":"hXtreme/HCP-Project","sub_path":"app/admin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}{"seq_id":"73852561847","text":"\n# coding: utf-8\n\n# In[3]:\n\nget_ipython().magic(u'matplotlib inline')\nimport pandas as pd\nbikes = pd.read_csv(\"bike_rental_day.csv\")\n\n\n# # Finding the number of combinations\n# We can calculate probabilities greater than or equal to a threshold with our bike sharing data. We found that the probability of having more than 4000 riders is about .6. We can use this to find the probability that in 10 days, 7 or more days have more than 4000 riders.\n\n# In[5]:\n\nimport math\ndef find_outcome_combinations(N,k):\n    numerator = math.factorial(N)\n    denominator = math.factorial(k) * math.factorial(N - k)\n    return numerator / float(denominator)\ncombination_7 = find_outcome_combinations(10,7)\ncombination_7\n\n\n# # Statistical significance\n# Typically, researchers will use 5% as a significance threshold -- if an event would only happen 5% or less of the time by random chance, then it is statistically significant. If an event would happen more than 5% of the time by random chance, then it isn't statistically significant.\n# In our case, there is a 12% chance that the weather would be sunny 8 days out of 10 by random chance. We add this to 4% for 9 days out of 10, and .6% for 10 days out of 10 to get a 16.6% total chance of the sunny outcome happening 8 or more times in our 10 days. 
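(The quick check below verifies this arithmetic.)\n\n# In[6]:\n\n# Added sketch: P(8 or more \"sunny\" days out of 10) via the binomial formula,\n# reusing find_outcome_combinations from above; 0.6 is the per-day probability\n# carried over from the text.\np = 0.6\nsum(find_outcome_combinations(10, k) * p ** k * (1 - p) ** (10 - k) for k in range(8, 11))\n# -> roughly 0.167, i.e. about 16.6%\n\n# 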
Our result isn't statistically significant, so we'd have to go back to the lab and spend some time adding more flux capacitors to our weather control device.\n","repo_name":"kyohei-koga/study_data_science","sub_path":"Step_5_Probability_and_Statistics/02Probability_and_Statistics_in_Python_Intermediate/02Calculating_probabilities.py","file_name":"02Calculating_probabilities.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38059219491","text":"import argparse\n\nfrom dateutil.relativedelta import relativedelta\nimport geopandas as gpd\nimport descarteslabs as dl\nfrom tqdm import tqdm\n\nfrom scripts import contour_gen, dl_utils\n\n\nDL_SYSTEM_PARAMS = {\n 'image': ('us.gcr.io/dl-ci-cd/images/tasks/public/' +\n 'py3.8:v2020.09.22-5-ga6b4e5fa'),\n 'cpus': 1,\n 'maximum_concurrency': 30,\n 'memory': '6Gi',\n 'retry_count': 4,\n 'task_timeout': 20000,\n 'include_modules': ['scripts.dl_utils', 'scripts.contour_gen'],\n 'requirements': ['opencv-python-headless', 'tqdm', 'shapely==1.8.0']\n}\n\n\ndef load_coords(confirmed_sites_file):\n confirmed_sites = gpd.read_file(\n f\"../data/site_metadata/{confirmed_sites_file}.geojson\")\n coords = [[site.x, site.y] for site in confirmed_sites['geometry']]\n names = confirmed_sites['id']\n print(len(confirmed_sites), 'sites loaded')\n return coords, names\n\n\ndef run_model(coord, name, **kwargs):\n \"\"\"Wrap a call to DescartesRun for use in DL async processing.\n\n Required kwargs:\n product_id: String ID of a DL catalog product\n model_name: Model name in DL storage\n start_date, end_date: Isoformat date strings\n\n Optional kwargs are passed to the instantiation of DescartesRun.\n \"\"\"\n import contour_gen\n runner = contour_gen.DescartesContourRun(**kwargs)\n runner(coord, name)\n #runner(kwargs['start_date'], kwargs['end_date'])\n\n\ndef main(*args):\n \"\"\"Deploy a model on the Descartes Labs platform.\n\n Args:\n args:list: Can include any pair of form (flag, argument) passed to\n the argument parser, e.g. ['--roi_file', '../data/bali.json'].\n Cannot be None if calling from an interpreter. 
Give [] instead.\n \"\"\"\n parser = argparse.ArgumentParser('Configure contour generation')\n parser.add_argument('--file_name',\n type=str)\n parser.add_argument('--product_id',\n type=str,\n help='ID of catalog product',\n default='earthrise:contours')\n parser.add_argument('--product_name',\n type=str,\n help='Name of catalog product',\n default='TPA NN TOA')\n parser.add_argument('--model_file',\n type=str,\n help='Local path to model file to upload',\n default='')\n parser.add_argument('--model_name',\n type=str,\n help='Model name in DL Storage',\n default='model_filtered_toa-12-09-2020.h5')\n parser.add_argument('--mosaic_period',\n type=int,\n help='Months over which to mosaic image data',\n default=1)\n parser.add_argument('--mosaic_method',\n type=str,\n help='Compositing method: \"median\"/\"min\"/\"min_masked\"',\n default='min')\n parser.add_argument('--spectrogram_interval',\n type=int,\n help=('Spectrogram time interval, in mosaic periods'),\n default=6)\n # Note on dates: Date range should be longer than the spectrogram length.\n # Starting on successive mosaic periods (typically: monthly), as many\n # spectrograms are created as fit in the date range.\n parser.add_argument('--start_date',\n type=str,\n help='Isoformat start date for predictions',\n default='2020-06-01')\n parser.add_argument('--end_date',\n type=str,\n help='Isoformat end date for predictions',\n default='2020-10-01'),\n parser.add_argument('--rect_width',\n type=float,\n default=0.01)\n parser.add_argument('--endpoint',\n type=str)\n parser.add_argument('--run_local',\n action='store_true',\n help='Run model locally rather than async on DL.')\n args = parser.parse_args(*args)\n\n #tiles = dl_utils.get_tiles_from_roi(args.roi_file, args.tilesize, args.pad)\n coords, names = load_coords(args.file_name)\n # This init handles product creation and model upload.\n runner = contour_gen.DescartesContourRun(**vars(args))\n\n if args.run_local:\n for coord, name in zip(tqdm(coords), names):\n runner(coord=coord, name=name)\n else:\n async_func = dl.Tasks().create_function(\n run_model, name=args.product_name, **DL_SYSTEM_PARAMS)\n\n for coord, name in zip(tqdm(coords), names):\n async_func(coord, name, **vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"earthrise-media/plastics","sub_path":"scripts/deploy_contours.py","file_name":"deploy_contours.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"11148588729","text":"import ideal_observer as i_o\nimport linear_markov\nimport triplets\nimport data_handling as dh\nimport pandas as pd\nimport numpy as np\nfrom utils import pd_multiplicator, get_git_hash\nimport filters\nimport traceback\nfrom definitions import OUTPUT_DIR\nimport argparse\nfrom os.path import join as pj\nfrom functools import partial, reduce\nimport pprint\nfrom ideal_observer import STATE_PRIOR_ENTROPIES, STATE_POSTERIOR_ENTROPIES,\\\n STATE_POSTERIOR_DIFFERENCE, PRED_DIFFERENCE, PRED_ENTROPIES, \\\n PRED_PROB_ALL, PRED_PROBS, LOG_PRED_PROBS, LOG_PRED_PROB\nimport itertools\nimport scipy.stats as sp\n\nMODELS = {'iHMM': i_o.IdealObserverSamples,\n 'Markov': i_o.IdealObserverSamples,\n 'LinearMarkov': linear_markov.LinearMarkovSamples,\n 'Triplet': triplets.TripletModel,\n 'GroundTruth': partial(i_o.GroundTruth,\n transition_noise=0.13,\n emission_noise=0.1)}\n\ndef data_and_model(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n 
last_n_samples, modifier, permute):\n\n if participant_test is None:\n participant_test = participant_train\n data_train = dh.import_data(participant = participant_train, block = e_train)\n data_test = dh.import_data(participant = participant_test, block = e_test)\n\n if participant_test and permute:\n data_test = permute_keys(data_train, data_test)\n else:\n data_test['permuted'] = False\n\n if '.ini' in ini:\n ini = ini.split('.')[0]\n data_test['ini'] = ini\n data_test['model'] = model_name\n data_test['filters'] = filters.get_filter(data_test,ini)\n data_train['filters'] = filters.get_filter(data_train,ini)\n data_test['commit'] = commit\n data_test['e_train'] = e_train\n data_test['e_test'] = e_test\n data_test['participant_test'] = participant_test.split('_')[-1]\n data_test['participant_train'] = participant_train.split('_')[-1]\n\n if model_name == 'Markov':\n ini = ini+'_markov'\n\n kwargs = {}\n if model_name == 'GroundTruth':\n kwargs['sequence'] = data_train['Y'][data_train['trial_type']=='P'][:4]\n model = MODELS[model_name](participant = modifier+participant_train,\n ini = ini, commit = commit, epoch = e_train, last_n_samples=last_n_samples,\n **kwargs)\n return data_train, data_test, model\n\n@pd_multiplicator\ndef residuals(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False):\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n data_test['r'] = model.calculate_residuals(data_test)\n data_test['r'] = np.mean(data_test['r'],0)\n df = pd.DataFrame(data_test)\n return df\n\n@pd_multiplicator\ndef entropy(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False):\n assert (model_name == 'iHMM' or model_name=='GroundTruth')\n try:\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n\n prediction = i_o.state_and_prediction_entropy(model, data_test['Y'])\n for key in [STATE_POSTERIOR_ENTROPIES, STATE_PRIOR_ENTROPIES,\\\n STATE_POSTERIOR_DIFFERENCE, PRED_ENTROPIES, PRED_DIFFERENCE,\\\n PRED_PROBS]:\n data_test[key] = np.mean(prediction[key],0)\n\n data_test[PRED_ENTROPIES] = prediction[PRED_ENTROPIES]\n data_test['K_mean'] = np.mean([np.size(sample['phi'],0) for sample in model.samples])\n df = pd.DataFrame(data_test)\n\n except Exception as e:\n print('Failed: p_train', participant_train,'p_test', participant_test,\n 'i', ini, 'c', commit, 'e_train', e_train, 'e_test', e_test,\n 'model_name', model_name, 'last_n_samples', last_n_samples)\n print(e)\n tb = traceback.format_exc()\n print(tb)\n df = pd.DataFrame()\n return df\n\n@pd_multiplicator\ndef predicted_rt(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False):\n\n try:\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n\n if model_name == 'Triplet':\n data_test['rt_predicted'] = model.predict_rt(data_test, data_train)\n else:\n data_test['rt_predicted'] = model.predict_rt(data_test['Y'])\n df = pd.DataFrame(data_test)\n\n except Exception as e:\n print('Failed: p_train', participant_train,'p_test', participant_test,\n 'i', ini, 'c', commit, 'e_train', e_train, 
'e_test', e_test,\n 'model_name', model_name, 'last_n_samples', last_n_samples)\n print(e)\n tb = traceback.format_exc()\n df = pd.DataFrame()\n print(tb)\n return df\n\n@pd_multiplicator\ndef fingerprint(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False, length=3):\n\n def add_sequence_to_data(data, length):\n seq = ''\n data['sequence'] = ['']*len(data['Y'])\n for i,y in enumerate(data['Y']):\n # RESTART STREAM MISSING BUT SEQ LENGTHS ARE SHORTER THAN FILTERED\n # STARTS OF EACH BLOCK\n if len(seq)==length:\n seq = seq[1:]\n seq += str(y+1)\n data['sequence'][i] = seq\n return data\n\n try:\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n\n sequences = [seq for seq in itertools.product(*[[0,1,2,3]]*length)]\n data = {key: data_test[key] \\\n for key in ['commit', 'participant_test', 'model', 'e_train', 'ini']\n }\n\n data_test = add_sequence_to_data(data_test, length)\n df = pd.DataFrame(data_test)\n df = df[df.correct_response==1]\n df = df[df.filters==True]\n df = df.groupby(['sequence'])\\\n [[\"rt\"]].agg([np.mean,sp.sem,'count']).reset_index()\n df.columns = [\"_\".join(x) for x in df.columns.ravel()]\n\n #print(df.head())\n data['sequence'] = []\n data['predicted_probability'] = []\n data['log_predicted_probability'] = []\n data['rt_predicted'] = []\n\n for seq in sequences:\n data['sequence'].append(\n ''.join(map(lambda x: str(x+1),seq)))\n prediction = model.seed_and_predict(seq)\n data['predicted_probability'].append(np.mean(prediction['predicted_probability']))\n data['log_predicted_probability'].append(np.mean(np.log(prediction['predicted_probability'])))\n data['rt_predicted'].append(np.mean(prediction['rt']))\n df = pd.merge(pd.DataFrame(data),df,\n left_on='sequence', right_on='sequence_', how='outer')\n\n except Exception as e:\n print('Failed: p_train', participant_train,'p_test', participant_test,\n 'i', ini, 'c', commit, 'e_train', e_train, 'e_test', e_test,\n 'model_name', model_name, 'last_n_samples', last_n_samples)\n print(e)\n tb = traceback.format_exc()\n df = pd.DataFrame()\n print(tb)\n return df\n\ndef rt_correlation(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False, parallel=1):\n\n df = predicted_rt(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute, parallel=parallel)\n df = df[df.filters==True]\n df = df[df.correct_response==1]\n print('Computing correlations.')\n df = df.groupby(['participant_train','participant_test',\n 'commit','e_train','e_test','model','permuted','block','trial'])\\\n [[\"rt\", \"rt_predicted\"]].mean().reset_index()\n\n # CUTOFF LARGE RTs\n df['z'] = df.groupby(\n ['participant_train','participant_test',\n 'commit','e_train','e_test','model','permuted'],\n group_keys=False)\\\n .apply(lambda g: (g.rt-g.rt.mean())/g.rt.std())\n df = df[df.z < 3.0]\n\n del df['z']\n\n df = df.groupby(['participant_train','participant_test',\n 'commit','e_train','e_test','model','permuted'])\\\n [[\"rt\",\"rt_predicted\"]]\\\n .corr('pearson')[[\"rt_predicted\"]].iloc[0::2,:]\\\n .rename(columns={\"rt_predicted\": \"correlation\"})\n print(df.head())\n return df\n\n@pd_multiplicator\ndef pred_probs(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', 
permute=False):\n\n try:\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n if model_name == 'Triplet':\n predictions = model.generate_predictive_probabilities(data_test)\n else:\n predictions = model.generate_predictive_probabilities(data_test['Y'])\n data_test[LOG_PRED_PROB] = np.mean(predictions[LOG_PRED_PROBS],0)\n pred_prob_all = np.array(predictions[PRED_PROB_ALL])\n for event in range(4):\n data_test['y'+str(event)] = np.mean(pred_prob_all,0)[event,:]\n df = pd.DataFrame(data_test)\n\n except Exception as e:\n print('Failed: p_train', participant_train,'p_test', participant_test,\n 'i', ini, 'c', commit, 'e_train', e_train, 'e_test', e_test,\n 'model_name', model_name, 'last_n_samples', last_n_samples)\n print(e)\n tb = traceback.format_exc()\n df = pd.DataFrame()\n #print(tb)\n return df\n\n@pd_multiplicator\ndef model_samples(participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier='', permute=False, n_models=10):\n\n try:\n data_train, data_test, model = data_and_model(\n participant_train, participant_test,\n ini, commit, e_train, e_test, model_name,\n last_n_samples, modifier, permute)\n\n d = {key: [sample[key] for sample in model.samples[-n_models:]]\n for key in model.samples[-1].keys()}\n if not ('iteration' in d):\n d['iteration'] = range(len(model.samples)-n_models,len(model.samples))\n d['participant_train'] = participant_train\n d['ini'] = ini\n d['e_train'] = e_train\n d['model'] = model_name\n d['commit'] = commit\n d['sequence'] = [(data_train['Y'][data_train['trial_type']=='P'])[0:4]] * n_models\n #print(d)\n df = pd.DataFrame(d)\n\n except Exception as e:\n print('Failed: p_train', participant_train,'p_test', participant_test,\n 'i', ini, 'c', commit, 'e_train', e_train, 'e_test', e_test,\n 'model_name', model_name, 'last_n_samples', last_n_samples)\n #print(e)\n tb = traceback.format_exc()\n df = pd.DataFrame()\n print(tb)\n return df\n\ndef permute_keys(data_template,data):\n def keymap(key,sequence_template, sequence):\n return sequence_template[np.where(sequence==key)][0]\n\n sequence_template = np.array(data_template['Y'][data_template['trial_type']=='P'][:5])\n sequence = np.array(data['Y'][data['trial_type']=='P'][:5])\n _data = data.copy()\n _data['Y'] = np.array([keymap(k, sequence_template, sequence) for k in data['Y']])\n _data['permuted'] = True\n return _data\n\ndef prediction_correlation(df):\n return(df[(df.filters==True) & (df.correct_response==1)].groupby( \\\n ['participant','ini','commit','e_train','model'])[['rt','rt_predicted']].corr( \\\n )['rt_predicted'][0::2].reset_index().rename(columns={'rt_predicted': 'performance'}))\n\ndef map_response(r):\n if (r == 1) or (r == 'z'):\n return(0)\n if (r == 2) or (r == 'c'):\n return(1)\n if (r == 4) or (r == 'b'):\n return(2)\n if (r == 5) or (r == 'm'):\n return(3)\n\ndef error_performance(df):\n prediction_histogram = []\n predictions = np.vstack([df['y'+str(i)] for i in range(4)])\n for t in range(len(df)):\n Y = df.Y.iloc[t]\n prediction_histogram.append(np.where(np.sort(predictions[:,t])[::-1]==predictions[map_response(df['first_response'].iloc[t]),t])[0][0]+1)\n df['model_event'] = prediction_histogram\n return(df)\n\ndef compute_results(commit, template_name, parallel):\n \"\"\"Generates all results csv files for the figures and statistics.\n Inputs:\n commit: first 7 character id of commit\n template_name: result computation 
parameter settings\n parallel: number of parallel threads to use\"\"\"\n\n FUNCTIONS = {'predicted_rt': predicted_rt,\n 'predicted_prob': pred_probs,\n 'residuals': residuals,\n 'entropy': entropy,\n 'rt_correlation': rt_correlation,\n 'fingerprint': fingerprint,\n 'model_samples': partial(model_samples, n_models=5)}\n\n template = dh.get_template(template_name)\n experiment = template['EXPERIMENT']['name']\n\n if 'participant_test' in template['EXPERIMENT']:\n if template['EXPERIMENT']['participant_test'].isdigit():\n participant_test = ['_'.join([experiment,p]) for p in template['EXPERIMENT']['participant_test'].split(',')]\n else:\n participant_test = template['EXPERIMENT']['participant_test'].split(',')\n else:\n participant_test = None\n\n if 'participant_train' in template['EXPERIMENT']:\n participant_train = ['_'.join([experiment,p]) for p in template['EXPERIMENT']['participant_train'].split(',')]\n else:\n participant_train = ['_'.join([experiment,p]) for p in template['EXPERIMENT']['participants'].split(',')]\n\n last_n_samples = int(template['SAMPLES']['last_n_samples'])\n inis = template['SAMPLES']['inis'].split(',')\n permute = False\n\n git_hash = get_git_hash()\n result_names = template.keys()\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(template)\n\n for result_name in result_names:\n if not result_name in ['EXPERIMENT','SAMPLES','DEFAULT']:\n # PARTICIPANTS\n #participant_train = participants\n if template[result_name]['design_subject'] == 'across':\n if participant_test is None:\n participant_test = participant_train\n permute=[]\n if 'T' in template[result_name]['permute']:\n permute.append(True)\n if 'F' in template[result_name]['permute']:\n permute.append(False)\n else:\n participant_test = None\n\n # FUNCTION\n fun = partial(FUNCTIONS[template[result_name]['function']], parallel=parallel)\n if template[result_name]['function'] == 'fingerprint':\n fun = partial(fun,length=int(template[result_name]['length']))\n model_names = template[result_name]['models'].split(',')\n epoch_trains = template[result_name]['epoch_train'].split(',')\n epoch_tests = template[result_name]['epoch_test'].split(',')\n\n if template[result_name]['design_session'] == 'within':\n df = pd.DataFrame()\n for e_train, e_test in zip(epoch_trains, epoch_tests):\n df = pd.concat([df,fun(participant_train, participant_test, inis, commit,\n e_train, e_test, model_names, last_n_samples, permute=permute)])\n if template[result_name]['design_session'] == 'across':\n df = fun(participant_train, participant_test, inis, commit,\n epoch_trains, epoch_tests, model_names, last_n_samples, permute=permute)\n\n print('Finished', result_name)\n output = pj(OUTPUT_DIR,commit,'_'.join([git_hash, result_name, template_name]))\n if ('file_type' in template[result_name]):\n file_type = template[result_name]['file_type']\n output += '.' 
+ file_type\n if file_type == 'json':\n df.to_json(output, orient='records')\n print('Saved results in', output)\n else:\n print('UNKNOWN FILE FORMAT.')\n else:\n output += '.csv'\n df.to_csv(output, float_format=\"%g\")\n print('Saved results in', output)\n\ndef main(args):\n compute_results(args.c, args.t, args.p)\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(description='Compute data for plots and statistics by template.')\n parser.add_argument('-c', type=str, default=None, help=\"Commit id (first 7 characters)\")\n parser.add_argument('-t', type=str, default=None, help=\"Template filename\")\n parser.add_argument('-p', type=int, default=1, help=\"Number of parallel processes\")\n args = parser.parse_args()\n main(args)\n","repo_name":"mzperix/asrt-beamsampling","sub_path":"Python/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":17277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9385726767","text":"import json\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\nfrom _pytest.capture import CaptureFixture\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom flake8 import __version__ as flake_version\n\nfrom flake8_nb import FLAKE8_VERSION_TUPLE\nfrom flake8_nb import __version__\nfrom flake8_nb.__main__ import main\nfrom flake8_nb.parsers.notebook_parsers import InvalidNotebookWarning\nfrom flake8_nb.parsers.notebook_parsers import NotebookParser\nfrom tests import TEST_NOTEBOOK_BASE_PATH\n\n\n@pytest.mark.parametrize(\"keep_intermediate\", [True, False])\n@pytest.mark.parametrize(\n \"notebook_cell_format,expected_result\",\n [\n (\"{nb_path}#In[{exec_count}]\", \"expected_output_exec_count\"),\n (\"{nb_path}:code_cell#{code_cell_count}\", \"expected_output_code_cell_count\"),\n (\"{nb_path}:cell#{total_cell_count}\", \"expected_output_total_cell_count\"),\n ],\n)\ndef test_run_main(\n capsys, keep_intermediate: bool, notebook_cell_format: str, expected_result: str\n):\n argv = [\"flake8_nb\"]\n if keep_intermediate:\n argv.append(\"--keep-parsed-notebooks\")\n argv += [\"--notebook-cell-format\", notebook_cell_format]\n argv += [\"--exclude\", \"*.tox/*,*.ipynb_checkpoints*,*/docs/*\"]\n with pytest.raises(SystemExit):\n with pytest.warns(InvalidNotebookWarning):\n main([*argv, TEST_NOTEBOOK_BASE_PATH])\n captured = capsys.readouterr()\n result_output = captured.out\n result_list = result_output.replace(\"\\r\", \"\").split(\"\\n\")\n result_list.remove(\"\")\n expected_result_path = os.path.join(\n os.path.dirname(__file__), \"data\", f\"{expected_result}.txt\"\n )\n with open(expected_result_path) as result_file:\n expected_result_list = result_file.readlines()\n assert len(expected_result_list) == len(result_list)\n for expected_result in expected_result_list:\n assert any(result.endswith(expected_result.rstrip(\"\\n\")) for result in result_list)\n\n if keep_intermediate:\n assert os.path.exists(NotebookParser.temp_path)\n NotebookParser.clean_up()\n\n\ndef test_run_main_use_config(capsys, tmp_path: Path):\n test_config = tmp_path / \"setup.cfg\"\n test_config.write_text(\"[flake8_nb]\\nextend-ignore = E231,F401\")\n\n argv = [\"flake8_nb\", \"--config\", test_config.resolve().as_posix()]\n with pytest.raises(SystemExit):\n with pytest.warns(InvalidNotebookWarning):\n main([*argv, TEST_NOTEBOOK_BASE_PATH])\n captured = capsys.readouterr()\n result_output = captured.out\n result_list = result_output.replace(\"\\r\", 
\"\").split(\"\\n\")\n result_list.remove(\"\")\n expected_result_path = os.path.join(\n os.path.dirname(__file__), \"data\", \"expected_output_config_test.txt\"\n )\n with open(expected_result_path) as result_file:\n expected_result_list = result_file.readlines()\n assert len(expected_result_list) == len(result_list)\n for expected_result in expected_result_list:\n assert any(result.endswith(expected_result.rstrip(\"\\n\")) for result in result_list)\n\n\n@pytest.mark.parametrize(\"config_file_name\", (\"setup.cfg\", \"tox.ini\", \".flake8_nb\"))\ndef test_config_discovered(\n config_file_name: str, tmp_path: Path, monkeypatch: MonkeyPatch, capsys: CaptureFixture\n):\n \"\"\"Check that config file is discovered.\"\"\"\n\n test_config = tmp_path / config_file_name\n test_config.write_text(\"[flake8_nb]\\nextend-ignore = E231,F401\")\n\n shutil.copytree(TEST_NOTEBOOK_BASE_PATH, tmp_path / \"notebooks\")\n\n with monkeypatch.context() as m:\n m.chdir(tmp_path)\n with pytest.raises(SystemExit):\n with pytest.warns(InvalidNotebookWarning):\n main([\"flake8_nb\"])\n captured = capsys.readouterr()\n result_output = captured.out\n result_list = result_output.replace(\"\\r\", \"\").split(\"\\n\")\n result_list.remove(\"\")\n expected_result_path = os.path.join(\n os.path.dirname(__file__), \"data\", \"expected_output_config_test.txt\"\n )\n with open(expected_result_path) as result_file:\n expected_result_list = result_file.readlines()\n assert len(expected_result_list) == len(result_list)\n for expected_result in expected_result_list:\n assert any(result.endswith(expected_result.rstrip(\"\\n\")) for result in result_list)\n\n\ndef test_run_main_all_excluded(capsys):\n argv = [\"flake8_nb\"]\n argv += [\n \"--exclude\",\n f\"*.tox/*,*.ipynb_checkpoints*,*/docs/*,{TEST_NOTEBOOK_BASE_PATH}\",\n ]\n with pytest.raises(SystemExit):\n with pytest.warns(InvalidNotebookWarning):\n main([*argv, TEST_NOTEBOOK_BASE_PATH])\n captured = capsys.readouterr()\n result_output = captured.out\n result_list = result_output.replace(\"\\r\", \"\").split(\"\\n\")\n result_list.remove(\"\")\n assert len(result_list) == 0\n\n\n@pytest.mark.parametrize(\"keep_intermediate\", [True, False])\n@pytest.mark.parametrize(\"cli_entrypoint\", [\"flake8_nb\", \"flake8-nb\"])\n@pytest.mark.parametrize(\n \"notebook_cell_format,expected_result\",\n [\n (\"{nb_path}#In[{exec_count}]\", \"expected_output_exec_count\"),\n (\"{nb_path}:code_cell#{code_cell_count}\", \"expected_output_code_cell_count\"),\n (\"{nb_path}:cell#{total_cell_count}\", \"expected_output_total_cell_count\"),\n ],\n)\ndef test_syscall(\n cli_entrypoint: str, keep_intermediate: bool, notebook_cell_format: str, expected_result: str\n):\n argv = [cli_entrypoint]\n if keep_intermediate:\n argv.append(\"--keep-parsed-notebooks\")\n argv += [\"--notebook-cell-format\", notebook_cell_format]\n argv += [\"--exclude\", \"*.tox/*,*.ipynb_checkpoints*,*/docs/*\"]\n proc = subprocess.Popen(\n [*argv, TEST_NOTEBOOK_BASE_PATH], stdout=subprocess.PIPE, universal_newlines=True\n )\n result_list = [str(line) for line in proc.stdout]\n expected_result_path = os.path.join(\n os.path.dirname(__file__), \"data\", f\"{expected_result}.txt\"\n )\n with open(expected_result_path) as result_file:\n expected_result_list = result_file.readlines()\n\n print(\"\\n\".join(expected_result_list))\n print(\"#\" * 80)\n print(\"\\n\".join(result_list))\n assert len(expected_result_list) == len(result_list)\n\n for expected_result in expected_result_list:\n assert 
any(result.endswith(expected_result) for result in result_list)\n\n\ndef test_flake8_nb_module_call():\n    \"\"\"Call flake8_nb as python module ``python -m flake8_nb --help``.\"\"\"\n    output = subprocess.run(\n        [sys.executable, \"-m\", \"flake8_nb\", \"--help\"], capture_output=True, check=True\n    )\n    assert output.returncode == 0\n    assert output.stdout.decode().startswith(\"usage: flake8_nb [options] file file ...\")\n\n\n@pytest.mark.skipif(FLAKE8_VERSION_TUPLE < (5, 0, 0), reason=\"Only implemented for flake8>=5.0.0\")\ndef test_flake8_nb_bug_report():\n    \"\"\"Debug information.\"\"\"\n    output = subprocess.run(\n        [sys.executable, \"-m\", \"flake8_nb\", \"--bug-report\"], capture_output=True, check=True\n    )\n    assert output.returncode == 0\n    info = json.loads(output.stdout.decode())\n\n    assert \"flake8-version\" in info\n    assert info[\"flake8-version\"] == flake_version\n    assert info[\"version\"] == __version__\n\n    assert not any(plugin[\"plugin\"] == \"flake8-nb\" for plugin in info[\"plugins\"])\n","repo_name":"s-weigand/flake8-nb","sub_path":"tests/test__main__.py","file_name":"test__main__.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"1701954277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 28 17:34:31 2018\n\n@author: quentin\n\"\"\"\n\n###############################################################################\n# FUNCTIONS TO SAVE DATA\n###############################################################################\n\nimport os\nimport sys\nimport pandas\n\n\n# SAVE DATAFRAME TO CSV\ndef SaveDfToCsv(path, csvName, dataDf):\n    \n    # Check if dataBaseDf is a pandas' dataframe\n    if(not isinstance(dataDf, pandas.DataFrame)):\n        \n        sys.stderr.write(\"Argument given in SaveDfToCsv is not a Dataframe. \");\n        sys.exit(-1); \n        \n    # Check if csv name has the right form\n    if(csvName.find('.csv') > -1):\n        # Erase .csv (str.replace returns a new string, so the result must be reassigned)\n        csvName = csvName.replace('.csv', '');\n    \n    csvToSave = path + csvName + '.csv'; \n    dataDf.to_csv(csvToSave);\n    \n    print(\"Data saved : \" + csvToSave);\n    \n\n# CHECK IF DATA CSV NAME EXISTS AND CHANGES IT IN CASE\ndef CheckDataCsv(dataPath, dataCsvName):\n    \n    # Check if date folder exists\n    if(not(os.path.isdir(dataPath))):\n        \n        # Create folder\n        os.makedirs(dataPath);\n        # Returns dataCsvName\n        return dataCsvName;\n    \n    else:\n        \n        # Index to increment dataCsvName\n        csvNameIndex = 1;\n        dataCsvNameTmp = dataCsvName; \n        \n        while(os.path.exists(dataPath + dataCsvNameTmp + '.csv')):\n            \n            # Loop to increment dataCsvName\n            dataCsvNameTmp = dataCsvName + '_' + str(csvNameIndex);\n            csvNameIndex = csvNameIndex + 1;\n        \n        dataCsvName = dataCsvNameTmp;\n        return dataCsvName;\n    \n","repo_name":"CIRED/gridded_dataset_192_cities","sub_path":"4. 
Real estate/saveDataTools.py","file_name":"saveDataTools.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74092722168","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 21 14:46:15 2021\r\n\r\n@author: Hansi\r\n\r\nloading of dirs and applying trafo and alpha to background and foreground img.\r\n\"\"\"\r\nimport cv2\r\nimport random\r\nimport os\r\nimport math\r\nfrom alpha import alpha_channel\r\nfrom trafo import geometric_transformation\r\nimport read_names as classes\r\nfrom write import write_obj\r\nfrom process import process_images\r\n\r\n# Amount of files\r\ncount = 215 # determines the number of generate images from each template and corresponding background\r\nimage_counter = 0\r\n\r\n# Paths for pictures and .txt-files\r\ndir_path = os.path.dirname(os.path.realpath(__file__)) # path to current directory\r\npath = os.path.join(dir_path, 'generated_images') # dir path for generated images\r\nimg_dir = os.path.join(dir_path, 'templates') # path for template images\r\nbackground_dir = os.path.join(dir_path, 'backgrounds') # separate folder for background images\r\n\r\n# Classes from the obj.txt file\r\nclasses = classes.read_classes()\r\n\r\n\r\nfor background in os.listdir(background_dir):\r\n background_path = os.path.join(background_dir, '{}'.format(background))\r\n for obj in os.listdir(img_dir):\r\n # call name of the image\r\n name = '{}'.format(obj)\r\n # split name\r\n # 0: class\r\n # 1: MOD\r\n # 2: Radius\r\n # 3: x coordinate\r\n # 4: y coordinate\r\n # 5: image index number\r\n variables = name.split('_')\r\n MOD = float(variables[1]) # minimum object distance\r\n print(variables)\r\n image_path = os.path.join(img_dir, '{}'.format(name))\r\n for i in range(count):\r\n image_counter += 1\r\n print(image_counter)\r\n if image_counter == count:\r\n image_counter = 0\r\n # Get foreground and background\r\n img1 = cv2.imread(image_path, -1) # template image\r\n img2 = cv2.imread(background_path) # background image\r\n\r\n # Rotation angle for geometric transformation\r\n rotation_angle = random.randrange(0, 360, 1)\r\n\r\n # Scaling of img1 using similar triangles\r\n z_in = MOD\r\n z_out = random.uniform(2.0, 3.5)\r\n scale = z_in / z_out\r\n # print(x_in, 1/x_out)\r\n\r\n rotated_img = geometric_transformation(img1, rotation_angle, scale, float(variables[3]),\r\n float(variables[4]))\r\n # print(rotated_img.shape[0], rotated_img.shape[1])\r\n x_offset = random.randrange(1, img2.shape[1] - rotated_img.shape[1],\r\n 1)\r\n y_offset = random.randrange(1, img2.shape[0] - rotated_img.shape[0],\r\n 1)\r\n # print(x_offset, y_offset)\r\n # Overlaying the object to the background and transparent image\r\n contour_img, overlay = alpha_channel(rotated_img, img2, x_offset, y_offset)\r\n\r\n # Optional - draw bounding rectangle and circle\r\n radius = float(scale) * float(variables[2])\r\n x_center = float(scale)*float(variables[3])\r\n y_center = float(scale)*float(variables[4])\r\n\r\n x_rect1 = x_offset + x_center - radius\r\n y_rect1 = y_offset + y_center - radius\r\n x_rect2 = x_offset + x_center + radius\r\n y_rect2 = y_offset + y_center + radius\r\n\r\n xC = x_offset + x_center\r\n yC = y_offset + y_center\r\n # print(radius, x_center, y_center, x_rect1, y_rect1)\r\n # cv2.circle(overlay,(x_offset+x_center,y_offset+y_center),3,(0,255,0),3) ONLY FOR VISUALISATION\r\n #cv2.rectangle(overlay, (x_rect1, y_rect1),(x_rect2, y_rect2), (255, 0, 0), 1)\r\n\r\n # 
Saving the picture and writing the corresponding .txt-file\r\n write_obj(path, scale, variables, xC, yC, overlay, image_counter)\r\n\r\nprocess_images(20, dir_path)\r\n\r\n","repo_name":"mtrimmel/yolo-data-segmentation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31319214788","text":"# -*- coding: utf-8 -*-\n\nimport discord\n\nfrom dpy_toolbox import Bot, MessageFilter\n\nbot = Bot(command_prefix='!', intents=discord.Intents.all(), toolbox=True)\nTOKEN = '' # BAD\n\n\nasync def msg_cb_ex(message: discord.Message) -> None:\n await message.reply(f\"Hi {message.author}, I'm dad!\")\n\n\n@bot.event\nasync def on_ready() -> None:\n print(f'Running as {bot.user}')\n\n await bot.toolbox.add_message_callback(\n \"my_command_name\",\n msg_cb_ex,\n MessageFilter(\n startswith_content=(\"Hi\", \"hi\")\n )\n )\n\nbot.run(TOKEN)\n","repo_name":"TheWever/dpy-toolbox","sub_path":"examples/message_callback.py","file_name":"message_callback.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"14918099176","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.staging.training import controller\nfrom official.staging.training import standard_runnable\n\n\ndef all_strategy_combinations():\n \"\"\"Gets combinations of distribution strategies.\"\"\"\n return combinations.combine(\n strategy=[\n strategy_combinations.one_device_strategy,\n strategy_combinations.tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n ],\n mode=\"eager\",\n )\n\n\ndef create_model():\n x = tf.keras.layers.Input(shape=(3,), name=\"input\")\n y = tf.keras.layers.Dense(4, name=\"dense\")(x)\n model = tf.keras.Model(x, y)\n return model\n\n\ndef summaries_with_matching_keyword(keyword, summary_dir):\n \"\"\"Yields summary protos matching given keyword from event file.\"\"\"\n event_paths = tf.io.gfile.glob(os.path.join(summary_dir, \"events*\"))\n for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):\n if event.summary is not None:\n for value in event.summary.value:\n if keyword in value.tag:\n tf.compat.v1.logging.error(event)\n yield event.summary\n\n\ndef check_eventfile_for_keyword(keyword, summary_dir):\n \"\"\"Checks event files for the keyword.\"\"\"\n return any(summaries_with_matching_keyword(keyword, summary_dir))\n\n\ndef dataset_fn(ctx):\n del ctx\n inputs = np.zeros((10, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10, drop_remainder=True)\n return dataset\n\n\nclass TestRunnable(standard_runnable.StandardTrainable,\n standard_runnable.StandardEvaluable):\n \"\"\"Implements the training and evaluation APIs for the test model.\"\"\"\n\n def __init__(self):\n standard_runnable.StandardTrainable.__init__(self)\n standard_runnable.StandardEvaluable.__init__(self)\n self.strategy = tf.distribute.get_strategy()\n self.model = create_model()\n 
self.optimizer = tf.keras.optimizers.RMSprop()\n self.global_step = self.optimizer.iterations\n self.train_loss = tf.keras.metrics.Mean(\"train_loss\", dtype=tf.float32)\n self.eval_loss = tf.keras.metrics.Mean(\"eval_loss\", dtype=tf.float32)\n\n def build_train_dataset(self):\n return self.strategy.experimental_distribute_datasets_from_function(\n dataset_fn)\n\n def train_step(self, iterator):\n\n def _replicated_step(inputs):\n \"\"\"Replicated training step.\"\"\"\n inputs, targets = inputs\n with tf.GradientTape() as tape:\n outputs = self.model(inputs)\n loss = tf.math.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, self.model.variables)\n self.optimizer.apply_gradients(zip(grads, self.model.variables))\n self.train_loss.update_state(loss)\n\n self.strategy.run(_replicated_step, args=(next(iterator),))\n\n def train_loop_end(self):\n return {\n \"loss\": self.train_loss.result(),\n }\n\n def build_eval_dataset(self):\n return self.strategy.experimental_distribute_datasets_from_function(\n dataset_fn)\n\n def eval_begin(self):\n self.eval_loss.reset_states()\n\n def eval_step(self, iterator):\n\n def _replicated_step(inputs):\n \"\"\"Replicated evaluation step.\"\"\"\n inputs, targets = inputs\n outputs = self.model(inputs)\n loss = tf.math.reduce_sum(outputs - targets)\n self.eval_loss.update_state(loss)\n\n self.strategy.run(_replicated_step, args=(next(iterator),))\n\n def eval_end(self):\n return {\n \"eval_loss\": self.eval_loss.result(),\n }\n\n\nclass ControllerTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(ControllerTest, self).setUp()\n self.model_dir = self.get_temp_dir()\n\n def test_no_checkpoint(self):\n test_runnable = TestRunnable()\n # No checkpoint manager and no strategy.\n test_controller = controller.Controller(\n train_fn=test_runnable.train,\n eval_fn=test_runnable.evaluate,\n global_step=test_runnable.global_step,\n train_steps=10,\n steps_per_loop=2,\n summary_dir=os.path.join(self.model_dir, \"summaries/train\"),\n summary_interval=2,\n eval_summary_dir=os.path.join(self.model_dir, \"summaries/eval\"),\n eval_steps=2,\n eval_interval=5)\n test_controller.train(evaluate=True)\n self.assertEqual(test_runnable.global_step.numpy(), 10)\n # Loss and accuracy values should be written into summaries.\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/train\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"loss\", os.path.join(self.model_dir, \"summaries/train\")))\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/eval\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"eval_loss\", os.path.join(self.model_dir, \"summaries/eval\")))\n # No checkpoint, so global step starts from 0.\n test_runnable.global_step.assign(0)\n test_controller.train(evaluate=True)\n self.assertEqual(test_runnable.global_step.numpy(), 10)\n\n def test_no_checkpoint_and_summaries(self):\n test_runnable = TestRunnable()\n # No checkpoint + summary directories.\n test_controller = controller.Controller(\n train_fn=test_runnable.train,\n eval_fn=test_runnable.evaluate,\n global_step=test_runnable.global_step,\n train_steps=10,\n steps_per_loop=2,\n eval_steps=2,\n eval_interval=5)\n test_controller.train(evaluate=True)\n self.assertEqual(test_runnable.global_step.numpy(), 10)\n\n @combinations.generate(all_strategy_combinations())\n def test_train_and_evaluate(self, strategy):\n with strategy.scope():\n test_runnable = TestRunnable()\n\n checkpoint = 
tf.train.Checkpoint(\n model=test_runnable.model, optimizer=test_runnable.optimizer)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n self.model_dir,\n max_to_keep=None,\n step_counter=test_runnable.global_step,\n checkpoint_interval=10)\n test_controller = controller.Controller(\n strategy=strategy,\n train_fn=test_runnable.train,\n eval_fn=test_runnable.evaluate,\n global_step=test_runnable.global_step,\n train_steps=10,\n steps_per_loop=2,\n summary_dir=os.path.join(self.model_dir, \"summaries/train\"),\n summary_interval=2,\n checkpoint_manager=checkpoint_manager,\n eval_summary_dir=os.path.join(self.model_dir, \"summaries/eval\"),\n eval_steps=2,\n eval_interval=5)\n test_controller.train(evaluate=True)\n\n # Checkpoints are saved.\n self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, \"ckpt*\")))\n\n # Loss and accuracy values should be written into summaries.\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/train\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"loss\", os.path.join(self.model_dir, \"summaries/train\")))\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/eval\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"eval_loss\", os.path.join(self.model_dir, \"summaries/eval\")))\n\n @combinations.generate(all_strategy_combinations())\n def test_train_only(self, strategy):\n with strategy.scope():\n test_runnable = TestRunnable()\n\n checkpoint = tf.train.Checkpoint(\n model=test_runnable.model, optimizer=test_runnable.optimizer)\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n self.model_dir,\n max_to_keep=None,\n step_counter=test_runnable.global_step,\n checkpoint_interval=10)\n test_controller = controller.Controller(\n strategy=strategy,\n train_fn=test_runnable.train,\n global_step=test_runnable.global_step,\n train_steps=10,\n steps_per_loop=2,\n summary_dir=os.path.join(self.model_dir, \"summaries/train\"),\n summary_interval=2,\n checkpoint_manager=checkpoint_manager,\n eval_summary_dir=os.path.join(self.model_dir, \"summaries/eval\"),\n )\n test_controller.train(evaluate=False)\n\n # Checkpoints are saved.\n self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, \"ckpt*\")))\n\n # Only train summaries are written.\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/train\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"loss\", os.path.join(self.model_dir, \"summaries/train\")))\n self.assertFalse(\n tf.io.gfile.exists(os.path.join(self.model_dir, \"summaries/eval\")))\n\n @combinations.generate(all_strategy_combinations())\n def test_evaluate_only(self, strategy):\n with strategy.scope():\n test_runnable = TestRunnable()\n\n checkpoint = tf.train.Checkpoint(model=test_runnable.model)\n checkpoint.save(os.path.join(self.model_dir, \"ckpt\"))\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n self.model_dir,\n max_to_keep=None,\n step_counter=test_runnable.global_step)\n test_controller = controller.Controller(\n strategy=strategy,\n eval_fn=test_runnable.evaluate,\n global_step=test_runnable.global_step,\n checkpoint_manager=checkpoint_manager,\n summary_dir=os.path.join(self.model_dir, \"summaries/train\"),\n eval_summary_dir=os.path.join(self.model_dir, \"summaries/eval\"),\n eval_steps=2,\n eval_interval=5)\n test_controller.evaluate()\n\n # Only eval summaries are written\n self.assertFalse(\n tf.io.gfile.exists(os.path.join(self.model_dir, 
\"summaries/train\")))\n self.assertNotEmpty(\n tf.io.gfile.listdir(os.path.join(self.model_dir, \"summaries/eval\")))\n self.assertTrue(\n check_eventfile_for_keyword(\n \"eval_loss\", os.path.join(self.model_dir, \"summaries/eval\")))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","repo_name":"yitu-opensource/MobileNeXt","sub_path":"mobile_deployment/tensorflow/slim/models/official/staging/training/controller_test.py","file_name":"controller_test.py","file_ext":"py","file_size_in_byte":10305,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"77"} +{"seq_id":"21920857706","text":"# stdlib imports\nimport os\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport logging\n\n# third party imports\nfrom impactutils.textformat.text import pop_round_short, round_to_nearest\nfrom impactutils.textformat.text import dec_to_roman\nfrom impactutils.colors.cpalette import ColorPalette\nfrom impactutils.comcat.query import ComCatInfo\nfrom impactutils.io.cmd import get_command_output\nimport numpy as np\n\nLATEX_TO_PDF_BIN = 'pdflatex'\n\nLATEX_SPECIAL_CHARACTERS = OrderedDict([('\\\\', '\\\\textbackslash{}'),\n ('{', '\\{'),\n ('}', '\\}'),\n ('#', '\\#'),\n ('$', '\\$'),\n ('%', '\\%'),\n ('&', '\\&'),\n ('^', '\\\\textasciicircum{}'),\n ('_', '\\_'),\n ('~', '\\textasciitilde{}')])\n\nDEFAULT_PAGER_URL = 'http://earthquake.usgs.gov/data/pager/'\nMIN_DISPLAY_POP = 1000\n\n\ndef texify(text):\n newtext = text\n for original, replacement in LATEX_SPECIAL_CHARACTERS.items():\n newtext = newtext.replace(original, replacement)\n return newtext\n\n\ndef create_onepager(pdata, version_dir, debug=False):\n \"\"\"\n :param pdata:\n PagerData object.\n :param version_dir: \n Path of event version directory.\n :param debug:\n bool for whether or not to add textpos boxes to onepager.\n \"\"\"\n\n # ---------------------------------------------------------------------------\n # Sort out some paths\n # ---------------------------------------------------------------------------\n\n # Locaiton of this module\n mod_dir, dummy = os.path.split(__file__)\n\n # losspager package direcotry\n losspager_dir = os.path.join(mod_dir, '..')\n\n # Repository root directory\n root_dir = os.path.join(losspager_dir, '..')\n\n # Data directory\n data_dir = os.path.join(losspager_dir, 'data')\n\n # Onepager latex template file\n template_file = os.path.join(data_dir, 'onepager2.tex')\n\n # ---------------------------------------------------------------------------\n # Read in pager data and latex template\n # ---------------------------------------------------------------------------\n\n json_dir = os.path.join(version_dir, 'json')\n pdict = pdata._pagerdict\n edict = pdata.getEventInfo()\n\n with open(template_file, 'r') as f:\n template = f.read()\n\n # ---------------------------------------------------------------------------\n # Fill in template values\n # ---------------------------------------------------------------------------\n\n # Sort out origin time\n olat = edict['lat']\n olon = edict['lon']\n otime_utc = edict['time']\n date_utc = datetime.strptime(otime_utc, \"%Y-%m-%d %H:%M:%S\")\n\n date_local = pdata.local_time\n DoW = date_local.strftime('%a')\n otime_local = date_local.strftime('%H:%M:%S')\n otime_local = DoW + ' ' + otime_local\n template = template.replace(\"[ORIGTIME]\", otime_utc)\n template = template.replace(\"[LOCALTIME]\", otime_local)\n\n # Some paths\n template = template.replace(\"[VERSIONFOLDER]\", version_dir)\n template = 
template.replace(\"[HOMEDIR]\", root_dir)\n\n # Magnitude location string under USGS logo\n magloc = 'M %.1f, %s' % (edict['mag'], texify(edict['location']))\n template = template.replace(\"[MAGLOC]\", magloc)\n\n # Pager version\n ver = \"Version \" + str(pdict['pager']['version_number'])\n template = template.replace(\"[VERSION]\", ver)\n template = template.replace(\"[VERSIONX]\", \"2.5\")\n\n # Epicenter location\n lat = edict['lat']\n lon = edict['lon']\n dep = edict['depth']\n if lat > 0:\n hlat = \"N\"\n else:\n hlat = \"S\"\n if lon > 0:\n hlon = \"E\"\n else:\n hlon = \"W\"\n template = template.replace(\"[LAT]\", '%.4f' % abs(lat))\n template = template.replace(\"[LON]\", '%.4f' % abs(lon))\n template = template.replace(\"[HEMILAT]\", hlat)\n template = template.replace(\"[HEMILON]\", hlon)\n template = template.replace(\"[DEPTH]\", '%.1f' % dep)\n\n # Tsunami warning? --- need to fix to be a function of tsunamic flag\n if edict['tsunami']:\n template = template.replace(\n \"[TSUNAMI]\", \"FOR TSUNAMI INFORMATION, SEE: tsunami.gov\")\n else:\n template = template.replace(\"[TSUNAMI]\", \"\")\n\n if pdata.isScenario():\n elapse = ''\n else:\n elapse = \"Created: \" + \\\n pdict['pager']['elapsed_time'] + \" after earthquake\"\n template = template.replace(\"[ELAPSED]\", elapse)\n template = template.replace(\"[IMPACT1]\",\n texify(pdict['comments']['impact1']))\n template = template.replace(\"[IMPACT2]\",\n texify(pdict['comments']['impact2']))\n template = template.replace(\"[STRUCTCOMMENT]\",\n texify(pdict['comments']['struct_comment']))\n\n # Summary alert color\n template = template.replace(\"[SUMMARYCOLOR]\",\n pdata.summary_alert.capitalize())\n template = template.replace(\"[ALERTFILL]\",\n pdata.summary_alert)\n\n # fill in exposure values\n max_border_mmi = pdata._pagerdict['population_exposure']['maximum_border_mmi']\n explist = pdata.getTotalExposure()\n pophold = 0\n for mmi in range(1, 11):\n iexp = mmi - 1\n if mmi == 2:\n pophold += explist[iexp]\n continue\n elif mmi == 3:\n pop = explist[iexp] + pophold\n macro = '[MMI2-3]'\n else:\n pop = explist[iexp]\n macro = '[MMI%i]' % mmi\n if pop < 1000:\n pop = round_to_nearest(pop, round_value=1000)\n if max_border_mmi > mmi and mmi <= 4:\n if pop == 0:\n popstr = '--*'\n else:\n if pop < 1000:\n pop = round_to_nearest(pop, round_value=1000)\n popstr = pop_round_short(pop) + '*'\n else:\n popstr = pop_round_short(pop)\n template = template.replace(macro, popstr)\n\n # MMI color pal\n pal = ColorPalette.fromPreset('mmi')\n\n # Historical table\n htab = pdata.getHistoricalTable()\n if htab[0] is None:\n # use pdata.getHistoricalComment()\n htex = pdata.getHistoricalComment()\n else:\n # build latex table\n htex = \"\"\"\n\\\\begin{tabularx}{7.25cm}{lrc*{1}{>{\\\\centering\\\\arraybackslash}X}*{1}{>{\\\\raggedleft\\\\arraybackslash}X}}\n\\hline\n\\\\textbf{Date} &\\\\textbf{Dist.}&\\\\textbf{Mag.}&\\\\textbf{Max} &\\\\textbf{Shaking}\\\\\\\\\n\\\\textbf{(UTC)}&\\\\textbf{(km)} & &\\\\textbf{MMI(\\#)}&\\\\textbf{Deaths} \\\\\\\\\n\\hline\n[TABLEDATA]\n\\hline\n\\multicolumn{5}{p{7.2cm}}{\\\\small [COMMENT]}\n\\end{tabularx}\"\"\"\n comment = pdata._pagerdict['comments']['secondary_comment']\n htex = htex.replace(\"[COMMENT]\", texify(comment))\n tabledata = \"\"\n nrows = len(htab)\n for i in range(nrows):\n date = htab[i]['Time'].split()[0]\n dist = str(int(htab[i]['Distance']))\n mag = str(htab[i]['Magnitude'])\n mmi = dec_to_roman(np.round(htab[i]['MaxMMI'], 0))\n col = pal.getDataColor(htab[i]['MaxMMI'])\n texcol = 
\"%s,%s,%s\" % (col[0], col[1], col[2])\n nmmi = pop_round_short(htab[i]['NumMaxMMI'])\n mmicell = '%s(%s)' % (mmi, nmmi)\n shakedeath = htab[i]['ShakingDeaths']\n if np.isnan(shakedeath):\n death = \"--\"\n else:\n death = pop_round_short(shakedeath)\n row = '%s & %s & %s & \\cellcolor[rgb]{%s} %s & %s \\\\\\\\ '\\\n '\\n' % (date, dist, mag, texcol, mmicell, death)\n tabledata = tabledata + row\n htex = htex.replace(\"[TABLEDATA]\", tabledata)\n template = template.replace(\"[HISTORICAL_BLOCK]\", htex)\n\n # City table\n ctex = \"\"\"\n\\\\begin{tabularx}{7.25cm}{lXr}\n\\hline\n\\\\textbf{MMI} & \\\\textbf{City} & \\\\textbf{Population} \\\\\\\\\n\\hline\n[TABLEDATA]\n\\hline\n\\end{tabularx}\"\"\"\n ctab = pdata.getCityTable()\n nrows = len(ctab.index)\n tabledata = \"\"\n for i in range(nrows):\n mmi = dec_to_roman(np.round(ctab['mmi'].iloc[i], 0))\n city = ctab['name'].iloc[i]\n if ctab['pop'].iloc[i] == 0:\n pop = '$<$1k'\n else:\n if ctab['pop'].iloc[i] < 1000:\n popnum = round_to_nearest(\n ctab['pop'].iloc[i], round_value=1000)\n else:\n popnum = ctab['pop'].iloc[i]\n pop = pop_round_short(popnum)\n col = pal.getDataColor(ctab['mmi'].iloc[i])\n texcol = \"%s,%s,%s\" % (col[0], col[1], col[2])\n if ctab['on_map'].iloc[i] == 1:\n if ctab['pop'].iloc[i] == 0:\n pop = '\\\\boldmath$<$\\\\textbf{1k}'\n row = '\\\\rowcolor[rgb]{%s}\\\\textbf{%s} & \\\\textbf{%s} & '\\\n '%s\\\\\\\\ \\n' % (texcol, mmi, city, pop)\n else:\n row = '\\\\rowcolor[rgb]{%s}\\\\textbf{%s} & \\\\textbf{%s} & '\\\n '\\\\textbf{%s}\\\\\\\\ \\n' % (texcol, mmi, city, pop)\n else:\n row = '\\\\rowcolor[rgb]{%s}%s & %s & '\\\n '%s\\\\\\\\ \\n' % (texcol, mmi, city, pop)\n tabledata = tabledata + row\n ctex = ctex.replace(\"[TABLEDATA]\", tabledata)\n template = template.replace(\"[CITYTABLE]\", ctex)\n\n eventid = edict['eventid']\n\n # query ComCat for information about this event\n # fill in the url, if we can find it\n try:\n ccinfo = ComCatInfo(eventid)\n eventid, allids = ccinfo.getAssociatedIds()\n event_url = ccinfo.getURL() + '#pager'\n except:\n event_url = DEFAULT_PAGER_URL\n\n eventid = \"Event ID: \" + eventid\n template = template.replace(\"[EVENTID]\", texify(eventid))\n template = template.replace(\"[EVENTURL]\", texify(event_url))\n\n # Write latex file\n tex_output = os.path.join(version_dir, 'onepager.tex')\n with open(tex_output, 'w') as f:\n f.write(template)\n\n pdf_output = os.path.join(version_dir, 'onepager.pdf')\n stderr = ''\n try:\n cwd = os.getcwd()\n os.chdir(version_dir)\n cmd = '%s -interaction nonstopmode --output-directory %s %s' % (\n LATEX_TO_PDF_BIN, version_dir, tex_output)\n logging.info('Running %s...' 
% cmd)\n res, stdout, stderr = get_command_output(cmd)\n os.chdir(cwd)\n if not res:\n return (None, stderr)\n else:\n if os.path.isfile(pdf_output):\n return (pdf_output, stderr)\n else:\n pass\n except Exception as e:\n pass\n finally:\n os.chdir(cwd)\n return (None, stderr)\n","repo_name":"mhearne-usgs/pager","sub_path":"losspager/onepager/onepager.py","file_name":"onepager.py","file_ext":"py","file_size_in_byte":10802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"20980511713","text":"from fileinput import filename\r\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton, reply_keyboard\r\nfrom aiogram.types.inline_keyboard import InlineKeyboardButton, InlineKeyboardMarkup\r\n \r\n\r\ninlineButton1 = InlineKeyboardButton('Меню #1', callback_data='inlineButton1')\r\ninlineButton2 = InlineKeyboardButton('Меню #2', callback_data='inlineButton2')\r\ninlineButton3 = InlineKeyboardButton('Меню #3', callback_data='inlineButton3')\r\ninlineButton4 = InlineKeyboardButton('Меню #4', callback_data='inlineButton4')\r\ninlineButton5 = InlineKeyboardButton('Меню #5', callback_data='inlineButton5')\r\ninlineButton6 = InlineKeyboardButton('Меню #6', callback_data='inlineButton6')\r\ninlineButton7 = InlineKeyboardButton('Меню #7', callback_data='inlineButton7')\r\ninlineButton8 = InlineKeyboardButton('Меню #8', callback_data='inlineButton8')\r\n\r\nchooseButtons1 = InlineKeyboardMarkup(row_width=4).insert(inlineButton1).insert(inlineButton2).insert(inlineButton3).insert(inlineButton4).insert(inlineButton5).insert(inlineButton6).insert(inlineButton7).insert(inlineButton8)\r\n\r\n#блок выбора продукции\r\ninlineButton11 = InlineKeyboardButton('Морозильні скрині', callback_data='inlineButton11')\r\ninlineButton12 = InlineKeyboardButton('Холодильні вітрини', callback_data='inlineButton12')\r\ninlineButton13 = InlineKeyboardButton('Морозильні скрині для вагового морозива', callback_data='inlineButton13')\r\ninlineButton14 = InlineKeyboardButton('Кондитерські холодильні вітрини', callback_data='inlineButton14')\r\ninlineButton15 = InlineKeyboardButton('Шафи', callback_data='inlineButton15')\r\ninlineButton16 = InlineKeyboardButton('Торговельне обладнання', callback_data='inlineButton16')\r\ninlineButton17 = InlineKeyboardButton('Комплектуючі', callback_data='inlineButton17')\r\ninlineButton18 = InlineKeyboardButton('Обладнання зі знижкою', callback_data='inlineButton18')\r\ninlineButton19 = InlineKeyboardButton('Морозильні скрині з гнутим склом \"прикасові\"', callback_data='inlineButton9')\r\ninlineButton20 = InlineKeyboardButton('Cкрині з гнутим склом серії S', callback_data='inlineButton10')\r\ninlineButton21 = InlineKeyboardButton('Морозильні скрині з гнутим склом серії SF', callback_data='inlineButton11')\r\ninlineButton22 = InlineKeyboardButton('Морозильні скрині з прямим склом', callback_data='inlineButton12')\r\ninlineButton23 = InlineKeyboardButton('Морозильні скрині з глухою кришкою', callback_data='inlineButton13')\r\ninlineButton24 = InlineKeyboardButton('Морозильні скрині - бонетного типу', callback_data='inlineButton14')\r\ninlineButton25 = InlineKeyboardButton('Холодильна вітрина серії VGL (+2 ÷ +8°C)', callback_data='inlineButton15')\r\ninlineButton26 = InlineKeyboardButton('Холодильна вітрина серії SGL (-2 ÷ +8°C)', callback_data='inlineButton16')\r\ninlineButton27 = InlineKeyboardButton('Холодильна вітрина серії FGL (-2 ÷ +8°C)', callback_data='inlineButton17')\r\ninlineButton28 = InlineKeyboardButton('Холодильна вітрина серії 
FGL (РЕСТАЙЛІНГ) (-2 ÷ +8°C)', callback_data='inlineButton18')\r\ninlineButton29 = InlineKeyboardButton('Холодильна вітрина серії FDI (0 ÷ +8°C)', callback_data='inlineButton29')\r\ninlineButton30 = InlineKeyboardButton('Морозильні скрині для вагового морозива серії SL', callback_data='inlineButton30')\r\ninlineButton31 = InlineKeyboardButton('Морозильні скрині для вагового морозива серії Q', callback_data='inlineButton31')\r\ninlineButton32 = InlineKeyboardButton('Шафи зі скляними дверима', callback_data='inlineButton32')\r\ninlineButton33 = InlineKeyboardButton('Шафи з глухими дверима', callback_data='inlineButton33')\r\ninlineButton34 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M100V', callback_data='inlineButton34')\r\ninlineButton35 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M200V', callback_data='inlineButton35')\r\ninlineButton36 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M200S', callback_data='inlineButton36')\r\ninlineButton37 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M300S', callback_data='inlineButton37')\r\ninlineButton38 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M300SH', callback_data='inlineButton38')\r\ninlineButton39 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M400S', callback_data='inlineButton39')\r\ninlineButton40 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M400SH', callback_data='inlineButton40')\r\ninlineButton41 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M500S', callback_data='inlineButton41')\r\ninlineButton42 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M600S', callback_data='inlineButton42')\r\ninlineButton43 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA N600S', callback_data='inlineButton43')\r\ninlineButton44 = InlineKeyboardButton('Середньотемпературна скриня з гнутим склом JUKA N300S', callback_data='inlineButton44')\r\ninlineButton45 = InlineKeyboardButton('Середньотемпературна скриня з гнутим склом JUKA N200S', callback_data='inlineButton45')\r\ninlineButton46 = InlineKeyboardButton('Середньотемпературна скриня з гнутим склом JUKA N400S', callback_data='inlineButton46')\r\ninlineButton47 = InlineKeyboardButton('Середньотемпературна скриня з гнутим склом JUKA N500S', callback_data='inlineButton47')\r\ninlineButton48 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M200SF', callback_data='inlineButton48')\r\ninlineButton49 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M300SF', callback_data='inlineButton49')\r\ninlineButton50 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M400SF', callback_data='inlineButton50')\r\ninlineButton51 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M500SF', callback_data='inlineButton51')\r\ninlineButton52 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M600SF', callback_data='inlineButton52')\r\ninlineButton53 = InlineKeyboardButton('Морозильна скриня з гнутим склом JUKA M700SF', callback_data='inlineButton53')\r\ninlineButton54 = InlineKeyboardButton('Морозильна скриня з прямим склом JUKA M200P', callback_data='inlineButton54')\r\ninlineButton55 = InlineKeyboardButton('Морозильна скриня з прямим склом JUKA M300P', callback_data='inlineButton55')\r\ninlineButton56 = InlineKeyboardButton('Морозильна скриня з прямим склом JUKA M400P', callback_data='inlineButton56')\r\ninlineButton57 = InlineKeyboardButton('Морозильна скриня з прямим склом JUKA M500P', 
callback_data='inlineButton57')\r\ninlineButton58 = InlineKeyboardButton('Морозильна скриня з прямим склом JUKA M600P', callback_data='inlineButton58')\r\ninlineButton59 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M200Z', callback_data='inlineButton59')\r\ninlineButton60 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M300Z', callback_data='inlineButton60')\r\ninlineButton61 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M400Z', callback_data='inlineButton61')\r\ninlineButton62 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M500Z', callback_data='inlineButton62')\r\ninlineButton63 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M600Z', callback_data='inlineButton63')\r\ninlineButton64 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M800Z', callback_data='inlineButton64')\r\ninlineButton65 = InlineKeyboardButton('Морозильна скриня з глухою кришкою JUKA M1000Z', callback_data='inlineButton65')\r\ninlineButton66 = InlineKeyboardButton('Середньотемпературна скриня бонетного типу JUKA N800S', callback_data='inlineButton66')\r\ninlineButton67 = InlineKeyboardButton('Середньотемпературна скриня бонетного типу JUKA N800D', callback_data='inlineButton67')\r\ninlineButton68 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA N800W', callback_data='inlineButton68')\r\ninlineButton69 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M800W', callback_data='inlineButton69')\r\ninlineButton70 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M800S', callback_data='inlineButton70')\r\ninlineButton71 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M800D', callback_data='inlineButton71')\r\ninlineButton72 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M1000V', callback_data='inlineButton72')\r\ninlineButton73 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M1000S (-14...-23 °C)', callback_data='inlineButton73')\r\ninlineButton74 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M1000S (-14...-23 °C ÷ +5...-5°C)', callback_data='inlineButton74')\r\ninlineButton75 = InlineKeyboardButton('Морозильна скриня бонетного типу JUKA M1000S (+5...-5°C)', callback_data='inlineButton75')\r\ninlineButton76 = InlineKeyboardButton('Холодильна вітрина VGL 130', callback_data='inlineButton76')\r\ninlineButton77 = InlineKeyboardButton('Холодильна вітрина VGL 130 А', callback_data='inlineButton77')\r\ninlineButton78 = InlineKeyboardButton('Холодильна вітрина VGL 160', callback_data='inlineButton78')\r\ninlineButton79 = InlineKeyboardButton('Холодильна вітрина VGL 160 A', callback_data='inlineButton79')\r\ninlineButton80 = InlineKeyboardButton('Холодильна вітрина VGL 190', callback_data='inlineButton80')\r\ninlineButton81 = InlineKeyboardButton('Холодильна вітрина VGL 190 A', callback_data='inlineButton81')\r\ninlineButton82 = InlineKeyboardButton('Холодильна вітрина SGL 130', callback_data='inlineButton82')\r\ninlineButton83 = InlineKeyboardButton('Холодильна вітрина SGL 130 A', callback_data='inlineButton83')\r\ninlineButton84 = InlineKeyboardButton('Холодильна вітрина SGL 160', callback_data='inlineButton84')\r\ninlineButton85 = InlineKeyboardButton('Холодильна вітрина SGL 160 A', callback_data='inlineButton85')\r\ninlineButton86 = InlineKeyboardButton('Холодильна вітрина SGL 190', callback_data='inlineButton86')\r\ninlineButton87 = InlineKeyboardButton('Холодильна вітрина SGL 190 A', callback_data='inlineButton87')\r\ninlineButton88 = 
InlineKeyboardButton('Холодильна вітрина SGL 260', callback_data='inlineButton88')\r\ninlineButton89 = InlineKeyboardButton('Холодильна вітрина SGL 260A', callback_data='inlineButton89')\r\ninlineButton90 = InlineKeyboardButton('Холодильна вітрина FGL 130', callback_data='inlineButton90')\r\ninlineButton91 = InlineKeyboardButton('Холодильна вітрина FGL 130 A', callback_data='inlineButton91')\r\ninlineButton92 = InlineKeyboardButton('Холодильна вітрина FGL 160', callback_data='inlineButton92')\r\ninlineButton93 = InlineKeyboardButton('Холодильна вітрина FGL 160 A', callback_data='inlineButton93')\r\ninlineButton94 = InlineKeyboardButton('Холодильна вітрина FGL 190', callback_data='inlineButton94')\r\ninlineButton95 = InlineKeyboardButton('Холодильна вітрина FGL 190 A', callback_data='inlineButton95')\r\ninlineButton96 = InlineKeyboardButton('Холодильна вітрина FGL 260А (Restyling)', callback_data='inlineButton96')\r\ninlineButton97 = InlineKeyboardButton('Холодильна вітрина FGL 130 (Restyling)', callback_data='inlineButton97')\r\ninlineButton98 = InlineKeyboardButton('Холодильна вітрина FGL 130 A (Restyling)', callback_data='inlineButton98')\r\ninlineButton99 = InlineKeyboardButton('Холодильна вітрина FGL 160 (Restyling)', callback_data='inlineButton99')\r\ninlineButton100 = InlineKeyboardButton('Холодильна вітрина FGL 160 A (Restyling)', callback_data='inlineButton100')\r\ninlineButton101 = InlineKeyboardButton('Холодильна вітрина FGL 190 (Restyling)', callback_data='inlineButton101')\r\ninlineButton102 = InlineKeyboardButton('Холодильна вітрина FGL 190 A (Restyling)', callback_data='inlineButton102')\r\ninlineButton103 = InlineKeyboardButton('Холодильна вітрина FGL 260 (Restyling)', callback_data='inlineButton103')\r\ninlineButton104 = InlineKeyboardButton('Кутовий модуль FDI E', callback_data='inlineButton104')\r\ninlineButton105 = InlineKeyboardButton('Холодильна вітрина FDI 160 A', callback_data='inlineButton105')\r\ninlineButton106 = InlineKeyboardButton('Холодильна вітрина FDI 197 A', callback_data='inlineButton106')\r\ninlineButton107 = InlineKeyboardButton('Холодильна вітрина FDI 260 A', callback_data='inlineButton107')\r\ninlineButton108 = InlineKeyboardButton('Морозильна скриня для продажу вагового морозива JUKA M300SL', callback_data='inlineButton108')\r\ninlineButton109 = InlineKeyboardButton('Морозильна скриня для продажу вагового морозива JUKA M400SL', callback_data='inlineButton109')\r\ninlineButton110 = InlineKeyboardButton('Морозильна скриня для продажу вагового морозива JUKA M600SL', callback_data='inlineButton110')\r\n\r\n\r\n\r\nchooseInlineButtons1 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons1.insert(inlineButton11)\r\nchooseInlineButtons1.insert(inlineButton12)\r\nchooseInlineButtons1.insert(inlineButton13)\r\nchooseInlineButtons1.insert(inlineButton14)\r\nchooseInlineButtons1.insert(inlineButton15)\r\nchooseInlineButtons1.insert(inlineButton16)\r\nchooseInlineButtons1.insert(inlineButton17)\r\nchooseInlineButtons1.insert(inlineButton18)\r\n\r\nchooseInlineButtons2 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons2.insert(inlineButton19)\r\nchooseInlineButtons2.insert(inlineButton20)\r\nchooseInlineButtons2.insert(inlineButton21)\r\nchooseInlineButtons2.insert(inlineButton22)\r\nchooseInlineButtons2.insert(inlineButton23)\r\nchooseInlineButtons2.insert(inlineButton24)\r\n\r\nchooseInlineButtons3 = 
InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons3.insert(inlineButton25)\r\nchooseInlineButtons3.insert(inlineButton26)\r\nchooseInlineButtons3.insert(inlineButton27)\r\nchooseInlineButtons3.insert(inlineButton28)\r\nchooseInlineButtons3.insert(inlineButton29)\r\n\r\nchooseInlineButtons4 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons4.insert(inlineButton30)\r\nchooseInlineButtons4.insert(inlineButton31)\r\n\r\nchooseInlineButtons5 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons5.insert(inlineButton32)\r\nchooseInlineButtons5.insert(inlineButton33)\r\n\r\nchooseInlineButtons6 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons6.insert(inlineButton34)\r\nchooseInlineButtons6.insert(inlineButton35)\r\n\r\nchooseInlineButtons7 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons7.insert(inlineButton36)\r\nchooseInlineButtons7.insert(inlineButton37)\r\nchooseInlineButtons7.insert(inlineButton38)\r\nchooseInlineButtons7.insert(inlineButton39)\r\nchooseInlineButtons7.insert(inlineButton40)\r\nchooseInlineButtons7.insert(inlineButton41)\r\nchooseInlineButtons7.insert(inlineButton42)\r\nchooseInlineButtons7.insert(inlineButton43)\r\nchooseInlineButtons7.insert(inlineButton44)\r\nchooseInlineButtons7.insert(inlineButton45)\r\nchooseInlineButtons7.insert(inlineButton46)\r\nchooseInlineButtons7.insert(inlineButton47)\r\n\r\nchooseInlineButtons8 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons8.insert(inlineButton48)\r\nchooseInlineButtons8.insert(inlineButton49)\r\nchooseInlineButtons8.insert(inlineButton50)\r\nchooseInlineButtons8.insert(inlineButton51)\r\nchooseInlineButtons8.insert(inlineButton52)\r\nchooseInlineButtons8.insert(inlineButton53)\r\n\r\nchooseInlineButtons9 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons9.insert(inlineButton54)\r\nchooseInlineButtons9.insert(inlineButton55)\r\nchooseInlineButtons9.insert(inlineButton56)\r\nchooseInlineButtons9.insert(inlineButton57)\r\nchooseInlineButtons9.insert(inlineButton58)\r\n\r\nchooseInlineButtons10 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons10.insert(inlineButton59)\r\nchooseInlineButtons10.insert(inlineButton60)\r\nchooseInlineButtons10.insert(inlineButton61)\r\nchooseInlineButtons10.insert(inlineButton62)\r\nchooseInlineButtons10.insert(inlineButton63)\r\nchooseInlineButtons10.insert(inlineButton64)\r\nchooseInlineButtons10.insert(inlineButton65)\r\n\r\nchooseInlineButtons11 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons11.insert(inlineButton66)\r\nchooseInlineButtons11.insert(inlineButton67)\r\nchooseInlineButtons11.insert(inlineButton68)\r\nchooseInlineButtons11.insert(inlineButton69)\r\nchooseInlineButtons11.insert(inlineButton70)\r\nchooseInlineButtons11.insert(inlineButton71)\r\nchooseInlineButtons11.insert(inlineButton72)\r\nchooseInlineButtons11.insert(inlineButton73)\r\nchooseInlineButtons11.insert(inlineButton74)\r\nchooseInlineButtons11.insert(inlineButton75)\r\n\r\nchooseInlineButtons12 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons12.insert(inlineButton76)\r\nchooseInlineButtons12.insert(inlineButton77)\r\nchooseInlineButtons12.insert(inlineButton78)\r\nchooseInlineButtons12.insert(inlineButton79)\r\nchooseInlineButtons12.insert(inlineButton80)\r\nchooseInlineButtons12.insert(inlineButton81)\r\n\r\nchooseInlineButtons13 = 
InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons13.insert(inlineButton82)\r\nchooseInlineButtons13.insert(inlineButton83)\r\nchooseInlineButtons13.insert(inlineButton84)\r\nchooseInlineButtons13.insert(inlineButton85)\r\nchooseInlineButtons13.insert(inlineButton86)\r\nchooseInlineButtons13.insert(inlineButton87)\r\nchooseInlineButtons13.insert(inlineButton88)\r\nchooseInlineButtons13.insert(inlineButton89)\r\n\r\nchooseInlineButtons14 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons14.insert(inlineButton90)\r\nchooseInlineButtons14.insert(inlineButton91)\r\nchooseInlineButtons14.insert(inlineButton92)\r\nchooseInlineButtons14.insert(inlineButton93)\r\nchooseInlineButtons14.insert(inlineButton94)\r\nchooseInlineButtons14.insert(inlineButton95)\r\nchooseInlineButtons14.insert(inlineButton96)\r\n\r\nchooseInlineButtons15 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons15.insert(inlineButton97)\r\nchooseInlineButtons15.insert(inlineButton98)\r\nchooseInlineButtons15.insert(inlineButton99)\r\nchooseInlineButtons15.insert(inlineButton100)\r\nchooseInlineButtons15.insert(inlineButton101)\r\nchooseInlineButtons15.insert(inlineButton102)\r\nchooseInlineButtons15.insert(inlineButton103)\r\n\r\nchooseInlineButtons16 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons16.insert(inlineButton104)\r\nchooseInlineButtons16.insert(inlineButton105)\r\nchooseInlineButtons16.insert(inlineButton106)\r\nchooseInlineButtons16.insert(inlineButton107)\r\n\r\nchooseInlineButtons17 = InlineKeyboardMarkup(row_width=1)\r\nchooseInlineButtons17.insert(inlineButton108)\r\nchooseInlineButtons17.insert(inlineButton109)\r\nchooseInlineButtons17.insert(inlineButton110)\r\n\r\n#Вернуться в главное меню\r\nbutton9 = KeyboardButton('Вернуться в главное меню')\r\n\r\n\r\n\r\nbtnInfo = KeyboardButton('Информация')\r\nbtnMoney = KeyboardButton('Курсы валют')\r\n\r\notherMenu = ReplyKeyboardMarkup(resize_keyboard=True).add(btnInfo, btnMoney)\r\n\r\n","repo_name":"SilverMikki/jukabot","sub_path":"markup.py","file_name":"markup.py","file_ext":"py","file_size_in_byte":20907,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25909549349","text":"import pandas as pd\nimport os\nfrom ete3 import EvolTree\n\ndef get_table_codeml(directory, workdir):\n family=[]\n hp=[]\n best_M2=[]\n beb=[]\n omega=[]\n\n for filename in os.listdir(directory):\n if filename.endswith('out'):\n name = filename.split('.')[0]\n family.append(name)\n \n tree = EvolTree(newick = os.path.join(directory,\"{}.treefile\".format(filename)))\n\n best_model_list = []\n \n for model in ['M1', 'M2']:\n best_model = None\n best_lnl = float('-inf')\n for starting_omega in [0.2, 0.7, 1.2]:\n modelo = model+'.'+str(starting_omega)\n current_model = tree.link_to_evol_model(workdir+modelo+'_'+name+'/out', modelo+'_'+name)\n current_model = tree.get_evol_model(modelo+'_'+name)\n \n if current_model.lnL > best_lnl:\n best_lnl = current_model.lnL\n best_lnl_name = modelo +': '+ str(best_lnl)\n best_model = current_model\n \n best_model_list.append(best_lnl_name.split(':')[0]+'_'+name)\n \n model2= tree.get_evol_model(best_model_list[1])\n best_M2.append(best_model_list[1])\n omega.append(model2.classes['w'][2])\n pval = tree.get_most_likely(best_model_list[1], best_model_list[0])\n if pval < 0.05:\n hp.append('M2 model wins.')\n sitios = []\n for s in range(len(model2.sites['BEB']['aa'])):\n if model2.sites['BEB']['p2'][s] > 0.95:\n sitios.append('site %s, position: %s' % 
(model2.sites['BEB']['aa'][s], s+1))\n beb_str = '/ '.join(sitios)\n beb.append(beb_str)\n else:\n hp.append('M1 model is not rejected')\n beb.append('NaN')\n \n data={'family':pd.Series(family), 'best_M2':pd.Series(best_M2), 'omega':pd.Series(omega), 'positive_selection_sites':pd.Series(beb), 'hypothesis':pd.Series(hp)}\n\n table=pd.DataFrame(data, columns=['family', 'best_M2', 'omega', 'positive_selection_sites','hypothesis'])\n table.to_csv('codeml_sites_output_all.tsv', sep='\\t', index=None)\n print(table)\n \n\nget_table_codeml(directory='/opt/adri/PR1/pipeline_codeml/mafft', workdir='./workdir/')\n","repo_name":"adriayumi/positiveEvolution_codeML","sub_path":"snakemake/scripts/get_sites_table.py","file_name":"get_sites_table.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5212673277","text":"import json\nimport networkinfotranslator\n \ndef items_info():\n open_json_format = {'name' : \"JSON\",\n 'type': \"open\",\n 'call-functions': [\n {'name': \"saveCurrentNetworkWithUserPermission\", 'inputs':[]},\n {'name': \"createNetwork\", 'inputs': [{'name': \"load_json\", 'api': \"python\", 'inputs': [\n {'name': \"getOpenFileName\", 'inputs': [\"json\"]}]}]},\n {'name': \"createChangeStageCommand\", 'inputs': []}]}\n\n open_sbml_format = {'name' : \"SBML\",\n 'type': \"open\",\n 'call-functions': [\n {'name': \"saveCurrentNetworkWithUserPermission\", 'inputs':[]},\n {'name': \"createNetwork\", 'inputs': [{'name': \"load_sbml\", 'api': \"python\", 'inputs': [\n {'name': \"getOpenFileName\", 'inputs': [\"xml\"]}]}]},\n {'name': \"createChangeStageCommand\", 'inputs': []}]}\n \n return json.dumps({'items': [open_json_format, open_sbml_format]})\n\ndef load_json(input):\n file_name = input[0]\n if file_name:\n f = open(file_name)\n return json.dumps(json.load(f))\n\n return \"\"\n\n\ndef load_sbml(input):\n file_name = input[0]\n if file_name:\n network_info_import_from_sbml_model = networkinfotranslator.NetworkInfoImportFromSBMLModel()\n network_info_import_from_sbml_model.extract_info(file_name)\n network_info_export_to_network_editor = networkinfotranslator.NetworkInfoExportToNetworkEditor()\n network_info_export_to_network_editor.extract_graph_info(network_info_import_from_sbml_model)\n network_info = network_info_export_to_network_editor.export(file_name)\n return json.dumps(network_info)\n\n return \"\"","repo_name":"adelhpour/Alcuin","sub_path":"test-app/src/plugins/negui_open_file.py","file_name":"negui_open_file.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"69872279928","text":"import sys\nimport os\nfrom lib.repositories.repoTools import RepoTools\n\nif len(sys.argv) > 2:\n\ttargetGitHubUsername = sys.argv[2]\nelif len(sys.argv) > 1:\n\ttargetGitHubUsername = \"qub3d\"\nelse:\n\traise Exception( \"Usage: python3 main