diff --git "a/4859.jsonl" "b/4859.jsonl" new file mode 100644--- /dev/null +++ "b/4859.jsonl" @@ -0,0 +1,674 @@ +{"seq_id":"460593302","text":"from django.urls import include, path\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom lemarche.www.pages.views import ContactView, HomeView, PageView, StatsView, trigger_error\n\n\n# https://docs.djangoproject.com/en/dev/topics/http/urls/#url-namespaces-and-included-urlconfs\napp_name = \"pages\"\n\nurlpatterns = [\n path(\"\", HomeView.as_view(), name=\"home\"),\n path(\"contact/\", ContactView.as_view(), name=\"contact\"),\n # Static pages\n path(\n \"filiere/restauration/\",\n TemplateView.as_view(template_name=\"pages/filiere_restauration.html\"),\n name=\"filiere_restauration\",\n ),\n path(\n \"filiere/recyclage/\",\n TemplateView.as_view(template_name=\"pages/filiere_recyclage.html\"),\n name=\"filiere_recyclage\",\n ),\n path(\n \"decouvrir-inclusion/\",\n TemplateView.as_view(template_name=\"pages/decouvrir-inclusion.html\"),\n name=\"decouvrir_inclusion\",\n ),\n path(\n \"partenaires/\",\n TemplateView.as_view(template_name=\"pages/partenaires.html\"),\n name=\"partenaires\",\n ),\n path(\n \"valoriser-achats/\",\n TemplateView.as_view(template_name=\"pages/valoriser-achats.html\"),\n name=\"valoriser_achats\",\n ),\n path(\"stats/\", StatsView.as_view(), name=\"stats\"),\n # Sentry endpoint for frontend errors\n path(\"sentry-debug/\", trigger_error, name=\"sentry_debug\"),\n # Redirection urls post-migration\n # TODO post-migration: remove at some point in the future (2022 ?)\n path(\n \"fr/\",\n include(\n [\n path(\"\", RedirectView.as_view(pattern_name=\"pages:home\", permanent=True), name=\"old_home\"),\n path(\"page/faq\", RedirectView.as_view(url=\"/faq/\", permanent=True), name=\"old_page_faq\"),\n path(\n \"page/qui-sommes-nous\",\n RedirectView.as_view(url=\"/qui-sommes-nous/\", permanent=True),\n name=\"old_page_qui\",\n ),\n path(\n \"itou/inclusion\",\n RedirectView.as_view(pattern_name=\"pages:decouvrir_inclusion\", permanent=True),\n name=\"old_page_inclusion\",\n ),\n path(\n \"filiere/restauration\",\n RedirectView.as_view(pattern_name=\"pages:filiere_restauration\", permanent=True),\n name=\"old_page_filiere_restauration\",\n ),\n path(\n \"filiere/recyclage\",\n RedirectView.as_view(pattern_name=\"pages:filiere_recyclage\", permanent=True),\n name=\"old_page_filiere_recyclage\",\n ),\n path(\n \"contact/creer\",\n RedirectView.as_view(pattern_name=\"pages:contact\", permanent=True),\n name=\"old_page_contact\",\n ),\n path(\n \"identification/\",\n RedirectView.as_view(pattern_name=\"auth:login\", permanent=True),\n name=\"old_signup\",\n ),\n path(\n \"identification\",\n RedirectView.as_view(pattern_name=\"auth:login\", permanent=True),\n name=\"old_login_without_slash\",\n ),\n path(\n \"inscription/\",\n RedirectView.as_view(pattern_name=\"auth:signup\", permanent=True),\n name=\"old_login\",\n ),\n path(\n \"inscription\",\n RedirectView.as_view(pattern_name=\"auth:signup\", permanent=True),\n name=\"old_signup_without_slash\",\n ),\n path(\n \"tableau-de-bord/profil-utilisateur/editer-a-propos-de-moi/\",\n RedirectView.as_view(pattern_name=\"dashboard:home\", permanent=True),\n name=\"old_dashboard_home\",\n ),\n path(\n \"tableau-de-bord/profil-utilisateur/editer-a-propos-de-moi\",\n RedirectView.as_view(pattern_name=\"dashboard:home\", permanent=True),\n name=\"old_dashboard_home_without_slash\",\n ),\n path(\n \"dashboard/directory/\",\n 
RedirectView.as_view(pattern_name=\"dashboard:home\", permanent=True),\n name=\"old_dashboard_siaes_home\",\n ),\n path(\n \"dashboard/directory\",\n RedirectView.as_view(pattern_name=\"dashboard:home\", permanent=True),\n name=\"old_dashboard_siaes_home_without_slash\",\n ),\n path(\n \"repertoire/siae/\",\n RedirectView.as_view(pattern_name=\"siae:search_results\", permanent=True),\n name=\"old_siae_search\",\n ),\n path(\n \"repertoire/siae\",\n RedirectView.as_view(pattern_name=\"siae:search_results\", permanent=True),\n name=\"old_siae_search_without_slash\",\n ),\n path(\n \"directory//show/\",\n RedirectView.as_view(pattern_name=\"siae:detail\", permanent=True),\n name=\"old_siae_detail\",\n ), # \n path(\n \"directory//show\",\n RedirectView.as_view(pattern_name=\"siae:detail\", permanent=True),\n name=\"old_siae_detail_without_slash\",\n ), # \n path(\n \"directory//\",\n RedirectView.as_view(pattern_name=\"siae:detail\", permanent=True),\n name=\"old_siae_detail_without_show\",\n ), # \n ]\n ),\n ),\n # Flatpages (created in the admin)\n # path(\"\", include(\"django.contrib.flatpages.urls\")),\n path(\"\", PageView.as_view(), name=\"flatpage\"),\n # Error pages\n path(\"403/\", TemplateView.as_view(template_name=\"403.html\"), name=\"403\"),\n path(\"404/\", TemplateView.as_view(template_name=\"404.html\"), name=\"404\"),\n path(\"500/\", TemplateView.as_view(template_name=\"500.html\"), name=\"500\"),\n]\n","sub_path":"lemarche/www/pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367053419","text":"from unittest import TestCase\n\nimport cga\nimport gener\nimport util\n\n\nclass TestGener(TestCase):\n md = [\n {'h2': '##'},\n {'text_line': 'this is h2'},\n {'code_start': '```c'},\n {'text_line': 'int main() {'},\n {'text_line': ' return 0'},\n {'text_line': '}'},\n {'code_end': '```'}\n ]\n\n def test_write_to_md_without_inserted(self):\n node_list = cga.make_token_list(self.md)\n target = util.write2file('')\n gener.write_to_md(node_list, target)\n with open(target, mode='r', encoding='utf-8') as f:\n res = f.read()\n f.close()\n expected = [\n '## this is h2',\n '```c',\n 'int main() {',\n ' return 0',\n '}',\n '```'\n ]\n self.assertEqual('\\n'.join(expected) + '\\n', res)\n","sub_path":"docs/complie_samples/test/test_gener.py","file_name":"test_gener.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116772175","text":"import gym\nimport model\nimport numpy as np\n\nif __name__==\"__main__\":\n env = gym.make('CartPole-v1')\n state_size, action_size = env.observation_space.shape[0], env.action_space.n\n batch_size = 32\n\n agent = model.FirstModel(state_size, action_size)\n for i_episode in range(20):\n observation = env.reset()\n points = 0\n for t in range(100):\n env.render()\n\n action = agent.predict(observation.reshape(1,-1))\n observation, reward, done, info = env.step(action)\n points += reward\n if done:\n print(\"Episode finished after {} timesteps\".format(t+1))\n break\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312011284","text":"\"\"\"\nCarl Bechie\nCIS 185\nex 10.8\n\"\"\"\n\nfrom socket import *\nfrom codecs import decode\nfrom threading import Thread\n\nBUFSIZE = 1024\nCODE = 
\"ascii\"\n\nclass ATMClientHandler(Thread):\n \"\"\"Handles ATM requests from a client.\"\"\"\n \n def __init__(self, client, bank):\n \"\"\"Save references to the client socket and bank.\"\"\"\n Thread.__init__(self)\n self.client = client\n self.bank = bank\n \n def run(self):\n \"\"\"Sends a greeting to the client, then enters\n an interative loop to take and respond to\n requests.\"\"\"\n self.client.send(bytes(\"Welcome to the bank!\",\n CODE))\n while True:\n request = decode(self.client.recv(BUFSIZE),\n CODE)\n if not request:\n print(\"Client disconnected\")\n self.client.close()\n break\n else:\n reply = self.interpret(request.split())\n self.client.send(bytes(reply, CODE))\n\n def interpret(self, request):\n \"\"\"Interprets and responds to request.\"\"\"\n command = request[0]\n if command == \"LOGIN\":\n self.account = self.bank.get(request[1], request[2])\n if not self.account:\n return \"FAILURE\"\n else:\n return \"SUCCESS\"\n elif command == \"LOGOUT\":\n self.account = None\n return \"Welcome to the bank!\"\n elif command == \"BALANCE\":\n balance = self.account.getBalance()\n return \"The balance is $\" + str(balance)\n elif command == \"DEPOSIT\":\n message = self.account.deposit(float(request[1]))\n if message:\n return message\n else:\n return \"Deposit successful!\"\n elif command == \"WITHDRAW\":\n message = self.account.withdraw(float(request[1]))\n if message:\n return message\n else:\n return \"Withdrawal successful!\" \n else:\n return \"Command not recognized!\"\n \n\n \n\n","sub_path":"atmclienthandler.py","file_name":"atmclienthandler.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45364476","text":"\"\"\"Message definitions.\"\"\"\n\nfrom collections import namedtuple\n\nfrom ...address import Address\nfrom ...constants import (\n AckNak,\n AllLinkMode,\n DeviceCategory,\n ImButtonEvents,\n ManageAllLinkRecordAction,\n MessageId,\n X10CommandType,\n)\nfrom ...data_types.all_link_record_flags import AllLinkRecordFlags\nfrom ...data_types.im_config_flags import IMConfigurationFlags\nfrom ...data_types.message_flags import MessageFlags\nfrom ...data_types.user_data import UserData\nfrom .message_definition import MessageDefinition\n\nMessageField = namedtuple(\"MessageField\", \"name length type\")\n\n\n# MessageDefinition = namedtuple(\"MessageDefinition\", \"type fields\")\n\n# INSTEON Standard Message Received 0x50\nFLD_STD_REC = [\n MessageField(\"address\", 3, Address),\n MessageField(\"target\", 3, Address),\n MessageField(\"flags\", 1, MessageFlags),\n MessageField(\"cmd1\", 1, int),\n MessageField(\"cmd2\", 1, int),\n]\n\n# INSTEON Extended Message Received 0x51\nFLD_EXT_REC = FLD_STD_REC.copy()\nFLD_EXT_REC.append(MessageField(\"user_data\", 14, UserData))\n\n# X10 Send / Received 0x63 / 0x52\nFLD_X10_SEND_REC = [\n MessageField(\"raw_x10\", 1, int),\n MessageField(\"x10_flag\", 1, X10CommandType),\n]\nFLD_X10_SEND_REC_ACK = FLD_X10_SEND_REC.copy()\nFLD_X10_SEND_REC_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# ALL-Linking Completed 0x53\nFLD_ALL_LINK_COMPLETE = [\n MessageField(\"link_mode\", 1, AllLinkMode),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n MessageField(\"cat\", 1, DeviceCategory),\n MessageField(\"subcat\", 1, int),\n MessageField(\"firmware\", 1, int),\n]\n\n# IM Button Event Report 0x54\nFLD_IM_BTN_EVENT_REPORT_REC = [MessageField(\"event\", 1, ImButtonEvents)]\n\n# IM User Reset Detected 
0x55\nFLD_USER_RESET_REC = []\n\n# ALL-Link Cleanup Failure Report 0x56\nFLD_ALL_LINK_CLEANUP_FAILURE = [\n MessageField(\"error\", 1, int),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n]\n\n# ALL-Link Record Response 0x57\nFLD_ALL_LINK_RECORD_RESP = [\n MessageField(\"flags\", 1, AllLinkRecordFlags),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n MessageField(\"data1\", 1, int),\n MessageField(\"data2\", 1, int),\n MessageField(\"data3\", 1, int),\n]\n\n# All-Link Cleanup Status Report 0x58\nFLD_ALL_LINK_CLEANUP_REPORT_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Read from EEPROM Response 0x59\nFLD_READ_EEPROM_RESPONSE = [\n MessageField(\"mem_hi\", 1, int),\n MessageField(\"mem_low\", 1, int),\n MessageField(\"flags\", 1, AllLinkRecordFlags),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n MessageField(\"data1\", 1, int),\n MessageField(\"data2\", 1, int),\n MessageField(\"data3\", 1, int),\n]\n\n# Get IM Info 0x60\nFLD_GET_IM_INFO_SEND = []\nFLD_GET_IM_INFO_REC = [\n MessageField(\"address\", 3, Address),\n MessageField(\"cat\", 1, DeviceCategory),\n MessageField(\"subcat\", 1, int),\n MessageField(\"firmware\", 1, int),\n MessageField(\"ack\", 1, AckNak),\n]\n\n# Send All-Link Command 0x61\nFLD_SEND_ALL_LINK_CMD = [\n MessageField(\"group\", 1, int),\n MessageField(\"cmd1\", 1, int),\n MessageField(\"cmd2\", 1, int),\n]\nFLD_SEND_ALL_LINK_CMD_ACK = FLD_SEND_ALL_LINK_CMD.copy()\nFLD_SEND_ALL_LINK_CMD_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Send INSTEON Standard or Extended Message 0x62\nFLD_STD_SEND = [\n MessageField(\"address\", 3, Address),\n MessageField(\"flags\", 1, MessageFlags),\n MessageField(\"cmd1\", 1, int),\n MessageField(\"cmd2\", 1, int),\n]\nFLD_EXT_SEND = FLD_STD_SEND.copy()\nFLD_EXT_SEND.append(MessageField(\"user_data\", 14, UserData))\n\n# Send INSTEON Standard or Extended Message ACK/NAK 0x62\nFLD_STD_SEND_ACK = FLD_STD_SEND.copy()\nFLD_STD_SEND_ACK.append(MessageField(\"ack\", 1, AckNak))\nFLD_EXT_SEND_ACK = FLD_EXT_SEND.copy()\nFLD_EXT_SEND_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Send X10 0x63 (SEE X10 Received 0x52)\n\n# Start All-Linking 0x64\nFLD_START_ALL_LINKING = [\n MessageField(\"link_mode\", 1, AllLinkMode),\n MessageField(\"group\", 1, int),\n]\nFLD_START_ALL_LINKING_ACK = FLD_START_ALL_LINKING.copy()\nFLD_START_ALL_LINKING_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Cancel All-Linking 0x65\nFLD_CANCEL_ALL_LINKING = []\nFLD_CANCEL_ALL_LINKING_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Set Host Device Category 0x66\nFLD_SET_HOST_DEVICE_CATEGORY = [\n MessageField(\"cat\", 1, DeviceCategory),\n MessageField(\"subcat\", 1, int),\n MessageField(\"firmware\", 1, int),\n]\nFLD_SET_HOST_DEVICE_CATEGORY_ACK = FLD_SET_HOST_DEVICE_CATEGORY.copy()\nFLD_SET_HOST_DEVICE_CATEGORY_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Reset the IM 0x67\nFLD_RESET_IM = []\nFLD_RESET_IM_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Set INSTEON ACK/NAK Message Byte 0x68/0x70\nFLD_SET_ACK_NAK_BYTE = [MessageField(\"cmd2\", 1, int)]\nFLD_SET_ACK_NAK_BYTE_ACK = FLD_SET_ACK_NAK_BYTE.copy()\nFLD_SET_ACK_NAK_BYTE_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Get First ALL-Link Record 0x69\nFLD_GET_FIRST_ALL_LINK_RECORD = []\nFLD_GET_FIRST_ALL_LINK_RECORD_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Get Next ALL-Link Record 0x6A\nFLD_GET_NEXT_ALL_LINK_RECORD = []\nFLD_GET_NEXT_ALL_LINK_RECORD_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Set IM Configuration 
0x6B\nFLD_SET_IM_CONFIG = [MessageField(\"flags\", 1, IMConfigurationFlags)]\nFLD_SET_IM_CONFIG_ACK = FLD_SET_IM_CONFIG.copy()\nFLD_SET_IM_CONFIG_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Get All-Link Record for Sender 0x6C\nFLD_GET_ALL_LINK_REC_FOR_SENDER = []\nFLD_GET_ALL_LINK_REC_FOR_SENDER_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# LED On/Off 0x6D / 0x6E\nFLD_LED_ON_OFF = []\nFLD_LED_ON_OFF_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Manage All-Link Record 0x6F\nFLD_MANAGE_ALL_LINK_RECORD = [\n MessageField(\"action\", 1, ManageAllLinkRecordAction),\n MessageField(\"flags\", 1, AllLinkRecordFlags),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n MessageField(\"data1\", 1, int),\n MessageField(\"data2\", 1, int),\n MessageField(\"data3\", 1, int),\n]\nFLD_MANAGE_ALL_LINK_RECORD_ACK = FLD_MANAGE_ALL_LINK_RECORD.copy()\nFLD_MANAGE_ALL_LINK_RECORD_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Set INSTEON NAK Message Byte see 0x68\n# Set INSTEON ACK Message Two Bytes 0x71\nFLD_SET_ACK_TWO_BYTES = [MessageField(\"cmd1\", 1, int), MessageField(\"cmd2\", 1, int)]\nFLD_SET_ACK_TWO_BYTES_ACK = FLD_SET_ACK_TWO_BYTES.copy()\nFLD_SET_ACK_TWO_BYTES_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# RF Sleep 0x72\nFLD_RF_SLEEP = []\nFLD_RF_SLEEP_ACK = [MessageField(\"ack\", 1, AckNak)]\n\n# Get IM Configuration 0x73\nFLD_GET_IM_CONFIG = []\nFLD_GET_IM_CONFIG_ACK = [\n MessageField(\"flags\", 1, IMConfigurationFlags),\n MessageField(\"spare1\", 1, int),\n MessageField(\"spare2\", 1, int),\n MessageField(\"ack\", 1, AckNak),\n]\n\n# Read EEPROM 0x75\nFLD_READ_EEPROM = [\n MessageField(\"mem_hi\", 1, int),\n MessageField(\"mem_low\", 1, int),\n]\nFLD_READ_EEPROM_ACK = FLD_READ_EEPROM.copy()\nFLD_READ_EEPROM_ACK.append(MessageField(\"ack\", 1, AckNak))\n\n# Write to EEPROM 0x76\nFLD_WRITE_EEPROM = [\n MessageField(\"mem_hi\", 1, int),\n MessageField(\"mem_low\", 1, int),\n MessageField(\"flags\", 1, AllLinkRecordFlags),\n MessageField(\"group\", 1, int),\n MessageField(\"target\", 3, Address),\n MessageField(\"data1\", 1, int),\n MessageField(\"data2\", 1, int),\n MessageField(\"data3\", 1, int),\n]\nFLD_WRITE_EEPROM_ACK = FLD_WRITE_EEPROM.copy()\nFLD_WRITE_EEPROM_ACK.append(MessageField(\"ack\", 1, AckNak))\n\nINBOUND_MSG_DEF = {}\nINBOUND_MSG_DEF[MessageId.STANDARD_RECEIVED] = MessageDefinition(\n MessageId.STANDARD_RECEIVED, FLD_STD_REC\n)\nINBOUND_MSG_DEF[MessageId.EXTENDED_RECEIVED] = MessageDefinition(\n MessageId.EXTENDED_RECEIVED, FLD_EXT_REC\n)\nINBOUND_MSG_DEF[MessageId.X10_RECEIVED] = MessageDefinition(\n MessageId.X10_RECEIVED, FLD_X10_SEND_REC\n)\nINBOUND_MSG_DEF[MessageId.ALL_LINKING_COMPLETED] = MessageDefinition(\n MessageId.ALL_LINKING_COMPLETED, FLD_ALL_LINK_COMPLETE\n)\nINBOUND_MSG_DEF[MessageId.BUTTON_EVENT_REPORT] = MessageDefinition(\n MessageId.BUTTON_EVENT_REPORT, FLD_IM_BTN_EVENT_REPORT_REC\n)\nINBOUND_MSG_DEF[MessageId.USER_RESET_DETECTED] = MessageDefinition(\n MessageId.USER_RESET_DETECTED, FLD_USER_RESET_REC\n)\nINBOUND_MSG_DEF[MessageId.ALL_LINK_CLEANUP_FAILURE_REPORT] = MessageDefinition(\n MessageId.ALL_LINK_CLEANUP_FAILURE_REPORT, FLD_ALL_LINK_CLEANUP_FAILURE\n)\nINBOUND_MSG_DEF[MessageId.ALL_LINK_RECORD_RESPONSE] = MessageDefinition(\n MessageId.ALL_LINK_RECORD_RESPONSE, FLD_ALL_LINK_RECORD_RESP\n)\nINBOUND_MSG_DEF[MessageId.ALL_LINK_CLEANUP_STATUS_REPORT] = MessageDefinition(\n MessageId.ALL_LINK_CLEANUP_STATUS_REPORT, FLD_ALL_LINK_CLEANUP_REPORT_ACK\n)\nINBOUND_MSG_DEF[MessageId.READ_EEPROM_RESPONSE] = MessageDefinition(\n 
MessageId.READ_EEPROM_RESPONSE, FLD_READ_EEPROM_RESPONSE\n)\nINBOUND_MSG_DEF[MessageId.GET_IM_INFO] = MessageDefinition(\n MessageId.GET_IM_INFO, FLD_GET_IM_INFO_REC\n)\nINBOUND_MSG_DEF[MessageId.SEND_ALL_LINK_COMMAND] = MessageDefinition(\n MessageId.SEND_ALL_LINK_COMMAND, FLD_SEND_ALL_LINK_CMD_ACK\n)\nINBOUND_MSG_DEF[MessageId.SEND_STANDARD] = MessageDefinition(\n MessageId.SEND_STANDARD, FLD_STD_SEND_ACK\n)\nINBOUND_MSG_DEF[MessageId.X10_SEND] = MessageDefinition(\n MessageId.X10_SEND, FLD_X10_SEND_REC_ACK\n)\nINBOUND_MSG_DEF[MessageId.START_ALL_LINKING] = MessageDefinition(\n MessageId.START_ALL_LINKING, FLD_START_ALL_LINKING_ACK\n)\nINBOUND_MSG_DEF[MessageId.CANCEL_ALL_LINKING] = MessageDefinition(\n MessageId.CANCEL_ALL_LINKING, FLD_CANCEL_ALL_LINKING_ACK\n)\nINBOUND_MSG_DEF[MessageId.SET_HOST_DEVICE_CATEGORY] = MessageDefinition(\n MessageId.SET_HOST_DEVICE_CATEGORY, FLD_SET_HOST_DEVICE_CATEGORY_ACK\n)\nINBOUND_MSG_DEF[MessageId.RESET_IM] = MessageDefinition(\n MessageId.RESET_IM, FLD_RESET_IM_ACK\n)\nINBOUND_MSG_DEF[MessageId.SET_ACK_MESSAGE_BYTE] = MessageDefinition(\n MessageId.SET_ACK_MESSAGE_BYTE, FLD_SET_ACK_NAK_BYTE_ACK\n)\nINBOUND_MSG_DEF[MessageId.GET_FIRST_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.GET_FIRST_ALL_LINK_RECORD, FLD_GET_FIRST_ALL_LINK_RECORD_ACK\n)\nINBOUND_MSG_DEF[MessageId.GET_NEXT_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.GET_NEXT_ALL_LINK_RECORD, FLD_GET_NEXT_ALL_LINK_RECORD_ACK\n)\nINBOUND_MSG_DEF[MessageId.SET_IM_CONFIGURATION] = MessageDefinition(\n MessageId.SET_IM_CONFIGURATION, FLD_SET_IM_CONFIG_ACK\n)\nINBOUND_MSG_DEF[MessageId.GET_ALL_LINK_RECORD_FOR_SENDER] = MessageDefinition(\n MessageId.GET_ALL_LINK_RECORD_FOR_SENDER, FLD_GET_ALL_LINK_REC_FOR_SENDER_ACK\n)\nINBOUND_MSG_DEF[MessageId.LED_ON] = MessageDefinition(\n MessageId.LED_ON, FLD_LED_ON_OFF_ACK\n)\nINBOUND_MSG_DEF[MessageId.LED_OFF] = MessageDefinition(\n MessageId.LED_OFF, FLD_LED_ON_OFF_ACK\n)\nINBOUND_MSG_DEF[MessageId.MANAGE_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.MANAGE_ALL_LINK_RECORD, FLD_MANAGE_ALL_LINK_RECORD_ACK\n)\nINBOUND_MSG_DEF[MessageId.SET_NAK_MESSAGE_BYTE] = MessageDefinition(\n MessageId.SET_NAK_MESSAGE_BYTE, FLD_SET_ACK_NAK_BYTE_ACK\n)\nINBOUND_MSG_DEF[MessageId.SET_ACK_MESSAGE_TWO_BYTES] = MessageDefinition(\n MessageId.SET_ACK_MESSAGE_TWO_BYTES, FLD_SET_ACK_TWO_BYTES_ACK\n)\nINBOUND_MSG_DEF[MessageId.RF_SLEEP] = MessageDefinition(\n MessageId.RF_SLEEP, FLD_RF_SLEEP_ACK\n)\nINBOUND_MSG_DEF[MessageId.GET_IM_CONFIGURATION] = MessageDefinition(\n MessageId.GET_IM_CONFIGURATION, FLD_GET_IM_CONFIG_ACK\n)\nINBOUND_MSG_DEF[MessageId.READ_EEPROM] = MessageDefinition(\n MessageId.READ_EEPROM, FLD_READ_EEPROM_ACK\n)\nINBOUND_MSG_DEF[MessageId.WRITE_EEPROM] = MessageDefinition(\n MessageId.WRITE_EEPROM, FLD_WRITE_EEPROM_ACK\n)\n\nOUTBOUND_MSG_DEF = {}\nOUTBOUND_MSG_DEF[MessageId.GET_IM_INFO] = MessageDefinition(\n MessageId.GET_IM_INFO, FLD_GET_IM_INFO_SEND\n)\nOUTBOUND_MSG_DEF[MessageId.SEND_ALL_LINK_COMMAND] = MessageDefinition(\n MessageId.SEND_ALL_LINK_COMMAND, FLD_SEND_ALL_LINK_CMD\n)\nOUTBOUND_MSG_DEF[MessageId.SEND_STANDARD] = MessageDefinition(\n MessageId.SEND_STANDARD, FLD_STD_SEND\n)\nOUTBOUND_MSG_DEF[MessageId.X10_SEND] = MessageDefinition(\n MessageId.X10_SEND, FLD_X10_SEND_REC\n)\nOUTBOUND_MSG_DEF[MessageId.START_ALL_LINKING] = MessageDefinition(\n MessageId.START_ALL_LINKING, FLD_START_ALL_LINKING\n)\nOUTBOUND_MSG_DEF[MessageId.CANCEL_ALL_LINKING] = MessageDefinition(\n MessageId.CANCEL_ALL_LINKING, 
FLD_CANCEL_ALL_LINKING\n)\nOUTBOUND_MSG_DEF[MessageId.SET_HOST_DEVICE_CATEGORY] = MessageDefinition(\n MessageId.SET_HOST_DEVICE_CATEGORY, FLD_SET_HOST_DEVICE_CATEGORY\n)\nOUTBOUND_MSG_DEF[MessageId.RESET_IM] = MessageDefinition(\n MessageId.RESET_IM, FLD_RESET_IM\n)\nOUTBOUND_MSG_DEF[MessageId.SET_ACK_MESSAGE_BYTE] = MessageDefinition(\n MessageId.SET_ACK_MESSAGE_BYTE, FLD_SET_ACK_NAK_BYTE\n)\nOUTBOUND_MSG_DEF[MessageId.GET_FIRST_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.GET_FIRST_ALL_LINK_RECORD, FLD_GET_FIRST_ALL_LINK_RECORD\n)\nOUTBOUND_MSG_DEF[MessageId.GET_NEXT_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.GET_NEXT_ALL_LINK_RECORD, FLD_GET_NEXT_ALL_LINK_RECORD\n)\nOUTBOUND_MSG_DEF[MessageId.SET_IM_CONFIGURATION] = MessageDefinition(\n MessageId.SET_IM_CONFIGURATION, FLD_SET_IM_CONFIG\n)\nOUTBOUND_MSG_DEF[MessageId.GET_ALL_LINK_RECORD_FOR_SENDER] = MessageDefinition(\n MessageId.GET_ALL_LINK_RECORD_FOR_SENDER, FLD_GET_ALL_LINK_REC_FOR_SENDER\n)\nOUTBOUND_MSG_DEF[MessageId.LED_ON] = MessageDefinition(MessageId.LED_ON, FLD_LED_ON_OFF)\nOUTBOUND_MSG_DEF[MessageId.LED_OFF] = MessageDefinition(\n MessageId.LED_OFF, FLD_LED_ON_OFF\n)\nOUTBOUND_MSG_DEF[MessageId.MANAGE_ALL_LINK_RECORD] = MessageDefinition(\n MessageId.MANAGE_ALL_LINK_RECORD, FLD_MANAGE_ALL_LINK_RECORD\n)\nOUTBOUND_MSG_DEF[MessageId.SET_NAK_MESSAGE_BYTE] = MessageDefinition(\n MessageId.SET_NAK_MESSAGE_BYTE, FLD_SET_ACK_NAK_BYTE\n)\nOUTBOUND_MSG_DEF[MessageId.SET_ACK_MESSAGE_TWO_BYTES] = MessageDefinition(\n MessageId.SET_ACK_MESSAGE_TWO_BYTES, FLD_SET_ACK_TWO_BYTES\n)\nOUTBOUND_MSG_DEF[MessageId.RF_SLEEP] = MessageDefinition(\n MessageId.RF_SLEEP, FLD_RF_SLEEP\n)\nOUTBOUND_MSG_DEF[MessageId.GET_IM_CONFIGURATION] = MessageDefinition(\n MessageId.GET_IM_CONFIGURATION, FLD_GET_IM_CONFIG\n)\n\nOUTBOUND_MSG_DEF[MessageId.READ_EEPROM] = MessageDefinition(\n MessageId.READ_EEPROM, FLD_READ_EEPROM\n)\n\nOUTBOUND_MSG_DEF[MessageId.WRITE_EEPROM] = MessageDefinition(\n MessageId.WRITE_EEPROM, FLD_WRITE_EEPROM\n)\n","sub_path":"pyinsteon/protocol/messages/message_definitions.py","file_name":"message_definitions.py","file_ext":"py","file_size_in_byte":14193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487302312","text":"#!/bin/python3\n\nimport requests\n\ndef get_tor_session():\n session = requests.session()\n # Tor uses the 9050 port as the default socks port\n session.proxies = {'http': 'socks5://127.0.0.1:9050',\n 'https': 'socks5://127.0.0.1:9050'}\n return session\n\n# Make a request through the Tor connection\nsession = get_tor_session()\nprint(\" \" + session.get(\"https://phuck-donation.appspot.com/count\").text)\n","sub_path":"scripts/donations.py","file_name":"donations.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589059237","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional\n\n\nclass Min_fbbf_loss(nn.Module):\n def __init__(self, device = 'cpu'):\n super().__init__()\n\n self.device = device\n\n def forward(self, flows, **kwargs):\n n, c, h, w = flows.shape\n\n fw_flow = flows[:, 0:2, :, :]\n bw_flow = flows[:, 2:, :, :]\n\n loss = 0\n for j in range(1, n - 1):\n loss += torch.mean(torch.min(self.__fb(fw_flow[j, :, :, :], bw_flow[j + 1, :, :, :]),\n self.__fb(bw_flow[j, :, :, :], fw_flow[j - 1, :, :, :])))\n\n return loss\n\n def __fb(self, f, b):\n C, H, W = f.shape\n xx, yy = torch.meshgrid(torch.arange(H), torch.arange(W))\n 
ind = torch.stack((yy, xx), dim=-1).to(self.device)\n\n\n grid = f.permute((1, 2, 0)) + ind\n grid = torch.unsqueeze(grid, 0)\n\n # Normalize coordinates to the square [-1, 1]\n grid = (2*grid / torch.tensor([W, H]).view(1,1,1,2).to(self.device))-1\n\n b2warp = torch.unsqueeze(b, 0)\n interp = torch.nn.functional.grid_sample(b2warp, grid,\n mode='bilinear', padding_mode='border',\n align_corners=False)\n warped_b = torch.squeeze(interp)\n d = torch.norm(warped_b + f, dim=0)\n\n return d","sub_path":"graphs/losses/min_fbbf_loss.py","file_name":"min_fbbf_loss.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"382130279","text":"import pstats, cProfile\n\n#import pyximport\n#pyximport.install()\n\nimport cFloodFillSearch as floodFillSearch\n\nfrom numpy import * \n\ndef runTest():\n N = 2**9\n\n random.seed(0)\n testArray = random.normal(size =[N,N])\n #print(shape(testArray))\n\n cProfile.runctx(\"floodFillSearch.floodFillSearch(testArray)\", globals(), locals(), \"Profile.prof\")\n #cProfile.runctx(\"floodFillSearch.floodFillSearch(testArray)\", globals(), locals(), \"Profile.prof\")\n\n s = pstats.Stats(\"Profile.prof\")\n s.strip_dirs().sort_stats(\"time\").print_stats()\n\n\n #############\n #############\n #############\n floodInds = floodFillSearch.floodFillSearch(testArray)\n\n areaSizes = array([ len(inds) for (inds,_) in floodInds])\n sortInds = argsort(areaSizes)\n\n indexArray = ma.zeros(shape(testArray))\n indexArray[:] = ma.masked\n\n for i in range(len(sortInds)):\n indexArray[floodInds[i]] = log(areaSizes[i])\n\n print(areaSizes[sortInds])\n\n import pylab as P\n P.imshow(indexArray,interpolation='nearest')\n P.show()\n\n\n","sub_path":"ar_detection/floodfillsearch/testTiming.py","file_name":"testTiming.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31559740","text":"import random\nimport time\nimport fut\nsession = fut.Core('xxx@gmail.com','xxx','test',platform='ps4',debug=True)\nwItems = session.watchlist()\nif len(wItems) > 0:\n\tfor a in wItems:\n\t\tif a['tradeId'] == -1:\n\t\t\tsession.quickSell(a['id'])\ncount=0\nsize = session.watchlist_size - len(wItems)\nprint(\"so luong card can bid %s\" % (size))\nwhile True:\n\titems = session.searchAuctions('player',max_price=650, min_buy = 900, start=20,defId=164985)\n\tfor x in items:\n\t\tif x['currentBid']>=650 :break\n\t\tif count>=size: break # need to fix here\n\t\tflag=session.bid(x['tradeId'], 650)\n\t\tprint(flag)\n\t\tif flag:\n\t\t\tcount += 1\n\t\t\tprint(\"count:%d\" % (count))\n\tprint(\"wait for 5,10s ...\")\n\ttime.sleep(random.randint(10,15))\nsession.logout(); # Need to be fixed here","sub_path":"bidthereau.py","file_name":"bidthereau.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605828189","text":"import time\nimport json\nfrom urllib.parse import urlencode\nfrom requests import Session\nfrom requests.exceptions import HTTPError, InvalidURL, ConnectionError\n\nclass TwitchApi:\n def __init__(self, clientId, accessToken, clientSecret):\n self.clientId = clientId\n self.accessToken = accessToken\n self.clientSecret = clientSecret\n self.session = Session()\n\n def _pubRequest(self, url):\n response = self.session.get(url)\n return json.loads(response.text)\n\n def _idedRequest(self, url):\n response = 
self.session.get(url, headers={'Client-ID': self.clientId})\n result = json.loads(response.text)\n return result\n\n def _authRequest(self, url, data=None, method='GET'):\n if data is not None:\n data = json.dumps(data).encode('utf-8')\n\n response = self.session.request(method, url, data=data, headers={\n 'Client-ID': self.clientId,\n 'Accept': 'application/vnd.twitchtv.v5+json',\n 'Authorization': 'OAuth '+self.accessToken,\n 'Content-Type': 'application/json'\n })\n\n return json.loads(response.text)\n\n def setGame(self, channelId, game):\n if channelId is None:\n return False\n\n data = {\"channel\": {\"game\": game}}\n try:\n result = self._authRequest(\"https://api.twitch.tv/kraken/channels/%s\" % channelId, data, 'PUT')\n return True\n\n except HTTPError as e:\n print(\"setGame: \"+str(e))\n\n return False\n\n def setTitle(self, channelId, title):\n if channelId is None:\n return False\n\n data = {\"channel\": {\"status\": title}}\n try:\n result = self._authRequest(\"https://api.twitch.tv/kraken/channels/%s\" % channelId, data, 'PUT')\n return True\n\n except HTTPError as e:\n print(\"setTitle: \"+str(e))\n\n return False\n\n def getTitle(self,channelId):\n if channelId is None:\n return \"\"\n try:\n result = self._authRequest(\"https://api.twitch.tv/kraken/channels/%s\" % channelId)\n return result[\"status\"] if result else \"\"\n\n except HTTPError as e:\n print(\"getTitle: \"+str(e))\n return \"\"\n\n def getChannelUrl(self,channelId):\n if channelId is None:\n return \"\"\n try:\n result = self._authRequest(\"https://api.twitch.tv/kraken/channels/%s\" % channelId)\n return result[\"url\"] if result else \"\"\n\n except HTTPError as e:\n print(\"getChannelUrl: \"+str(e))\n return \"\"\n\n\n\n def getTwitchEmotes(self):\n return self._pubRequest('https://api.twitch.tv/kraken/chat/emoticons')['emoticons']\n\n def isStreamOnline(self, channelName):\n try:\n streamState = self._idedRequest(\"https://api.twitch.tv/kraken/streams/\"+channelName)\n return streamState['stream'] is not None\n\n except HTTPError as e:\n print(\"isStreamOnline \"+str(e))\n\n except ConnectionError as e:\n print(\"isStreamOnline \"+str(e))\n\n return False\n\n def getStreamLiveTime(self, channelName):\n try:\n streamState = self._idedRequest(\"https://api.twitch.tv/kraken/streams/\"+channelName)\n if streamState['stream'] is not None:\n liveTime = streamState['stream']['created_at']\n return time.strptime(liveTime, '%Y-%m-%dT%H:%M:%SZ')\n\n except HTTPError as e:\n print(\"getStreamLiveTime: \"+str(e))\n\n return None\n\n def getChannelId(self):\n try:\n channels = self._authRequest(\"https://api.twitch.tv/kraken/channel\")\n chanId = channels['_id']\n return chanId\n\n except HTTPError as e:\n print(\"getChannelId: \"+str(e))\n\n return None\n\n def isHosting(self, channelId):\n if channelId is None:\n return False\n\n try:\n hostsList = self._pubRequest(\"https://tmi.twitch.tv/hosts?\"+urlencode({'include_logins': '1', 'host': channelId}))\n return 'target_login' in hostsList['hosts'][0]\n\n except HTTPError as e:\n print(\"isHosting: \"+str(e))\n\n return False\n\n def getCurrentlyHostedChannel(self, channelId):\n if channelId is None:\n return None\n\n try:\n hostsList = self._pubRequest(\"https://tmi.twitch.tv/hosts?\"+urlencode({'include_logins': '1', 'host': channelId}))\n host = hostsList['hosts'][0]\n return host.get('target_login', None)\n\n except HTTPError as e:\n print(\"getCurrentlyHostedChannel: \"+str(e))\n\n return None\n\n def getChatters(self, channelName):\n try:\n chatlist = 
self._pubRequest('http://tmi.twitch.tv/group/user/%s/chatters' % channelName.lower())\n\n return chatlist['chatters']\n\n except HTTPError as e:\n #This API is particularly prone to responding with a 503,\n #so we don't want to constantly be printing the error out\n #print(\"getChatters: \"+str(e))\n pass\n except InvalidURL as e:\n print(\"getChatters: \"+str(e))\n\n return None\n\n def getAllChatters(self, channelName):\n allchatters = None\n chatterMap = self.getChatters(channelName)\n\n if chatterMap:\n allchatters = []\n for chatters in chatterMap.values():\n allchatters.extend(chatters)\n\n return allchatters\n\n def getModerators(self, channelName):\n chatterMap = self.getChatters(channelName)\n return chatterMap[\"moderators\"] if chatterMap else []\n","sub_path":"astrolib/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44313289","text":"from os import path\nprint('-------------------------------------------------------')\ncheck1 = 'na na boo boo'\nfile1 = input('Enter file name: ') # input file name to be worked on.\nfile1 = file1.lower() # Change to lowercase\nif path.exists(file1): # checking if the file entered exists in the current path\n file2 = open(file1)\n for line in file2:\n line = file2.readlines()\n line = len(line) # counting the number of lines the file has\n print('There are', line, 'lines in the file')\nelif file1 == check1:\n print(\"NA NA BOO BOO - You have been pranked! \")\nelse:\n print('File does not exist')\n","sub_path":"src/chapter7/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422193674","text":"'''\nCreated on Mar 20, 2014\n\n@author: Will\n'''\nfrom app.main import *\nfrom app.web.database import *\nimport unittest\nimport urllib\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nclass MainTest(unittest.TestCase):\n \n def testSQLInjection(self):\n params = urllib.urlencode({'title': \"title;DROP TABLE entries\", \n 'text': \"text;DROP TABLE entries\", \n 'reply':None})\n urllib.urlopen(\"http://localhost:5000/Assignment2.0/Input.py/comments\", params)\n \n with app.app_context():\n db = get_db()\n cur = db.execute('select * from entries')\n entrylist = cur.fetchall()\n self.assertNotEquals(len(entrylist),0)\n \n def testCommentInput(self):\n params = urllib.urlencode({'title': \"title\", 'text': \"text\", 'reply':None})\n urllib.urlopen(\"http://localhost:5000/Assignment2.0/Input.py/comments\", params)\n \n with app.app_context():\n db = get_db()\n cur = db.execute('select text from entries where (text = ?)',[\"text\"])\n entrylist = cur.fetchall()\n for entry in entrylist:\n self.assertEquals(entry[0],\"text\")\n \n def testReplyInput(self):\n params = urllib.urlencode({'title': \"title\", 'text': \"reply\", 'reply':1})\n urllib.urlopen(\"http://localhost:5000/Assignment2.0/Input.py/comments\", params)\n \n with app.app_context():\n db = get_db()\n cur = db.execute('select text from replies where (text = ?)',[\"reply\"])\n replylist = cur.fetchall()\n for reply in replylist:\n self.assertEquals(reply[0],\"reply\")\n \n def testSanitizeInput(self):\n obscene = \"fuck your shit, Marty; you're a dick\"\n correct_string = \"enjoy your chocolate ice cream, worst TA ever; you're a fuzzy panda\"\n params = urllib.urlencode({'title': \"test\", 'text': obscene, 'reply':None})\n 
urllib.urlopen(\"http://localhost:5000/Assignment2.0/Input.py/comments\", params)\n \n with app.app_context():\n db = get_db()\n cur = db.execute('select text from entries where (text = ?) order by id desc',[correct_string])\n entrylist = cur.fetchall()\n for entry in entrylist:\n self.assertEquals(entry[0],correct_string)\n \n\nif __name__ == \"__main__\":\n unittest.main()\n ","sub_path":"Assignment3.1/Portfolio/app/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488007743","text":"import tensorflow as tf\nimport tensorflow.keras\nimport numpy as np\n\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import DepthwiseConv2D\nfrom tensorflow.keras.layers import MaxPool2D\nfrom tensorflow.keras.layers import UpSampling2D\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.backend import sigmoid\nfrom tensorflow.keras.utils import get_custom_objects\n\ndef swish(x, beta = 1):\n return (x * sigmoid(beta * x))\nget_custom_objects().update({'swish': Activation(swish)})\n\nclass UNet_Adv:\n def __init__(self, input_shape,n_filters,showSummary=True, activation=\"relu\"):\n self.input_shape = input_shape\n self.showSummary = showSummary\n self.n_filters = n_filters\n self.activation = activation\n \n def Conv2D_TailBlock(self,input_tensor, kernel_size, filters):\n conv = Conv2D(filters=filters, kernel_size=kernel_size,\n kernel_initializer=\"he_normal\", padding=\"same\")(input_tensor) \n conv = Activation(self.activation)(conv)\n conv = Conv2D(filters=filters, kernel_size=kernel_size,\n kernel_initializer=\"he_normal\", padding=\"same\")(conv)\n conv = BatchNormalization(renorm=True)(conv)\n\n #----------- Residual Block --------------\n conv_in = Conv2D(filters=filters, kernel_size=(1,1),\n kernel_initializer=\"he_normal\", padding=\"same\")(input_tensor)\n conv_in = BatchNormalization(renorm=True)(conv_in)\n out = tf.keras.layers.Add()([conv, conv_in])\n #----------------------------------------- \n return out\n\n def Conv2D_Block(self,input_tensor, kernel_size, filters, bottleNeckF=1):\n #----------- BottleNeck --------------\n conv_in = Conv2D(filters=np.int(filters/bottleNeckF), kernel_size=(1,1),\n kernel_initializer=\"he_normal\", padding=\"same\")(input_tensor)\n conv = self.DenseNet_Block(conv_in, kernel_size)\n #----------- Residual Block --------------\n out = tf.keras.layers.Add()([conv_in, conv])\n #-----------------------------------------\n #out = Conv2D(filters=filters, kernel_size=(1,1),\n # kernel_initializer=\"he_normal\", padding=\"same\")(out)\n out = BatchNormalization(renorm=True)(out)\n return out\n\n def DenseNet_Chunk(self,input_tensor, kernel_size, BatchNorm=False, activation=False):\n #Assume DenseNet_Block does not need to increase or decrease filters!\n DC_In = DepthwiseConv2D(depth_multiplier=1, kernel_size=kernel_size,\n kernel_initializer=\"he_normal\", padding=\"same\")(input_tensor)\n if activation:\n DC_In = Activation(self.activation)(DC_In)\n if BatchNorm:\n DC_In = BatchNormalization(renorm=True)(DC_In)\n DC_Out = tf.keras.layers.Add()([DC_In, input_tensor])\n return DC_Out, DC_In\n\n def DenseNet_Block(self,input_tensor, kernel_size):\n DN_1, DN_A = self.DenseNet_Chunk(input_tensor, kernel_size, activation=True, BatchNorm=True)\n 
DN_2, DN_B = self.DenseNet_Chunk(DN_1, kernel_size, BatchNorm=True)\n DN_3, DN_C = self.DenseNet_Chunk(DN_2, kernel_size, activation=True, BatchNorm=True)\n DN_Out = DepthwiseConv2D(depth_multiplier=1, kernel_size=kernel_size,\n kernel_initializer=\"he_normal\", padding=\"same\")(DN_3)\n DN_Out = tf.keras.layers.Add()([DN_Out, DN_A, DN_B, DN_C])\n DN_Out = BatchNormalization(renorm=True)(DN_Out)\n return DN_Out\n\n def UpConvolution(self,input_tensor, skip_tensor, kernel_size, filters):\n upconv = Conv2D(filters=filters, kernel_size=kernel_size, kernel_initializer=\"he_normal\",\n padding=\"same\")(UpSampling2D(size=(2, 2))(input_tensor))\n upconv = BatchNormalization(renorm=True)(upconv)\n upconv = concatenate([upconv, skip_tensor])\n return upconv\n\n\n def CreateUnet(self):\n\n input_layer = Input(self.input_shape)\n\n c1 = self.Conv2D_TailBlock(input_layer, kernel_size=(3, 3), filters=self.n_filters)\n p1 = MaxPool2D(pool_size=(2, 2), name=\"p1\")(c1)\n\n c2 = self.Conv2D_Block(p1, kernel_size=(3, 3), filters=self.n_filters*2)\n p2 = MaxPool2D(pool_size=(2, 2), name=\"p2\")(c2)\n\n c3 = self.Conv2D_Block(p2, kernel_size=(3, 3), filters=self.n_filters*4)\n p3 = MaxPool2D(pool_size=(2, 2), name=\"p3\")(c3)\n\n c4 = self.Conv2D_Block(p3, kernel_size=(3, 3), filters=self.n_filters*8)\n p4 = MaxPool2D(pool_size=(2, 2), name=\"p4\")(c4)\n\n c5 = self.Conv2D_Block(p4, kernel_size=(3, 3), filters=self.n_filters*16)\n d5 = tensorflow.keras.layers.Dropout(0.2, name=\"d5\")(c5)\n\n u1 = self.UpConvolution(d5, c4, kernel_size=(2, 2), filters=self.n_filters*8)\n c6 = self.Conv2D_Block(u1, kernel_size=(3, 3), filters=self.n_filters*8)\n\n u2 = self.UpConvolution(c6, c3, kernel_size=(2, 2), filters=self.n_filters*4)\n c7 = self.Conv2D_Block(u2, kernel_size=(3, 3), filters=self.n_filters*4)\n\n u3 = self.UpConvolution(c7, c2, kernel_size=(2, 2), filters=self.n_filters*2)\n c8 = self.Conv2D_Block(u3, kernel_size=(3, 3),filters=self.n_filters*2 )\n\n u4 = self.UpConvolution(c8, c1, kernel_size=(2, 2), filters=self.n_filters)\n c9 = self.Conv2D_TailBlock(u4, kernel_size=(3, 3), filters=self.n_filters)\n\n output_layer = Conv2D(filters=1, kernel_size=(1, 1),\n activation=\"sigmoid\", name=\"Convolution_c10\")(c9)\n\n MyModel = tensorflow.keras.models.Model(\n inputs=input_layer, outputs=output_layer)\n\n if self.showSummary:\n MyModel.summary()\n return MyModel\n","sub_path":"UNets/MyImplementation/UNET_Adv3.py","file_name":"UNET_Adv3.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294480659","text":"tablica = [[2,3,4,5],[7,6,4,5],[8,9,4,5]]\r\n\r\ndef fun(T, m):\r\n for i in range(m):\r\n print(T[i])\r\n\r\ndef fun2(T, m, n):\r\n suma = 0\r\n for i in range(m):\r\n for j in range(n):\r\n suma += T[i][j]\r\n print(suma)\r\n\r\ndef fun3(T):\r\n suma = 0\r\n for i in T:\r\n for j in i:\r\n if j % 3 == 0:\r\n suma += j\r\n print(suma)\r\n \r\nlista = [1,3,5,6,8,10,11,13,15,16,18,20]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"file6.py","file_name":"file6.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105933084","text":"\nfrom wms.model.mongo.combine_parcel.operation_record import CPOperationRecord\n\nclass AccessorBase(object):\n def __init__(self, *args, **kwargs):\n self._operations = []\n\n def split_props(self, props):\n set_prop = {k:v for k, v in props.items() if v is 
not None}\n upset_prop = {k:True for k, v in props.items() if v is None}\n update_dict = {}\n if set_prop:\n update_dict[\"$set\"] = set_prop\n if upset_prop:\n update_dict[\"$unset\"] = upset_prop\n return update_dict\n\n def add_operation(self, user_id, operator, operation, operation_description, operation_info, operation_datetime):\n self._operations.append(\n CPOperationRecord(\n user_id=user_id,\n operator=operator,\n operation=operation,\n operation_description=operation_description,\n operation_info=operation_info,\n operation_datetime=operation_datetime\n )\n )\n\n def add_operation_record(self, operation_record):\n self._operations.append(operation_record)\n","sub_path":"wms/lib/accessor_base.py","file_name":"accessor_base.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256920094","text":"import json\nimport numpy as np\nimport cv2\nimport os\nimport random\nimport cython\n\n\ndef rotateImage(image, angel):\n height = image.shape[0]\n width = image.shape[1]\n height_big = height *2\n width_big = width *2\n image_big = cv2.resize(image, (width_big, height_big))\n image_center = (width_big/2, height_big/2)\n x = round(random.uniform(0.75,0.8),2)\n #print(x)\n rot_mat = cv2.getRotationMatrix2D(image_center,angel, x)\n result = cv2.warpAffine(image_big, rot_mat, (width_big, height_big), flags=cv2.INTER_LINEAR,borderValue =255)\n return result\n\n#imageOriginal = cv2.imread(\"0_0.png\",1)\n#imageOriginal = cv2.resize(imageOriginal, (60,80))\n#imageRotated= rotateImage(imageOriginal, 3)\n\n \ndef build(s,s2):\n img = cv2.imread(s,1)\n from random import randint\n x =randint(-7,7)\n img = rotateImage(img,x)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n if img[i,j,0] >= 252:\n img[i,j] = 255\n cv2.imwrite(s2,img)\ndef show(s,n,l,s2):\n if n == 1:\n n1 = str(n)\n s = s[:-5]+n1+s[-4:]\n #print(s)\n if l == 1:\n from random import randint\n a = randint(0,35)\n a = str(a)\n s = s[:-5] + a + s[-4:]\n #print(s)\n img1 = cv2.imread(s2, 1)\n img2 = cv2.imread(s, 1)\n #img3 = cv2.imread('white.png',1)\n from random import randint\n x=randint(5,20)\n wc=randint(50,150)\n hc=randint(50,150)\n d = randint(-7,7)\n #print(x)\n #print(img1.shape)\n if l==0:\n img2 = rotateImage(img2,d)\n for i in range(img2.shape[0]):\n for j in range(img2.shape[1]):\n if img2[i,j,0] >= 252:\n img2[i,j] = 255\n\n h2, w2 = img2.shape[:2]\n img2 = cv2.resize(img2,(w2,h2))\n #img3= cv2.resize(img3,(x,50))\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n #h3, w3 = img3.shape[:2]\n vis = np.zeros((max(h1,h2),w1+w2,3),np.uint8)\n #vis = np.zeros((max(h1, max(h3,h2)), w1+w2+w3,3), np.uint8)\n vis[:h1, :w1,:3] = img1\n #vis[:h3, w1:w1+w3,:3] = img3\n # vis[:h2, w1+w3:w1+w3+w2,:3] = img2\n #print(h1,h2)\n if l == 1:\n from random import randint\n p = randint(w1,w2+w1)\n vis [(int)(h1/2):(int)(h1/2)+h2, w1-20:w1+w2-20] = img2\n else:\n vis[:h2,w1:w1+w2] =img2\n #cv2.imshow('abc',vis)\n # cv2.waitKey(500)\n cv2.imwrite(s2,vis)\ndef savve(s2):\n img1 = cv2.imread(s2,1)\n img2 = cv2.imread('white.png',1)\n h1,w1 = img2.shape[:2]\n rows,cols = img1.shape[:2]\n img2 = cv2.resize(img2,(h1+rows,w1+cols))\n cv2.imwrite('white.png',1)\n img2 = cv2.imread('white.png',1)\n roi = img2[0:rows, 0:cols]\n img1gray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img1gray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n img2_fg = cv2.bitwise_and(roi,roi,mask = mask_inv)\n img1_bg = 
cv2.bitwise_and(img1,img1,mask = mask)\n dst = cv2.add(img1_bg,img2_fg)\n img2[0:rows, 0:cols ] = dst #90 90\n img2 = img2[0:rows,0:cols] #80 110\n #os.chdir('/home/vicker/Desktop/work/new')\n os.chdir('/home/vicker/Desktop/work/new')\n cv2.imwrite(s2,img2)\ndef background(s2):\n n = random.randint(15,76)\n st = str(n)+'.jpg'\n os.chdir('/home/vicker/Desktop/work/new')\n img1 = cv2.imread(s2,1)\n os.chdir('/home/vicker/Desktop/work/background')\n img2 = cv2.imread(st,1)\n print(st)\n h1,w1 = img1.shape[:2]\n h2,w2 = img2.shape[:2]\n if h2

= 1920):\n return False\n\n if tag == \"iyr\":\n yr = int(substr)\n if not 2010 <= yr <= 2020:\n return False\n if tag == \"eyr\":\n yr = int(substr)\n if not 2020 <= yr <= 2030:\n return False\n if tag == \"hgt\":\n if not (substr[2:] == \"in\" and 59 <= int(substr[:2]) <= 76):\n if not (substr[3:] == \"cm\" and 150 <= int(substr[:3]) <= 193):\n return False\n if tag == \"hcl\":\n if len(substr) == 7 and substr[0] == '#':\n int(substr[1:], 16)\n continue\n return False\n if tag == \"ecl\":\n if substr not in eycols:\n return False\n if tag == \"pid\":\n if not (len(substr) == 9 and int(substr) >= 0):\n return False\n else:\n return False\n\n except ValueError:\n return False\n\n return True\n\n\ndef main():\n\n lines = []\n with open(inputfile) as infile:\n while True:\n line = infile.readline()\n if not line:\n break\n lines.append(line)\n\n print(f\"Read {len(lines)} lines\")\n\n numtrees = 0\n numfalse = 0\n countblocks = 0\n\n current_line = \"\"\n\n for line in lines:\n countblocks += 1\n if line != \"\\n\":\n countblocks -= 1\n current_line += \" \" + line.strip() + \" \"\n\n else:\n if validate(current_line):\n numtrees += 1\n else:\n numfalse += 1\n current_line = \"\"\n\n if current_line:\n countblocks += 1\n if validate(current_line):\n numtrees += 1\n else:\n numfalse += 1\n\n print(f\"Valid: {numtrees}, invalid: {numfalse}, countblocks: {countblocks}\")\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257936644","text":"# vim: fdm=indent\n'''\nauthor: David Glass\ndate: 05/19/16\ncontent: Perform statistcs on trees for antidengue\n'''\n# Modules\nimport os\nimport sys\nimport argparse\nfrom collections import defaultdict\nfrom ete3 import Tree\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Classes\nclass TreeStatistician():\n\t'''Performs analyses on phylogenetic trees.'''\n\tdef __init__(self, samplenames=None):\n\t\tfrom filenames import get_lineages_foldername as gfn\n\n\t\tself.tree_dictionary = defaultdict(int)\n\t\tself.edge_length_dictionary = defaultdict(np.array)\n\n\t\tif samplenames is not None:\n\t\t\tself.samplenames = samplenames\n\t\telse:\n\t\t\tsamplenames = []\n\t\t\tfor file in os.listdir(gfn(options='tree')):\n\t\t\t\tif file.endswith(\".nwk\"):\n\t\t\t\t\tsamplenames.append(file)\n\t\t\t\tself.samplenames = samplenames\n\n\n\tdef make_tree_dictionary(self):\n\t\t'''Iterates through newick files, strips them to just their topology, and \n\t\tenters the topology as a key in a dictionary with the value being the count.\n\t\tModifies self.tree_dictionary'''\n\t\tdef strip_tree(tree):\n\t\t\t'''Strips a tree down, leaving only its topology.'''\n\t\t\tnew_tree = ''\n\t\t\tfor ch in tree:\n\t\t\t if ch in '(),;':\n\t\t\t new_tree += ch\n\t\t\tnew_tree = new_tree.replace('()', '(X)')\n\t\t\tnew_tree = new_tree.replace('(,', '(X,')\n\t\t\tnew_tree = new_tree.replace(',)', ',X)')\n\t\t\twhile ',,' in new_tree:\n\t\t\t new_tree = new_tree.replace(',,', ',X,')\n\t\t\treturn new_tree\n\n\n\t\tfrom filenames import get_lineages_foldername as gfn\n\n\t\tfor sample in self.samplenames:\n\t\t\tfn_tree = gfn(options='tree') + sample\n\t\t\twith open(fn_tree, 'r') as f_t:\n\t\t\t\ttree = f_t.readline()\n\t\t\ttree = strip_tree(tree)\n\t\t\tself.tree_dictionary[tree] += 1\n\n\n\tdef count_edge_lengths(self):\n\t\t'''Creates a dictionary of strings to arrays in 
which the key is a lineage\n\t\tand the value is an array of edge lengths from child node to parent node \n\t\tfor every node in the tree. Modifies self.edge_length_dictionary.'''\n\n\t\tfrom filenames import get_lineages_foldername as gfn\n\t\t\n\t\tfor sample in self.samplenames:\n\t\t\tfn_tree = gfn(options='tree') + sample\n\t\t\twith open(fn_tree, 'r') as f_t:\n\t\t\t\ttree = f_t.readline()\n\t\t\tself.edge_length_dictionary[sample] = [node.dist for node in Tree(tree)]\n\n\n\tdef plot_histogram(self):\n\t\t'''Plots a random lineage tree and a histogram of its branch lengths'''\n\n\t\tfrom filenames import get_lineages_foldername as gfn\n\t\t\n\t\tf = plt.figure(1)\n\t\tlineage, hist_list = self.edge_length_dictionary.popitem()\n\t\tself.edge_length_dictionary[lineage] = hist_list\n\t\tplt.hist(hist_list, bins=20, facecolor='cyan', range=[0,max(hist_list)])\n\t\tplt.ylabel('Counts')\n\t\tplt.xlabel('Distance from parent to child node')\n\t\tplt.title(lineage)\n\t\tplt.grid(True)\n\n\t\tfn_tree = gfn(options='tree') + lineage\n\t\twith open(fn_tree, 'r') as f_t:\n\t\t\ttree_string = f_t.readline()\n\t\ttree = Tree(tree_string)\n\t\t\n\t\tf.show()\n\t\ttree.show()\n\t\t\n\n\tdef coordinator(self, function):\n\t\t'''Calls functions to run.\n\t\tParameters:\n\t\t\tfunction - the function to run'''\n\t\tif 'make_tree_dictionary' in function:\n\t\t\tself.make_tree_dictionary()\n\n\t\tif 'count_edge_length' in function:\n\t\t\tself.count_edge_lengths()\n\t\t\tself.plot_histogram()\n\n\n# Script\nif __name__ == '__main__':\n\n import argparse\n parser = argparse.ArgumentParser(description='Adds single sample processing functionality')\n\n parser.add_argument('-f', '--functions', nargs='+', default=None,\n help='functions to run: make_tree_dictionary, count_edge_length')\n parser.add_argument('-s', '--samples', nargs='+', default=None,\n help='Newick files to analyze')\n args = parser.parse_args()\n\n ts = TreeStatistician(samplenames=args.samples)\n ts.coordinator(args.functions)\n \n","sub_path":"src/tree_statistics.py","file_name":"tree_statistics.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"335942040","text":"import os\r\ndef rename(img_dir, mask_dir):\r\n img_list = os.listdir(img_dir)\r\n mask_list = os.listdir(mask_dir)\r\n for f in img_list:\r\n os.rename(os.path.join(img_dir,f), os.path.join(img_dir,f.replace(\"_training.tif\", \".tif\")))\r\n for f in mask_list:\r\n os.rename(os.path.join(mask_dir,f), os.path.join(mask_dir,f.replace(\"_manual1.gif\", \".gif\")))\r\n\r\nif __name__ == \"__main__\":\r\n rename(\"./data/training/images\", \"./data/training/1st_manual\")","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609760710","text":"import pygame\n\ncrossHairSize = (25, 15)\ncrossHairDotSize = (5,7)\nposFormat = (0, 0)\n\nclass CrossHair(pygame.sprite.Sprite):\n\tdef __init__(self, image):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.image.load(image).convert_alpha()\n\t\tself.image = pygame.transform.scale(self.image, crossHairSize)\n\n\t\tself.rect = self.image.get_rect()\n\n\t\tself.rect.center = pygame.mouse.get_pos()\n\n\tdef update(self):\n\t\tself.mouseX, self.mouseY = pygame.mouse.get_pos()\n\t\tself.rect.center = (self.mouseX, self.mouseY + 5)\n\nclass CrossHairDot(pygame.sprite.Sprite):\n\tdef __init__(self, 
image):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.image.load(image).convert_alpha()\n\t\tself.image = pygame.transform.scale(self.image, crossHairDotSize)\n\t\tself.rect = self.image.get_rect()\n\n\t\tself.mousePosHistory = [posFormat,posFormat,posFormat,posFormat,posFormat]\n\t\tself.mousePosHistory.append(pygame.mouse.get_pos())\n\n\t\tself.rect.center = self.mousePosHistory[0]\n\n\tdef update(self):\n\t\tself.rect.center = self.mousePosHistory[0]\n\t\tself.mousePosHistory.append(pygame.mouse.get_pos())\n\t\tself.mousePosHistory.pop(0)","sub_path":"SimpleShooter/CrossHair.py","file_name":"CrossHair.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460560913","text":"import re\n\ncontent_filter = [\n 'baca juga',\n 'saksikan video pilihan berikut ini',\n 'untuk memberikan komentar',\n 'copyright',\n '\\xa0'\n]\n\ndef is_news_content(text):\n \"\"\"\n menentukan apakah suatu kata adalah konten berita\n :param text: text yang akan diperiksa\n :return: True jika benar isi berita, False jika bukan\n \"\"\"\n if re.search(r'[(].{3}[\\/].{3}[)]', text) != None:\n return False\n for filter in content_filter:\n if filter in text.lower():\n return False\n return True\n\ndef format_datetime(datetime):\n \"\"\"\n mengolah tanggal waktu berita dipost ke website\n :param str: string berisi tanggal dan waktu\n :return: tanggal dan waktu yang sudah diformat\n \"\"\"\n temp = datetime.split()\n date = {\n 'day': int(temp[2]),\n 'month': temp[3],\n 'year': int(temp[4][:4])\n }\n time = {\n 'clock': temp[5],\n 'timezone': temp[6]\n }\n return date, time\n\ndef format_link(link):\n \"\"\"\n cleaning link\n \"\"\"\n return re.sub(r'[?].*', '', link)\n\ndef format_news(paragraphs):\n \"\"\"\n mengolah isi yang memang isi berita\n :param paragraphs: konten mentah dari html\n :return: konten berita yang sebenarnya, dipisah per paragraf\n \"\"\"\n clean_paragraph = []\n for paragraph in paragraphs:\n p_temp = paragraph.text\n if is_news_content(p_temp):\n clean_paragraph.append(p_temp)\n return clean_paragraph","sub_path":"Tugas1/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108142950","text":"#! 
/usr/bin/python3\n\nimport argparse\nimport datetime\nfrom logger_j import j_log \nimport sounddevice as sd\nimport soundfile as sf\n\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\"filename\", help=\"audio file to be played back\")\nparser.add_argument(\"-d\", \"--device\", type=int, help=\"device ID\")\nparser.add_argument(\"-t\", \"--time\", type=int, help=\"duration of the sound\")\nparser.add_argument(\"-path\",\"--fpath\",help = \"path to log file\")\n#parser.add_argument(\"-b\", \"--blockingmode\", type=int, help=\"duration of the sound\")\nargs = parser.parse_args()\n#d= args.device\n#fn= args.filename\nt = args.time\n#blocking_mode = args.blockingmode\n#print(\"blocking_mode= \"str(blocking_mode))\n\n\n\n\ndef _play_(d):\n\n\ttime_start = datetime.datetime.now()\n\tf_sound = sf.SoundFile(args.filename)\n\tt_file_s = format(len(f_sound) / f_sound.samplerate)\n\tt_file_f = float(t_file_s)\t\n\tt_file = int(t_file_f)\n\tj_log('info',f'Request to play {args.filename}')\n\ttry:\t\t\t\t\t\n\t\tdata, fs = sf.read(args.filename, dtype='float32')\n\t\tj_log('info',f'data from {args.filename} loaded')\n\t\tblocking= (False if type(t) == int else True)\n\t\t#sd.play(data, fs, device= args.device, blocking= blocking)\n\t\tj_log('info',f'playing {args.filename} to device {d}')\n\t\tif blocking == False:\n\t\t\tif t > t_file:\n\t\t\t\tj_log(info,f'user requested {t} second timer')\n\t\t\t\t#si temps demandé > temps du fichier calcule le nombre de fois que le fichier doit être joué\n\t\t\t\tnumber_of_plays = t // t_file\n\t\t\t\tj_log('info',f'playing {args.filename} {number_of_plays} times')\n\t\t\t\twhile number_of_plays > 0:\n\t\t\t\t\tsd.play(data, fs, device= d, blocking= True)\n\t\t\t\t\tsd.stop()\n\t\t\t\t\tnumber_of_plays -= 1\n\t\t\t\t#sortie de boucle cacule le temps restant\n\t\t\t\tt_2 = t%t_file\n\t\t\t\n\t\t\telse:\n\t\t\t\t#sinon set un timer normal \n\t\t\t\tt_2 = t\n\t\t\t\tif type(d) == list:\n\t\t\t\t\tfor i in d:\n\t\t\t\t\t\tsd.play(data, fs, device= i, blocking= blocking)\n\t\t\t\telse:\n\t\t\t\t\tsd.play(data, fs, device= d, blocking= blocking)\t\n\t\t\tt_start_2 = datetime.datetime.now()\t\n\t\t\t\n\t\t\twhile (datetime.datetime.now() - t_start_2).total_seconds() < t_2:\n\t\t\t\ti=0\n\t\telse:\n\t\t\tsd.play(data, fs, device= d, blocking= blocking)\n\t\t\tj_log('info','blocking == True')\n\t\tsd.stop()\n\t\telapsed_time= (datetime.datetime.now() - time_start).total_seconds()\n\n\t\tprint(\"elapsed_time= \"+str(int(elapsed_time//60))+':'+str(int(elapsed_time%60))+'\\r')\n\t\tif blocking:\n\t\t\tj_log('info','no timer requested, time elapsed = ' + str(int(elapsed_time//60))+':'+str(int(elapsed_time%60)))\n\t\telse:\n\t\t\tj_log('info','elapsed time= '+ str(int(elapsed_time//60))+':'+str(int(elapsed_time%60)))\n\t\tstatus = sd.get_status()\n\t\tif status:\n\t\t\tj_log('info',warning(str(status)))\n\texcept KeyboardInterrupt:\n\t\tj_log('error','user quit')\n\t\tj_log('info','=======================================')\n\t\treturn \n\t\t#parser.exit('\\nInterrupted by user')\n\texcept Exception as e:\n\t\tparser.exit(type(e).__name__ + ': ' + str(e))\n\t\treturn\n\tj_log('info',f'Finished playing {args.filename}')\n\tj_log('info','=======================================')\n\treturn \n\ndef play_indi(d):\n\tfor i in d:\n\t\tprint(f'playing to device {i}')\n\t\t_play_(i)\n\treturn 
","sub_path":"A-test/audio-files/play_api.py","file_name":"play_api.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115794560","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_three_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-subnet-group.html\nif __name__ == '__main__':\n \"\"\"\n\tdelete-db-subnet-group : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/delete-db-subnet-group.html\n\tdescribe-db-subnet-groups : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-subnet-groups.html\n\tmodify-db-subnet-group : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/modify-db-subnet-group.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # db-subnet-group-name : The name for the DB subnet group. This value is stored as a lowercase string.\nConstraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.\nExample: mySubnetgroup\n # db-subnet-group-description : The description for the DB subnet group.\n # subnet-ids : The EC2 Subnet IDs for the DB subnet group.\n(string)\n \"\"\"\n add_option_dict = {}\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_three_parameter(\"rds\", \"create-db-subnet-group\", \"db-subnet-group-name\", \"db-subnet-group-description\", \"subnet-ids\", add_option_dict)\n","sub_path":"rds_write_3/db-subnet-group_create.py","file_name":"db-subnet-group_create.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"188511969","text":"\"\"\"empty message\n\nRevision ID: 6c7aa67739ce\nRevises: 5f820bd3e77f\nCreate Date: 2019-12-02 22:20:30.364791\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '6c7aa67739ce'\ndown_revision = '5f820bd3e77f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('city',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=250), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('shop', sa.Column('city_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'shop', 'city', ['city_id'], ['id'])\n op.drop_column('shop', 'city')\n op.drop_column('user', 'username')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('user', sa.Column('username', mysql.VARCHAR(collation='utf8_unicode_ci', length=200), nullable=True))\n    op.add_column('shop', sa.Column('city', mysql.VARCHAR(collation='utf8_unicode_ci', length=40), nullable=True))\n    op.drop_constraint(None, 'shop', type_='foreignkey')\n    op.drop_column('shop', 'city_id')\n    op.drop_table('city')\n    # ### end Alembic commands ###\n","sub_path":"app/migrations/versions/6c7aa67739ce_.py","file_name":"6c7aa67739ce_.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"474428674","text":"def quick_sort(Array):\n    '''\n    Performs a quick sort of the array Array.\n    Returns the sorted array.\n    '''\n\n    if len(Array) <= 1:\n        return Array\n    \n    barrier = Array[0]\n    L = []\n    M = []\n    R = []\n\n    for x in Array:\n        if x < barrier:\n            L.append(x)\n        elif x == barrier:\n            M.append(x)\n        else:\n            R.append(x)    \n    quick_sort(L)\n    quick_sort(R)\n    k = 0\n    for x in L + M + R:\n        Array[k] = x\n        k += 1\n    \n    return Array\n\nfrom test_sort import test_sort\n\ntest_sort(quick_sort)","sub_path":"sorts/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644765427","text":"import praw\nimport torch\nimport numpy\nimport os\nimport logging\nimport creds as c\nimport csv\nimport cython\n\nis_whale = [\"whale\"]\nreply_template = \"This is a {predict}, I'm {guess_accuracy}% sure \\n \" \\\n                 \"##### I'm a bot, contact /u/goose323 to report issues!\"\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\nfor logger_name in (\"praw\", \"prawcore\"):\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.DEBUG)\n    logger.addHandler(handler)\n\n\nclass Mlin:  # input to machine learning layer\n    print('test')\n\n\nclass Readcomments:\n    ml_in = Mlin\n    global is_whale\n    subreddit = c.reddit.subreddit(\"all\")\n\n    def process_submission(submission):\n        normal_title = submission.title.lower()\n        # check whether any whale keyword appears in the title\n        for keyword in is_whale:\n            if keyword in normal_title:\n                print(submission.title)\n\n","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624395976","text":"#!/usr/bin/env python\nfrom teca import *\nimport sys\n\nif (len(sys.argv) != 3):\n    sys.stderr.write('usage:\\nteca_convert_table.py [input] [output]\\n\\n')\n    sys.stderr.write('converts a table from one format to another.\\n' \\\n        'The format is specified in the file name.\\n\\n')\n    sys.exit(-1)\n\nr = teca_table_reader.New()\nr.set_file_name(sys.argv[1])\n\nw = teca_table_writer.New()\nw.set_input_connection(r.get_output_port())\nw.set_file_name(sys.argv[2])\n\nw.update()\n","sub_path":"apps/teca_convert_table.py","file_name":"teca_convert_table.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50543141","text":"from enum import Enum\n\n\nclass SdsTypeProperty(object):\n    \"\"\"\n    Sds type property definition\n    \"\"\"\n\n    def __init__(self, id=None, name=None, description=None, isKey=False,\n                 sdsType=None, value=None, order=None):\n        \"\"\"\n\n        :param id: required\n        :param name: not required\n        :param description: not required\n        :param isKey: bool Set whether this property is part of the type's\n                      index required\n        :param sdsType: SdsType required\n        :param 
value: object not required\n        :param order: Integer Determines the order of a complex index type.\n                      If isKey is set and this is part of a complex index\n                      this is required. Not required\n        \"\"\"\n        self.Id = id\n        self.Name = name\n        self.Description = description\n        self.IsKey = isKey\n        self.SdsType = sdsType\n        self.Value = value\n        self.Order = order\n\n    @property\n    def Id(self):\n        \"\"\"\n        required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__id\n\n    @Id.setter\n    def Id(self, id):\n        \"\"\"\n        required\n        :param self:\n        :param id:\n        :return:\n        \"\"\"\n        self.__id = id\n\n    @property\n    def Name(self):\n        \"\"\"\n        not required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__name\n\n    @Name.setter\n    def Name(self, name):\n        \"\"\"\n        not required\n        :param self:\n        :param name:\n        :return:\n        \"\"\"\n        self.__name = name\n\n    @property\n    def Description(self):\n        \"\"\"\n        not required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__description\n\n    @Description.setter\n    def Description(self, Description):\n        \"\"\"\n        not required\n        :param self:\n        :param Description:\n        :return:\n        \"\"\"\n        self.__description = Description\n\n    @property\n    def IsKey(self):\n        \"\"\"\n        bool Set whether this property is part of the type's index required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__isKey\n\n    @IsKey.setter\n    def IsKey(self, iskey):\n        \"\"\"\n        bool Set whether this property is part of the type's index required\n        :param self:\n        :param iskey:\n        :return:\n        \"\"\"\n        self.__isKey = iskey\n\n    @property\n    def SdsType(self):\n        \"\"\"\n        required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__sdsType\n\n    @SdsType.setter\n    def SdsType(self, sdsType):\n        \"\"\"\n        required\n        :param self:\n        :param sdsType:\n        :return:\n        \"\"\"\n        self.__sdsType = sdsType\n\n    @property\n    def Value(self):\n        \"\"\"\n        not required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__value\n\n    @Value.setter\n    def Value(self, value):\n        \"\"\"\n        not required\n        :param self:\n        :param value:\n        :return:\n        \"\"\"\n        self.__value = value\n\n    @property\n    def Order(self):\n        \"\"\"\n        Integer Determines the order of a complex index type.\n        If isKey is set and this is part of a complex index\n        this is required. Not required\n        :param self:\n        :return:\n        \"\"\"\n        return self.__order\n\n    @Order.setter\n    def Order(self, order):\n        \"\"\"\n        Integer Determines the order of a complex index type. 
If isKey is\n        set and this is part of a complex index this is required.\n        Not required\n        :param self:\n        :param order:\n        :return:\n        \"\"\"\n        self.__order = order\n\n    def toDictionary(self):\n        dictionary = {'IsKey': self.IsKey}\n\n        if hasattr(self, 'Id'):\n            dictionary['Id'] = self.Id\n\n        if hasattr(self, 'Name'):\n            dictionary['Name'] = self.Name\n\n        if hasattr(self, 'Description'):\n            dictionary['Description'] = self.Description\n\n        if hasattr(self, 'SdsType'):\n            if(self.SdsType):\n                from .SdsType import SdsType\n                dictionary['SdsType'] = self.SdsType.toDictionary()\n\n        if hasattr(self, 'Value'):\n            if (isinstance(self.Value, Enum)):\n                dictionary['Value'] = self.Value.name\n            else:\n                dictionary['Value'] = self.Value\n\n        if hasattr(self, 'Order'):\n            dictionary['Order'] = self.Order\n\n        return dictionary\n\n    @staticmethod\n    def fromDictionary(content):\n        typeProperty = SdsTypeProperty()\n\n        if not content:\n            return typeProperty\n\n        if 'Id' in content:\n            typeProperty.Id = content['Id']\n\n        if 'IsKey' in content:\n            typeProperty.IsKey = content['IsKey']\n\n        if 'Name' in content:\n            typeProperty.Name = content['Name']\n\n        if 'Description' in content:\n            typeProperty.Description = content['Description']\n\n        if 'SdsType' in content:\n            from .SdsType import SdsType\n            typeProperty.SdsType = SdsType.fromDictionary(content['SdsType'])\n\n        if 'Value' in content:\n            typeProperty.Value = content['Value']\n\n        if 'Order' in content:\n            typeProperty.Order = content['Order']\n\n        return typeProperty\n","sub_path":"library_samples/Python/ocs_sample_library_preview/SDS/SdsTypeProperty.py","file_name":"SdsTypeProperty.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419631380","text":"from setuptools import setup\n\n# Filling in this template requires filling in:\n# name\n# description\n# packages\n# classifiers\n#     Development Status :: \n#     ... it would also be beneficial to study/fill in other classifiers \n#\n# Also will benefit from confirming the url -- which may change frequently\n# ... such as if not using bitbucket\n\n\ndef TEMPLATE(placeholder='unspecified'):\n    \"\"\"This function exists only to prevent you from running setup.py without\n    filling in necessary parts. 
Delete TEMPLATE in the filled-in version.\"\"\"\n    raise Exception(\"Template has not yet been filled in for: \"+placeholder)\n\nsetup(\n    name=TEMPLATE('{package-name}'),\n    version=open('VERSION').read().strip(),\n    author='Oakland John Peters',\n    author_email='oakland.peters@gmail.com',\n\n    description='Class-based syntax for creating properties in Python.',\n    long_description=open('README.rst').read(),\n    url=TEMPLATE('package: http://bitbucket.org/OPeters/{package-name}'),\n    license='MIT',\n\n    packages=['clsproperty'],\n\n    classifiers=[\n        #Select one 'Development Status'\n        #'Development Status :: 1 - Planning',\n        #'Development Status :: 2 - Pre-Alpha',\n        #'Development Status :: 3 - Alpha',\n        #'Development Status :: 4 - Beta',\n        #'Development Status :: 5 - Production/Stable',\n        'Development Status :: 2 - Pre-Alpha',\n        'License :: OSI Approved :: MIT License',\n        'Natural Language :: English',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.6',\n        'Programming Language :: Python :: Implementation :: CPython',\n\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Intended Audience :: Developers',\n        'Topic :: Utilities' #only if appropriate\n    ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433738290","text":"import os\n\nfrom flask import Flask, render_template, redirect, request, url_for, send_from_directory\n\napp = Flask(__name__)\n\napp.config['UPLOAD_PATH'] = os.path.dirname(os.path.abspath(__file__)) + '/uploads'\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/save_image', methods=['POST',])\ndef save_image():\n    name = request.form['name']\n    archive = request.files['image']\n\n    extension = os.path.splitext(archive.filename)[1]\n    f_name = str(name + extension)\n\n    upload_path = app.config['UPLOAD_PATH']\n\n    archive.save(os.path.join(upload_path, f_name))\n\n    return redirect(url_for('index'))\n\n@app.route('/uploads/<name_archive>')\ndef image(name_archive):\n    return send_from_directory('uploads',name_archive) # return the requested file from the uploads directory\n\nif __name__ == \"__main__\":\n    app.run(debug=True,port=5380)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"314311284","text":"from django.conf.urls.defaults import *\r\nfrom django.conf import settings\r\n\r\nfrom django.contrib import admin\r\n\r\nadmin.autodiscover()\r\n\r\nurlpatterns = patterns('',\r\n    (r'^djikiki/', include('djikiki.urls')),\r\n    (r'^accounts/', include('registration.urls')),\r\n\r\n)\r\n\r\nurlpatterns += patterns('',\r\n    ('^admin/(.*)', admin.site.root),\r\n    )\r\n\r\nif settings.DEBUG:\r\n    import os\r\n    dirname = os.path.dirname(globals()[\"__file__\"])\r\n    media_dir = os.path.join(dirname, 'site_media')\r\n    urlpatterns += patterns('',\r\n        (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': media_dir}),\r\n        \r\n    )\r\n\r\n","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"170119457","text":"from setwrapper import Set\n\n\nclass MultiSet(Set):\n    '''\n    Inherits all Set names, but extends intersect\n    and union to support 
multiple operands; note\n    that \"self\" is still the first argument (stored\n    in the *args argument now); also note that the\n    inherited & and | operators call the new methods\n    here with 2 arguments, but processing more than\n    2 requires a method call, not an expression:\n    '''\n\n    def intersect(self, *others):\n        res = []\n        for x in self:\n            for other in others:\n                if x not in other:\n                    break\n            else:\n                res.append(x)\n        return Set(res)\n\n    def union(*args):\n        res = []\n        for seq in args:\n            for x in seq:\n                if x not in res:\n                    res.append(x)\n        return Set(res)\n\n\nif __name__ == '__main__':\n    x = MultiSet([1, 2, 3, 4])\n    y = MultiSet([3, 4, 5])\n    z = MultiSet([0, 1, 2])\n\n    print(x & y, x | y)  # Two operands\n\n    print(x.intersect(y, z))  # Three operands\n\n    print(x.union(y, z))\n    print(x.intersect([1, 2, 3], [2, 3, 4], [1, 2, 3]))  # Four operands\n    print(x.union(range(10)))  # Non-MultiSets work, too\n","sub_path":"multiset.py","file_name":"multiset.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541946506","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 21 22:17:03 2018\n\n@author: Saul\n\"\"\"\n\n# -*- coding:utf-8 -*-\nfrom ctypes import *\nclass Solution:\n    def NumberOf1(self, n):\n        # write code here\n        count = 0\n        while c_int(n).value:\n            count += 1\n            n = (n-1) & n\n        return count\n    \ntest = Solution()\nprint(test.NumberOf1(-3))","sub_path":"comeonoffer/二进制中1的个数.py","file_name":"二进制中1的个数.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596662002","text":"'''\nInput: an integer\nReturns: an integer\n'''\n\n# what is the runtime of this implementation\n# O(3^n)\ndef eating_cookies(n):\n    # What are our base cases?\n    # This represents a number of cookies where we can just take that many cookies\n    \n    if n < 0:\n        return 0\n    if n == 0:\n        return 1\n\n    return eating_cookies(n - 1) + eating_cookies(n - 2) + eating_cookies(n - 3)\n\n# print(eating_cookies(5))\n\nif __name__ == \"__main__\":\n    # Use the main function here to test out your implementation\n    num_cookies = 5\n\n    print(f\"There are {eating_cookies(num_cookies)} ways for Cookie Monster to eat {num_cookies} cookies\")\n\n    ###########################\n    # From after hours video\n    #use recursion\n    # base case is n <= 2\n    #These need to be hardcoded as our base cases\n    # n = 0 = > 0\n    # n = 1 = > 1\n    # n = 2 = > 2\n    #every other situation it can be broken down recursively\n    ###########################","sub_path":"eating_cookies/eating_cookies.py","file_name":"eating_cookies.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"359657764","text":"#!/usr/bin/python3\nimport sys\nimport pathlib\nimport os\nimport subprocess\n\n\ndef _run(command):\n    subprocess.run(command, check=True)\n\n\ndef copy_to_uploads(filepath):\n    filename = os.path.basename(filepath)\n    uploads = os.path.join(\n        \"/Users/pratyush/Websites/faltoo\", \"content\", \"uploads\", filename\n    )\n    subprocess.run([\"cp\", filepath, uploads], check=True)\n    # copy new path to clipboard\n    new_path = f\"/uploads/{filename}\"\n    subprocess.run(\n        \"pbcopy\", universal_newlines=True, input=new_path, check=True\n    
)\n\n\ncopy_to_uploads(sys.argv[1])\n","sub_path":"copy_to_uploads.py","file_name":"copy_to_uploads.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"395069605","text":"#!/bin/env python3\n\nimport curses\n\nfrom time import sleep\nfrom math import floor\nfrom random import randint\n\n\ndef populate(array, percent=0.3):\n    'Populates a blank array with some random 1s (live cells).'\n    x = len(array)\n    y = len(array[0])\n\n    for _ in range(floor(x * y * percent)):\n        array[randint(0, x - 1)][randint(0, y - 1)] = 1  # randomly set values in array to live\n\n\ndef updatescreen(screen, array):\n    'Draws array on given screen and refreshes.'\n    for y, line in enumerate(array):\n        for x, value in enumerate(line):\n            if value:\n                screen.addstr(y + 1, x + 1, ' ', curses.color_pair(min(value, 5)))\n            else:\n                screen.addstr(y + 1, x + 1, ' ')\n\n    screen.refresh()\n\n\ndef lifestep(array):\n    \"Given an array, steps it forward one generation per Conway's game of life. Uses toroidal geometry.\"\n\n    newgeneration = [[0 for i in array[0]] for i in array]\n\n    for x, row in enumerate(array):\n        for y, cell in enumerate(row):\n            newgeneration[x][y] = cellstep(x, y, array)\n            # Now we are addressing each cell\n\n    return newgeneration\n\n\ndef cellstep(row, column, array):\n    \"Given a cell, steps it one step forward in Conway's Game of Life. 2,3 -> live\"\n    rowdim = len(array)\n    coldim = len(array[0])\n    subarr = [[array[(row + i) % rowdim][(column + j) % coldim] for j in range(-1, 2)] for i in range(-1, 2)]\n    value = subarr[1].pop(1)\n\n    conwaysum = sum([1 if item >= 1 else 0 for item in [item for sublist in subarr for item in sublist]])\n    # first we flatten the list of lists into a single list\n    # then we convert it to 1s and 0s because the larger numbers matter for colors, not life or death\n\n    if conwaysum < 2:  # underpopulation\n        return 0\n    elif conwaysum > 3:  # overpopulation\n        return 0\n    elif conwaysum == 3:\n        if value == 0:\n            return 1\n        else:\n            return value + 1\n    else:\n        if value != 0:\n            return value + 1\n        else:\n            return 0\n\n\ndef main(lifescr):\n    # TODO define curses colors here\n\n    curses.init_pair(1, 7, 1)\n    curses.init_pair(2, 1, 2)\n    curses.init_pair(3, 1, 3)\n    curses.init_pair(4, 1, 4)\n    curses.init_pair(5, 1, 5)\n    curses.init_pair(6, 1, 6)\n    curses.init_pair(7, 1, 7)\n    #curses.init_pair(8, 1, 8)\n    #curses.init_pair(9, 7, 9)\n\n    lifescr.nodelay(True)\n\n    lifescr.border()\n\n    y, x = lifescr.getmaxyx()\n\n    scrarray = [[0 for i in range(x - 2)] for i in range(y - 2)]\n\n    populate(scrarray)\n\n    updatescreen(lifescr, scrarray)\n\n    while True:\n        sleep(.1)\n\n        scrarray = lifestep(scrarray)\n\n        updatescreen(lifescr, scrarray)\n\n\nif __name__ == '__main__':\n    curses.wrapper(main)\n","sub_path":"pylife.py","file_name":"pylife.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"229792693","text":"import numpy as np\n\n\ndef state2multiChImg(state):\n    rows = []\n    cols = []\n    for tile in [0, 1, 2, 3, 4]:\n        row = np.where(state == tile)[0][0] // 4\n        rows.append(row)\n        column = np.where(state == tile)[0][0] % 4\n        cols.append(column)\n\n    rows = np.array(rows)\n    cols = np.array(cols)\n    level = np.arange(5)\n    z = np.zeros((5, 4, 4))\n    z[level, rows, cols] = 1\n    return z\n\n\nwith open(\"../data/pdb.txt\", \"r\") as f:\n    lines_lst = f.readlines()\nprint(lines_lst[0])\n\n\nstates_np = np.array(list(map(lambda l: 
l.replace('(4x4)', '').split(' ')[0].split(' '), lines_lst))).astype(np.int8)\nlabels_np = np.array(list(map(lambda l: l.split(' ')[1], lines_lst))).reshape(-1, 1).astype(np.int8)\n\nmulti_ch_images_np = np.array([state2multiChImg(s) for s in states_np])\nnp.save(\"../data/multi_ch_images.npy\", multi_ch_images_np)\nnp.save(\"../data/labels.npy\", labels_np)\n\n\n","sub_path":"utils/pdb2multi_ch_image.py","file_name":"pdb2multi_ch_image.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397842797","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import TemplateView\n# Create your views here.\nfrom django.http import HttpResponse\n\n########################################################################################\n# Home View\n########################################################################################\nclass indexAppView(TemplateView):\n    template_name = 'indexAppBase.html'\n    def get(self, request):\n        context = {}\n        return render(request, self.template_name, context)\n\n########################################################################################\n# Clientes view Form\n########################################################################################\nfrom .models import *\nfrom .forms import *\nclass AddClienteView(TemplateView):\n    template_name = 'clientes.html'\n\n    def post(self, request):\n        context = {}\n        form = ClientesForm(request.POST)\n        if form.is_valid():\n            form.save()\n            context['saved'] = True\n            return render(request, self.template_name, context)\n        else:\n            context['error'] = 'Error en la captura de la informacion, por favor verifique los datos y vuelva a intentarlo'\n            context['form'] = form\n            return render(request, self.template_name, context)\n\n    def get(self,request):\n        context = {}\n        form = ClientesForm()\n        context['form'] = form\n        return render(request, self.template_name, context)\n\nfrom django.views.generic import CreateView\nfrom .models import *\nfrom .forms import *\n\nclass AddClienteCreateView(CreateView):\n    template_name = 'clientes.html'\n    model = Cliente\n    form_class = ClientesForm\n    \n    def get(self, request):\n        \"\"\"\n        Handles GET requests and instantiates blank versions of the form\n        and its inline formsets.\n        \"\"\"\n        context = {}\n        self.object = None\n        \n        form_class = self.get_form_class()\n        form = self.get_form(form_class)\n        ##Cliente\n        # GET renders blank (unbound) formsets, as the docstring above describes\n        credito_formset = CreditoFormSet()\n        guardavalores_formset = GuardaValoresFormSet()\n        empelo_formset = EmpeloFormSet()\n        cotitular_formset = CotitularFormSet()\n        domicilio_formset = DomicilioFormSet()\n        #Aval data\n        aval_formset = AvalFormSet()\n        domicilio_avalformSet = DomicilioAvalFormSet()\n        cotitular_avalformSet = CotitularAvalFormSet()\n\n        context['form'] = form\n        context['credito_formset'] = credito_formset\n        context['guardavalores_formset'] = guardavalores_formset\n        context['empelo_formset'] = empelo_formset\n        context['cotitular_formset'] = cotitular_formset \n        context['domicilio_formset'] = domicilio_formset\n        context['aval_formset'] = aval_formset\n        context['domicilio_avalformset'] = domicilio_avalformSet\n        context['cotitular_avalformset'] = cotitular_avalformSet\n\n        return render(request, self.template_name, context)\n\n    def post(self, request):\n        
\"\"\"\n Handles POST requests, instantiating a form instance and its inline\n formsets with the passed POST variables and then checking them for\n validity.\n \"\"\"\n context = {}\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n \n form = ClientesForm(request.POST)\n credito_formset =CreditoFormSet(request.POST,request.FILES)\n guardavalores_formset =GuardaValoresFormSet(request.POST,request.FILES)\n empelo_formset =EmpeloFormSet(request.POST,request.FILES)\n cotitular_formset =CotitularFormSet(request.POST,request.FILES)\n domicilio_formset =DomicilioFormSet(request.POST,request.FILES)\n aval_formset =AvalFormSet(request.POST,request.FILES)\n domicilio_avalformSet = DomicilioAvalFormSet(request.POST,request.FILES)\n cotitular_avalformSet = CotitularAvalFormSet(request.POST,request.FILES)\n\n if (form.is_valid() and credito_formset.is_valid()and\n guardavalores_formset.is_valid()and\n empelo_formset.is_valid()and\n cotitular_formset.is_valid()and\n aval_formset.is_valid() and\n domicilio_formset.is_valid() and\n domicilio_avalformSet.is_valid() and\n cotitular_avalformSet.is_valid()\n ):\n return self.form_valid(request, form,credito_formset , guardavalores_formset , empelo_formset , cotitular_formset , aval_formset, domicilio_formset, domicilio_avalformSet, cotitular_avalformSet)\n else:\n return self.form_invalid(request, form,credito_formset , guardavalores_formset , empelo_formset , cotitular_formset , aval_formset, domicilio_formset, domicilio_avalformSet, cotitular_avalformSet)\n\n def form_valid(self, request, form, credito_formset , guardavalores_formset , empelo_formset , cotitular_formset , aval_formset, domicilio_formset, domicilio_avalformSet, cotitular_avalformSet):\n \"\"\"\n Called if all forms are valid. Creates a Recipe instance along with\n associated Ingredients and Instructions and then redirects to a\n success page.\n \"\"\"\n context = {}\n self.object = form.save()\n \n credito_formset.instance = self.object\n credito_formset.save()\n \n guardavalores_formset.instance = self.object\n guardavalores_formset.save()\n \n empelo_formset.instance = self.object\n empelo_formset.save()\n \n cotitular_formset.instance = self.object\n cotitular_formset.save()\n \n domicilio_formset.instance = self.object\n domicilio_formset.save()\n \n aval_formset.instance = self.object\n aval_formset.save()\n\n domicilio_avalformSet.instance = self.object\n domicilio_avalformSet.save()\n\n cotitular_avalformSet.instance = self.object\n cotitular_avalformSet.save()\n\n\n \n context['saved'] = True\n return render(request, self.template_name, context)\n\n def form_invalid(self, request, form, credito_formset , guardavalores_formset , empelo_formset , cotitular_formset , aval_formset, domicilio_formset, domicilio_avalformSet, cotitular_avalformSet):\n \"\"\"\n Called if a form is invalid. 
Re-renders the context data with the\n data-filled forms and errors.\n \"\"\"\n context = {}\n context['form'] = form\n context['credito_formset'] = credito_formset\n context['guardavalores_formset'] = guardavalores_formset\n context['empelo_formset'] = empelo_formset\n context['cotitular_formset'] = cotitular_formset\n context['domicilio_formset'] = domicilio_formset\n context['aval_formset'] = aval_formset\n context['domicilio_avalformset'] = domicilio_avalformSet\n context['cotitular_avalformset'] = cotitular_avalformSet\n\n return render(request, self.template_name, context)","sub_path":"appsofom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369414180","text":"# Utilities for retrieving altitude and azimuth from longitude and latitude\n\nfrom pysolar.solar import *\nimport datetime\nimport requests\nimport math\ntry:\n import pytz\nexcept:\n pytz = None\n\nfrom timezonefinder import TimezoneFinder\nfrom pytz import timezone\n\n# For optional datetime\n# https://stackoverflow.com/questions/9539921/how-do-i-create-a-python-function-with-optional-arguments#:~:text=After%20the%20required%20positional%20arguments,specific%20optional%20arguments%20by%20name.&text=Just%20use%20the%20*args%20parameter,a%20%22way%22%20of%20overloading.\ndef get_sun_coords(lat, long, *args):\n lat = float(lat)\n long = float(long)\n\n if lat > 90 or lat < -90 or long > 180 or long < -180:\n return {\"error\": \"Invalid coordinates\"}\n\n if len(args) > 0:\n dateStr = args[0]\n date = datetime.datetime.strptime(dateStr, '%Y-%m-%d')\n else:\n date = datetime.date.today()\n\n months = ['January',\n 'February',\n 'March',\n 'April',\n 'May',\n 'June',\n 'July',\n 'August',\n 'September',\n 'October',\n 'November',\n 'December']\n\n data = {}\n all_minutes = get_all_minutes_for_date(date)\n counter = 0\n\n for minute in all_minutes:\n coords = get_sun_coords_for_time(lat, long, minute)\n if \"error\" in coords:\n return {\"error\": coords[\"error\"]}\n\n convertedCoords = convert_coords_to_xyz(coords)\n data[counter] = convertedCoords\n counter += 1\n\n data[\"date\"] = f\"{months[date.month - 1]} {date.day}, {date.year}\"\n data[\"lat\"] = str(lat)\n data[\"long\"] = str(long)\n\n riseSet = get_sunrise_sunset(lat, long, date)\n\n data['sunrise'] = riseSet['sunrise']\n data['sunset'] = riseSet['sunset']\n\n day_len_hour = math.floor(riseSet['day_length'])\n day_len_minute = math.floor((riseSet['day_length'] - day_len_hour) * 60)\n\n day_len_str = str(day_len_hour) + ' hours, ' + str(day_len_minute) + ' minutes'\n\n data['day_length'] = day_len_str\n return data\n\ndef convert_coords_to_xyz(coords):\n azimuth = coords['azimuth']\n altitude = coords['altitude']\n\n # 3D \"globe\" radius for trig calculations is an arbitrary constant\n threeDRadius = 100\n alt_in_radians = math.radians(altitude)\n az_in_radians = math.radians(azimuth)\n is_negative = alt_in_radians < 0\n\n # Trigonometry reminder\n # https://owlcation.com/stem/Everything-About-Triangles-and-More-Isosceles-Equilateral-Scalene-Pythagoras-Sine-and-Cosine\n twoDRadius = threeDRadius * math.cos(alt_in_radians)\n\n # For 3d\n x = twoDRadius * math.cos(az_in_radians)\n y = twoDRadius * math.sin(az_in_radians)\n z = threeDRadius * math.sin(alt_in_radians)\n return {\"x\": x,\n \"y\": y,\n \"z\": z,\n \"timestring\": coords['timestring'],\n \"isNegative\": str(is_negative)}\n\ndef get_sun_coords_for_time(lat, long, date):\n tf = 
TimezoneFinder()\n\n    lat = float(lat)\n    long = float(long)\n\n    localTzStr = tf.timezone_at(lng=long, lat=lat)\n\n    if localTzStr is None:\n        localTzStr = tf.closest_timezone_at(lng=long, lat=lat, delta_degree=3)\n\n    if localTzStr is None:\n        return {\"error\": \"No timezone\"}\n\n    localTzObj = pytz.timezone(localTzStr)\n\n    date = localTzObj.localize(date)\n\n    altitude = get_altitude(lat, long, date)\n    azimuth = get_azimuth(lat, long, date)\n\n    ampm = \"AM\" if date.hour < 12 else \"PM\"\n    hour = date.hour if date.hour < 13 else date.hour - 12\n\n    if hour == 0:\n        hour = 12\n\n    minute = f'{date.minute:02}' # zero-pad single-digit minutes so 9:05 does not render as 9:5\n\n    timestring = f\"{hour}:{minute} {ampm}\"\n    return { \"altitude\" : altitude, \"azimuth\" : azimuth, \"timestring\": timestring}\n\ndef get_all_minutes_for_date(date):\n    all_minutes = []\n\n    #todayDay = todayDay.replace(month=3)\n    iterateDate = datetime.datetime(year=date.year,\n                                    month=date.month,\n                                    day=date.day,\n                                    hour=0,\n                                    minute=0)\n\n    # Using timedelta\n    # https://www.geeksforgeeks.org/python-datetime-timedelta-function/\n    td = datetime.timedelta(minutes=10)\n\n    while iterateDate.day == date.day:\n        all_minutes.append(iterateDate)\n        iterateDate = iterateDate + td\n    return all_minutes\n\ndef get_sunrise_sunset(lat, long, date):\n    tf = TimezoneFinder()\n    date = str(date)\n\n    # Don't pass pure 0 to SunriseSunset.com\n    # It causes an error\n    if float(lat) == 0:\n        lat = \"0.000001\"\n\n    if float(long) == 0:\n        long = \"0.000001\"\n\n    apiString = 'https://api.sunrise-sunset.org/json?lat=' + str(lat) + '&lng=' + str(long) + '&date=' + str(date) + '&formatted=0'\n    apiData = requests.get(apiString).json()\n\n    localTzStr = tf.timezone_at(lng=float(long), lat=float(lat))\n\n    if localTzStr is None:\n        localTzStr = tf.closest_timezone_at(lng=float(long), lat=float(lat), delta_degree=3)\n\n    if localTzStr is None:\n        return {\"error\": \"No timezone\"}\n\n    localTzObj = pytz.timezone(localTzStr)\n    utcTzObj = pytz.timezone('UTC')\n\n    sunriseStr = apiData['results']['sunrise']\n    sunsetStr = apiData['results']['sunset']\n\n    utcSunrise = datetime.datetime.strptime(sunriseStr, '%Y-%m-%dT%H:%M:%S+00:00').replace(tzinfo=utcTzObj)\n    utcSunset = datetime.datetime.strptime(sunsetStr, '%Y-%m-%dT%H:%M:%S+00:00').replace(tzinfo=utcTzObj)\n\n    # Convert to local timezone\n    # https://stackoverflow.com/questions/10997577/python-timezone-conversion\n    sunrise = utcSunrise.astimezone(localTzObj)\n    sunset = utcSunset.astimezone(localTzObj)\n\n    sunrise = get_nice_datestring(sunrise)\n    sunset = get_nice_datestring(sunset)\n\n    day_length = float(apiData['results']['day_length']) / (60 * 60)\n    return {\"sunrise\": sunrise, \"sunset\": sunset, \"day_length\": day_length}\n\ndef get_nice_datestring(date):\n    ampm = \"AM\" if date.hour < 12 else \"PM\"\n    hour = date.hour if date.hour < 13 else date.hour - 12\n\n    if hour == 0:\n        hour = 12\n\n    minute = '00' if date.minute == 0 else date.minute\n\n    if date.minute < 10:\n        minute = '0' + str(date.minute)\n\n    timestring = f\"{hour}:{minute} {ampm}\"\n    return timestring\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610680168","text":"#-*- coding: utf-8 -*- \nimport os\nimport pathlib\nimport glob\nimport cv2\nimport dlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport settings\nfrom imutils import face_utils\n\ndef load_name_images(image_path_pattern):\n    name_images = []\n    # get the files matching the given path pattern\n    
image_paths = glob.glob(image_path_pattern)\n    # read each file\n    for image_path in image_paths:\n        path = pathlib.Path(image_path)\n        # file path\n        fullpath = str(path.resolve())\n        print(f\"Image file (absolute path):{fullpath}\")\n        # file name\n        filename = path.name\n        print(f\"image file : {filename}\")\n        # read the image\n        image = cv2.imread(fullpath)\n        if image is None:\n            print(f\"Cannot read image file ({fullpath}).\")\n            continue\n        # TO-DO\n        name_images.append((filename,image)) \n    return name_images\n\ndef detect_image_face(file_path, image, cascade_filepath): \n    # convert the image to grayscale\n    image_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    # load the cascade file\n    cascade = cv2.CascadeClassifier(cascade_filepath)\n    # detect faces\n    faces = cascade.detectMultiScale(image_gs,scaleFactor=1.1,minNeighbors=15,minSize=(64,64))\n    if len(faces) == 0:\n        print(\"Failed to detect a face\")\n        return\n    print(\"Success Face Detection!\")\n    # TO-DO \n    # [76 31 83 83] -> x_pos, y_pos, width, height\n    face_count = 1\n    for(x_pos, y_pos, width, height) in faces:\n        face_image = image[y_pos:y_pos+height,x_pos:x_pos+width] # y, x\n        if face_image.shape[0] > 64:\n            face_image = cv2.resize(face_image,(64,64))\n        print(face_image.shape)\n        # Save 00_001.jpg -> 00_001_(face_count).jpg\n        path = pathlib.Path(file_path)\n        directory = str(path.parent.resolve())\n        filename = path.stem\n        extension = path.suffix\n        output_path = os.path.join(directory,f\"{filename}_{face_count:03}{extension}\")\n        print(f\"=================================OUTPUT File (absolute path) : {output_path}\")\n        try:\n            cv2.imwrite(output_path,face_image)\n            face_count = face_count + 1\n        except:\n            print(\"Exception occurred : {}\".format(output_path))\n    return\n\ndef delete_dir(dir_path, is_delete_top_dir=True):\n    for root, dirs, files in os.walk(dir_path, topdown=False):\n        for name in files:\n            os.remove(os.path.join(root, name))\n        for name in dirs:\n            os.rmdir(os.path.join(root, name))\n    if is_delete_top_dir:\n        os.rmdir(dir_path)\n\n\n\n\n\ndef detect_dog_face(file_path, image): \n    # convert the image to grayscale\n    image_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    detector = dlib.cnn_face_detection_model_v1('dogHeadDetector.dat')\n    predictor = dlib.shape_predictor('landmarkDetector.dat')\n\n    # detect dog faces\n    faces = detector(image_gs, upsample_num_times=1)\n    if len(faces) == 0:\n        print(\"Failed to detect a face\")\n        return\n    print(\"Success Face Detection!\")\n    # TO-DO \n    # [76 31 83 83] -> x_pos, y_pos, width, height\n    face_count = 1\n    for(i, d) in enumerate(faces):\n        x1, y1 = d.rect.left(), d.rect.top()\n        x2, y2 = d.rect.right(), d.rect.bottom()\n        face_image = image[y1:y2,x1:x2] # y, x\n        if face_image.shape[0] > 64:\n            face_image = cv2.resize(face_image,(64,64))\n        print(face_image.shape)\n        # Save 00_001.jpg -> 00_001_(face_count).jpg\n\n        path = pathlib.Path(file_path)\n        directory = str(path.parent.resolve())\n        filename = path.stem\n        extension = path.suffix\n        output_path = os.path.join(directory,f\"{filename}_{face_count:03}{extension}\")\n        print(f\"=================================OUTPUT File (absolute path) : {output_path}\")\n        try:\n            cv2.imwrite(output_path,face_image)\n            face_count = face_count + 1\n        except:\n            print(\"Exception occurred : {}\".format(output_path))\n    return\n\n\n\n\n\n\nRETURN_SUCCESS = 0\nRETURN_FAILURE = -1\n# Origin Image Pattern\nIMAGE_PATH_PATTERN = \"./origin_image/*\"\n# Output Directory\nOUTPUT_IMAGE_DIR = \"./face_image\"\n\ndef main():\n    print(\"===================================================================\")\n    print(\"Image face detection using OpenCV\")\n    print(\"Detect frontal faces in the given image files and resize them to 64x64\")\n    
print(\"===================================================================\")\n\n # 디렉토리 작성\n if not os.path.isdir(OUTPUT_IMAGE_DIR):\n os.mkdir(OUTPUT_IMAGE_DIR)\n # 디렉토리 내의 파일 제거\n delete_dir(OUTPUT_IMAGE_DIR, False)\n\n # 이미지 파일 읽기\n # TO-DO\n name_images = load_name_images(IMAGE_PATH_PATTERN)\n\n # 이미지별로 얼굴인식 ->2명의 연예인 200개\n for name_image in name_images:\n file_path = os.path.join(OUTPUT_IMAGE_DIR,f\"{name_image[0]}\")\n image = name_image[1]#실제 image 파일 \n\n # cascade_filepath = settings.CASCADE_FILE_PATH\n detect_dog_face(file_path, image)\n # # TO-DO \n\n\n return RETURN_SUCCESS\n\nif __name__ == \"__main__\":\n main()","sub_path":"CNN_project/img_face_detector.py","file_name":"img_face_detector.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114807543","text":"from read_datasetBreakfast import load_data, read_mapping_dict\nimport os\nimport numpy as np\n\nCOMP_PATH = ''\n\n''' \ntraining to load train set\ntest to load test set\n'''\nsplit = 'test'\n#split = 'test'\ntrain_split = os.path.join(COMP_PATH, 'splits/dev_train.split1.bundle') #Train Split\ntest_split = os.path.join(COMP_PATH, 'splits/test.split1.bundle') #Test Split\nGT_folder = os.path.join(COMP_PATH, 'groundTruth/') #Ground Truth Labels for each training video \nDATA_folder = os.path.join(COMP_PATH, 'Data/') #Frame I3D features for all videos\nmapping_loc = os.path.join(COMP_PATH, 'splits/mapping_bf.txt') \n\nactions_dict = read_mapping_dict(mapping_loc)\nif split == 'training':\n data_feat, data_labels = load_data( train_split, actions_dict, GT_folder, DATA_folder, datatype = split) #Get features and labels\nif split == 'test':\n data_feat = load_data( test_split, actions_dict, GT_folder, DATA_folder, datatype = split) #Get features only\n\n'''\nWrite Code Below\nPointers\nNeed to load the segments.txt file for segments for test videos \nOutput the CSV in correct format as shown in Evaluation Section\nId corresponds to the segments in order. 
\nExample - 30-150 = Id 0\n 150-428 = Id 1\n 428-575 = Id 2\nCategory is the Class of the Predicted Action\n'''\nprint(len(data_feat))\nprint(data_feat[0].shape)\n\n\n","sub_path":"utils/template_code.py","file_name":"template_code.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620313611","text":"# django imports\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\n# project imports\nfrom .models import Plan\nfrom Hashtag.models import Hashtag\n\n\nclass PlanCreate(TestCase):\n\n def test_create_ok(self):\n # given\n hashtag = Hashtag.objects.create(name=\"Music\")\n new_plan = {\n \"title\": \"TITLE\",\n \"description\": \"DESCRIPTION\",\n \"price\": 50.0,\n \"hashtags\": hashtag.id,\n }\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 1)\n\n def test_create_min_data_ok(self):\n # given\n new_plan = {\n \"title\": \"TITLE\",\n \"price\": 50.0,\n }\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 1)\n\n def test_create_same_data_ok(self):\n # given\n hashtag = Hashtag.objects.create(name=\"Music\")\n new_plan = {\n \"title\": \"TITLE\",\n \"description\": \"DESCRIPTION\",\n \"price\": 50.0,\n \"hashtags\": hashtag.id,\n }\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 2)\n\n def test_create_no_data_ko(self):\n # given\n new_plan = {\n }\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n def test_create_max_hashtags_ko(self):\n # given\n hashtags = []\n for x in range(settings.MAX_HASHTAGS + 1):\n new_hashtag = Hashtag.objects.create(name=\"Music{0}\".format(x))\n hashtags += [new_hashtag.id]\n\n new_plan = {\n \"title\": \"TITLE\",\n \"description\": \"DESCRIPTION\",\n \"price\": 50.0,\n \"hashtags\": hashtags,\n }\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n # when\n response = self.client.post(reverse('Plan:create'), new_plan)\n\n # then\n self.assertEqual(response.status_code, 200)\n num_plans = Plan.objects.all()\n self.assertEquals(len(num_plans), 0)\n\n def test_list_all_ok(self):\n # given\n new_plan = {\n \"title\": \"Sensational\",\n \"price\": 30.0,\n }\n first = Plan.objects.create(**new_plan) # FEV000000\n new_plan = {\n \"title\": \"Domino\",\n \"price\": 20.0,\n }\n second = Plan.objects.create(**new_plan) # FEV000001\n new_plan = {\n \"title\": \"Domino\",\n \"price\": 20.0,\n }\n third = Plan.objects.create(**new_plan) # FEV000002\n new_plan = {\n \"title\": 
\"Architecture\",\n \"price\": 20.0,\n }\n fourth = Plan.objects.create(**new_plan) # FEV000003\n new_plan = {\n \"title\": \"World Cup\",\n \"price\": 10.0,\n }\n fifth = Plan.objects.create(**new_plan) # FEV000004\n\n # when\n response = self.client.get(reverse('Plan:list'))\n\n # then\n self.assertEqual(response.status_code, 200)\n # First order: price\n self.assertEquals(response.context['plans'][0], fifth)\n # Second order: title\n self.assertEquals(response.context['plans'][1], fourth)\n # Third order: id\n self.assertEquals(response.context['plans'][2], second)\n self.assertEquals(response.context['plans'][3], third)\n self.assertEquals(response.context['plans'][4], first)\n\n def test_list_all_without_data_ok(self):\n # when\n response = self.client.get(reverse('Plan:list'))\n\n # then\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['plans'], [])\n","sub_path":"question2/Plan/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"611134454","text":"#!/usr/bin/env python\n# * coding: utf8 *\n'''\ntest_change_detection.py\na module containing tests for the change detection module\n'''\nimport logging\nimport unittest\nfrom os import path\n\nfrom pytest import raises\n\nimport arcpy\nfrom forklift import core\nfrom forklift.change_detection import (ChangeDetection, _get_hashes,\n hash_field, table_name_field)\nfrom forklift.models import Crate\n\ncurrent_folder = path.dirname(path.abspath(__file__))\ntest_data_folder = path.join(current_folder, 'data')\ntest_fgdb = path.join(test_data_folder, 'test_change_detection', 'data.gdb')\nhash_table = path.join(test_fgdb, 'TableHashes')\n\n\nclass TestChangeDetection(unittest.TestCase):\n def test_has_table(self):\n change_detection = ChangeDetection(['ChangeDetection'], test_fgdb, hash_table=hash_table)\n\n self.assertTrue(change_detection.has_table('UPDATE_TESTS.dbo.counties'))\n self.assertFalse(change_detection.has_table('bad table name'))\n\n def test_has_changed(self):\n change_detection = ChangeDetection(['ChangeDetection'], test_fgdb, hash_table=hash_table)\n\n self.assertFalse(change_detection.has_changed('UPDATE_TESTS.dbo.counties'))\n self.assertTrue(change_detection.has_changed('UPDATE_TESTS.dbo.providers'))\n\n with raises(Exception):\n assert change_detection.has_table('bad table name')\n\n\nclass TestGetHashes(unittest.TestCase):\n def test_returns_data(self):\n hashes = _get_hashes([path.join(test_fgdb, 'ChangeDetection')])\n expected = {'update_tests.dbo.counties': '1',\n 'update_tests.dbo.providers': '2',\n 'counties': '5'}\n\n self.assertEqual(hashes, expected)\n\n def test_throw_on_duplicate_table_name(self):\n tables = ['ChangeDetection', 'ChangeDetectionWithDup']\n\n with raises(Exception):\n assert _get_hashes([path.join(test_fgdb, table) for table in tables])\n\n def test_throw_on_bad_path(self):\n tables = ['ChangeDetection', 'BadPath']\n\n with raises(Exception):\n assert _get_hashes([path.join(test_fgdb, table) for table in tables])\n\n\ncore.init(logging.getLogger('forklift'))\nclass TestUpdate(unittest.TestCase):\n def test_updates_data(self):\n scratch_hash_table = path.join(arcpy.env.scratchGDB, path.basename(hash_table))\n scratch_destination = path.join(arcpy.env.scratchGDB, 'Counties')\n temp_data = [scratch_hash_table, scratch_destination]\n for dataset in temp_data:\n if arcpy.Exists(dataset):\n arcpy.management.Delete(dataset)\n 
arcpy.management.Copy(hash_table, scratch_hash_table)\n\n        change_detection = ChangeDetection(['ChangeDetection'], test_fgdb, hash_table=scratch_hash_table)\n\n        table = 'counties'\n        crate = Crate(table, test_fgdb, arcpy.env.scratchGDB, path.basename(scratch_destination))\n        crate.result = (Crate.CREATED, None)\n        core._create_destination_data(crate, skip_hash_field=True)\n        change_detection.current_hashes[table] = '8'\n        result = change_detection.update(crate)\n\n        where = f'{table_name_field} = \\'{table}\\''\n        with arcpy.da.SearchCursor(scratch_hash_table, [hash_field], where_clause=where) as cursor:\n            self.assertEqual(next(cursor)[0], '8')\n\n        self.assertEqual(result[0], Crate.CREATED)\n\n        change_detection.current_hashes[table] = '9'\n        crate.result = (Crate.UNINITIALIZED, None)\n        result = change_detection.update(crate)\n\n        where = f'{table_name_field} = \\'{table}\\''\n        with arcpy.da.SearchCursor(scratch_hash_table, [hash_field], where_clause=where) as cursor:\n            self.assertEqual(next(cursor)[0], '9')\n\n        self.assertEqual(result[0], Crate.UPDATED)\n\n    def test_invalid_data(self):\n        scratch_hash_table = path.join(arcpy.env.scratchGDB, path.basename(hash_table))\n        scratch_destination = path.join(arcpy.env.scratchGDB, 'Counties')\n        temp_data = [scratch_hash_table, scratch_destination]\n        for dataset in temp_data:\n            if arcpy.Exists(dataset):\n                arcpy.management.Delete(dataset)\n        arcpy.management.Copy(hash_table, scratch_hash_table)\n\n        change_detection = ChangeDetection(['ChangeDetection'], test_fgdb, hash_table=scratch_hash_table)\n\n        table = 'update_tests.dbo.providers'\n        crate = Crate(table, 'someWorkspace', arcpy.env.scratchGDB, path.basename(scratch_destination))\n        result = change_detection.update(crate)\n\n        self.assertEqual(result[0], Crate.INVALID_DATA)\n","sub_path":"tests/test_change_detection.py","file_name":"test_change_detection.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88849302","text":"\n# Importing packages needed. \nimport requests\nfrom bs4 import BeautifulSoup\nimport random\nimport nltk # needed for nltk.FreqDist, the stopwords corpus, and nltk.NaiveBayesClassifier below\nfrom nltk.corpus import movie_reviews\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport pickle\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\n\n#You can pass it the URL of a review site. I reviewed the HTML structure prior to parsing.\n# Would advise reviewing the structure before passing it through BS4\n\nsauce = 'URL'\nr = requests.get(sauce)\n\n#Turning the request into a text file for preparing for analysis\nhtml_doc = r.text\n\n#Cleaning the text file into structured HTML page\nsoup = BeautifulSoup(html_doc, \"html.parser\")\np_tags = soup.find_all(\"p\")\n\nfor text in p_tags:\n    # Creating a new file for reviews. In this case I used Yelp\n    yelp_reviews = open(\"yelp_reviews.txt\", 'a')\n    texts = text.get_text()\n    yelp_reviews.write(texts)\n    yelp_reviews.write('\\n')\n    yelp_reviews.close()\n\n# Now that we have some data points for our ML model\n# This is going to be a classification problem, meaning there are limited outcomes\n# To simplify we are going to use a binary outcome either positive or negative. 
\n\n#Empty list to store values in\nall_words = []\n\n#Getting our text file of yelp reviews\nreviews = open(\"yelp_reviews.txt\").read()\nfile= reviews.split(\" \")\n\n#processing our file for Sentiment Analysis\nfor line in file:\n all_words.append(line)\n \nall_words = nltk.FreqDist(all_words)\nstopwords = nltk.corpus.stopwords.words('english')\nnew_words = nltk.FreqDist(w.lower() for w in all_words if w not in stopwords)\nword_features = list(new_words.keys())[:3000]\n\n#finding features from our text files\ndef find_features(document):\n words = set(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features\n\n\n#Importing movie reviews which are preprocessed as either negative or positive in the document variable\ndocuments = [(list(movie_reviews.words(fileid)), category)\n for category in movie_reviews.categories()\n for fileid in movie_reviews.fileids(category)]\n\n#Since they are in order we shuffle them to have a more accurate ML \nrandom.shuffle(documents)\nfeaturesets = [(find_features(rev), category) for (rev, category) in documents]\n\n\n#Creating training set and a testing set for our ML model\ntraining_set = featuresets[:1900]\ntesting_set= featuresets[1900:]\n\n#Training and Testing our ML model for sentiment analysis\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(training_set)\nprint(\"BernoulliNB_classifier accuracy percent:\", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nprint(\"MNB_classifier accuracy percent:\", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nprint(\"LogisticRegression_classifier accuracy percent:\", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nprint(\"LinearSVC_classifier accuracy percent:\", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)\n\n\nSGDC_classifier = SklearnClassifier(SGDClassifier())\nSGDC_classifier.train(training_set)\nprint(\"SGDClassifier accuracy percent:\",nltk.classify.accuracy(SGDC_classifier, testing_set)*100)\n\n\n#printing how accurate we were with the model\n# Simple Naive Bayes wasn't as accurate as we would like for a classifier and we can further apply other models for more accuracy\nprint(\"Classifier accuracy percent:\",(nltk.classify.accuracy(classifier, testing_set))*100)\nprint(classifier.show_most_informative_features(10))\n\n#Saving our ML model for easy use later as pickled file\n# You can save multiple trained models for future use following the format below. 
\nnaive_bayes = open(\"naivebayes.pickle\", \"wb\")\npickle.dump(classifier,naive_bayes)\nnaive_bayes.close()\n","sub_path":"Text Sentiment/textSentiment.py","file_name":"textSentiment.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"263969686","text":"\"\"\"\nThis module contains the player class which defines the players abilities and\ninteractions with the world and handles player movement\n\"\"\"\n\n\nimport os\nimport pygame\nfrom map import *\nfrom constants import *\n\nclass Player(GeneralSquare):\n \"\"\"Main class for player\"\"\"\n \n ID = 'default'\n NAME = ''\n STATE = {'LEVEL': 1, 'HP': 1000, 'ATK': 10, 'DEF': 10, 'GOLD': 0, 'EXP': 0}\n KEY_COLLECTION = {'Yellow Key': 1, 'Blue Key': 1, 'Red Key': 1}\n FLOOR = 1\n FLOOR_SET = {FLOOR} #storing floors have been through\n COMPASS = False\n ILLUSTRATION = False\n WIN = False\n\n def playSound(self, file):\n \"\"\"function to play sound effect\"\"\"\n\n effect = pygame.mixer.Sound(os.path.join('Sound', file))\n effect.play()\n\n def showMessage(self, text, screen):\n \"\"\" function to show message box on screen\"\"\"\n \n width = SCREEN_X - 100\n height = SCREEN_Y // 7\n surf = pygame.Surface((width, height))\n pygame.draw.rect(surf, ORANGE, [0, 0, width, height], 5)\n font = pygame.font.Font(None, width // 25)\n text_print = font.render('{}'.format(text), True, WHITE)\n surf.blit(text_print, (width / 2 - text_print.get_width() / 2, height / 2 - 20))\n screen.blit(surf, (SCREEN_X / 4 + (SCREEN_X - width) / 2, (SCREEN_Y - height) / 2))\n pygame.display.flip()\n pygame.time.wait(750)\n\n def update(self, pressed_keys, overlay, world_overlays, world_floors, screen):\n \"\"\"function to update player interactions\"\"\"\n\n old_position = self.rect[0:2]\n if pressed_keys[K_UP]:\n self.rect.move_ip(0, int(-SCREEN_Y / 13))\n elif pressed_keys[K_DOWN]:\n self.rect.move_ip(0, int(SCREEN_Y / 13))\n elif pressed_keys[K_RIGHT]:\n self.rect.move_ip(int(SCREEN_X / 13), 0)\n elif pressed_keys[K_LEFT]:\n self.rect.move_ip(int(-SCREEN_X / 13), 0)\n\n if pygame.sprite.spritecollideany(self, COLLISION_TYPE):\n\n # If the player collide a door\n if pygame.sprite.spritecollideany(self, DOOR_TYPE):\n for key in overlay:\n if type(overlay[key]) == YellowDoor:\n if pygame.sprite.collide_rect(self, overlay[key]):\n if self.KEY_COLLECTION['Yellow Key'] > 0:\n self.KEY_COLLECTION['Yellow Key'] -= 1\n overlay[key].kill()\n overlay[key] = 0\n elif type(overlay[key]) == BlueDoor:\n if pygame.sprite.collide_rect(self, overlay[key]):\n if self.KEY_COLLECTION['Blue Key'] > 0:\n self.KEY_COLLECTION['Blue Key'] -= 1\n overlay[key].kill()\n overlay[key] = 0\n elif type(overlay[key]) == RedDoor:\n if pygame.sprite.collide_rect(self, overlay[key]):\n if self.KEY_COLLECTION['Red Key'] > 0:\n self.KEY_COLLECTION['Red Key'] -= 1\n overlay[key].kill()\n overlay[key] = 0\n\n # If the player collide a stair\n if pygame.sprite.spritecollideany(self, STAIR_TYPE):\n for key in overlay:\n if type(overlay[key]) == StairUp:\n if pygame.sprite.collide_rect(self, overlay[key]):\n self.FLOOR += 1\n self.FLOOR_SET = self.FLOOR_SET | {self.FLOOR}\n elif type(overlay[key]) == StairDown:\n if pygame.sprite.collide_rect(self, overlay[key]):\n self.FLOOR -= 1\n self.FLOOR_SET = self.FLOOR_SET | {self.FLOOR}\n\n # If the player collide a monster\n if pygame.sprite.spritecollideany(self, MONSTER_TYPE):\n for key in overlay:\n try:\n if pygame.sprite.collide_rect(self, overlay[key]):\n # Check 
\n monster_ability = {'HP': overlay[key].HP, 'ATK': overlay[key].ATK, 'ATK2': overlay[key].ATK2, 'ATK3': overlay[key].ATK3, 'DEF': overlay[key].DEF}\n player_ability = {'HP': self.STATE['HP'], 'ATK': self.STATE['ATK'], 'DEF': self.STATE['DEF']}\n\n if monster_ability['ATK'] > player_ability['DEF']:\n player_minus = monster_ability['ATK'] - player_ability['DEF']\n else:\n player_minus = 0\n\n if player_ability['ATK'] > monster_ability['DEF']:\n monster_minus = player_ability['ATK'] - monster_ability['DEF']\n else:\n break\n\n if monster_ability['ATK2'] != 0:\n player_ability['HP'] -= monster_ability['ATK2']\n\n if monster_ability['ATK3'] != 0:\n player_ability['HP'] -= player_ability['HP'] // monster_ability['ATK3']\n\n while monster_ability['HP'] > 0 and player_ability['HP'] > 0:\n monster_ability['HP'] -= monster_minus\n player_ability['HP'] -= player_minus
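\n\n # Editor's note (added, not original code): damage per round is\n # fixed, so the loop above has a closed form for the winning case:\n # rounds = ceil(monster_HP / monster_minus), after which the player\n # has lost rounds * player_minus HP; monster_minus > 0 is guaranteed\n # here by the break in the ATK/DEF check above.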
\n\n # If the player wins\n if monster_ability['HP'] <= 0:\n if monster_ability['ATK2'] != 0:\n self.STATE['HP'] -= monster_ability['ATK2']\n if monster_ability['ATK3'] != 0:\n self.STATE['HP'] -= self.STATE['HP'] // monster_ability['ATK3']\n overlay[key].draw_popup(self, screen) # draw battle box\n self.STATE['GOLD'] += overlay[key].GOLD\n self.STATE['EXP'] += overlay[key].EXP\n if overlay[key].NAME == 'Boss':\n self.showMessage('You have conquered the magic tower!', screen)\n self.WIN = True\n overlay[key].kill()\n overlay[key] = 0\n except AttributeError:\n pass\n\n # If the player collides with an NPC\n if pygame.sprite.spritecollideany(self, NPC_TYPE):\n i = 0\n for key in overlay:\n try:\n if pygame.sprite.collide_rect(self, overlay[key]):\n if overlay[key].ID == 'Fairy':\n print('Fairy')\n overlay[key].action(world_overlays)\n print('Fairy2')\n self.showMessage('Hi {}! Welcome to Magic Tower! Enjoy your game!'.format(self.NAME), screen)\n if i == 93:\n overlay[key].kill()\n overlay[key] = 0\n elif overlay[key].ID == 'Thief':\n overlay[key].action(world_overlays)\n self.showMessage('Thanks for saving me, {}! I will open the magic door in floor 3 for you!'.format(self.NAME), screen)\n overlay[key].kill()\n overlay[key] = 0\n elif overlay[key].ID == 'Princess':\n self.showMessage('You are my hero, {}! I will wait for you here until you defeat the final boss!'.format(self.NAME), screen)\n else:\n overlay[key].action(self, screen)\n except AttributeError:\n pass\n i += 1\n\n # If the player collides with an item\n if pygame.sprite.spritecollideany(self, ITEM_TYPE):\n for key in overlay:\n try:\n if pygame.sprite.collide_rect(self, overlay[key]):\n self.playSound('pickup.wav')\n if overlay[key].ID == 10:\n overlay[key].effect(world_overlays, world_floors)\n self.showMessage(overlay[key].message, screen)\n overlay[key].kill()\n overlay[key] = 0\n elif overlay[key].effect(self):\n self.showMessage(overlay[key].message, screen)\n overlay[key].kill()\n overlay[key] = 0\n else:\n self.showMessage(overlay[key].message, screen)\n except AttributeError:\n pass\n\n self.rect[0:2] = old_position\n elif self.rect[0] < int(SCREEN_X / 13) or self.rect.right > int(SCREEN_X / 13 * 12) or self.rect[1] < int(SCREEN_Y / 13) or self.rect.bottom > int(SCREEN_Y / 13 * 12):\n self.rect[0:2] = old_position\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610386498","text":"\"\"\"Define tests for the REST API.\"\"\"\nimport datetime\n\nimport aiohttp\nimport pytest\n\nfrom aioambient import Client\nfrom aioambient.errors import RequestError\n\nfrom .common import TEST_API_KEY, TEST_APP_KEY, TEST_MAC, load_fixture\n\n\n@pytest.mark.asyncio\nasync def test_api_error(aresponses):\n \"\"\"Test the REST API raising an exception upon HTTP error.\"\"\"\n aresponses.add(\n \"api.ambientweather.net\",\n \"/v1/devices\",\n \"get\",\n aresponses.Response(text=\"\", status=500),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(TEST_API_KEY, TEST_APP_KEY, session=session)\n\n with pytest.raises(RequestError):\n await client.api.get_devices()\n\n\n@pytest.mark.asyncio\nasync def test_get_device_details(aresponses):\n \"\"\"Test retrieving device details from the REST API.\"\"\"\n aresponses.add(\n \"api.ambientweather.net\",\n f\"/v1/devices/{TEST_MAC}\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"device_details_response.json\"), status=200\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(TEST_API_KEY, TEST_APP_KEY, session=session)\n\n device_details = await client.api.get_device_details(\n TEST_MAC, end_date=datetime.datetime(2019, 1, 6)\n )\n assert len(device_details) == 2\n\n\n@pytest.mark.asyncio\nasync def test_get_devices(aresponses):\n \"\"\"Test retrieving devices from the REST API.\"\"\"\n aresponses.add(\n \"api.ambientweather.net\",\n \"/v1/devices\",\n \"get\",\n aresponses.Response(text=load_fixture(\"devices_response.json\"), status=200),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(TEST_API_KEY, TEST_APP_KEY, session=session)\n\n devices = await client.api.get_devices()\n assert len(devices) == 2\n\n\n@pytest.mark.asyncio\nasync def test_session_from_scratch(aresponses):\n \"\"\"Test that an aiohttp ClientSession is created on the fly if needed.\"\"\"\n aresponses.add(\n \"api.ambientweather.net\",\n \"/v1/devices\",\n \"get\",\n aresponses.Response(text=load_fixture(\"devices_response.json\"), status=200),\n )\n\n client = Client(TEST_API_KEY, TEST_APP_KEY)\n\n devices = await client.api.get_devices()\n assert len(devices) == 2
2\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338781487","text":"import json\nimport os\n\nimport pandas as pd\nfrom dagster import EventMetadata\nfrom dagster.core.asset_defs import build_assets_job\nfrom dagster.utils import file_relative_path\nfrom dagster_dbt import dbt_cli_resource\nfrom dagster_dbt.asset_defs import load_assets_from_dbt_manifest\nfrom dagster_pyspark import pyspark_resource\nfrom hacker_news_assets.pipelines.download_pipeline import S3_SPARK_CONF\nfrom hacker_news_assets.resources.snowflake_io_manager import (\n SHARED_SNOWFLAKE_CONF,\n connect_snowflake,\n snowflake_io_manager_dev,\n snowflake_io_manager_prod,\n)\n\nDBT_PROJECT_DIR = file_relative_path(__file__, \"../../hacker_news_dbt\")\nDBT_PROFILES_DIR = DBT_PROJECT_DIR + \"/config\"\n\n# We define two sets of resources, one for the prod mode, which writes to production schemas and\n# one for dev mode, which writes to alternate schemas\nPROD_RESOURCES = {\n \"dbt\": dbt_cli_resource.configured(\n {\"profiles_dir\": DBT_PROFILES_DIR, \"project_dir\": DBT_PROJECT_DIR, \"target\": \"prod\"}\n ),\n \"warehouse_io_manager\": snowflake_io_manager_prod,\n # \"parquet_io_manager\": parquet_io_manager.configured({\"base_path\": get_system_temp_directory()}),\n \"pyspark\": pyspark_resource,\n}\n\nDEV_RESOURCES = {\n \"dbt\": dbt_cli_resource.configured(\n {\"profiles-dir\": DBT_PROFILES_DIR, \"project-dir\": DBT_PROJECT_DIR, \"target\": \"dev\"}\n ),\n \"warehouse_io_manager\": snowflake_io_manager_dev,\n # \"parquet_io_manager\": parquet_io_manager.configured(\n # {\"base_path\": \"s3://hackernews-elementl-prod\"}\n # ),\n \"pyspark\": pyspark_resource.configured(S3_SPARK_CONF),\n}\n\n\ndef asset_metadata(_context, model_info):\n config = dict(SHARED_SNOWFLAKE_CONF)\n config[\"schema\"] = model_info[\"schema\"]\n with connect_snowflake(config=config) as con:\n df = pd.read_sql(f\"SELECT * FROM {model_info['name']} LIMIT 5\", con=con)\n num_rows = con.execute(f\"SELECT COUNT(*) FROM {model_info['name']}\").fetchone()\n\n return {\"Data sample\": EventMetadata.md(df.to_markdown()), \"Rows\": num_rows[0]}\n\n\n# this list has one element per dbt model\nassets = load_assets_from_dbt_manifest(\n json.load(open(os.path.join(DBT_PROJECT_DIR, \"target\", \"manifest.json\"))),\n runtime_metadata_fn=asset_metadata,\n io_manager_key=\"warehouse_io_manager\",\n)\nactivity_stats = build_assets_job(\"activity_stats\", assets, [], resource_defs=DEV_RESOURCES)\n","sub_path":"examples/hacker_news_assets/hacker_news_assets/pipelines/dbt_pipeline.py","file_name":"dbt_pipeline.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140530168","text":"# coding: utf-8\n\nimport os\nimport numpy as np\n\nfrom PIL import Image\n\n\nclass Array(object):\n\n # 常量设置,代表矩阵4个方向的边\n LEFT = 0\n TOP = 1\n RIGHT = 2\n BOTTOM = 3\n\n def __init__(self):\n self.image_path = None\n self.image_obj = None\n self.image_array = None\n\n self.width = 0\n self.height = 0\n\n def __str__(self):\n if self.image_path:\n return 'Array object of {}'.format(self.image_path)\n else:\n return 'Empty Array object'\n\n def __update_size(self):\n \"\"\"\n 从图像或矩阵载入矩阵,以及矩阵大小改变后,更新矩阵高度和宽度数据\n :return: \n \"\"\"\n self.height, self.width = self.image_array.shape\n\n def hjoin(self, other):\n \"\"\"\n 水平合并\n :param other: 
\n\n    def hjoin(self, other):\n        \"\"\"\n        Horizontal join\n        :param other: the other Array object to join\n        :return: the joined Array object\n        \"\"\"\n        if self.image_array is None:\n            self.image_array = other.image_array\n        else:\n            self.image_array = np.hstack((self.image_array, other.image_array))\n        self.__update_size()\n        return self\n\n    def vjoin(self, other):\n        \"\"\"\n        Vertical join\n        :param other: the other Array object to join\n        :return: the joined Array object\n        \"\"\"\n        if self.image_array is None:\n            self.image_array = other.image_array\n        else:\n            self.image_array = np.vstack((self.image_array, other.image_array))\n        self.__update_size()\n        return self\n\n    def load_image(self, image_path):\n        \"\"\"\n        Load an image\n        :param image_path: path to the image file\n        :return: self\n        \"\"\"\n        self.image_path = image_path\n\n        # Check that the image exists\n        if not os.path.exists(image_path):\n            print('No such image')\n            return self\n\n        # Build the Image object\n        self.image_obj = Image.open(image_path).convert('RGB')\n        self.width = self.image_obj.size[0]\n        self.height = self.image_obj.size[1]\n        self.image_to_array()\n        del self.image_obj\n        return self\n\n    def image_to_array(self):\n        \"\"\"\n        Convert the image into a matrix\n        :return: self\n        \"\"\"\n        # np.sum(array, axis=2)\n        # sums along the channel axis,\n        # producing the grayscale matrix of the image\n        self.image_array = np.sum(np.asarray(self.image_obj), axis=2) // 3\n        return self\n\n    def binary(self):\n        \"\"\"\n        Binarize the image\n        :return: self\n        \"\"\"\n        self.image_array = self.image_array // 128\n        return self\n\n    def convert_to_image(self, image_path):\n        \"\"\"\n        Build an image from the matrix and save it\n        :param image_path:\n        :return:\n        \"\"\"\n        image = Image.fromarray(self.image_array.astype(np.uint8))\n        image.save(image_path)\n\n    def load_array(self, array):\n        \"\"\"\n        Load data from an array\n        :param array: the array to load\n        :return:\n        \"\"\"\n        self.image_array = np.array(array)\n        self.__update_size()\n        return self\n\n    def get_edge(self, direction):\n        \"\"\"\n        Get an edge vector of the image matrix\n        :param direction: which edge to take\n        :return: the edge vector\n        \"\"\"\n        if direction == Array.LEFT:\n            return self.image_array[:, 0]\n        elif direction == Array.TOP:\n            return self.image_array[0]\n        elif direction == Array.RIGHT:\n            return self.image_array[:, -1]\n        elif direction == Array.BOTTOM:\n            return self.image_array[-1]\n        else:\n            return self.image_array[:, 0]\n\n    def match(self, array, direction=2, kernel=(0.1, 0.8, 0.1)):\n        \"\"\"\n        Compute the cosine similarity between two adjacent edges\n        :param array: the neighbouring Array object\n        :return:\n        \"\"\"\n        begin_edge = self.get_edge(direction)\n        end_edge = array.get_edge((direction + 2) % 4)\n        # begin_edge = self.convolution(begin_edge.copy(), np.array(kernel))\n        # end_edge = self.convolution(end_edge.copy(), np.array(kernel))\n\n        cos_theta = np.dot(begin_edge, end_edge) / np.linalg.norm(begin_edge) / np.linalg.norm(end_edge)\n        return cos_theta\n\n        # edge_diff = begin_edge - end_edge\n        # counter = 0\n        # for diff in edge_diff:\n        #     if diff < 0.1:\n        #         counter += 1\n        # return counter / len(begin_edge)\n\n    def get_row(self):\n        \"\"\"\n        Get a per-row feature of the matrix,\n        characterising how the text is distributed along the y axis,\n        so that shreds from different text rows can be clustered\n        :return:\n        \"\"\"\n        array = np.sum(255 - self.image_array, 1) / 255 / self.width\n        return Array().load_array(array.reshape((self.height, 1)))\n\n    def convolution(self, vector, kernel):\n        \"\"\"\n        Convolution\n        :param vector: the vector to convolve\n        :param kernel: the convolution kernel\n        :return:\n        \"\"\"\n        kernel = kernel / sum(kernel)\n        new_vector = vector.copy()\n        vector_len = len(vector)\n        kernel_len = len(kernel)\n        center_index = kernel_len // 2\n\n        for x in range(vector_len):\n            new_value = 0\n            begin_index = center_index - x if center_index - x >= 0 else 0\n            end_index = center_index + vector_len - x if center_index + vector_len - x < kernel_len else kernel_len - 1\n            for y in range(begin_index, end_index):\n                new_value += vector[x + y - center_index] * kernel[y]\n            new_vector[x] = new_value\n\n        return
new_vector\n","sub_path":"practice/shred_recovery/modules/Array.py","file_name":"Array.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281125437","text":"from nltk import word_tokenize, pos_tag\nfrom google_images_download import google_images_download\nimport json\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--images_folder\", type = str, default = \"data/visualsem_images\", help = \"Where the visualsem images are stored. \")\n parser.add_argument(\"--initial_node_file\", type = str, default = \"data/nodes_1000.json\", help = \"Where the initial nodes json is stored. \")\n args = parser.parse_args()\n\n with open(args.initial_node_file, 'r') as f:\n nodes = json.loads(f.read())\n\n # Create sufficient descriptions for nodes without images in the core\n search_names = []\n zero_ims = [n for n in nodes if len(nodes[n][\"images\"]) == 0] # LINE TO CHANGE IF MORE NODES NEED IMAGES\n for n in zero_ims:\n sent = nodes[n][\"description\"][0]\n e = pos_tag(word_tokenize(sent))\n extra = [w[0] for w in e if \"NN\" in w[1] and (not w[0][0].isupper())]\n new_sent = nodes[n][\"senses\"][0].replace(\"_\", \" \").split() + extra\n search_names.append((\" \".join(new_sent[:3]), n))\n\n print(search_names)\n\n response = google_images_download.googleimagesdownload()\n paths = []\n for (name, fold) in search_names:\n print(\"done\")\n absolute_image_paths = response.download({\"keywords\":name,\"limit\":10,\"print_urls\":False,\n \"usage_rights\":\"labeled-for-reuse-with-modifications\",\n \"output_directory\":args.images_folder, \"image_directory\":fold,\n }) #\"safe_search\":True\n paths.append(absolute_image_paths)\n","sub_path":"dataset_creation/google_download.py","file_name":"google_download.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37136833","text":"import cv2\nimport numpy as np\nimport face_recognition\n\nimgElon = face_recognition.load_image_file(\"ImageBasics/Elon-Musk.jpg\")\nimgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)\n\nimgTest = face_recognition.load_image_file(\"ImageBasics/Elon Mask Test.jpg\")\nimgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)\n\nfaceLoc = face_recognition.face_locations(imgElon)[0]\nencodeElon = face_recognition.face_encodings(imgElon)[0]\ncv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]), (255,0,255), 2)\n#print(faceLoc)\n\nfaceLocTest = face_recognition.face_locations(imgTest)[0]\nencodeElonTest = face_recognition.face_encodings(imgTest)[0]\ncv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]), (faceLocTest[1], faceLocTest[2]), (255,0,255), 2)\n\n## Compare Above image\nresults = face_recognition.compare_faces([encodeElon], encodeElonTest)\nfaceDis = face_recognition.face_distance([encodeElon], encodeElonTest)\nprint(results, faceDis)\n\ncv2.putText(imgTest, f'{results} {round(faceDis[0],2)}', (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)\ncv2.imshow('Elon Musk', imgElon)\ncv2.imshow('Elon Test', imgTest)\ncv2.waitKey(0)","sub_path":"faceRecognitionImage.py","file_name":"faceRecognitionImage.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650764032","text":"import sys\r\nimport matlab.engine as matlab\r\nimport matplotlib.pyplot as plt\r\nfrom 
matplotlib.backends.backend_qt5agg import FigureCanvas \r\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\r\nimport numpy as np\r\nfrom scipy.integrate import simps\r\nfrom scipy.signal import find_peaks\r\nfrom PyQt5.uic import loadUi\r\nfrom PyQt5.QtWidgets import QDialog,QApplication,QVBoxLayout\r\nfrom PyQt5 import QtCore,QtGui\r\n\r\n\r\n\r\nclass MatlabBaglanti():\r\n \r\n def __init__(self,zaman,aci):\r\n self.motor = matlab.start_matlab()\r\n self.zaman=zaman\r\n self.aci=aci\r\n \r\n\r\n \r\n def ModelYukle(self):\r\n self.motor.eval(\"model = 'ModelModelModel'\",nargout=0)\r\n self.motor.eval(\"load_system(model)\",nargout=0)\r\n \r\n def Simule(self):\r\n \r\n #self.metin='Simülasyon Başlıyor...'\r\n self.motor.eval(\"set_param(model,'StopTime','\"+str(self.zaman)+\"')\",nargout=0)\r\n self.motor.set_param('ModelModelModel/Glider1/aci','value',str(self.aci),nargout=0)\r\n self.motor.eval(\"sim(model)\",nargout=0)\r\n #self.metin='Simülasyon Tamamlandı'\r\n self.zaman = np.array(self.motor.workspace['tout'])\r\n self.hiz = np.matrix(self.motor.workspace['Velocity'])\r\n self.results= np.matrix(self.motor.workspace['results'])\r\n self.soc = self.results[:,0]\r\n self.guc = self.results[:,4]\r\n self.tork = self.results[:,13]\r\n \r\n def SonucGoster(self):\r\n \r\n fig = plt.figure(figsize=(30,20))\r\n plt.suptitle('Zamana Göre Simülasyon Çıktıları',fontsize=30)\r\n\r\n\r\n plt.subplot(2, 2, 1)\r\n plt.plot(self.zaman,self.hiz,color='green')\r\n plt.ylabel('Hız (Mil/Saat)', fontsize=20)\r\n plt.xlabel('Zaman (s)', fontsize=20)\r\n\r\n plt.subplot(2, 2, 2)\r\n plt.plot(self.zaman,self.soc,color='green')\r\n plt.ylabel('State of Charge (%)', fontsize=20)\r\n plt.xlabel('Zaman (s)', fontsize=20)\r\n\r\n plt.subplot(2, 2, 3)\r\n plt.scatter(np.array(self.hiz),np.array(self.guc),marker='o',color='gray')\r\n plt.ylabel('Batarya Gücü', fontsize=20)\r\n plt.xlabel('Hız', fontsize=20)\r\n\r\n plt.subplot(2, 2, 4)\r\n plt.scatter(np.array(self.hiz),np.array(self.tork),marker='v',color='gray')\r\n plt.ylabel('Tork', fontsize=20)\r\n plt.xlabel('Hız', fontsize=20)\r\n plt.show()\r\n\r\n\r\n \r\n \r\n \r\n def PeakGoster(self):\r\n \r\n metin=\"Hızın Peak Yaptığı Saniyeler :\\n\"\r\n x = np.array(self.zaman).reshape(-1,)\r\n y = np.array(self.hiz).reshape(-1,)\r\n self.peaks, _ = find_peaks(y)\r\n self.peaknokta = [x /10 for x in self.peaks]\r\n for i in range (0,len(self.peaknokta)):\r\n metin+=str(i+1)+\". 
--> \"+str(self.peaknokta[i])+\"\\n\"\r\n plt.plot(y)\r\n plt.plot(self.peaks, y[self.peaks], \"x\")\r\n plt.text(50, 30, metin, fontsize=10)\r\n plt.grid(True)\r\n plt.show()\r\n \r\n \r\n\r\n\r\nclass Pencere(QDialog):\r\n \r\n def __init__(self):\r\n super(Pencere,self).__init__()\r\n loadUi(\"C:\\\\Users\\\\Public\\\\interface.ui\",self)\r\n\r\n self.pushButton.clicked.connect(self.Baglan)\r\n self.pushButton_2.clicked.connect(self.SimulasyonCikti)\r\n self.pushButton_3.clicked.connect(self.PeakCikti)\r\n\r\n def Baglan(self):\r\n self.simulasyonzaman=int(self.sure.value())\r\n self.egimaci=int(self.dial.value())\r\n self.progressBar.setValue(25)\r\n self.label_2.setText('MATLAB Başlatılıyor...')\r\n self.motor=MatlabBaglanti(self.simulasyonzaman,self.egimaci)\r\n self.label_2.setText('MATLAB Başlatıldı')\r\n self.progressBar.setValue(50)\r\n self.label_3.setText('Model Yükleniyor...')\r\n self.motor.ModelYukle()\r\n self.label_3.setText('Model Yüklendi')\r\n self.progressBar.setValue(75)\r\n self.label_4.setText('Simülasyon Başlıyor...')\r\n self.motor.Simule()\r\n self.label_4.setText('Simülasyon Tamamlandı')\r\n self.progressBar.setValue(100)\r\n self.MesafeGoster()\r\n \r\n def MesafeGoster(self):\r\n x=list(np.array(self.motor.hiz).reshape(-1,))\r\n y=list(np.array(self.motor.zaman).reshape(-1,))\r\n y1 = [x * saniyetosaat for x in y]\r\n alan=simps(x,y1)\r\n \r\n fig, ax = plt.subplots()\r\n ax.set_title('Alınan mesafe :{} mil / {} kilometre'.format(round(alan,2),round(1.609344*alan,2)))\r\n ax.fill_between(y,x, 0,\r\n color='red', \r\n alpha=0.2);\r\n\r\n self.plotWidget = FigureCanvas(fig)\r\n lay = QVBoxLayout(self.content_plot) \r\n lay.setContentsMargins(0, 0, 0, 0) \r\n lay.addWidget(self.plotWidget)\r\n plt.close(fig)\r\n def PeakCikti(self):\r\n self.motor.PeakGoster()\r\n \r\n \r\n def SimulasyonCikti(self):\r\n self.motor.SonucGoster()\r\n \r\nif __name__ == '__main__':\r\n app=QApplication(sys.argv)\r\n widget=Pencere()\r\n widget.show()\r\n\r\n app.exit(app.exec())\r\n","sub_path":"software/main_script.py","file_name":"main_script.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591807157","text":"inp = input().split()\ntot = 0\n\nfor i in range (0,5):\n inp[i] = int(inp[i])\n tot += inp[i]\n\ninp.sort()\nMin = inp.pop()\n\ninp.reverse()\nMax = inp.pop()\n\nprint(tot - Min, tot - Max)\n\n\n\n","sub_path":"assets/Programming/Online Judges/Hackerrank/Problem Solving/Algorithms/01 Warmup/08 Mini-Max Sum.py","file_name":"08 Mini-Max Sum.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116416742","text":"# file: rfcomm-server.py\n# auth: Albert Huang \n# desc: simple demonstration of a server application that uses RFCOMM sockets\n#\n# $Id: rfcomm-server.py 518 2007-08-10 07:20:07Z albert $\n\nfrom bluetooth import *\nimport threading\nimport random\nimport json\nimport os\nimport sys\nimport stat, os\nimport time\n\nfifo_path = \"/tmp/btcomm.fifo\"\nstate = 0\n\ndef fiforw(message):\n if(message == None):\n fifo = open(fifo_path, 'r')\n string = \"\"\n for line in fifo:\n string += line\n return string\n fifo = open(fifo_path, 'w')\n fifo.write(message)\n fifo.close()\n\n#This is the function that the thread uses to listen on the bluetooth socket.\n#The parameter is a socket.\ndef btlistener(socket):\n\twhile 1:\n\t\ttry:\n\t\t\tdata = socket.recv(1024)\n\t\t\tif data == 
\"dataReq\\n\":\n\t\t\t\tprint(\"Data start.\")\n\t\t\t\tclient_sock.send(\"data_begin\".encode(\"utf-8\"))\n\n\t\t\t\tdata = gen_input(True).split(\"|\")\n\t\t\t\tfor chunk in data:\n\t\t\t\t\tprint(chunk)\n\t\t\t\t\tclient_sock.send((chunk+\"\\n\").encode(\"utf-8\"))\n\t\t\t\t\tprint(\"After send\")\n\t\t\t\t\tclient_sock.recv(1024)\n\t\t\t\t\tprint(\"Recv\")\n\t\t\t\t\tif state == 2:\n\t\t\t\t\t\tprint(\"Dingle\")\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\tprint(\"slept\")\n\t\t\t\tclient_sock.send(\"data_end\".encode(\"utf-8\"))\n\t\t\t\tprint( \"Data sent!\")\n\t\t\telif data == \"start\\n\":\n\t\t\t\tfiforw(\"start\")\n\t\t\t\tprint(\"Received start command.\")\n\t\t\t\tstate = 2\n\t\t\t\tclient_sock.send(\"S_ACK\".encode(\"utf-8\"))\n\t\t\telse:\n\t\t\t\tprint(\"Recieved: %s\" % data)\n\t\texcept:\n\t\t\treturn\n\n#Setup socket information\n#returns a tuple of sockets\n# client socket, then server socket\ndef get_sockets():\n\tserver_sock=BluetoothSocket( RFCOMM )\n\tserver_sock.bind((\"\",PORT_ANY))\n\tserver_sock.listen(1)\n\tport = server_sock.getsockname()[1]\n\tuuid = \"94f39d29-7d6d-437d-973b-fba39e49d4ee\"\n\t\n\t#Advertise bluetooth service from controller\n\tadvertise_service(\n\t\t\t server_sock, \"cody-Lenovo-B590\",\n\t service_id = uuid,\n\t service_classes = [ uuid, SERIAL_PORT_CLASS ],\n\t profiles = [ SERIAL_PORT_PROFILE ], \n\t )\n \n\t#Wait for connections\n\tprint(\"Waiting for connection on RFCOMM channel %d\" % port)\n\tclient_sock, client_info = server_sock.accept()\n\tprint(\"Accepted connection from \", client_info)\n\treturn (client_sock, server_sock)\n'''\ndef find_device():\n\tdevice = None\n\tn = None\n\tserver_sock=BluetoothSocket( RFCOMM )\n\tserver_sock.bind((\"\",PORT_ANY))\n\tserver_sock.listen(1)\n\tuuid = \"94f39d29-7d6d-437d-973b-fba39e49d4ee\"\n\tadvertise_service(\n\t\t\t server_sock, \"cody-Lenovo-B590\",\n\t service_id = uuid,\n\t service_classes = [ uuid, SERIAL_PORT_CLASS ],\n\t profiles = [ SERIAL_PORT_PROFILE ], \n\t )\n\tnear_devices = discover_devices(lookup_names = True)\n\tfor addr,name in near_devices:\n\t\tprint \"Name: \" + name + \" Address: \" + addr\n\t\tif name == \"BrewAIUI\":\n\t\t\tdevice = addr\n\t\t\tn = name\n\tnear_devices = find_service(address = addr)\n\tfor services in near_devices:\n\t\tprint \" Name: %s\" % (services[\"name\"])\n\t\tprint \" Description: %s\" % (services[\"description\"])\n\t\tprint \" Protocol: %s\" % (services[\"protocol\"])\n\t\tprint \" Provider: %s\" % (services[\"provider\"])\n\t\tprint \" Port: %s\" % (services[\"port\"])\n\t\tprint \" Service id: %s\" % (services[\"service-id\"])\n\t\n\tclient_sock=BluetoothSocket( RFCOMM )\n\tclient_sock.bind((\"\",PORT_ANY))\n\tclient_sock.connect((device, near_devices[0][\"port\"]))\n\treturn client_sock\n'''\n\n#Start the listener thread\ndef start_listener_thread(socket):\n \n\tt = threading.Thread(target=btlistener, args = (client_sock,))\n\tt.daemon = True\n\tt.start()\n\n#please close the sockets after you're done\ndef close_sockets(sock1, sock2):\n\tsock2.close()\n\tsock1.close()\n\tprint(\"Connection closed.\")\n\ndef gen_input(rand):\n\tnames = [\"\\'temp\\':\", \"\\'co2\\':\", \"\\'grav\\':\", \"\\'time\\':\"]\n\t#tmp = \"{\"\n\ttmp = \"\"\n\n\tif rand == True:\n\t\tfor i in range(1, 100):\n\t\t\ttmp += \"{\"\n\t\t\tfor j in range(0, 3):\n\t\t\t\ttmp += names[j] + str(round(random.uniform(0, 30), 5)) +\",\"\n\t\t\ttmp += names[3] + str(i)\n\t\t\ttmp += \"}\"\n\t\t\tif i != 99:\n\t\t\t\ttmp += \"|\"\n\telse:\n\t\tfor i in range(1, 100):\n\t\t\ttmp += 
\"{\"\n\t\t\tfor j in range(0, 3):\n\t\t\t\ttmp += names[j] + str(j) + \",\"\n\t\t\ttmp += names[3] + str(i)\n\t\t\ttmp += \"}\"\n\t\t\tif i != 99:\n\t\t\t\ttmp += \"|\"\n\t#tmp += \"}\"\n\treturn tmp\n'''\n\tarray = []\n\tfor i in range(1, 100):\n\t\tarray += (round(random.uniform(0, 30), 5), round(random.uniform(0, 30), 5), round(random.uniform(0, 30), 5), i)\n\treturn json.dumps(array.__dict__)\n'''\n\n\n#print gen_input(False)\n\n\n\nclient_sock, server_sock = get_sockets()\nstart_listener_thread(client_sock)\n\n#if not stat.S_ISFIFO(os.stat(fifo_path).st_mode):\n# os.mkfifo(fifo_path)\n#Sender Daemon\ntry:\n while True:\n\t #input loop\n data = raw_input(\">\")\n if len(data) == 0: break\n\t #data += '\\n' #concatenate a delimiter\n# if state == 2:\n# time.sleep(2)\n# data = gen_input(True).split('|')[0]\n# client_sock.send(data.encode('utf-8'))\nexcept IOError:\n pass\n\nclose_sockets(client_sock, server_sock)\n\n'''\n\n\tlistener\n\t\tforever\n\t\t\tdata = recv\n\t\t\tsplit data by semicolon\n\t\t\tif data[0] == Brewing\n\t\t\t\tTell program to stop\n\t\t\telseif data[0] == BrewData\n\t\t\t\tSend batch data[1]\n\t\t\telseif data[0] == SurveyData\n\t\t\t\tGive data to AI\n\t\t\telseif data[0] == PreBrew\n\t\t\t\tif SuggActReq\n\t\t\t\t\tSend AI's suggestion\n\t\t\t\telseif Instruct Send\n\t\t\t\t\tPrepare to receive data\n\t\t\t\t\tGive data to AI and microcontroller\n\t\t\t\t\n\t\t\t\t\n'''\n","sub_path":"src/interface/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331045669","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\n\nfrom supplier.models import Supplier\nfrom store.models import RequestOrder, Stock\nfrom supplier.forms import AddSupplierForm\nfrom store.forms import RequestOrderForm, StockOrderForm\nfrom index.models import Account\n\n\n# Create your views here.\n@login_required\ndef supplier(request):\n context = {\n 'suppliers': Supplier.objects.all()\n }\n return render(request, 'supplier.html', context)\n\n\n@login_required\ndef supplier_add(request):\n form = AddSupplierForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, \"Added Supplier\")\n return redirect('supplier:index')\n context = {\n 'form': form,\n }\n return render(request, 'supplier_add.html', context)\n\n\n@login_required\ndef order(request):\n context = {\n 'orders': RequestOrder.objects.all()\n }\n return render(request, 'order.html', context)\n\n\ndef order_request(request):\n if request.method == \"POST\":\n form = RequestOrderForm(request.POST)\n if form.is_valid():\n user = get_object_or_404(Account, user=request.user)\n instance = form.save(commit=False)\n instance.requested_by = user\n instance.save()\n print(instance)\n messages.success(request, \"Requested {}\".format(form.cleaned_data.get('item')))\n return redirect('supplier:order')\n\n context = {\n 'form': RequestOrderForm\n }\n return render(request, 'order_request.html', context)\n\n\ndef order_stock(request, key):\n order = get_object_or_404(RequestOrder, id=key)\n form = StockOrderForm(request.POST or None)\n user = get_object_or_404(Account, user=request.user)\n if form.is_valid():\n order.price = form.cleaned_data.get('price')\n order.quantity = form.cleaned_data.get('quantity')\n 
order.stocked = True\n        order.save()\n        Stock.objects.create(\n            item=order.item,\n            quantity=order.quantity,\n            remaining=order.quantity,\n            added_by=user, order=order,\n        )\n        messages.success(request, \"Received Items --> store\")\n        return redirect('supplier:order')\n    context = {\n        'order': order,\n        'form': form\n    }\n    return render(request, 'order_stock.html', context)","sub_path":"muslim/supplier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375988241","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec  8 15:16:32 2019\n\n@author: cave\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import PercentFormatter\nfrom scipy.optimize import curve_fit\n#define fit function\ndef maxwell(x, A):\n    return x * A * np.exp(-1 * (A*(x)**2 / 2))\ndef sigma(ns, wbs, popts):\n    r = [np.sqrt((y - maxwell(x, popts))**2) for y, x in zip(ns, wbs)]\n    return np.mean(r)\ndef rmse(ns, wbs, popts):\n    a = [(y - maxwell(x, popts))**2 for y,x in zip(ns,wbs)]\n    return np.sqrt(np.mean(a))\nxspace = np.linspace(0,25,1000)\n#############################\ndm9gh=pd.read_csv(\"M9GH_Geschwindigkeiten.csv\", sep=\"\\t\")\nsm9gh=dm9gh[\"v\"]\ndm9gg=pd.read_csv(\"M9GG_Geschwindigkeiten.csv\", sep=\"\\t\")\nsm9gg=dm9gg[\"v\"]\n\nnzm9gh = [x for x in sm9gh if x > 0.48]\nnzm9gg = [x for x in sm9gg if x > 0.27]\n\nplt.subplot(1,2,1)\nnm9gh, binsm9gh, patchesm9gh = plt.hist(nzm9gh, bins=100, density=True, histtype=\"bar\")\nwbm9gh = [(binsm9gh[x] + binsm9gh[x+1])/2 for x in range(len(binsm9gh)-1)]\npoptmm9gh, pcovmm9gh = curve_fit(maxwell, wbm9gh, nm9gh, absolute_sigma=True)\nplt.plot(xspace, maxwell(xspace, poptmm9gh), color=\"orange\", label=\"Fit A={}\".format(round(poptmm9gh[0],2)))\nplt.gca().yaxis.set_major_formatter(PercentFormatter(1))\nplt.ylim(0,0.15)\nplt.legend()\nplt.title(\"m\\u2081 = 32g\")\nplt.rcParams[\"legend.loc\"]=\"upper right\"\nplt.xlabel(\"v [Pixel/Frame]\")\nplt.ylabel(\"Häufigkeit\")\nplt.xlim(0, 25)\nprint(\"pcovmm9gh = \", pcovmm9gh, \"A\", poptmm9gh)\n\nplt.subplot(1,2,2)\nnm9gg, binsm9gg, patchesm9gg = plt.hist(nzm9gg, bins=binsm9gh, density=True, histtype=\"bar\")\nwbm9gg = [(binsm9gg[x] + binsm9gg[x+1])/2 for x in range(len(binsm9gg)-1)]\npoptmm9gg, pcovmm9gg = curve_fit(maxwell, wbm9gg, nm9gg, absolute_sigma=True)\nplt.plot(xspace, maxwell(xspace, poptmm9gg), color=\"orange\", label=\"Fit A={}\".format(round(poptmm9gg[0],2)))\nplt.gca().yaxis.set_major_formatter(PercentFormatter(1))\nplt.ylim(0,0.15)\nplt.title(\"m\\u2082 = 19g\")\nplt.legend()\nplt.xlabel(\"v [Pixel/Frame]\")\nplt.ylabel(\"Häufigkeit\")\nplt.xlim(0, 25)\nprint(\"pcovmm9gg = \", pcovmm9gg, \"A\", poptmm9gg)\n\nplt.tight_layout()\nplt.rcParams.update({'font.size': 8})\nplt.savefig(\"../grafiken/massenvgl\", dpi = 500)\nplt.show()\n","sub_path":"Maxwellverteilung/massenvgl.py","file_name":"massenvgl.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160685302","text":"user_0 = {\n    'username': 'efermi',\n    'first': 'enrico',\n    'last': 'fermi'\n    }\n\nfor key, value in user_0.items():\n    print(\"\\nKey \" + key)\n    print(\"Value \" + value)\n\n\n# Iterating with 'for key in user_0' gives the same result:\n# all the keys are displayed.\nfor key in user_0.keys():\n    print(\"\\nKey \" + key)
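\n\n# Editor's note (added): the next two loops mirror the key loop above for\n# values; wrapping .values() in set() is what removes the duplicate entries.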
\n\n# all the values will be displayed.\nprint(\"\\nall the values will be displayed.\")\nfor value in user_0.values():\n    print(value)\n\n\n# duplicate values will not be displayed when set() is used.\nprint(\"\\nduplicate values will not be displayed when set() is used.\")\nfavorite_language = {\n    'jen': 'python',\n    'sarah': 'c',\n    'edward': 'ruby',\n    'phil': 'python'\n}\n\nfor value in set(favorite_language.values()):\n    print(value)","sub_path":"Python/PCC/Basics/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126805465","text":"\"\"\"\nAPI Input Models not described in reasoner-pydantic\n\"\"\"\n\nfrom pydantic import BaseModel, Field\n\nfrom typing import List\n\n\nclass CurieList(BaseModel):\n    \"\"\"Curie list input model\"\"\"\n\n    curies: List[str] = Field(\n        ...,  # Ellipsis means field is required\n        title='list of nodes formatted as curies'\n    )\n\n    class Config:\n        schema_extra = {\n            \"example\": {\n                \"curies\": ['MESH:D014867', 'NCIT:C34373']\n            }\n        }\n\n\nclass SemanticTypesInput(BaseModel):\n    \"\"\"Semantic type input model\"\"\"\n\n    semantic_types: List[str] = Field(\n        ...,  # required field\n        title='list of semantic types',\n    )\n\n    class Config:\n        schema_extra = {\n            \"example\": {\n                \"semantic_types\": ['biolink:ChemicalSubstance', 'biolink:AnatomicalEntity']\n            }\n        }\n","sub_path":"node_normalizer/model/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436346383","text":"import time\nfrom collections import OrderedDict\nimport pprint\nfrom pull_web_stats_from_ga import initialize_ga_api, get_IDs, get_history_by_month, insert_zeros, push_dataset_to_ckan, field_mapper\n\nimport sys # These two lines are here to prevent a weird problem wherein\nsys.excepthook = sys.__excepthook__ # a) the script would get stuck after\n# failing to retrieve some data from Google Analytics after trying twice\n# and b) would just hang, rather than raising an exception (as designed).\n#http://stackoverflow.com/questions/12865637/why-doesnt-python-exit-from-a-raised-exception-when-executed-with-an-absolute-p\n\nfrom credentials_file import server, monthly_downloads_resource_id\n####### Get monthly downloads data ####################################\n    # Create entire dataset-downloads dataset by looking at every month.\n    # For every resource ID in the data.json file, run metric_by_month and upsert the results to the monthly-downloads datastore.\n\ndef fetch_and_store_metric(metric,metrics_name,target_resource_id,modify_datastore,event,first_yearmonth,limit=0):\n    # target_resource_id is the resource ID of the dataset that the\n    # fetched information should be sent to (not to be confused with\n    # the resource IDs of the data files about which metrics are being\n    # obtained).\n    service = initialize_ga_api()\n    from credentials_file import profile # The Google Analytics profile ID for data.wprdc.org.\n\n    metrics = ', '.join(metrics_name.keys())\n    resources, packages, lookup_by_id = get_IDs()\n\n    #Write the field names as the first line of the file:\n    dmbm_file = 'dataset_'+metric+'_by_month.csv'\n    fcsv = open(dmbm_file,'w')\n    extra_fields = ['Year+month']\n    extra_fields += ['Package','Resource','Publisher','Groups','Package ID','Resource ID']\n    # This is the first place to add extra fields.\n    #if metric == 'downloads':\n    #    extra_fields.remove(\"Resource ID\") # This causes an
error.\n csv_row = ','.join(extra_fields + metrics_name.values())\n fcsv.write(csv_row+'\\n')\n\n all_rows = []\n if limit > 0:\n resources = resources[:limit]\n for k,r_id in enumerate(resources):\n metric_by_month = get_history_by_month(service, profile, metrics, r_id, event)\n if metric_by_month is None:\n print(\"Strike 1. \")\n metric_by_month = get_history_by_month(service, profile, metrics, r_id, event)\n if metric_by_month is None:\n print(\"Strike 2. \")\n metric_by_month = get_history_by_month(service, profile, metrics, r_id, event)\n if metric_by_month is None:\n print(\"Strike 3. \")\n raise Exception(\"Unable to get metric_by_month data for resource ID {} after trying thrice.\".format(r_id))\n if 'rows' in metric_by_month:\n metric_rows = metric_by_month['rows']\n # Unfortunately, Google Analytics does not provide the resource\n # ID as a queryable parameter for pageviews or other metrics\n # the way it does for downloads (since the resource ID has been\n # inserted as the eventLabel).\n # Therefore, I need to manually insert the resource ID in these\n # cases (and now other parameters).\n\n lbid = lookup_by_id[r_id]\n\n new_metric_rows = []\n for row in metric_rows:\n if metric == 'downloads':\n row.remove(unicode(r_id))\n new_metric_rows.append([row[0], lbid['package name'], lbid['name'], lbid['publisher'], lbid['groups'], lbid['package id'], r_id] + row[1:])\n # This is the second place to add (and order) extra fields.\n metric_rows = new_metric_rows\n\n metric_rows = insert_zeros(metric_rows,\n [lbid['package name'], lbid['name'], lbid['publisher'], lbid['groups'], lbid['package id'], r_id], len(metrics_name), first_yearmonth)\n # This is the third place to add extra fields.\n\n pprint.pprint(metric_rows)\n for row in metric_rows:\n csv_row = ','.join(row)\n fcsv = open(dmbm_file,'a')\n fcsv.write(csv_row+'\\n')\n fcsv.close()\n\n all_rows += metric_rows\n else:\n print(\"No rows found in the response for the dataset with resource ID {}\".format(r_id))\n time.sleep(1.0)\n\n# Create an update to the dataset-downloads dataset by just looking at this month and last month and upserting the results.\n if modify_datastore:\n # The fourth and final place to add extra fields is field_mapper,\n # which is now defined in pull_web_stats_from_ga, but you can also\n # just extend it here with a command like\n # field_mapper['Beeblebrox'] = \"dict\"\n keys = ['Year+month', 'Resource ID']\n push_dataset_to_ckan(all_rows, metrics_name, server, target_resource_id, field_mapper, keys, extra_fields) #This pushes everything in metric_rows\n\n fields = extra_fields + metrics_name.values()\n return all_rows, fields\n\n # [ ] Modify push_dataset_to_ckan to only initialize the datastore when necessary.\n # This script could have two modes:\n # 1) Download all data and overwrite the old stuff.\n # 2) Download only this month and last month and upsert into\n # the existing repository.\n\n\n\n# get_full_history(resource_id=\"40776043-ad00-40f5-9dc8-1fde865ff571\")\n# Pull down daily downloads/unique downloads/pageviews and then monthly\n# stats, and then use the data.json file to filter down to the things\n# we want to track (maybe).\n\n# If we dump the output\n# u'rows': [[u'40776043-ad00-40f5-9dc8-1fde865ff571', u'668', u'260'],\n# [u'7a417847-37bb-4a16-a25e-477f2a71661d', u'493', u'82'],\n# [u'c0fcc09a-7ddc-4f79-a4c1-9542301ef9dd', u'139', u'78'],\n#\n# and prepend some kind of date information, that could essentially be the\n# stuff inserted into the dataset (once the types have been properly 
taken care of)\n\ndef main():\n service = initialize_ga_api()\n #metrics_name = OrderedDict([(\"ga:totalEvents\",'Downloads'),\n # (\"ga:uniqueEvents\",'Unique downloads')\n # ])\n #metrics = ', '.join(metrics_name.keys())\n #h = get_history_by_month(service,profile,metrics,'40776043-ad00-40f5-9dc8-1fde865ff571',False)\n #print h\n\n #raw_input('Press enter to continue: ')\n\n\n store = True\n metric = 'downloads'\n event = True\n store = False\n\n\n metric = 'pageviews'\n first_yearmonth = '201510'\n if metric == 'downloads':\n target_resource_id = monthly_downloads_resource_id\n first_yearmonth = '201603'\n elif metric == 'pageviews':\n target_resource_id = None\n event = False\n store = False\n if metric == 'downloads':\n metrics_name = OrderedDict([(\"ga:totalEvents\",'Downloads'),\n (\"ga:uniqueEvents\",'Unique downloads')\n ])\n elif metric == 'pageviews':\n metrics_name = OrderedDict([(\"ga:pageviews\",'Pageviews')\n ])\n\n fetch_and_store_metric(metric,metrics_name,target_resource_id,store,event,first_yearmonth)\n\nif __name__ == '__main__':\n main()\n","sub_path":"pull_monthly_metric_from_ga.py","file_name":"pull_monthly_metric_from_ga.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96774193","text":"import pytest\nfrom collections import namedtuple\n\n\n_NamedTuple = namedtuple('_NamedTuple', ['field1'])\n\n_storage_input = {\n 'none': None,\n 'str': 'thisisastring',\n 'int': 42,\n 'dict': {'int': 3, 'float': 0.5},\n 'list': [0, 'foo', 4.2],\n 'namedtuple': _NamedTuple(42),\n}\n\n\nclass BaseTestStorage(object):\n def test_persistence(self):\n for k, v in _storage_input.items():\n self.storage[k] = v\n\n self.reopen_storage()\n\n for k, v in _storage_input.items():\n assert self.storage[k] == v\n\n def test_set_pod(self):\n for k, v in _storage_input.items():\n self.storage[k] = v\n\n for k, v in _storage_input.items():\n assert self.storage[k] == v\n\n def test_get_nonexistent(self):\n with pytest.raises(KeyError):\n self.storage['non-existent']\n\n def test_del(self):\n for k, v in _storage_input.items():\n self.storage[k] = v\n\n for k, v in _storage_input.items():\n del self.storage[k]\n\n for k in _storage_input.keys():\n assert k not in self.storage\n\n def test_del_nonexistent(self):\n with pytest.raises(KeyError):\n del self.storage['non-existent']\n","sub_path":"tests/storage/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64094102","text":"import scrapy\nfrom tldextract import tldextract\n\nfrom tpdb.BaseSceneScraper import BaseSceneScraper\n\n\nclass TeenMegaWorldSpider(BaseSceneScraper):\n name = 'TeenMegaWorld'\n network = 'teenmegaworld'\n\n start_urls = [\n 'https://teenmegaworld.net',\n 'http://rawcouples.com/',\n 'http://anal-angels.com',\n 'http://anal-beauty.com',\n 'http://beauty4k.com',\n 'http://beauty-angels.com',\n 'http://creampie-angels.com',\n 'http://dirty-coach.com',\n 'http://dirty-doctor.com',\n 'http://firstbgg.com',\n 'http://fuckstudies.com',\n 'http://gag-n-gape.com',\n 'http://lollyhardcore.com',\n 'http://noboring.com',\n 'http://nubilegirlshd.com',\n 'http://old-n-young.com',\n 'http://soloteengirls.net',\n 'http://teensexmania.com',\n 'http://trickymasseur.com',\n 'http://x-angels.com',\n 'http://teensexmovs.com',\n ]\n\n selector_map = {\n 'title': \"//div[contains(@class, 'title-line')]//h1/text()\",\n 
'description': \"//p[contains(@class, 'description')]/text() | //div[contains(@class, 'text')]/text() | //meta[@property='og:description']/@content\",\n 'date': \"//div[contains(@class, 'date')]//time/text()\",\n 'image': '//deo-video/@poster | //video/@poster | //meta[@property=\"og:image\"]/@content',\n 'performers': \"//div[contains(@class, 'site')]//a[contains(@href, 'models')]/text()\",\n 'tags': \"//ul[contains(@class, 'tag-list')]//a/text()\",\n 'external_id': 'trailers\\\\/(.+)\\\\.html',\n 'trailer': '//source/@src',\n 'pagination': '/categories/movies_%s_d.html'\n }\n\n def get_scenes(self, response):\n scenes = response.xpath(\n \"//a[contains(@class, 'title')]/@href\").getall()\n for scene in scenes:\n yield scrapy.Request(url=scene, callback=self.parse_scene)\n\n def get_site(self, response):\n site = response.xpath(\n '//div[contains(@class, \"site\")]//a[starts-with(@href, \"/search\")]/text()').extract_first()\n return tldextract.extract(site).domain\n","sub_path":"scenes/networkTeenMegaWorld.py","file_name":"networkTeenMegaWorld.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"449985647","text":"import math\nimport random\nimport numpy as np\nfrom gngauss import gngauss\n\n\ndef cm_sm52(snr_in_dB):\n\n N = 10000\n Eb = 1\n d = 1\n snr = 10 ** (snr_in_dB / 10) # 每一比特的信噪比\n sgma = np.sqrt(Eb / (2 * snr)) # 信噪比方差\n phi = 0\n numoferr = 0\n dsource = np.zeros(10000)\n\n for i in range(N):\n if (random.random() < 0.5):\n dsource[i] = 0\n else:\n dsource[i] = 1\n\n for i in range(N):\n\n if (dsource[i] == 0):\n r0c = np.sqrt(Eb) * np.cos(phi) + gngauss(0, sgma)\n r0s = np.sqrt(Eb) * np.sin(phi) + gngauss(0, sgma)\n r1c = gngauss(0, sgma)\n r1s = gngauss(0, sgma)\n\n else:\n r0c = gngauss(0, sgma)\n r0s = gngauss(0, sgma)\n r1c = np.sqrt(Eb) * np.cos(phi) + gngauss(0, sgma)\n r1s = np.sqrt(Eb) * np.sin(phi) + gngauss(0, sgma)\n\n # square law detector outputs 平方检测输出法?\n r0 = r0c ** 2 + r0s ** 2\n r1 = r1c ** 2 + r1s ** 2\n\n if r0 > r1:\n decis = 0\n else:\n decis = 1\n\n # 如果检测错误,记录错误\n if (decis != dsource[i]):\n numoferr = numoferr + 1\n\n return numoferr / N\n","sub_path":"3/cm_sm52.py","file_name":"cm_sm52.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556618481","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\ndef forwards_func(apps, schema_editor):\n Name = apps.get_model(\"django_namecoin\", \"Name\")\n for name in Name.objects.all():\n if (not name.blockchain):\n name.blockchain = name.operations.all()[:1][0].block.blockchain\n name.save()\n\n\ndef reverse_func(apps, schema_editor):\n Name = apps.get_model(\"django_namecoin\", \"Name\")\n for name in Name.objects.all():\n name.blockchain = None\n name.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_namecoin', '0006_name_blockchain'),\n ]\n\n operations = [\n migrations.RunPython(forwards_func, reverse_func),\n ]\n","sub_path":"django_namecoin/migrations/0007_name_blockchain_correct.py","file_name":"0007_name_blockchain_correct.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205740705","text":"import difflib\nimport functools\nimport operator\nimport sys\nfrom functools import reduce\nfrom itertools import 
islice\n\nimport numpy as np\n\nfrom .misc import indent\n\n__all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values',\n 'where_not_allclose']\n\n# Smaller default shift-width for indent\nfixed_width_indent = functools.partial(indent, width=2)\n\n\ndef diff_values(a, b, rtol=0.0, atol=0.0):\n \"\"\"\n Diff two scalar values. If both values are floats, they are compared to\n within the given absolute and relative tolerance.\n\n Parameters\n ----------\n a, b : int, float, str\n Scalar values to compare.\n\n rtol, atol : float\n Relative and absolute tolerances as accepted by\n :func:`numpy.allclose`.\n\n Returns\n -------\n is_different : bool\n `True` if they are different, else `False`.\n\n \"\"\"\n if isinstance(a, float) and isinstance(b, float):\n if np.isnan(a) and np.isnan(b):\n return False\n return not np.allclose(a, b, rtol=rtol, atol=atol)\n else:\n return a != b\n\n\ndef report_diff_values(a, b, fileobj=sys.stdout, indent_width=0):\n \"\"\"\n Write a diff report between two values to the specified file-like object.\n\n Parameters\n ----------\n a, b\n Values to compare. Anything that can be turned into strings\n and compared using :py:mod:`difflib` should work.\n\n fileobj : obj\n File-like object to write to.\n The default is ``sys.stdout``, which writes to terminal.\n\n indent_width : int\n Character column(s) to indent.\n\n Returns\n -------\n identical : bool\n `True` if no diff, else `False`.\n\n \"\"\"\n typea = type(a)\n typeb = type(b)\n\n if (isinstance(a, str) and not isinstance(b, str)):\n a = repr(a).lstrip('u')\n elif (isinstance(b, str) and not isinstance(a, str)):\n b = repr(b).lstrip('u')\n\n if isinstance(a, (int, float, complex, np.number)):\n a = repr(a)\n\n if isinstance(b, (int, float, complex, np.number)):\n b = repr(b)\n\n if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):\n diff_indices = np.where(a != b)\n # NOTE: Two 5x5 arrays that are completely different would\n # report num_diffs of 625 (25 * 25).\n num_diffs = reduce(operator.mul, map(len, diff_indices), 1)\n for idx in islice(zip(*diff_indices), 3):\n fileobj.write(\n fixed_width_indent(' at {!r}:\\n'.format(list(idx)),\n indent_width))\n report_diff_values(a[idx], b[idx], fileobj=fileobj,\n indent_width=indent_width + 1)\n\n if num_diffs > 3:\n fileobj.write(fixed_width_indent(\n ' ...and at {} more indices.\\n'.format(num_diffs - 3),\n indent_width))\n return num_diffs == 0\n\n padding = max(len(typea.__name__), len(typeb.__name__)) + 3\n identical = True\n\n for line in difflib.ndiff(str(a).splitlines(), str(b).splitlines()):\n if line[0] == '-':\n identical = False\n line = 'a>' + line[1:]\n if typea != typeb:\n typename = '(' + typea.__name__ + ') '\n line = typename.rjust(padding) + line\n\n elif line[0] == '+':\n identical = False\n line = 'b>' + line[1:]\n if typea != typeb:\n typename = '(' + typeb.__name__ + ') '\n line = typename.rjust(padding) + line\n else:\n line = ' ' + line\n if typea != typeb:\n line = ' ' * padding + line\n fileobj.write(fixed_width_indent(\n ' {}\\n'.format(line.rstrip('\\n')), indent_width))\n\n return identical\n\n\ndef where_not_allclose(a, b, rtol=1e-5, atol=1e-8):\n \"\"\"\n A version of :func:`numpy.allclose` that returns the indices\n where the two arrays differ, instead of just a boolean value.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n\n rtol, atol : float\n Relative and absolute tolerances as accepted by\n :func:`numpy.allclose`.\n\n Returns\n -------\n idx : tuple of arrays\n Indices where the two 
arrays differ.\n\n \"\"\"\n # Create fixed mask arrays to handle INF and NaN; currently INF and NaN\n # are handled as equivalent\n if not np.all(np.isfinite(a)):\n a = np.ma.fix_invalid(a).data\n if not np.all(np.isfinite(b)):\n b = np.ma.fix_invalid(b).data\n\n if atol == 0.0 and rtol == 0.0:\n # Use a faster comparison for the most simple (and common) case\n return np.where(a != b)\n return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))\n","sub_path":"astropy/utils/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"263514350","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.array([[20,14,30,56,47,95,10,36,20,11,66,44]])\nx_trans = x.T\nb = np.ones((x_trans.shape[0],1))\n\nX = np.concatenate((x_trans,b),axis = 1) # N x 2\nY_Label = np.array([[14,8,20,38,34,80,5,23,14,6,43,32]]).T\nW = np.random.randn(x_trans.shape[0],2)\n\n#Linear: y_pred = w.T * x\n#Loss : L = 1/2(Y_label - Y_pred)^2\n\nderived_W_trans = np.linalg.pinv(X).dot(np.linalg.pinv(X.T)).dot(np.dot(X.T,Y_Label))\n\nw_0 = derived_W_trans[0][0]\nw_1 = derived_W_trans[1][0]\n\nx0 = np.linspace(10,100,2)\ny0 = w_1 + w_0*x0\n\nplt.plot(x,Y_Label.T,'ro')\nplt.plot(x0,y0)\nplt.axis([0,100,1,90])\nplt.xlabel('Money Put In')\nplt.ylabel('Percent of winning')\nplt.show()\n","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520427097","text":"from core.agility.common.AgilityModelBase import AgilityModelBase\n\n\nclass ResourceWeightMetaBase(AgilityModelBase):\n '''\n classdocs\n '''\n def __init__(self, resourceweightinfo=[], displayname='', id=None, jaxbtype='', name=''):\n AgilityModelBase.__init__(self)\n self._attrSpecs = getattr(self, '_attrSpecs', {})\n self._attrSpecs.update({'resourceWeightInfo': {'maxOccurs': 'unbounded', 'type': 'ResourceWeightInfo', 'name': 'resourceweightinfo', 'minOccurs': '0', 'native': False}, 'displayName': {'type': 'string', 'name': 'displayname', 'native': True}, 'id': {'type': 'int', 'name': 'id', 'native': True}, 'name': {'type': 'string', 'name': 'name', 'native': True}, 'jaxbType': {'type': 'string', 'name': 'jaxbtype', 'native': True}})\n self.resourceweightinfo = resourceweightinfo\n self.displayname = displayname\n self.id = id\n self.jaxbtype = jaxbtype\n self.name = name \n","sub_path":"core/agility/v3_0/agilitymodel/base/ResourceWeightMeta.py","file_name":"ResourceWeightMeta.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"503205221","text":"from __future__ import absolute_import\r\nfrom __future__ import print_function\r\n\r\nfrom keras.models import Model\r\nfrom keras import regularizers\r\nfrom keras.layers import Input, Dense, GRU, Masking, Dropout\r\nfrom keras.layers.wrappers import Bidirectional, TimeDistributed\r\nfrom mimic3models.keras_utils import LastTimestep\r\nfrom mimic3models.keras_utils import ExtendMask\r\n\r\n\r\nclass Network(Model):\r\n\r\n def __init__(self, dim, batch_norm, dropout, rec_dropout, task,\r\n target_repl=False, deep_supervision=False, num_classes=1,\r\n depth=1, input_dim=69, **kwargs):\r\n\r\n print(\"==> not used params in network class:\", kwargs.keys())\r\n\r\n self.dim = dim\r\n self.batch_norm = batch_norm\r\n self.dropout = dropout\r\n self.rec_dropout = 
rec_dropout\r\n self.depth = depth\r\n\r\n if task == 'ihm':\r\n final_activation = 'sigmoid'\r\n else:\r\n raise ValueError(\"Wrong value for task\")\r\n\r\n # Input layers and masking\r\n X = Input(shape=(None, input_dim), name='X')\r\n inputs = [X]\r\n mX = Masking()(X)\r\n\r\n \r\n # Output module of the network\r\n G_1 = GRU(units=dim,\r\n activation='tanh',\r\n kernel_regularizer = regularizers.l2(0.001),\r\n return_sequences=True,\r\n dropout=dropout,\r\n recurrent_dropout=rec_dropout)(mX)\r\n #D = Dropout(dropout)(G_1)\r\n G = GRU(units=dim,\r\n activation='tanh',\r\n kernel_regularizer = regularizers.l2(0.001),\r\n return_sequences=False,\r\n dropout=dropout,\r\n recurrent_dropout=rec_dropout)(G_1)\r\n D = Dropout(dropout)(G)\r\n y = Dense(num_classes, activation=final_activation)(D)\r\n outputs = [y]\r\n\r\n super(Network, self).__init__(inputs=inputs, outputs=outputs)\r\n\r\n def say_name(self):\r\n return \"{}.n{}{}{}{}.dep{}\".format('k_single_2gru',\r\n self.dim,\r\n \".bn\" if self.batch_norm else \"\",\r\n \".d{}\".format(self.dropout) if self.dropout > 0 else \"\",\r\n \".rd{}\".format(self.rec_dropout) if self.rec_dropout > 0 else \"\",\r\n self.depth)\r\n","sub_path":"mimic3models/keras_models/single_2gru.py","file_name":"single_2gru.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213832928","text":"import unittest\nimport json\nfrom server import app\nfrom flask import Flask, render_template, redirect, request, flash, session, json, jsonify\nfrom model import User, Favorite, Regulation, connect_to_db, db\n\nclass loggedOutTests(unittest.TestCase):\n \"\"\"Tests that can be run when user is not logged in.\"\"\"\n\n def setUp(self):\n \"\"\"Stuff to do before every test.\"\"\"\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n connect_to_db(app)\n\n\n def tearDown(self):\n \"\"\"Stuff to do after each test.\"\"\"\n\n\n def test_homepage(self):\n \"\"\"Test for logo text on homepage.\"\"\"\n\n result = self.client.get('/')\n self.assertEqual(result.status_code, 200)\n self.assertIn('Park Place', result.data)\n\n\n def test_searchpage(self):\n \"\"\"Test for form on search page.\"\"\"\n\n result = self.client.get('/search')\n self.assertEqual(result.status_code, 200)\n self.assertIn('Desired Destination', result.data)\n\n\n # def test_logout(self):\n # \"\"\"Test logout route for flash message.\"\"\"\n\n # result = self.client.get('/logout', follow_redirects=True)\n # self.assertEqual(result.status_code, 200)\n # self.assertIn('logged out', result.data)\n\n\nclass loggedInTests(unittest.TestCase):\n \"\"\"Tests that can be run when user is logged in.\"\"\"\n\n def setUp(self):\n \"\"\"Stuff to do before every test.\"\"\"\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'free'\n connect_to_db(app)\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1\n\n\n def tearDown(self):\n \"\"\"Stuff to do after each test.\"\"\"\n\n # def test_favpage(self):\n # \"\"\"Test that user's favorites show up on page.\"\"\"\n\n # # default user values for testing route\n # user_id = 1\n # user_email = 'hello@hello.com'\n\n # result = self.client.get('/favorites/{}'.format(int(user_id)))\n # self.assertEqual(result.status_code, 200)\n # self.assertIn('{}'.format(user_email), result.data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n# TO DO: Create a test db. 
Write tests on routes that have \"side effects\"","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441359768","text":"# https://leetcode-cn.com/problems/remove-nth-node-from-end-of-list/\r\n\r\n\r\n# Definition for singly-linked list.\r\nclass ListNode:\r\n    def __init__(self, x):\r\n        self.val = x\r\n        self.next = None\r\n\r\n    def __repr__(self):\r\n        return str(self.val)\r\n\r\n\r\nclass Solution:\r\n    def removeNthFromEnd(self, head, n) -> ListNode:\r\n        dummy = ListNode(-1)  # dummy node at the head, to handle the edge cases\r\n        dummy.next = head\r\n        tmp = dummy\r\n        lenth = 0\r\n\r\n        # compute the length of the list (the dummy node is counted too)\r\n        while tmp:\r\n            tmp = tmp.next\r\n            lenth += 1\r\n        # print('lenth', lenth)\r\n\r\n        # walk the list again; mind the boundary conditions:\r\n        # if count were lenth - n, the loop would advance one step further, landing exactly on the node to be deleted\r\n        tmp = dummy\r\n        count = lenth - n - 1\r\n        print('count', count)\r\n        while count:\r\n            tmp = tmp.next\r\n            count -= 1\r\n        print(tmp, count)  # ends up as 3 0\r\n\r\n        tmp.next = tmp.next.next\r\n        return dummy.next\r\n\r\n\r\nhead1_list = [1, 2, 3, 4, 5]\r\nhead1 = tmp = ListNode(head1_list[0])  # use tmp so that head1 itself is not modified\r\nfor i in range(len(head1_list) - 1):\r\n    tmp.next = ListNode(head1_list[i + 1])\r\n    tmp = tmp.next\r\n# while head1:\r\n#     print(head1.val)\r\n#     head1 = head1.next\r\n\r\n# Solution().removeNthFromEnd(head1, 2)\r\n\r\nres = Solution().removeNthFromEnd(head1, 2)\r\nwhile res:\r\n    print(res.val)\r\n    res = res.next\r\n","sub_path":"019-2-Remove Nth Node From End of List-两次遍历.py","file_name":"019-2-Remove Nth Node From End of List-两次遍历.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"295664970","text":"\"\"\"Global configuration for SPECdata.\n\nExample config file:\n\n[Filepaths]\nproject_directory: /home/joli/PycharmProjects/SPECdata\ndatabase_directory: /home/joli/PycharmProjects/SPECdata/data\ndatabase_file: /home/joli/PycharmProjects/SPECdata/data/spectrum.db\n\n[OS]\nos: Linux\n\"\"\"\n\n\nclass Config:\n\n    def __init__(self, project_directory, database_directory=None, database_filepath=None):\n        self.PROJECT_DIR = project_directory\n        self.DATABASE_DIR = database_directory\n        self.DATABASE_FILEPATH = database_filepath\n        self.CONN = None\n\n    def set_as_default(self):\n        \"\"\"\n        Set default configuration values,\n        derived from the current working directory.\n        \"\"\"\n        import os\n        self.PROJECT_DIR = os.getcwd()\n        self.DATABASE_DIR = os.path.join(self.PROJECT_DIR, \"data\")\n        self.DATABASE_FILEPATH = os.path.join(self.DATABASE_DIR, 'spectrum.db')\n        self.set_connection()\n\n    def set_connection(self):\n        \"\"\"\n        Open an SQLite connection to DATABASE_FILEPATH.\n        \"\"\"\n        import sqlite3\n        self.CONN = sqlite3.connect(self.DATABASE_FILEPATH)\n","sub_path":"global_cfg.py","file_name":"global_cfg.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594160592","text":"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport hashlib\n\n\ndef generate(filename, info):\n m = hashlib.md5()\n m.update(json.dumps(info).encode('utf-8'))\n defines = []\n enums = []\n for cn, cat in info.items():\n for fn, func in cat.items():\n if len(func['function_ids']) > 1:\n for n in range(len(func['function_ids'])-1):\n arg_fmt = list(func['function_ids'].items())[n][0]\n fid = list(func['function_ids'].items())[n][1]\n enums.append(\n ' NN_FUNCTION_{}_{} = {}, ///< Recent version of {} has arg [{}]'.format(func['snake_name'].upper(), n, fid, fn, arg_fmt))\n\n enums.append(\n ' NN_FUNCTION_{} = {}, ///< {}'.format(func['snake_name'].upper(), func['id'], fn))\n\n defines.append('/// @brief {} function.'.format(fn))\n defines.append('/// @{')\n defines.append('typedef struct {')\n defines.append(\n ' nn_function_type_t type : 16; ///< Common: type of function.')\n defines.append(\n ' nn_function_implement_t impl : 16; ///< Common: function implementation.')\n defines.append(\n ' nn_list_t inputs; ///< Common: List of input variables.')\n defines.append(\n ' nn_list_t outputs; ///< Common: List of output variables.')\n if 'arguments' in func and len(func['arguments']) > 0:\n defines.append(' // End of common part.')\n for an, arg in func['arguments'].items():\n if arg['type'] == 'bool':\n defines.append(\n ' uint8_t {}; ///< Original type is [{}]'.format(an, arg['type']))\n elif arg['type'] == 'double' or arg['type'] == 'float':\n defines.append(\n ' float {}; ///< Original type is [{}]'.format(an, arg['type']))\n elif arg['type'] == 'int64':\n defines.append(\n ' int32_t {}; ///< Original type is [{}]'.format(an, arg['type']))\n elif arg['type'] == 'repeated int64' or arg['type'] == 'Shape':\n defines.append(\n ' nn_list_t {}; ///< Original type is [{}]'.format(an, arg['type']))\n elif arg['type'] == 'string':\n defines.append(\n ' uint32_t {}; ///< Original type is [{}]'.format(an, arg['type']))\n defines.append('}} nn_function_{}_t;'.format(\n func['snake_name']))\n defines.append('')\n defines.append('/// @}')\n defines.append('')\n\n versions = {}\n with open(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'VERSION.txt')) as f:\n for l in f.readlines():\n ls = l.rstrip().split(': ')\n if len(ls) == 2:\n versions[ls[0]] = ls[1]\n\n from mako.template import Template\n from mako import exceptions\n try:\n tmpl = Template(filename=filename)\n output = tmpl.render(NNABLA_VERSION=versions['NNABLA_VERSION'],\n C_RUNTIME_VERSION=versions['C_RUNTIME_VERSION'],\n NNB_MINIMUM_VERSION=versions['NNB_MINIMUM_VERSION'],\n NNB_VERSION=versions['NNB_VERSION'],\n API_LEVEL=versions['API_LEVEL'],\n FUNCTION_ENUMS='\\n'.join(enums),\n FUNCTION_DEFINES='\\n'.join(defines))\n return output\n except:\n print(exceptions.text_error_template().render())\n return None\n","sub_path":"build-tools/code-generator/generators/generator_include_nnablart_network_h.py","file_name":"generator_include_nnablart_network_h.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470167916","text":"import numpy as np\n\ndef load_dataset(filename):\n dataset = []\n file = open(filename)\n for line in file.readlines():\n current_line = line.strip().split('\\t')\n float_line = [float(example) for example in current_line]\n 
dataset.append(float_line)\n return np.array(dataset)","sub_path":"decision_tree/model/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626737526","text":"#!/usr/bin/env python\n\n\"\"\"\nImplementation of gpustat\n\n@author Jongwook Choi\n@url https://github.com/wookayin/gpustat\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport locale\nimport os.path\nimport platform\nimport sys\nfrom datetime import datetime\n\nimport six\nfrom six.moves import cStringIO as StringIO\n\nimport psutil\nimport pynvml as N\nfrom blessings import Terminal\n\nNOT_SUPPORTED = 'Not Supported'\n\n\nclass GPUStat(object):\n\n def __init__(self, entry):\n if not isinstance(entry, dict):\n raise TypeError('entry should be a dict, {} given'.format(type(entry)))\n self.entry = entry\n\n # Handle '[Not Supported] for old GPU cards (#6)\n for k in self.entry.keys():\n if isinstance(self.entry[k], six.string_types) and NOT_SUPPORTED in self.entry[k]:\n self.entry[k] = None\n\n def __repr__(self):\n return self.print_to(StringIO()).getvalue()\n\n def keys(self):\n return self.entry.keys()\n\n def __getitem__(self, key):\n return self.entry[key]\n\n @property\n def index(self):\n \"\"\"\n Returns the index of GPU (as in nvidia-smi).\n \"\"\"\n return self.entry['index']\n\n @property\n def uuid(self):\n \"\"\"\n Returns the uuid returned by nvidia-smi,\n e.g. GPU-12345678-abcd-abcd-uuid-123456abcdef\n \"\"\"\n return self.entry['uuid']\n\n @property\n def name(self):\n \"\"\"\n Returns the name of GPU card (e.g. Geforce Titan X)\n \"\"\"\n return self.entry['name']\n\n @property\n def memory_total(self):\n \"\"\"\n Returns the total memory (in MB) as an integer.\n \"\"\"\n return int(self.entry['memory.total'])\n\n @property\n def memory_used(self):\n \"\"\"\n Returns the occupied memory (in MB) as an integer.\n \"\"\"\n return int(self.entry['memory.used'])\n\n @property\n def memory_free(self):\n \"\"\"\n Returns the free (available) memory (in MB) as an integer.\n \"\"\"\n v = self.memory_total - self.memory_used\n return max(v, 0)\n\n @property\n def memory_available(self):\n \"\"\"\n Returns the available memory (in MB) as an integer. 
Alias of memory_free.\n \"\"\"\n return self.memory_free\n\n @property\n def temperature(self):\n \"\"\"\n Returns the temperature of GPU as an integer,\n or None if the information is not available.\n \"\"\"\n v = self.entry['temperature.gpu']\n return int(v) if v is not None else None\n\n @property\n def utilization(self):\n \"\"\"\n Returns the GPU utilization (in percentile),\n or None if the information is not available.\n \"\"\"\n v = self.entry['utilization.gpu']\n return int(v) if v is not None else None\n\n @property\n def power_draw(self):\n \"\"\"\n Returns the GPU power usage in Watts,\n or None if the information is not available.\n \"\"\"\n v = self.entry['power.draw']\n return int(v) if v is not None else None\n\n @property\n def power_limit(self):\n \"\"\"\n Returns the (enforced) GPU power limit in Watts,\n or None if the information is not available.\n \"\"\"\n v = self.entry['enforced.power.limit']\n return int(v) if v is not None else None\n\n @property\n def processes(self):\n \"\"\"\n Get the list of running processes on the GPU.\n \"\"\"\n return list(self.entry['processes'])\n\n\n def print_to(self, fp,\n with_colors=True, # deprecated arg\n show_cmd=False,\n show_user=False,\n show_pid=False,\n show_power=None,\n gpuname_width=16,\n term=Terminal(),\n ):\n # color settings\n colors = {}\n\n def _conditional(cond_fn, true_value, false_value,\n error_value=term.bold_black):\n try:\n if cond_fn(): return true_value\n else: return false_value\n except:\n return error_value\n\n colors['C0'] = term.normal\n colors['C1'] = term.cyan\n colors['CName'] = term.blue\n colors['CTemp'] = _conditional(lambda: int(self.entry['temperature.gpu']) < 50,\n term.red, term.bold_red)\n colors['CMemU'] = term.bold_yellow\n colors['CMemT'] = term.yellow\n colors['CMemP'] = term.yellow\n colors['CUser'] = term.bold_black # gray\n colors['CUtil'] = _conditional(lambda: int(self.entry['utilization.gpu']) < 30,\n term.green, term.bold_green)\n colors['CPowU'] = _conditional(lambda: float(self.entry['power.draw']) / self.entry['enforced.power.limit'] < 0.4,\n term.magenta, term.bold_magenta)\n colors['CPowL'] = term.magenta\n\n if not with_colors:\n for k in list(colors.keys()):\n colors[k] = ''\n\n def _repr(v, none_value='??'):\n if v is None: return none_value\n else: return str(v)\n\n # build one-line display information\n # we want power use optional, but if deserves being grouped with temperature and utilization\n reps = \"%(C1)s[{entry[index]}]%(C0)s %(CName)s{entry[name]:{gpuname_width}}%(C0)s |\" \\\n \"%(CTemp)s{entry[temperature.gpu]:>3}'C%(C0)s, %(CUtil)s{entry[utilization.gpu]:>3} %%%(C0)s\"\n\n if show_power:\n reps += \", %(CPowU)s{entry[power.draw]:>3}%(C0)s \"\n if show_power is True or 'limit' in show_power:\n reps += \"/ %(CPowL)s{entry[enforced.power.limit]:>3}%(C0)s \"\n reps += \"%(CPowL)sW%(C0)s\"\n else:\n reps += \"%(CPowU)sW%(C0)s\"\n\n reps += \" | %(C1)s%(CMemU)s{entry[memory.used]:>5}%(C0)s / %(CMemT)s{entry[memory.total]:>5}%(C0)s MB\"\n reps = (reps) % colors\n reps = reps.format(entry={k: _repr(v) for (k, v) in self.entry.items()},\n gpuname_width=gpuname_width)\n reps += \" |\"\n\n def process_repr(p):\n r = ''\n if not show_cmd or show_user:\n r += \"{CUser}{}{C0}\".format(_repr(p['username'], '--'), **colors)\n if show_cmd:\n if r: r += ':'\n r += \"{C1}{}{C0}\".format(_repr(p.get('command', p['pid']), '--'), **colors)\n\n if show_pid:\n r += (\"/%s\" % _repr(p['pid'], '--'))\n r += '({CMemP}{}M{C0})'.format(_repr(p['gpu_memory_usage'], '?'), **colors)\n 
return r\n\n if self.entry['processes'] is not None:\n for p in self.entry['processes']:\n reps += ' ' + process_repr(p)\n else:\n # None (not available)\n reps += ' (Not Supported)'\n\n fp.write(reps)\n return fp\n\n def jsonify(self):\n o = dict(self.entry)\n o['processes'] = [{k: v for (k, v) in p.items() if k != 'gpu_uuid'}\n for p in self.entry['processes']]\n return o\n\n\nclass GPUStatCollection(object):\n\n def __init__(self, gpu_list):\n self.gpus = gpu_list\n\n # attach additional system information\n self.hostname = platform.node()\n self.query_time = datetime.now()\n\n @staticmethod\n def new_query():\n \"\"\"Query the information of all the GPUs on local machine\"\"\"\n\n N.nvmlInit()\n\n def get_gpu_info(handle):\n \"\"\"Get one GPU information specified by nvml handle\"\"\"\n\n def get_process_info(nv_process):\n \"\"\"Get the process information of specific pid\"\"\"\n process = {}\n ps_process = psutil.Process(pid=nv_process.pid)\n process['username'] = ps_process.username()\n # cmdline returns full path; as in `ps -o comm`, get short cmdnames.\n _cmdline = ps_process.cmdline()\n if not _cmdline: # sometimes, zombie or unknown (e.g. [kworker/8:2H])\n process['command'] = '?'\n else:\n process['command'] = os.path.basename(_cmdline[0])\n # Bytes to MBytes\n process['gpu_memory_usage'] = int(nv_process.usedGpuMemory / 1024 / 1024)\n process['pid'] = nv_process.pid\n return process\n\n def _decode(b):\n if isinstance(b, bytes):\n return b.decode() # for python3, to unicode\n return b\n\n name = _decode(N.nvmlDeviceGetName(handle))\n uuid = _decode(N.nvmlDeviceGetUUID(handle))\n\n try:\n temperature = N.nvmlDeviceGetTemperature(handle, N.NVML_TEMPERATURE_GPU)\n except N.NVMLError:\n temperature = None # Not supported\n\n try:\n memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes\n except N.NVMLError:\n memory = None # Not supported\n\n try:\n utilization = N.nvmlDeviceGetUtilizationRates(handle)\n except N.NVMLError:\n utilization = None # Not supported\n\n try:\n power = N.nvmlDeviceGetPowerUsage(handle)\n except:\n power = None\n\n try:\n power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle)\n except:\n power_limit = None\n\n processes = []\n try:\n nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle)\n except N.NVMLError:\n nv_comp_processes = None # Not supported\n try:\n nv_graphics_processes = N.nvmlDeviceGetGraphicsRunningProcesses(handle)\n except N.NVMLError:\n nv_graphics_processes = None # Not supported\n\n if nv_comp_processes is None and nv_graphics_processes is None:\n processes = None # Not supported (in both cases)\n else:\n nv_comp_processes = nv_comp_processes or []\n nv_graphics_processes = nv_graphics_processes or []\n for nv_process in (nv_comp_processes + nv_graphics_processes):\n # TODO: could be more information such as system memory usage,\n # CPU percentage, create time etc.\n try:\n process = get_process_info(nv_process)\n processes.append(process)\n except psutil.NoSuchProcess:\n # TODO: add some reminder for NVML broken context\n # e.g. 
nvidia-smi reset or reboot the system\n pass\n\n index = N.nvmlDeviceGetIndex(handle)\n gpu_info = {\n 'index': index,\n 'uuid': uuid,\n 'name': name,\n 'temperature.gpu': temperature,\n 'utilization.gpu': utilization.gpu if utilization else None,\n 'power.draw': int(power / 1000) if power is not None else None,\n 'enforced.power.limit': int(power_limit / 1000) if power_limit is not None else None,\n # Convert bytes into MBytes\n 'memory.used': int(memory.used / 1024 / 1024) if memory else None,\n 'memory.total': int(memory.total / 1024 / 1024) if memory else None,\n 'processes': processes,\n }\n return gpu_info\n\n # 1. get the list of gpu and status\n gpu_list = []\n device_count = N.nvmlDeviceGetCount()\n\n for index in range(device_count):\n handle = N.nvmlDeviceGetHandleByIndex(index)\n gpu_info = get_gpu_info(handle)\n gpu_stat = GPUStat(gpu_info)\n gpu_list.append(gpu_stat)\n\n N.nvmlShutdown()\n return GPUStatCollection(gpu_list)\n\n def __len__(self):\n return len(self.gpus)\n\n def __iter__(self):\n return iter(self.gpus)\n\n def __getitem__(self, index):\n return self.gpus[index]\n\n def __repr__(self):\n s = 'GPUStatCollection(host=%s, [\\n' % self.hostname\n s += '\\n'.join(' ' + str(g) for g in self.gpus)\n s += '\\n])'\n return s\n\n # --- Printing Functions ---\n\n def print_formatted(self, fp=sys.stdout, force_color=False, no_color=False,\n show_cmd=False, show_user=False, show_pid=False,\n show_power=None, gpuname_width=16,\n show_header=True,\n ):\n # ANSI color configuration\n if force_color and no_color:\n raise ValueError(\"--color and --no_color can't be used at the same time\")\n\n if force_color:\n t_color = Terminal(kind='xterm-color', force_styling=True)\n elif no_color:\n t_color = Terminal(force_styling=None)\n else:\n t_color = Terminal() # auto, depending on isatty\n\n # header\n if show_header:\n time_format = locale.nl_langinfo(locale.D_T_FMT)\n\n header_msg = '{t.bold_white}{hostname}{t.normal} {timestr}'.format(**{\n 'hostname': self.hostname,\n 'timestr': self.query_time.strftime(time_format),\n 't': t_color,\n })\n\n fp.write(header_msg)\n fp.write('\\n')\n\n # body\n gpuname_width = max([gpuname_width] + [len(g.entry['name']) for g in self])\n for g in self:\n g.print_to(fp,\n show_cmd=show_cmd,\n show_user=show_user,\n show_pid=show_pid,\n show_power=show_power,\n gpuname_width=gpuname_width,\n term=t_color)\n fp.write('\\n')\n\n fp.flush()\n\n def jsonify(self):\n return {\n 'hostname': self.hostname,\n 'query_time': self.query_time,\n \"gpus\": [g.jsonify() for g in self]\n }\n\n def print_json(self, fp=sys.stdout):\n def date_handler(obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n else:\n raise TypeError(type(obj))\n\n o = self.jsonify()\n json.dump(o, fp, indent=4, separators=(',', ': '),\n default=date_handler)\n fp.write('\\n')\n fp.flush()\n\n\ndef new_query():\n '''\n Obtain a new GPUStatCollection instance by querying nvidia-smi\n to get the list of GPUs and running process information.\n '''\n return GPUStatCollection.new_query()\n","sub_path":"gpustat/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":14717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"293315180","text":"import json\n\ndef ext_pillar(minion_id,pillar,config):\n '''\n Return Pillar data\n '''\n comps = config.split()\n key = comps[0]\n url = comps[1]\n status = __salt__['http.query'](url, decode=True)\n mytime = json.loads(status['text'])\n return {key: 
mytime['seconds']}\n","sub_path":"custom/pillar/http_time.py","file_name":"http_time.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66209760","text":"import csv\nimport os\n\npath = \"/Users/bill/dev/breadlist\" # Set path of new directory here\nos.chdir(path) # changes the directory\nfrom classifieds.models import Province # imports the model\nwith open('provinces.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n p = Province(province_name=row['Province_name'])\n p.save()\n","sub_path":"csvloader.py","file_name":"csvloader.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481089840","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scipy.linalg import circulant\n\n\ndef to_conv_mat(permittivities, fourier_order):\n # FFT scaling\n # https://kr.mathworks.com/matlabcentral/answers/15770-scaling-the-fft-and-the-ifft#:~:text=the%20matlab%20fft%20outputs%202,point%20is%20the%20parseval%20equation.\n ff = 2 * fourier_order + 1\n\n if len(permittivities[0].shape) == 1: # 1D\n res = np.ndarray((len(permittivities), 2*fourier_order+1, 2*fourier_order+1)).astype('complex')\n\n # extend array\n if permittivities.shape[1] < 2 * ff + 1:\n n = (2 * ff + 1) // permittivities.shape[1]\n permittivities = np.repeat(permittivities, n+1, axis=1)\n\n for i, pmtvy in enumerate(permittivities):\n pmtvy_fft = np.fft.fftn(pmtvy / pmtvy.size)\n pmtvy_fft = np.fft.fftshift(pmtvy_fft)\n\n center = len(pmtvy_fft) // 2\n pmtvy_fft_cut = (pmtvy_fft[-ff + center: center+ff+1])\n A = np.roll(circulant(pmtvy_fft_cut.flatten()), (pmtvy_fft_cut.size + 1) // 2, 0)\n res[i] = A[:2*fourier_order+1, :2*fourier_order+1]\n # res[i] = circulant(pmtvy_fft_cut)\n\n else: # 2D\n res = np.ndarray((len(permittivities), ff ** 2, ff ** 2)).astype('complex')\n\n # extend array\n if permittivities.shape[0] < 2 * ff + 1:\n n = (2 * ff + 1) // permittivities.shape[1]\n permittivities = np.repeat(permittivities, n+1, axis=0)\n if permittivities.shape[1] < 2 * ff + 1:\n n = (2 * ff + 1) // permittivities.shape[1]\n permittivities = np.repeat(permittivities, n+1, axis=1)\n\n for i, pmtvy in enumerate(permittivities):\n\n pmtvy_fft = np.fft.fftn(pmtvy / pmtvy.size)\n pmtvy_fft = np.fft.fftshift(pmtvy_fft)\n\n center = np.array(pmtvy_fft.shape) // 2\n\n conv_idx = np.arange(ff-1, -ff, -1)\n conv_idx = circulant(conv_idx)[ff-1:, :ff]\n\n conv_i = np.repeat(conv_idx, ff, axis=1)\n conv_i = np.repeat(conv_i, [ff] * ff, axis=0)\n\n conv_j = np.tile(conv_idx, (ff, ff))\n res[i] = pmtvy_fft[center[0] + conv_i, center[1] + conv_j]\n\n # import matplotlib.pyplot as plt\n #\n # plt.figure()\n # plt.imshow(abs(res[0]), cmap='jet')\n # plt.colorbar()\n # plt.show()\n #\n return res\n\npi = np.pi\n\nn_I = 1\nn_II = 1\n\ntheta = 0.001 * pi / 180\nphi = 0 * pi / 180\npsi = 0 * pi / 180\n\nfourier_order = 3\nff = 2 * fourier_order + 1\ncenter = ff * ff\n\nperiod = (0.7, 0.7)\n\nwls = np.linspace(0.5, 2.3, 10)\n\nI = np.eye(ff ** 2)\nO = np.zeros((ff**2, ff**2))\n\nspectrum_r, spectrum_t = [], []\n\n# permittivity in grating layer\nthickness = [0.46, 0.66]\n\npermt = np.ones((1024, 1024))\npermt[:, :307] = 3.48**2\npermt = np.array([permt, permt])\n\nE_conv_all = to_conv_mat(permt, fourier_order)\n\noneover_permt = 1 / permt\noneover_E_conv_all = to_conv_mat(oneover_permt, fourier_order)\n\nfourier_indices = 
np.arange(-fourier_order, fourier_order + 1)\n\ndelta_i0 = np.zeros(ff**2).reshape((-1, 1))\ndelta_i0[ff**2//2, 0] = 1\n\nfor wl in wls:\n k0 = 2 * np.pi / wl\n\n kx_vector = k0 * (n_I*np.sin(theta)*np.cos(phi) - fourier_indices * (wl/period[0])).astype('complex')\n ky_vector = k0 * (n_I*np.sin(theta)*np.sin(phi) - fourier_indices * (wl/period[1])).astype('complex')\n\n Kx = np.diag(np.tile(kx_vector, ff).flatten()) / k0\n Ky = np.diag(np.tile(ky_vector.reshape((-1, 1)), ff).flatten()) / k0\n\n k_I_z = (k0**2 * n_I ** 2 - kx_vector**2 - ky_vector.reshape((-1, 1))**2)**0.5\n k_II_z = (k0**2 * n_II ** 2 - kx_vector**2 - ky_vector.reshape((-1, 1))**2)**0.5\n\n k_I_z = k_I_z.flatten().conjugate()\n k_II_z = k_II_z.flatten().conjugate()\n\n varphi = np.arctan(ky_vector.reshape((-1, 1))/kx_vector).flatten()\n\n Y_I = np.diag(k_I_z / k0)\n Y_II = np.diag(k_II_z / k0)\n\n Z_I = np.diag(k_I_z / (k0 * n_I ** 2))\n Z_II = np.diag(k_II_z / (k0 * n_II ** 2))\n\n big_F = np.block([[I, O], [O, 1j * Z_II]])\n big_G = np.block([[1j * Y_II, O], [O, I]])\n\n big_T = np.eye(ff**2*2)\n\n for E_conv, oneover_E_conv, d in zip(E_conv_all[::-1], oneover_E_conv_all[::-1], thickness[::-1]):\n\n E_i = np.linalg.inv(E_conv)\n\n B = Kx @ E_i @ Kx - I\n D = Ky @ E_i @ Ky - I\n oneover_E_conv_i = np.linalg.inv(oneover_E_conv)\n\n S2_from_S = np.block(\n [\n [Ky ** 2 + B @ oneover_E_conv_i, Kx @ (E_i @ Ky @ E_conv - Ky)],\n [Ky @ (E_i @ Kx @ oneover_E_conv_i - Kx), Kx ** 2 + D @ E_conv]\n ])\n\n eigenvalues, W = np.linalg.eig(S2_from_S)\n eigenvalues += 0j # to get positive square root\n\n q = eigenvalues ** 0.5\n\n q_1 = q[:center]\n q_2 = q[center:]\n\n Q = np.diag(q)\n Q_i = np.linalg.inv(Q)\n U1_from_S = np.block(\n [\n [-Kx @ Ky, Kx ** 2 - E_conv],\n [oneover_E_conv_i - Ky ** 2, Ky @ Kx]\n ]\n )\n V = U1_from_S @ W @ Q_i\n\n W_11 = W[:center, :center]\n W_12 = W[:center, center:]\n W_21 = W[center:, :center]\n W_22 = W[center:, center:]\n\n V_11 = V[:center, :center]\n V_12 = V[:center, center:]\n V_21 = V[center:, :center]\n V_22 = V[center:, center:]\n\n X_1 = np.diag(np.exp(-k0*q_1*d))\n X_2 = np.diag(np.exp(-k0*q_2*d))\n\n F_c = np.diag(np.cos(varphi))\n F_s = np.diag(np.sin(varphi))\n\n W_ss = F_c @ W_21 - F_s @ W_11\n W_sp = F_c @ W_22 - F_s @ W_12\n W_ps = F_c @ W_11 + F_s @ W_21\n W_pp = F_c @ W_12 + F_s @ W_22\n\n V_ss = F_c @ V_11 + F_s @ V_21\n V_sp = F_c @ V_12 + F_s @ V_22\n V_ps = F_c @ V_21 - F_s @ V_11\n V_pp = F_c @ V_22 - F_s @ V_12\n\n big_I = np.eye(2*(len(I)))\n big_X = np.block([[X_1, O], [O, X_2]])\n big_W = np.block([[W_ss, W_sp], [W_ps, W_pp]])\n big_V = np.block([[V_ss, V_sp], [V_ps, V_pp]])\n\n big_W_i = np.linalg.inv(big_W)\n big_V_i = np.linalg.inv(big_V)\n\n big_A = 0.5 * (big_W_i @ big_F + big_V_i @ big_G)\n big_B = 0.5 * (big_W_i @ big_F - big_V_i @ big_G)\n\n big_A_i = np.linalg.inv(big_A)\n\n big_F = big_W @ (big_I + big_X @ big_B @ big_A_i @ big_X)\n big_G = big_V @ (big_I - big_X @ big_B @ big_A_i @ big_X)\n\n big_T = big_T @ big_A_i @ big_X\n\n big_F_11 = big_F[:center, :center]\n big_F_12 = big_F[:center, center:]\n big_F_21 = big_F[center:, :center]\n big_F_22 = big_F[center:, center:]\n\n big_G_11 = big_G[:center, :center]\n big_G_12 = big_G[:center, center:]\n big_G_21 = big_G[center:, :center]\n big_G_22 = big_G[center:, center:]\n\n # Final Equation in form of AX=B\n final_A = np.block(\n [\n [I, O, -big_F_11, -big_F_12],\n [O, -1j*Z_I, -big_F_21, -big_F_22],\n [-1j*Y_I, O, -big_G_11, -big_G_12],\n [O, I, -big_G_21, -big_G_22],\n ]\n )\n\n final_B = np.block([\n 
[-np.sin(psi)*delta_i0],\n [-np.cos(psi) * np.cos(theta) * delta_i0],\n [-1j*np.sin(psi) * n_I * np.cos(theta) * delta_i0],\n [1j*n_I*np.cos(psi) * delta_i0]\n ]\n )\n\n final_X = np.linalg.inv(final_A) @ final_B\n\n R_s = final_X[:ff**2, :].flatten()\n R_p = final_X[ff**2:2*ff**2, :].flatten()\n\n big_T = big_T @ final_X[2*ff**2:, :]\n T_s = big_T[:ff**2, :].flatten()\n T_p = big_T[ff**2:, :].flatten()\n\n DEri = R_s*np.conj(R_s) * np.real(k_I_z/(k0*n_I*np.cos(theta))) \\\n + R_p*np.conj(R_p) * np.real((k_I_z/n_I**2)/(k0*n_I*np.cos(theta)))\n\n DEti = T_s*np.conj(T_s) * np.real(k_II_z/(k0*n_I*np.cos(theta))) \\\n + T_p*np.conj(T_p) * np.real((k_II_z/n_II**2)/(k0*n_I*np.cos(theta)))\n\n spectrum_r.append(DEri.reshape((ff, ff)).real)\n spectrum_t.append(DEti.reshape((ff, ff)).real)\n\nplt.plot(wls, np.array(spectrum_r).sum(axis=(1, 2)))\nplt.plot(wls, np.array(spectrum_t).sum(axis=(1, 2)))\nplt.title(f'Lalanne 2D, f order of {fourier_order}')\n\nplt.show()\n","sub_path":"examples/rcwa/lalanne_2D.py","file_name":"lalanne_2D.py","file_ext":"py","file_size_in_byte":7999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310897515","text":"#!/usr/bin/python3\nimport chess.pgn\n\n\ndef main():\n # get from https://raw.githubusercontent.com/niklasf/python-chess/master/data/pgn/kasparov-deep-blue-1997.pgn\n pgn = open(\"data/kasparov-deep-blue-1997.pgn\")\n running = True\n\n while running:\n pgn_game = chess.pgn.read_game(pgn)\n \n if pgn_game:\n board = pgn_game.board()\n\n for move in pgn_game.mainline_moves():\n print(board)\n print(\"###################\")\n board.push(move)\n else:\n running = False\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train_png.py","file_name":"train_png.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"193616346","text":"import logging, spacy\nimport re\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.symbols import ORTH, LEMMA, POS\nfrom fuzzywuzzy import fuzz\n\n\ndef create_tokenizer(nlp):\n prefix_re = spacy.util.compile_prefix_regex(nlp.Defaults.prefixes)\n suffix_re = spacy.util.compile_suffix_regex(nlp.Defaults.suffixes)\n #infix_re = spacy.util.compile_infix_regex(nlp.Defaults.infixes)\n infix_re = re.compile(r'\\w+(?:--\\w+)+')\n\n tokenizer = Tokenizer(nlp.vocab,\n nlp.Defaults.tokenizer_exceptions,\n prefix_re.search,\n suffix_re.search,\n infix_re.finditer,\n token_match = None)\n make_doc = lambda text: tokenizer(text)\n return make_doc\n\nlogging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')\nlogging.debug('loading spacy...')\nnlp = spacy.load('en', create_make_doc=create_tokenizer, add_vectors=False)\nnlp.tokenizer.add_special_case(u'steaks',[{\n ORTH: u'steaks',\n LEMMA: u'steak',\n POS: u'NOUN'}])\n\ndef isNoun(token):\n pos = token.pos_\n return pos == 'NOUN' or pos == 'PROPN' or pos == 'ADJ'\n\ndef find_methods_and_ingredients(tokenized):\n methods = []\n ingredients = []\n i, count = 0, len(tokenized)\n while i < count :\n token = tokenized[i]\n if token.tag_ == 'VB':\n methods.append(token.orth_)\n i += 1\n elif isNoun(token):\n res, inc = lookaheadNoun(tokenized[i:])\n ingredients.append(res)\n i += inc\n else:\n i += 1\n return methods, ingredients\n \ndef find_tools(tokenized, ingredients_word):\n tools = set()\n i, count = 0, len(tokenized)\n while i < count :\n token = tokenized[i]\n # if token.orth_ == 'Preheat':\n # print tokenized[i+1].orth_\n if 
token.orth_ == 'into' or token.orth_ == 'in' or token.orth_ == 'with' or token.orth_ == 'Preheat' or token.orth_ == 'Heat':\n i += 1 \n if tokenized[i].pos_ == 'DET':\n i += 1\n res, inc = lookaheadNoun(tokenized[i:])\n if res not in ingredients_word:\n tools.add(res)\n i += inc\n else:\n i += 1\n return tools\n\ndef lookaheadNoun(text):\n i, count = 0, len(text)\n chunk = ''\n while i < count:\n token = text[i]\n if isNoun(token):\n chunk += token.orth_ + ' '\n i += 1 \n else:\n break\n\n \n return chunk, i\n\n\nclass Step(object):\n def __init__(self, text):\n self.raw_text=text\n self.method=list()\n self.ingredients=set()\n self.tools=set()\n # self.implied_tools=list()\n # self.time=None\n self.next=None\n \n # def parseText(self): \n\nclass RecipeParser(object):\n\n def parseSteps(self, basic_ingredients, steps):\n # steps=self.recipe_info['steps']\n raw_steps = list()\n \n for step in steps:\n big_step = nlp(unicode(step))\n small_steps = [sent.string.strip() for sent in big_step.sents]\n raw_steps += small_steps\n # print raw_steps\n first_step = None\n prev_step = None\n\n for step in raw_steps:\n curr_step = Step(step) \n parsed = nlp(unicode(step))\n methods, nouns = find_methods_and_ingredients(parsed)\n curr_step.methods = methods\n ingredients_in_instructions = set()\n for bad in nouns:\n for good in basic_ingredients:\n rat = fuzz.token_set_ratio(bad, good)\n if rat>80: \n ingredients_in_instructions.add(bad)\n curr_step.ingredients.add(good)\n\n tools_in_instruction = find_tools(parsed, ingredients_in_instructions)\n curr_step.tools = tools_in_instruction\n # print \"----methods\",\n # print curr_step.methods\n # print \"----ingredients\",\n # print curr_step.ingredients\n # print \"----tools\",\n # print curr_step.tools\n if prev_step == None:\n first_step = curr_step\n else:\n prev_step.next = curr_step\n prev_step = curr_step\n return first_step\n\n\n\ndef main():\n parser=RecipeParser()\n basic_ingredients = ['butter','bone-in chicken breast','potatoes','carrots', 'paprika','garlic powder','salt', 'pepper']\n\n with open('chicken_breast', 'r') as f:\n steps = [line.strip() for line in f.readlines()]\n parser.parseSteps(basic_ingredients, steps)\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"parsley.py","file_name":"parsley.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548840914","text":"import time, sys, os, threading\nimport serial, io\nimport socket\n\ncom = serial.Serial('COM4', 9600)\nmsg1 = b\"\"\nUDP_IP = \"192.168.1.104\"\nUDP_PORT = 15400\n\nwhile True:\n msg1 = com.readline(com.inWaiting())\n print(msg1)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg1, (UDP_IP, UDP_PORT))\n\n time.sleep(1)\n","sub_path":"Firmware/Test_code/python_test_code/serial_wifi_sender.py","file_name":"serial_wifi_sender.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258132016","text":"from iBott import ChromeBrowser, Excel\nfrom iBott.robot_activities import Robot, Robotmethod, get_all_Methods\nfrom google_search import Keywords\nfrom iBott.system_activities import saveFileFromOrchestrator\nimport iRobot.settings as settings\n\n\nclass Main(Robot):\n def __init__(self, args):\n self.methods = get_all_Methods(self)\n if args is not None:\n self.robotId = args['RobotId']\n self.ExecutionId = args['ExecutionId']\n self.url = args['url']\n 
self.username = args['username']\n self.password = args['password']\n self.robotParameters = args['params']\n super().__init__(robotId=self.robotId, ExecutionId=self.ExecutionId, url=self.url,\n username=self.username, password=self.password,\n params=self.robotParameters)\n else:\n super().__init__()\n\n @Robotmethod\n def init(self):\n self.browser = ChromeBrowser(undetectable=True)\n self.browser.load_extension(settings.EXTENION_PATH)\n self.browser.open()\n self.browser.maximize_window()\n self.keywords =[]\n self.keyword = Keywords(self)\n try:\n if len(self.findQueuesByName(\"KW5\")) == 0:\n self.queue = self.createQueue(\"KW5\")\n else:\n self.queue = self.findQueuesByName(\"KW5\")[0]\n except:\n pass\n\n\n @Robotmethod\n def cleanup(self):\n \"\"\"Clean system before executing the robot\"\"\"\n\n\n @Robotmethod\n def start(self):\n \"\"\"Init variables, instance objects and start the applications you are going to work with\"\"\"\n self.read_input()\n for keyword in self.keywords:\n self.queue.createItem({'Keyword': keyword})\n\n\n @Robotmethod\n def process(self):\n \"\"\"Run robot process\"\"\"\n Qitem = self.queue.getNextItem()\n if Qitem:\n try:\n Qitem.setItemAsWorking()\n k = Qitem.value['Keyword']\n self.Log.info(\"Processing : \" + k)\n self.keyword.get_search_data(Qitem)\n self.keyword.get_page_data()\n self.keyword.store_data()\n Qitem.setItemAsOk()\n except:\n Qitem.setItemAsFail()\n self.process()\n\n\n @Robotmethod\n def end(self):\n \"\"\"Finish robot execution, cleanup environment, close applications and send reports\"\"\"\n self.browser.close()\n\n def read_input(self):\n '''Privte method reads Excel sent from Orchestrator'''\n file = saveFileFromOrchestrator(self.robotParameters['file-1632235752781'], settings.FILES_PATH)\n excel = Excel(file)\n i=1\n while True:\n data = excel.readCell(f\"A{i}\")\n if data is None:\n break\n else:\n self.keywords.append(data)\n i += 1\n\n\n\n\n","sub_path":"iRobot/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"628806536","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ncredit = pd.read_csv(\"C:\\\\pyproject\\\\py4pro\\\\transformed.csv\")\n\ncol_dicts = {}\ncols = ['status','credit_history', 'purpose', 'savings', 'employment_duration','installment_rate', 'personal_status_sex', 'other_debtors',\n 'present_residence','property','other_installment_plans','housing','number_credits','job','people_liable','telephone','foreign_worker']\n\ncol_dicts = {\n 'status': {'0<= ... < 200 DM': 0,\n '... < 0 DM': 1,\n '... >= 200 DM / salary for at least 1 year': 2,\n 'no checking account': 3},\n 'credit_history': {'critical account/other credits elsewhere': 0,\n 'delay in paying off in the past': 1,\n 'no credits taken/all credits paid back duly': 2,\n 'all credits at this bank paid back duly': 3,\n 'existing credits paid back duly till now': 4},\n 'purpose': {'business': 0,\n 'car (new)': 1,\n 'car (used)': 2,\n 'domestic appliances': 3,\n 'vacation': 4,\n 'furniture/equipment': 5,\n 'others': 6,\n 'radio/television': 7,\n 'repairs': 8,\n 'retraining': 9},\n 'savings': {'100 <= ... < 500 DM': 0,\n '500 <= ... < 1000 DM': 1,\n '... < 100 DM': 2,\n '... >= 1000 DM': 3,\n 'unknown/no savings account': 4},\n 'employment_duration': {'< 1 yr': 0,\n '1 <= ... < 4 yrs': 1,\n '4 <= ... < 7 yrs': 2,\n '>= 7 yrs': 3,\n 'unemployed': 4},\n 'installment_rate': {'< 20': 0,\n '25 <= ... 
< 35': 1,\n        '20 <= ... < 25': 2,\n        '>= 35': 3},\n    'personal_status_sex': {'male : married/widowed': 0,\n        'female : non-single or male : single': 1,\n        'female : single': 2,\n        'male : divorced/separated': 3},\n    'other_debtors': {'co-applicant': 0,\n        'guarantor': 1,\n        'none': 2},\n    'present_residence': {'>= 7 yrs': 0,\n        '1 <= ... < 4 yrs': 1,\n        '4 <= ... < 7 yrs': 2,\n        '< 1 yr': 3},\n    'property': {'building soc. savings agr./life insurance': 0,\n        'car or other': 1,\n        'real estate': 2,\n        'unknown / no property': 3},\n    'other_installment_plans': {'bank': 0,\n        'none': 1,\n        'stores': 2},\n    'housing': {'for free': 0,\n        'own': 1,\n        'rent': 2},\n    'number_credits': {'1': 0,\n        '2-3': 1,\n        '4-5': 2,\n        '>= 6': 3},\n    'job': {'manager/self-empl./highly qualif. employee': 0,\n        'skilled employee/official': 1,\n        'unemployed/unskilled - non-resident': 2,\n        'unskilled - resident': 3},\n    'people_liable': {'0 to 2': 0,\n        '3 or more': 1},\n    'telephone': {'no': 0,\n        'yes (under customer name)': 1},\n    'foreign_worker': {'no': 0,\n        'yes': 1}}\n\nfor col in cols:\n    credit[col] = credit[col].map(col_dicts[col])\n\ncredit.status.value_counts()\n","sub_path":".history/py4pro/decisiontree_20210421150205.py","file_name":"decisiontree_20210421150205.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529786410","text":"#!/usr/bin/env python3\n\nfrom power_api import SixfabPower, Definition, Event\nimport time\nimport schedule\n\napi = SixfabPower()\n\ndef job():\n    try:\n        print(str(api.get_working_mode()))\n    except:\n        print(\"An exception occurred in get_working_mode\")\n\n# schedule.every(1).minutes.do(job)\n# run job() every 1 to 2 minutes\nschedule.every(1).to(2).minutes.do(job)\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(7)\n","sub_path":"webRTC_Control/pandaServer/python/sixfab_working_mode.py","file_name":"sixfab_working_mode.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298509955","text":"class LinkTarget(object):\r\n    \"\"\"\r\n    Represents an element on a page that can be linked to from other documents or other places in the same document.\r\n\r\n    LinkTarget()\r\n    \"\"\"\r\n    Name = property(lambda self: object(), lambda self, v: None, lambda self: None)\r\n    \"\"\"Gets or sets the name of the element that this System.Windows.Documents.LinkTarget identifies as a linkable element.\r\n\r\nGet: Name(self: LinkTarget) -> str\r\n\r\nSet: Name(self: LinkTarget) = value\r\n\"\"\"\r\n","sub_path":"stubs.min/System/Windows/Documents/__init___parts/LinkTarget.py","file_name":"LinkTarget.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333555242","text":"from flask import Blueprint, request, make_response, jsonify, Response,\\\n    redirect, current_app\nimport json\nimport numpy as np\n\n\nfrom pychunkedgraph.meshing import meshgen, meshgen_utils\nfrom pychunkedgraph.app import app_utils\nfrom pychunkedgraph.backend import chunkedgraph\n\n# os.environ['TRAVIS_BRANCH'] = \"IDONTKNOWWHYINEEDTHIS\"\n\n__version__ = '0.1.108'\nbp = Blueprint('pychunkedgraph_meshing', __name__, url_prefix=\"/meshing\")\n\n# -------------------------------\n# ------ Access control and index\n# -------------------------------\n\n@bp.route('/')\n@bp.route(\"/index\")\ndef index():\n    return \"Meshing Server -- \" + __version__\n\n\n@bp.route(\"/home\")  # route path missing in the source; \"/home\" assumed from the function name\ndef home():\n    resp = make_response()\n    resp.headers['Access-Control-Allow-Origin'] = '*'\n    acah = \"Origin, X-Requested-With, Content-Type, Accept\"\n    resp.headers[\"Access-Control-Allow-Headers\"] = acah\n    resp.headers[\"Access-Control-Allow-Methods\"] = \"POST, GET, OPTIONS\"\n    resp.headers[\"Connection\"] = \"keep-alive\"\n    return resp\n\n\n# ------------------------------------------------------------------------------\n\ndef _mesh_lvl2_nodes(serialized_cg_info, lvl2_nodes):\n    cg = chunkedgraph.ChunkedGraph(**serialized_cg_info)\n\n    for lvl2_node in lvl2_nodes:\n        print(lvl2_node)\n        meshgen.mesh_lvl2_preview(cg, lvl2_node, supervoxel_ids=None,\n                                  cv_path=None, cv_mesh_dir=None, mip=2,\n                                  simplification_factor=999999,\n                                  max_err=40, parallel_download=1,\n                                  verbose=True,\n                                  cache_control='no-cache')\n\n    return Response(status=200)\n\n\n@bp.route('/1.0/<table_id>/<node_id>/mesh_preview', methods=['POST', 'GET'])\ndef handle_preview_meshes(table_id, node_id):\n    if len(request.data) > 0:\n        data = json.loads(request.data)\n    else:\n        data = {}\n\n    node_id = np.uint64(node_id)\n\n    cg = app_utils.get_cg(table_id)\n\n    if \"seg_ids\" in data:\n        seg_ids = data[\"seg_ids\"]\n\n        chunk_id = cg.get_chunk_id(node_id)\n        supervoxel_ids = [cg.get_node_id(seg_id, chunk_id)\n                          for seg_id in seg_ids]\n    else:\n        supervoxel_ids = None\n\n    meshgen.mesh_lvl2_preview(cg, node_id, supervoxel_ids=supervoxel_ids,\n                              cv_path=None, cv_mesh_dir=None, mip=2,\n                              simplification_factor=999999,\n                              max_err=40, parallel_download=1, verbose=True,\n                              cache_control='no-cache')\n    return Response(status=200)\n\n\n## VALIDFRAGMENTS --------------------------------------------------------------\n\n@bp.route('/1.0/<table_id>/<node_id>/validfragments', methods=['POST', 'GET'])\ndef handle_valid_frags(table_id, node_id):\n    cg = app_utils.get_cg(table_id)\n\n    seg_ids = meshgen_utils.get_highest_child_nodes_with_meshes(\n        cg, np.uint64(node_id), stop_layer=1, verify_existence=True)\n\n    return app_utils.tobinary(seg_ids)\n\n\n## MANIFEST --------------------------------------------------------------------\n\n@bp.route('/1.0/<table_id>/manifest/<node_id>:0', methods=['GET'])\ndef handle_get_manifest(table_id, node_id):\n    verify = request.args.get('verify', False)\n    verify = verify in ['True', 'true', '1', True]\n\n    # TODO: Read this from config\n    MESH_MIP = 2\n\n    cg = app_utils.get_cg(table_id)\n    seg_ids = meshgen_utils.get_highest_child_nodes_with_meshes(\n        cg, np.uint64(node_id), stop_layer=2, verify_existence=verify)\n\n    filenames = [meshgen_utils.get_mesh_name(cg, s, MESH_MIP) for s in seg_ids]\n\n    return jsonify(fragments=filenames)\n","sub_path":"pychunkedgraph/app/meshing_app_blueprint.py","file_name":"meshing_app_blueprint.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"232962911","text":"import datetime\nimport os\nimport random\nfrom string import ascii_lowercase, digits\nimport sys\n\nfrom dateutil.parser import parse\n\n\nmigration_scripts_location = 'migrations/versions'\n\n\ndef get_most_recent_migration_hash():\n    migrations = [os.path.join(migration_scripts_location, file_) for file_ in\n                  os.listdir(migration_scripts_location) if\n                  file_.endswith('.py')]\n\n    if not migrations: return None\n\n    most_recent = datetime.datetime(1950, 1, 1)\n    for migration in migrations:\n        with open(migration, 'r') as fin:\n            for line in fin.readlines():\n                if 'Create Date: ' in line:\n                    date_line = line\n                    break\n        date = date_line.split('Date: ')[1].rstrip()\n        date = parse(date)\n
if date > most_recent:\n most_recent, the_hash = date, migration.split('_')[0]\n return os.path.basename(the_hash)\n\n\ndef generate_migration_hash():\n return ''.join(random.choice(ascii_lowercase + digits) for i in range(12))\n\n\ndef make_filename(this_migration_hash, description):\n description = add_underscores_for_spaces(description)\n return f'{this_migration_hash}_{description[:40]}.py'\n\n\ndef add_underscores_for_spaces(description):\n return description.replace(' ', '_')\n\n\ndef create_migration_script(creation_date, \n most_recent_migration_hash,\n this_migration_hash,\n description):\n filename = make_filename(this_migration_hash, description.replace('.\\n',''))\n filepath = os.path.join(migration_scripts_location, filename)\n print(f'Generating {filepath}...')\n with open(f'{filepath}', 'w') as fout:\n fout.write(f'\"\"\"{description}')\n fout.write('\\n')\n fout.write('\\n')\n fout.write(f'Revision ID: {this_migration_hash}')\n fout.write('\\n')\n fout.write(f'Revises: {most_recent_migration_hash}')\n fout.write('\\n')\n fout.write(f'Create Date: {creation_date}')\n fout.write('\\n')\n fout.write('\\n')\n fout.write(f'\"\"\"')\n fout.write('\\n')\n fout.write('\\n')\n fout.write(f'revision = \\'{this_migration_hash}\\'')\n fout.write('\\n')\n fout.write(f'down_revision = \\'{most_recent_migration_hash}\\'')\n fout.write('\\n')\n fout.write('\\n')\n fout.write('from alembic import op')\n fout.write('\\n')\n fout.write('import sqlalchemy as sa')\n fout.write('\\n')\n fout.write('from sqlalchemy.dialects import mysql')\n fout.write('\\n')\n fout.write('\\n')\n fout.write('def upgrade():')\n fout.write('\\n')\n fout.write(' pass')\n fout.write('\\n')\n fout.write('\\n')\n fout.write('\\n')\n fout.write('def downgrade():')\n fout.write('\\n')\n fout.write(' pass')\n print('done')\n\n\ndef main():\n most_recent_migration_hash = get_most_recent_migration_hash()\n this_migration_hash = generate_migration_hash()\n creation_date = str(datetime.datetime.now())\n if len(sys.argv) > 1:\n desc_arg_num = 2 if sys.argv[1] == '-m' else 1\n description = '.\\n'.join(sys.argv[desc_arg_num].split('.'))\n else:\n description = ''\n create_migration_script(creation_date, \n most_recent_migration_hash,\n this_migration_hash,\n description)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"make_blank_migration.py","file_name":"make_blank_migration.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115148028","text":"import requests\nimport json\nimport time\nimport configparser\n\nURL = \"https://us.api.blizzard.com\"\n\nPARSER = configparser.ConfigParser()\nPARSER.read('.env')\n\ndef characters():\n\tAPI_HTTP_PATH = \"/wow/user/characters\"\n\tOAUTH_TOKEN = PARSER.get('codeflow', 'token')\n\tHEADER = f\"Bearer {OAUTH_TOKEN}\"\n\tAUTH_HEADER = {'Authorization': HEADER}\n\n\tFULL_URL = f\"{URL}{API_HTTP_PATH}\"\n\tRESULT = requests.get(FULL_URL, headers=AUTH_HEADER).json()\n\n\tprint(json.dumps(RESULT, indent=2, sort_keys=True))\n\n###########################################################################\n###########################################################################\n\ncharacters()\n","sub_path":"community.py","file_name":"community.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50800637","text":"class Team:\n def __init__(self, id, full_name, abbr, nick_name, city):\n self.id 
= id\n        self.full_name = full_name\n        self.abbr = abbr\n        self.nick_name = nick_name\n        self.city = city\n        self.conference = None\n        self.division = None\n        self.team_code = None\n        self.wins = None\n        self.loses = None\n        self.pct = None\n\n    def add_common_info(self, info):\n        self.conference = info[\"TEAM_CONFERENCE\"]\n        self.division = info[\"TEAM_DIVISION\"]\n        self.team_code = info[\"TEAM_CODE\"]\n        self.wins = info[\"W\"]\n        self.loses = info[\"L\"]\n        self.pct = info[\"PCT\"]\n","sub_path":"controller/data/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31355471","text":"import sys\nimport math\n\ndef sizechk(a, b):\n\tif a.dim != b.dim:\n\t\traise TypeError('Tuple length mismatch')\n\nclass Tupe:\n\tdef __init__(self, b):\n\t\tif isinstance(b, (list, tuple)):\n\t\t\tself.dim = len(b)\n\t\t\tself.dat = b\n\t\telse:\n\t\t\tself.dim = b\n\t\t\tself.dat = [None]*b\n\n\tdef __len__(self):\n\t\treturn self.dim\n\t#def __getitem__(self, ind):\n\t#\treturn self.dat[ind]\n\t#def __setitem__(self, ind, val):\n\t#\tself.dat[ind] = val\n\n\t#def __add__(self, tup):\n\t#\tsizechk(self, tup)\n\t#\tret = Tupe(self.dim)\n\t#\tfor i in range(0, self.dim):\n\t#\t\tret[i] = self[i] + tup[i]\n\t#\treturn ret\n\n\t#def __sub__(self, tup):\n\t#\tsizechk(self, tup)\n\t#\tret = Tupe(self.dim)\n\t#\tfor i in range(0, self.dim):\n\t#\t\tret[i] = self[i] - tup[i]\n\t#\treturn ret\n\n\tdef __mul__(self, tup):\n\t\t# dot product for two tuples, elementwise scaling for a scalar\n\t\tif isinstance(tup, Tupe) and isinstance(self, Tupe):\n\t\t\tsizechk(self, tup)\n\t\t\tret = self[0]*tup[0]\n\t\t\tfor i in range(1, self.dim):\n\t\t\t\tret += self[i]*tup[i]\n\t\t\treturn ret\n\t\telif not isinstance(tup, Tupe):\n\t\t\tret = Tupe(self.dim)\n\t\t\tret[0] = self[0]*tup\n\t\t\tfor i in range(1, self.dim):\n\t\t\t\tret[i] = tup*self[i]\n\t\t\treturn ret\n\nclass ThreeTup(Tupe):\n\tdef __init__(self, lst=[None]*3):\n\t\tif len(lst) != 3:\n\t\t\traise TypeError('Input list is not of length 3')\n\t\t#Tupe.__init__(self, lst)\n\t\tself.dat = lst\n\t\tself.dim = 3\n\tdef __setitem__(self, ind, val):\n\t\tself.dat[ind] = val\n\tdef __getitem__(self, ind):\n\t\treturn self.dat[ind]\n\tdef __add__(self, tup):\n\t\tif not isinstance(tup, ThreeTup):\n\t\t\traise TypeError('Input argument is not a three-tuple')\n\t\tret = ThreeTup([0, 0, 0])\n\t\tfor i in range(0, 3):\n\t\t\tret[i] = self[i] + tup[i]\n\t\treturn ret\n\tdef __sub__(self, tup):\n\t\tif not isinstance(tup, ThreeTup):\n\t\t\traise TypeError('Input argument is not a three-tuple')\n\t\tret = ThreeTup([0, 0, 0])\n\t\tfor i in range(0, 3):\n\t\t\tret[i] = self[i] - tup[i]\n\t\treturn ret\n\tdef __floordiv__(self, tup):\n\t\t# cross product, written as a // b\n\t\tif not isinstance(tup, ThreeTup):\n\t\t\traise TypeError('Input argument is not a three-tuple')\n\t\tret = ThreeTup()\n\t\tfor i in range(0, 3):\n\t\t\tind = [(i+1) % 3, (i+2) % 3]\n\t\t\tret[i] = self[ind[0]]*tup[ind[1]] - self[ind[1]]*tup[ind[0]]\n\t\treturn ret\n\tdef __abs__(self):\n\t\t# normalize in place (rebinding the local name self would have no effect)\n\t\tnorm = math.sqrt(self*self)\n\t\tif norm != 0.0:\n\t\t\tself.dat = (self*(1/norm)).dat\n\n\tdef __mul__(self, tup):\n\t\tif isinstance(tup, ThreeTup) and isinstance(self, ThreeTup):\n\t\t\tsizechk(self, tup)\n\t\t\tret = self[0]*tup[0]\n\t\t\tfor i in range(1, self.dim):\n\t\t\t\tret += self[i]*tup[i]\n\t\t\treturn ret\n\t\telif not isinstance(tup, ThreeTup):\n\t\t\tret = ThreeTup()\n\t\t\tret[0] = self[0]*tup\n\t\t\tfor i in range(1, self.dim):\n\t\t\t\tret[i] = tup*self[i]\n\t\t\treturn
ret\n\n","sub_path":"Detector_coarse/ThreeTup.py","file_name":"ThreeTup.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421806306","text":"class Solution(object):\n    def letterCombinations(self, digits):\n        \"\"\"\n        :type digits: str\n        :rtype: List[str]\n        \"\"\"\n        letter_map = [\"0\", \"1\", \"abc\", \"def\", \"ghi\", \"jkl\", \"mno\", \"pqrs\", \"tuv\", \"wxyz\"]\n        result = []\n        if len(digits) == 0:\n            return result\n\n        result.append(\"\")\n\n        for i in range(len(digits)):\n            index = int(digits[i])\n            # expand every partial combination of length i by one letter\n            while len(result[0]) == i:\n                temp_str = result.pop(0)\n                for c in letter_map[index]:\n                    result.append(temp_str + c)\n\n        return result","sub_path":"q17/Q17.py","file_name":"Q17.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116869149","text":"import EmployeeController, mysql.connector\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom mysql.connector import Error\n\npassword = \"\"\n\nclass Ui_Dialog(object):\n    def __init__(self, employID):\n        #def __init__(self):\n        self.employID = employID\n        self.Dialog = QtWidgets.QDialog()\n        self.Dialog.setObjectName(\"Dialog\")\n        self.Dialog.resize(476, 335)\n        self.label_11 = QtWidgets.QLabel(self.Dialog)\n        self.label_11.setGeometry(QtCore.QRect(100, 70, 61, 20))\n        self.label_11.setObjectName(\"label_11\")\n        self.line_2 = QtWidgets.QFrame(self.Dialog)\n        self.line_2.setGeometry(QtCore.QRect(67, 30, 351, 20))\n        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n        self.line_2.setObjectName(\"line_2\")\n        self.degreeDoctor = QtWidgets.QLineEdit(self.Dialog)\n        self.degreeDoctor.setGeometry(QtCore.QRect(160, 70, 221, 61))\n        self.degreeDoctor.setObjectName(\"degreeDoctor\")\n        self.label_12 = QtWidgets.QLabel(self.Dialog)\n        self.label_12.setGeometry(QtCore.QRect(70, 160, 91, 20))\n        self.label_12.setObjectName(\"label_12\")\n        self.label_10 = QtWidgets.QLabel(self.Dialog)\n        self.label_10.setGeometry(QtCore.QRect(204, 20, 61, 20))\n        self.label_10.setObjectName(\"label_10\")\n        self.ok = QtWidgets.QPushButton(self.Dialog)\n        self.ok.setGeometry(QtCore.QRect(110, 280, 121, 28))\n        self.ok.setObjectName(\"ok\")\n        self.certiDoctor = QtWidgets.QLineEdit(self.Dialog)\n        self.certiDoctor.setGeometry(QtCore.QRect(160, 160, 221, 71))\n        self.certiDoctor.setObjectName(\"certiDoctor\")\n        self.cancel = QtWidgets.QPushButton(self.Dialog)\n        self.cancel.setGeometry(QtCore.QRect(270, 280, 93, 28))\n        self.cancel.setObjectName(\"cancel\")\n\n        self.retranslateUi(self.Dialog)\n        QtCore.QMetaObject.connectSlotsByName(self.Dialog)\n\n        self.ok.clicked.connect(self.edit)\n        self.cancel.clicked.connect(self.back)\n\n\n    def retranslateUi(self, Dialog):\n        _translate = QtCore.QCoreApplication.translate\n        Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n        self.label_11.setText(_translate(\"Dialog\", \"Degree :\"))\n        self.label_12.setText(_translate(\"Dialog\", \"Certification :\"))\n        self.label_10.setText(_translate(\"Dialog\", \"Multivalue\"))\n        self.ok.setText(_translate(\"Dialog\", \"Ok\"))\n        self.cancel.setText(_translate(\"Dialog\", \"Cancel\"))\n\n\n    def show(self):\n        self.Dialog.show()\n\n    def edit(self):\n        #TODO edit data\n        try:\n            connection = mysql.connector.connect(host='localhost',\n                                                 database='hospital',\n                                                 user='root',\n                                                 password=password)\n            objdata = (self.employID,)\n\n            sqlQuery = \"insert into \"+\"doctor\"+\"(Employee_ID) \"
\\\n \"values(%s)\"\n \n cursor = connection.cursor()\n cursor.execute(sqlQuery, objdata)\n connection.commit()\n except Exception as e:\n print(e)\n retmsg = [\"1\", \"writing error\"]\n else :\n retmsg = [\"0\", \"writing done\"]\n finally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n\n \n try:\n connection = mysql.connector.connect(host='localhost',\n database='hospital',\n user='root',\n password=password)\n objdata = (self.employID,self.certiDoctor.text())\n sqlQuery = \"insert into \"+\"doctor_certification\"+\"(Employee_ID,Certification) \" \\\n \"values(%s, %s)\"\n \n cursor = connection.cursor()\n cursor.execute(sqlQuery, objdata)\n connection.commit()\n except Exception as e:\n print(e)\n retmsg = [\"1\", \"writing error\"]\n else :\n retmsg = [\"0\", \"writing done\"]\n finally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n\n try:\n connection = mysql.connector.connect(host='localhost',\n database='hospital',\n user='root',\n password=password)\n objdata = (self.employID,self.degreeDoctor.text())\n \n sqlQuery = \"insert into \"+\"doctor_degree\"+\"(Employee_ID, Degree) \" \\\n \"values(%s, %s)\"\n \n cursor = connection.cursor()\n cursor.execute(sqlQuery, objdata)\n connection.commit()\n except Exception as e:\n print(e)\n retmsg = [\"1\", \"writing error\"]\n else :\n retmsg = [\"0\", \"writing done\"]\n finally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n\n \n try:\n connection = mysql.connector.connect(host='localhost',\n database='hospital',\n user='root',\n password=password)\n\n objdata = (self.certiDoctor.text(), self.employID)\n sqlQuery = \"update doctor_certification set Certification = %s where Employee_ID = %s\" \n \n cursor = connection.cursor()\n cursor.execute(sqlQuery, objdata)\n connection.commit()\n except Exception as e:\n print(e)\n retmsg = [\"1\", \"writing error\"]\n else :\n retmsg = [\"0\", \"writing done\"]\n finally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n\n try:\n connection = mysql.connector.connect(host='localhost',\n database='hospital',\n user='root',\n password=password)\n objdata = (self.degreeDoctor.text(), self.employID)\n sqlQuery = \"update doctor_degree set Degree = %s where Employee_ID = %s\" \n \n cursor = connection.cursor()\n cursor.execute(sqlQuery, objdata)\n connection.commit()\n except Exception as e:\n print(e)\n retmsg = [\"1\", \"writing error\"]\n else :\n retmsg = [\"0\", \"writing done\"]\n finally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n\n self.ui = EmployeeController.Ui_Dialog()\n self.ui.show()\n self.Dialog.close()\n \n def back(self):\n self.ui = EmployeeController.Ui_Dialog()\n self.ui.show()\n self.Dialog.close() \n\n \nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n ui = Ui_Dialog()\n ui.show()\n sys.exit(app.exec_())\n","sub_path":"editDoctorPopup.py","file_name":"editDoctorPopup.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"388570405","text":"import asyncio\nimport json\n\nimport aiohttp\nfrom aiohttp import web\n\nfrom brittle_wit.constants import LOGGER\nfrom brittle_wit.executors import twitter_req_to_http_req\nfrom brittle_wit.messages import TwitterError, BrittleWitError\n\n\nclass EntryProcessor:\n \"\"\"\n Splits a stream of bytes into CRLF-separated messages.\n \"\"\"\n\n def __init__(self):\n self._buf = b\"\" # Use 
a more efficient buffer\n self._mailbox = []\n\n def process(self, chunk):\n \"\"\"\n Translate received bytes into messages in a mailbox.\n\n :param chunk: a chunk of bytes received from the HTTP connection\n \"\"\"\n self._buf += chunk\n\n # Streams are `\\r\\n`-separated JSON messages.\n raw_lines = self._buf.split(b\"\\r\\n\")\n\n # If only one element in the split, then there wasn't a CRLF.\n if len(raw_lines) > 1:\n # The last element may be a b'', which is perfectly fine.\n self._buf = raw_lines[-1]\n\n # Blank lines are keep-alive messages.\n self._mailbox.extend(l for l in raw_lines[:-1] if l.strip())\n\n def messages(self):\n \"\"\"\n Return the messages then reset the mailbox\n\n :return: the enqueued messages\n \"\"\"\n msgs = self._mailbox\n self._mailbox = []\n return msgs\n\n def __bool__(self):\n \"\"\"\n :return: True if there are any messages available\n \"\"\"\n return bool(self._mailbox)\n\n def take_one(self):\n \"\"\"\n :return: the first message removed from the mailbox\n \"\"\"\n return self._mailbox.pop(0)\n\n def purge_buffer(self):\n \"\"\"\n Reset the internal buffer.\n\n This is useful when restarting a stream. When the old stream\n terminates, the buffer is incomplete. Appending to it will produce a\n corrupt entry.\n \"\"\"\n self._buf = b''\n\n def purge_mailbox(self):\n \"\"\"\n Resets the mailbox so there are no messages\n \"\"\"\n self._mailbox = []\n\n def __iter__(self):\n \"\"\"\n :return: an iterator which pops messages of the mailbox until fully\n consumed\n \"\"\"\n return self._entry_consumer()\n\n def _entry_consumer(self):\n while self._mailbox:\n yield self.take_one()\n\n\ndef noop_entry_parser(entry):\n \"\"\"\n :return: the entry string without further processing.\n \"\"\"\n return entry\n\n\ndef json_entry_parser(entry):\n \"\"\"\n :return: the entry parsed as a JSON object\n \"\"\"\n return json.loads(entry.decode('ascii'))\n\n\nclass TwitterStream:\n def __init__(self, session, app_cred, client_cred, twitter_req,\n parser=noop_entry_parser, chunk_size=4096):\n \"\"\"\n Initialize a Twitter stream.\n\n This connects to Twitter's servers but doesn't start reading until a\n call to __iter__.\n\n :param session: the aiohttp.ClientSession object\n :param app_cred: the AppCredentials\n :param client_cred: the ClientCredentials\n :param twitter_req: the TwitterRequest to initiate streaming\n :param parser: a parser which takes a byte-entry and translates it\n into a message.\n :param chunk_size: the number of bytes to asynchronously read from\n Twitter at a time\n \"\"\"\n self._session = session\n self._app_cred = app_cred\n self._client_cred = client_cred\n self._twitter_req = twitter_req\n self._parser = parser\n self._chunk_size = chunk_size\n\n self._entry_processor = EntryProcessor()\n self._http_req = None\n\n self._connect()\n\n def _connect(self):\n \"\"\"\n Connect to Twitter's servers.\n \"\"\"\n # There may only be one set of credentials connected to a\n # streaming endpoint at a time. Preempting an existing\n # connection is error-prone. Complain loudly when a connection\n # already exists.\n if self.is_open:\n LOGGER.error(\"Already connected. Call close() first!\")\n raise RuntimeError(\"Already connected. 
Call close() first!\")\n\n        self._http_req = twitter_req_to_http_req(self._session,\n                                                 self._app_cred,\n                                                 self._client_cred,\n                                                 self._twitter_req)\n        LOGGER.error(\"Reconnect, complete...\")\n\n\n    @property\n    def is_open(self):\n        \"\"\"\n        :return: True if there is an active connection to Twitter's servers.\n        \"\"\"\n        return self._http_req is not None and not self._resp.connection.closed\n\n    def reconnect(self, force_close_if_open=False, clear_prior_messages=False):\n        \"\"\"\n        Reconnect to Twitter's servers.\n\n        :param force_close_if_open: if True, close the prior connection, if it\n            exists\n        :param clear_prior_messages: if True, purges all (correct) messages\n            waiting for consumption on the internal EntryProcessor from the\n            prior connection\n        \"\"\"\n        print(\"ATTEMPTING TO RECONNECT\")\n        LOGGER.error(\"ATTEMPTING TO RECONNECT\")\n        # Explicitly acknowledge that forced closure is okay.\n        if force_close_if_open and self.is_open:\n            self.disconnect()\n\n        LOGGER.error(\"CLEARING PRIOR\")\n        # The initiating request does not change between connection resets,\n        # so old messages are consumable.\n        if clear_prior_messages:\n            self._entry_processor.purge_mailbox()\n\n        LOGGER.error(\"PURGING BUFFER\")\n        # The buffer underlying the entry processor should always be in\n        # an initially pristine state. A broken connection virtually\n        # ensures corruption.\n        self._entry_processor.purge_buffer()\n\n        LOGGER.error(\"CONNECTING\")\n        self._connect()\n        LOGGER.error(\"GO\")\n\n    def disconnect(self):\n        \"\"\"\n        Disconnect the stream from Twitter's servers.\n        \"\"\"\n        if self._http_req is not None:\n            print(\"Called close!\")\n            LOGGER.error(\"Closing!\")\n            print(self._http_req.close())\n            self._http_req = None\n\n    async def __aiter__(self):\n        \"\"\"\n        Commence streaming, returning this object as an iterator.\n        \"\"\"\n
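        # Usage sketch (hedged: 'session', 'app_cred', 'client_cred' and 'req'\n        # are assumed to be constructed elsewhere; this block is illustrative,\n        # not part of the module):\n        #\n        #     stream = TwitterStream(session, app_cred, client_cred, req,\n        #                            parser=json_entry_parser)\n        #     async for message in stream:\n        #         print(message)\n        #\n        # I hate the next line. 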
Is there a decontextualize pattern?\n self._resp = await self._http_req.__aenter__()\n if self._resp.status != 200:\n LOGGER.error(\"Error on {}\".format(self._resp.status))\n raise TwitterError(self._client_cred,\n self._twitter_req,\n self._resp,\n await self._resp.text())\n\n return self\n\n async def __anext__(self):\n # If there are no messages ready, read from the stream.\n while not self._entry_processor:\n chunk = await self._resp.content.read(self._chunk_size)\n if not chunk:\n LOGGER.error(\"Read zero bytes on a stream.\")\n break\n self._entry_processor.process(chunk)\n\n # If there are no messages ready, the stream closed!\n if not self._entry_processor:\n LOGGER.error(\"Stream Closing (non-explicit)\")\n raise StopAsyncIteration\n\n # Take one message.\n return self._parser(self._entry_processor.take_one())\n\n\nclass StreamProcessor:\n def __init__(self, twitter_stream):\n self._twitter_stream = twitter_stream\n self._subscribers = {}\n self._requires_json = False\n LOGGER.info(\"STREAM READY\")\n\n def subscribe(self, handler, as_json=False):\n handler_id = id(handler)\n\n if handler_id in self._subscribers:\n raise RuntimeError(\"Already subscribed!\")\n\n if not hasattr(handler, \"send\"):\n raise RuntimeError(\"Handler must implement send(msg)\")\n\n if as_json:\n self._requires_json = True\n\n self._subscribers[handler_id] = (handler, as_json)\n return handler_id\n\n def unsubscribe(self, handler):\n handler_id = id(handler)\n\n if handler_id not in self._subscribers:\n msg = \"Either never subscribed or already unsubscribed!\"\n raise RuntimeError(msg)\n\n required_json = self._subscribers[handler_id][1]\n\n del self._subscribers[handler_id]\n\n if required_json and not any(p[1] for p in self._subscribers.values()):\n self._requires_json = False\n\n def _send_to_all(self, message):\n # JSON parsing is expensive. If the application only pipes the\n # Twitter Stream to HTTP clients, it's dead time. The pattern\n # would be: BYTES -> JSON -> BYTES\n if self._requires_json:\n json_message = json_entry_parser(message)\n\n for subscriber, as_json in self._subscribers.values():\n subscriber.send(json_message if as_json else message)\n\n async def run(self):\n # Retry Strategy: https://dev.twitter.com/streaming/overview/connecting\n # TODO: More compliant!\n failures, failed = 0, False\n while True:\n try:\n if failed:\n failures += 1\n LOGGER.error(\"RECONNECTING\")\n self._twitter_stream.reconnect(force_close_if_open=True)\n LOGGER.error(\"AFTER RECONNECT, RESUMING STREAM\")\n\n async for message in self._twitter_stream:\n self._send_to_all(message)\n failed = False # TODO: FIX: This is such a waste!\n except TwitterError as e:\n # The docs only say 420. 
But, I suspect 429 codes, too.\n                if e.status_code == 420 or e.status_code == 429:\n                    sleep_time = 60 * 2**failures\n                    msg = \"Stream 420'd waiting a {} seconds (failure {})\"\n                    LOGGER.error(msg.format(sleep_time, failures))\n                    await asyncio.sleep(sleep_time)\n                else:\n                    sleep_time = min(5 * 2**failures, 320)\n                    msg = \"Stream {}'d waiting b {} seconds (failure {})\"\n                    LOGGER.error(msg.format(e.status_code, sleep_time,\n                                            failures))\n                    print(e._http_resp)  # FIX: XXX: HACK\n\n                failed = True\n                await asyncio.sleep(sleep_time)\n            except BrittleWitError as e:\n                if e.is_retryable:  # TODO FIX as is network error.\n                    sleep_time = min(0.25 * failures, 16)\n                    msg = \"Stream {}'d waiting c {} seconds (failure {})\"\n                    LOGGER.error(msg.format(repr(e), sleep_time, failures))\n                    failed = True\n                    await asyncio.sleep(sleep_time)\n            except KeyboardInterrupt:\n                raise\n            except Exception as e:\n                print(e)\n                sleep_time = min(0.25 * failures, 16)\n                msg = \"Stream {}'d waiting {} d seconds (failure {})\"\n                LOGGER.error(msg.format(repr(e), sleep_time, failures))\n                failed = True\n                await asyncio.sleep(sleep_time)\n\n\nclass StreamingHTTPPipe:\n    DEFAULT_HEADERS = {'Content-Type': 'application/json'}\n\n    def __init__(self, **resp_headers):\n        # The _clients dict maps a unique identifier -- via id(obj) -- to a\n        # pair of (StreamResponse, Event).\n        self._clients = {}\n\n        self._headers = {**StreamingHTTPPipe.DEFAULT_HEADERS, **resp_headers}\n\n    async def handle(self, req):\n        caller_id = id(req)\n        resp = web.StreamResponse(status=200, headers=self._headers)\n\n        try:\n            # Start the FSM.\n            await resp.prepare(req)\n\n            # The send method signals handle when its processing finishes.\n            finished_flag = asyncio.Event()\n            self._clients[caller_id] = (resp, finished_flag)\n            await finished_flag.wait()\n\n        except asyncio.CancelledError:\n            # Generally, this means the client disconnected.\n            if caller_id in self._clients:\n                del self._clients[caller_id]\n\n        return resp\n\n    def send(self, message):\n        removal_set = []\n\n        for caller_id, (resp, finished_flag) in self._clients.items():\n            try:\n                resp.write(message)  # I don't think draining is necessary.\n            except Exception as e:\n                # This is a sloppy catch-all for now. I'm not sure if\n                # asyncio guarantees a cancel to handle prior to a\n                # possible write. But, I do know it would be quite\n                # wrong for a write to one client to cause failures\n                # in other clients.\n                print(\"Send\", repr(e))\n                finished_flag.set()\n                removal_set.append(caller_id)\n                raise e\n\n        for k in removal_set:\n            del self._clients[k]\n\n\nasync def save_raw_stream(session, app_cred, client_cred, twitter_req,\n                          output_path, chunk_size=4096, timeout=30):\n    \"\"\"\n    Save response headers and timeout seconds of response for a streaming req.\n\n    I use this function to save some stream output so I can write test fixtures\n    against the streaming API.\n\n    :param session: the aiohttp ClientSession\n    :param app_cred: an AppCredentials object\n    :param client_cred: a ClientCredentials object\n    :param twitter_req: the TwitterRequest for the streaming endpoint\n    :param output_path: the output path. 
Technically, this is a prefix as the\n function writes two output files: prefix + '.headers.json' and\n prefix + '.content.raw'\n :param chunk_size: the number of bytes for read chunking\n :param timeout: the number of seconds to pipe response to file output\n before exiting.\n \"\"\"\n header_path = output_path + \".headers.json\"\n content_path = output_path + \".content.raw\"\n\n http_req = twitter_req_to_http_req(session, app_cred,\n client_cred, twitter_req)\n\n # This uses blocking file operations. It's possible that a slow\n # disk could generate a stale warning. But, that's fine for the\n # use case of this function: debugging. A stale warning is useful\n # information to have in a saved feed as a fixture.\n with open(header_path, \"w\") as hp, open(content_path, \"wb\") as fp:\n try:\n with aiohttp.Timeout(timeout):\n async with http_req as resp:\n # Save response headers.\n json.dump({k: v for k, v in resp.headers.items()},\n hp, indent=4)\n\n while True:\n chunk = await resp.content.read(chunk_size)\n if not chunk:\n break\n fp.write(chunk)\n except asyncio.TimeoutError:\n pass\n","sub_path":"brittle_wit/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":14939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554394506","text":"from selenium import webdriver\nimport os\noptions = webdriver.ChromeOptions()\ndir_path = os.path.dirname(os.path.realpath(__file__))\nchromedriver = dir_path + '/chromedriver'\nos.environ['webdriver.chrome.driver'] = chromedriver\ndriver = webdriver.Chrome(chrome_options=options, executable_path=chromedriver)\n\ndriver.get('http://www.google.com')\nprint(driver.title)\n","sub_path":"SampleCodes/testingInChromeBrowser/seleniumTest.py","file_name":"seleniumTest.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467075963","text":"from detectron2.config import get_cfg\nfrom detectron2 import model_zoo\nimport torch\n\ndef get_config():\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml\"))\n cfg.DATASETS.TRAIN = (\"hpa_train_final\",)\n cfg.DATASETS.TEST = (\"hpa_val_final\",)\n cfg.DATALOADER.NUM_WORKERS = 10\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml\") # Let training initialize from model zoo\n cfg.MODEL.DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n cfg.SOLVER.IMS_PER_BATCH = 32\n cfg.SOLVER.BASE_LR = 0.000025\n cfg.SOLVER.WARMUP_ITERS = 5000\n cfg.SOLVER.MAX_ITER = 30000 #adjust up if val mAP is still rising, adjust down if overfit\n cfg.SOLVER.STEPS = ()\n cfg.MODEL.ROI_BOX_HEAD.FOCAL_ALPHA = 0.25\n cfg.MODEL.ROI_BOX_HEAD.FOCAL_GAMMA = 2.0\n\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 19\n cfg.MODEL.ROI_HEADS.NAME = 'FocalLossROIHeads'\n cfg.TEST.EVAL_PERIOD = 1000\n cfg.OUTPUT_DIR = './output/focal_25_alpha'\n \n return cfg","sub_path":"detectron_code/focal_config.py","file_name":"focal_config.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236921603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 13 16:28:22 2018\n\n@author: Patrick\n\"\"\"\n\n# based on http://nbviewer.jupyter.org/github/cknoll/beispiele/blob/master/zweifachpendel_nq2_np2_ruled_manif.ipynb\n\nimport sympy as 
sp\nimport symbtools as st\nimport symbtools.modeltools as mt\nfrom joblib import dump\n\n\nNp = 1\nNq = 1\nn = Np + Nq\npp = st.symb_vector(\"p1:{0}\".format(Np+1))\nqq = st.symb_vector(\"q1:{0}\".format(Nq+1))\n\nttheta = st.row_stack(pp, qq)\ntthetad = st.time_deriv(ttheta, ttheta)\ntthetadd = st.time_deriv(ttheta, ttheta, order=2)\nst.make_global(ttheta, tthetad, tthetadd)\n\nparams = sp.symbols('l1, s1, m1, m0, g')\nst.make_global(params)\n\ntau1 = sp.Symbol(\"tau1\")\n\n# Unit vectors\n\nex = sp.Matrix([1,0])\ney = sp.Matrix([0,1])\n\n# Coordinates of the centers of mass and the joints\nS0 = ex*q1  # center of mass of the cart\nG0 = S0  # joint between the cart and pendulum 1\n# Center of mass of the pendulum (for small angles the pendulum points upward)\nS1 = G0 + mt.Rz(p1)*ey*s1\n\n# Time derivatives of the center-of-mass coordinates\nSd0, Sd1 = st.col_split(st.time_deriv(st.col_stack(S0, S1), ttheta))\n\n# Energy\nT_rot = 0\nT_trans = (m0*Sd0.T*Sd0 + m1*Sd1.T*Sd1)/2\n\nT = T_rot + T_trans[0]\n\nV = m1*g*S1[1]\n\nmod = mt.generate_symbolic_model(T, V, ttheta, [0, tau1])\n\n# State-space model\nmod.calc_state_eq(simplify=True)\nx_dot = mod.f + mod.g*tau1\n\n# State-space model, partially linearized\nmod.calc_coll_part_lin_state_eq(simplify=True)\nx_dot_pl = mod.ff + mod.gg*qddot1\n\n# Adapt the state definition and save the state-space models\nreplacements = {'Matrix': 'sp.Matrix',\n                'sin': 'sp.sin',\n                'cos': 'sp.cos',\n                'q1': 'x1',\n                'qdot1': 'x2',\n                'qddot1': 'u1',\n                'p1': 'x3',\n                'pdot1': 'x4',\n                'tau1': 'u1'}\n\ndef str_replace_all(string, replacements):\n    for (key, val) in replacements.items():\n        string = string.replace(key, val)\n    return string\n\nx_dot = sp.Matrix([x_dot[1], x_dot[3], x_dot[0], x_dot[2]])\nx_dot_str = str_replace_all(str(x_dot), replacements)\ndump({'x_dot_str': x_dot_str}, 'examples/pend_cart.str')\n\nx_dot_pl = sp.Matrix([x_dot_pl[1], x_dot_pl[3], x_dot_pl[0], x_dot_pl[2]])\nx_dot_pl_str = str_replace_all(str(x_dot_pl), replacements)\ndump({'x_dot_str': x_dot_pl_str}, 'examples/pend_cart_pl.str')","sub_path":"examples/pend_cart.py","file_name":"pend_cart.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561115408","text":"import numpy as np\n\n\n#===============================================================================\nclass Patch:\n    # The computational domain - called Patch to match with Vaango/Uintah\n    def __init__(self,X0,X1,Nc,nGhost,t0,tf,dt,th,dw=0):\n        dim = 2\n        self.X0 = X0             # Bottom corner of patch domain\n        self.X1 = X1             # Top corner of patch domain\n        self.Nc = Nc+1+2*nGhost  # Vector of node counts\n        self.thick = th          # Thickness\n        self.nGhost = nGhost     # Number of Ghost nodes\n        self.dX = (X1-X0)/(Nc)   # Cell size\n        self.t = t0              # Time\n        self.tf = tf             # Final time\n        self.dt = dt             # Time increment\n        self.it = 0              # Timestep\n        self.tol = 1.e-15        # Global tolerance\n        self.bcs = []\n\n        if not (dw==0):\n            self.initGrid(dw)    # If specified, initialize nodes in data\n                                 # warehouse\n\n    def initGrid(self, dw):\n        for jj in range(self.Nc[1]):\n            yy = (jj-self.nGhost)*self.dX[1] + self.X0[1]\n            for ii in range(self.Nc[0]):\n                xx = (ii-self.nGhost)*self.dX[0] + self.X0[0]\n                XX = np.array( [xx, yy] )\n                dw.addNode( XX )\n\n    def inPatch( self, pt ):\n        if (pt[0] < self.X0[0]) or (pt[1] < self.X0[1]) or (pt[0] > self.X1[0]) or (pt[1] > self.X1[1]):\n            return False\n        return True\n\n    def allInPatch( self, pts ):\n        for pt in pts:\n            if not self.inPatch( pt ):\n                return False\n        return True\n\n    def stepTime( self ):\n        self.t += self.dt\n
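        # Hedged usage sketch (illustrative only; 'dw' is assumed to be a\n        # data-warehouse object exposing addNode, as used by initGrid above):\n        #\n        #     p = Patch(np.array([0., 0.]), np.array([1., 1.]),\n        #               np.array([10, 10]), nGhost=2,\n        #               t0=0.0, tf=1.0, dt=1e-3, th=1.0)\n        #     while p.t < p.tf:\n        #         p.stepTime()\n        #\n        self.it += 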
1","sub_path":"mpm_old/src/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"429060016","text":"from oricrete.folding2 import \\\n YoshimuraCreasePattern, CnstrTargetFace, Folding, Initialization, CreasePatternView, \\\n link, r_, s_, t_\nimport numpy as np\n\ndef geo_trans(X):\n x, y, z = X.T\n y_ = (y - 0.4) * (1 - 0.6 / 1.2 * x)\n return np.c_[x, y_, z]\n\ncp = YoshimuraCreasePattern(L_x=1.2, L_y=0.8, n_x=3, n_y=6,\n geo_transform=geo_trans)\nface_z_t = CnstrTargetFace(F=[r_, s_, 0.6 * t_ * r_ * (1 - r_ / 1.2)])\ninit = Initialization(cp=cp, tf_lst=[(face_z_t, cp.N)], t_init=0.1)\nfold = Folding(source=init, n_steps=8,\n tf_lst=[(face_z_t, cp.N)])\ncpw = CreasePatternView(root=init)\ncpw.configure_traits()\n","sub_path":"docs/use_cases/example_target_face_and_dof_cntl_3x4.py","file_name":"example_target_face_and_dof_cntl_3x4.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"319618291","text":"import socket, time, sys\r\nhost = 'localhost'\r\ntimeout = 0.5\r\nfilename = sys.argv[1]\r\nf = open(filename, \"rb\")\r\nfileExtention = filename[filename.index('.'):]\r\n\r\nclientport = 8002\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\nserverport = 8001\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\nserver.bind((host, serverport))\r\nserver.settimeout(0.1)\r\nbase = 0\r\nseq = 1\r\nthreshold = 16\r\nwindowSize = 1\r\nt1 = time.clock()\r\nbuffer = []\r\nnextWindow = 1\r\n\r\nwhile True:\r\n if len(buffer) == 0 and seq == 1:\r\n buffer.append(bytes(fileExtention.ljust(1004, '\\0'), 'utf8'))\r\n elif len(buffer) < windowSize and not f.closed:\r\n for i in range(windowSize - len(buffer)):\r\n tmp = f.read(1000)\r\n if len(tmp) > 0:\r\n dlen = len(tmp)\r\n tmp = tmp.ljust(1000, bytes('\\0', 'utf8'))\r\n buffer.append(bytes(str(dlen).zfill(4), 'utf8') + tmp)\r\n else:\r\n buffer.append(\"finxfin\")\r\n f.close()\r\n break ;\r\n if nextWindow:\r\n nextWindow = 0\r\n t1 = time.clock()\r\n for i in range(min(windowSize, len(buffer))):\r\n seq_str = str(seq + i).zfill(20)\r\n if buffer[i] == \"finxfin\":\r\n print(\"send fin\")\r\n client.sendto(bytes(seq_str + buffer[i],'utf8'), (host, clientport))\r\n break ;\r\n print(\"send data #%d, winSize = %d\" % (seq + i, windowSize))\r\n client.sendto(bytes(seq_str,'utf8') + buffer[i], (host, clientport))\r\n elif (time.clock() - t1) > timeout:\r\n threshold = max(1, int(windowSize / 2))\r\n windowSize = 1\r\n base = seq - 1\r\n print(\"time out, threshold =\", threshold)\r\n #print(base, seq, windowSize)\r\n t1 = time.clock()\r\n for i in range(min(windowSize, len(buffer))):\r\n seq_str = str(seq + i).zfill(20)\r\n if buffer[i] == \"finxfin\":\r\n print(\"resend fin\")\r\n client.sendto(bytes(seq_str + buffer[i],'utf8'), (host, clientport))\r\n break ;\r\n print(\"resend data #%d, winSize = %d\" % (seq + i, windowSize))\r\n client.sendto(bytes(seq_str,'utf8') + buffer[i], (host, clientport))\r\n try:\r\n ack, addr= server.recvfrom(20)\r\n ack = int(ack.decode('utf8'))\r\n if ack == 99999999999999999999:\r\n print(\"recv finack\")\r\n break ;\r\n print(\"recv ack #%d\" % ack)\r\n if ack == seq:\r\n seq += 1\r\n buffer.pop(0)\r\n if seq == base + windowSize + 1:\r\n nextWindow = 1\r\n base = seq - 1\r\n if windowSize < threshold:\r\n windowSize *= 2\r\n else:\r\n windowSize += 1\r\n elif ack > 
seq:\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n print(ack, seq)\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n while ack >= seq:\r\n seq += 1\r\n buffer.pop(0)\r\n if seq == base + windowSize + 1:\r\n nextWindow = 1\r\n base = seq - 1\r\n if windowSize < threshold:\r\n windowSize *= 2\r\n else:\r\n windowSize += 1\r\n except socket.timeout:\r\n yolo = 666\r\n \r\n","sub_path":"CN/hw2_b04902071/Sender.py","file_name":"Sender.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533057759","text":"import io\nimport sys\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\npd.set_option('max_columns', 1000)\npd.set_option('max_info_columns', 1000)\npd.set_option('expand_frame_repr', False)\npd.set_option('display.max_rows', 30000)\npd.set_option('max_colwidth', 4000)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\ndef main():\n '''If output is in csv format'''\n start_month = '9'\n start_day = '1'\n end_month = '9'\n end_day = '8'\n\n login_data = {'un': 'USERNAME', 'pw': 'PASSWORD'}\n df = pd.DataFrame()\n for year in map(str, range(2010, 2018)):\n print('Pulling data for {0} from {1}/{2} to {3}/{4}...'.format(year, start_month, start_day, end_month, end_day))\n form_data = 'INSPECT (make sure .csv is selected), NETWORK, CLICK ON FILE, FORM DATA IN HEADERS, VIEW SOURCE, REPLACE RELEVANT DATA FIELD'\n\n\n with requests.session() as s:\n s.get('https://na51.salesforce.com', params=login_data)\n data = requests.post(\n 'https://na51.salesforce.com/REPORT NUMBER',\n headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},\n cookies=s.cookies,\n data=form_data).content\n df = df.append(pd.read_csv(io.StringIO(data.decode('utf-8'))))\n return df.\\\n dropna(subset=['Lead: System ID']).\\\n reset_index(drop=True)\n\n\ndef main2():\n '''Otherwise'''\n start_month = '9'\n start_day = '1'\n end_month = '9'\n end_day = '8'\n\n login_data = {'un': 'USERNAME', 'pw': 'PASSWORD'}\n df = pd.DataFrame()\n for year in map(str, range(2010, 2018)):\n print('Pulling data for {0} from {1}/{2} to {3}/{4}...'.format(year, start_month, start_day, end_month, end_day))\n form_data = 'INSPECT, NETWORK, CLICK ON FILE, FORM DATA IN HEADERS, VIEW SOURCE, REPLACE RELEVANT DATA FIELD'\n\n with requests.session() as s:\n s.get('https://na51.salesforce.com', params=login_data)\n data = requests.post(\n 'https://na51.salesforce.com/REPORT NUMBER',\n headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},\n cookies=s.cookies,\n data=form_data).content\n df = df.append(parse_data(data))\n return df.\\\n dropna(subset=['Lead: System ID']).\\\n reset_index(drop=True)\n\ndef parse_data(csv_data):\n soup = BeautifulSoup(csv_data, \"lxml\")\n data_lst = []\n for row in soup.findAll('tr'):\n data_lst.append([cell.text for cell in row.findAll('td')])\n\n columns = [\n \"Lead: Lead Status\",\n \"Lead: System ID\",\n \"Monthly App Issued Count\",\n \"Lead: Dental Status\",\n \"Lead: Stips Agent\",\n \"INF Policy Applicant\",\n \"Lead: Lead ID\",\n \"Lead First Name\",\n \"Lead: Last Name\",\n \"Lead: Phone\",\n \"Lead: Email\",\n \"Effective Date Applicant\",\n \"Lead: Retention Status\",\n \"Spouse First Name\",\n \"Spouse Last Name\",\n \"Effective Date Spouse\",\n \"Lead: Retention Status Spouse\",\n \"Lead: Applicant State/Province\",\n \"Lead: Retention Agent\",\n \"Lead: Confirmation 
By\",\n \"Carrier Applicant\"\n ]\n if len(data_lst) > 1:\n return pd.DataFrame(data_lst, columns=columns).reset_index(drop=True)\n return pd.DataFrame(columns=columns)\n\n\nif __name__ == '__main__':\n df = main()\n print(df.info())\n print(df.head())\n print(df.tail())\n","sub_path":"coding/python/scrape/examples/salesforce_api.py","file_name":"salesforce_api.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604989017","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 9 17:03:09 2020\n\n@author: wxi\nresidential land prices dataset - from fhfa\nagricultural land prices from USDA\n\"\"\"\nimport pandas as pd\n\ndef main():\n landprice = pd.read_csv(\"./calculation_data/county_price.csv\")\n landprice[\"state_fips\"] = round(landprice[\"County\"], -3)\n colnames = [\"County Name\", \"County GEOID\", \"Neighbor Name\", \"Neighbor GEOID\"]\n dtypes = [\"str\", \"float\", \"str\", \"int\"]\n dtypes = {k:v for k,v in zip(colnames,dtypes)}\n adj_county = pd.read_csv(\"./calculation_data/county_adjacency.txt\", sep = '\\t', lineterminator = \"\\n\", names = colnames, dtype = dtypes, encoding = \"ISO-8859-1\")\n adj_county.fillna(method = \"ffill\", inplace = True)\n adj_county [\"County GEOID\"] = adj_county[\"County GEOID\"].astype(\"int\")\n adj_county.drop(adj_county[round(adj_county['County GEOID'], -3) != round(adj_county[\"Neighbor GEOID\"], -3) ].index, inplace = True) \n all_fips = pd.read_csv(\"./calculation_data/US_FIPS_Codes.csv\", usecols = [\"COUNTY_FIPS\"])\n all_fips.drop(0, inplace = True)\n all_fips[\"COUNTY_FIPS\"] = all_fips[\"COUNTY_FIPS\"].astype(int)\n disjoint = list(map(int,list(set(all_fips[\"COUNTY_FIPS\"]) - set(landprice[\"County\"]))))\n \n def get_avg_price(county_id):\n land_prices = []\n adj_counties = adj_county[adj_county[\"County GEOID\"] == county_id][\"Neighbor GEOID\"].to_list()\n for adj_id in adj_counties[1:]:\n try:\n land_prices.append(float(landprice.loc[landprice[\"County\"] == adj_id, \"Price Per Acre\"]))\n except TypeError:\n continue\n if not len(land_prices):\n return landprice[landprice[\"state_fips\"] == round(county_id,-3)][\"Price Per Acre\"].mean()\n else:\n return sum(land_prices) / len(land_prices)\n \n est_landprice = [get_avg_price(i) for i in disjoint]\n landprice = landprice.append(pd.DataFrame({\"County\": disjoint, \"Price Per Acre\": est_landprice, \"state_fips\" : list(map(lambda x: round(x,-3),disjoint))}))\n landprice = landprice.sort_values(by = [\"County\"]).reset_index()\n landprice.drop(columns = [\"index\"], inplace = True)\n landprice.to_csv(\"./calculation_data/landprices.csv\")","sub_path":"process_parity_calc/get_land_prices.py","file_name":"get_land_prices.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559927970","text":"import os\nimport sys\nimport re\nimport glob\nimport shutil\nfrom PIL import Image\nfrom PIL import ImageFile\nfrom PIL import ExifTags\nimport PIL.ImageOps\n\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef get_jpeg_info(img_fpath):\n lat = None\n lng = None\n capture_date = None\n im = Image.open(img_fpath)\n\n exif = im._getexif()\n\n if exif is not None:\n exif = {ExifTags.TAGS[k]: v for k, v in exif.items() if k in ExifTags.TAGS}\n if 'GPSInfo' in exif:\n gps_tags = exif['GPSInfo']\n gps = {ExifTags.GPSTAGS.get(t, t): gps_tags[t] for t in gps_tags}\n is_lat = 'GPSLatitude' in gps\n 
is_lat_ref = 'GPSLatitudeRef' in gps\n            is_lon = 'GPSLongitude' in gps\n            is_lon_ref = 'GPSLongitudeRef' in gps\n\n            if is_lat and is_lat_ref and is_lon and is_lon_ref:\n                lat = gps['GPSLatitude']\n                lat_ref = gps['GPSLatitudeRef']\n                if lat_ref == 'N':\n                    lat_sign = 1.0\n                elif lat_ref == 'S':\n                    lat_sign = -1.0\n                lon = gps['GPSLongitude']\n                lon_ref = gps['GPSLongitudeRef']\n                if lon_ref == 'E':\n                    lon_sign = 1.0\n                elif lon_ref == 'W':\n                    lon_sign = -1.0\n                lat = lat_sign * lat[0] + lat[1] / 60 + lat[2] / 3600\n                lng = lon_sign * lon[0] + lon[1] / 60 + lon[2] / 3600\n\n        if 'DateTimeOriginal' in exif:\n            capture_date = exif['DateTimeOriginal']\n            capture_date = capture_date.split(' ')[0].replace(':', '-')\n    return (capture_date, lat, lng)\n\n\n\n\ndef parse_xml(file_path):\n    objects = []\n    img_shape = [None, None, None]\n    object_dict = {'name': None, 'xmin': None, 'ymin': None, 'xmax': None, 'ymax': None}\n\n    # regex for searching object's information\n    re_width = re.compile(r'<width>([0-9]+)</width>')\n    re_height = re.compile(r'<height>([0-9]+)</height>')\n    re_depth = re.compile(r'<depth>([0-9]+)</depth>')\n    re_name = re.compile(r'<name>(.+)</name>')\n    re_xmin = re.compile(r'<xmin>([0-9]+)</xmin>')\n    re_ymin = re.compile(r'<ymin>([0-9]+)</ymin>')\n    re_xmax = re.compile(r'<xmax>([0-9]+)</xmax>')\n    re_ymax = re.compile(r'<ymax>([0-9]+)</ymax>')\n\n    # open Pascal VOC XML and read object information\n    with open(file_path, 'r') as xmlfh:\n\n        is_object_record = False\n\n        for line in xmlfh:\n            if not is_object_record:\n                m = re_width.search(line)\n                if m:\n                    img_shape[0] = int(m.group(1))\n                m = re_height.search(line)\n                if m:\n                    img_shape[1] = int(m.group(1))\n                m = re_depth.search(line)\n                if m:\n                    img_shape[2] = int(m.group(1))\n\n            if '<object>' in line:\n                is_object_record = True\n                continue\n\n            if '</object>' in line:\n                objects.append(object_dict)\n                object_dict = {'name': None, 'xmin': None, 'ymin': None, 'xmax': None, 'ymax': None}\n                is_object_record = False\n                continue\n\n            if is_object_record:\n                m = re_name.search(line)\n                if m:\n                    object_dict['name'] = m.group(1)\n\n                m = re_xmin.search(line)\n                if m:\n                    object_dict['xmin'] = int(m.group(1))\n\n                m = re_ymin.search(line)\n                if m:\n                    object_dict['ymin'] = int(m.group(1))\n\n                m = re_xmax.search(line)\n                if m:\n                    object_dict['xmax'] = int(m.group(1))\n\n                m = re_ymax.search(line)\n                if m:\n                    object_dict['ymax'] = int(m.group(1))\n\n    return {'shape': tuple(img_shape), 'objects': objects}\n\n\n\ndef crop_images(img_fpath, mode):\n    objects = parse_xml(os.path.splitext(img_fpath)[0] + '.xml')\n    im = Image.open(img_fpath)\n    im = PIL.ImageOps.exif_transpose(im)\n    for obj in objects['objects']:\n        obj = (obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'])\n        im_cropped = im.crop(obj)\n        output_fpath = os.path.splitext(img_fpath)[0] + '__cropped__' + '_'.join(map(str, obj)) + '.jpg'\n        if mode == 'exif':\n            im_cropped.save(output_fpath, quality=100, exif=im.info.get('exif'))\n        else:\n            im_cropped.save(output_fpath, quality=100)\n\n\ndef crop(mode, input_dirpath):\n    for f_ext in ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG']:\n        for image_path in sorted(glob.glob(os.path.join(input_dirpath, '*', '*' + f_ext))):\n            if not os.path.exists(os.path.splitext(image_path)[0] + '.xml'):\n                continue\n\n            if mode == 'exif':\n                exif_info = get_jpeg_info(image_path)\n                print(image_path, exif_info)\n                if (exif_info[0] is None) or (exif_info[1] is None) or (exif_info[2] is None):\n                    print('No EXIF: ' + image_path)\n                    continue\n\n            crop_images(image_path, mode)\n\n\nif __name__ == '__main__':\n\n    mode = sys.argv[1]\n    input_dpath = sys.argv[2]\n    crop(mode, input_dpath)\n
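\n# Hedged usage sketch (the directory name is illustrative, not part of the repo):\n#\n#     python crop_images.py exif ./photos    # crop only images carrying GPS + date EXIF, keep EXIF\n#     python crop_images.py plain ./photos   # crop every image that has a .xml annotation\n#\n# Note: images are searched one directory level below the given input directory.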
\n\n\n","sub_path":"10.3389/fevo.2021.762173/scripts/crop_images.py","file_name":"crop_images.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498647384","text":"class Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n # DFS\n l1, l2, l3 = len(s1), len(s2), len(s3)\n if l1 + l2 != l3:\n return False\n stack = [(0,0)]\n visited = set((0,0))\n while stack:\n x, y = stack.pop()\n if x+y == l3:\n return True\n if x+1 <= l1 and s1[x] == s3[x+y] and (x+1, y) not in visited:\n stack.append((x+1, y))\n visited.add((x+1, y))\n if y+1 <= l2 and s2[y] == s3[x+y] and (x,y+1) not in visited:\n stack.append((x, y+1))\n visited.add((x, y+1))\n return False","sub_path":"97.InterleavingString.py","file_name":"97.InterleavingString.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440404716","text":"import click\n\nfrom kubernetes import config\n\nfrom kubetools import __version__\nfrom kubetools.log import setup_logging\nfrom kubetools.settings import get_settings\n\n\nclass SpecialHelpOrder(click.Group):\n def __init__(self, *args, **kwargs):\n self.help_priorities = {}\n super(SpecialHelpOrder, self).__init__(*args, **kwargs)\n\n def list_commands(self, ctx):\n '''\n Reorder the list of commands when listing the help.\n '''\n commands = super(SpecialHelpOrder, self).list_commands(ctx)\n return (\n c[1] for c in sorted(\n (self.help_priorities.get(command, 1), command)\n for command in commands\n )\n )\n\n def group(self, *args, **kwargs):\n '''\n Behaves the same as `click.Group.command()` except capture a priority for\n listing command names in help.\n '''\n\n help_priority = kwargs.pop('help_priority', 1)\n help_priorities = self.help_priorities\n\n def decorator(f):\n cmd = super(SpecialHelpOrder, self).group(*args, **kwargs)(f)\n help_priorities[cmd.name] = help_priority\n return cmd\n\n return decorator\n\n def command(self, *args, **kwargs):\n '''\n Behaves the same as `click.Group.command()` except capture a priority for\n listing command names in help.\n '''\n\n help_priority = kwargs.pop('help_priority', 1)\n help_priorities = self.help_priorities\n\n def decorator(f):\n cmd = super(SpecialHelpOrder, self).command(*args, **kwargs)(f)\n help_priorities[cmd.name] = help_priority\n return cmd\n\n return decorator\n\n\ndef _get_context_names():\n try:\n contexts, active_context = config.list_kube_config_contexts()\n except config.ConfigException as e:\n # The python-kubernetes library currently does not handle a missing \"current context\"\n # well at all, raising an exception.\n # See: https://github.com/kubernetes-client/python/issues/1193\n if 'Expected key current-context' in e.args[0]:\n raise click.ClickException((\n 'No current-context set in kubeconfig! 
Please set this to any '\n 'value using `kubectl config use-context `.'\n ))\n raise\n\n if not contexts:\n print('Cannot find any context in kube-config file.')\n return\n return [context['name'] for context in contexts], active_context['name']\n\n\ndef print_contexts(ctx, param, value):\n if not value:\n return\n\n click.echo('--> Available Kubernetes contexts:')\n context_names, active_context_name = _get_context_names()\n for name in context_names:\n click.echo(f' {click.style(name, bold=name == active_context_name)}')\n\n ctx.exit()\n\n\ndef ensure_context(ctx, param, value):\n context_names, active_context_name = _get_context_names()\n\n if value:\n if value not in context_names:\n raise click.BadParameter(f'{value}; available contexts: {context_names}')\n else:\n click.echo(f'Using active context: {click.style(active_context_name, bold=True)}')\n value = active_context_name\n\n return value\n\n\n@click.group(cls=SpecialHelpOrder)\n@click.option(\n '--context',\n callback=ensure_context,\n envvar='KUBETOOLS_CONTEXT',\n help='The name of the Kubernetes context to use.',\n)\n@click.option(\n '--contexts',\n is_flag=True,\n is_eager=True,\n callback=print_contexts,\n expose_value=False,\n help='List available Kubernetes contexts and exit.',\n)\n@click.option('--debug', is_flag=True, help='Show debug logs.')\n@click.version_option(version=__version__, message='%(prog)s: v%(version)s')\n@click.pass_context\ndef cli_bootstrap(ctx, context, debug):\n '''\n Kubetools client - deploy apps to Kubernetes.\n '''\n\n ctx.meta['kube_context'] = context\n\n setup_logging(debug)\n get_settings()\n","sub_path":"kubetools/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610848300","text":"# -*- coding: utf-8 -*-\r\nfrom keras.callbacks import EarlyStopping\r\nfrom datetime import datetime\r\nimport os\r\nimport argparse\r\n\r\nimport load_train_data\r\nimport multilayer_perceptron\r\nimport predict\r\nimport plot\r\n\r\nparser = argparse.ArgumentParser(description=\"colorization\")\r\nparser.add_argument('-e', '--epochs', type=int, default=100)\r\nparser.add_argument('-b', '--batch', type=int, default=100)\r\nparser.add_argument('-c', '--category', type=str, default=\"grass\")\r\nargs = parser.parse_args()\r\n\r\nh_train, s_train, mono_train = load_train_data.Load_cov(args.category)\r\n\r\nHmodel = multilayer_perceptron.Build_128()\r\nSmodel = multilayer_perceptron.Build_128()\r\n\r\n\r\nbatch_size = args.batch\r\nepochs = args.epochs \r\n\r\nHmodel.summary()\r\n\r\nearly_stopping = EarlyStopping(monitor='loss', patience=10, verbose=1)\r\nhistory_h = Hmodel.fit(mono_train, h_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_split=0.1,\r\n callbacks=[early_stopping])\r\n#basename = \"model/Hmodel_\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".h5\"\r\n#Hmodel.save(basename)\r\nprint (\"\\007\")\r\n\r\n\r\n#early_stopping = EarlyStopping(monitor='mean_squared_error', patience=10, verbose=1)\r\nhistory_s = Smodel.fit(mono_train, s_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_split=0.1,\r\n callbacks=[early_stopping])\r\n#basename = \"model/Smodel_\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".h5\"\r\n#Smodel.save(basename)\r\nprint (\"\\007\")\r\n\r\n\r\npredir = \"pre_HSVCOV_\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\r\nos.mkdir(predir)\r\npredict.Predict_HSCOV(Hmodel, Smodel, 
args.category, predir)\r\nplot.Plot_history(history_h.history, predir+\"/h_history\")\r\nplot.Plot_history(history_s.history, predir+\"/s_history\")\r\n\r\n\r\nprint (\"\\007\")","sub_path":"floyd/colorization_hsvcov.py","file_name":"colorization_hsvcov.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624889759","text":"from libs.myapp import update_prompt\nfrom libs.config import gset, gget, alias, color\n\n\n@alias(True, _type=\"COMMON\")\ndef run(switch: str = \"\"):\n \"\"\"\n verbose\n\n Open / Close verbose info for prompt.\n\n switch:\n - ON\n - OFF\n \"\"\"\n switch = switch.upper()\n if (switch in [\"ON\", \"OFF\", \"\"]):\n switch_name = \"PROMPT.VERBOSE\"\n if switch == \"\":\n gset(switch_name, not gget(switch_name, default=False))\n elif switch == \"ON\":\n gset(switch_name, True)\n elif switch == \"OFF\":\n gset(switch_name, False)\n update_prompt()\n print(\n f\"\\nSet verbose info: {color.green('On') if gget(switch_name) else color.red('Off')}\\n\")\n else:\n print(color.red(\"\\nNo this switch\\n\"))\n","sub_path":"doughnuts/webshell_plugins/verbose.py","file_name":"verbose.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296897728","text":"S = list(input())\nS = [ int(x) for x in S]\nK = int(input())\nans = 1\nfor i in range(min(K,len(S))):\n if S[i] != 1:\n ans = S[i]\n break\nprint(ans)\n \n \n \n","sub_path":"beginner/106/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226119433","text":"#! /usr/bin/python\n\nimport rospy\nimport numpy as np\nfrom fish_msgs.msg import DepthTestMsg # DepthTestMsg\nfrom fish_msgs.msg import mbedStatusMsg # mbedStatusMsg\nfrom sensor_msgs.msg import Joy\nimport time\n\n\nl2_button = 8\nr2_button = 9\nl1_button = 10\nr1_button = 11\ntri_button = 12\nx_button = 14\n\nl_stick_y_axis = 1\n\nmode1_button = l1_button\nmode2_button = r1_button\nmode3_button = l2_button\nmode4_button = r2_button\n\ntick_up_button = tri_button\ntick_down_button = x_button\n\nvel_ctrl_axis = l_stick_y_axis\n\n\nclass DepthJoy(object):\n def __init__(self):\n self.node_name = rospy.get_name()\n rospy.loginfo(\"[%s] Initializing \" %(self.node_name))\n\n self.joy = None\n\n self.pub = rospy.Publisher(\"joy_control\", DepthTestMsg, queue_size = 100)\n\n self.sub_joy = rospy.Subscriber(\"joy\", Joy, self.cbJoy, queue_size = 100)\n\n self.sub_mbed = rospy.Subscriber(\"mbed_stat\", mbedStatusMsg, self.cbMbed, queue_size = 100)\n self.mode_map = {4: \"depth\", 2: \"velocity\", 3: \"position\", 1: \"voltage\"}\n self.desired_number = {\"depth\": 0, \"velocity\": 0, \"position\": 0, \"voltage\": 0}\n self.mode = 2\n self.value = 0\n\n self.axis_gain_dict = {\"depth\": -1, \"velocity\": 1, \"position\": 1, \"voltage\": 1}\n self.tick_gain_dict = {\"depth\": -0.05, \"velocity\": 0.05, \"position\": 0.05, \"voltage\": 0.05}\n\n self.minmax_dict = {\"depth\": (0, 1), \"velocity\": (-1, 1), \"position\": (0,1)}\n\n self.bad_axes = [23, 24, 25]\n self.mbed_msg = DepthTestMsg()\n # Controller:\n # LStick: x0, -y1\n # RStick: x2, -y3\n # LDpad: b7, ? 
(projected a11)\n # RDpad: b5, a9\n # UDpad: b4, a8\n # DDpad: b6, a10\n # X: b14, a18\n # Tri: b12, a16\n # Square: b15, a19\n # Circle: b13, a17\n # L1: b10, a14\n # R1: b11, a15\n # L2: b8, a12\n # R2: b9, a13\n # Select b0\n # Start b3\n # PSbutton b16\n # L3 b1\n # R3 b2\n\n\n\n def changed_axes_buttons(self, joy_msg):\n if self.joy != None:\n return {\"axes\": [(self.joy.axes[i] != joy_msg.axes[i]) and (i not in self.bad_axes) for i in range(len(self.joy.axes))], \"buttons\": [self.joy.buttons[i] != joy_msg.buttons[i] for i in range(len(self.joy.buttons))]}\n return {\"axes\": [(i not in self.bad_axes) for i in range(len(joy_msg.axes))], \"buttons\": [True for i in range(len(joy_msg.buttons))]}\n\n def joy_updated(self, diff):\n for cat in diff.keys():\n# rospy.loginfo(\"diff: %s\", diff[cat])\n for i in range(len(diff[cat])):\n if diff[cat][i]:\n # rospy.loginfo(\"%s %s\",i, diff[cat][i])\n return True\n return False\n\n def normalize_axis(self, value):\n # rospy.loginfo(\"%s\", value)\n return value\n\n def clip(self, val, minmax):\n return min(max(val, minmax[0]), minmax[1])\n\n\n def do_incr(self, incr):\n increase = self.tick_gain_dict[self.mode_map[self.mode]] * incr\n self.desired_number[self.mode_map[self.mode]] = self.clip(increase + self.desired_number[self.mode_map[self.mode]], self.minmax_dict[self.mode_map[self.mode]])\n\n def cbJoy(self, joy_msg):\n diff = self.changed_axes_buttons(joy_msg)\n # rospy.loginfo(\"%s\", diff)\n\n mbed_msg = DepthTestMsg()\n modifier = 0\n\n if self.joy_updated(diff):\n self.joy = joy_msg\n sendMsg = False\n if diff[\"buttons\"][mode1_button] and joy_msg.buttons[mode1_button]:\n self.mode = 1\n sendMsg = True\n # update msg with new mode\n if diff[\"buttons\"][mode2_button] and joy_msg.buttons[mode2_button]:\n self.mode = 2\n sendMsg = True\n if diff[\"buttons\"][mode3_button] and joy_msg.buttons[mode3_button]:\n self.mode = 3\n sendMsg = True\n if diff[\"buttons\"][mode4_button] and joy_msg.buttons[mode4_button]:\n self.mode = 4\n sendMsg = True\n if diff[\"buttons\"][tick_up_button] and joy_msg.buttons[tick_up_button]:\n self.do_incr(1)\n sendMsg = True\n if diff[\"buttons\"][tick_down_button] and joy_msg.buttons[tick_down_button]:\n self.do_incr(-1)\n sendMsg = True\n if diff[\"axes\"][vel_ctrl_axis]:\n sendMsg = True\n\n\n mbed_msg.mode = self.mode\n self.value = self.clip(self.desired_number[self.mode_map[self.mode]] + (self.axis_gain_dict[self.mode_map[self.mode]] * self.normalize_axis(joy_msg.axes[vel_ctrl_axis])), self.minmax_dict[self.mode_map[self.mode]])\n mbed_msg.value = self.value\n if sendMsg:\n rospy.loginfo(\"PI mode: %s, value: %s\", self.mode_map[mbed_msg.mode], mbed_msg.value)\n # self.pub.publish(mbed_msg)\n\n def cbMbed(self, mbed_msg):\n rospy.loginfo(\"MBED mode: %s, value: %s\", self.mode_map[mbed_msg.mode], mbed_msg.value)\n\n\n def sendStat(self):\n mbed_msg = DepthTestMsg()\n mbed_msg.mode = self.mode;\n mbed_msg.value = self.value;\n self.pub.publish(mbed_msg);\n\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"depth_control_pi\", anonymous=False)\n depth_control_pi = DepthJoy()\n while True:\n depth_control_pi.sendStat()\n time.sleep(0.01)\n rospy.spin()\n","sub_path":"fish/pi/ros/catkin_ws/src/depth_control/src/depth_control_node.py","file_name":"depth_control_node.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"216909014","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data loader for NNGP experiments.\n\nLoading MNIST dataset with train/valid/test split as numpy array.\n\nUsage:\nmnist_data = load_dataset.load_mnist(num_train=50000, use_float64=True,\n mean_subtraction=True)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport copy\nimport numpy as np\nimport tensorflow as tf\n#import tensorflow_datasets as tfds\n\nfrom keras.datasets import mnist, cifar10\n\nfrom getstl10 import read_all_images, read_labels\n\ndef load_mnist(num_train=50000,\n use_float64=False,\n mean_subtraction=False,\n random_roated_labels=False):\n \n \"\"\"Loads MNIST as numpy array.\"\"\"\n # uncomment later\n flags = tf.app.flags\n FLAGS = flags.FLAGS\n\n flags.DEFINE_string('data_dir', '/tmp/nngp/data/',\n 'Directory for data.')\n\n\n from tensorflow.examples.tutorials.mnist import input_data\n data_dir = FLAGS.data_dir\n datasets = input_data.read_data_sets(\n data_dir, False, validation_size=10000, one_hot=True)\n mnist_data = _select_subset(\n datasets,\n num_train,\n use_float64=use_float64,\n mean_subtraction=mean_subtraction,\n random_roated_labels=random_roated_labels)\n\n return mnist_data\n\n\ndef load_cifar10(num_train=50000,\n use_float64=False,\n mean_subtraction=False,\n random_roated_labels=False):\n \"\"\"Loads CIFAR as numpy array.\"\"\"\n (x_train, y_trainlabel), (x_test, y_testlabel) = cifar10.load_data()\n \n x_train = np.array([img.flatten() for img in x_train])\n x_test = np.array([img.flatten() for img in x_test])\n \n y_train = np.zeros((len(x_train), 10))\n for idx, l in enumerate(y_trainlabel):\n y_train[idx, l] = 1\n \n y_test = np.zeros((len(x_test), 10))\n for idx, l in enumerate(y_testlabel):\n y_test[idx, l] = 1\n \n x_train = x_train.astype('float64')\n y_train = y_train.astype('float64')\n x_test = x_test.astype('float64')\n y_test = y_test.astype('float64')\n\n if (num_train+5000 > len(x_train)):\n print(\"Too many training points selected {0} + {1} > {2}\".format(num_train, 5000, len(x_train)))\n sys.exit()\n \n x_train = x_train[:num_train]\n y_train = y_train[:num_train]\n\n x_valid = x_train[-5000:]\n y_valid = y_train[-5000:]\n\n train_image_mean = np.mean(x_train)\n train_label_mean = np.mean(y_train)\n \n if mean_subtraction:\n x_train -= train_image_mean\n y_train -= train_label_mean\n x_test -= train_image_mean\n y_test -= train_label_mean\n x_valid -= train_image_mean\n y_valid -= train_label_mean\n \n return (x_train, y_train,\n x_valid, y_valid,\n x_test, y_test)\n\n\ndef load_stl10(num_train=4500,\n use_float64=False,\n mean_subtraction=False,\n random_roated_labels=False,\n poor=False):\n\n DATA_PATH_TRAIN = '/tmp/nngp/data/stl10_binary/train_X.bin'\n LABEL_PATH_TRAIN = '/tmp/nngp/data/stl10_binary/train_y.bin'\n\n DATA_PATH_TEST = '/tmp/nngp/data/stl10_binary/test_X.bin'\n LABEL_PATH_TEST = '/tmp/nngp/data/stl10_binary/test_y.bin'\n\n x_train = read_all_images(DATA_PATH_TRAIN)\n y_trainlabel = read_labels(LABEL_PATH_TRAIN)\n 
y_trainlabel = y_trainlabel - 1\n x_test = read_all_images(DATA_PATH_TEST)\n y_testlabel = read_labels(LABEL_PATH_TEST)\n y_testlabel = y_testlabel - 1\n \n if poor:\n return (x_train, y_trainlabel, x_test, y_testlabel)\n\n x_train = np.array([img.flatten() for img in x_train])\n x_test = np.array([img.flatten() for img in x_test])\n\n y_train = np.zeros((len(x_train), 10))\n for idx, l in enumerate(y_trainlabel):\n y_train[idx, l] = 1\n \n y_test = np.zeros((len(x_test), 10))\n for idx, l in enumerate(y_testlabel):\n y_test[idx, l] = 1\n \n x_train = x_train.astype('float64')\n y_train = y_train.astype('float64')\n x_test = x_test.astype('float64')\n y_test = y_test.astype('float64')\n\n if (num_train+500 > len(x_train)):\n print(\"Too many training points selected {0} + {1} > {2}\".format(num_train, 500, len(x_train)))\n sys.exit()\n \n x_train = x_train[:num_train]\n y_train = y_train[:num_train]\n\n x_valid = x_train[-500:]\n y_valid = y_train[-500:]\n\n train_image_mean = np.mean(x_train)\n train_label_mean = np.mean(y_train)\n \n if mean_subtraction:\n x_train -= train_image_mean\n y_train -= train_label_mean\n x_test -= train_image_mean\n y_test -= train_label_mean\n x_valid -= train_image_mean\n y_valid -= train_label_mean\n \n return (x_train, y_train,\n x_valid, y_valid,\n x_test, y_test)\n\ndef _select_subset(datasets,\n num_train=100,\n classes=list(range(10)),\n seed=9999,\n sort_by_class=False,\n use_float64=False,\n mean_subtraction=False,\n random_roated_labels=False):\n \"\"\"Select subset of MNIST and apply preprocessing.\"\"\"\n np.random.seed(seed)\n classes.sort()\n subset = copy.deepcopy(datasets)\n\n num_class = len(classes)\n num_per_class = num_train // num_class\n\n idx_list = np.array([], dtype='uint8')\n\n ys = np.argmax(subset.train.labels, axis=1) # undo one-hot\n for c in classes:\n if datasets.train.num_examples == num_train:\n idx_list = np.concatenate((idx_list, np.where(ys == c)[0]))\n else:\n idx_list = np.concatenate((idx_list,\n np.where(ys == c)[0][:num_per_class]))\n if not sort_by_class:\n np.random.shuffle(idx_list)\n\n data_precision = np.float64 if use_float64 else np.float32\n\n train_image = subset.train.images[idx_list][:num_train].astype(data_precision)\n train_label = subset.train.labels[idx_list][:num_train].astype(data_precision)\n valid_image = subset.validation.images.astype(data_precision)\n valid_label = subset.validation.labels.astype(data_precision)\n test_image = subset.test.images.astype(data_precision)\n test_label = subset.test.labels.astype(data_precision)\n\n if sort_by_class:\n train_idx = np.argsort(np.argmax(train_label, axis=1))\n train_image = train_image[train_idx]\n train_label = train_label[train_idx]\n\n if mean_subtraction:\n train_image_mean = np.mean(train_image)\n train_label_mean = np.mean(train_label)\n train_image -= train_image_mean\n train_label -= train_label_mean\n valid_image -= train_image_mean\n valid_label -= train_label_mean\n test_image -= train_image_mean\n test_label -= train_label_mean\n\n if random_roated_labels:\n r, _ = np.linalg.qr(np.random.rand(10, 10))\n train_label = np.dot(train_label, r)\n valid_label = np.dot(valid_label, r)\n test_label = np.dot(test_label, r)\n\n return (train_image, train_label,\n valid_image, valid_label,\n test_image, test_label)\n\n","sub_path":"load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":7608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200458993","text":"from flask 
import Flask, render_template, jsonify\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__, static_url_path='/static')\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI']= \"sqlite:///databases/database.db\"\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb = SQLAlchemy(app)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route(\"/api/v1/users\")\r\ndef api_users():\r\n datos = db.engine.execute(\"SELECT * FROM users\")\r\n return jsonify({ 'usuarios ': [dict(tupla) for tupla in datos]})\r\n\r\n@app.route(\"/users/list\")\r\ndef lista_users():\r\n resultado = db.engine.execute(\"SELECT * FROM users\")\r\n return render_template('users.html', datos=resultado)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326101963","text":"#AUTHOR: CHIRCHIR\n#ACCESSING DICTIONARIES IN PYTHON \n\nstudent = dict({\n 'name':'chirchir',\n 'phone':'07958473622',\n 'location':'Nairobi',\n 'work':'programmer',\n 'id':'772616273'\n})\n# printing only keys in a dictionary\nfor key in student:\n\tprint(key)\n#printing both keys and values in dict using .items function\n\nfor key, val in student.items():\n\tprint(\"{}==>{}\".format(key, val))","sub_path":"gettting_dict_keys.py","file_name":"gettting_dict_keys.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489994029","text":"#!/usr/bin/env python\r\n######################################################################\r\n#\r\n# Build a ROMS grid file\r\n#\r\n# Further Information: \r\n# http://www.brest.ird.fr/Roms_tools/\r\n# \r\n# This file is part of ROMSTOOLS\r\n#\r\n# ROMSTOOLS is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published\r\n# by the Free Software Foundation; either version 2 of the License,\r\n# or (at your option) any later version.\r\n#\r\n# ROMSTOOLS is distributed in the hope that it will be useful, but\r\n# WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\r\n# MA 02111-1307 USA\r\n#\r\n# Copyright (c) 2002-2006 by Pierrick Penven \r\n# e-mail:Pierrick.Penven@ird.fr \r\n#\r\n# Contributions of P. Marchesiello (IRD) and X. 
Capet (UCLA)\r\n#\r\n# Updated Aug-2006 by Pierrick Penven\r\n# Updated 24-Oct-2006 by Pierrick Penven (mask correction)\r\n#\r\n# Translated to Python by Rafael Soutelino: rsoutelino@gmail.com \r\n# Last Modification: Aug, 2010\r\n################################################################\r\n\r\n\r\nprint (' \\n' + '==> ' + ' IMPORTING MODULES ...\\n' + ' ')\r\n# IMPORTING MODULES #################################################\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#from matplotlib import delaunay\r\nfrom matplotlib.mlab import find\r\nfrom mpl_toolkits.basemap import Basemap\r\nfrom scipy.io import loadmat\r\nimport datetime as dt\r\nimport netCDF4\r\nfrom utils import get_isobath, point_in_poly\r\nfrom scipy.interpolate import griddata, interp1d\r\nfrom scipy.io import savemat as mat\r\nimport scipy.io as sio\r\n\r\n# classes and functions to the computings\r\nfrom roms_setup import run_setup, rho2uvp, get_metrics, spheric_dist\r\nfrom roms_setup import get_angle, add_topo, process_mask, uvp_mask, smoothgrid\r\nfrom roms_setup import rotfilter, rfact, hanning_smoother\r\nfrom roms_setup import hanning_smoother_coef2d, FX, FY \r\n\r\nfrom bathy_smoother.bathy_smoothing import smoothing_Positive_rx0, smoothing_Laplacian_rx0\r\nfrom bathy_smoother.LP_bathy_smoothing import LP_smoothing_rx0\r\n\r\n# SCRIPT START ######################################################\r\n\r\n# Basic Settings:\r\n\r\n\r\nfilenamestr = '_grd.nc'\r\nfiletypestr = 'ROMS Grid file'\r\n\t\r\n# READING PREVIOUSLY BUILT RELEVANT FILES: ###########################\r\n# metadata ascii file\r\n# OA-created netcdf initial T, S file \r\n# grid netcdf file\r\n\r\n\r\ndl = 0.083\r\n#lonr = np.arange(-47, -29 + dl, dl)\r\nlonr = np.arange(-53, -34 + dl, dl)\r\n\r\ni = 0; \r\nlatr = np.array([-36])\r\nwhile latr[i] <= -20:\r\n i = i + 1\r\n tmp = latr[i-1] + dl * np.cos( latr[i-1]*np.pi/180 )\r\n latr = np.hstack([latr, tmp])\r\n\r\n\r\n\r\nLonr, Latr = np.meshgrid(lonr, latr)\r\nLonu, Lonv, Lonp = rho2uvp(Lonr)\r\nLatu, Latv, Latp = rho2uvp(Latr)\r\n\r\nM, L = Latp.shape\r\n\r\n\r\n\r\nprint (' \\n' + '==> ' + ' COMPUTING METRICS ...\\n' + ' ')\r\n\r\n\r\nprint (' \\n' + '==> ' + ' LLm = ' + np.str(L-1) + ' ...\\n' + ' ')\r\nprint (' \\n' + '==> ' + ' MMm = ' + np.str(M-1) + ' ...\\n' + ' ')\r\n\r\n# !!!!!!!!!!!!!!!!!!!!!\r\n### CODE SOMETHING HERE TO WRITE THIS INFORMATION IN THE METADATA FILE\r\n# !!!!!!!!!!!!!!!!!!!!!\r\n\r\npm, pn, dndx, dmde = get_metrics(Latu, Lonu, Latv, Lonv)\r\nxr = 0*pm\r\nyr = xr.copy()\r\n\r\n\r\nfor i in np.arange(0, L):\r\n xr[:, i+1] = xr[:, i] + 2 / ( pm[:, i+1] + pm[:, i] )\r\n\r\nfor j in np.arange(0, M):\r\n yr[j+1, :] = yr[j, :] + 2 / ( pn[j+1, :] + pn[j, :] )\r\n \r\nxu, xv, xp = rho2uvp(xr)\r\nyu, yv, yp = rho2uvp(yr)\r\n\r\ndx = 1 / pm\r\ndy = 1 / pn\r\ndxmax = np.max( dx/1000. )\r\ndxmin = np.min( dx/1000. )\r\ndymax = np.max( dy/1000. )\r\ndymin = np.min( dy/1000. 
)\r\n\r\nangle = get_angle(Latu, Lonu)\r\n\r\nf0 = 4 * np.pi * np.sin( np.pi * Latr/180 ) / ( 24*3600 )\r\n\r\n\r\nprint (' \\n' + '==> ' + ' ADDING TOPOGRAPHY ...\\n' + ' ')\r\n\r\n#ff=np.isnan(X)\r\n \r\n \r\n##############\r\nETOPO2=False\r\nif ETOPO2:\r\n print('bathymetry from ETOPO2')\r\n h = add_topo(Lonr, Latr, pm, pn,'ETOPO2v2g_f4.nc') #etopo\r\n\r\nGEBCO=False\r\nif GEBCO:\r\n print('bathymetry from GEBCO')\r\n gebco=netCDF4.Dataset('gebco_222.nc')\r\n latgebco=gebco['lat'][:]\r\n longebco=gebco['lon'][:]\r\n elevation=-gebco['elevation'][:]\r\n from scipy.interpolate import interp2d\r\n hgeb=interp2d(longebco,latgebco,elevation,kind='cubic')\r\n h=hgeb(Lonr[0,:],Latr[:,0])\r\n \r\nETOPO1=True\r\nif ETOPO1:\r\n print('bathymetry from ETOPO1')\r\n etopo1=netCDF4.Dataset('etopo1_bed_g2.nc')\r\n latgebco=etopo1['lat'][:]\r\n longebco=etopo1['lon'][:]\r\n elevation=-etopo1['topo'][:]\r\n from scipy.interpolate import interp2d\r\n hgeb=interp2d(longebco,latgebco,elevation,kind='cubic')\r\n h=hgeb(Lonr[0,:],Latr[:,0])\r\n\r\n#################\r\n\r\n############## interp from parent\r\n\r\n#p_file=netCDF4.Dataset('azul_grd2.nc')\r\n#h_parent=p_file['h'][:]\r\n#lon_parent=p_file['lon_rho'][:]\r\n#lat_parent=p_file['lat_rho'][:]\r\n#h=griddata((lon_parent.ravel(),lat_parent.ravel()),h_parent.ravel(),(Lonr.ravel(),Latr.ravel())).reshape(Lonr.shape)\r\n\r\n\r\n#####################\r\n\r\nhmin=10\r\n\r\nh[h ' + ' COMPUTING THE MASK ...\\n' + ' ')\r\n\r\nmaskr = hraw*0\r\nmaskr=np.abs(maskr)\r\nmaskr[ np.where(hraw > -10) ] = 1 #tirar a mascara para passar o filtro\r\nmaskr = process_mask(maskr)\r\n\r\n\r\n#for i in range(maskr.shape[0]):\r\n# maskr[i,np.where(lonr>-39)] = 1 #excluir a mascara das ilhas / teste do q fazer\r\n \r\n\r\nprint (' \\n' + '==> ' + ' FILTERING THE TOPOGRAPHY ...\\n' + ' ')\r\n\r\n\r\nprint('new smoothingg')\r\n\r\n\r\nh=smoothing_Positive_rx0(maskr,hraw,0.27)\r\n\r\nAmpConst=np.zeros(maskr.shape)+10000\r\n\r\nSignConst=np.zeros(maskr.shape)\r\n#signindex=np.where((hraw>0)&(hraw<1000))\r\n#SignConst[signindex]=-1\r\n\r\n\r\n#h=LP_smoothing_rx0(maskr,hraw,0.3,SignConst,AmpConst)\r\n\r\n\r\nh_parent=h.copy()\r\n\r\nh2=h.copy()\r\n\r\n#h_parent=hraw.copy()\r\n######################################################################\r\nhh = smoothgrid(h_parent, maskr, hmin,hmin,2.6, 1,1) # OLD FILTER\r\n#hh=hh.filled()\r\nhh[hh>100000] = 0\r\n######################################################################\r\n\r\n############################################################# weights for transition between the two smoothed grids\r\n[Lr,Mr] = Lonr.shape;\r\n\r\ninner = 1;\r\nouter = 0.; \r\nwidth = 10.; \r\n\r\nwork = np.zeros([Lr,Mr]) + inner;\r\n\r\n\r\n#--------------------------------------------------------------------------\r\n# Set inverse time scales.\r\n#--------------------------------------------------------------------------\r\n\r\nIstrR = 0;\r\nIendR = int(Lr);\r\nJstrR = 0;\r\nJendR = int(Mr);\r\n\r\nfor i in range(IendR): # Eastern Boundary\r\n for j in range(JendR-int(width),JendR):\r\n work[i,j] = outer + (JendR -1 - j) * (inner - outer) / width;\r\n \r\nfor i in range(IendR): # western Boundary\r\n for j in range(JstrR,JstrR+int(width)):\r\n work[i,j] = inner + (width - j) * (outer - inner) / width;\r\n \r\nh=0 \r\nfor i in range(IendR-int(width),IendR): # nothern Boundary\r\n for j in range(JstrR+int(width)-h,JendR-int(width)+h):\r\n work[i,j] = outer + (IendR -1 - i) * (inner - outer) / width;\r\n h=h+1\r\n\r\nh=int(width)\r\nfor i in 
range(IstrR,IstrR+int(width)): # southern Boundary\r\n for j in range(JstrR+int(width)-h,JendR-int(width)+h):\r\n work[i,j] = inner + (width - i) * (outer - inner) / width;\r\n h=h-1\r\n\r\n##########################################################################################\r\n\r\nhson=hh.copy()\r\nh=hh.copy()\r\nh_parent=hh.copy() #testando primeiro com tudo uniforme\r\n\r\n\r\nfor i in range(Lonr.shape[0]):\r\n for j in range(Lonr.shape[1]):\r\n h[i,j]=(work[i,j]*hson[i,j]) + ((1. - work[i,j])*h_parent[i,j])\r\n\r\n\r\n\r\n#plt.pcolor(h);plt.colorbar();plt.show()\r\n#plt.pcolor(hraw);plt.colorbar();plt.show()\r\n\r\n####################### TESTING PROFILES\r\nmaskr = h*0\r\nmaskr=np.abs(maskr)\r\nmaskr[ np.where(h > 20.1) ] = 1 \r\nmaskr = process_mask(maskr)\r\n\r\nggg=np.ma.masked_where(maskr==0, h2)\r\ng=np.ma.masked_where(maskr==0, h)\r\ngg=np.ma.masked_where(maskr==0, hraw)\r\n\r\nplt.plot(-ggg[-1,:]);plt.show()\r\nplt.plot(-g[-1,:]);plt.show()\r\nplt.plot(-gg[-1,:]);plt.show()\r\n\r\nplt.plot(-ggg[0,:]);plt.show()\r\nplt.plot(-g[0,:]);plt.show()\r\nplt.plot(-gg[0,]);plt.show()\r\n\r\n#\r\n#######################################\r\n\r\nh[np.where(h==0)] = 0.1 #get rid of 0 depth\r\n\r\n\r\nmaskr = h*0\r\nmaskr=np.abs(maskr)\r\nmaskr[ np.where(h > 20.1) ] = 1 \r\nmaskr = process_mask(maskr)\r\n[masku, maskv, maskp] = uvp_mask(maskr) \r\n\r\n\r\n#for i in range(masku.shape[0]):\r\n# masku[i,np.where(Lonu[0,:]>-39)] = 1 #excluir a mascara das ilhas\r\n#for i in range(maskv.shape[0]):\r\n# maskv[i,np.where(Lonv[0,:]>-39)] = 1 #excluir a mascara das ilhas\r\n#for i in range(maskp.shape[0]):\r\n# maskp[i,np.where(Lonp[0,:]>-39)] = 1 #excluir a mascara das ilhas\r\n\r\n#cs=plt.contourf(lonr, latr, h, levels=[0,100], colors='red')\r\n#proxy = [plt.Rectangle((0,0),1,1,fc = pc.get_facecolor()[0]) \r\n# for pc in cs.collections]\r\n\r\nplt.pcolor(Lonr,Latr,maskp);plt.show()\r\n\r\n#plt.legend(proxy, [\"0-100 m\"])\r\n#plt.show()\r\n\r\n#plt.pcolor(lonr, latr, maskp);plt.colorbar()\r\n#cs=plt.contourf(lonr, latr, h, levels=[1000,3000])\r\n#proxy = [plt.Rectangle((0,0),1,1,fc = pc.get_facecolor()[0]) \r\n# for pc in cs.collections]\r\n#plt.show()\r\n\r\n \r\n\r\n###\r\n####################################################################\r\n####################################################################\r\n\r\nprint (' \\n' + '==> ' + ' WRITING NETCDF GRID FILE ...\\n' + ' ')\r\n\r\nnow = dt.datetime.now()\r\nLp = L + 1\r\nMp = M + 1\r\n\r\n#if run.spherical == 1:\r\nspherical = 'T'\r\n#else:\r\n#\tspherical = 'F'\r\n\r\nncfile = netCDF4.Dataset('azul_SC.nc', mode='w',\r\n clobber='true', format='NETCDF3_CLASSIC')\r\n\r\n# creating DIMENSIONS\r\nncfile.createDimension('xi_psi', size=L)\r\nncfile.createDimension('xi_rho', size=Lp)\r\nncfile.createDimension('xi_u', size=L)\r\nncfile.createDimension('xi_v', size=Lp)\r\nncfile.createDimension('eta_psi', size=M)\r\nncfile.createDimension('eta_rho', size=Mp)\r\nncfile.createDimension('eta_u', size=Mp)\r\nncfile.createDimension('eta_v', size=M)\r\n#ncfile.createDimension('bath', size=None)\r\nncfile.createDimension('one', size=1)\r\nncfile.createDimension('two', size=2)\r\nncfile.createDimension('four', size=4)\r\n\r\n\r\n# creating GLOBAL ATTRIBUTES\r\nsetattr(ncfile, 'type', filetypestr)\r\nsetattr(ncfile, 'title', 'cuzao')\r\nsetattr(ncfile, 'history', str(now))\r\n\r\n\r\n# creating VARIABLES, ATTRIBUTES and ASSIGNING VALUES\r\n\r\n# 
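# Editorial sketch: every grid field below is written with the same
# createVariable/setattr/assign triple. A small helper (hypothetical name
# `ncvar`, not part of the original script) keeps each block to one call:

def ncvar(nc, name, dims, data, long_name, units=None, coords=None):
    """Create a double-precision variable, attach attributes, assign values."""
    v = nc.createVariable(name, 'd', dimensions=dims)
    setattr(v, 'long_name', long_name)
    if units is not None:
        setattr(v, 'units', units)
    if coords is not None:
        setattr(v, 'coordinates', coords)
    v[:] = data

# e.g. ncvar(ncfile, 'h', ('eta_rho', 'xi_rho'), h,
#            'Final bathymetry at RHO-points', 'meter', 'lon_rho lat_rho')
# The original explicit blocks are kept unchanged below.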
---------------------------------------------------------------------------\r\nncfile.createVariable('spherical', 'c')\r\nsetattr(ncfile.variables['spherical'], 'long_name', 'Grid type logical switch')\r\nsetattr(ncfile.variables['spherical'], 'option_T', 'spherical')\r\nsetattr(ncfile.variables['spherical'], 'option_F', 'cartesian')\r\nncfile.variables['spherical'][:] = spherical\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('xl', 'd', dimensions=('one'))\r\nsetattr(ncfile.variables['xl'], 'long_name', 'domain length in XI-direction')\r\nsetattr(ncfile.variables['xl'], 'units', 'meter')\r\nncfile.variables['xl'][:] = xr.max()\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('el', 'd', dimensions=('one'))\r\nsetattr(ncfile.variables['el'], 'long_name', 'domain length in ETA-direction')\r\nsetattr(ncfile.variables['el'], 'units', 'meter')\r\nncfile.variables['el'][:] = yr.max()\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('hraw', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['hraw'], 'long_name', 'Working bathymetry at RHO-points')\r\nsetattr(ncfile.variables['hraw'], 'units', 'meter')\r\nsetattr(ncfile.variables['hraw'], 'coordinates', 'lon_rho lat_rho bath')\r\nncfile.variables['hraw'][:] = hraw\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('h', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['h'], 'long_name', 'Final bathymetry at RHO-points')\r\nsetattr(ncfile.variables['h'], 'units', 'meter')\r\nsetattr(ncfile.variables['h'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['h'][:] = h\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('f', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['f'], 'long_name', 'Coriolis parameter at RHO-points')\r\nsetattr(ncfile.variables['f'], 'units', 'second-1')\r\nsetattr(ncfile.variables['f'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['f'][:] = f0\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('pm', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['pm'], 'long_name', 'Curvilinear coordinate metric in XI')\r\nsetattr(ncfile.variables['pm'], 'units', 'meter-1')\r\nsetattr(ncfile.variables['pm'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['pm'][:] = pm\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('pn', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['pn'], 'long_name', 'Curvilinear coordinate metric in ETA')\r\nsetattr(ncfile.variables['pn'], 'units', 'meter-1')\r\nsetattr(ncfile.variables['pn'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['pn'][:] = pn\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('dndx', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['dndx'], 'long_name', \r\n\t'XI derivative of inverse metric factor pn')\r\nsetattr(ncfile.variables['dndx'], 'units', 'meter')\r\nsetattr(ncfile.variables['dndx'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['dndx'][:] = dndx\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('dmde', 'd', 
dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['dmde'], 'long_name', \r\n\t'ETA derivative of inverse metric factor pm')\r\nsetattr(ncfile.variables['dmde'], 'units', 'meter')\r\nsetattr(ncfile.variables['dmde'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['dmde'][:] = dmde\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('x_rho', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['x_rho'], 'long_name', 'x location of RHO-points')\r\nsetattr(ncfile.variables['x_rho'], 'units', 'meter')\r\nncfile.variables['x_rho'][:] = xr\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('y_rho', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['y_rho'], 'long_name', 'y location of RHO-points')\r\nsetattr(ncfile.variables['y_rho'], 'units', 'meter')\r\nncfile.variables['y_rho'][:] = yr\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('x_psi', 'd', dimensions=('eta_psi', 'xi_psi'))\r\nsetattr(ncfile.variables['x_psi'], 'long_name', 'x location of PSI-points')\r\nsetattr(ncfile.variables['x_psi'], 'units', 'meter')\r\nncfile.variables['x_psi'][:] = xp\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('y_psi', 'd', dimensions=('eta_psi', 'xi_psi'))\r\nsetattr(ncfile.variables['y_psi'], 'long_name', 'y location of PSI-points')\r\nsetattr(ncfile.variables['y_psi'], 'units', 'meter')\r\nncfile.variables['y_psi'][:] = yp\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('x_u', 'd', dimensions=('eta_u', 'xi_u'))\r\nsetattr(ncfile.variables['x_u'], 'long_name', 'x location of U-points')\r\nsetattr(ncfile.variables['x_u'], 'units', 'meter')\r\nncfile.variables['x_u'][:] = xu\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('y_u', 'd', dimensions=('eta_u', 'xi_u'))\r\nsetattr(ncfile.variables['y_u'], 'long_name', 'y location of U-points')\r\nsetattr(ncfile.variables['y_u'], 'units', 'meter')\r\nncfile.variables['y_u'][:] = yu\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('x_v', 'd', dimensions=('eta_v', 'xi_v'))\r\nsetattr(ncfile.variables['x_v'], 'long_name', 'x location of V-points')\r\nsetattr(ncfile.variables['x_v'], 'units', 'meter')\r\nncfile.variables['x_v'][:] = xv\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('y_v', 'd', dimensions=('eta_v', 'xi_v'))\r\nsetattr(ncfile.variables['y_v'], 'long_name', 'y location of V-points')\r\nsetattr(ncfile.variables['y_v'], 'units', 'meter')\r\nncfile.variables['y_v'][:] = yv\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lon_rho', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['lon_rho'], 'long_name', 'longitude of RHO-points')\r\nsetattr(ncfile.variables['lon_rho'], 'units', 'degree east')\r\nncfile.variables['lon_rho'][:] = Lonr\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lat_rho', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['lat_rho'], 'long_name', 'latitude of RHO-points')\r\nsetattr(ncfile.variables['lat_rho'], 'units', 'degree 
north')\r\nncfile.variables['lat_rho'][:] = Latr\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lon_psi', 'd', dimensions=('eta_psi', 'xi_psi'))\r\nsetattr(ncfile.variables['lon_psi'], 'long_name', 'longitude of PSI-points')\r\nsetattr(ncfile.variables['lon_psi'], 'units', 'degree east')\r\nncfile.variables['lon_psi'][:] = Lonp\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lat_psi', 'd', dimensions=('eta_psi', 'xi_psi'))\r\nsetattr(ncfile.variables['lat_psi'], 'long_name', 'latitude of PSI-points')\r\nsetattr(ncfile.variables['lat_psi'], 'units', 'degree north')\r\nncfile.variables['lat_psi'][:] = Latp\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lon_u', 'd', dimensions=('eta_u', 'xi_u'))\r\nsetattr(ncfile.variables['lon_u'], 'long_name', 'longitude of U-points')\r\nsetattr(ncfile.variables['lon_u'], 'units', 'degree east')\r\nncfile.variables['lon_u'][:] = Lonu\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lat_u', 'd', dimensions=('eta_u', 'xi_u'))\r\nsetattr(ncfile.variables['lat_u'], 'long_name', 'latitude of U-points')\r\nsetattr(ncfile.variables['lat_u'], 'units', 'degree north')\r\nncfile.variables['lat_u'][:] = Latu\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lon_v', 'd', dimensions=('eta_v', 'xi_v'))\r\nsetattr(ncfile.variables['lon_v'], 'long_name', 'longitude of V-points')\r\nsetattr(ncfile.variables['lon_v'], 'units', 'degree east')\r\nncfile.variables['lon_v'][:] = Lonv\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('lat_v', 'd', dimensions=('eta_v', 'xi_v'))\r\nsetattr(ncfile.variables['lat_v'], 'long_name', 'latitude of V-points')\r\nsetattr(ncfile.variables['lat_v'], 'units', 'degree north')\r\nncfile.variables['lat_v'][:] = Latv\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('angle', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['angle'], 'long_name', 'angle between XI-axis and EAST')\r\nsetattr(ncfile.variables['angle'], 'units', 'radians')\r\nsetattr(ncfile.variables['angle'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['angle'][:] = angle\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('mask_rho', 'd', dimensions=('eta_rho', 'xi_rho'))\r\nsetattr(ncfile.variables['mask_rho'], 'long_name', 'mask on RHO-points')\r\nsetattr(ncfile.variables['mask_rho'], 'flag_values', '0, 1')\r\nsetattr(ncfile.variables['mask_rho'], 'flag_meanings', 'land, water')\r\nsetattr(ncfile.variables['mask_rho'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['mask_rho'][:] = maskr\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('mask_u', 'd', dimensions=('eta_u', 'xi_u'))\r\nsetattr(ncfile.variables['mask_u'], 'long_name', 'mask on U-points')\r\nsetattr(ncfile.variables['mask_u'], 'flag_values', '0, 1')\r\nsetattr(ncfile.variables['mask_u'], 'flag_meanings', 'land, water')\r\nsetattr(ncfile.variables['mask_u'], 'coordinates', 'lon_u lat_u')\r\nncfile.variables['mask_u'][:] = masku\r\n\r\n# 
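# Editorial sketch (hypothetical check, not in the original script): after
# ncfile.sync() at the end of this section, the grid can be spot-checked by
# reopening the file read-only, e.g.:
#
#     chk = netCDF4.Dataset('azul_SC.nc', mode='r')
#     assert chk.variables['h'].shape == (Mp, Lp)       # (eta_rho, xi_rho)
#     assert float(chk.variables['h'][:].min()) > 0.0   # depths were clipped
#     chk.close()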
---------------------------------------------------------------------------\r\nncfile.createVariable('mask_v', 'd', dimensions=('eta_v', 'xi_v'))\r\nsetattr(ncfile.variables['mask_v'], 'long_name', 'mask on V-points')\r\nsetattr(ncfile.variables['mask_v'], 'flag_values', '0, 1')\r\nsetattr(ncfile.variables['mask_v'], 'flag_meanings', 'land, water')\r\nsetattr(ncfile.variables['mask_v'], 'coordinates', 'lon_v lat_v')\r\nncfile.variables['mask_v'][:] = maskv\r\n\r\n# ---------------------------------------------------------------------------\r\nncfile.createVariable('mask_psi', 'd', dimensions=('eta_psi', 'xi_psi'))\r\nsetattr(ncfile.variables['mask_psi'], 'long_name', 'mask on PSI-points')\r\nsetattr(ncfile.variables['mask_psi'], 'flag_values', '0, 1')\r\nsetattr(ncfile.variables['mask_psi'], 'flag_meanings', 'land, water')\r\nsetattr(ncfile.variables['mask_psi'], 'coordinates', 'lon_rho lat_rho')\r\nncfile.variables['mask_psi'][:] = maskp\r\n\r\nncfile.sync()\r\n\r\nprint (' \\n' + '==> ' + ' ############################################ ...\\n' + ' ')\r\nprint (' \\n' + '==> ' + ' GRID FILE SUCCESSFULLY CREATED ...\\n' + ' ')\r\nprint (' \\n' + '==> ' + ' ############################################ ...\\n' + ' ')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"make_grid/make_grid_azul.py","file_name":"make_grid_azul.py","file_ext":"py","file_size_in_byte":22289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555922810","text":"def conver(temp,type):\n if type == 1:\n result = 9/5*temp +32\n return result\n elif type == 2:\n result = 5/9*(temp - 32)\n return result\n else:\n print(\"Conversion Nope\")\ndef displayresult(result,temp, type):\n if type == 1:\n print(temp,\"C->\",result,\"F\")\n elif type == 2:\n print(temp,\"F ->\",result,\"C\")\n else:\n print(\"Print nope\") \ndef main():\n temp = float(input(\"What is thy temperature? 
\"))\n type = int(input(\"Enter: 1 for C -> F, 2 for F -> C \"))\n result = conver(temp,type)\n displayresult(result,temp,type)\nmain()","sub_path":"Lab4/Temperature.py","file_name":"Temperature.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"414421035","text":"import logging\nimport traceback\n\nfrom kubernetes.client.rest import ApiException\n\nfrom django.conf import settings\nfrom django.utils.timezone import now\n\nimport auditor\n\nfrom constants.jobs import JobLifeCycle\nfrom db.models.build_jobs import BuildJob\nfrom docker_images.image_info import get_tagged_image\nfrom event_manager.events.build_job import BUILD_JOB_STARTED, BUILD_JOB_STARTED_TRIGGERED\nfrom libs.paths.exceptions import VolumeNotFoundError\nfrom scheduler.spawners.dockerizer_spawner import DockerizerSpawner\nfrom scheduler.spawners.utils import get_job_definition\n\n_logger = logging.getLogger('polyaxon.scheduler.dockerizer')\n\n\ndef check_image(build_job):\n from docker import APIClient\n\n docker = APIClient(version='auto')\n return docker.images(get_tagged_image(build_job))\n\n\ndef create_build_job(user, project, config, code_reference):\n \"\"\"Get or Create a build job based on the params.\n\n If a build job already exists, then we check if the build has already an image created.\n If the image does not exists, and the job is already done we force create a new job.\n\n Returns:\n tuple: (build_job, image_exists[bool], build_status[bool])\n \"\"\"\n build_job = BuildJob.create(\n user=user,\n project=project,\n config=config,\n code_reference=code_reference)\n\n if check_image(build_job=build_job):\n # Check if image exists already\n return build_job, True, False\n\n if build_job.succeeded and (now() - build_job.finished_at).seconds < 3600 * 6:\n # Check if image was built in less than an 6 hours\n return build_job, True, False\n\n if build_job.is_done:\n build_job = BuildJob.create(\n user=user,\n project=project,\n config=config,\n code_reference=code_reference,\n nocache=True)\n\n if not build_job.is_running:\n # We need to build the image first\n auditor.record(event_type=BUILD_JOB_STARTED_TRIGGERED,\n instance=build_job,\n actor_id=user.id,\n actor_name=user.username)\n build_status = start_dockerizer(build_job=build_job)\n else:\n build_status = True\n\n return build_job, False, build_status\n\n\ndef start_dockerizer(build_job):\n # Update job status to show that its started\n build_job.set_status(JobLifeCycle.SCHEDULED)\n\n spawner = DockerizerSpawner(\n project_name=build_job.project.unique_name,\n project_uuid=build_job.project.uuid.hex,\n job_name=build_job.unique_name,\n job_uuid=build_job.uuid.hex,\n k8s_config=settings.K8S_CONFIG,\n namespace=settings.K8S_NAMESPACE,\n in_cluster=True)\n\n error = {}\n try:\n results = spawner.start_dockerizer(resources=build_job.resources,\n node_selector=build_job.node_selector,\n affinity=build_job.affinity,\n tolerations=build_job.tolerations)\n auditor.record(event_type=BUILD_JOB_STARTED,\n instance=build_job)\n build_job.definition = get_job_definition(results)\n build_job.save()\n return True\n except ApiException:\n _logger.error('Could not start build job, please check your polyaxon spec',\n exc_info=True)\n error = {\n 'raised': True,\n 'traceback': traceback.format_exc(),\n 'message': 'Could not start build job, encountered a Kubernetes ApiException.'\n }\n except VolumeNotFoundError as e:\n _logger.error('Could not start build job, please check your 
volume definitions.',\n exc_info=True)\n error = {\n 'raised': True,\n 'traceback': traceback.format_exc(),\n 'message': 'Could not start build job, encountered a volume definition problem. %s' % e\n }\n except Exception as e:\n _logger.error('Could not start build job, please check your polyaxon spec.',\n exc_info=True)\n error = {\n 'raised': True,\n 'traceback': traceback.format_exc(),\n 'message': 'Could not start build job encountered an {} exception.'.format(\n e.__class__.__name__\n )\n }\n finally:\n if error.get('raised'):\n build_job.set_status(\n JobLifeCycle.FAILED,\n message=error.get('message'),\n traceback=error.get('traceback'))\n\n\ndef stop_dockerizer(project_name, project_uuid, build_job_name, build_job_uuid):\n spawner = DockerizerSpawner(\n project_name=project_name,\n project_uuid=project_uuid,\n job_name=build_job_name,\n job_uuid=build_job_uuid,\n k8s_config=settings.K8S_CONFIG,\n namespace=settings.K8S_NAMESPACE,\n in_cluster=True)\n\n spawner.stop_dockerizer()\n","sub_path":"polyaxon/scheduler/dockerizer_scheduler.py","file_name":"dockerizer_scheduler.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564676571","text":"import math\nimport pandas as pd\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport seaborn as sb\nimport argparse\nimport cv2\nfrom glob import glob\nimport os\n\nfrom prepare import data_load\nfrom prepare import get_shuffled_batch_ind\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\n\n#tf layer をラッパーして書くやりかた\n\nnum_classes = 2\nimg_height, img_width = 64, 64\n\nbase_path = os.path.abspath(os.path.dirname(__file__))\ndata_dir = \"..\\\\..\\\\DeepLearningMugenKnock\\\\Dataset\\\\train\\\\images\"\ntest_dir = \"..\\\\..\\\\DeepLearningMugenKnock\\\\Dataset\\\\test\\\\images\"\ndata_path = os.path.join(base_path, data_dir)\ntest_path = os.path.join(base_path, test_dir)\n\n# TensorBoard情報出力ディレクトリ\nlog_dir = os.path.join(base_path, 'log_data')\nprint(log_dir)\n\n\n# convolution 層のラッパー,\ndef conv2d(x, k=3, in_num=1, out_num=32, strides=1, activ=None, bias=True, name='conv'):\n w = tf.Variable(tf.random_normal([k, k, in_num, out_num]), name=name+'_w') #重みの初期値の変数を定義\n x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME') #畳み込み演算 stridesの1つ目と4つ目はN,Cなので当然1、SAMEは、入力と出力のサイズを合わせるようなパディング\n tf.add_to_collections('vars', w) #コレクションに変数を名前付きで保存\n if bias:\n b = tf.Variable(tf.random_normal([out_num]), name=name+'_b')\n tf.add_to_collections('vars', b)\n x = tf.nn.bias_add(x, b) #バイアスとして足し算処理、複数チャンネルにも対応するようブロードキャストして加算する\n if activ is not None:\n x = activ(x) #引数の関数オブジェクトでアクティベーションする\n \n tf.summary.histogram('conv_weights', w)\n tf.summary.histogram('conv_bias', b)\n return x\n\n\ndef maxpool2d(x, k=2): #特に意味のないラッパー\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n\n# 全結合層のラッパー\ndef fc(x, in_num=100, out_num=100, bias=True, activ=None, name='fc'):\n w = tf.Variable(tf.random_normal([in_num, out_num]), name=name+'_w')\n x = tf.matmul(x, w) #行列積\n tf.add_to_collections('vars', w)\n if bias:\n b = tf.Variable(tf.random_normal([out_num]), name=name+'_b')\n tf.add_to_collections('vars', b)\n x = tf.add(x, b) #1-Dの加算\n if activ is not None:\n x = activ(x)\n \n return x\n\ndef Mynet(x, keep_prob):\n #定義したラッパーを使って計算グラフを作成\n x = conv2d(x, k=3, in_num=3, out_num=32, activ=tf.nn.relu, name='conv1_1')\n x = conv2d(x, k=3, in_num=32, out_num=32, 
activ=tf.nn.relu, name='conv1_2')\n x = maxpool2d(x, k=2)\n x = conv2d(x, k=3, in_num=32, out_num=64, activ=tf.nn.relu, name='conv2_1')\n x = conv2d(x, k=3, in_num=64, out_num=64, activ=tf.nn.relu, name='conv2_2')\n x = maxpool2d(x, k=2)\n x = conv2d(x, k=3, in_num=64, out_num=128, activ=tf.nn.relu, name='conv3_1')\n x = conv2d(x, k=3, in_num=128, out_num=128, activ=tf.nn.relu, name='conv3_2')\n x = maxpool2d(x, k=2)\n\n mb, h, w, c = x.get_shape().as_list() #ネットワークの返り値のshapeを取得\n x = tf.reshape(x, [-1, h*w*c]) #画像・チャンネルを一列のデータに均す\n x = fc(x, in_num=w*h*c, out_num=1024, activ=tf.nn.relu, name='fc1')\n x = tf.nn.relu(x)\n x = tf.nn.dropout(x, keep_prob=keep_prob) #ドロップアウト(一定割合のニューロンの出力が0)\n x = fc(x, in_num=1024, out_num=num_classes, name='fc_out')\n return x\n\n\ndef train_net(train, accuracy, loss, merged, xs, ys):\n config = tf.ConfigProto()\n sess = tf.InteractiveSession(config=config)\n\n sess.run(tf.global_variables_initializer()) #おまじない。グローバルに定義されたvariableをvariable_initializerに渡す\n\n ind_batch = get_shuffled_batch_ind(len(ys), 32, 5)\n iter_per_epoch = len(ys) // 32\n\n running_loss = 0\n\n # SummaryWriterでグラフを書く(これより後のコマンドはグラフに出力されない)\n summary_writer = tf.summary.FileWriter(log_dir , sess.graph)\n\n for i, (batch_xs, batch_ys) in enumerate(zip(xs[ind_batch], ys[ind_batch])):\n\n _, sammary = sess.run([train, merged], feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.5}) #プレースホルダーの中身を確定、計算グラフ実行(train, accuracy, lossを実行)\n \n #print(\"iter >>\", i+1, ',loss >>', los / 32, ',accuracy >>', acc)\n\n summary_writer.add_summary(sammary, i)\n\n saver = tf.train.Saver() #学習結果の保存準備\n saver.save(sess, os.path.join(base_path, 'tf_cnn.ckpt')) #セッション結果を保存\n \n summary_writer.close()\n\n\ndef test_net(xs, ys):\n\n config = tf.ConfigProto()\n with tf.Session(config=config) as sess:\n saver = tf.train.Saver()\n saver.restore(sess, os.path.join(base_path, \"tf_cnn.ckpt\"))\n\n for i in range(len(ys)):\n\n input_shape = xs[i].shape\n #print(input_shape)\n x = xs[i].reshape(1, input_shape[0], input_shape[1],input_shape[2])\n y = ys[i]\n\n pred = sess.run([out], feed_dict={X:x, keep_prob:1.})[0]\n print('label:', y, 'out:', pred)\n\n\n#train\nX = tf.placeholder(tf.float32, [None, img_height, img_width, 3]) #入力のプレースホルダーを定義(noneはなんでもいい値)\nY = tf.placeholder(tf.float32, [None, num_classes]) #ラベルのプレースホルダーを定義\nkeep_prob = tf.placeholder(tf.float32) #ドロップアウトの確率のプレースホルダー\n\n# 自分でlayerを定義した時\nlogits = Mynet(X, keep_prob)\n\npreds = tf.nn.softmax(logits) #Mynetの結果をsoftmaxする計算の定義\nloss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=Y)) #Mynetの結果とラベルとの間で誤差関数の計算をする計算の定義\noptimizer = tf.train.AdamOptimizer(learning_rate=0.001) #最適化手段の定義\ntrain = optimizer.minimize(loss) #定義した最適化で、lossの結果に対して最適化を行う計算の定義\n\ncorrect_pred = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1)) #\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) #\n\n# SummaryWriterでグラフを書く\ntf.summary.image('input', X, 10)\ntf.summary.scalar('loss', loss)\ntf.summary.scalar('accuracy', accuracy)\nmerged = tf.summary.merge_all()\n\nxs, ys = data_load(data_path, 64, 64, hflip=True, vflip=True, rot=[angle for angle in range(0,360,10)])\nxs = xs.transpose(0, 2, 3, 1)\nys = np.identity(num_classes)[ys] #one-hot化\nprint(ys)\n\ntrain_net(train, accuracy, loss, merged, xs, ys) #定義した計算を渡して学習sessionを実行させる\n\n\n#test\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, img_height, img_width, 3])\nY = tf.placeholder(tf.float32, [None, num_classes])\nkeep_prob = tf.placeholder(tf.float32)\n\n#logits = Mynet(X, 
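# Editorial sketch (assumes the session/checkpoint setup used in test_net
# above): the test loop prints raw softmax rows only; overall test accuracy
# could additionally be reported with the hypothetical lines
#
#     correct = 0
#     for x_i, y_i in zip(xs, ys):
#         p = sess.run(out, feed_dict={X: x_i[None, ...], keep_prob: 1.0})
#         correct += int(np.argmax(p[0]) == np.argmax(y_i))
#     print('test accuracy:', correct / len(ys))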
train=False)\nlogits = Mynet(X, keep_prob)\nout = tf.nn.softmax(logits)\n\nxs, ys = data_load(test_path, 64, 64)\nxs = xs.transpose(0, 2, 3, 1)\nys = np.identity(num_classes)[ys]\nprint(ys)\n\ntest_net(xs, ys)","sub_path":"2_model/tf_test_board.py","file_name":"tf_test_board.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"395986810","text":"import matplotlib.pyplot as plt\nimport importData_4 as imD4\nimport keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Bidirectional, Dropout\nfrom keras.callbacks import TensorBoard, EarlyStopping\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\nimport numpy as np\nimport pandas as pd\nimport time\nfrom datetime import datetime\n\na = '\\u00b0'\n# seed = 5\n# np.random.seed(seed)\n\nNAME = f\"lstm_real_5_500_{str(datetime.fromtimestamp(time.time()))}\"\nprint(NAME)\ntensorboard = TensorBoard(log_dir=f\"logs/{NAME}\",\n histogram_freq=50,\n write_graph=True)\n\ndef soft_acc(y_true, y_pred):\n return K.mean(K.equal(K.round(y_true), K.round(y_pred)))\n\nX = imD4.X.values\ny = imD4.y.values.reshape(-1,1)\ntarget_X = imD4.target_X.values\ntarget_y = imD4.target_y.values.reshape(-1,1)\n\n# Normalize with withn range [0,1]\nscalerMm = MinMaxScaler(feature_range=(0,1))\nscalerMm_X = scalerMm.fit(X)\nscalerMm_y = scalerMm.fit(y)\nX = scalerMm_X.transform(X).reshape(302,1,32)\ny = scalerMm_y.transform(y)\ntarget_X = scalerMm_X.transform(target_X).reshape(20,1,32)\ntarget_y = scalerMm_y.transform(target_y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# for epoch in [10, 20, 30, 50, 70, 90, 100]:\n# design network\nmodel = Sequential()\nmodel.add(LSTM(10, input_shape=(1,32),return_sequences=True, kernel_initializer='uniform', activation='relu'))\nmodel.add(LSTM(10, input_shape=(1,32), activation='relu'))\nmodel.add(Dense(1, activation='linear'))\nmodel.compile(loss='mean_squared_error', optimizer='adam', metrics=[soft_acc])\n\n# fit network\ncallbacks = [tensorboard]\nH = model.fit(X_train, y_train, batch_size=50, epochs=500, validation_data=(X_test, y_test), verbose=0)\n\n# # plt.plot(H.history['loss'], label='train loss')\n# plt.plot(H.history['val_loss'], label='val loss', color='orange')\n# plt.title('Training Error')\n# plt.xlabel('nb_epochs')\n# plt.legend()\n# plt.show()\n\n# # plt.plot(H.history['soft_acc'], label='train accuracy')\n# plt.plot(H.history['val_soft_acc'], label='val accuracy', color='orange')\n# plt.title('Training Accuracy')\n# plt.xlabel('nb_epochs')\n# plt.legend()\n# plt.show()\n\n# make predictions\npredictions = model.predict(target_X)\n# plot the predictions\nplt.plot(scalerMm_X.inverse_transform(target_y),'-o', label='real')\nplt.plot(scalerMm_X.inverse_transform(predictions), '-o', label='prediction')\nplt.title('Temperature Predictions')\nplt.xlabel('nb_days')\nplt.ylabel(f'Temperature ({a}C)')\nplt.legend()\n# gi.plt.plot(scalerMm_X.inverse_transform(target),'-b', scalerMm_X.inverse_transform(temp_p), '-g')\nplt.show()\nprint(f\"MSE: {mse(scalerMm_X.inverse_transform(target_y), scalerMm_X.inverse_transform(predictions))}\")\nprint(f\"MAE: {mae(scalerMm_X.inverse_transform(target_y), scalerMm_X.inverse_transform(predictions))}\")\n\n\n# OLD\n# # model.summary()\n# # for layer 
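# Editorial note: `scalerMm_X = scalerMm.fit(X)` and `scalerMm_y = scalerMm.fit(y)`
# near the top of this file both name the SAME MinMaxScaler instance (fit()
# returns self), so every transform/inverse_transform here uses statistics
# from the last fit on y. A corrected sketch keeps two independent scalers:
#
#     scaler_X = MinMaxScaler(feature_range=(0, 1)).fit(X)
#     scaler_y = MinMaxScaler(feature_range=(0, 1)).fit(y)
#     X_s, y_s = scaler_X.transform(X), scaler_y.transform(y)
#     # ... and predictions are inverted with scaler_y, not the X scaler:
#     # scaler_y.inverse_transform(predictions)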
in model.layers:\n# # print(layer.name, layer.inbound_nodes, layer.outbound_nodes)\n# #print the metrics of mean squared error\n# plt.plot(H.history['loss'], label='train')\n# plt.plot(H.history['val_loss'], label='test')\n# plt.legend()\n# plt.show()\n#\n# plt.plot(H.history['soft_acc'], label='train accuracy')\n# plt.plot(H.history['val_soft_acc'], label='test accuracy')\n# plt.legend()\n# plt.show()\n#\n# make predictions\npredictions = model.predict(target_X)\n# # plot the predictions\nplt.plot(scalerMm_X.inverse_transform(target_y), '-bo', scalerMm_X.inverse_transform(predictions), '-g+')\n# gi.plt.plot(scalerMm_X.inverse_transform(target),'-b', scalerMm_X.inverse_transform(temp_p), '-g')\nplt.show()","sub_path":"lstm_net.py","file_name":"lstm_net.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50454227","text":"print(\" - Programa que cambia las letras 's' de un string por '$' -\")\ntexto = input(\"Dame un texto (preferiblemente que tenga 's'): \")\nlista = list(texto)\ni = 0\nwhile(i < len(lista)):\n if(lista[i] == \"s\"):\n lista[i] = \"$\"\n i += 1\n\nprint(\"Ahora tu texto es:\", \"\".join(lista))","sub_path":"Basics/Ejercicios1/changeString.py","file_name":"changeString.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"138469809","text":"import tkinter as tk\nimport tkinter.ttk\nimport pandas as pd\n\ndef collectwritetaglist():\n list1 = []\n df = pd.read_excel(r'C:\\OPCUA\\Working_VF1_5.xls', sheet_name='WriteGeneral')\n n = 0\n while n < len(df.index):\n list1.append(df.iloc[n, 0])\n n = n + 1\n return list1\n\ndef select():\n curItems = tree.selection()\n lb = tk.Label(root,text = \"\\n\".join([str(tree.item(i)['values']) for i in curItems])).pack()\n\n # lb = tk.Label(root,text = \"\\n\".join([str(tree.item(i)['values']) for i in curItems])).pack())\n print(curItems)\n # lb = tk.Label(root, text=\"\\n\".join([str(tree.item(i)['values']) for i in curItems])).pack())\n # lb.pack()\n\n# \"\\n\".join([str(tree.item(i)['values']) for i in curItems])).pack()\nroot = tk.Tk()\ntree = tkinter.ttk.Treeview(root, height=4)\nlistofwritetag = collectwritetaglist()\n\ntree['show'] = 'headings'\ntree['columns'] = ('Tag Name', 'Value')\ntree.heading(\"#1\", text='Tag Name', anchor='w')\ntree.column(\"#1\", stretch=\"no\")\ntree.heading(\"#2\", text='Value', anchor='w')\ntree.column(\"#2\", stretch=\"no\")\ntree.pack()\n\nn = 0\nwhile n < len(listofwritetag):\n tree.insert(\"\", n, values=[listofwritetag[n], 1 ])\n n = n + 1\n\ntree.bind(\"\", lambda e: select())\nroot.mainloop()","sub_path":"RailRites/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"359612780","text":"import math as ma\nimport numpy as np\nimport pylab as pl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.tri as mtri\nimport mpl_toolkits.mplot3d as a3\n\n# Werte für die Höhe des Quaders, Rotationsmatrix und Konstante einer Zwischenrechnung\nhoehe = 4\nrotation = np.array( [ [0, -1,0], [1, 0 ,0],[0,0,1] ] )\nconst = ma.sqrt((1/2)*(2-0.25*0.25*hoehe*hoehe))\n\n#----------------------------------------------------------\n# Erstellen einer Seite des Oktaeders\n# Quadratische Grundfläche\na = np.array([-1,-1,0])\nb = np.array([1,-1,0])\nc = np.array([1,1,0])\nd = 
np.array([-1,1,0])\n\n#Quadratische Fläche/ Deckel\ne = np.array([-1,-1,hoehe])\nf = np.array([1,-1,hoehe])\ng = np.array([1,1,hoehe])\nh = np.array([-1,1,hoehe])\n\n#Unteres 6-Eck\ni = np.array([-1-const, -1-const, 0.5*hoehe])\nj = np.array([-1,-1-2*const,0.5*hoehe])\nk = np.array([1,-1-2*const,0.5*hoehe])\nl = np.array([1+const,-1-const,0.25*hoehe])\n\n#Ergänzung zum oberen 6-Eck\nm = np.array([-1-const,-1-const,0.75*hoehe])\nn = np.array([1+const,-1-const,0.75*hoehe])\n\n#Ergänzung zum Quadrat zwischen den 6-Ecken\no = np.array([1+2*const,-1,0.5*hoehe])\n#----------------------------------------------------------\n\n# Array mit den Vertices die gezeichnet werden\nvertices = []\n\ndef Quader(x1, x2, x3):\n\tglobal a,b,c,d,e,f,g,h,i,j,k,l,m,n,o\n\tr = [x1, x2, x3]\n\tvertices.append([a+r,b+r,c+r,d+r])\n\tvertices.append([e+r,f+r,g+r,h+r])\n\tvertices.append([a+r,i+r,j+r,k+r,l+r,b+r])\n\tvertices.append([e+r,m+r,j+r,k+r,n+r,f+r])\n\tvertices.append([k+r,n+r,o+r,l+r])\n\n\t# Durch Drehungen um 90 Grad werden die restlichen 4 Seiten des Oktaeders erstellt\t\n\tfor counter in range(0,3):\n\t\ta = np.dot(rotation, a)\n\t\tb = np.dot(rotation, b)\n\t\tc = np.dot(rotation, c)\n\t\td = np.dot(rotation, d)\n\t\te = np.dot(rotation, e)\n\t\tf = np.dot(rotation, f)\n\t\tg = np.dot(rotation, g)\n\t\th = np.dot(rotation, h)\n\t\ti = np.dot(rotation, i)\n\t\tj = np.dot(rotation, j)\n\t\tk = np.dot(rotation, k)\n\t\tl = np.dot(rotation, l)\n\t\tm = np.dot(rotation, m)\n\t\tn = np.dot(rotation, n)\n\t\to = np.dot(rotation, o)\n\t\tvertices.append([a+r,b+r,c+r,d+r])\n\t\tvertices.append([e+r,f+r,g+r,h+r])\n\t\tvertices.append([a+r,i+r,j+r,k+r,l+r,b+r])\n\t\tvertices.append([e+r,m+r,j+r,k+r,n+r,f+r])\n\t\tvertices.append([k+r,n+r,o+r,l+r])\n\n# Vertices mit Quadern auffüllen\nQuader(0,0,0)\nQuader(0,0,hoehe)\nQuader(0,0,2*hoehe)\nQuader(0,-2-2*const,0.5*hoehe)\nQuader(0,-2-2*const, 1.5*hoehe)\nQuader(2+2*const,0,0.5*hoehe)\nQuader(2+2*const,0,-0.5*hoehe)\nQuader(2+2*const,-2-2*const,0)\n\n# Arrays mit Farben für Flächen und Kanten\nfcol = []\necol = []\nfor counter2 in range(0,20):\n\tfcol.append('blue')\n\tecol.append('black')\n\n# Figur erstellen\nfig = pl.figure()\nax = fig.add_subplot(1,1,1,projection='3d')\nax.add_collection3d(a3.art3d.Poly3DCollection(\\\n\tvertices,\n\tfacecolors=fcol,\n\tedgecolors=ecol))\nax.set_xlim3d(-10, 10)\nax.set_ylim3d(-10, 10)\nax.set_zlim3d(-10, 10)\npl.title('Oktaederstumpf')\npl.show()\n","sub_path":"Blatt9/Oktaeder.py","file_name":"Oktaeder.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617547251","text":"#!/usr/local/bin/python2.7\n# -*- coding: utf-8 -*-\nimport os\nimport re\n\nf1 = open(\"1.2/title.dat\").readlines()\nf2 = open(\"1.2/cmd.dat\").readlines()\n\nf3 = open(\"result.txt\", \"a\")\n\nfor title,cmd in zip(f1, f2):\n check = os.popen(cmd.strip()).read()\n if check:\n line = title.strip()+\" YES\\n\"\n f3.write(line)\n else:\n line = title.strip()+\" NO\\n\"\n f3.write(line)\n\n\n\n\n\n\n\n","sub_path":"policy-k/1.2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212739633","text":"import onmt\nimport torch\nimport torch.nn as nn\n\n\nif __name__ == \"__main__\":\n\n from onmt.models.multilingual_translator.reversible_transformers import reversible_encoder, \\\n ReversibleTransformerEncoderLayer\n\n from 
onmt.models.multilingual_translator.reversible_transformers import reversible_decoder, \\\n ReversibleTransformerDecoderLayer\n\n import argparse\n\n parser = argparse.ArgumentParser(description='reversible transformer')\n parser.add_argument('-model_size', type=int, default=32,\n help='Size of embedding / transformer hidden')\n parser.add_argument('-gpu', default=0, type=int,\n help=\"Seed for deterministic runs.\")\n\n opt = parser.parse_args()\n\n torch.cuda.set_device(opt.gpu)\n\n opt.layers = 4\n opt.variational_dropout = False\n opt.dropout = 0.0\n opt.attn_dropout = 0.0\n opt.residual_dropout = 0.0\n opt.ffn_dropout = 0.0\n opt.n_heads = 4\n opt.inner_size = 4 * opt.model_size\n opt.ffn_glu = False\n opt.ffn_activation = 'relu'\n opt.head_dim = opt.model_size // opt.n_heads\n opt.learnable_position_encoding = False\n opt.ignore_source = False\n\n layers = torch.nn.ModuleList()\n\n for l in range(opt.layers):\n layer = ReversibleTransformerEncoderLayer(opt)\n layers.append(layer)\n\n class TestEncoder(torch.nn.Module):\n\n def __init__(self, layers):\n super().__init__()\n self.function = reversible_encoder\n self.layers = layers\n\n def forward(self, input, pos):\n\n return self.function(self.layers, input, pos, None)\n\n\n bsz = 4\n len_q = 7\n len_r = 7\n len_k = 12\n\n device = torch.device('cuda:0')\n input_states = torch.randn(*(len_q, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)\n pos = torch.randn(*(len_q, 1, opt.model_size), dtype=torch.float64, requires_grad=False, device=device)\n\n net = TestEncoder(layers)\n net = net.double().cuda()\n\n print(net)\n\n # print(\"gradchecking ENCODER start.\")\n #\n # torch.autograd.gradcheck(net, (input_states, pos), eps=1e-6, atol=1e-5, rtol=1e-3)\n #\n # print(\"gradchecking ENCODER completed.\")\n\n class TestDecoder(torch.nn.Module):\n\n def __init__(self, layers):\n super().__init__()\n self.function = reversible_decoder\n self.layers = layers\n\n def forward(self, input, pos, context):\n\n return self.function(self.layers, input, pos, context, None, None, None, None)\n\n\n device = torch.device('cuda:0')\n input_states = torch.randn(*(len_q, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)\n pos = torch.randn(*(len_q, 1, opt.model_size), dtype=torch.float64, requires_grad=False, device=device)\n\n context = torch.randn(*(len_k, bsz, opt.model_size), dtype=torch.float64, requires_grad=True, device=device)\n\n layers = torch.nn.ModuleList()\n\n for l in range(opt.layers):\n layer = ReversibleTransformerDecoderLayer(opt)\n layers.append(layer)\n\n net = TestDecoder(layers)\n net = net.double().cuda()\n\n print(\"gradchecking DECODER start.\")\n torch.autograd.gradcheck(net, (input_states, pos, context), eps=1e-6, atol=1e-5, rtol=1e-3)\n\n print(\"Completed.\")\n # net(input_states, pos, context)","sub_path":"test_reversible.py","file_name":"test_reversible.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119586562","text":"# Randomly added line\nimport struct\nimport datetime\n\nfrom Crypto import Random\nfrom Crypto.Hash import HMAC\nfrom Crypto.Cipher import AES\nfrom lib.helpers import read_hex\n\nfrom dh import create_dh_key, calculate_dh_secret\n\n# Format the time stamp to the milliseconds for preventing replay attacks\ntimestamp_format = \"%Y-%m-%d %H:%M:%S:%f\"\ntimestamp_format_len = 26\n\nclass StealthConn(object):\n def __init__(self, conn, client=False, 
server=False, verbose=False):\n self.conn = conn\n self.cipher = None\n self.client = client\n self.server = server\n self.verbose = verbose\n self.shared_hash = None\n self.last_message_time = datetime.datetime.now()\n self.initiate_session()\n\n def initiate_session(self):\n # Perform the initial connection handshake for agreeing on a shared secret\n if self.server or self.client:\n my_public_key, my_private_key = create_dh_key()\n # Send them our public key\n self.send(str(my_public_key))\n # Receive their public key\n their_public_key = int(self.recv())\n # Obtain our shared secret\n self.shared_hash = calculate_dh_secret(their_public_key, my_private_key)\n if self.verbose:\n print(\"Shared hash: {}\".format(self.shared_hash))\n self.shared_hash = bytes.fromhex(self.shared_hash)\n\n # Use AES in CFB mode for encryption\n iv = self.shared_hash[:16] # set the initialization vector\n self.cipher = AES.new(self.shared_hash, AES.MODE_CFB, iv) # create cipher object\n\n def send(self, data):\n # Sort out encoding problems\n if type(data) != type(b\"\"):\n data = bytes(data,'ascii')\n if self.verbose:\n print(\"Function 'send' received data\",data,type(data))\n\t\n #Create a HMAC and prepend it to the message\n if self.shared_hash != None:\n h = HMAC.new(self.shared_hash)\n h.update(data)\n if self.verbose:\n print(\"Hex digest is:\",h.hexdigest())\n mac_data = bytes(h.hexdigest(),\"ascii\") + data\n # Use the following code if you want to test what happens when the HMAC is bad\n #mac_data = h.hexdigest()[:-1] + \"a\" + data.decode(\"ascii\") # replace a random character in the digest\n else:\n mac_data = data\n if self.verbose:\n print(\"Data is now encoded with HMAC\",mac_data,type(mac_data))\n \n # Add a timestamp to the message\n current_time = datetime.datetime.now()\n # Use the following code to test if it works: subtract some time from now\n #current_time = self.last_message_time - datetime.timedelta(1,0) # Take away a day from the last recieved message\n timestr = datetime.datetime.strftime(current_time, timestamp_format) #format the timestamp\n mac_data = bytes(timestr, 'ascii') + mac_data # prepend it to the message\n\t\t\t\n if self.cipher:\n encrypted_data = self.cipher.encrypt(mac_data) #Encrypt the message\n if self.verbose:\n print(\"Original data: {}\".format(data))\n print(\"Encrypted data: {}\".format(repr(encrypted_data)))\n print(\"Sending packet of length {}\".format(len(encrypted_data)))\n else:\n encrypted_data = mac_data\n if self.verbose:\n print(\"Ecrypted data is just the same as data\",type(encrypted_data))\n\n # Encode the data's length into an unsigned two byte int ('H')\n pkt_len = struct.pack('H', len(encrypted_data))\n if self.verbose:\n print(\"Sending packet length\",pkt_len,type(pkt_len)) \n self.conn.sendall(pkt_len)\n if self.verbose:\n print(\"Sending encrypted data:\",encrypted_data,type(encrypted_data))\n self.conn.sendall(encrypted_data)\n\n\n def recv(self):\n # Decode the data's length from an unsigned two byte int ('H')\n pkt_len_packed = self.conn.recv(struct.calcsize('H'))\n unpacked_contents = struct.unpack('H', pkt_len_packed)\n pkt_len = unpacked_contents[0]\n if self.verbose:\n print(\"Packet length is\",pkt_len)\n\n encrypted_data = self.conn.recv(pkt_len) # Recieve the message\n if self.verbose:\n print(\"Received Encrypted Data:\",encrypted_data)\n if self.cipher:\n data = self.cipher.decrypt(encrypted_data) # Decrypt the message\n if self.verbose:\n print(\"Receiving packet of length {}\".format(pkt_len))\n print(\"Encrypted data: 
{}\".format(repr(encrypted_data)))\n print(\"Original data: {}\".format(data))\n else:\n data = encrypted_data\n if self.verbose:\n print(\"Decrypted Data:\",data)\n\n #strip off the HMAC and timestamp and verify the message\n \n #take off the timestamp first\n tstamp = str(data[:timestamp_format_len], 'ascii')\n data = data[timestamp_format_len:]\n \n #get the HMAC, if we're using one\n if self.shared_hash != None:\n h = HMAC.new(self.shared_hash)\n hmac = data[:h.digest_size*2] #Get the HMAC part of the message\n data = data[h.digest_size*2:] # Get the data part of the message\n h.update(data)\n if h.hexdigest() != str(hmac, 'ascii'): #HMAC is not right, so raise an error\n if self.verbose:\n print(\"Bad message\")\n print(\"HMAC from message:\",str(hmac,'ascii'))\n print(\"HMAC from digest:\",h.hexdigest())\n print(\"Not verifying message:\",data)\n raise RuntimeError(\"Bad message: HMAC does not match\")\n elif self.verbose:\n print(\"Shared hash is null\")\n \n #we'll only accept messages that have timstamps after the one we last recieved\n msg_time = datetime.datetime.strptime(tstamp, timestamp_format);\n if self.verbose:\n print(msg_time)\n if msg_time <= self.last_message_time: #If the timestamp is not newer, then raise an error\n if self.verbose:\n print(\"Bad timestamp\")\n print(\"timestamp:\",tstamp)\n raise RuntimeError(\"Bad timestamp: message not newer than last recieved one\")\n\n self.last_message_time = msg_time # Update message time\n \n return data\n\n def close(self):\n self.conn.close()\n","sub_path":"project_kd/skynet_part2/lib/comms.py","file_name":"comms.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"638596221","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\"\"\"\r\naccount_oneclick.py\r\n\"\"\"\r\nimport datetime\r\n\r\nfrom rklib.model import BaseModel\r\nfrom apps.common import sequence\r\nfrom apps.common.utils import get_upwd\r\n\r\nclass AccountOneclick(BaseModel):\r\n \"\"\"自分配账号\r\n \r\n Attributes:\r\n pid: 用户openid str\r\n pwd: 自分配的用户密码\r\n uid: 应用自身用户ID str\r\n created_at: 创建时间 date\r\n \"\"\"\r\n def __init__(self,pid = None):\r\n \"\"\"初始化用户账号映射信息\r\n \r\n Args:\r\n pid: openid\r\n \"\"\"\r\n BaseModel.__init__(self)\r\n self.openid = pid\r\n self.access_token = None\r\n self.pid = pid\r\n self.uid = None\r\n self.created_at = str(datetime.datetime.now())\r\n \r\n @classmethod\r\n def get_user_id(cls,pid,force=False):\r\n \"\"\"为每一个用户生成对应的应用自身维护的用户ID\r\n Args:\r\n pid: openid\r\n \r\n Returns:\r\n uid: 应用自身维护的用户ID\r\n \"\"\"\r\n \r\n account_oneclick_obj = cls.get(pid)\r\n \r\n if not isinstance(account_oneclick_obj,cls):\r\n uid = sequence.generate()\r\n \r\n account_oneclick_obj = cls(pid)\r\n account_oneclick_obj.uid = uid\r\n account_oneclick_obj.access_token = get_upwd()\r\n account_oneclick_obj.put()\r\n \r\n return account_oneclick_obj.uid\r\n","sub_path":"python/project/card/my_card/apps/models/account_oneclick.py","file_name":"account_oneclick.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"314295939","text":"from os import path\r\nfrom array import *\r\nfrom copy import deepcopy\r\nimport sys\r\n#import numpy as np\r\nimport csv\r\n\r\n#Global variables\r\ncolNum = 0 #Number of columns, total number of column features, value to be accessed for featureSearch function.\r\n#The rows and columns for the dataset, used in featureSearch, 
cross validation, and for distance calculation (nearest neighbors distance)\r\nrow = []\r\ncolumnFeature = []\r\n\r\n\"\"\"\r\nPSEUDOCODE BEING FOLLOWED:\r\nfunction feature_search_demo(data)\r\ncurrent_set_of_features = []; % Initialize an empty set\r\nfor i = 1 : size(data,2)-1\r\n disp(['On the ',num2str(i),'th level of the search tree'])\r\n feature_to_add_at_this_level = [];\r\n best_so_far_accuracy = 0;\r\n for k = 1 : size(data,2)-1\r\n if isempty(intersect(current_set_of_features,k)) % Only consider adding, if not already added.\r\n disp(['--Considering adding the ', num2str(k),' feature'])\r\n accuracy = leave_one_out_cross_validation(data,current_set_of_features,k+1);\r\n if accuracy > best_so_far_accuracy\r\n best_so_far_accuracy = accuracy;\r\n feature_to_add_at_this_level = k;\r\n end\r\n end\r\n end\r\n current_set_of_features(i) = feature_to_add_at_this_level;\r\n disp(['On level ', num2str(i),' i added feature ', num2str(feature_to_add_at_this_level), ' to current set'])\r\n end\r\nend\r\n\"\"\"\r\ndef featureSearch(colNum, row, columnFeature, userInput):\r\n algoName = \"\" #For the \"Hello World introduction to the algorithm\"\r\n action = \"\" #Depending on forward selection or backward elimination, we will either ADD or REMOVE a feature to our current set of features.\r\n #print(colNum)\r\n if userInput == '1': #If user picks forward selection\r\n algoName = \"Forward Selection\"\r\n currSetFeatures = []\r\n elif userInput == '2': #If user picks backward elimination.\r\n algoName = \"Backward Elimination\"\r\n #currSetFeatures = np.arange(1, colNum, 1) #https://numpy.org/doc/stable/reference/generated/numpy.arange.html using numpy library in order to create a currSetFeatures withe every feature\r\n\r\n print(\"Hello, world!! We have officially entered \" + algoName + \"!\\n\")\r\n accuracyArr = []\r\n featureOutput = []\r\n for i in range(1, colNum):\r\n if i % 10 == 1 and i != 11:\r\n print(\"On the \" + str(i) + \"st level of the search tree\")\r\n elif i % 10 == 2:\r\n print(\"On the \" + str(i) + \"nd level of the search tree\")\r\n elif i % 10 == 3:\r\n print(\"On the \" + str(i) + \"rd level of the search tree\")\r\n else:\r\n print(\"On the \" + str(i) + \"th level of the search tree\")\r\n featureToAddAtLevel = 0\r\n bestAcc = 0\r\n for k in range(1, colNum):\r\n initCopy = deepcopy(currSetFeatures) #https://docs.python.org/3/library/copy.html in order to create \"levels\" of the search tree, utilized deepcopy from Python's copy library.\r\n if userInput == '1':\r\n if k not in currSetFeatures:\r\n if k % 10 == 1 and k != 11:\r\n print(\"--Considering adding the \" + str(k) + \"st feature to this: \" + str(initCopy))\r\n elif k % 10 == 2:\r\n print(\"--Considering adding the \" + str(k) + \"nd feature to this: \" + str(initCopy))\r\n elif k % 10 == 3:\r\n print(\"--Considering adding the \" + str(k) + \"rd feature to this: \" + str(initCopy))\r\n else:\r\n print(\"--Considering adding the \" + str(k) + \"th feature to this: \" + str(initCopy))\r\n accuracy = leaveOneOutCross(initCopy, k, row, columnFeature, userInput)\r\n print(\"Accuracy: \" + str(accuracy))\r\n if accuracy > bestAcc:\r\n bestAcc = accuracy\r\n featureToAddAtLevel = k\r\n elif userInput == '2':\r\n if k in currSetFeatures:\r\n if k % 10 == 1 and k != 11:\r\n print(\"--Considering removing the \" + str(k) + \"st feature: \" + str(initCopy))\r\n elif k % 10 == 2:\r\n print(\"--Considering removing the \" + str(k) + \"nd feature: \" + str(initCopy))\r\n elif k % 10 == 3:\r\n print(\"--Considering 
removing the \" + str(k) + \"rd feature: \" + str(initCopy))\r\n else:\r\n print(\"--Considering removing the \" + str(k) + \"th feature: \" + str(initCopy))\r\n accuracy = leaveOneOutCross(initCopy, k, row, columnFeature, userInput)\r\n print(\"Accuracy: \" + str(accuracy))\r\n if accuracy > bestAcc:\r\n bestAcc = accuracy\r\n featureToAddAtLevel = k\r\n if userInput == '1':\r\n currSetFeatures.append(featureToAddAtLevel)\r\n elif userInput == '2':\r\n currSetFeatures = currSetFeatures[currSetFeatures != featureToAddAtLevel]\r\n accuracyArr.append(bestAcc)\r\n resultCopy = deepcopy(currSetFeatures) #We want to output the best features based on best accuracy.\r\n featureOutput.append(resultCopy)\r\n if userInput == '1':\r\n print(\"On level \" + str(i) + \" I added feature(s) \" +\r\n str(featureToAddAtLevel) + \" to current set, accuracy is \" + str(bestAcc) + \"\\n\")\r\n elif userInput == '2':\r\n print(\"On level \" + str(i) + \" I removed feature \" +\r\n str(featureToAddAtLevel) + \" from current set, accuracy is \" + str(bestAcc) + \"\\n\")\r\n maxPercent = max(accuracyArr)\r\n maxPercent = maxPercent * 100\r\n #https://www.w3schools.com/python/ref_list_index.asp to find the indexes that created the best accuracy\r\n print(\"Finished search!! The best feature subset is \" + str(featureOutput[accuracyArr.index(max(accuracyArr))]) + \", which has an accuracy of \" + str(maxPercent) + \"%\")\r\n\r\n#distance function to help calculate accuracy using k-fold cross validation\r\n#https://machinelearningmastery.com/implement-resampling-methods-scratch-python/ helped me grow in my understanding of what I needed to do.\r\ndef calculateDistance(current, a, b, columnFeature):\r\n distCalculated = 0.0\r\n for i in range(len(current)):\r\n row1 = float(columnFeature[current[i] - 1][b])\r\n row2 = float(columnFeature[current[i] - 1][a])\r\n distCalculated = distCalculated + ((row1 - row2)*(row1 - row2))\r\n return distCalculated\r\n\r\n#Pseudocode from Project 2 Briefing lecture slides referenced\r\ndef leaveOneOutCross(initCopy, k, row, columnFeature, userInput):\r\n num = 0 #Integer variable to help calculate accuracy\r\n if userInput == '1': #If user chose Forward Selection\r\n initCopy.append(k)\r\n elif userInput == '2': #If user chose Backward Elimination\r\n # https://stackoverflow.com/questions/25004347/remove-list-element-without-mutation/25004389\r\n initCopy = initCopy[initCopy != k]\r\n else:\r\n print(\"\")\r\n for i in range(len(row)):\r\n nearestNeighborDistance = sys.maxsize #https://docs.python.org/3/library/sys.html\r\n nearestNeighborLocation = sys.maxsize\r\n for j in range(len(row)):\r\n if not i == j:\r\n distance = calculateDistance(initCopy, i, j, columnFeature)\r\n if distance < nearestNeighborDistance:\r\n nearestNeighborDistance = distance\r\n nearestNeighborLocation = j\r\n if row[i] == row[nearestNeighborLocation]:\r\n num = num + 1\r\n #print(\"Object \" + str(i) + \" is class \" + str(row[i]))\r\n #print(\"The nearest neighbor is \" + str(nearestNeighborLocation) + \" which is in class \" + str(nearestNeighborDistance))\r\n accuracy = num / len(row)\r\n return accuracy\r\n\r\ndef main():\r\n global colNum\r\n global row\r\n global columnFeature\r\n userInput = \"\" #The user will be inputting 1, or 2, depending on their algorithm choice\r\n #print(sys.version)\r\n print(\"Welcome to Ted Kim's Feature Selection Algorithm!\")\r\n fileInput = input(\"Type in the name of the file to test: \") #user has to input a correct file to run algorithm on\r\n # 
 # https://docs.python.org/3/library/os.path.html, using Python's os.path to check if a file exists; if it does not, the user is re-prompted until a correct file is typed in.\r\n while not path.exists(fileInput):\r\n print(\"CRINGE!! THAT FILE ISN'T REAL.\")\r\n fileInput = input(\"Type in the name of the file to test: \")\r\n #w3schools.com/python/python_file_open.asp Referenced for opening the file\r\n f = open(fileInput, 'r')\r\n # https://docs.python.org/3/library/csv.html\r\n read = csv.reader(f, delimiter=' ', skipinitialspace=True)\r\n nr = next(read) # https://www.w3schools.com/python/ref_func_next.asp\r\n colNum = len(nr)\r\n f.close()\r\n f2 = open(fileInput, 'r')\r\n rl = f2.readlines() # https://www.w3schools.com/python/ref_file_readlines.asp\r\n f2.close()\r\n #https://stackoverflow.com/questions/6696027/how-to-split-elements-of-a-list\r\n for i in rl:\r\n x = i.split()[0] #Splitting every single line in the dataset and then appending each individual class label\r\n row.append(x)\r\n print(\"\\n\")\r\n print(\"Wow! Thanks for inputting the file name: \" +\r\n fileInput + \"! Let's do something with it!\\n\")\r\n\r\n #https://stackoverflow.com/questions/6696027/how-to-split-elements-of-a-list\r\n #build the per-feature columns first, so that the baseline accuracy below can use them\r\n for i in range(1, colNum):\r\n outputArr = []\r\n for j in rl:\r\n y = j.split()[i]\r\n #print(y)\r\n outputArr.append(y)\r\n columnFeature.append(outputArr)\r\n\r\n dup = list(range(1, colNum)) #the baseline run really uses every feature, not an empty set\r\n print(\"This dataset has \" + str(colNum - 1) + \" features (not including the class attribute), with \" + str(len(row)) + \" instances.\")\r\n accuracyInit = leaveOneOutCross(dup, 0, row, columnFeature, 0)\r\n print(\"Running nearest neighbor with all \" + str(colNum - 1) + \" features, using \\\"leave one out\\\" evaluation, I get an accuracy of \" + str(accuracyInit * 100) + \"%.\\n\")\r\n\r\n while userInput != '1' and userInput != '2': #User is asked to choose either 1 or 2 as correct inputs, for one of the two provided algorithms.\r\n print(\"Type the number of the algorithm you want to run.\")\r\n print(\"1) Forward Selection\")\r\n print(\"2) Backward Elimination\")\r\n userInput = input()\r\n if userInput == '1':\r\n print(\"This is going to do forward selection.\")\r\n featureSearch(colNum, row, columnFeature, userInput) #featureSearch function will begin performing forward selection\r\n elif userInput == '2':\r\n print(\"This is going to do backward elimination.\")\r\n featureSearch(colNum, row, columnFeature, userInput) #featureSearch function will begin performing backward elimination\r\n
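 # any other entry keeps the loop alive and the user is asked to choose again\r\n else:\r\n print(\"BOO!! 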
Let's try typing in a REAL algorithm!\")\r\n\r\n\r\nmain()\r\n","sub_path":"Project2/TedKimCS205Project2.py","file_name":"TedKimCS205Project2.py","file_ext":"py","file_size_in_byte":11070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"456735788","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"tests for pagerank.py\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom sknetwork.ranking.pagerank import PageRank, BiPageRank\nfrom sknetwork.data import rock_paper_scissors, movie_actor\n\n\n# noinspection PyMissingOrEmptyDocstring\nclass TestPageRank(unittest.TestCase):\n\n def test_pagerank(self):\n ground_truth = np.ones(3) / 3\n adjacency = rock_paper_scissors()\n\n pagerank_sps = PageRank(solver='spsolve')\n pagerank_sps.fit(adjacency)\n scores = pagerank_sps.scores_\n self.assertAlmostEqual(np.linalg.norm(scores - ground_truth), 0.)\n\n pagerank_sps.fit(adjacency, personalization=np.array([0, 1, 0]))\n pagerank_sps.fit(adjacency, personalization={1: 1})\n\n pagerank_high_damping = PageRank(damping_factor=0.99)\n pagerank_high_damping.fit(adjacency)\n scores = pagerank_high_damping.scores_\n self.assertAlmostEqual(np.linalg.norm(scores - ground_truth), 0., places=1)\n\n pagerank_lcz = PageRank(solver='lanczos')\n pagerank_lcz.fit(adjacency)\n scores = pagerank_lcz.scores_\n self.assertAlmostEqual(np.linalg.norm(scores - ground_truth), 0.)\n\n pagerank_lsq = PageRank(solver='lsqr')\n pagerank_lsq.fit(adjacency)\n scores = pagerank_lsq.scores_\n self.assertAlmostEqual(np.linalg.norm(scores - ground_truth), 0.)\n\n pagerank_naive = PageRank(solver=None)\n pagerank_naive.fit(adjacency)\n\n def test_bipartite(self):\n bipagerank = BiPageRank()\n biadjacency = movie_actor()\n n1, n2 = biadjacency.shape\n\n bipagerank.fit(biadjacency, {0: 1})\n row_scores = bipagerank.row_scores_\n self.assertEqual(len(row_scores), n1)\n col_scores = bipagerank.col_scores_\n self.assertEqual(len(col_scores), n2)\n scores = bipagerank.scores_\n self.assertEqual(len(scores), n1 + n2)\n","sub_path":"sknetwork/ranking/tests/test_pagerank.py","file_name":"test_pagerank.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368063323","text":"#!/usr/bin/env python\n\n# Multi tests for extended auth with a single step - multiple plugins at once.\n# * Error in plugin\n# * No matching authentication method\n# * Matching authentication method, but auth rejected\n# * Matching authentication method, auth succeeds\n# * Matching authentication method, auth succeeds, new auth data sent back to client\n\n\nfrom mosq_test_helper import *\n\ndef write_config(filename, port):\n with open(filename, 'w') as f:\n f.write(\"port %d\\n\" % (port))\n f.write(\"auth_plugin c/auth_plugin_extended_single.so\\n\")\n f.write(\"auth_plugin c/auth_plugin_extended_single2.so\\n\")\n\nport = mosq_test.get_port()\nconf_file = os.path.basename(__file__).replace('.py', '.conf')\n\n\ndef do_test(suffix):\n write_config(conf_file, port)\n rc = 1\n # Single, error in plugin\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"error%s\" % (suffix))\n connect1_packet = mosq_test.gen_connect(\"client-params-test1\", keepalive=42, proto_ver=5, properties=props)\n\n # Single, no matching authentication method\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"non-matching%s\" % (suffix))\n connect2_packet = 
mosq_test.gen_connect(\"client-params-test2\", keepalive=42, proto_ver=5, properties=props)\n connack2_packet = mosq_test.gen_connack(rc=mqtt5_rc.MQTT_RC_BAD_AUTHENTICATION_METHOD, proto_ver=5, properties=None)\n\n # Single step, matching method, failure\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"single%s\" % (suffix))\n props += mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_DATA, \"baddata\")\n connect3_packet = mosq_test.gen_connect(\"client-params-test3\", keepalive=42, proto_ver=5, properties=props)\n connack3_packet = mosq_test.gen_connack(rc=mqtt5_rc.MQTT_RC_NOT_AUTHORIZED, proto_ver=5, properties=None)\n\n # Single step, matching method, success\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"single%s\" % (suffix))\n props += mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_DATA, \"data\")\n connect4_packet = mosq_test.gen_connect(\"client-params-test5\", keepalive=42, proto_ver=5, properties=props)\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"single%s\" % (suffix))\n connack4_packet = mosq_test.gen_connack(rc=0, proto_ver=5, properties=props)\n\n # Single step, matching method, success, auth data back to client\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"mirror%s\" % (suffix))\n props += mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_DATA, \"somedata\")\n connect5_packet = mosq_test.gen_connect(\"client-params-test6\", keepalive=42, proto_ver=5, properties=props)\n props = mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_METHOD, \"mirror%s\" % (suffix))\n props += mqtt5_props.gen_string_prop(mqtt5_props.PROP_AUTHENTICATION_DATA, \"atademos\")\n connack5_packet = mosq_test.gen_connack(rc=0, proto_ver=5, properties=props)\n\n\n broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)\n\n try:\n sock = mosq_test.do_client_connect(connect1_packet, b\"\", timeout=20, port=port)\n sock.close()\n\n sock = mosq_test.do_client_connect(connect2_packet, connack2_packet, timeout=20, port=port)\n sock.close()\n\n sock = mosq_test.do_client_connect(connect3_packet, connack3_packet, timeout=20, port=port)\n sock.close()\n\n sock = mosq_test.do_client_connect(connect4_packet, connack4_packet, timeout=20, port=port)\n sock.close()\n\n sock = mosq_test.do_client_connect(connect5_packet, connack5_packet, timeout=20, port=port)\n sock.close()\n\n rc = 0\n finally:\n os.remove(conf_file)\n broker.terminate()\n broker.wait()\n (stdo, stde) = broker.communicate()\n if rc:\n print(stde.decode('utf-8'))\n exit(rc)\n\ndo_test(\"\")\ndo_test(\"2\")\nexit(0)\n\n","sub_path":"third_party/mosquitto/test/broker/09-extended-auth-single2.py","file_name":"09-extended-auth-single2.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122985439","text":"from pymongo import MongoClient\nimport json\nimport sys\n \nRUNS = 10\nSIZES = [16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]\n\nclient = MongoClient() # connect first; assumes a MongoDB instance on the default localhost:27017\ndb = client.mydb\n\nstats_file, test_file = sys.argv[1], sys.argv[2]\n\nwith open(stats_file) as stats:\n text = stats.readlines()\n time_list = []\n for line in text:\n time_list.append(json.loads(line))\n\npython_runs = time_list[:RUNS]\nnumba_runs = time_list[RUNS:]\n
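\n# Assemble a single result document; grouping the timings by implementation lets later queries compare the Python and Numba runs per input size.\nres = {\n \"name\": test_file,\n \"sizes\": SIZES,\n \"runs\": RUNS,\n \"python\": python_runs,\n \"numba\": 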
numba_runs\n}\n \ndb.perf.insert_one(res)\n","sub_path":"db_send.py","file_name":"db_send.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154789741","text":"from __future__ import print_function\nimport math\nimport argparse\nimport pprint\nimport gensim\nimport numpy as np\n\nfrom glove import Glove\nfrom glove import Corpus\nfrom preprocess import pre_processor\n\n\nparser = argparse.ArgumentParser(description='GloVe-based parallel corpus filter')\n# Model training\nparser.add_argument('--data_path', type=str, default='./examples/GloVe_train.dat',\n help='Location of the data corpus')\nparser.add_argument('--corpus_model_path', type=str, default='./corpus.model',\n help='Location of the corpus model')\nparser.add_argument('--model_path', type=str, default='./glove.model',\n help='Location of the model')\nparser.add_argument('--window', type=int, default=10,\n help='Window size for GloVe training')\nparser.add_argument('--no_components', type=int, default=100,\n help='Training parameter')\nparser.add_argument('--learning_rate', type=float, default=0.05,\n help='Training parameter')\nparser.add_argument('--epochs', type=int, default=10,\n help='Training parameter')\n# Filtering\nparser.add_argument('--test_en', type=str, default='./examples/test.en',\n help='Location of the testing context')\nparser.add_argument('--test_vi', type=str, default='./examples/test.vi',\n help='Location of the testing response')\nparser.add_argument('--threshold', type=float, default=0.7,\n help='Filtering threshold')\nargs = parser.parse_args()\n\n\n\nclass GloVeFilter(object):\n\tdef __init__(self):\n\t\t# Preprocessor\n\t\tself.pp = pre_processor()\n\t\t# Corpus model\n\t\tself.corpus_model = Corpus()\n\t\t# Model\n\t\tself.glove = Glove(no_components=args.no_components, learning_rate=args.learning_rate)\n\n\tdef load_corpus_from_txt(self):\n\t\tprint('Reading corpus statistics...')\n\t\ttexts = [self.pp.preprocessing(l.strip().decode(\"utf8\", \"ignore\")) for l in open(args.data_path)]\n\t\tself.corpus_model.fit(texts, window=args.window)\n\t\tself.corpus_model.save(args.corpus_model_path)\n\t\tprint('Dict size: %s' % len(self.corpus_model.dictionary))\n\t\tprint('Collocations: %s' % self.corpus_model.matrix.nnz)\n\n\tdef load_corpus_from_model(self):\n\t\tprint('Reading corpus statistics...')\n\t\tself.corpus_model = Corpus.load(args.corpus_model_path)\n\t\tprint('Dict size: %s' % len(self.corpus_model.dictionary)) \n\t\tprint('Collocations: %s' % self.corpus_model.matrix.nnz)\n\n\tdef load_model(self):\n\t\tprint('Loading pre-trained GloVe model...')\n\t\tself.glove = Glove.load(args.model_path)\n\t\tprint('Loading finished')\n\n\tdef train(self):\n\t\tprint('Training the GloVe model...')\n\t\tself.glove.fit(self.corpus_model.matrix, epochs=args.epochs, verbose=True)\n\t\tself.glove.add_dictionary(self.corpus_model.dictionary)\n\t\tself.glove.save(args.model_path)\n\t\tprint('Training finished') \n\t\n\tdef _paragraph_similarity(self, paragraph_1, paragraph_2):\n\t\tparagraph_vector_1 = self.glove.transform_paragraph(paragraph_1, ignore_missing=True)\n\t\tparagraph_vector_2 = self.glove.transform_paragraph(paragraph_2, ignore_missing=True)\n\t\tdst = (np.dot(paragraph_vector_1, paragraph_vector_2)\n\t\t / np.linalg.norm(paragraph_vector_1)\n\t\t / np.linalg.norm(paragraph_vector_2))\n\t\treturn -1 if math.isnan(dst) else dst\n\t\n\tdef filter(self):\n\t\ten_list = [l.strip().decode(\"utf8\", \"ignore\") for l in open(args.test_en)]\n\t\tvi_list = [l.strip().decode(\"utf8\", \"ignore\") for l in open(args.test_vi)]\n\t\ten_pp = [self.pp.preprocessing(l) for l in en_list]\n\t\tvi_pp = [self.pp.preprocessing(l) for l in vi_list]\n\n\t\ttest_data = [(en_list[i], vi_list[i], en_pp[i], vi_pp[i]) \n\t\t\tfor i in range(0, len(en_list)) if len(en_pp[i]) != 0 and len(vi_pp[i]) != 0]\n\n\t\t# To save mem, you can print out directly at here\n\t\tscores = [(self._paragraph_similarity(enp, vip), en, vi) for (en, vi, enp, vip) in test_data]\n\t\treturn [(s, en, v) for (s, en, v) in scores if s > args.threshold]\n\n\nif __name__ == '__main__':\n\t# Training\n\tt = GloVeFilter()\n\tt.load_corpus_from_txt()\n\tt.train()\n\n\t'''\n\t# Load from pre-trained model\n\tt = GloVeFilter()\n\tt.load_model()\n\t'''\n\n\t# Testing\n\tprint (t.filter())\n","sub_path":"glove_filter.py","file_name":"glove_filter.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558204766","text":"# 560. Subarray Sum Equals K\n# https://leetcode.com/problems/subarray-sum-equals-k/\n\n# Time Complexity: O(n)\n# Space Complexity: O(n)\n\nclass Solution:\n def subarraySum(self, nums, k) -> int:\n temp_dict = {0:1}\n sum_so_far = 0\n count = 0\n\n for i in nums:\n sum_so_far += i\n if sum_so_far-k in temp_dict:\n count += temp_dict[sum_so_far-k] \n \n if sum_so_far not in temp_dict:\n temp_dict[sum_so_far] = 1\n else:\n temp_dict[sum_so_far] += 1\n \n return count\n\nobj = Solution()\nprint(obj.subarraySum([1,1,1],2))\nprint(obj.subarraySum([1,2,3],3))\nprint(obj.subarraySum([1,-1,0],0))\n","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115051595","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport io\nfrom flask import Flask, request, render_template, jsonify, send_from_directory\nfrom extractor import BibleWordExtractor, Book, ChapterVerseExtractor\nfrom bible import Bible\nfrom db_connector import Execution, DBBook\nfrom datetime import datetime\nfrom log import Log\nfrom base64 import b64encode\nfrom pptx import build_pptx, remove_old_pptx, PPTX_FILES, PPTX_ERROR_FILE\n\napp = Flask(__name__)\n\nKOR_BIBLE = 'db/kor_bible.db'\nENG_BIBLE = 'db/eng_bible.db'\n\ndef extract_bible_word(message):\n extractor = BibleWordExtractor()\n bible_word_list = extractor.extract_bible_word(message)\n\n if not bible_word_list:\n return []\n\n results = []\n for curr in bible_word_list:\n book = extractor.extract_book(curr)\n chapter_verse = extractor.extract_chapter_verse(curr)\n\n book_fullname = Book.get_fullname(book)\n if book_fullname is not None and ChapterVerseExtractor.extract_chapter(chapter_verse) > 0 and len(ChapterVerseExtractor.extract_verses(chapter_verse)) > 0:\n results.append(encode_bible_word_form(book_fullname, chapter_verse))\n\n return results\n\ndef encode_bible_word_form(book_fullname, chapter_verse):\n return book_fullname + u' ' + chapter_verse\n\ndef decode_bible_word_form(bible_word_form):\n content = bible_word_form.split(u' ')\n if not content or len(content) != 2:\n return None, None\n\n return content[0], content[1]\n\n@app.route('/')\ndef get_main_page():\n book_fullnames = DBBook.book_fullname_to_db.keys()\n book_fullnames.sort()\n return render_template('index.html', bible_books=book_fullnames)\n\n@app.route('/_parse_message')\ndef parse_message():\n 
message = request.args.get('message')\n bible_word_list = extract_bible_word(message)\n Log.log_parsed_bible_word(message, bible_word_list)\n return jsonify(result=bible_word_list)\n\n@app.route('/_show_bible_text')\ndef show_bible_text():\n version_list = [KOR_BIBLE, ENG_BIBLE]\n query_with_version = {}\n query_with_version[KOR_BIBLE] = Execution(KOR_BIBLE)\n query_with_version[ENG_BIBLE] = Execution(ENG_BIBLE)\n\n bible_word = request.args.get('bible_word')\n b_remove_annotation = True if request.args.get('remove_annotation') == 'true' else False\n \n book_fullname, chapter_verse = decode_bible_word_form(bible_word)\n if book_fullname is None or chapter_verse is None:\n return jsonify(result='Error')\n\n chapter = ChapterVerseExtractor.extract_chapter(chapter_verse)\n verses = ChapterVerseExtractor.extract_verses(chapter_verse)\n bible = Bible(book_fullname, chapter, verses, chapter_verse)\n\n for version in version_list:\n text = query_with_version[version].get_text(bible)\n bible.add_text(version, text)\n content = bible.get_print_str(version_list, False, b_remove_annotation)\n\n for version in version_list:\n query_with_version[version].close_connection()\n\n return jsonify(result=content)\n \n@app.route('/_build_pptx_file')\ndef build_pptx_file():\n version_list = [KOR_BIBLE, ENG_BIBLE]\n query_with_version = {}\n query_with_version[KOR_BIBLE] = Execution(KOR_BIBLE)\n query_with_version[ENG_BIBLE] = Execution(ENG_BIBLE)\n\n bible_word = request.args.get('bible_word')\n b_remove_annotation = True if request.args.get('remove_annotation') == 'true' else False\n \n book_fullname, chapter_verse = decode_bible_word_form(bible_word)\n if book_fullname is None or chapter_verse is None:\n return send_from_directory(PPTX_FILES, PPTX_ERROR_FILE, as_attachment=True)\n\n chapter = ChapterVerseExtractor.extract_chapter(chapter_verse)\n verses = ChapterVerseExtractor.extract_verses(chapter_verse)\n bible = Bible(book_fullname, chapter, verses, chapter_verse)\n\n for version in version_list:\n text = query_with_version[version].get_text(bible)\n bible.add_text(version, text)\n pptx_content = bible.get_pptx_content(version_list, b_remove_annotation)\n\n for version in version_list:\n query_with_version[version].close_connection()\n \n pptx_file = build_pptx(pptx_content)\n remove_old_pptx()\n return send_from_directory(PPTX_FILES, pptx_file, as_attachment=True)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224795372","text":"#!/usr/bin/env python\n# Slingshot shared helper functions\n\nimport argparse\nimport re\n\n\ndef get_input(text):\n return raw_input(text)\n\n\ndef get_confirmation(msg):\n '''\n Get confirmation from user\n msg: Confirmation message\n '''\n while True:\n confirmation = get_input(msg + \" (yes/NO): \").lower()\n if confirmation == '' or confirmation == 'no':\n return False\n elif confirmation == 'yes':\n return True\n\n\ndef valid_regexp(arg):\n '''\n check that arg is a valid regexp.\n\n This function is meant to be used as a type for argparse\n parser.add_argument(\n '-e', '--regex',\n required=False,\n type=valid_regexp,\n dest='regex',\n default='',\n help='specifies a regex to be compared to the volume name'\n )\n '''\n try:\n re.compile(arg)\n except Exception as e:\n msg = \"'%s' is not a valid regular expression: %s\" % (arg, e)\n raise argparse.ArgumentTypeError(msg)\n
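 # the compiled pattern is only used for validation; argparse keeps the original string\n return 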
arg\n\n\n","sub_path":"platform/scripts/slingshot.py","file_name":"slingshot.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357337456","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUsage: search_and_delete.py -h\n\n\"\"\"\n\nimport click\nimport requests\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n# example query -- ./search_and_delete.py -u http://nanopubstore..biodati.com -s 'nanopub.metadata.project:\"MS Model\" AND nanopub.metadata.gd\\:createTS:[ 2018-07-15 TO 2018-07-18 ] AND nanopub.citation.database.id: 21833088'\n\n\n@click.command(context_settings=CONTEXT_SETTINGS)\n@click.option('--url', '-u', help=\"NanopubStore API url - typically of the format https://nanopubstore..biodati.com\")\n@click.option('--search', '-s', prompt=\"Enter search string\", help=\"Search string to run against NanopubStore\")\n@click.option('--delete', '-d', is_flag=True, help=\"Set delete flag to remove all nanopubs found by search string\")\ndef main(url, search, delete):\n \"\"\"Search and delete nanopubs from NanopubStore\n\n Will only return up to 10000 results.\n\n Please review https://help.biodati.com/nanopubs/nanopub-advanced-searching for tips on how to search the NanopubStore.\n \"\"\"\n url = url.rstrip('/')\n\n # search_body = {\"searchQuery\": search, \"searchFilter\": \"isDeleted: false AND nanopub.metadata.project.keyword: \\\"MS Model\\\"\", \"max\": 10000}\n search_body = {\"searchQuery\": search, \"searchFilter\": \"isDeleted: false\", \"max\": 10000}\n\n r = requests.post(f'{url}/search', json=search_body)\n\n results = r.json()\n\n total_items = results['totalItems']\n print(f'Found {total_items} nanopubs')\n\n if r.status_code == 200:\n for np in results['items']:\n print(np['nanopub']['id'])\n\n if delete:\n count = 0\n for np in results['items']:\n np_id = np['nanopub']['id']\n r = requests.delete(f'{url}/nanopubs/{np_id}')\n if r.status_code == 200:\n count += 1\n\n print(f'Successfully deleted {count} nanopubs')\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"nanopubstore/search_and_delete.py","file_name":"search_and_delete.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"398336194","text":"\nimport logging\n\nfrom . import Analysis, register_analysis\n\nfrom .. import SIM_LIBRARIES\nfrom ..errors import AngrValueError\n\nl = logging.getLogger(\"angr.analyses.static_hooker\")\n\nclass StaticHooker(Analysis):\n \"\"\"\n This analysis works on statically linked binaries - it finds the library functions statically\n linked into the binary and hooks them with the appropriate simprocedures.\n\n Right now it only works on libc functions and unstripped binaries, but hey! 
There's room to\n grow!\n \"\"\"\n\n def __init__(self):\n self.results = {}\n libc = SIM_LIBRARIES['libc.so.6']\n\n if self.project.loader.main_bin.linking == 'dynamic':\n raise AngrValueError('StaticHooker only works on static binaries!')\n\n for func in self.project.loader.main_bin._symbol_cache.values():\n if not func.is_function: continue\n if libc.has_implementation(func.name):\n proc = libc.get(func.name, self.project.arch)\n self.project.hook(func.rebased_addr, proc)\n l.info(\"Hooked %s at %#x\", func.name, func.rebased_addr)\n self.results[func.rebased_addr] = proc\n else:\n l.debug(\"Failed to hook %s at %#x\", func.name, func.rebased_addr)\n\nregister_analysis(StaticHooker, 'StaticHooker')\n","sub_path":"angr/analyses/static_hooker.py","file_name":"static_hooker.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187743761","text":"import argparse\nimport train_healpix\n\ncline_parser = argparse.ArgumentParser(description='Calculate minimal detectable fraction',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\ndef add_arg(*pargs, **kwargs):\n cline_parser.add_argument(*pargs, **kwargs)\n\n\nadd_arg('--f_src', type=float, help='fraction of \"from-source\" EECRs [0,1] or -1 for random', default=-1)\nadd_arg('--Neecr', type=int, help='Total number of EECRs in each sample', default=500)\nadd_arg('--Emin', type=int, help='Emin in EeV for which the input sample was generated', default=56)\n# add_arg('--source_id', type=str,\n# help='source (CenA, NGC253, M82, M87 or FornaxA) or comma separated list of sources or \"all\"',\n# default='CenA')\nadd_arg('sources', type=str, nargs='+', metavar='source', default=[])\nadd_arg('--fractions', type=float, nargs='+', metavar='frac', help='fractions for mixed source case (in the same order as sources)', default=[])\nadd_arg('--Nside', type=int, help='healpix grid Nside parameter', default=32)\nadd_arg('--Nini', type=int, help='Size of the initial sample of from-source events', default=10000)\nadd_arg('--source_vicinity_radius', type=str, help='source vicinity radius', default='1')\nadd_arg('--log_sample', action='store_true', help=\"sample f_src uniformly in log scale\")\nadd_arg('--f_src_max', type=float, help='maximal fraction of \"from-source\" EECRs [0,1]', default=1)\nadd_arg('--f_src_min', type=float, help='minimal fraction of \"from-source\" EECRs [0,1]', default=0)\nadd_arg('--model', type=str, help='healpix NN', default='')\nadd_arg('--n_samples', type=int, help='number of samples', default=50000)\nadd_arg('--alpha', type=float, help='type 1 maximal error', default=0.01)\nadd_arg('--beta', type=float, help='type 2 maximal error', default=0.05)\nadd_arg('--suffix', type=str, default='*')\nadd_arg('--batch_size', type=int, help='size of training batch', default=100)\nadd_arg('--mf', type=str, help='Magnetic field model (jf or pt)', default='jf')\nadd_arg('--data_dir', type=str, help='data root directory (should contain jf/sources/ or pt/sources/)',\n default='data')\nadd_arg('--threshold', type=float,\n help='source fraction threshold for binary classification', default=0.0)\nadd_arg('--seed', type=int, help='sample generator seed', default=train_healpix.test_seed)\n\nargs = cline_parser.parse_args()\n\nif len(args.fractions) > 0:\n assert len(args.fractions) == len(args.sources) and len(args.sources) > 1\n\ngen = train_healpix.SampleGenerator(\n args, deterministic=True, sources=args.sources, suffix=args.suffix, seed=args.seed, 
mixture=args.fractions\n)\n\nmodel = train_healpix.create_model(gen.Ncells, pretrained=args.model)\nfrac, alpha = train_healpix.calc_detectable_frac(gen, model, args)\nprint(frac, alpha)\n\nout_file = args.model + \"_cmp.txt\"\nwith open(out_file, \"a\") as d:\n print(\"Model to compare with:\", file=d)\n print(*args.sources, file=d)\n if len(args.fractions) > 0:\n print('fractions:', *args.fractions, file=d)\n print(\"Neecr={:3d}\".format(args.Neecr), file=d)\n print(\"Nmixed_samples={:5d}\".format(args.n_samples), file=d)\n d.write(\"frac={:7.2f}\\n\".format(frac*100))\n d.write(\"alpha={:6.4f}\\n\".format(alpha))\n d.write(\"------------------------------\\n\")\n\n\n","sub_path":"src/calc_min_fractions.py","file_name":"calc_min_fractions.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289095213","text":"\"\"\"Tests for deCONZ config flow.\"\"\"\nimport pytest\n\nimport voluptuous as vol\n\nimport homeassistant.components.deconz as deconz\nimport pydeconz\n\n\nasync def test_flow_works(hass, aioclient_mock):\n \"\"\"Test config flow.\"\"\"\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, json=[\n {'id': 'id', 'internalipaddress': '1.2.3.4', 'internalport': '80'}\n ])\n aioclient_mock.post('http://1.2.3.4:80/api', json=[\n {\"success\": {\"username\": \"1234567890ABCDEF\"}}\n ])\n\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n await flow.async_step_init()\n result = await flow.async_step_link(user_input={})\n\n assert result['type'] == 'create_entry'\n assert result['title'] == 'deCONZ'\n assert result['data'] == {\n 'bridgeid': 'id',\n 'host': '1.2.3.4',\n 'port': '80',\n 'api_key': '1234567890ABCDEF'\n }\n\n\nasync def test_flow_already_registered_bridge(hass, aioclient_mock):\n \"\"\"Test config flow don't allow more than one bridge to be registered.\"\"\"\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n flow.hass.data[deconz.DOMAIN] = True\n\n result = await flow.async_step_init()\n assert result['type'] == 'abort'\n\n\nasync def test_flow_no_discovered_bridges(hass, aioclient_mock):\n \"\"\"Test config flow discovers no bridges.\"\"\"\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, json=[])\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n\n result = await flow.async_step_init()\n assert result['type'] == 'abort'\n\n\nasync def test_flow_one_bridge_discovered(hass, aioclient_mock):\n \"\"\"Test config flow discovers one bridge.\"\"\"\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, json=[\n {'id': 'id', 'internalipaddress': '1.2.3.4', 'internalport': '80'}\n ])\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n\n result = await flow.async_step_init()\n assert result['type'] == 'form'\n assert result['step_id'] == 'link'\n\n\nasync def test_flow_two_bridges_discovered(hass, aioclient_mock):\n \"\"\"Test config flow discovers two bridges.\"\"\"\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, json=[\n {'id': 'id1', 'internalipaddress': '1.2.3.4', 'internalport': '80'},\n {'id': 'id2', 'internalipaddress': '5.6.7.8', 'internalport': '80'}\n ])\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n\n result = await flow.async_step_init()\n assert result['type'] == 'form'\n assert result['step_id'] == 'init'\n\n with pytest.raises(vol.Invalid):\n assert result['data_schema']({'host': '0.0.0.0'})\n\n result['data_schema']({'host': '1.2.3.4'})\n result['data_schema']({'host': '5.6.7.8'})\n\n\nasync def test_flow_no_api_key(hass, aioclient_mock):\n 
\"\"\"Test config flow when the bridge returns no API key.\"\"\"\n aioclient_mock.post('http://1.2.3.4:80/api', json=[])\n flow = deconz.DeconzFlowHandler()\n flow.hass = hass\n flow.deconz_config = {'host': '1.2.3.4', 'port': 80}\n\n result = await flow.async_step_link(user_input={})\n assert result['type'] == 'form'\n assert result['step_id'] == 'link'\n assert result['errors'] == {'base': 'no_key'}\n","sub_path":"tests/components/test_deconz.py","file_name":"test_deconz.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"265536906","text":"#!/usr/bin/python\nimport lutin.module as module\nimport lutin.tools as tools\n\ndef get_desc():\n\treturn \"e-svg SVG image parser and generator\"\n\ndef create(target):\n\tmyModule = module.Module(__file__, 'esvg', 'LIBRARY')\n\tmyModule.add_module_depend(['etk', 'agg', 'exml'])\n\tmyModule.add_src_file([\n\t\t'esvg/Base.cpp',\n\t\t'esvg/Circle.cpp',\n\t\t'esvg/debug.cpp',\n\t\t'esvg/Ellipse.cpp',\n\t\t'esvg/Group.cpp',\n\t\t'esvg/Line.cpp',\n\t\t'esvg/esvg.cpp',\n\t\t'esvg/Path.cpp',\n\t\t'esvg/Polygon.cpp',\n\t\t'esvg/Polyline.cpp',\n\t\t'esvg/Rectangle.cpp',\n\t\t'esvg/Renderer.cpp',\n\t\t'esvg/Stroking.cpp',\n\t\t'esvg/Text.cpp'])\n\tmyModule.add_export_path(tools.get_current_path(__file__))\n\t# add the current module at the \n\treturn myModule\n\n","sub_path":"lutin_esvg.py","file_name":"lutin_esvg.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211528168","text":"# Copyright (c) 2015 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport uuid\n\nimport ddt\nimport mock\n\nfrom tests.functional.transport.pecan import base\n\n\n@ddt.ddt\nclass SSLCertificateControllerTest(base.FunctionalTest):\n\n def setUp(self):\n super(SSLCertificateControllerTest, self).setUp()\n\n tld_patcher = mock.patch('tld.get_tld')\n tld_patcher.start()\n self.addCleanup(tld_patcher.stop)\n\n dns_resolver_patcher = mock.patch('dns.resolver')\n dns_resolver_patcher.start()\n self.addCleanup(dns_resolver_patcher.stop)\n\n self.project_id = str(uuid.uuid1())\n self.service_name = str(uuid.uuid1())\n self.flavor_id = str(uuid.uuid1())\n\n # create a mock flavor to be used by new service creations\n flavor_json = {\n \"id\": self.flavor_id,\n \"providers\": [\n {\n \"provider\": \"mock\",\n \"links\": [\n {\n \"href\": \"http://mock.cdn\",\n \"rel\": \"provider_url\"\n }\n ]\n }\n ]\n }\n response = self.app.post('/v1.0/flavors',\n params=json.dumps(flavor_json),\n headers={\n \"Content-Type\": \"application/json\",\n \"X-Project-ID\": self.project_id})\n\n self.assertEqual(201, response.status_code)\n\n @ddt.file_data(\"data_create_ssl_certificate.json\")\n def test_create_ssl_certificate(self, ssl_certificate_json):\n\n # override the hardcoded flavor_id in the ddt file with\n # a custom one defined in setUp()\n ssl_certificate_json['flavor_id'] = 
self.flavor_id\n\n # create with good data\n response = self.app.post('/v1.0/ssl_certificate',\n params=json.dumps(ssl_certificate_json),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id})\n self.assertEqual(202, response.status_code)\n\n def test_get_ssl_certificate_non_existing_domain(self):\n\n # get non existing domain\n domain = 'www.idontexist.com'\n response = self.app.get('/v1.0/ssl_certificate/{0}'.format(domain),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id},\n expect_errors=True)\n self.assertEqual(404, response.status_code)\n\n def test_get_ssl_certificate_existing_domain(self):\n # validators.is_valid_tld = mock.Mock(return_value=True)\n domain = 'www.iexist.com'\n ssl_certificate_json = {\n \"cert_type\": \"san\",\n \"domain_name\": domain,\n \"flavor_id\": self.flavor_id,\n \"project_id\": self.project_id\n }\n response = self.app.post('/v1.0/ssl_certificate',\n params=json.dumps(ssl_certificate_json),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id})\n self.assertEqual(202, response.status_code)\n\n # get existing domain with same project_id\n\n response = self.app.get('/v1.0/ssl_certificate/{0}'.format(domain),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id})\n response_list = json.loads(response.body.decode(\"utf-8\"))\n self.assertEqual(200, response.status_code)\n self.assertEqual(ssl_certificate_json[\"cert_type\"],\n response_list[\"cert_type\"])\n self.assertEqual(ssl_certificate_json[\"domain_name\"],\n response_list[\"domain_name\"])\n self.assertEqual(ssl_certificate_json[\"flavor_id\"],\n response_list[\"flavor_id\"])\n self.assertEqual(ssl_certificate_json[\"project_id\"],\n response_list[\"project_id\"])\n\n def test_get_ssl_certificate_existing_domain_different_project_id(self):\n domain = 'www.iexist.com'\n ssl_certificate_json = {\n \"cert_type\": \"san\",\n \"domain_name\": domain,\n \"flavor_id\": self.flavor_id,\n \"project_id\": self.project_id\n }\n response = self.app.post('/v1.0/ssl_certificate',\n params=json.dumps(ssl_certificate_json),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id})\n self.assertEqual(202, response.status_code)\n\n # get existing domain with different project_id\n response = self.app.get('/v1.0/ssl_certificate/{0}'.format(domain),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': str(uuid.uuid4())})\n self.assertEqual(200, response.status_code)\n\n def test_create_with_invalid_json(self):\n # create with erroneous data: invalid json data\n response = self.app.post('/v1.0/ssl_certificate',\n params=\"{\",\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id},\n expect_errors=True)\n self.assertEqual(400, response.status_code)\n\n @ddt.file_data(\"data_create_ssl_certificate_bad_input_json.json\")\n def test_create_with_bad_input_json(self, ssl_certificate_json):\n # create with erroneous data\n response = self.app.post('/v1.0/ssl_certificate',\n params=json.dumps(ssl_certificate_json),\n headers={'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id},\n expect_errors=True)\n self.assertEqual(400, response.status_code)\n\n @ddt.file_data(\"data_create_ssl_certificate.json\")\n def test_delete_cert(self, ssl_certificate_json):\n # create with good data\n response = self.app.post('/v1.0/ssl_certificate',\n params=json.dumps(ssl_certificate_json),\n headers={\n 'Content-Type': 
'application/json',\n 'X-Project-ID': self.project_id})\n self.assertEqual(202, response.status_code)\n\n # delete cert\n response = self.app.delete(\n '/v1.0/ssl_certificate/{0}'.format(\n ssl_certificate_json['domain_name']\n ),\n headers={'X-Project-ID': self.project_id}\n )\n self.assertEqual(202, response.status_code)\n\n def test_delete_cert_non_exist(self):\n # create with erroneous data: invalid json data\n response = self.app.delete('/v1.0/ssl_certificate/blog.non_exist.com',\n headers={'X-Project-ID': self.project_id},\n expect_errors=True)\n self.assertEqual(400, response.status_code)\n\n def tearDown(self):\n super(SSLCertificateControllerTest, self).tearDown()\n","sub_path":"tests/functional/transport/pecan/controllers/test_ssl_certificate.py","file_name":"test_ssl_certificate.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434119469","text":"# -*- coding: utf8 -*-\nimport os.path\nfrom contextlib import contextmanager as _contextmanager\n\nfrom fabric import colors\nfrom fabric.api import *\nfrom fabric.utils import puts\nfrom fabric.contrib.project import rsync_project\nfrom fabric.contrib.files import exists\n\n\n@_contextmanager\ndef virtualenv():\n with cd(env.directory):\n with prefix(env.activate):\n yield\n\n\ndef production():\n \"\"\"Defaults for deploying to a production server.\"\"\"\n env.directory = os.path.join('/', 'home', env.user, 'notifico2')\n env.activate = 'source {path}'.format(\n path=os.path.join(env.directory, 'ENV', 'bin', 'activate')\n )\n env.notifico_dir = os.path.join(env.directory, 'notifico')\n\n\ndef ve_create():\n \"\"\"\n Create a new virtualenv directory on the server.\n \"\"\"\n require('directory', provided_by=('production',))\n run('virtualenv {args} {path}'.format(\n args=' '.join((\n '--clear', # Start from scratch\n '--distribute' # Use distribute instead of setuptools\n )),\n path=os.path.join(env.directory, 'ENV')\n ))\n\n\ndef ve_list():\n \"\"\"\n Get the list of currently installed packages in the remote virtualenv.\n \"\"\"\n with virtualenv():\n run('pip freeze')\n\n\ndef deploy():\n require('directory', provided_by=('production',))\n rsync_project(\n local_dir='./',\n remote_dir=env.notifico_dir,\n exclude=[\n 'ENV',\n '*.pyc',\n '.git',\n '*.egg-info',\n 'testing.db',\n 'local_config.py'\n ]\n )\n with virtualenv():\n with cd(env.notifico_dir):\n run('python setup.py install')\n\n if exists('notifico.pid'):\n run('kill -HUP `cat notifico.pid`')\n else:\n run(' '.join([\n 'gunicorn',\n '-w 4',\n '-b 127.0.0.1:4000',\n '-p notifico.pid',\n '--daemon',\n '\"notifico:start(debug=False)\"'\n ]), pty=False)\n\n # Try to make sure gunicorn has actually started.\n if exists('notifico.pid'):\n with settings(warn_only=True):\n result = run('kill -0 `cat notifico.pid`')\n\n if result.failed:\n puts(colors.red('Gunicorn is not running!'))\n else:\n puts(colors.green('Gunicorn started.'))\n else:\n puts(colors.red('Gunicorn is not running!'))\n\n\ndef deploy_bots():\n require('directory', provided_by=('production',))\n with virtualenv():\n with cd(env.directory):\n run('pip install supervisor')\n # Update the supervisord configuration.\n put('misc/deploy/supervisord.conf', 'supervisord.conf')\n\n if exists('/tmp/supervisord.pid'):\n # Supervisord is already running, so ask it to restart\n # the running bots.\n run('supervisorctl restart bots')\n else:\n # ... 
otherwise, start the daemon with our config file.\n run('supervisord -c supervisord.conf')\n\n\ndef upgrade_utopia():\n require('directory', provided_by=('production',))\n with virtualenv():\n with cd(env.directory):\n run(\n 'pip install'\n ' https://github.com/TkTech/utopia/tarball/'\n 'master#egg=UtopiaIRC --upgrade'\n )\n\n\ndef css():\n with lcd('notifico/static'):\n local('lessc less/bootstrap.less css/bootstrap.css')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532273414","text":"def main():\n # write a function that returns the first recurring character in a string\n\n def frist_recurring_character(string):# O(n**2)\n current_index, l = 0, len(string)\n while current_index < l:\n target = string[current_index]\n for char in range(current_index+1, l):\n if target == string[char]:\n return target\n current_index += 1\n return '' \n\n def frist_recurring(string):# O(n)\n chars_counter_dictionary = {}\n # record each char the first time it is seen\n # a char that is already in the dict is the first recurring one\n for char in string:\n if char not in chars_counter_dictionary:\n chars_counter_dictionary[char] = 1\n else:\n return char\n return ''\n\n print(frist_recurring_character(\"ABCDEFGHIJKLMNOPQRSTWXYZabcdefghijklmznopqrstwxyz\"))\n print(frist_recurring(\"ABCDEFGHIJKLMNOPQRSTWXYZabcdefghijklmnopqrBstwxyz\"))\n \nif __name__ == \"__main__\":\n main()","sub_path":"interview questions/frist_recurring_character.py","file_name":"frist_recurring_character.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605901512","text":"# -*- coding: utf-8 -*-\n\n######################## CREATE THE REQUIRED VARIABLES ######################\n\nimport gzip\nimport pandas as pd\nimport numpy as np\n\n\"\"\"\n_ reviewID : a string representing the identifier of a review, identified by \nits line number in the database\n\n_ reviewerID : a string representing the identifier of a reviewer\n\n_ productID : a string representing the identifier of a product\n\n_ reviewers : {reviewerID : [reviewsID]}\nthe reviewer identifiers and the list of identifiers of their reviews\n\n_ products : {productID : [reviewsID]}\nthe product identifiers and the list of identifiers of the reviews written about them\n\n_ review_author = array of the author of each review\n\n_ review_product = array of the product of each review\n\n_ time_post = array of the times at which each review was posted (in seconds)\n\n_ notes = array of the ratings of each review\n\n_ avg_notes : {productID : avg_note}\n\n_ honesty : array containing the honesty values of each review\n\n_ trustiness : {reviewerID : trustiness score}\n\n_ reliability : {productID : reliability score}\n\"\"\"\n\ndef parse(path):\n g = gzip.open(path, 'rb')\n for l in g:\n yield eval(l)\n\ndef getDF(path):\n i = 0\n df = {}\n for d in parse(path):\n df[i] = d\n i += 1\n return pd.DataFrame.from_dict(df, orient='index')\n\ndf = getDF('reviews_Amazon_Instant_Video.json.gz')\n\ndf2 = df.head(100000)\n\n# replace the reviewerIDs with numeric labels\ntmp = sorted(set(df2['reviewerID']))\nreviewerID_to_label = dict(zip(tmp,range(len(tmp))))\ndf2['reviewerID'] = df2['reviewerID'].map(reviewerID_to_label)\n\n# replace the asins (product IDs) with numeric labels\ntmp = sorted(set(df2['asin']))\nasin_to_label = dict(zip(tmp,range(len(tmp))))\n
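# note: tmp was rebuilt above, so reviewer labels and product labels are assigned independently of each other\ndf2['asin'] = 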
df2['asin'].map(asin_to_label)\n\nproduct_reviews = df2.groupby(['asin']).groups\n\nreviewer_reviews = df2.groupby(['reviewerID']).groups\n\navg_notes = df2.groupby(['asin']).mean()['overall'].to_dict()\n\nreview_author = np.array(df2['reviewerID'])\n\nreview_product = np.array(df2['asin'])\n\ntime_post = np.array(df2['unixReviewTime'])\n\nnotes = np.array(df2['overall'])\n\nreviewsID = np.arange(0,len(df2))\n\nreviewersID = np.arange(0,len(reviewer_reviews))\n\nproductsID = np.arange(0,len(product_reviews))\n \n \n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53714279","text":"import json\nfrom random import shuffle\nfrom operator import attrgetter\nfrom club import _Club\nfrom config import read_entry\n\n\nclass League:\n\n def __init__(self, name):\n self.name = name\n self.club_names = read_entry('clubnames', 'Leagues', name)\n\n self.clubs = list()\n for club in self.club_names:\n self.clubs.append(_Club(club))\n\n self.fixture = self.__load_fixture()\n\n self.next_matchday = self.load_matchday(read_entry('next_matchday', 'Leagues', self.name))\n\n self.all_results = self.load_results()\n\n self.table = self.calc_table()\n\n self.aufsteiger = read_entry(\"aufsteiger\", 'Leagues', name)\n self.absteiger = read_entry(\"absteiger\", 'Leagues', name)\n self.cl_plaetze = read_entry(\"cl\", 'Leagues', name)\n self.cl_quali_plaetze = read_entry(\"clq\", 'Leagues', name)\n self.el_plaetze = read_entry(\"el\", 'Leagues', name)\n\n def __create_fixture(self):\n n = len(self.club_names)\n teams = self.club_names\n matchs = []\n fixtures = []\n for _ in range(1, n):\n for i in range(int(n / 2)):\n matchs.append((teams[i], teams[n - 1 - i]))\n teams.insert(1, teams.pop())\n fixtures.insert(int(len(fixtures) / 2), matchs)\n matchs = []\n shuffle(fixtures)\n newfixtures = list()\n for fixture1 in fixtures:\n newfixture = []\n for match in fixture1:\n newmatch = (match[1], match[0])\n newfixture.append(newmatch)\n newfixtures.append(newfixture)\n for fixture1 in newfixtures:\n fixtures.append(fixture1)\n with open('leagues/' + self.name + '/fixture.dat', 'w') as file:\n json.dump(fixtures, file)\n results = list()\n for fixture in fixtures:\n newfixture = list()\n for match in fixture:\n newmatch = {match[0]: None, match[1]: None}\n newfixture.append(newmatch)\n results.append(newfixture)\n with open('leagues/' + self.name + '/results.dat', 'w') as file:\n json.dump(results, file)\n return fixtures\n\n def __load_fixture(self):\n with open('leagues/' + self.name + '/fixture.dat') as file:\n return json.load(file)\n\n def load_matchday(self, matchday):\n return self.fixture[int(matchday)]\n\n def load_results(self, matchday='all'):\n with open('leagues/' + self.name + '/results.dat') as file:\n if matchday == 'all':\n return json.load(file)\n\n def calc_table(self):\n return Table(self.club_names, self.all_results).calc()\n\n def __repr__(self):\n return 'Object League <' + self.name + '>'\n\n\nclass _TableTeam:\n def __init__(self, name):\n self.name = name\n self.punkte = 0\n self.spiele = 0\n self.siege = 0\n self.unentschieden = 0\n self.niederlagen = 0\n self.tore = 0\n self.gegentore = 0\n self.tordifferenz = 0\n\n def get_difference(self):\n self.tordifferenz = self.tore - self.gegentore\n\n def __repr__(self):\n return '| {:25}'.format(self.name[:25]) + ' | {:1} | '.format(str(self.spiele)) + '{:2} | '.format(str(self.siege)) + '{:2} | 
'.format(str(self.unentschieden))+ '{:2} | '.format(str(self.niederlagen))+ '{:3} | '.format(str(self.tore))+ '{:3} | '.format(str(self.gegentore))+ '{:4} | '.format(str(self.tordifferenz))+ '{:3} | '.format(str(self.punkte))\n\n\nclass Table:\n\n def __init__(self, names, all_results):\n \"\"\"format: {teamname: [spiele, siege, unentschieden, niederlagen, tore, gegentore, punkte], ...}\"\"\"\n table = {}\n for team in names:\n add = {team: _TableTeam(team)}\n table.update(add)\n for fixture in all_results:\n for match in fixture:\n\n teams = list()\n for team in match:\n teams.append(team)\n\n if match[teams[0]] is None or match[teams[1]] is None:\n continue\n\n table[teams[0]].spiele += 1 # matches played: +1 for both teams\n table[teams[1]].spiele += 1\n\n if match[teams[0]] > match[teams[1]]:\n table[teams[0]].siege += 1 # win for the home team\n table[teams[1]].niederlagen += 1 # loss for the away team\n table[teams[0]].punkte += 3 # three points for the win\n\n elif match[teams[0]] < match[teams[1]]:\n table[teams[1]].siege += 1 # win for the away team\n table[teams[0]].niederlagen += 1 # loss for the home team\n table[teams[1]].punkte += 3 # three points for the win\n\n elif match[teams[0]] == match[teams[1]]:\n table[teams[0]].unentschieden += 1 # draw: +1 for both teams\n table[teams[1]].unentschieden += 1\n table[teams[0]].punkte += 1 # one point each\n table[teams[1]].punkte += 1\n\n table[teams[0]].tore += match[teams[0]] # goals for & against, home team\n table[teams[0]].gegentore += match[teams[1]]\n\n table[teams[1]].tore += match[teams[1]] # goals for & against, away team\n table[teams[1]].gegentore += match[teams[0]]\n for team in table:\n table[team].get_difference()\n\n table = sorted(table.values(), key=attrgetter('punkte', 'tordifferenz', 'tore'), reverse=True)\n\n self.table = table\n\n def calc(self):\n return self.table\n","sub_path":"league.py","file_name":"league.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115873955","text":"from typing import List\r\nimport collections\r\nbeginWord = \"hit\"\r\nendWord = \"cog\"\r\nwordList = [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]\r\n# wordList = [\"hot\", \"dot\", \"dog\", \"lot\", \"log\"]\r\n# def d1(a, b):\r\n# return sum(c1 != c2 for c1, c2 in zip(a, b))\r\n\r\n\r\n# for w in wordList:\r\n# print(beginWord, w, d1(beginWord, w))\r\n\r\ndef findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\r\n if endWord not in wordList:\r\n return []\r\n\r\n wordList.append(beginWord)\r\n connect = collections.defaultdict(set)\r\n\r\n for p in range(len(wordList[0])):\r\n d = collections.defaultdict(set)\r\n for i, w in enumerate(wordList):\r\n k = w[0:p]+w[p+1:]\r\n d[k].add(i)\r\n for i, w in enumerate(wordList):\r\n k = w[0:p]+w[p+1:]\r\n connect[i] |= d[k]\r\n\r\n for c in connect:\r\n connect[c].remove(c)\r\n\r\n # for c in connect:\r\n # print(wordList[c])\r\n # for d in connect[c]:\r\n # print(' ', wordList[d])\r\n\r\n begin = wordList.index(beginWord)\r\n end = wordList.index(endWord)\r\n # print(begin, end)\r\n\r\n visited = {begin}\r\n horizon = [[begin]]\r\n result = []\r\n while horizon and not result:\r\n new_horizon = []\r\n for el in horizon:\r\n for c in connect[el[-1]]:\r\n if c not in visited:\r\n new_horizon.append(el + [c])\r\n if c == end:\r\n result.append(el+[c])\r\n for el in new_horizon:\r\n visited.add(el[-1])\r\n\r\n horizon = new_horizon\r\n\r\n
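 # translate the index paths back into words for the final ladders\r\n return [[wordList[i] 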
for i in r] for r in result]\r\n\r\n\r\nresult = findLadders(0, beginWord, endWord, wordList)\r\nprint(result)\r\n","sub_path":"127WordLadder.py","file_name":"127WordLadder.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417305663","text":"import datetime\nimport es2\nimport os\nimport unittest\n\nclass TestLogParser(unittest.TestCase):\n\n log_files = []\n filter_file_0_expected_res = []\n concat_expected_res = []\n sortl_expected_res = []\n path = \"\"\n valid_exts = []\n\n @classmethod\n def setUpClass(cls):\n\n cls.current_dir = os.path.dirname(os.path.realpath(__file__))\n cls.path = cls.current_dir + \"/logs/testlogs/\"\n cls.valid_exts = [\"txt\", \"log\"]\n\n cls.log_files = [\n cls.current_dir + \"/logs/testlogs/a.txt\",\n cls.current_dir + \"/logs/testlogs/a.log\",\n cls.current_dir + \"/logs/testlogs/nested/a.txt\",\n cls.current_dir + \"/logs/testlogs/nested/a.log\"\n ]\n\n cls.filter_file_0_expected_res = [\n (datetime.datetime(2010, 5, 7, 12, 33, 44), 8, 'Error: a.txt'),\n (datetime.datetime(2010, 5, 7, 12, 33, 45), 8, 'Error: a.txt'),\n (datetime.datetime(2013, 6, 27, 18, 12, 11), 8, 'Error: a.txt')\n ]\n\n cls.concat_expected_res = [\n (datetime.datetime(2010, 5, 7, 12, 33, 44), 8, 'Error: a.txt'),\n (datetime.datetime(2010, 5, 7, 12, 33, 45), 8, 'Error: a.txt'),\n (datetime.datetime(2013, 6, 27, 18, 12, 11), 8, 'Error: a.txt'),\n (datetime.datetime(2008, 10, 11, 19, 21, 22), 8, 'Error: a.log'),\n (datetime.datetime(2009, 1, 10, 11, 0, 1), 8, 'Error: a.log'),\n (datetime.datetime(2012, 8, 15, 13, 12, 11), 8, 'Error: a.log'),\n (datetime.datetime(2015, 5, 7, 12, 33, 44), 8, 'Error: nested/a.txt'),\n (datetime.datetime(2003, 4, 21, 11, 1, 47), 8, 'Error: nested/a.log'),\n (datetime.datetime(2009, 5, 12, 11, 53, 29), 8, 'Error: nested/a.log')\n ]\n\n cls.sortl_expected_res = [\n (datetime.datetime(2015, 5, 7, 12, 33, 44), 8, 'Error: nested/a.txt'),\n (datetime.datetime(2013, 6, 27, 18, 12, 11), 8, 'Error: a.txt'),\n (datetime.datetime(2012, 8, 15, 13, 12, 11), 8, 'Error: a.log'),\n (datetime.datetime(2010, 5, 7, 12, 33, 45), 8, 'Error: a.txt'),\n (datetime.datetime(2010, 5, 7, 12, 33, 44), 8, 'Error: a.txt'),\n (datetime.datetime(2009, 5, 12, 11, 53, 29), 8, 'Error: nested/a.log'),\n (datetime.datetime(2009, 1, 10, 11, 0, 1), 8, 'Error: a.log'),\n (datetime.datetime(2008, 10, 11, 19, 21, 22), 8, 'Error: a.log'),\n (datetime.datetime(2003, 4, 21, 11, 1, 47), 8, 'Error: nested/a.log')\n ]\n\n def test_list_files(self):\n \"\"\"\n Check that all and only the right files are selected to be\n read\n \"\"\"\n self.assertEqual(es2.list_files(self.path, self.valid_exts),\n self.log_files)\n\n def test_filter_file(self):\n \"\"\"\n Check that the files are filtered correctly\n \"\"\"\n self.assertEqual(es2.filter_file(self.log_files[0]),\n self.filter_file_0_expected_res)\n\n def test_concat_length(self):\n \"\"\"\n Check that the files are read and split correctly\n \"\"\"\n self.assertEqual(len(es2.concat(self.path, self.valid_exts)),\n len(self.concat_expected_res))\n\n def test_concat_res(self):\n \"\"\"\n Check that the result of concat is correct\n \"\"\"\n self.assertEqual(es2.concat(self.path, self.valid_exts),\n self.concat_expected_res)\n\n def test_sortl(self):\n \"\"\"\n Check that the logs are sorted in the right order\n \"\"\"\n self.assertEqual(es2.sortl(self.concat_expected_res),\n self.sortl_expected_res)\n\n def 
test_concat_sanity_check(self):\n \"\"\"\n Check that every tuple generated by concat really contains\n 3 elements of type (datetime, int, str)\n \"\"\"\n tl = es2.concat(self.path, self.valid_exts)\n for t in tl:\n self.assertTrue(len(t) == 3)\n self.assertTrue(isinstance(t[0], datetime.datetime))\n self.assertTrue(isinstance(t[1], int))\n self.assertTrue(isinstance(t[2], basestring))\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"es2/test_es2.py","file_name":"test_es2.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600034393","text":"import os\n\nfrom torch.optim import Adam\n\nimport rainy\nfrom rainy import net\nfrom rainy.agents import PPOAgent\nfrom rainy.envs import PyBullet, pybullet_parallel\nfrom rainy.net.policy import SeparateStdGaussianDist\n\n\n@rainy.main(PPOAgent, script_path=os.path.realpath(__file__))\ndef main(envname: str = \"Hopper\") -> rainy.Config:\n c = rainy.Config()\n c.set_env(lambda: PyBullet(envname))\n c.set_net_fn(\n \"actor-critic\", net.actor_critic.fc_shared(policy=SeparateStdGaussianDist)\n )\n c.set_parallel_env(pybullet_parallel(normalize_obs=True, normalize_reward=True))\n c.set_optimizer(lambda params: Adam(params, lr=3.0e-4, eps=1.0e-4))\n c.max_steps = int(2e6)\n c.grad_clip = 0.5\n # ppo params\n c.value_loss_weight = 0.5\n c.entropy_weight = 0.0\n c.gae_lambda = 0.95\n c.nworkers = 4\n c.nsteps = 512\n c.ppo_minibatch_size = (4 * 512) // 8\n c.ppo_clip = 0.2\n c.use_gae = True\n c.eval_freq = None\n return c\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/ppo_hopper.py","file_name":"ppo_hopper.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277310215","text":"HTTP_OK = 200\nHTTP_BAD_REQUEST = 400\nHTTP_UNAUTHORIZED = 401\nHTTP_FORBIDDEN = 403\nHTTP_NOT_FOUND = 404\nHTTP_METHOD_NOT_ALLOWED = 405\nHTTP_INTERNAL_SERVER_ERROR = 500\n\nHTTP_STATUS_CODES = {\n 200: 'OK',\n 400: 'Bad Request',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 500: 'Internal Server Error'\n}\n\nHTTP_ERROR_CODES = {\n 400: 'Bad Request',\n 401: 'Unauthorized',\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 500: 'Internal Server Error'\n}\n","sub_path":"ssfr/core/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"342808524","text":"\"\"\"Administration form for general Review Board settings.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.exceptions import ValidationError\nfrom django.utils import six\nfrom django.utils.six.moves.urllib.parse import urlparse\nfrom django.utils.translation import (ugettext,\n ugettext_lazy as _)\nfrom djblets.cache.backend_compat import normalize_cache_backend\nfrom djblets.cache.forwarding_backend import DEFAULT_FORWARD_CACHE_ALIAS\nfrom djblets.forms.fields import TimeZoneField\nfrom djblets.siteconfig.forms import SiteSettingsForm\n\ntry:\n # Django >= 1.7\n from django.utils.module_loading import import_string\nexcept ImportError:\n # Django < 1.7\n from django.utils.module_loading import import_by_path as import_string\n\nfrom reviewboard.admin.siteconfig import load_site_config\n\n\nclass 
GeneralSettingsForm(SiteSettingsForm):\n \"\"\"General settings for Review Board.\"\"\"\n\n CACHE_TYPE_CHOICES = (\n ('memcached', _('Memcached')),\n ('file', _('File cache')),\n )\n\n CACHE_BACKENDS_MAP = {\n 'file': 'django.core.cache.backends.filebased.FileBasedCache',\n 'memcached': 'django.core.cache.backends.memcached.MemcachedCache',\n 'locmem': 'django.core.cache.backends.locmem.LocMemCache',\n }\n\n CACHE_TYPES_MAP = {\n 'django.core.cache.backends.filebased.FileBasedCache': 'file',\n 'django.core.cache.backends.memcached.CacheClass': 'memcached',\n 'django.core.cache.backends.memcached.MemcachedCache': 'memcached',\n 'django.core.cache.backends.locmem.LocMemCache': 'locmem',\n }\n\n CACHE_LOCATION_FIELD_MAP = {\n 'file': 'cache_path',\n 'memcached': 'cache_host',\n }\n\n CACHE_VALIDATION_KEY = '__rb-cache-validation__'\n CACHE_VALIDATION_VALUE = 12345\n\n company = forms.CharField(\n label=_('Company/Organization'),\n help_text=_('The optional name of your company or organization. '\n 'This will be displayed on your support page.'),\n required=False,\n widget=forms.TextInput(attrs={'size': '30'}))\n\n server = forms.CharField(\n label=_('Server'),\n help_text=_('The URL of this Review Board server. This should not '\n 'contain the subdirectory Review Board is installed in.'),\n widget=forms.TextInput(attrs={'size': '30'}))\n\n site_read_only = forms.BooleanField(\n label=_('Enable read-only mode'),\n help_text=_('Prevent non-superusers from making any changes to '\n 'Review Board.'),\n required=False)\n\n read_only_message = forms.CharField(\n label=_('Read-only message'),\n help_text=_('A custom message displayed when the site is in '\n 'read-only mode.'),\n required=False,\n widget=forms.TextInput(attrs={'size': '30'}))\n\n site_media_url = forms.CharField(\n label=_('Media URL'),\n help_text=(_('The URL to the media files. Set to '\n '%smedia/ to use the default media path on '\n 'this server.')\n % settings.SITE_ROOT),\n required=True,\n widget=forms.TextInput(attrs={'size': '30'}))\n\n site_static_url = forms.CharField(\n label=_('Static URL'),\n help_text=(_('The URL to the static files, such as JavaScript files, '\n 'CSS files, and images that are bundled with Review '\n 'Board or third-party extensions. Set to '\n '%sstatic/ to use the default static path '\n 'on this server.')\n % settings.SITE_ROOT),\n required=True,\n widget=forms.TextInput(attrs={'size': '30'}))\n\n site_admin_name = forms.CharField(\n label=_('Administrator Name'),\n required=True,\n widget=forms.TextInput(attrs={'size': '30'}))\n site_admin_email = forms.EmailField(\n label=_('Administrator E-Mail'),\n required=True,\n widget=forms.TextInput(attrs={'size': '30'}))\n\n locale_timezone = TimeZoneField(\n label=_('Time Zone'),\n required=True,\n help_text=_('The time zone used for all dates on this server.'))\n\n cache_type = forms.ChoiceField(\n label=_('Cache Backend'),\n choices=CACHE_TYPE_CHOICES,\n help_text=_('The type of server-side caching to use.'),\n required=True)\n\n cache_path = forms.CharField(\n label=_('Cache Path'),\n help_text=_('The file location for the cache.'),\n required=True,\n widget=forms.TextInput(attrs={'size': '50'}),\n error_messages={\n 'required': 'A valid cache path must be provided.'\n })\n\n cache_host = forms.CharField(\n label=_('Cache Hosts'),\n help_text=_('The host or hosts used for the cache, in hostname:port '\n 'form. 
Multiple hosts can be specified by separating '\n 'them with a semicolon (;).'),\n required=True,\n widget=forms.TextInput(attrs={'size': '50'}),\n error_messages={\n 'required': 'A valid cache host must be provided.'\n })\n\n def load(self):\n \"\"\"Load settings from the form.\n\n This will populate initial fields based on the site configuration.\n It takes care to transition legacy (<= Review Board 1.7) cache\n backends, if still used in production, to a modern configuration.\n \"\"\"\n domain_method = self.siteconfig.get('site_domain_method')\n site = Site.objects.get_current()\n\n # Load the rest of the settings from the form.\n super(GeneralSettingsForm, self).load()\n\n # Load the cache settings.\n cache_backend_info = self.siteconfig.get('cache_backend')\n cache_backend = (\n normalize_cache_backend(cache_backend_info,\n DEFAULT_FORWARD_CACHE_ALIAS) or\n normalize_cache_backend(cache_backend_info))\n\n cache_type = self.CACHE_TYPES_MAP.get(cache_backend['BACKEND'],\n 'custom')\n self.fields['cache_type'].initial = cache_type\n\n if settings.DEBUG:\n self.fields['cache_type'].choices += (\n ('locmem', ugettext('Local memory cache')),\n )\n\n if cache_type == 'custom':\n self.fields['cache_type'].choices += (\n ('custom', ugettext('Custom')),\n )\n cache_locations = []\n elif cache_type != 'locmem':\n cache_locations = cache_backend['LOCATION']\n\n if not isinstance(cache_locations, list):\n cache_locations = [cache_locations]\n\n location_field = self.CACHE_LOCATION_FIELD_MAP[cache_type]\n self.fields[location_field].initial = ';'.join(cache_locations)\n\n # This must come after we've loaded the general settings.\n self.fields['server'].initial = '%s://%s' % (domain_method,\n site.domain)\n\n def save(self):\n \"\"\"Save the form.\n\n This will write the new configuration to the database. It will then\n force a site configuration reload.\n \"\"\"\n server = self.cleaned_data['server']\n\n if '://' not in server:\n # urlparse doesn't properly handle URLs without a scheme. It\n # believes the domain is actually the path. So we apply a prefix.\n server = 'http://' + server\n\n url_parts = urlparse(server)\n domain_method = url_parts[0]\n domain_name = url_parts[1]\n\n if domain_name.endswith('/'):\n domain_name = domain_name[:-1]\n\n site = Site.objects.get_current()\n\n if site.domain != domain_name:\n site.domain = domain_name\n site.save(update_fields=['domain'])\n\n self.siteconfig.set('site_domain_method', domain_method)\n\n cache_type = self.cleaned_data['cache_type']\n\n if cache_type != 'custom':\n if cache_type == 'locmem':\n # We want to specify a \"reviewboard\" location to keep items\n # separate from those in other caches.\n location = 'reviewboard'\n else:\n location_field = self.CACHE_LOCATION_FIELD_MAP[cache_type]\n location = self.cleaned_data[location_field]\n\n if cache_type == 'memcached':\n # memcached allows a list of servers, rather than just a\n # string representing one.\n location = location.split(';')\n\n self.siteconfig.set('cache_backend', {\n DEFAULT_FORWARD_CACHE_ALIAS: {\n 'BACKEND': self.CACHE_BACKENDS_MAP[cache_type],\n 'LOCATION': location,\n }\n })\n\n super(GeneralSettingsForm, self).save()\n\n # Reload any important changes into the Django settings.\n load_site_config()\n\n def full_clean(self):\n \"\"\"Begin cleaning and validating all form fields.\n\n This is the beginning of the form validation process. Before cleaning\n the fields, this will set the \"required\" states for the caching\n fields, based on the chosen caching type. 
This will enable or disable\n validation for those particular fields.\n\n Returns:\n dict:\n The cleaned form data.\n \"\"\"\n orig_required = {}\n cache_type = (self['cache_type'].data or\n self.fields['cache_type'].initial)\n\n for iter_cache_type, field in six.iteritems(\n self.CACHE_LOCATION_FIELD_MAP):\n orig_required[field] = self.fields[field].required\n self.fields[field].required = (cache_type == iter_cache_type)\n\n cleaned_data = super(GeneralSettingsForm, self).full_clean()\n\n # Reset the required flags for any modified field.\n for field, required in six.iteritems(orig_required):\n self.fields[field].required = required\n\n return cleaned_data\n\n def clean(self):\n \"\"\"Clean and validate the form fields.\n\n This is called after all individual fields are validated. It does\n the remaining work of checking to make sure the resulting configuration\n is valid.\n\n Returns:\n dict:\n The cleaned form data.\n \"\"\"\n cleaned_data = super(GeneralSettingsForm, self).clean()\n\n if 'cache_type' not in self.errors:\n cache_type = cleaned_data['cache_type']\n cache_location_field = \\\n self.CACHE_LOCATION_FIELD_MAP.get(cache_type)\n\n if cache_location_field not in self.errors:\n cache_backend = None\n\n try:\n cache_cls = import_string(\n self.CACHE_BACKENDS_MAP[cache_type])\n cache_backend = cache_cls(\n cleaned_data.get(cache_location_field),\n {})\n\n cache_backend.set(self.CACHE_VALIDATION_KEY,\n self.CACHE_VALIDATION_VALUE)\n value = cache_backend.get(self.CACHE_VALIDATION_KEY)\n cache_backend.delete(self.CACHE_VALIDATION_KEY)\n\n if value != self.CACHE_VALIDATION_VALUE:\n self.errors[cache_location_field] = self.error_class([\n _('Unable to store and retrieve values from this '\n 'caching backend. There may be a problem '\n 'connecting.')\n ])\n except Exception as e:\n self.errors[cache_location_field] = self.error_class([\n _('Error with this caching configuration: %s')\n % e\n ])\n\n # If the cache backend is open, try closing it. 
This may fail,\n # so we want to ignore any failures.\n if cache_backend is not None:\n try:\n cache_backend.close()\n except Exception:\n pass\n\n return cleaned_data\n\n def clean_cache_host(self):\n \"\"\"Validate that the cache_host field is provided if required.\n\n If valid, this will strip whitespace around the ``cache_host`` field\n and return it.\n\n Returns:\n unicode:\n The cache host, with whitespace stripped.\n\n Raises:\n django.core.exceptions.ValidationError:\n A cache host was not provided, and is required by the backend.\n \"\"\"\n cache_host = self.cleaned_data['cache_host'].strip()\n\n if self.fields['cache_host'].required and not cache_host:\n raise ValidationError(\n ugettext('A valid cache host must be provided.'))\n\n return cache_host\n\n def clean_cache_path(self):\n \"\"\"Validate that the cache_path field is provided if required.\n\n If valid, this will strip whitespace around the ``cache_path`` field\n and return it.\n\n Returns:\n unicode:\n The cache path, with whitespace stripped.\n\n Raises:\n django.core.exceptions.ValidationError:\n A cache path was not provided, and is required by the backend.\n \"\"\"\n cache_path = self.cleaned_data['cache_path'].strip()\n\n if self.fields['cache_path'].required and not cache_path:\n raise ValidationError(\n ugettext('A valid cache path must be provided.'))\n\n return cache_path\n\n class Meta:\n title = _('General Settings')\n save_blacklist = ('server', 'cache_type', 'cache_host', 'cache_path')\n\n fieldsets = (\n {\n 'title': _('Site Settings'),\n 'classes': ('wide',),\n 'fields': ('company', 'server', 'site_media_url',\n 'site_static_url', 'site_admin_name',\n 'site_admin_email', 'locale_timezone',\n 'site_read_only', 'read_only_message'),\n },\n {\n 'title': _('Cache Settings'),\n 'classes': ('wide',),\n 'fields': ('cache_type', 'cache_path', 'cache_host'),\n },\n )\n","sub_path":"reviewboard/admin/forms/general_settings.py","file_name":"general_settings.py","file_ext":"py","file_size_in_byte":14490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"439929857","text":"import pygame\r\nimport gamebox\r\ncamera = gamebox.Camera(800,600)\r\n\r\nplayer = gamebox.from_color(50, 100, \"red\", 20, 40)\r\nobstacle = gamebox.from_color(400, 300, \"yellow\", 600, 20)\r\n\r\n\r\ndef tick(keys):\r\n camera.clear('black')\r\n\r\n # move the player\r\n if pygame.K_RIGHT in keys:\r\n player.speedx += 0.5\r\n if pygame.K_LEFT in keys:\r\n player.speedx -= 0.5\r\n if pygame.K_UP in keys:\r\n player.speedy -= 0.5\r\n if pygame.K_DOWN in keys:\r\n player.speedy += 0.5\r\n\r\n # gravity\r\n player.speedy += 0.25\r\n\r\n player.move_speed()\r\n\r\n\r\n # handle collisions\r\n if player.touches(obstacle):\r\n obstacle.color = 'blue'\r\n player.move_to_stop_overlapping(obstacle)\r\n else:\r\n obstacle.color = 'yellow'\r\n\r\n # draw everything\r\n camera.draw(player)\r\n camera.draw(obstacle)\r\n\r\n # usually camera.display() should be the last line of the tick method\r\n camera.display()\r\n\r\nticks_per_second = 30\r\n\r\n# keep this line the last one in your program\r\ngamebox.timer_loop(ticks_per_second, tick)\r\n\r\n","sub_path":"markdown/files/001/2017-04-07-game4.py","file_name":"2017-04-07-game4.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191532548","text":"import requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\nurl = 
'https://api.github.com/search/repositories?q=language:python&sort=stars'\n\n# Send the request and store the response\nr = requests.get(url)\n\n# Status code\nprint(\"Status code:\",r.status_code)\n\n# JSON data ---> dict\nresponse_dict = r.json()\n\nprint(response_dict.keys())\n\n# Total number of Python repositories\nprint(\"Total repositories:\", response_dict['total_count'])\n\n# Explore the repository information in items\nrepo_dicts = response_dict['items']\nprint(\"Repositories returned:\", len(repo_dicts))\n\n# Get each repository's name and star count\nnames, stars = [], []\nfor repo_dict in repo_dicts:\n\tnames.append(repo_dict['name'])\n\tstars.append(repo_dict['stargazers_count'])\n\n# Style\nmy_style = LS('#333366', base_style=LCS)\n\n# Chart configuration\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend=False # hide the legend\n\n\n# Bar chart\nchart = pygal.Bar(my_config, style=my_style)\n\nchart.title = 'Most-Starred Python Projects on Github'\nchart.x_labels = names\nchart.add('', stars)\nchart.render_to_file('python_repos.svg')\n","sub_path":"python_work/使用Web API/python_repos.py","file_name":"python_repos.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"95034004","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\ndef zad1(text):\n    dict = {}\n    text = text.lower()\n    i = 0\n    for character in text:\n        if not character in dict:\n            dict[character] = 0\n        dict[character] +=1\n    return dict\n\ndef zad2(path):\n    f = open(path, \"r\")\n    line_count = 0\n    text = \"\"\n    for line in f:\n        line1=line.strip(\"\\n\")\n        text+=line1\n    return zad1(text)\n\ndef zad3():\n    list2 = [666,4,5,6]\n    x = min(list2)\n    return x\n\n\n\nprint(zad1(\"Mariusz to bardzo rzadkie imie\"))\nprint(zad2(\"Lab2\\plik.txt\"))\nprint(zad3())\n\n\n","sub_path":"Lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"534194723","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom app import app\nfrom apps import app1,app2,app3,app4,app5,app6\napp.title = 'WASH Analytics'\napp.layout = html.Div([\n    dcc.Location(id='url', refresh=False),\n    html.Div(id='page-content')\n])\n\n@app.callback(Output('page-content', 'children'),\n              Input('url', 'pathname'))\ndef display_page(pathname):\n    if pathname == '/':\n        return app1.layout\n    elif pathname == '/country-sanitation':\n        return app2.layout\n    elif pathname == '/mean-service':\n        return app3.layout\n    elif pathname == '/median-service':\n        return app4.layout\n    elif pathname == '/comparison':\n        return app5.layout\n    elif pathname == '/prediction':\n        return app6.layout\n    else:\n        return '404'\n\nif __name__ == '__main__':\n    app.run_server(debug=True)","sub_path":"WASH_Analytics/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"97285280","text":"#!/usr/bin/env python3\nfrom androguard.misc import *\nfrom androguard.session import Session\nfrom keras.models import load_model\nimport numpy as np\nimport csv\nimport re\nimport sys\nimport tensorflow as tf\nimport httplib2\nimport os\nimport oauth2client\nfrom oauth2client import client, tools\nimport base64\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom apiclient 
import errors, discovery\nimport mimetypes\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nimport argparse\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nSCOPES = 'https://www.googleapis.com/auth/gmail.send'\nCLIENT_SECRET_FILE = './client_secret.json'\n# CLIENT_SECRET_FILE = './client_secret.json'\nAPPLICATION_NAME = 'Androguide Service'\n\ndef extract_feature(file):\n print(\"testing file \", file)\n CONF[\"SESSION\"] = Session(True) # start a session\n print(\"Start extracting Features...\")\n a, d, dx = AnalyzeAPK(file)\n features = list()\n features += a.get_permissions()\n # handle multidex problem.\n methods = list()\n if type(d) is list:\n print(\"is list\")\n for d_l, dx_l in zip(d, dx):\n method = get_apis(d_l, dx_l)\n methods += method\n else:\n print(\"not list.\")\n methods = get_apis(d, dx)\n features += methods\n return feature_list(features)\n\n\ndef get_apis(d, dx):\n # get the name of classes\n cs = [cc.get_name() for cc in d.get_classes()]\n methods = set()\n # get the method in the classes\n for method in d.get_methods():\n m = dx.get_method(method)\n if method.get_code() is None:\n continue\n\n for i in m.get_basic_blocks().get():\n for ins in i.get_instructions():\n output = ins.get_output()\n match = re.search(r'(L[^;]*;)->[^\\(]*\\([^\\)]*\\).*', output)\n if match and match.group(1) not in cs:\n methods.add(match.group())\n return methods\n\n\ndef feature_list(features):\n # read feature list and convert origin literal feature to vector\n print(\"Start creating feature vector...\")\n with open('../feature/feature_list.csv', 'r') as file:\n r = csv.reader(file)\n f_list = list(r)[0]\n feature_vector = list()\n for f in f_list:\n if f in features:\n feature_vector.append(1)\n else:\n feature_vector.append(0)\n return feature_vector\n\n\ndef model_predict(feature_vector):\n print(\"Start predicting apks...\")\n feature_vector = np.array(feature_vector)\n feature_vector = feature_vector.reshape(1, feature_vector.shape[0])\n model = load_model('../models/nn_no_encoder.h5')\n prediction = model.predict(feature_vector)\n prediction = prediction.argmax(axis=1)\n if prediction[0] == 0:\n print(\"Benign apk!\")\n return \"Benign apk.\"\n else:\n print(\"Malicious apk!\")\n return \"Malicious apk.\"\n\n'''\nmail service\n'''\ndef get_credentials():\n # home_dir = os.path.expanduser('~')\n credential_dir = os.path.join('/var/www', '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-email-send.json')\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef SendMessage(sender, to, subject, msgHtml, msgPlain, attachmentFile=None):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n if attachmentFile:\n message1 = createMessageWithAttachment(sender, to, subject, msgHtml, msgPlain, attachmentFile)\n else: \n message1 = CreateMessageHtml(sender, to, subject, msgHtml, msgPlain)\n result = SendMessageInternal(service, \"me\", message1)\n return result\n\ndef SendMessageInternal(service, user_id, message):\n try:\n for key, value in 
message.items():\n message[key] = value.decode('utf-8')\n message = (service.users().messages().send(userId=user_id, body=message).execute())\n print('Message Id: %s' % message['id'])\n return message\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n return \"Error\"\n return \"OK\"\n\ndef CreateMessageHtml(sender, to, subject, msgHtml, msgPlain):\n msg = MIMEMultipart('alternative')\n msg['Subject'] = subject\n msg['From'] = sender\n msg['To'] = to\n msg.attach(MIMEText(msgPlain, 'plain'))\n msg.attach(MIMEText(msgHtml, 'html'))\n return {'raw': base64.urlsafe_b64encode(msg.as_string().encode('utf-8'))}\n\ndef createMessageWithAttachment(\n sender, to, subject, msgHtml, msgPlain, attachmentFile):\n \"\"\"Create a message for an email.\n\n Args:\n sender: Email address of the sender.\n to: Email address of the receiver.\n subject: The subject of the email message.\n msgHtml: Html message to be sent\n msgPlain: Alternative plain text message for older email clients \n attachmentFile: The path to the file to be attached.\n\n Returns:\n An object containing a base64url encoded email object.\n \"\"\"\n message = MIMEMultipart('mixed')\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n messageA = MIMEMultipart('alternative')\n messageR = MIMEMultipart('related')\n\n messageR.attach(MIMEText(msgHtml, 'html'))\n messageA.attach(MIMEText(msgPlain, 'plain'))\n messageA.attach(messageR)\n\n message.attach(messageA)\n\n print(\"create_message_with_attachment: file:\", attachmentFile)\n content_type, encoding = mimetypes.guess_type(attachmentFile)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'text':\n fp = open(attachmentFile, 'rb')\n msg = MIMEText(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'image':\n fp = open(attachmentFile, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'audio':\n fp = open(attachmentFile, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\n fp.close()\n else:\n fp = open(attachmentFile, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n filename = os.path.basename(attachmentFile)\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(message.as_string())}\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Process')\n parser.add_argument('-a', action=\"store\", dest=\"path\")\n parser.add_argument('-b', action=\"store\", dest=\"mail\")\n parser.add_argument('-c', action=\"store\", dest=\"fname\")\n results = parser.parse_args()\n print(\"argv[1]:\", results.path , \", argv[2]:\", results.mail, \", argv[3]:\", results.fname)\n feature_vector = extract_feature(results.path)\n result = model_predict(feature_vector)\n \"\"\"\n argv pass:\n sys.argv[1]: apk file\n sys.argv[2]: user_email\n sys.argv[3]: file_name\n \"\"\"\n to = results.mail\n file_name = results.fname\n sender = \"androguideservice@gmail.com\"\n subject = \"Androguide Service: App detection result\"\n msgHtml = \"App detection result for \" + file_name + \":\" + result + \"
\" + \"
Androguide Service team.\"\n msgPlain = \"App detection result for\" + file_name + \":\" + result + \"\\n\" + \"\\nAndroguide Service team.\"\n SendMessage(sender, to, subject, msgHtml, msgPlain)\n # Send message with attachment: \n # SendMessage(sender, to, subject, msgHtml, msgPlain, '/path/to/file.pdf')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/check_apk.py","file_name":"check_apk.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128962886","text":"from scipy.interpolate import interp1d\nimport numpy as np\n\n\nclass FillVoids(object):\n\n def __init__(self,\n array=None,\n nodata=None):\n self.array = array\n self.ncol = array.shape[1]\n self.nrows = array.shape[0]\n self.nodata = nodata\n\n @staticmethod\n def find_blocks(arr,\n nodata=None):\n \"\"\"\n Method to find blocks of no data value in a 1-D array\n :param arr: Input 1-D array\n :param nodata: no data value\n :return: List of tuples [(pixel_loc_before_void, pixel_loc_after_void), ]\n \"\"\"\n loc = np.msort(np.where(arr == nodata)[0])\n\n if loc.shape[0] > 0:\n arr_mask = arr.copy()\n arr_mask = arr_mask * 0 + 1\n arr_mask[loc] = 0\n\n arr_jumps = np.hstack([0, arr_mask[1:]-arr_mask[:-1]])\n\n jump_starts = (np.where(arr_jumps == -1)[0] - 1).tolist()\n jump_ends = (np.where(arr_jumps == 1)[0]).tolist()\n\n if len(jump_starts) != len(jump_ends):\n jump_ends.append(arr.shape[0] - 1)\n\n if jump_starts[0] == -1:\n jump_starts[0] = 0\n\n return zip(jump_starts, jump_ends)\n\n @staticmethod\n def fill_voids(arr,\n blocks=None):\n \"\"\"\n Method to fill voids in a 1-D array\n :param arr: 1-D array\n :param blocks: List of tuples of block locations, output from find_blocks()\n :return: 1-D array\n \"\"\"\n out_arr = arr.copy()\n\n if blocks is not None and len(blocks) > 0:\n for block in blocks:\n y = out_arr[list(block)]\n f = interp1d(block, y)\n out_arr[np.arange(*block)] = f(np.arange(*block))\n\n return out_arr\n\n @staticmethod\n def fill_voids_1d(arr,\n nodata):\n \"\"\"\n Method to fill voids in 1D array by\n filling voids in 1d arrays\n :param arr: 1-D array\n :param nodata: No data value\n :return: 1-D array\n \"\"\"\n void_blocks = FillVoids.find_blocks(arr,\n nodata)\n return FillVoids.fill_voids(arr,\n void_blocks)\n\n def fill(self):\n \"\"\"\n Method to fill voids in 2D array by\n Filling voids in 1D array along x axis\n Filling voids in 1D array along y axis\n and taking the mean of two 2D arrays\n :return: 2D array\n \"\"\"\n xfilled_arr = np.apply_along_axis(self.fill_voids_1d,\n 1,\n self.array,\n self.nodata)\n\n yfilled_arr = np.apply_along_axis(self.fill_voids_1d,\n 0,\n self.array,\n self.nodata)\n\n return (xfilled_arr + yfilled_arr)/2.0\n\n\nclass FillEdges(object):\n\n def __init__(self):\n pass\n\n\n\n","sub_path":"delta.py","file_name":"delta.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70113543","text":"\"\"\"\nsub modules for everything about the policies\n\"\"\"\nfrom enum import Enum\nfrom uuid import uuid4\n\nfrom typing import Iterable, Mapping, Union, Tuple, Optional\n\nfrom nessus.base import LibNessusBase\nfrom nessus.editor import NessusTemplate\nfrom nessus.file import NessusRemoteFile\nfrom nessus.model import lying_exist, Object\n\n\nclass NessusPolicyVisibility(Enum):\n \"\"\"\n should be int value but nessus is lying\n \"\"\"\n private = 'private'\n shared = 
'shared'\n\n\nclass NessusPolicy(Object):\n \"\"\"\n nessus is lying with:\n - `visibility` which is not always there and which is an int\n \"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # pylint: disable=too-few-public-methods\n\n # pylint: disable=too-many-arguments\n def __init__(self, policy_id: int, template_uuid: str, name: str, description: str, owner_id: str, owner: str,\n shared: int, user_permissions: int, creation_date: int, last_modification_date: int,\n visibility: NessusPolicyVisibility, no_target: bool) -> None:\n self.id = policy_id # pylint: disable= invalid-name\n self.template_uuid = template_uuid\n self.name = name\n self.description = description\n self.owner_id = owner_id\n self.owner = owner\n self.shared = shared\n self.user_permissions = user_permissions\n self.creation_date = creation_date\n self.last_modification_date = last_modification_date\n self.visibility = visibility\n self.no_target = no_target\n\n def __eq__(self, other):\n return isinstance(other, NessusPolicy) and self.id == other.id\n\n def __hash__(self):\n return hash(self.id)\n\n # pylint: disable=too-many-arguments\n @staticmethod\n def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusPolicy':\n \"\"\"\n generate a NessusPolicy by parsing the given json\n :param json_dict: json encoded NessusPolicy\n :return: extracted NessusPolicy\n \"\"\"\n policy_id = int(json_dict['id'])\n template_uuid = str(json_dict['template_uuid'])\n name = str(json_dict['name'])\n description = str(json_dict['description'])\n owner_id = str(json_dict['owner_id'])\n owner = str(json_dict['owner'])\n shared = int(json_dict['shared'])\n user_permissions = int(json_dict['user_permissions'])\n creation_date = int(json_dict['creation_date'])\n last_modification_date = int(json_dict['last_modification_date'])\n visibility = lying_exist(json_dict, 'visibility', NessusPolicyVisibility, None)\n no_target = bool(json_dict['no_target'])\n\n return NessusPolicy(policy_id, template_uuid, name, description, owner_id, owner, shared, user_permissions,\n creation_date, last_modification_date, visibility, no_target)\n\n\nclass LibNessusPolicies(LibNessusBase):\n \"\"\"\n modules handling /policies\n \"\"\"\n\n def list(self) -> Iterable[NessusPolicy]:\n \"\"\"\n Returns the policy list.\n :return: iterable of available policy\n \"\"\"\n ans = self._get('policies')\n return {NessusPolicy.from_json(policy) for policy in ans.json()['policies']}\n\n def delete(self, policy: NessusPolicy) -> None:\n \"\"\"\n Delete a policy.\n :param policy: one to delete\n \"\"\"\n url = 'policies/{}'.format(policy.id)\n self._delete(url)\n\n # pylint: disable=bad-whitespace\n def create(self, template: NessusTemplate, name: Optional[str] = None) -> Tuple[int, str]:\n \"\"\"\n Creates a policy.\n :param template: what to create\n :param name: name of the policy\n :return: (policy_id, policy_name)\n \"\"\"\n if name is None:\n name = str(uuid4())\n\n json = {\n 'uuid': template.uuid,\n 'settings': {\n 'name': name\n },\n 'audits': {},\n }\n ans = self._post('policies', json=json)\n return ans.json()['policy_id'], ans.json()['policy_name']\n\n def import_(self, remote_file: NessusRemoteFile) -> NessusPolicy:\n \"\"\"\n Import an existing policy uploaded using Nessus.file (.nessus format only).\n sorry about the name, but in python 'import' is a reserved keyword\n :param remote_file: file to treat as nessus policy\n \"\"\"\n json = {'file': remote_file.name}\n ans = self._post('policies/import', json=json)\n return 
NessusPolicy.from_json(ans.json())\n","sub_path":"nessus/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"298571357","text":"from django.db import models\nfrom django.utils import timezone\n\nclass Post(models.Model):\n# Post is the name of our model\n# it means Post is a Django model, so Django\n# knows it should be saved in the database\n\n# models.CharField - this is how we define a text with a limited number of characters.\n# models.TextField - this field is for text without a fixed limit. Seems ideal for blog post content, right?\n# models.DateTimeField - this is a date and time.\n# models.ForeignKey - this is a link to another model\n    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)\n    title = models.CharField(max_length=200)\n    text = models.TextField()\n    created_date = models.DateTimeField(default=timezone.now)\n    published_date = models.DateTimeField(blank=True, null=True)\n\n    def publish(self):\n        self.published_date = timezone.now()\n        self.save()\n\n    def __str__(self):  # dunder (double underscore)\n        return self.title\n","sub_path":"djangogirls-master/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"11964012","text":"from TGText import *\nfrom TGClasses import *\nimport random\n\ndef cls():\n    for i in range(50):\n        print (\"\")\n\n# **************** INTRODUCTION ***************************\n\ndef Introduction():\n    cls()\n    #the first thing that a new player does\n    print (feedback[\"Introduction1\"])\n    player = CreatePlayer()\n    player.sayName()\n    return player\n    \ndef CreatePlayer():\n    #allows player to specify their name and select a class\n    playername = str(input(feedback[\"GetPlayerName\"]))\n    while True:\n        playerclass = str(input(feedback[\"GetPlayerClass\"] % playername))\n        if playerclass.isdigit():\n            playerclass = int(playerclass)\n            if playerclass in range(1,3):\n                break\n            else:\n                print(feedback[\"SelectError\"])\n        else:\n            print (feedback[\"SelectError\"])\n    if playerclass == 1:\n        player = Knight(playername)\n        return player\n    elif playerclass == 2:\n        player = Brigand(playername)\n        return player\n    \n\n# ****************** FACTORY *******************\n\ndef CreateUnit(klass, number):\n    #creates the given number of Knights/Brigands/Zombies\n    #try to consolidate the various IF statements into a dictionary\n    print (feedback[\"CreateUnitMsg\"])\n    newUnitNames = []\n    newUnits = []\n    for i in range(number): \n        newUnitNames.append(str(klass) + str(i))\n    for j in newUnitNames:\n        if klass == \"Knight\":\n            j = Knight(j)\n        elif klass == \"Brigand\":\n            j = Brigand(j)\n        elif klass == \"Zombie\":\n            j = Zombie(j) \n        print (j.name)\n        newUnits.append(j)\n    return newUnits\n\n\n    \n\"\"\"\n    \n    \n    print (feedback[\"InteractionStart\"])\n    print (feedback[\"InteractionWorld1\"])\n    print (feedback[\"InteractionWorld2\"])\n    \n    while True:\n        choice = str(input(\"Selection: \"))\n        if choice.isdigit():\n            choice = int(choice)\n            if choice in range(1,3):\n                break\n            else:\n                print(feedback[\"SelectError\"])\n        else:\n            print(feedback[\"SelectError\"])\n    if choice == 1:\n        Arena(player)\n    if choice == 2:\n        print (feedback[\"InteractionTroll\"])\n        World(player)\n\"\"\"\n    \n# ***************** COMBAT/ARENA ******************\n\ndef Arena(player):\n    cls()\n    print (feedback[\"ArenaWelcome1\"])\n    print 
(feedback[\"ArenaWelcome2\"])\n ArenaPickType = random.choice(KlassText[\"KlassList\"])\n ArenaPickNumber = random.randint(1,5)\n ArenaMobs = CreateUnit(ArenaPickType, ArenaPickNumber)\n CombatInteraction(player, ArenaPickNumber, ArenaMobs)\n\ndef CombatInteraction(player, ArenaPickNumber, ArenaMobs):\n print (feedback[\"InteractionStart\"])\n print (feedback[\"InteractionAttack\"])\n print (feedback[\"InteractionRetreat\"])\n while True:\n choice = str(input(\"Selection: \")) \n if choice.isdigit():\n choice = int(choice)\n if choice in range(1,3):\n break\n else:\n print(feedback[\"SelectError\"])\n else:\n print (feedback[\"SelectError\"])\n if choice == 1:\n CombatBattle(player, ArenaPickNumber, ArenaMobs)\n if choice == 2:\n World(player)\n \ndef CombatBattle(player, ArenaPickNumber, ArenaMobs):\n EnemiesAlive = True\n while EnemiesAlive == True:\n isAliveCounter = 0\n for i in ArenaMobs:\n if i.Alive == False:\n isAliveCounter += 1\n if isAliveCounter == len(ArenaMobs):\n break\n while True:\n choice = str(input(feedback[\"CombatPickText\"]))\n if choice.isdigit():\n choice = int(choice)\n if choice in range(0, ArenaPickNumber):\n break\n else:\n print(feedback[\"SelectError\"])\n else:\n print (feedback[\"SelectError\"])\n player.CombatAttack(ArenaMobs[choice])\n if ArenaMobs[choice].HP <= 0:\n print (ArenaMobs[choice].name, feedback[\"TargetDied\"])\n \n roll = random.randint(0,10)\n if roll > 5:\n attackmob = random.choice(ArenaMobs)\n attackmob.CombatAttack(player)\n \n print (feedback[\"CombatWin\"])\n print (feedback[\"EnteringWorld\"]) \n \n \n World(player)\n\ndef CombatInteraction(player, ArenaPickNumber, ArenaMobs):\n print (feedback[\"InteractionStart\"])\n print (feedback[\"InteractionAttack\"])\n print (feedback[\"InteractionRetreat\"])\n while True:\n choice = str(input(\"Selection: \")) \n if choice.isdigit():\n choice = int(choice)\n if choice in range(1,3):\n break\n else:\n print(feedback[\"SelectError\"])\n else:\n print (feedback[\"SelectError\"])\n if choice == 1:\n CombatBattle(player, ArenaPickNumber, ArenaMobs)\n if choice == 2:\n World(player)\n \n \n# **************** MAP ***************************\n\ndef drawmap(size, x, y):\n \n cls()\n print (feedback[\"InteractionWorldWelcome\"])\n \n #create the board by nested lists\n Board = []\n for row in range(0, size):\n Board.append([])\n for column in range(0, size):\n Board[row].append(random.choice(feedback[\"map\"]))\n \n \n #set an @ symbol at x,y location within Board to represent player\n \n Board[4][4] = \"X\"\n Board[y][x] = \"@\"\n \n #print the board\n for i in Board:\n print (\" \".join(i))\n \n return Board\n\ndef interactmap(size, x, y, player):\n \n while True:\n \n Board = drawmap(size, x, y)\n \n #practice for configuring events based on the map\n if Board[4][4] == \"@\":\n Arena(player)\n\n \n \n else:\n choice = input(\"Direction: \")\n if choice == \"down\" or choice == \"s\":\n y += 1\n elif choice == \"up\" or choice == \"w\":\n y -= 1 \n elif choice == \"left\" or choice == \"a\":\n x -= 1 \n elif choice == \"right\" or choice == \"d\":\n x += 1\n else:\n print (\"error in selection\")\n\n\ndef World(player):\n interactmap(10,0,0, player)\n\n","sub_path":"PythonExperiments/Text Game/TGFunctions.py","file_name":"TGFunctions.py","file_ext":"py","file_size_in_byte":6267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110321281","text":"# This is an exploratory test of Geocodio API services. 
API key: b42752c85bb22c5b5e924be26276bb246257c25\n# The website can be found here:https://www.geocod.io/\n\nimport pandas as pd\nfrom geocodio import GeocodioClient\nfrom Address_Dictionary import address_dictionary_1 #,address_dictionary_2\nfrom os import walk\nfrom pathlib import Path\n\ndef clean_data(df):\n \n # setup dataframe and geocodio client\n temp = df.copy()\n client = GeocodioClient(\"b42752c85bb22c5b5e924be26276bb246257c25\")\n \n # add additional columns\n temp['Cleaned_Location'] = temp['Location']\n temp['Coordinates'] = ''\n temp['Error_Logging'] = ''\n \n # retrieve all addresses previously geocoded\n coordinate_df = pd.read_csv('Coordinate_Dictionary.csv')\n \n for i, row in temp.iterrows():\n \n # use address dictionary for coordinates if location exists in address dictionary\n location = temp.loc[i,'Location']\n if location in coordinate_df['Location'].unique():\n temp.loc[i,'Cleaned_Location'] = coordinate_df.loc[coordinate_df['Location'] == location, 'Cleaned_Location'].iloc[0]\n temp.loc[i,'Coordinates'] = coordinate_df.loc[coordinate_df['Location'] == location,'Coordinates'].iloc[0]\n continue\n \n # add milwaukee, WI to address if not already present\n if 'MKE' in temp.loc[i,'Cleaned_Location']:\n temp.loc[i,'Cleaned_Location'] = temp.loc[i,'Cleaned_Location'].replace('MKE',' MILWAUKEE, WI')\n else:\n temp.loc[i,'Cleaned_Location'] = temp.loc[i,'Cleaned_Location']+ ', MILWAUKEE, WI'\n\n # clean addresses of common abbreviations and typos\n temp.loc[i,'Cleaned_Location'] = address_dictionary_1(temp.loc[i,'Cleaned_Location'])\n\n # get and record coordinates of given address\n try:\n geocoded_location = client.geocode(temp.loc[i,'Cleaned_Location'])\n # catch error when our api key has run out of calls\n except:\n print('No calls remaining...')\n # save all geocoded addresses\n temp = temp.loc[0:i-1,:]\n coordinate_df.to_csv('Coordinate_Dictionary.csv',index=False,mode='w')\n return temp\n \n # check whether data exists (works perfectly fine, but can be improved)\n if len(geocoded_location['results']) > 0:\n \n coordinates = str(geocoded_location['results'][0]['location'])\n \n # add new coordinates to coordinate dictionary\n coordinate_entry = pd.DataFrame({'Location':[temp.loc[i,'Location']],\n 'Cleaned_Location':[temp.loc[i,'Cleaned_Location']],\n 'Coordinates':[coordinates]\n })\n coordinate_df = coordinate_df.append(coordinate_entry, ignore_index=True)\n # log errors\n else: \n coordinates = ''\n temp.loc[i,'Error_Logging'] = str(geocoded_location)\n error = pd.DataFrame({'location':[temp.loc[i,'Location']],\n 'cleaned_location':[temp.loc[i,'Cleaned_Location']],\n 'geocoding_result':[temp.loc[i,'Error_Logging']]})\n error.to_csv('../geocoding_data/Error_Logging.csv', mode='a', header=False)\n\n temp.loc[i,'Coordinates'] = coordinates\n \n coordinate_df.to_csv('Coordinate_Dictionary.csv',index=False,mode='w')\n return temp\n\ndata_path = '../data/'\ngeocoding_data_path = '../geocoding_data/'\n\nf = []\nfor (dir_path, dir_names, file_names) in walk(data_path):\n f.extend(file_names)\n break\nif 'readme.txt' in file_names:\n file_names.remove('readme.txt')\n\nfor file_name in file_names:\n \n data_df = pd.read_csv(data_path+file_name)\n length_of_data_df = data_df.shape[0]\n \n file = Path(geocoding_data_path+file_name)\n \n # check whether a geocoding data file has already been generated\n if file.is_file():\n geocoding_data_df = pd.read_csv(geocoding_data_path+file_name)\n length_of_geocoding_data_df = geocoding_data_df.shape[0]\n remaining_instances = 
length_of_data_df - length_of_geocoding_data_df\n        \n        # geocoding file already exists and only some addresses have been converted to coordinates\n        if remaining_instances > 0:\n            result = clean_data(data_df.tail(remaining_instances))\n            result.to_csv(geocoding_data_path+file_name,mode='a',header=False)\n        \n        # geocoding file already exists and all addresses have been converted to coordinates\n        else:\n            print(file_name)\n            continue\n        \n    # geocoding file does not exist\n    else:\n        result = clean_data(data_df)\n        result.to_csv(geocoding_data_path+file_name)\n    \n    print(file_name)\n    \n    # if no calls are remaining, then break out of loop\n    geocoding_data_df = pd.read_csv(geocoding_data_path+file_name)\n    length_of_geocoding_data_df = geocoding_data_df.shape[0]\n    if length_of_data_df > length_of_geocoding_data_df:\n        break\n    \n\n","sub_path":"geocoding/Geocode_Addresses.py","file_name":"Geocode_Addresses.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"68257606","text":"from collections import deque\r\n\r\n\r\nQ = deque()\r\nfor i in range(5):\r\n    Q.append(i)\r\n\r\nwhile len(Q) > 0:\r\n    print(Q.popleft())\r\n\r\n\r\n\r\ntest = [() for _ in range(5)]\r\ntest[0] = (0, 0)\r\ntest[1] = (1, 1)\r\na = (3, 3)\r\ntest[3] = a\r\nprint(test)","sub_path":"개념/06 Queue/01.Queue_deque.py","file_name":"01.Queue_deque.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"315226824","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\n\nfrom math import ceil\n\nregister = template.Library()\n\n@register.filter\n@stringfilter\ndef to_rgb(value):\n    cmyk_arr = value.split(',')\n    cmyk_arr = list(map(int, cmyk_arr))\n\n    C = cmyk_arr[0]/100\n    M = cmyk_arr[1]/100\n    Y = cmyk_arr[2]/100\n    K = cmyk_arr[3]/100\n\n    r = ceil(255 * (1 - C) * (1 - K))\n    g = ceil(255 * (1 - M) * (1 - K))\n    b = ceil(255 * (1 - Y) * (1 - K))\n\n    return ','.join(map(str, [r, g, b]))\n","sub_path":"color_in/templatetags/cmyk_to_rgb.py","file_name":"cmyk_to_rgb.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"507362618","text":"# pandas_scatter.py\n# Drawing a scatter plot with pandas\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib\n\n# Set a Korean font - use 'Malgun Gothic'\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic'\n\ntemperature = [25.2, 27.4, 22.9, 26.2, 29.5\n               , 33.1, 30.4, 36.1, 34.4, 29.1]\nIce_cream_sales = [236500, 357500, 203500, 365200, 446600\n                   , 574200, 453200, 675400, 598400, 463100]\n\ndict_data = {'기온' : temperature\n             , '아이스크림 판매량' : Ice_cream_sales}\n\n# Create the DataFrame\ndf = pd.DataFrame(dict_data)\nprint(df)\n\ndf_ice_cream = pd.DataFrame(dict_data\n                            , columns=['기온', '아이스크림 판매량'])\nprint(df_ice_cream)\n\n# Draw the scatter plot\ndf_ice_cream.plot.scatter(x='기온', y='아이스크림 판매량'\n                          , grid = True\n                          , title='최고 기온과 아이스크림 판매량')\nplt.show()","sub_path":"20200316/2020_03_16/pandas_scatter.py","file_name":"pandas_scatter.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"261628006","text":"\nfrom NetworkTopology import *\nfrom flask import Flask, request, jsonify\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef get():\n    return jsonify({\"msg\": \"Invalid request: use POST!\"})\n\n\n@app.route('/', methods=['POST'])\ndef process_input():\n    try:\n        
inpoo = request.get_json()\n        validate_input(inpoo)\n\n        edges_raw = inpoo[\"inList\"]\n        edges = [(e[0], e[1]) for e in edges_raw]\n        adj = create_adj(edges)\n        topology_type = determine_topology(adj)\n        return jsonify({'type': topology_type})\n\n    except Exception as e:\n        return jsonify({\"msg\": \"an error occurred\"})\n\n\n@app.errorhandler(500)\ndef server_error(e):\n    return jsonify({\"msg\": \"A server error occurred. Please try again later.\"})\n\n\nif __name__ == '__main__':\n    app.run(host='127.0.0.1', port=8080, debug=True)\n","sub_path":"NetworkTopology/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"496320817","text":"from datetime import datetime\nfrom app import app, db\nfrom app.core.models import Role, Games\n\n\ndef create_games():\n    \"\"\" Create games when app starts \"\"\"\n    from app.core.models import Games, Role\n\n    # Create all tables\n    # db.create_all()\n\n    # Adding roles\n\n    # Add users\n    # game = find_or_create_game(\"Example Game\")\n    # game = find_or_create_game(\"Metroid\")\n\n\n    # Save to DB\n    # db.session.commit()\n\n\n\ndef find_or_create_game(id, name, detail_url, date_added, date_last_updated, deck, image_url):\n    \"\"\" Find existing game or create new game \"\"\"\n    game = Games.query.filter(Games.id == id).first()\n    if not game:\n        game = Games(id=id,\n                     name=name,\n                     detail_url=detail_url,\n                     date_added=date_added,\n                     date_last_updated=date_last_updated,\n                     deck=deck,\n                     image_url=image_url)\n        db.session.add(game)\n    return game\n\ndef create_or_update_game(id, name, api_detail_url, date_added, date_last_updated, deck, description):\n    game = Games.query.filter(Games.id == id).first()\n    if not game:\n        game = Games(id=id,\n                     name=name,\n                     api_detail_url=api_detail_url,\n                     date_added=date_added,\n                     date_last_updated=date_last_updated,\n                     deck=deck,\n                     description=description)\n        db.session.add(game)\n    else:\n        game.name = name\n        game.api_detail_url = api_detail_url\n        game.date_added = date_added\n        game.date_last_updated = date_last_updated\n        game.deck = deck\n        game.description = description\n\n    db.session.commit()\n\n\ndef find_game(id):\n    game = Games.query.filter(Games.id == id).first()\n    if not game:\n        game = None\n    return game\n","sub_path":"app/startup/create_games.py","file_name":"create_games.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"616728313","text":"#!/usr/bin/env python\n\nimport roslib\nimport rospy\nimport sys\nimport math\nimport time\nimport pigpio\n\nfrom std_srvs.srv import *\n\n\nclass SpiderServiceNode:\n    \n    def __init__(self):\n        self.pin_vakuum = rospy.get_param('~pin_vakuum', 15)\n        self.pin_power = rospy.get_param('~pin_power', 21)\n        self.debug_mode = rospy.get_param('~debug_mode', False)\n\n        # LED = 22\n        self.beep = 6\n\n        self.v_service = rospy.Service('trigger_vakuum', SetBool, self.vakuum_service)\n        self.p_service = rospy.Service('trigger_power', SetBool, self.power_service) \n\n        try:\n            if not self.debug_mode:\n                self.pi = pigpio.pi()\n                self.pi.set_mode(self.pin_vakuum, pigpio.OUTPUT)\n                self.pi.set_mode(self.pin_power, pigpio.OUTPUT)\n        except:\n            rospy.logerr(\"SpiderServicesNode::Error: unable to set mode of pin!\")\n            return\n\n        while not rospy.is_shutdown():\n            rospy.spin()\n\n    def buzzer(self):\n        self.pi.set_PWM_dutycycle(self.beep, 5)\n        time.sleep(0.5)\n        self.pi.write(self.beep, 0)\n        time.sleep(0.5)\n\n    def vakuum_service(self, req):\n        resp = SetBoolResponse()\n        resp.success = 
True\n\n try:\n if req.data:\n if not self.debug_mode:\n self.pi.write(self.pin_vakuum, 1)\n resp.message = \"Switched Vakuum ON\"\n else:\n if not self.debug_mode:\n self.pi.write(self.pin_vakuum, 0)\n resp.message = \"Switched Vakuum OFF\"\n except:\n rospy.logerr(\"SpiderServiceNode::Error: unable to read vakuum pin!\")\n resp.message = \"Error: unable to read vakuum pin!\"\n resp.success = False\n\n return resp\n\n def power_service(self, req):\n resp = SetBoolResponse()\n resp.success = True\n\n try:\n if req.data:\n if not self.debug_mode:\n self.pi.write(self.pin_power, 1)\n resp.message = \"Switched Power ON\"\n else:\n if not self.debug_mode:\n self.pi.write(self.pin_power, 0)\n resp.message = \"Switched Power OFF\"\n except:\n rospy.logerr(\"SpiderServiceNode::Error: unable to read power pin!\")\n resp.message = \"Error: unable to read power pin!\"\n resp.success = False\n\n return resp\n\nif __name__ == '__main__':\n rospy.init_node('spider_service_node')\n try:\n ne = SpiderServiceNode()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"spider/src/services_node.py","file_name":"services_node.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66905576","text":"from sys import stdin, stdout\r\nfrom itertools import chain\r\nfrom collections import defaultdict\r\n\r\ndef solve(elements):\r\n counts = defaultdict(int)\r\n\r\n for h in elements:\r\n counts[h] += 1\r\n\r\n missing = []\r\n for h, cnt in counts.items():\r\n if cnt % 2 == 1:\r\n missing.append(h)\r\n\r\n missing.sort()\r\n\r\n return \" \".join(map(str, missing))\r\n\r\n\r\n\r\nT = int(stdin.readline())\r\n\r\nfor t in range(T):\r\n N = int(stdin.readline())\r\n\r\n elements = []\r\n for _ in range(2 * N - 1):\r\n elements.extend(map(int, stdin.readline().strip().split()))\r\n\r\n result = solve(elements)\r\n\r\n stdout.write(\"Case #%d: %s\\n\"%(t+1, result))","sub_path":"solutions_5630113748090880_0/Python/aphi/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"67163890","text":"import numpy as np\n\ndef split_sequence(sequence, n_steps):\n x, y = list(), list() \n for i in range(len(sequence)):\n end_ix = i + n_steps\n if end_ix > len(sequence) - 1:\n break\n \n seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]\n x.append(seq_x)\n y.append(seq_y)\n \n return np.array(x), np.array(y)\n\nif __name__=='__main__':\n print(split_sequence('something', 5))\n\n dataset = np.arange(10, 101, 10)\n print(dataset)\n\n x, y = split_sequence(dataset, 3)\n\n for i in range(len(x)):\n print(x[i], y[i])\n \n\n","sub_path":"keras22_univeriate2.py","file_name":"keras22_univeriate2.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"525506769","text":"import random\ndef game_info():\n print('欢迎来到数字推理游戏')\n print('系统会随机抽取三位不相同的数字')\n print('根据系统提示来推理出这3位数字')\n\ndef Double_choice(a,b,hint):\n '''双选择验证函数\n :param a 第一个选项值\n :param b 第二个选项值\n :param hint 选项信息\n :return 返回输入值'''\n choice=''\n while choice.lower() !=a and choice.lower() !=b:\n print(hint)\n choice=input()\n return choice\ndef number_limited(number):\n '''用户输入限制\n :param number 用户输入数字\n :return 返回用户输入数字'''\n #用户限制思路 检查必须是整数,限制3位数字,检查数字不能重复\n while True:\n if not number.isdigit():\n print('请输入整数数字')\n elif len(number)!=3:\n print('请输入三位数字')\n elif 
len(set(list(number)))!=3:\n print('三个数字不能重复')\n else:\n break\n number=input()\n return number\ndef random_number_list(count):\n '''随机抽取数字\n :param count 抽取数字的位数 最大不能超过10\n :return 返回随机抽取的3位不重复数字'''\n number_list=[]\n random_list=list(range(10))\n random.shuffle(random_list)#打乱顺序\n for i in range(count):\n random_number=random.choice(random_list)\n random_list.remove(random_number)\n number_list.append(str(random_number))\n return number_list\ndef judge_prompt(user_list,number_list):\n '''数字判断提示\n :param user_list 用户数字列表\n :param number_list 随机数字列表\n :return True 猜对了 False猜错了'''\n #判断思路,先判断都相等,有几个数字是数字和位置对的,没有的话查找数字对的,在没有的话就是都猜错了\n pico=0 #数字对了位置不对\n fermi=0 #位置数字对了\n if user_list==number_list:\n return True\n for i in range(len(user_list)):\n if user_list[i]==number_list[i]:\n fermi+=1\n elif user_list[i] in number_list:\n pico+=1\n if fermi:\n print('猜中了数字和位置(%d个)'%fermi)\n if pico:\n print('猜中了数字没有猜中位置(%d个)' % pico)\n if not fermi and not pico:\n print('没有一个数字和位置是对的')\n return False\ndef game_start():\n '''游戏判断核心'''\n number_list = random_number_list(count=3)\n count=9\n while count:\n print('猜猜看(%d次机会)'%count)\n user_number=number_limited(input())\n if judge_prompt(list(user_number),number_list):\n break\n count-=1\n if count==0:\n print('你输了,这个数字是%s'%''.join(number_list))\n else:\n print('玩家获胜,这个数字是%s'%''.join(number_list))\n\ndef game_shell():\n '''外壳程序'''\n game_info() # 游戏开始提示\n game_start()\n while True:\n message='你想在玩一次吗(Y or N)'\n again_flag=Double_choice('y','n',message)\n if again_flag=='n':\n break\n game_start()\n\ngame_shell()","sub_path":"pythonFile/newRp/pythonFile/pygame/Bagels.py","file_name":"Bagels.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364749985","text":"import re\n\nfrom time import strptime\nfrom calendar import timegm\nfrom util import hook, http, timesince\n\n@hook.regex(r'liveleak.com/view\\?(?:.+&)?i=([a-z0-9]{3}_[0-9]{10})')\ndef liveleak_url(match):\n\ttry:\n\t\tdoc = http.get_html('http://mobile.liveleak.com/view?ajax=1&p=1&i=%s' % match.group(1))\n\texcept HTTPError:\n\t\treturn 'error connecting to liveleak'\n\n\tif doc.get_element_by_id('body_text') is None or doc.get_element_by_id('item_info_%s' % match.group(1)) is None:\n\t\t# This means that the video URL was invalid\n\t\treturn\n\n\tinfo = {};\n\tinfo['description'] = doc.find_class('section_title')[0].text_content().strip()\n\n\tinfo_text = doc.get_element_by_id('item_info_%s' % match.group(1)).text_content()\n\n\tinfo['section'] = re.search('In: ((?:[^ ]+\\s)+)\\s{6}', info_text).group(1).strip()\n\tinfo['username'] = re.search('By: ((?:[^ ]+\\s)+)\\s{6}', info_text).group(1).strip()\n\tinfo['upload_date_since'] = re.search('Added: (.+?)\\s*(?:Occurred On|By):', info_text).group(1).strip()\n\tinfo['views_count'] = int(re.search('Views: (\\d+)', info_text).group(1).strip())\n\n\tif u'ago' in info['upload_date_since']:\n\t\tinfo['upload_date_since'] = re.sub('ago', '', info['upload_date_since']).strip()\n\telse:\n\t\tinfo['upload_date_since'] = timesince.timesince(\n\t\t\ttimegm(\n\t\t\t\tstrptime(\n\t\t\t\t\tinfo['upload_date_since'], '%b-%d-%Y'\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n\treturn (\"\\x02%(description)s\\x02 in \\x02%(section)s\\x02 - \"\n\t\t\t \"\\x02%(views_count)d\\x02 views - \"\n\t\t\t \"\\x02%(upload_date_since)s ago\\x02 by \\x02%(username)s\\x02\"\n\t\t\t % 
info)\n","sub_path":"plugins/liveleak.py","file_name":"liveleak.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614852425","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module holds the interface to the application settings.\n\"\"\"\n\n# Imports #####################################################################\nfrom __future__ import print_function\n\nimport os\nimport re\n\nimport pkg_resources\n\nimport ruamel.yaml\n\nfrom .utils import absjoin\n\ntry:\n from configparser import ConfigParser\nexcept ImportError:\n from ConfigParser import SafeConfigParser as ConfigParser\n\n\n# Metadata ####################################################################\n__author__ = \"Timothy McFadden\"\n__creationDate__ = \"05-JUN-2017\"\n\n# Globals #####################################################################\nDEBUG = os.environ.get(\"NOOGLE_DEBUG\", False)\n\n_SETTINGS = None\nSETTINGS_FILENAME = \"noogle.ini\"\nTHIS_DIR = os.path.abspath(os.path.dirname(__file__))\nINSTANCE_FOLDER = absjoin(THIS_DIR, \"..\", \"instance\")\n\nTOKEN_FOLDER = absjoin(INSTANCE_FOLDER, \"tokens\")\nDATA_FOLDER = absjoin(INSTANCE_FOLDER, \"data\")\nCONFIG_FOLDER = absjoin(INSTANCE_FOLDER, \"config\")\n\nDEPLOY_CONFIG_PATH = absjoin(CONFIG_FOLDER, \"deploy.yaml\")\nCIRCUS_INI_PATH = absjoin(CONFIG_FOLDER, \"circus.ini\")\n\n_DEFAULT_SETTINGS_FOLDER = absjoin(INSTANCE_FOLDER, \"config\")\nSETTINGS_FOLDER = os.getenv(\"SETTINGS_FOLDER\", _DEFAULT_SETTINGS_FOLDER)\nSETTINGS_PATH = absjoin(SETTINGS_FOLDER, SETTINGS_FILENAME)\nFILE_SEARCH = [SETTINGS_PATH]\n\n# These settings will be removed from `as_string`\nSECRET_SETTINGS = [\"nest.product-id\", \"nest.product-secret\", \"nest.access-token\"]\n\nDEPLOY_SETTINGS = {}\nif os.path.exists(DEPLOY_CONFIG_PATH):\n with open(DEPLOY_CONFIG_PATH) as fh:\n DEPLOY_SETTINGS = ruamel.yaml.safe_load(fh)\n\n\ndef get_settings():\n \"\"\"\n Return, or create and return, the settings object.\n \"\"\"\n global _SETTINGS\n if _SETTINGS:\n return _SETTINGS\n\n _SETTINGS = Settings()\n return _SETTINGS\n\n\nclass Settings(object):\n \"\"\"A simple interface to a project's settings stored as a dictionary.\"\"\"\n\n settings = {\n \"general\": {\"use-logfile\": True, \"debug\": DEBUG},\n \"nest\": {\n \"structure\": None,\n \"thermostat\": None,\n \"eco-temperature\": 50,\n \"maximum-hold-days\": 10,\n \"product-id\": None,\n \"product-secret\": None,\n \"access-token\": None,\n },\n \"calendar\": {\n \"name\": \"primary\",\n \"default-home-time\": \"9:00\",\n \"default-away-time\": \"19:00\",\n \"lookback\": 2,\n \"timezone\": \"MST\",\n },\n }\n\n def __init__(self):\n # Do our own converstions for certain items. The ones built in to\n # ConfigParser (e.g. 
`.getboolean()`) are finicky.\n        self.conversions = {\n            \"general.use-logfile\": self._to_bool_or_none,\n            \"calendar.lookback\": self._to_int,\n        }\n\n        config = ConfigParser()\n\n        self._user_path = SETTINGS_PATH\n        self._loaded_paths = config.read(FILE_SEARCH)\n\n        for section in config.sections():\n            for key, value in config.items(section):\n                if value is not None:\n                    self.settings[section][key] = value\n\n        self._validate()\n\n    def _to_int(self, value, base=10):\n        \"\"\"\n        Tries to convert the value to an integer or None.\n        \"\"\"\n        if isinstance(value, int):\n            return value\n        elif (value is None) or (not value.isdigit()):\n            return None\n\n        return int(value, base=base)\n\n    def _to_bool_or_none(self, value):\n        \"\"\"\n        Tries to convert the value to a boolean or None. We use this\n        because `ConfigParser.getboolean()` does not work with None.\n        \"\"\"\n        if isinstance(value, bool):\n            return value\n        elif value is None:\n            return None\n\n        return bool(re.match(r\"^[1ty]\", str(value), re.IGNORECASE))\n\n    def _validate(self):\n        \"\"\"\n        Validates the settings to ensure they're correct.\n        \"\"\"\n        start = self.settings[\"calendar\"][\"default-home-time\"]\n        if not re.match(r\"^\\d+:\\d{2}$\", start):\n            raise ValueError(\n                (\n                    \"calendar.default-home-time ({0}) not \" \"in correct format: H:mm\"\n                ).format(start)\n            )\n\n    def get(self, item, default=None):\n        \"\"\"\n        Get a setting in the form of \"section.key\" (e.g. \"nest.thermostat\").\n        \"\"\"\n        section, key = item.split(\".\", 1)\n        val = self.settings.get(section, {}).get(key, default)\n\n        if item in self.conversions:\n            return self.conversions[item](val)\n\n        return val\n\n    def set(self, item, value):\n        \"\"\"\n        Set a setting in the form of `\"section.key\" = value` (e.g. \"nest.thermostat\", 'Test').\n        \"\"\"\n        section, key = item.split(\".\", 1)\n        self.settings[section][key] = value\n\n    def as_ini_file(self):\n        \"\"\"\n        Return the settings formatted as an INI file. 
This would be used to\n create a user-config file.\n \"\"\"\n default_settings_file = pkg_resources.resource_filename(\n \"noogle\", \"cli/dev/templates/conf-format.ini\"\n )\n\n text = open(default_settings_file).read()\n\n # FYI, we don't need to convert these values; They will default to\n # string(), which is fine.\n return text.format(\n use_logfile=\"\"\n if self.settings[\"general\"][\"use-logfile\"] is None\n else self.settings[\"general\"][\"use-logfile\"],\n nest_thermostat=\"\"\n if self.settings[\"nest\"][\"thermostat\"] is None\n else self.settings[\"nest\"][\"thermostat\"],\n nest_structure=\"\"\n if self.settings[\"nest\"][\"structure\"] is None\n else self.settings[\"nest\"][\"structure\"],\n nest_eco_temperature=\"\"\n if self.settings[\"nest\"][\"eco-temperature\"] is None\n else self.settings[\"nest\"][\"eco-temperature\"],\n nest_max_hold=\"\"\n if self.settings[\"nest\"][\"maximum-hold-days\"] is None\n else self.settings[\"nest\"][\"maximum-hold-days\"],\n gcal_calendar_id=\"\"\n if self.settings[\"calendar\"][\"name\"] is None\n else self.settings[\"calendar\"][\"name\"],\n default_start_time=\"\"\n if self.settings[\"calendar\"][\"default-home-time\"] is None\n else self.settings[\"calendar\"][\"default-home-time\"],\n lookback=\"\"\n if self.settings[\"calendar\"][\"lookback\"] is None\n else self.settings[\"calendar\"][\"lookback\"],\n timezone=\"\"\n if self.settings[\"calendar\"][\"timezone\"] is None\n else self.settings[\"calendar\"][\"timezone\"],\n )\n\n def as_string(self, mask=True):\n \"\"\"\n Return the settings as a formatted string.\n \"\"\"\n lines = []\n\n for section in sorted(self.settings.keys()):\n for key in sorted(self.settings[section].keys()):\n value = self.settings[section][key]\n modified = section + \".\" + key\n if (value is not None) and mask and (modified in SECRET_SETTINGS):\n lines.append(\"%s.%s = \" % (section, key))\n elif value is not None:\n lines.append(\"%s.%s = %s\" % (section, key, value))\n else:\n lines.append(\"%s.%s = \" % (section, key))\n\n return \"\\n\".join(lines)\n\n def save(self):\n \"\"\"\n Stores the settings to the user's configuration file.\n \"\"\"\n path = self._user_path\n dirname = os.path.dirname(path)\n\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n text = self.as_ini_file()\n\n open(path, \"wb\").write(text.encode(\"utf-8\"))\n\n def print_settings(self):\n \"\"\"Display the project settings\"\"\"\n print(self.as_string())\n\n if self._loaded_paths:\n print(\"\\nSettings files loaded in the following order:\")\n for index, path in enumerate(self._loaded_paths):\n print(\" %i) %s\" % (index + 1, path))\n\n print(\"\")\n\n def make_user_settings(self, display_result=False):\n \"\"\"\n Create a user settings file.\n \"\"\"\n self.save()\n\n if display_result:\n print(\"Settings file stored at: %s\" % self._user_path)\n","sub_path":"noogle/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242861678","text":"def patterns(num,s):\n snip=[]\n for i in range(len(s)):\n if (len(s[i:i+num])==num):\n snip.append(s[i:i+num])\n return snip\n\ndef list_all_dict(s):\n res = {}\n result = []\n for i in range(1,len(s)):\n res[i] = patterns(i,s)\n dict={}\n for key in res[i]:\n dict[key] = dict.get(key,0)+1\n dict_last = {i:dict[i] for i in dict if dict[i]>2}\n if dict_last:\n result.append(dict_last)\n return result\n\ndef closed_pattern(result_all):\n list_copy =[]\n for i 
in result_all:\n dict_copy = {}\n for key,value in i.items():\n dict_copy[key] = str(value)+\"_Y\"\n list_copy.append(dict_copy)\n for i in range(1,len(result_all)):\n myDict = result_all[i]\n for key,value in myDict.items():\n prior = key[0:len(key)-1]\n post = key[1:len(key)]\n\n prior_dict = result_all [i-1]\n if prior in prior_dict.keys() and post in prior_dict.keys():\n\n if myDict[key] == prior_dict[prior]:\n list_copy[i-1][prior] = list_copy[i-1][prior].replace(\"Y\",\"N\")\n if myDict[key] == prior_dict[post]:\n list_copy[i-1][post] = list_copy[i-1][post].replace(\"Y\",\"N\")\n return list_copy\n\n\nseq = []\nwith open('Training-New/Gram-positive-576-Train.txt','r') as f:\n content = f.readlines()\n for each in content:\n if each == \" \" or each == \"\\n\":\n continue\n seq.append(each.strip('\\n'))\n\nflag = 0\nfor i in seq:\n id = i.split(',')[0]\n x = i.split(',')[1]\n\n pattern_x = []\n pattren_results = []\n result_all = list_all_dict(x)\n last = closed_pattern(result_all)\n # print(last[1::])\n\n # print(x+i.split(',')[0])\n\n concatnate_id = str(id)+'#'+str(x)+'#'\n concatnate_list = []\n for i in last:\n for key in i.keys():\n if len(key)>1:\n concatnate_list.append(str(key)+'_'+str(i[key]))\n concatnate_str = ','.join(concatnate_list)\n concatnate_result = concatnate_id+concatnate_str\n # print(concatnate_result)\n print(flag)\n files = open('Gram-positive-576-Train_ClosedPatterns.txt','a+',encoding=\"utf-8\")\n files.write(concatnate_result+'\\n')\n flag += 1\n\n for i in concatnate_list:\n # print(i)\n file = open('Gram-positive-576-Train_Patterns.txt','a+',encoding=\"utf-8\")\n file.write(i+'\\n')\nfiles.close()\nfile.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"snip.py","file_name":"snip.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"399065456","text":"import numpy as np\nimport scipy\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport numpy as np\nfrom random import random, seed\nfrom sklearn.model_selection import train_test_split\nimport sklearn.linear_model as linear_model\nimport sklearn as skl\nimport sklearn.metrics as metrics\nimport pylab\nimport scipy.stats as stats\nimport seaborn as sns\nfrom seaborn import heatmap\nfrom imageio import imread\nimport sys\n\n\ndef CreateDesignMatrix(x, y, n = 5):\n\t\"\"\"\n\tFunction for creating a design X-matrix with rows [1, x, y, x^2, xy, xy^2 , etc.]\n\tInput is x and y mesh or raveled mesh, keyword agruments n is the degree of the polynomial you want to fit.\n\t\"\"\"\n\tif len(x.shape) > 1:\n\t\tx = np.ravel(x)\n\t\ty = np.ravel(y)\n\tN = len(x)\n\tl = int((n+1)*(n+2)/2)\t\t# Number of elements in beta\n\tX = np.ones((N,l))\n\tfor i in range(1,n+1):\n\t\tq = int((i)*(i+1)/2)\n\t\tfor k in range(i+1):\n\t\t\tX[:,q+k] = x**(i-k) * y**k\n\treturn X\ndef FrankeFunction(x,y):\n term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))\n term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))\n term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))\n term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)\n return term1 + term2 + term3 + term4\n\n\ndef MSE(z, z_hat):\n\t#Methods for calculating the mean squared error and the r2-score\n\t# Test that the input is in vector shape\n\n\tif (len(z.shape) > 1):\n\t\tz = np.ravel(z)\n\tif (len(z_hat.shape)) > 
1:\n\t\tz_hat = np.ravel(z_hat)\n\tSYY = (z-z_hat)@(z-z_hat)\n\treturn SYY/len(z)\n\n\n\ndef r2_score(z, z_hat):\n\t# Method for calculating the r2-score\n\t# Test that the input is in vector shape\n\tif (len(z.shape) > 1):\n\t\t\tz = np.ravel(z)\n\tif (len(z_hat.shape)) > 1:\n\t\tz_hat = np.ravel(z_hat)\n\tn = len(z)\n\ten = (z-z_hat)@(z-z_hat)\n\tmean = np.sum(z)/n\n\tsst = (z - mean)@(z - mean)\n\treturn 1-(en/sst)\n#Generate x- and y-values (linearly spaced between 0 and 1) for testing the model\n\ndef createMesh(num_points):\n    xp = np.linspace(0, 1, num_points)\n    yp = np.linspace(0, 1, num_points)\n    x, y = np.meshgrid(xp, yp)\n    return x, y\n\n\n\ndef exploreOLS(num_points, polydegree, verbose = False, noise_degree = 0):\n\t#Create a meshgrid of x- and y-values linearly spaced between 0 and 1\n\t#Create the target values, generated from the function we want to approximate.\n\t#Calculate the coefficient vector beta by OLS (we need to use np.ravel to get the z-values in vector form).\n\t#Predicted values:\n\tn = num_points**2\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx, y = createMesh(num_points)\n\tX = CreateDesignMatrix(x, y, polydegree)\n\t#TODO: Test if singular matrix, use SVD.\n\tz = FrankeFunction(x, y)\n\t#add gaussian noise\n\tz_noisy = np.ravel(FrankeFunction(x, y)) + noise_degree*np.random.randn(n)\n\tz_noisy = np.reshape(z_noisy, (num_points, num_points))\n\n\tH = scipy.linalg.pinv(X.T@X)\n\tbeta_ols = H@(X.T@np.ravel(z))\n\tbeta_ols_noisy = H@(X.T@np.ravel(z_noisy))\n\tz_estimated = np.reshape(X@beta_ols, (num_points, num_points))\n\tz_estimated_noisy = np.reshape(X@beta_ols_noisy, (num_points, num_points))\n\t#We want to calculate the variance:\n\tavg = np.mean(z)\n\tavg_noisy = np.mean(z_noisy)\n\tvariance = np.var(z)\n\tvariance_noisy = np.var(z_noisy)\n\tV = np.diag(H*variance)\n\tV_noisy = np.diag(H*variance_noisy)\n\tresiduals = z_estimated_noisy-z_noisy\n\tif verbose:\n\t    fig = plt.figure(figsize=(9, 3))\n\t    ax1 = fig.add_subplot(1,3 , 1, projection = '3d')\n\t    ax1.set_title(\"1\")\n\t    ax1.plot_surface(x, y, z_noisy, cmap=cm.coolwarm, antialiased=False)\n\t    # Plot the approximated surface from the OLS-regression:\n\t    ax2 = fig.add_subplot(1, 3, 2, projection = '3d')\n\t    ax2.set_title(\"2\" )\n\t    ax2.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)\n\t    fig2 = plt.figure()\n\t    ax3 = fig.add_subplot(1, 3, 3, projection = '3d')\n\t    ax3.set_title(\"3\" )\n\t    ax3.plot_surface(x, y, z_estimated_noisy, cmap=cm.coolwarm, antialiased=False)\n\t    # Quantile-quantile plot for residuals between estimated and true (noisy) data.\n\t    stats.probplot(np.ravel(residuals), dist=\"norm\", plot=pylab)\n\n\t#Print statistical results:\n\t    print(\"Performed OLS-regression with a polynomial of degree %i.\\nR2-score:%f\" % (polydegree, r2_score(z_estimated,z)))\n\t    print(\"Mean squared error:\", MSE(z_estimated,z))\n\t    print(\"\\n\\n\")\n\t    print(\"Performed OLS-regression with a polynomial of degree %i with N(0, 1) noise added.\\nR2-score:%f \" % (polydegree, r2_score(z_estimated_noisy,z_noisy)))\n\t    print(\"Mean squared error with N(0, 1)-noise:\", MSE(z_estimated_noisy,z_noisy))\n\t    printConfidenceIntervals(beta_ols_noisy, V)\n\t    print(\"Mean squared error (estimated values from model trained on noisy data vs values without noise):\", MSE(z_estimated_noisy, z))\n\t    print(\"r2-score (estimated values from model trained on noisy data vs values without noise):\", r2_score(z_estimated_noisy, z))\n\treturn 
(r2_score(z_estimated_noisy, z_noisy), MSE(z_estimated_noisy, z_noisy))\n\ndef exb(num_points, polydegree, verbose = False):\n    #K-fold resampling:\n    # 1) Make a set of indices, shuffle it randomly and use this as a reference for the x- and y vectors\n    # 2) split the data into 10 folds, shuffled by the indices array\n    #\n\n    x = np.linspace(0, 1, num_points)\n    y = np.linspace(0, 1, num_points)\n    x, y = np.meshgrid(x, y)\n    x = np.ravel(x)\n    y = np.ravel(y)\n    ind = np.array(range(num_points**2))\n    np.random.shuffle(ind)\n    k = 10\n    fold_size = int((num_points**2)/k)\n    x_train_fold = np.zeros( (k-1)*fold_size)\n    y_train_fold = np.zeros( (k-1)*fold_size)\n    x_test_fold = np.zeros(fold_size)\n    y_test_fold = np.zeros(fold_size)\n    beta = np.empty((k, int((polydegree+1)*(polydegree+2)/2)))\n    mse_vec = np.zeros(k)\n    r2_vec = np.zeros(k)\n    bias_vec = np.zeros(k)\n    s_vec = np.zeros(k)\n    for i in range(k):\n\n        ind1 = ind[:fold_size*i]\n        ind2 = ind[fold_size*(i+1):]\n        train_ind = np.array(list(ind1)+list(ind2))\n        test_ind = ind[fold_size*i: fold_size*(i+1)]\n        for j in range(len(train_ind)):\n            x_train_fold[j] = x[train_ind[j]]\n            y_train_fold[j] = y[train_ind[j]]\n        for l in range(fold_size):\n            x_test_fold[l] = x[test_ind[l]]\n            y_test_fold[l] = y[test_ind[l]]\n\n        X_train_fold = CreateDesignMatrix(x_train_fold, y_train_fold, polydegree)\n        X_test_fold = CreateDesignMatrix(x_test_fold, y_test_fold, polydegree)\n        z_train_fold = FrankeFunction(x_train_fold, y_train_fold) + .2*np.random.randn(len(x_train_fold))\n        z_test_fold = FrankeFunction(x_test_fold, y_test_fold)+0.2*np.random.randn(len(x_test_fold))\n        H_fold = np.linalg.pinv(X_train_fold.T@X_train_fold)\n        beta_ols_fold = H_fold@(X_train_fold.T@z_train_fold)\n        estimated = X_test_fold@beta_ols_fold\n        mse_vec[i] = MSE(estimated, z_test_fold)\n        r2_vec[i] = r2_score(estimated, z_test_fold)\n        beta[i] = beta_ols_fold\n        s_vec[i] = np.mean((estimated - np.mean(estimated))**2)\n\n        bias_vec[i] = np.mean(z_test_fold-np.mean(estimated))\n\n    # Average the per-fold results\n    beta = sum(beta)/k\n    r2 = sum(r2_vec)/k\n    mse = sum(mse_vec)/k\n    bias = sum(bias_vec)/k\n    s = sum(s_vec)/k\n    return (r2, mse, s, bias)\n    # return (r2_score(z_test_estimated, z_test), MSE(z_test_estimated, z_test))\n    # return 0, 0\n\n\ndef k_fold(model, x, y, num, polydegree, lamb, noise_level = 0):\n\tk = 10 # Number of folds\n\tind = np.array(range(num**2))\n\tnp.random.shuffle(ind)\n\tfold_size = int((num**2)/k)\n\tx_train_fold = np.zeros( (k-1)*fold_size)\n\ty_train_fold = np.zeros( (k-1)*fold_size)\n\tx_test_fold = np.zeros(fold_size)\n\ty_test_fold = np.zeros(fold_size)\n\tbetas = np.empty((k, int((polydegree+1)*(polydegree+2)/2)))\n\tmse_vec = np.zeros(k)\n\tr2_vec = np.zeros(k)\n\tbias_vec = np.zeros(k)\n\ts_vec = np.zeros(k)\n\tfor i in range(k):\n\n\t\tind1 = ind[:fold_size*i]\n\t\tind2 = ind[fold_size*(i+1):]\n\t\ttrain_ind = np.array(list(ind1)+list(ind2))\n\t\ttest_ind = ind[fold_size*i: fold_size*(i+1)]\n\t\tfor j in range(len(train_ind)):\n\t\t\tx_train_fold[j] = x[train_ind[j]]\n\t\t\ty_train_fold[j] = y[train_ind[j]]\n\t\tfor l in range(fold_size):\n\t\t\tx_test_fold[l] = x[test_ind[l]]\n\t\t\ty_test_fold[l] = y[test_ind[l]]\n\t\tX_train_fold = CreateDesignMatrix(x_train_fold, y_train_fold, polydegree)\n\t\tX_test_fold = CreateDesignMatrix(x_test_fold, y_test_fold, polydegree)\n\n\t\tz_train_fold = FrankeFunction(x_train_fold, y_train_fold) + noise_level*np.random.randn(len(x_train_fold))\n\t\tz_test_fold = FrankeFunction(x_test_fold, y_test_fold)+noise_level*np.random.randn(len(x_test_fold))\n\t\tbeta = 
model(X_train_fold, z_train_fold, lamb)\n\t\testimated = X_test_fold@beta\n\t\tmse_vec[i] = MSE(estimated, z_test_fold)\n\t\tr2_vec[i] = r2_score(estimated, z_test_fold)\n\t\tbetas[i] = beta\n\t\ts_vec[i] = np.mean((estimated - np.mean(estimated))**2)\n\t\tbias_vec[i] = np.mean(z_test_fold-np.mean(estimated))\n\tr2 = sum(r2_vec)/k\n\tmse = sum(mse_vec)/k\n\tbias = sum(bias_vec)/k\n\ts = sum(s_vec)/k\n\treturn (r2, mse, s, bias)\ndef ridge(X, z, l):\n\tI = np.identity(X.shape[1])\n\tH = np.linalg.pinv(X.T@X + l*I)\n\tbeta = H@(X.T@z)\n\treturn beta\ndef ols(X, z, l=0):\n\t# l is unused; it keeps ols call-compatible with ridge and lasso\n\tH = np.linalg.pinv(X.T@X)\n\tbeta = H@(X.T@z)\n\treturn beta\ndef lasso(X, z, l):\n\tclf = linear_model.Lasso(alpha=l, fit_intercept=False)\n\tclf.fit(X, z)\n\tbeta = clf.coef_\n\treturn beta\n\ndef printConfidenceIntervals(beta, variance_matrix):\n    percent=0.95\n    if len(variance_matrix.shape)>1:\n        n = variance_matrix.shape[0]\n        for i in range(n):\n            print(\"Estimated %.2f confidence interval for Beta %i : %f (-/+) %.4f\" %(percent,i, beta[i], 1.96*np.sqrt(variance_matrix[i][i]) ) )\n            print(\" - - - - - - - - - - - - \")\n    else:\n        n = len(variance_matrix)\n        for i in range(n):\n            print(\"Estimated %.2f confidence interval for Beta %i : %f (-/+) %.4f\" %(percent,i, beta[i], 1.96*np.sqrt(variance_matrix[i]) ) )\n            print(\" - - - - - - - - - - - - \")\n\ndef textEx(exfunc, num_points, n):\n#This is a method that performs the OLS-regression, calculates the MSE and R2-score\n#and plots the results as a function of the polynomial degree.\n#This is for data that has not been split into test and training sets, and with no noise added.\n    x = np.ones(n)\n    y = np.ones(n)\n    z = np.ones(n)\n    for i in range(0,n):\n        y[i] = exfunc(num_points, i+1)[0]\n        z[i] = exfunc(num_points, i+1)[1]\n        x[i] = i\n    fig = plt.figure(figsize = (6, 4))\n    ax1 = fig.add_subplot(2,1, 1)\n    ax1.plot(x, y)\n    plt.ylabel(\"R2-score\")\n    # ax1.set_title(\"R2-score(polynomial degree)\")\n    ax2 = fig.add_subplot(2, 1, 2)\n    plt.plot(x, z)\n    plt.ylabel(\"MSE\")\n    plt.xlabel(\"Polynomial degree n\")\n\ndef testExNoisy(exfunc, num_points, n,noise):\n#This is a method that performs the OLS-regression, calculates the MSE and R2-score\n#and plots the results as a function of the polynomial degree.\n#This is for data that has not been split into test and training sets, and with noise added.\n    x = np.ones(n)\n    y = np.ones(n)\n    z = np.ones(n)\n    for i in range(0,n):\n        y[i] = exfunc(num_points, i+1, False, noise)[0]\n        z[i] = exfunc(num_points, i+1, False, noise)[1]\n        x[i] = i\n\n    return x, y, z\n\ndef testEx2(exfunc, num_points, n):\n#This is a method that performs the OLS-regression, calculates the MSE and R2-score\n#and plots the results as a function of the polynomial degree.\n#This is for data that has not been split into test and training sets, and with no noise added.\n    y = np.ones(n)\n    z = np.ones(n)\n    x = np.array(range(n))\n    for i in range(0,n):\n        y[i] ,z[i] = exfunc(num_points, i+1)\n\n    return x, y, z\ndef biasVarianceTradeoff(exfunc, num_points, n, plot=False):\n\t#This is a method that performs the regression, calculates the MSE and R2-score\n\t#and plots the results as a function of the polynomial degree.\n\t#This is for data that has not been split into test and training sets, and with no noise added.\n\tx = np.ones(n)\n\tr2 = np.ones(n)\n\tmse = np.ones(n)\n\tb = np.ones(n)\n\tv = np.ones(n)\n\tx = np.array(range(n))\n\tfor i in range(0,n):\n\t\tr2[i] ,mse[i] , v[i] ,b[i] = exfunc(num_points, i+1)\n\t\tprint(mse[i]- (b[i]**2 + v[i]))\n\tif plot:\n\t\tfig = plt.figure(figsize = (10, 
10))\n\t\tax1 = fig.add_subplot(2, 2, 1)\n\t\tax1.set_title(\"R2-score(polynomial degree)\")\n\t\tax1.plot(x, r2)\n\t\tax2 = fig.add_subplot(2, 2, 2)\n\t\tax2.set_title(\"MSE(polynomial degree)\")\n\t\tax2.plot(x, mse)\n\t\tax3 = fig.add_subplot(2, 2, 3)\n\t\tax3.set_title(\"Bias(polynomial degree)\")\n\t\tax3.plot(x, b)\n\t\tax4 = fig.add_subplot(2, 2, 4)\n\t\tax4.plot(x, v)\n\t\tax4.set_title(\"Variance(polynomial degree)\")\n\t\tax4.plot(x, v)\n\treturn r2, mse\n\n\ndef train_test(model, noise_degree, polydegree, num_points, ridgeparam=0):\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\tx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.36)\n\tX_train = CreateDesignMatrix(x_train, y_train, polydegree)\n\tX_test = CreateDesignMatrix(x_test, y_test, polydegree)\n\tz_train = FrankeFunction(x_train, y_train) + noise_degree*np.random.randn(len(x_train))\n\tz_test = FrankeFunction(x_test, y_test) + noise_degree*np.random.randn(len(x_test))\n\tbeta = model(X_train, z_train, ridgeparam)\n\tz_pred = X_test@beta\n\treturn(MSE(z_test, z_pred), r2_score(z_test, z_pred))\n\ndef exc(ridge, num_points, noise_degree, ridgeparam):\n\tk = 20\n\tarr = np.arange(k)\n\tmse = np.zeros(k)\n\tr = np.zeros(k)\n\tfor i in range(0, k):\n\t mse[i], r[i] = train_test(ols, noise_degree, i, num_points, ridgeparam)\n\tfig = plt.figure(figsize = (6, 4))\n\tax1 = fig.add_subplot(2,1, 1)\n\tax1.set_title(\"MSE & r2-score as functions of complexity. Variance = %.2f\" %(noise_degree**2))\n\tax1.plot(arr, mse)\n\tplt.ylabel(\"MSE\")\n\t# ax1.set_title(\"R2-score(polynomial degree)\")\n\tax2 = fig.add_subplot(2, 1, 2)\n\tplt.plot(arr, r)\n\tplt.ylabel(\"r2-score\")\n\tplt.xlabel(\"Polynomial degree n\")\n\n# Code to generate heatmap of hyperparameter tuning.\ndef tuneParameters(model, noise_level, n = 4, num_points = 100):\n\tfig = plt.figure()\n\t#This is a method to explore the relationship between the ridge parameter lambda,\n\t# the model complexity and the MSE.\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\tM = np.empty((n, n))\n\tT = np.zeros(n)\n\tind = np.linspace(0, 2*(n/100), n)\n\tfor i in range(len(ind)):\n\t\tind[i] = (2*i)/100\n\tfor r in range(1, n+1):\n\t\tfor i in range(1, n+1):\n\t\t\tT[i-1] = k_fold( model,x, y, num_points, i, 2*((r-1)/100), noise_level)[1]\n\t\t\t# T[i-1] = k_fold(ridge,x, y, 50, i, 10**(10-n+i), noise_level)[1]\n\t\tM[r-1]=T\n\tarr = np.array(np.where(M == np.min(M)\t)).flatten()\n\tlamb, comp = indToLambdaN(n, arr)\n\tprint(\"Optimal complexity: \", comp, \" lambda: \", lamb, \". 
MSE = \", np.min(M))\n\t# ax = sns.heatmap(M)\n\tplt.imshow(M, origin='lower', extent = [0, n, 0, n])\n\tax = plt.gca()\n\tax.set(yticks= (np.arange(1, n+1)-0.5))\n\tax.set(yticklabels = ind)\n\tax.set(xticks= (np.arange(1, n+1)-0.5))\n\tax.set(xticklabels = np.arange(1, n+1))\n\tplt.xlabel(\"Polynomial degree\")\n\tplt.ylabel(\"Ridge parameter lambda\")\n\tplt.title(\"MSE as a function of lambda and complexity\")\n\tplt.colorbar()\n\ndef tuneParametersLasso(model, noise_level, n = 4, num_points = 100 ):\n\tfig = plt.figure()\n\t#This is a method to explore the relationship between the ridge parameter lambda,\n\t# the model complexity and the MSE.\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\tM = np.empty((n, n))\n\tT = np.zeros(n)\n\tind = np.linspace(0, 2*(n/100), n)\n\tfor i in range(len(ind)):\n\t\tind[i] = (5*i)/10000\n\tfor r in range(1, n+1):\n\t\tfor i in range(1, n+1):\n\t\t\tT[i-1] = k_fold(model,x, y, num_points, i, (5*r/10000), noise_level)[1]\n\t\tM[r-1]=T\n\n\tplt.imshow(M, origin='lower', extent = [0, n, 0, n])\n\tax = plt.gca()\n\tax.set(yticks= (np.arange(1, n+1)-0.5))\n\tax.set(yticklabels = ind)\n\tax.set(xticks= (np.arange(1, n+1)-0.5))\n\tax.set(xticklabels = np.arange(1, n+1))\n\tplt.xlabel(\"Polynomial degree\")\n\tplt.ylabel(\"Lasso parameter lambda\")\n\tplt.title(\"MSE as a function of lambda and complexity\")\n\tplt.colorbar()\ndef indToLambdaN(n, ind):\n\tlamb = (ind[0]*2)/100\n\tn = ind[1]\n\treturn lamb, n\n\ndef initialize(num_points):\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\treturn x, y\n\ndef exd(model, lamb, num_points):\n\t#Initialization\n\tx, y = initialize(num_points)\n\tz = FrankeFunction(x, y)\n\tM = np.zeros(6)\n\tR = np.zeros(6)\n\tfor i in range(1, 6):\n\t\tX = CreateDesignMatrix(x, y, i)\n\t\tbeta = ridge(X, z, lamb)\n\t\tpred = X@beta\n\t\tmse = MSE(z, pred)\n\t\tr2 = r2_score(z, pred)\n\t\tM[i-1] = mse\n\t\tR[i-1] = r2\n\t\tprint(\"Degree: %i MSE: %.3f r2-score: %.3f\" %(i, mse, r2))\n\tX = CreateDesignMatrix(x, y, 50)\n\tbeta = model(X, y, lamb)\n\tpred = X@beta\n\tmse = MSE(z, pred)\n\tr2 = r2_score(z, pred)\n\tprint(\"Degree: %i MSE: %.3f r2-score: %.3f\" %(50, mse, r2))\n\ndef exd2(model, noise_degree, lamb, num_points):\n\tM = np.zeros(20)\n\tR = np.zeros(20)\n\tx = np.linspace(0, 1, num_points)\n\ty = np.linspace(0, 1, num_points)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\n\tfor i in range(1, 21):\n\t\tret = k_fold(model, x, y, num_points, i, lamb, noise_degree)\n\t\tM[i-1] = ret[1]\n\t\tR[i-1] = ret[0]\n\tind = np.arange(1, 21)\n\tfig = plt.figure(figsize = (6, 4))\n\tax1 = fig.add_subplot(2,1, 1)\n\tax1.set_title(\"MSE & r2-score as functions of complexity. K-fold. 
variance = %.2f.\" %(noise_degree**2))\n\tax1.plot(ind, M)\n\tplt.ylabel(\"MSE-score\")\n\t# ax1.set_title(\"R2-score(polynomial degree)\")\n\tax2 = fig.add_subplot(2, 1, 2)\n\tplt.plot(ind, R)\n\tplt.ylabel(\"r2\")\n\tplt.xlabel(\"Polynomial degree n\")\n\n\ndef surface_plot(surface,title, surface1=None):\n    M,N = surface.shape\n    ax_rows = np.arange(M)\n    ax_cols = np.arange(N)\n    [X,Y] = np.meshgrid(ax_cols, ax_rows)\n    fig = plt.figure()\n    if surface1 is not None:\n        ax = fig.add_subplot(1,2,1,projection='3d')\n        ax.plot_surface(X,Y,surface,cmap=cm.coolwarm,linewidth=0)\n        plt.title(title)\n        ax = fig.add_subplot(1,2,2,projection='3d')\n        ax.plot_surface(X,Y,surface1,cmap=cm.coolwarm,linewidth=0)\n        plt.title(title)\n    else:\n        ax = fig.gca(projection='3d')\n        ax.plot_surface(X,Y,surface,cmap=cm.viridis,linewidth=0)\n        plt.title(title)\ndef surface_plot_1(x, y, z, title):\n\tfig = plt.figure()\n\tax = fig.add_subplot(projection='3d')\n\tax.plot_surface(x,y,z, cmap=cm.coolwarm,linewidth=0)\n\tplt.title(title)\n\n\ndef k_fold_terrain(model, x, y, data, polydegree, lamb, folds, noise_level = 0):\n\tk = folds # Number of folds\n\tind = np.arange(len(data))\n\tnp.random.shuffle(ind)\n\tfold_size = int((len(x))/k)\n\tx_train_fold = np.zeros( (k-1)*fold_size)\n\ty_train_fold = np.zeros( (k-1)*fold_size)\n\tz_train_fold = np.zeros((k-1)*fold_size)\n\tx_test_fold = np.zeros(fold_size)\n\ty_test_fold = np.zeros(fold_size)\n\tz_test_fold = np.zeros(fold_size)\n\tbetas = np.empty((k, int((polydegree+1)*(polydegree+2)/2)))\n\tmse_vec = np.zeros(k)\n\tr2_vec = np.zeros(k)\n\tbias_vec = np.zeros(k)\n\ts_vec = np.zeros(k)\n\tfor i in range(k):\n\t\tind1 = ind[:fold_size*i]\n\t\tind2 = ind[fold_size*(i+1):]\n\t\ttrain_ind = np.array(list(ind1)+list(ind2))\n\t\ttest_ind = ind[fold_size*i: fold_size*(i+1)]\n\t\tfor j in range(len(train_ind)-1):\n\t\t\tx_train_fold[j] = x[train_ind[j]]\n\t\t\ty_train_fold[j] = y[train_ind[j]]\n\t\t\tz_train_fold[j] = data[train_ind[j]]\n\t\tfor l in range(fold_size):\n\t\t\tx_test_fold[l] = x[test_ind[l]]\n\t\t\ty_test_fold[l] = y[test_ind[l]]\n\t\t\tz_test_fold[l] = data[test_ind[l]]\n\t\tX_train_fold = CreateDesignMatrix(x_train_fold, y_train_fold, polydegree)\n\t\tX_test_fold = CreateDesignMatrix(x_test_fold, y_test_fold, polydegree)\n\t\tbeta = model(X_train_fold, z_train_fold, lamb)\n\t\testimated = X_test_fold@beta\n\t\tmse_vec[i] = metrics.mean_squared_error(estimated, z_test_fold)\n\t\tr2_vec[i] = r2_score(estimated, z_test_fold)\n\t\tbetas[i] = beta\n\t\ts_vec[i] = np.var(estimated)\n\t\tbias_vec[i] = np.mean((z_test_fold-np.mean(estimated) )**2, keepdims=True)\n\tr2 = sum(r2_vec)/k\n\tmse = sum(mse_vec)/k\n\tbias = sum(bias_vec)/k\n\ts = sum(s_vec)/k\n\treturn (r2, mse, s, bias)\n\ndef terrain_function(model, terrain, polynomial_degree, lamb, folds):\n\tx = np.linspace(0, 1.8, 1801)\n\ty = np.linspace(0, 3.6, 3601)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\tret = k_fold_terrain(model, x, y, np.ravel(terrain), polynomial_degree, lamb, folds)\n\tprint(\"Polynomial degree: %i. 
Lambda: %f \nMSE: %f \nR2-score: %f\" % (polynomial_degree, lamb, ret[1], ret[0]))\n\t# f.write(\"%i & %6.3f & %f & %f\\\\\\ \\n\" % (polynomial_degree, lamb, ret[1], ret[0]))\n\n\ndef terrain_ols(model, terrain, polynomial_degree, lamb):\n\t# x = np.linspace(0, 1.8, 1801)\n\t# y = np.linspace(0, 3.6, 3601)\n\tdim_x = terrain.shape[1]\n\tdim_y = terrain.shape[0]\n\tx = np.linspace(0, (dim_x-1)/1000, dim_x)\n\ty = np.linspace(0, (dim_y-1)/1000, dim_y)\n\tprint(x, y)\n\tx, y = np.meshgrid(x, y)\n\tx = np.ravel(x)\n\ty = np.ravel(y)\n\n\tterrain = np.ravel(terrain)\n\tX = CreateDesignMatrix(x, y, polynomial_degree)\n\n\tbeta = model(X, terrain, lamb)\n\tprint(\"Betas:\" , beta)\n\treturn beta, X\n\n\n\n\n# x = np.arange(terrain.shape[1])\n# y = np.arange(terrain.shape[0])\n# x, y = np.meshgrid(x, y)\n\n# surface_plot_1(x, y, np.reshape(predicted, (3601,1801)), \"Parameterized surface\" )\n# # data = terrain_function(terrain, 2)\n# f = open(\"Kjoring.txt\", \"w\")\n# for i in range(0, 7):\n# \tterrain_function(ridge, terrain, 9, 0, 5)\n\n# for polydegree in range(1, 7):\n# \tfor lamb in range(1, 7):\n# \t\tterrain_function(ridge, terrain, polydegree, lamb/100, 5)\n# for polydegree in range(1, 6):\n# \tfor lamb in range(1, 5):\n# \t\tterrain_function(lasso, terrain, polydegree, (5*lamb)/10000, 5)\n# f.close()\n# x = np.arange(1801)\n# y = np.arange(3601)\n# x, y = np.meshgrid(x, y)\n# x = np.ravel(x)\n# y = np.ravel(y)\n# X = CreateDesignMatrix(x, y, 5)\n# reg = skl.linear_model.LinearRegression(fit_intercept = True, normalize = True)\n# reg.fit(X, np.ravel(terrain))\n# beta = reg.coef_\n# data = X@beta\n# print(beta)\n# y = np.arange(3601)\n# x = np.arange(1801)\n# x, y = np.meshgrid(x, y)\n#\n# data = np.reshape(data, (3601, 1801))\n#\n#\n# plt.imshow(data)\n\n\ndef normalized_terrain(model, data, polydegree, lamb):\n\tplt.figure()\n\txd = data.shape[1]\n\tyd = data.shape[0]\n\tscalex = xd/max(xd,yd)\n\tscaley = yd/max(xd,yd)\n\tx_ind = np.linspace(0, scalex, xd)\n\ty_ind = np.linspace(0, scaley, yd)\n\tx, y = np.meshgrid(x_ind, y_ind)\n\tX = CreateDesignMatrix(np.ravel(x), np.ravel(y), polydegree)\n\tbeta = model(X, np.ravel(data), lamb)\n\tprint(beta)\n\tdat = X@beta\n\tdata = np.reshape(dat, (yd, xd))\n\tplt.imshow(data, cmap = 'coolwarm')\n\tplt.colorbar()\n\tplt.title(\"Parameterized terrain\")\n\tplt.figure()\n\tplt.imshow(data)\n\tplt.show()\n\treturn data\n\n'''CODE THAT PRODUCES FIGURE 3'''\n# num_points = 100\n# polydeg = 3\n# x, y = createMesh(num_points)\n# frankeData = FrankeFunction(x, y)\n# X = CreateDesignMatrix(np.ravel(x), np.ravel(y), polydeg)\n# betas = ols(X, np.ravel(frankeData))\n# estimated = X@betas\n# estmated = np.reshape(estimated, (num_points, num_points))\n# surface_plot_1( np.reshape(x, (num_points, num_points)), np.reshape(y, (num_points, num_points)), np.reshape(estimated, (num_points, num_points)),\"Polynomial degree =%i\"%polydeg)\n#\n# polydeg = 5\n# x, y = createMesh(num_points)\n# frankeData = FrankeFunction(x, y)\n# X = CreateDesignMatrix(np.ravel(x), np.ravel(y), polydeg)\n# betas = ols(X, np.ravel(frankeData))\n# estimated = X@betas\n# estmated = np.reshape(estimated, (num_points, num_points))\n# surface_plot_1( np.reshape(x, (num_points, num_points)), np.reshape(y, (num_points, num_points)), np.reshape(estimated, (num_points, num_points)),\"Polynomial degree = %i\"%polydeg)\n#\n# polydeg = 20\n# x, y = createMesh(num_points)\n# frankeData = FrankeFunction(x, y)\n# X = CreateDesignMatrix(np.ravel(x), np.ravel(y), 3)\n# betas = ols(X, 
np.ravel(frankeData))\n# estimated = X@betas\n# estmated = np.reshape(estimated, (num_points, num_points))\n# surface_plot_1( np.reshape(x, (num_points, num_points)), np.reshape(y, (num_points, num_points)), np.reshape(estimated, (num_points, num_points)),\"Polynomial degree = 3\")\n#END OF CODE THAT PRODUCES FIGURE 3\n\n'''Code that produces figure 4'''\n#textEx(exploreOLS, 100, 15)\n#END OF CODE THAT PRODUCES FIGURE 4\n\n\n\n\n\n\n'''Code that produces figure 6'''\n# k = 30\n# mse = np.zeros(k)\n# ind = np.arange(k)\n# fig5 = plt.figure()\n# for i in range(0, k):\n#     mse[i] = exploreOLS(100, i, False, 0.3)[1]\n# plt.plot(ind, mse)\n# for i in range(0, k):\n#     mse[i] = exploreOLS(100, i, False, 0.6)[1]\n# plt.plot(ind, mse)\n#\n# for i in range(0, k):\n#     mse[i] = exploreOLS(100, i, False, 0.9)[1]\n# plt.plot(ind, mse)\n#\n# plt.legend([\"lambda = 0.3\", \"lambda = 0.6\", \"lambda = 0.9\"])\n# plt.title(\"MSE as a function of complexity\")\n#END OF CODE THAT GENERATES FIGURE 6\n\n'''CODE THAT GENERATES FIGURE 7 '''\n# Figure 7a\n# exc(ridge, 20, 0, 0)\n# Figure 7b\n# exc(ridge, 20, 0.5, 0)\n#CODE THAT GENERATES FIGURE 7\n\n'''CODE THAT GENERATES FIGURE 8'''\n# Figure 8a\n# exc(ridge, 100, 0, 0)\n# Figure 8b\n# exc(ridge, 100, 0.5, 0)\n# END OF CODE THAT GENERATES FIGURE 8\n\n'''CODE THAT GENERATES FIGURE 9'''\n# 9a\n# exd2(ridge, 0.5,0, 100)\n# 9b\n# exd2(ridge, 0.5,0, 100)\n# END OF CODE THAT GENERATES FIGURE 9\n\n'''CODE THAT GENERATES FIGURE 10'''\n# tuneParameters(ridge, 0, 15, 100)\n# END OF CODE THAT GENERATES FIGURE 10\n\n\n'''CODE THAT GENERATES FIGURE 11'''\n#11a\n# tuneParameters(ridge, 0.5, 15, 100)\n# 11b\n# tuneParameters(ridge, 0.5, 15, 20)\n# END OF CODE THAT GENERATES FIGURE 11\n\n'''CODE THAT GENERATES FIGURE 12'''\n# 12a\n# exd2(ridge, 0.5, 0.04, 10)\n# 12b\n# exd2(ridge, 0.5, 0.04, 100)\n\n# END OF CODE THAT GENERATES FIGURE 12\n\n\n'''CODE THAT GENERATES FIGURE 13'''\n# 13a\n# tuneParametersLasso(lasso, 0.5,15, 20)\n# 13b\n# tuneParametersLasso(lasso, 0.5, 15, 100)\n# END OF CODE THAT GENERATES FIGURE 13\n\n\nterrain = imread('SRTM_data_Norway_1.tif')\nplt.figure()\nplt.title('Terrain over Norway 1')\nplt.imshow(terrain, cmap='coolwarm')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.colorbar()\n\n\nplt.show()\n","sub_path":"Programming/plotsAndResults.py","file_name":"plotsAndResults.py","file_ext":"py","file_size_in_byte":26184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"120587863","text":"from django.contrib import admin\nfrom suit.admin import SortableModelAdmin, SortableTabularInline\n\nfrom django import forms\nfrom django.forms.util import ErrorList\n\nfrom erikcarter.portfolio.models import PortfolioInformation, Dictum, Medium, \\\n    Project, ProjectImage, SubliminalImage\n\n\nclass InfoModelForm(forms.ModelForm):\n    def clean(self):\n        if PortfolioInformation.objects.count() > 1:\n            self._errors.setdefault('__all__', ErrorList()) \\\n                .append(\"You can only create one portfolio, dude.\")\n        return self.cleaned_data\n\n\nclass DictumInline(SortableTabularInline):\n    model = Dictum\n    extra = 0\n    sortable = 'sort_order'\n    exclude = ['text_html']\n\n\nclass InfoAdmin(admin.ModelAdmin):\n    fieldsets = [\n        ('Portfolio Information', {'fields': [\n            'name',\n            'favicon',\n            'short_bio',\n            'bio',\n            'bio_media',\n            'contact',\n            'awards',\n            'bottom_text'\n        ]}),\n    ]\n    inlines = [\n        DictumInline\n    ]\n    readonly_fields = ('name',)\n    form = InfoModelForm\n\n\nclass MediumAdmin(admin.ModelAdmin):\n    fieldsets = [\n        ('Mediums', {'fields': 
[\n            'name',\n            'slug'\n        ]}),\n    ]\n    prepopulated_fields = {'slug': ('name',)}\n\n\nclass ProjectImageInline(SortableTabularInline):\n    model = ProjectImage\n    extra = 0\n    sortable = 'sort_order'\n\n\nclass ProjectAdmin(SortableModelAdmin):\n    fieldsets = [\n        ('Project', {'fields': [\n            'name',\n            'slug',\n            'medium',\n            'info',\n            'commission',\n            'featured'\n        ]}),\n    ]\n    inlines = [\n        ProjectImageInline\n    ]\n    prepopulated_fields = {'slug': ('name',)}\n    sortable = 'sort_order'\n\n\nadmin.site.register(SubliminalImage)\nadmin.site.register(PortfolioInformation, InfoAdmin)\nadmin.site.register(Medium, MediumAdmin)\nadmin.site.register(Project, ProjectAdmin)\n","sub_path":"erikcarter/portfolio/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"319385105","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n        pt1 = pt2 = head\n        for i in range(n):\n            pt1 = pt1.next\n        if not pt1:\n            return head.next\n        while pt1.next:\n            pt1 = pt1.next\n            pt2 = pt2.next\n        pt2.next = pt2.next.next\n        return head","sub_path":"linkedlist/remove_nthnodeFromend_ll.py","file_name":"remove_nthnodeFromend_ll.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341346684","text":"from keras.preprocessing.text import Tokenizer\n\ntext = '나는 맛있는 밥을 먹었다'\n\ntoken = Tokenizer() # splits a single sentence into words and indexes (numericizes) them \ntoken.fit_on_texts([text])\n\nprint(token.word_index) # {'나는': 1, '맛있는': 2, '밥을': 3, '먹었다': 4}\n\n\nx = token.texts_to_sequences([text])\nprint(x)    # [[1, 2, 3, 4]] \n            # problem: '나는' and '먹었다' end up with different numeric weights.\n\nfrom keras.utils import to_categorical\n\nword_size = len(token.word_index) + 1 # +1 to include index [0]\nx = to_categorical(x, num_classes= word_size)\nprint(x)\n# [[[0. 1. 0. 0. 0.] # problem: with more words the data (columns) grows far too large\n# [0. 0. 1. 0. 0.]\n# [0. 0. 0. 1. 0.]\n# [0. 0. 0. 0. 
1.]]]\n\n","sub_path":"keras/keras120_embedding1.py","file_name":"keras120_embedding1.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63725060","text":"__author__ = 'Corey'\n# coding: utf-8\nimport pylab as pl\n\n# http://reverland.org/python/2012/09/07/matplotlib-tutorial/#pylab\n# a backup copy is kept in Evernote\n\nx1 = [1, 2, 3, 4, 5, 6, 7] # Make x, y arrays for each graph\ny1 = [456.4, 412.98, 378.34, 345.98, 310.3, 297.89, 234.9]\n\nx2 = [1, 2, 3, 4, 5, 6, 7]\ny2 = [478.45, 450.9, 411.89, 369.6, 340.34, 305.67, 286.56]\n# use pylab to plot x and y\npl.plot(x1, y1, 'r')\npl.plot(x2, y2, 'g')\n\npl.title('LevelDB - DA-LSM') # give plot a title\npl.xlabel('recordcount axis') # make axis labels\npl.ylabel('throughput axis')\n\npl.xlim(0.0, 9.0) # set axis limits\npl.ylim(200.0, 500.0)\n\npl.show() # show the plot on the screen\n\n\n\n\n#---------------------------\nfrom pylab import *\n\nn = 256\nX = np.linspace(-np.pi, np.pi, 256, endpoint=True)\nC, S = np.cos(X), np.sin(X)\nplot(X, C), plot(X, S)\n\n#savefig(\"../figures/exercice_1.png\",dpi=72)\nshow()\n","sub_path":"PythonCode/helloworld/pylab_example.py","file_name":"pylab_example.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"435425160","text":"#!/usr/bin/env python3\n\nimport connexion\nfrom .encoder import JSONEncoder\n\n\nif __name__ == '__main__':\n    app = connexion.App(__name__, specification_dir='./swagger/')\n    app.app.json_encoder = JSONEncoder\n    app.add_api('swagger.yaml', arguments={'title': 'Stores and Retrieves information needed for the robot simulation'})\n    app.run(host=\"ec2-54-202-25-115.us-west-2.compute.amazonaws.com\", port=8080)\n","sub_path":"python-flask-server/swagger_server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604109679","text":"from sql_alchemy import base\n\nclass HotelModel(base.Model):\n    __tablename__ = 'list_hotels'\n    hotel_id = base.Column(base.String, primary_key = True)\n    name = base.Column(base.String(80))\n    score = base.Column(base.Float(precision = 1))\n    price = base.Column(base.Float(precision = 2))\n    city = base.Column(base.String(40))\n\n\n    def __init__(self, hotel_id, name, score, price, city):\n        self.hotel_id = hotel_id\n        self.name = name\n        self.score = score\n        self.price = price\n        self.city = city\n\n\n    def json(self):\n        return {\n            'hotel_id': self.hotel_id,\n            'name': self.name,\n            'score': self.score,\n            'price': self.price,\n            'city': self.city\n        }\n\n\n    @classmethod\n    def find_hotel(cls, hotel_id):\n        hotel = cls.query.filter_by(hotel_id = hotel_id).first()\n        if hotel:\n            return hotel\n        else:\n            return None\n\n\n    # @staticmethod\n    # def find_hotel(hotel_id):\n    #     for for_hotel in list_hotels:\n    #         if for_hotel['hotel_id'] == hotel_id:\n    #             return for_hotel\n    #     return None\n\n\n    def save_hotel(self):\n        base.session.add(self)\n        base.session.commit()\n\n\n    def update_hotel(self, name, score, price, city):\n        self.name = name\n        self.score = score\n        self.price = price\n        self.city = city\n\n\n    def delete_hotel(self):\n        base.session.delete(self)\n        base.session.commit()","sub_path":"models/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462564191","text":"# 
-*- coding: utf-8 -*-\n\n# Redistribution and use in source and binary forms of this file,\n# with or without modification, are permitted. See the Creative\n# Commons Zero (CC0 1.0) License for more details.\n\n# Heart Rate Bricklet communication config\n\nfrom commonconstants import THRESHOLD_OPTION_CONSTANT_GROUP\n\ncom = {\n 'author': 'Ishraq Ibne Ashraf ',\n 'api_version': [2, 0, 0],\n 'category': 'Bricklet',\n 'device_identifier': 245,\n 'name': 'Heart Rate',\n 'display_name': 'Heart Rate',\n 'manufacturer': 'Tinkerforge',\n 'description': {\n 'en': 'Measures heart rate',\n 'de': 'Misst Herzfrequenz'\n },\n 'released': False,\n 'documented': False,\n 'discontinued': False,\n 'features': [\n 'bricklet_get_identity'\n ],\n 'constant_groups': [],\n 'packets': [],\n 'examples': []\n}\n\ncom['constant_groups'].append(THRESHOLD_OPTION_CONSTANT_GROUP)\n\ncom['constant_groups'].append({\n'name': 'Beat State',\n'type': 'uint8',\n'constants': [('Falling', 0),\n ('Rising', 1)]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Get Heart Rate',\n'elements': [('Heart Rate', 'uint16', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['bf', {\n'en':\n\"\"\"\nReturns the current heart rate measured.\n\nIf you want to get the heart rate periodically, it is recommended\nto use the :cb:`Heart Rate` callback and set the period with\n:func:`Set Heart Rate Callback Period`.\n\"\"\",\n'de':\n\"\"\"\nGibt die Herzschlagfrequenz des Sensors zurück.\n\nWenn die Herzschlagfrequenz periodisch abgefragt werden soll,\nwird empfohlen den :cb:`Heart Rate` Callback zu nutzen und die Periode\nmit :func:`Set Heart Rate Callback Period` vorzugeben.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Set Heart Rate Callback Period',\n'elements': [('Period', 'uint32', 1, 'in')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nSets the period in ms with which the :cb:`Heart Rate` callback is triggered\nperiodically. A value of 0 turns the callback off.\n\nThe :cb:`Heart Rate` callback is only triggered if the heart rate has changed\nsince the last triggering.\n\nThe default value is 0.\n\"\"\",\n'de':\n\"\"\"\nSetzt die Periode in ms mit welcher der :cb:`Heart Rate` Callback ausgelöst wird.\nEin Wert von 0 deaktiviert den Callback.\n\nDer :cb:`Heart Rate` Callback wird nur ausgelöst, wenn sich die\nHerzschlagfrequenz seit der letzten Auslösung geändert hat.\n\nDer Standardwert ist 0.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Get Heart Rate Callback Period',\n'elements': [('Period', 'uint32', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nReturns the period as set by :func:`Set Heart Rate Callback Period`.\n\"\"\",\n'de':\n\"\"\"\nGibt die Periode zurück, wie von :func:`Set Heart Rate Callback Period` gesetzt.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Set Heart Rate Callback Threshold',\n'elements': [('Option', 'char', 1, 'in', {'constant_group': 'Threshold Option'}),\n ('Min', 'uint16', 1, 'in'),\n ('Max', 'uint16', 1, 'in')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nSets the thresholds for the :cb:`Heart Rate Reached` callback.\n\nThe following options are possible:\n\n.. 
csv-table::\n :header: \"Option\", \"Description\"\n :widths: 10, 100\n\n \"'x'\", \"Callback is turned off\"\n \"'o'\", \"Callback is triggered when the heart rate is *outside* the min and max values\"\n \"'i'\", \"Callback is triggered when the heart rate is *inside* the min and max values\"\n \"'<'\", \"Callback is triggered when the heart rate is smaller than the min value (max is ignored)\"\n \"'>'\", \"Callback is triggered when the heart rate is greater than the min value (max is ignored)\"\n\nThe default value is ('x', 0, 0).\n\"\"\",\n'de':\n\"\"\"\nSetzt den Schwellwert für den :cb:`Heart Rate Reached` Callback.\n\nDie folgenden Optionen sind möglich:\n\n.. csv-table::\n :header: \"Option\", \"Beschreibung\"\n :widths: 10, 100\n\n \"'x'\", \"Callback ist inaktiv\"\n \"'o'\", \"Callback wird ausgelöst, wenn die Herzschlagfrequenz *außerhalb* des min und max Wertes ist\"\n \"'i'\", \"Callback wird ausgelöst, wenn die Herzschlagfrequenz *innerhalb* des min und max Wertes ist\"\n \"'<'\", \"Callback wird ausgelöst, wenn die Herzschlagfrequenz kleiner als der min Wert ist (max wird ignoriert)\"\n \"'>'\", \"Callback wird ausgelöst, wenn die Herzschlagfrequenz größer als der min Wert ist (max wird ignoriert)\"\n\nDer Standardwert ist ('x', 0, 0).\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Get Heart Rate Callback Threshold',\n'elements': [('Option', 'char', 1, 'out', {'constant_group': 'Threshold Option'}),\n ('Min', 'uint16', 1, 'out'),\n ('Max', 'uint16', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nReturns the threshold as set by :func:`Set Heart Rate Callback Threshold`.\n\"\"\",\n'de':\n\"\"\"\nGibt den Schwellwert zurück, wie von :func:`Set Heart Rate Callback Threshold` gesetzt.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Set Debounce Period',\n'elements': [('Debounce', 'uint32', 1, 'in')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nSets the period in ms with which the threshold callback\n\n* :cb:`Heart Rate Reached`\n\nis triggered, if the threshold\n\n* :func:`Set Heart Rate Callback Threshold`\n\nkeeps being reached.\n\nThe default value is 100.\n\"\"\",\n'de':\n\"\"\"\nSetzt die Periode in ms mit welcher die Schwellwert Callback\n\n* :cb:`Heart Rate Reached`\n\nausgelöst wird, wenn der Schwellwert\n\n* :func:`Set Heart Rate Callback Threshold`\n\nweiterhin erreicht bleibt.\n\nDer Standardwert ist 100.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Get Debounce Period',\n'elements': [('Debounce', 'uint32', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nReturns the debounce period as set by :func:`Set Debounce Period`.\n\"\"\",\n'de':\n\"\"\"\nGibt die Entprellperiode zurück, wie von :func:`Set Debounce Period` gesetzt.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'callback',\n'name': 'Heart Rate',\n'elements': [('Heart Rate', 'uint16', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['c', {\n'en':\n\"\"\"\nThis callback is triggered periodically with the period that is set by\n:func:`Set Heart Rate Callback Period`. The :word:`parameter` is the heart rate\nof the sensor.\n\nThe :cb:`Heart Rate` callback is only triggered if the heart rate has changed\nsince the last triggering.\n\"\"\",\n'de':\n\"\"\"\nDieser Callback wird mit der Periode, wie gesetzt mit\n:func:`Set Heart Rate Callback Period`, ausgelöst. 
Der :word:`parameter` ist\ndie Herzschlagfrequenz des Sensors.\n\nDer :cb:`Heart Rate` Callback wird nur ausgelöst, wenn sich die\nHerzschlagfrequenz seit der letzten Auslösung geändert hat.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'callback',\n'name': 'Heart Rate Reached',\n'elements': [('Heart Rate', 'uint16', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['c', {\n'en':\n\"\"\"\nThis callback is triggered when the threshold as set by\n:func:`Set Heart Rate Callback Threshold` is reached.\nThe :word:`parameter` is the heart rate of the sensor.\n\nIf the threshold keeps being reached, the callback is triggered periodically\nwith the period as set by :func:`Set Debounce Period`.\n\"\"\",\n'de':\n\"\"\"\nDieser Callback wird ausgelöst, wenn der Schwellwert, wie von\n:func:`Set Heart Rate Callback Threshold` gesetzt, erreicht wird.\nDer :word:`parameter` ist die Herzschlagfrequenz des Sensors.\n\nWenn der Schwellwert erreicht bleibt, wird der Callback mit der Periode, wie\nmit :func:`Set Debounce Period` gesetzt, ausgelöst.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'callback',\n'name': 'Beat State Changed',\n'elements': [('State', 'uint8', 1, 'out', {'constant_group': 'Beat State'})],\n'since_firmware': [1, 0, 0],\n'doc': ['c', {\n'en':\n\"\"\"\nThis callback provides the current heart beat state. It is called\nevery time a heart beat is detected. The state can either be\n\n* 0 = Falling: The falling edge of a detected heart beat.\n* 1 = Rising: The rising edge of a detected heart beat.\n\"\"\",\n'de':\n\"\"\"\nDieser Callback übergibt den aktuellen Herzschlag-Status. Der Callback wird\nbei jedem erkannten Herzschlag aufgerufen. Der Zustand kann folgende Werte\nannehmen:\n\n* 0 = Falling: Die fallende Flanke eines erkannten Herzschlags.\n* 1 = Rising: Die steigende Flanke eines erkannten Herzschlags.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Enable Beat State Changed Callback',\n'elements': [],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nEnables the :cb:`Beat State Changed` callback.\n\"\"\",\n'de':\n\"\"\"\nAktiviert den :cb:`Beat State Changed` Callback.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Disable Beat State Changed Callback',\n'elements': [],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nDisables the :cb:`Beat State Changed` callback.\n\"\"\",\n'de':\n\"\"\"\nDeaktiviert den :cb:`Beat State Changed` Callback.\n\"\"\"\n}]\n})\n\ncom['packets'].append({\n'type': 'function',\n'name': 'Is Beat State Changed Callback Enabled',\n'elements': [('Enabled', 'bool', 1, 'out')],\n'since_firmware': [1, 0, 0],\n'doc': ['ccf', {\n'en':\n\"\"\"\nReturns *true* if the :cb:`Beat State Changed` callback is enabled.\n\"\"\",\n'de':\n\"\"\"\nGibt *true* zurück wenn der :cb:`Beat State Changed` Callback aktiviert ist.\n\"\"\"\n}]\n})\n\ncom['examples'].append({\n'name': 'Simple',\n'functions': [('getter', ('Get Heart Rate', 'heart rate'), [(('Heart Rate', 'Heart Rate'), 'uint16', 1, None, 'bpm', None)], [])]\n})\n\ncom['examples'].append({\n'name': 'Callback',\n'functions': [('callback', ('Heart Rate', 'heart rate'), [(('Heart Rate', 'Heart Rate'), 'uint16', 1, None, 'bpm', None)], None, None),\n              ('callback_period', ('Heart Rate', 'heart rate'), [], 1000)]\n})\n\ncom['examples'].append({\n'name': 'Threshold',\n'functions': [('debounce_period', 10000),\n              ('callback', ('Heart Rate Reached', 'heart rate reached'), [(('Heart Rate', 'Heart Rate'), 'uint16', 1, None, 'bpm', None)], None, None),\n              ('callback_threshold', ('Heart Rate', 'heart rate'), [], '>', [(100, 
0)])]\n})\n","sub_path":"configs/bricklet_heart_rate_config.py","file_name":"bricklet_heart_rate_config.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226305101","text":"# header\nimport sys\nread = sys.stdin.buffer.read\n\n\n# function\ndef solve(N):\n if N%2 == 0:\n return \"White\"\n else:\n return \"Black\"\n\n\n# main\nif __name__ == '__main__':\n N = int(read())\n print(solve(N))","sub_path":"venv/ABC181/abc181_a.py","file_name":"abc181_a.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470433247","text":"\"\"\"The sqlite model for a sticker.\"\"\"\nfrom sqlalchemy import (\n Column,\n DateTime,\n func,\n String,\n Table,\n ForeignKey,\n UniqueConstraint,\n)\nfrom sqlalchemy.orm import relationship\n\nfrom stickerfinder.db import base\n\n\nsticker_tag = Table(\n 'sticker_tag', base.metadata,\n Column('sticker_file_id',\n String(),\n ForeignKey('sticker.file_id', ondelete='cascade',\n onupdate='cascade', deferrable=True),\n index=True),\n Column('tag_name',\n String(),\n ForeignKey('tag.name', ondelete='cascade',\n onupdate='cascade', deferrable=True),\n index=True),\n UniqueConstraint('sticker_file_id', 'tag_name'),\n)\n\n\nclass Sticker(base):\n \"\"\"The model for a sticker.\"\"\"\n\n __tablename__ = 'sticker'\n\n file_id = Column(String, primary_key=True)\n text = Column(String)\n original_emojis = Column(String)\n created_at = Column(DateTime, server_default=func.now(), nullable=False)\n updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now(), nullable=False)\n\n sticker_set_name = Column(String, ForeignKey('sticker_set.name',\n onupdate='cascade',\n ondelete='cascade'), index=True)\n sticker_set = relationship(\"StickerSet\", back_populates=\"stickers\")\n\n changes = relationship(\"Change\", order_by=\"desc(Change.created_at)\")\n tags = relationship(\n \"Tag\",\n secondary=sticker_tag,\n back_populates=\"stickers\")\n\n def __init__(self, file_id):\n \"\"\"Create a new sticker.\"\"\"\n self.file_id = file_id\n\n def tags_as_text(self):\n \"\"\"Return tag names as single string.\"\"\"\n tags = [tag.name for tag in self.tags]\n return ', '.join(tags)\n\n def add_emojis(self, session, emojis):\n \"\"\"Add tags for every emoji in the incoming string.\"\"\"\n from stickerfinder.models import Tag\n self.original_emojis = emojis\n for emoji in emojis:\n tag = Tag.get_or_create(session, emoji, emoji=True)\n if tag not in self.tags:\n self.tags.append(tag)\n","sub_path":"stickerfinder/models/sticker.py","file_name":"sticker.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211199060","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom ArduinoSerial import ArduinoSerial\nimport time\n\nif len(sys.argv) < 4:\n sys.stderr.write(\"Usage: %s \\n\"%sys.argv[0])\n sys.exit(1)\n\nserial_port = sys.argv[1]\nformat_string = sys.argv[2]\nlog_filename = sys.argv[3]\nstart_time = int(time.time())\n\ns = ArduinoSerial(serial_port, baudrate=9600, timeout=50)\n\nwhile True:\n line = s.readline()\n if line.strip() == \"OK\":\n break\n\nlog_file = open(log_filename, \"w\")\n\ndef gettimestamp():\n global start_time\n return int(time.time())-start_time\n\ntoken_types = {\n \"s\" : s.read_uint8_t,\n \"i\" : s.read_uint16_t,\n \"d\" : s.read_uint16_t,\n 
\"l\" : s.read_uint32_t,\n \"f\" : s.read_double,\n \"t\" : gettimestamp\n}\n\ndef tee(file_handle, output):\n sys.stdout.write(output)\n file_handle.write(output)\n file_handle.flush()\n\n\ntee(log_file, \"Start-time: %s\"%time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n\ntry:\n token_match = False\n while True:\n for c in format_string:\n if c == \"%\":\n token_match = True\n elif token_match:\n value = str(token_types[c]())\n if value:\n tee(log_file, value)\n token_match = False\n else:\n tee(log_file, c)\n tee(log_file, os.linesep)\nexcept KeyboardInterrupt:\n pass\n\ns.flush()\ns.close()\nlog_file.close()\n","sub_path":"serial-logger/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485302442","text":"from django.contrib.auth.decorators import login_required\nfrom django.urls import path\n\nfrom app_create_chklst import views as accv\nfrom app_create_chklst import chklst_views as accclv\n\napp_name = 'app_create_chklst'\nurlpatterns = [\n # Main page for Cheklists\n path('mainchklst/', login_required(accclv.MainChkLstView.as_view()), name='chk-main'),\n\n # categories and lines tables\n path('catlinemgmt/', login_required(accv.CatandLineMgmtView), name='catlineMgmt'),\n\n # categories\n path('catcreate/', login_required(accv.CategoryCreateView.as_view()), name='chk-catcreate'),\n path('catdisplay/', login_required(accv.CategoryDisplayView.as_view()), name='chk-catdisplay'),\n path('catupdate/', login_required(accv.CategoryUpdateView.as_view()), name='chk-catupdate'),\n path('catdelete/', login_required(accv.CategoryDeleteView.as_view()), name='chk-catdelete'),\n path('catmgmt/', login_required(accv.CategoryMgmtView.as_view()), name='chk-catmgmt'),\n\n # lines\n path('linecreate/', login_required(accv.LineCreateView.as_view()), name='chk-linecreate'),\n path('linedisplay/', login_required(accv.LineDisplayView.as_view()), name='chk-linedisplay'),\n path('lineupdate/', login_required(accv.LineUpdateView.as_view()), name='chk-lineupdate'),\n path('linedelete/', login_required(accv.LineDeleteView.as_view()), name='chk-linedelete'),\n path('linemgmt/', login_required(accv.LineMgmtView.as_view()), name='chk-linemgmt'),\n\n # ckeck-lists\n path('chkcreate/', login_required(accclv.ChkLstCreateView.as_view()), name='chk-chklstcreate'),\n path('chklstdelete/', login_required(accclv.ChklstDeleteView.as_view()), name='chk-chkdelete'),\n path('chklstdisplay/', login_required(accclv.ChklstDisplayView.as_view()), name='chk-chkdisplay'),\n path('chklstupdate/', login_required(accclv.ChkLstUpdateView.as_view()), name='chk-chkupdate'),\n\n # Ajax create chklst\n path('create_chklst/', login_required(accclv.create_chklst), name='create_chklst'),\n]\n","sub_path":"checklistmgr/app_create_chklst/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507503441","text":"# Copyright 2017 The KaiJIN Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A basic configuration\n\"\"\"\nimport socket\nfrom yacs.config import CfgNode as CN\n\n# -------------------------------------------------------------------------------\n# Basic Information\n# -------------------------------------------------------------------------------\n_C = CN(new_allowed=True)\n_C.INFO = CN(new_allowed=True)\n_C.INFO.NAME = 'default'\n_C.INFO.TASK = 'train'\n_C.INFO.DIR = None\n_C.INFO.OUTPUTS = '_outputs/'\n_C.INFO.DEVICE = 'cpu'\n\n# -------------------------------------------------------------------------------\n# Distributed Configuration\n# -------------------------------------------------------------------------------\n_C.DIST = CN(new_allowed=True)\n_C.DIST.DIR = None # remote root directory\n_C.DIST.RUN = False\n_C.DIST.HOSTNAME = socket.gethostname()\n_C.DIST.IS_ROOT = True\n_C.DIST.ROOT_RANK = 0 # dist root rank\n_C.DIST.RANK = 0 # global rank\n_C.DIST.SIZE = 1 # global size\n_C.DIST.LOCAL_RANK = 0 # local rank\n_C.DIST.LOCAL_SIZE = 1 # local size\n\n# -------------------------------------------------------------------------------\n# Log Configuration\n# -------------------------------------------------------------------------------\n_C.LOG = CN(new_allowed=True)\n_C.LOG.TOTAL_EPOCH = 9999\n_C.LOG.PRINT_STEP = 20\n_C.LOG.SAVE_EPOCH = 1\n_C.LOG.VAL_EPOCH = None\n_C.LOG.TEST_EPOCH = None\n\n# -------------------------------------------------------------------------------\n# Model Related Info\n# -------------------------------------------------------------------------------\n_C.MODEL = CN(new_allowed=True)\n_C.MODEL.WEIGHT = None\n# caffe, detectron, maskrcnn-benchmark, mmdetection, torchvision\n_C.MODEL.SOURCE = 'tw'\n_C.MODEL.ARCH = None\n\n# -------------------------------------------------------------------------------\n# Datasets\n# -------------------------------------------------------------------------------\n_C.DATASET = CN(new_allowed=True)\n_C.DATASET.INFO = CN(new_allowed=True)\n_C.DATASET.INFO.NAME = None\n\n# -------------------------------------------------------------------------------\n# Solver\n# -------------------------------------------------------------------------------\n_C.SOLVER = CN()\n_C.SOLVER.OPTIM = CN(new_allowed=True)\n_C.SOLVER.OPTIM.METHOD = 'sgd'\n_C.SOLVER.OPTIM.WD = 0.0000\n\n# LEARNING RATE\n_C.SOLVER.LR = CN(new_allowed=True)\n_C.SOLVER.LR.METHOD = 'constant'\n_C.SOLVER.LR.BASE = 0.1\n","sub_path":"tw/app/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533574756","text":"import sys\nimport random\n\nfrom matplotlib import pyplot as plt\n\nfrom scipy.spatial import cKDTree\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nfrom torchsupport.training.training import SupervisedTraining\n\nfrom 
torchsupport.structured import PackedTensor, ScatterStructure\nfrom torchsupport.structured import DataParallel as SDP\n\nfrom protsupport.data.proteinnet import ProteinNet, ProteinNetKNN\nfrom protsupport.utils.geometry import orientation\nfrom protsupport.modules.flexible_transformer import FlexibleTransformer\n\ndef valid_callback(training, data, predictions):\n  inputs, labels = data\n  confusion = torch.zeros(20, 20)\n  for label, prediction in zip(labels, predictions[0][0]):\n    pred = prediction.argmax(dim=0)\n    confusion[label, pred] += 1\n  fig, ax = plt.subplots()\n  ax.imshow(confusion / confusion.sum(dim=1, keepdim=True), cmap=\"Reds\")\n  training.writer.add_figure(\"confusion\", fig, training.step_id)\n  # for name, parameter in training.net.named_parameters():\n  #   training.writer.add_histogram(f\"phist {name}\", parameter.detach().cpu().numpy(), training.step_id)\n\nclass TransformerNet(ProteinNetKNN):\n  def __init__(self, path, num_neighbours=20, n_jobs=1, cache=True):\n    ProteinNetKNN.__init__(\n      self, path,\n      num_neighbours=num_neighbours,\n      n_jobs=n_jobs, cache=cache\n    )\n    self.ors = torch.tensor(\n      orientation(self.ters[1].numpy() / 100).transpose(2, 0, 1),\n      dtype=torch.float\n    )\n\n  def __getitem__(self, index):\n    window = slice(self.index[index], self.index[index + 1])\n    # slice objects are not subscriptable; cap the window length at 500 instead\n    window = slice(window.start, min(window.stop, window.start + 500))\n    inds = self.inds[window]\n    primary = self.pris[window] - 1\n    evolutionary = self.evos[:, window]\n    tertiary = self.ters[:, :, window]\n    orientation = self.ors[window, :, :].view(\n      window.stop - window.start, -1\n    )\n    distances = self.ters[1, :, window].transpose(0, 1) / 100\n    indices = torch.tensor(\n      range(window.start, window.stop),\n      dtype=torch.float\n    )\n    indices = indices.view(-1, 1)\n\n    orientation = torch.cat((distances, orientation, indices), dim=1)\n    angles = self.angs[:, window].transpose(0, 1)\n\n    tree = cKDTree(distances)\n    connections = tree.query_ball_tree(tree, r=8.0)\n\n    neighbours = ScatterStructure.from_connections(\n      0, 0, connections\n    )\n\n    sin = torch.sin(angles)\n    cos = torch.cos(angles)\n    angle_features = torch.cat((sin, cos), dim=1)\n\n    inputs = (\n      PackedTensor(angle_features),\n      PackedTensor(primary),\n      PackedTensor(orientation),\n      neighbours\n    )\n\n    return inputs, PackedTensor(primary)\n\n  def __len__(self):\n    return ProteinNet.__len__(self)\n\nclass DebugLoss(nn.Module):\n  def __init__(self):\n    super().__init__()\n    self.loss = nn.CrossEntropyLoss()\n\n  def forward(self, inputs, targets):\n    return self.loss(inputs, targets)\n\nclass StructuredTransformerTraining(SupervisedTraining):\n  def each_step(self):\n    learning_rate = torch.pow(torch.tensor(128.0), -0.5)\n    step_num = torch.tensor(float(self.step_id + 1))\n    learning_rate *= min(\n      torch.pow(step_num, -0.5),\n      step_num * torch.pow(torch.tensor(4000.0), -1.5)\n    )\n    self.optimizer.param_groups[0][\"lr\"] = learning_rate\n\nif __name__ == "__main__":\n  num_neighbours = 30 if len(sys.argv) < 4 else int(sys.argv[3])\n  data = TransformerNet(sys.argv[1], num_neighbours=num_neighbours)\n  valid_data = TransformerNet(sys.argv[2], num_neighbours=num_neighbours)\n  net = FlexibleTransformer(\n    6, 20, 128, 10, 64,\n    attention_size=128, heads=8,\n    mlp_depth=2, depth=3, batch_norm=True\n  )\n  training = StructuredTransformerTraining(\n    net, data, valid_data,\n    [DebugLoss()],\n    batch_size=8,\n    max_epochs=1000,\n    optimizer=lambda x: torch.optim.Adam(x, lr=1e-5),\n    device=\"cuda:0\",\n    network_name=\"structured-transformer-long\",\n    valid_callback=valid_callback\n  ).load()\n  final_net = 
training.train()\n","sub_path":"protsupport/training/train_flexible_transformer.py","file_name":"train_flexible_transformer.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"120779711","text":"import discord\nfrom discord.ext import commands\n\nimport json\nfrom difflib import SequenceMatcher\n\nfrom utils import error\n\n\nwith open(\"text_files/cleaned_items.txt\", \"r\") as file:\n    WIKI_ENTRIES = json.load(file)\n\nclass wiki_cog(commands.Cog):\n    def __init__(self, bot):\n        self.client = bot\n\n    @commands.command()\n    async def wiki(self, ctx, *, user_input=None):\n\n        if user_input is None:\n            return await error(ctx, \"No item given.\", \"Please give the item you want to check the price of.\")\n        \n        formatted_id = user_input.replace(\" \", \"_\").replace(\"- \", \"\").upper()\n\n        top_similarity_ratio = 0\n        for name, entry in WIKI_ENTRIES.items():\n            similarity_ratio = SequenceMatcher(None, formatted_id, name).ratio()\n            if similarity_ratio > top_similarity_ratio:\n                top_similarity_ratio = similarity_ratio\n                wiki_entry = entry\n        \n        if top_similarity_ratio < 0.6:\n            # No break: item was not found, show exceptions\n            return await error(ctx, \"No wiki entry with the provided input!\", \"Try giving the internal item name, and exclude special characters.\")\n        \n        # Everything is fine, send it\n        embed = discord.Embed(title=f\"Wiki entry for {formatted_id.replace('_', ' ').title()}:\", description=f\"You can find the wiki entry [here]({wiki_entry}).\", colour=0x3498DB)\n        embed.set_footer(text=f\"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.\")\n        await ctx.send(embed=embed)\n","sub_path":"bot/player_commands/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"271512970","text":"def C(n, r):\n    a = 1\n    for x in range(r, n):\n        a *= (x + 1)\n    b = 1\n    for x in range(n - r):\n        b *= (x + 1)\n    return a // b\n\ndef main():\n    print(C(5, 3))\n    print(C(23, 10))\n    over = []\n    for n in range(1, 100 + 1):\n        for r in range(1, n - 1):\n            c = C(n, r)\n            if c > 1000000:\n                over.append(c)\n    #print(over)\n    print(len(over))\n    print(len(set(over)))\n\nmain()\n","sub_path":"py/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"439468073","text":"import re\n\nfrom livecli.plugin import Plugin\nfrom livecli.plugin.api import http\nfrom livecli.stream import RTMPStream\n\n__livecli_docs__ = {\n    \"domains\": [\n        \"younow.com\",\n    ],\n    \"geo_blocked\": [],\n    \"notes\": \"\",\n    \"live\": True,\n    \"vod\": False,\n    \"last_update\": \"2018-02-16\",\n}\n\n\nclass younow(Plugin):\n\n    _url_re = re.compile(r\"https?://(?:\\w+\\.)?younow\\.com/(?P<channel>[^/&?]+)\")\n\n    api_url = \"https://api.younow.com/php/api/broadcast/info/curId=0/user={0}\"\n\n    @classmethod\n    def can_handle_url(cls, url):\n        return cls._url_re.match(url)\n\n    def _get_streams(self):\n        match = self._url_re.match(self.url)\n        channel = match.group(\"channel\")\n\n        res = http.get(self.api_url.format(channel))\n        data = http.json(res)\n\n        if data.get(\"errorCode\") == 0:\n            media = data.get(\"media\")\n            if media:\n                rtmp_url = \"rtmp://{host}{app}/{stream}\".format(\n                    host=media[\"host\"],\n                    app=media[\"app\"],\n                    stream=media[\"stream\"],\n                )\n                params = {\n                    \"rtmp\": rtmp_url,\n                    \"live\": True\n                
}\n\n yield \"live\", RTMPStream(self.session, params=params)\n\n\n__plugin__ = younow\n","sub_path":"src/livecli/plugins/younow.py","file_name":"younow.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625334688","text":"from datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\n\ndefault_args = {\n \"owner\": \"airflow\",\n \"start_date\": datetime(2019, 1, 1),\n \"retries\": 1,\n \"retry_delay\": timedelta(minutes=5)\n}\n\n\nwith DAG(dag_id=\"simple_catchup_dag\",\n schedule_interval=\"@daily\",\n default_args=default_args,\n catchup=True) as dag:\n task_1 = DummyOperator(task_id=\"task_1\")\n task_2 = DummyOperator(task_id=\"task_2\")\n task_1 >> task_2\n\n","sub_path":"module4-demo3/dags/simple_catchup_dag.py","file_name":"simple_catchup_dag.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460064181","text":"#!/bin/python\n\nfrom __future__ import print_function\nfrom datetime import datetime\nfrom time import sleep\nimport random\nimport syslog\nimport sys\nimport argparse\nimport signal\nimport os\n\n\"\"\"\nThis script simulates a simple service. It runs an infinte loop, sending out random log messages to stdout/stderr and syslyog every once in a while.\nIt's useful to play around with service definitions and background jobs.\n\"\"\"\n\ndef sig_handler(signum, frame):\n\tglobal_logger.log(\"Signal received: {}\".format(signum), severity='info')\n\tif signum == 1 or signum == 15:\n\t\tglobal_logger.log(\"Shutting down.\")\n\t\texit(0)\n\nsignal.signal(signal.SIGUSR1, sig_handler)\nsignal.signal(signal.SIGTERM, sig_handler)\n\n\nclass InfraLogger(object):\n def __init__(self, identifier):\n \n if len(identifier) > 24:\n # Identifiers longer then 24 chars are not correctly handled by logstash...\n print(\"Warning: identifier '{}' is too long and will be truncated. 
Max length is 24\".format(identifier))\n            identifier = identifier[:21] + '...'\n        \n        self.identifier = identifier\n        self.syslog_facility = syslog.LOG_USER\n        \n        syslog.openlog(logoption=syslog.LOG_PID, ident=self.identifier, facility=self.syslog_facility)\n\n    \n    def _get_syslog_severity(self, sev):\n        sev_mapping = {\n            'debug': syslog.LOG_DEBUG,\n            'info': syslog.LOG_INFO,\n            'notice': syslog.LOG_NOTICE,\n            'warning': syslog.LOG_WARNING,\n            'error': syslog.LOG_ERR\n        }\n        if sev.lower() not in sev_mapping.keys():\n            # ArgumentException is not a Python builtin; raise ValueError instead\n            raise ValueError(\"severity must be one of '{}'\".format(','.join(sev_mapping.keys())))\n        else:\n            return sev_mapping[sev]\n    \n    def log(self, msg, severity='notice', to_syslog=True): \n        if to_syslog: \n            syslog.syslog(self._get_syslog_severity(severity), msg)\n        \n        \n        timestamp_format = \"%d %b %Y %H:%M:%S\"\n        if severity in ['warning', 'error']:\n            output = sys.stderr\n        else:\n            output = sys.stdout\n        \n        print_msg = \"{} {} {:<8} {}\".format(datetime.now().strftime(timestamp_format), self.identifier, severity.upper()+':', msg)\n        print(print_msg, file=output)\n    \n\n\ndef main(name):\n\t\t\n\tweighted_sevs = [ 'info', 'info', 'info', 'debug', 'debug', 'notice', 'notice', 'warning', 'error']\n\tmessages = {\n\t\t'info': \t[\n\t\t\t\t\t\t'Just wanted to say hello!',\n\t\t\t\t\t\t'Still alive!',\n\t\t\t\t\t\t'Alive and well',\n\t\t\t\t\t\t'Everything is a-ok',\n\t\t\t\t\t\t'All systems operational captain'\n\t\t\t\t\t],\n\t\t'debug': \t[\n\t\t\t\t\t\t'{} instructions executed during last period'.format(random.randint(12415, 1390293)),\n\t\t\t\t\t\t'{} IO operations during measured interval'.format(random.randint(564, 999)),\n\t\t\t\t\t\t'{} requests answered since last checkpoint'.format(random.randint(2548, 87965)),\n\t\t\t\t\t],\n\t\t'notice': \t[\n\t\t\t\t\t\t'Slightly deviating from optimal status',\n\t\t\t\t\t\t'Something notable happened but you shouldn\\'t be worried',\n\t\t\t\t\t\t'Performing routine maintenance task now',\n\t\t\t\t\t\t'Actually doing something now (for a change)',\n\t\t\t\t\t\t'Fighting for resources'\n\t\t\t\t\t],\n\t\t'warning': [\n\t\t\t\t\t\t'Running out of superspace reverse order low priority cache memory',\n\t\t\t\t\t\t'Couldn\\'t locate temporary blob storage',\n\t\t\t\t\t\t'Temporal electron fluctuations detected',\n\t\t\t\t\t\t'Socket connection to master system interrupted',\n\t\t\t\t\t\t'Illegal operation attempted'\n\t\t\t\t\t],\n\t\t'error': \t[\n\t\t\t\t\t\t'Fatal problem detected. Restarting core.',\n\t\t\t\t\t\t'Illegal instruction executed. Restarting core.',\n\t\t\t\t\t\t'Critical fixture collapsed.',\n\t\t\t\t\t\t'Core overheating. 
Initiating counter measures',\n\t\t\t\t\t\t'Rogue virus attacking system'\n\t\t\t\t\t],\t\t\t\t\n\t}\n\twhile True:\n\t\tsev = random.choice(weighted_sevs)\n\t\tmessage = random.choice(messages.get(sev))\n\t\t\n\t\tglobal_logger.log(message, severity=sev)\n\t\t\n\t\tsleep(random.randint(1, 60))\n\t\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"name\", type=str, nargs='?', default='mock_service1')\n\targs = parser.parse_args()\n\n\tglobal_logger = InfraLogger(args.name)\n\tglobal_logger.log(\"Starting up...\")\n\n\tmain(args.name)\n","sub_path":"python/mock_service_syslog.py","file_name":"mock_service_syslog.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264384148","text":"import qi\nimport argparse\nimport sys\n\n\ndef main(session):\n \"\"\"\n this example shows the addBlockingEvent method from the ALDialog API\n \"\"\"\n ALDialog = session.service(\"ALDialog\")\n ALDialog.setLanguage(\"English\")\n topic_name = ALDialog.loadTopic(\"/home/nao/aldialog_test_topic_file.top\")\n ALDialog.activateTopic(topic_name)\n ALDialog.subscribe(\"my_deactivateTag_test\")\n\n raw_input(\"\\nThe robot is going to start counting from 0 to 5 now. \"\n \"Try touching its head's tactile sensors while it's speaking. \"\n \"It will not interrupt its speech. Press Enter to start:\")\n ALDialog.forceInput(\"start counting\")\n\n raw_input(\"\\nThe robot is going to start counting from 0 to 5 now. \"\n \"Try touching its head's tactile sensors while it's speaking. \"\n \"Touching the head is now declared as a blocking event, the \"\n \"robot will react to your touch immediately. Press Enter to start:\")\n ALDialog.addBlockingEvent(\"MiddleTactilTouched\")\n ALDialog.addBlockingEvent(\"FrontTactilTouched\")\n ALDialog.addBlockingEvent(\"RearTactilTouched\")\n ALDialog.forceInput(\"start counting\")\n\n raw_input(\"\\nThe robot is going to start counting from 0 to 5 now. \"\n \"Try touching its head's tactile sensors while it's speaking. \"\n \"The default behavior is restored now, the robot won't interrupt its speech.\")\n ALDialog.removeBlockingEvent(\"MiddleTactilTouched\")\n ALDialog.removeBlockingEvent(\"FrontTactilTouched\")\n ALDialog.removeBlockingEvent(\"RearTactilTouched\")\n ALDialog.forceInput(\"start counting\")\n\n ALDialog.unsubscribe(\"my_deactivateTag_test\")\n ALDialog.deactivateTopic(topic_name)\n ALDialog.unloadTopic(topic_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, default=\"127.0.0.1\",\n help=\"Robot's IP address. 
If on a robot or a local Naoqi - use '127.0.0.1' (this is the default value).\")\n    parser.add_argument(\"--port\", type=int, default=9559,\n                        help=\"port number, the default value is OK in most cases\")\n\n    args = parser.parse_args()\n    session = qi.Session()\n    try:\n        session.connect(\"tcp://{}:{}\".format(args.ip, args.port))\n    except RuntimeError:\n        print (\"Can't connect to Naoqi at IP {} (port {}).\\nPlease check your script's arguments.\"\n               \" Run with -h option for help.\".format(args.ip, args.port))\n        sys.exit(1)\n    main(session)\n\n","sub_path":"softbankRobotics/choregraphe-suite-2.5.5.5-linux64/share/doc/_downloads/aldialog_addBlockingEvent.py","file_name":"aldialog_addBlockingEvent.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605626345","text":"import pickle\nimport pandas as pd\nimport numpy as np\n# sklearn.cross_validation was removed from scikit-learn; the module is now model_selection\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier as rf\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier\nfrom xgboost import XGBClassifier as xg\nfrom xgboost import Booster\nimport sklearn.metrics\nfrom sklearn.externals import joblib\n\n\nclass IDSPredictor(object):\n    def __init__(self):\n        self._model = xg()\n        self._loaded_model_path:str\n\n    def create_model(self, data:pd.core.frame.DataFrame) -> None:\n        X = data.loc[:, data.columns != 'attack_cat']\n        y = data['attack_cat']\n        self._model.fit(X, y)\n        self._classes = self._model.classes_\n    \n    def save_model(self, filename: str) -> None:\n        self._loaded_model_path = filename\n        with open(filename, 'wb') as model_file:\n            pickle.dump(self._model, model_file)\n\n    def load_model(self, filename: str) -> None:\n        self._loaded_model_path = filename\n        with open(filename, 'rb') as model_file:\n            self._model = pickle.load(model_file)\n            self._classes = self._model.classes_\n\n    def predict_record(self, record: pd.core.frame.DataFrame) -> str:\n        return self._model.predict(record)\n\n    def predict_record_proba(self, record: pd.core.frame.DataFrame) -> dict:\n        cl = self._model.classes_\n        prob = self._model.predict_proba(record)[0]\n        return dict(zip(cl, prob))\n\n    def partial_fit(self, data:pd.core.frame.DataFrame) -> None:\n        X = data.loc[:, data.columns != 'attack_cat']\n        y = data['attack_cat']\n        self._model.fit(X, y, xgb_model=self._model.get_booster())\n        self._model.classes_ = self._classes\n\n    # def predict_record_log_proba(self, record: pd.core.frame.DataFrame) -> dict:\n    #     cl = self._model.classes_\n    #     prob = self._model.predict_log_proba(record)[0]\n    #     return dict(zip(cl, [-10 if x == float('-inf') else x for x in prob]))\n\n    def test_model(self, data: pd.core.frame.DataFrame) -> float:\n        X = data.loc[:, data.columns != 'attack_cat']\n        y = data['attack_cat']\n        y_pred = self._model.predict(X)\n        return sklearn.metrics.accuracy_score(y, y_pred, normalize=True)\n\n    @classmethod\n    def load_data(self ,csv_path:str) -> pd.core.frame.DataFrame:\n        return pd.read_csv(csv_path)\n\nclass IDSPredictorNB(IDSPredictor):\n    def __init__(self, df:pd.core.frame.DataFrame):\n        super().__init__()\n        self._model = GaussianNB()\n        self.df = df\n    \n    def partial_fit(self, data:pd.core.frame.DataFrame) -> None:\n        self.df = self.df.append(data, ignore_index=True)\n        X = self.df.loc[:, self.df.columns != 'attack_cat']\n        y = self.df['attack_cat']\n        self._model.fit(X, y)\n\nclass IDSPredictorRF(IDSPredictorNB):\n    def __init__(self, 
df:pd.core.frame.DataFrame):\n super().__init__(df)\n self._model = rf(n_estimators=10, n_jobs=4)\n\nclass IDSPredictorKNN(IDSPredictorNB):\n def __init__(self, df:pd.core.frame.DataFrame):\n super().__init__(df)\n self._model = KNeighborsClassifier()\n self.df = df\n \n \ndef main():\n model_path = './resources/models/xgboost.sav'\n # data = IDSPredictor.load_data('./resources/data_cleaned/mixed_learn.csv')\n # data = IDSPredictor.load_data('./resources/data_cleaned/virt_learn.csv')\n # data = IDSPredictor.load_data('./resources/data_cleaned/normal_baseline.csv')\n data = IDSPredictor.load_data('./resources/data_cleaned/normal_syntetic.csv')\n # data = IDSPredictor.load_data('./resources/data_cleaned/UNSW-NB15_learn2.csv')\n\n ids = IDSPredictor()\n # ids.load_model(model_path)\n ids.create_model(data)\n ids.save_model(model_path)\n \n # ids.partial_fit(data[data.attack_cat == 'DoS'])\n\n print(ids.test_model(data))\n\n # X = data.loc[:, data.columns != 'attack_cat']\n # X = X.iloc[0, :]\n # X = pd.DataFrame(X)\n # print(ids.predict_record(X.T))\n # print(ids.predict_record_proba(X.T))\n # print(ids.predict_record_log_proba(X.T))\n\ndef main_nb():\n model_path = './resources/models/naive.sav'\n data = IDSPredictor.load_data('./resources/data_cleaned/normal_syntetic.csv')\n ids = IDSPredictorNB(data)\n # ids.load_model(model_path)\n ids.create_model(data)\n ids.save_model(model_path)\n\ndef main_rf():\n model_path = './resources/models/rf.sav'\n data = IDSPredictor.load_data('./resources/data_cleaned/normal_syntetic_smote.csv')\n # data = IDSPredictor.load_data('./resources/data_cleaned/mixed_learn.csv')\n # data = IDSPredictor.load_data('/home/rasvob/Dokumenty/SKOLA/DP/Server/IDS/resources/data_cleaned/UNSW-NB15_learn2.csv')\n\n ids = IDSPredictorRF(data)\n # ids.load_model(model_path)\n ids.create_model(data)\n ids.save_model(model_path)\n\nif __name__ == '__main__':\n main_rf()","sub_path":"Server/IDS/alg_classiffy.py","file_name":"alg_classiffy.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197009219","text":"import random\nimport turtle\nimport time\n\nscreen = turtle.Screen()\nimage1 = \"./img_coin/coin_f.gif\"\nimage2 = \"./img_coin/coin_b.gif\"\n\nt1 = turtle.Turtle()\nt1.speed(1)\n\nfor i in range(10):\n com = random.randrange(2)\n if com == 0:\n screen.addshape(image1)\n t1.shape(image1)\n t1.stamp()\n else :\n screen.addshape(image2)\n t1.shape(image2)\n t1.stamp()\n\n\nt1.write(\"게임이 끝났습니다.\")\n\ntime.sleep(10)","sub_path":"basic/random03_coinImage.py","file_name":"random03_coinImage.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"118860732","text":"\"\"\"Specify a colors for cells\nand points of a Mesh\"\"\"\nfrom vedo import *\n\n##################################### add a cell array\nman1 = Mesh(dataurl+\"man_low.vtk\").lineWidth(0.1)\nnv = man1.NCells() # nr. 
of cells\nscals = range(nv) # coloring by the index of cell\n\nman1.celldata[\"mycellscalars\"] = scals # add an array of scalars to mesh\nman1.cmap(\"Paired\", scals, on='cells').addScalarBar(\"cell nr\")\nshow(man1, __doc__, at=0, N=3, axes=11, elevation=-60)\n\n\n##################################### Point coloring\nman2 = Mesh(dataurl+\"man_low.vtk\")\nscals = man2.points()[:, 0] + 37 # pick x coordinates of vertices\n\nman2.cmap(\"hot\", scals)\nman2.addScalarBar(horizontal=True)\nshow(man2, \"mesh.cmap()\", at=1)\n\n\n##################################### Cell coloring\nman3 = Mesh(dataurl+\"man_low.vtk\")\nscals = man3.cellCenters()[:, 2] + 37 # pick z coordinates of cells\nman3.cmap(\"afmhot\", scals, on='cells')\n\n# add a fancier 3D scalar bar embedded in the scene\nman3.addScalarBar3D(sy=3)\nman3.scalarbar.rotateX(90).y(0.2)\nshow(man3, \"mesh.cmap(on='cells')\", at=2, interactive=True).close()\n","sub_path":"examples/basic/mesh_coloring.py","file_name":"mesh_coloring.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568722288","text":"A=float(input(\"Insira o primeiro coeficiente da equação: \"))\r\nB=float(input(\"Insira o segundo coeficiente da equação: \"))\r\nC=float(input(\"Insira o terceiro coeficiente da equação: \"))\r\nD=(B**2)-4*A*C\r\nX1=(-B+(D**0.5))/(2*A)\r\nX2=(-B-(D**0.5))/(2*A)\r\nif D<0:\r\n print(\"Não existem raízes reais para Delta negativo: \")\r\nelif D==0:\r\n print(\"Existe apenas uma raiz para Delta igual a 0: \")\r\n print(\"A raiz é %.2f\" % X1)\r\nelse:\r\n print(\"Existem duas raízes reais para Delta positivo: \")\r\n print(\"A primeira raiz é %.2f\" % X1)\r\n print(\"A segunda raiz é %.2f\" % X2)\r\n","sub_path":"4.7.py","file_name":"4.7.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"202622057","text":"from rest_framework import serializers\nfrom v1.accounts.models import Post, PostMedia, PostSource\nfrom .mediatypes import MediaTypeSerializer\nfrom rest_framework.fields import CurrentUserDefault\n\n\nclass PostSourceSerializer(serializers.ModelSerializer):\n class Meta:\n model = PostSource\n fields = \"__all__\"\n\n\nclass PostMediaSerializer(serializers.ModelSerializer):\n media_type = MediaTypeSerializer(many=True, read_only=True)\n file_attributes = serializers.JSONField()\n\n class Meta:\n model = PostMedia\n fields = \"__all__\"\n\n\nclass PostSerializer(serializers.ModelSerializer):\n media = PostMediaSerializer(many=True, read_only=True)\n owner = serializers.PrimaryKeyRelatedField(read_only=True, default=CurrentUserDefault()) # noqa : E501\n\n class Meta:\n model = Post\n fields = \"__all__\"\n","sub_path":"v1/accounts/serializers/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486296701","text":"from .base import * # noqa\nfrom .base import env\n\nSECRET_KEY = env(\"SECRET_KEY\", default=\"only dev replace me\")\n\nALLOWED_HOSTS = [\"*\"]\n\n# REST FRAMEWORK\n# ----------------------------------------------------------------------------------------------------------------------\n# http://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n # \"DEFAULT_RENDERER_CLASSES\": 
[\"rest_framework.renderers.JSONRenderer\"],\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": [\"rest_framework.permissions.AllowAny\"],\n \"DEFAULT_FILTER_BACKENDS\": [\n \"django_filters.rest_framework.DjangoFilterBackend\",\n \"rest_framework.filters.OrderingFilter\",\n \"rest_framework.filters.SearchFilter\",\n ],\n \"DEFAULT_PAGINATION_CLASS\": \"apps.ext.rest.pagination.PageNumberPagination\",\n \"PAGE_SIZE\": 34,\n \"DATETIME_FORMAT\": \"%Y-%m-%d %H:%M:%S\",\n}\n\n# django extensions shell plus\n# ----------------------------------------------------------------------------------------------------------------------\n# https://django-extensions.readthedocs.io/en/latest/shell_plus.html\nSHELL_PLUS = \"ptpython\"\nSHELL_PLUS_PRINT_SQL = True\nSHELL_PLUS_PRINT_SQL_TRUNCATE = 1000\n\nSERV_TOKEN = env(\"SERV_TOKEN\", default=\"\")\nPOST_HEADERS = {\n \"Authorization\": f\"edittoken {SERV_TOKEN}\",\n \"Content-Type\": \"application/json; charset=utf-8\"\n}\nPOST_URL = env(\"POST_URL\", default=None)\nPOST_TOTAL_EPIDEMIC_URL = env(\"POST_TOTAL_EPIDEMIC_URL\", default=None)\n\n\n","sub_path":"ncov/ncov/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91519846","text":"import plotly.graph_objects as go\nimport numpy as np\n\nt=np.linspace(0,2*np.pi,3600) \nfig=go.Figure(); steps=[]\nfor step in np.arange(1,41,4):\n f=np.cos(14*t)+np.cos(6*t)\n fig.add_trace(go.Scatter(\n visible=False,name='k='+str(step),\n line=dict(color='rgb(.7,0,%f)'%(step/41),width=2),\n x=f*np.cos(step*t),y=f*np.sin(step*t)))\nfig.data[0].visible=True\nst='x=(cos(14t)+sin(6t))cos(kt) \\n'+\\\n 'y=(cos(14t)+sin(6t))sin(kt)'\nfor i in range(len(fig.data)):\n step=dict(method='update',\n args=[{'visible':[False]*len(fig.data)}])\n step['args'][0]['visible'][i]=True\n steps.append(step)\nsliders=[dict(active=10,pad={'t':20},steps=steps)]\nfig.update_layout(width=500,height=550,sliders=sliders,\n template='plotly_dark',\n title_text=st,title_font=dict(size=15))\nfig.show()","sub_path":"python_recipes/plotly_function.py","file_name":"plotly_function.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539176326","text":"from _Framework.SessionComponent import SessionComponent\nfrom _Framework.SceneComponent import SceneComponent\n\nfrom LO2SceneComponent import LO2SceneComponent\nfrom LO2Mixin import LO2Mixin, wrap_init\n\nclass LO2SessionComponent(SessionComponent, LO2Mixin):\n #self.parent = parent\n scene_component_type = LO2SceneComponent\n \n @wrap_init\n def __init__(self, c_instance = None, *args, **kwargs):\n self._scene_count = -1\n self._scenes_count = 0\n super(LO2SessionComponent, self).__init__(*args, **kwargs)\n self.__c_instance = c_instance\n #self._selected_scene.disconnect()\n #self._selected_scene = None\n self._selected_scene.set_is_enabled(False)\n \n self._reassign_scenes()\n \n self.add_callback('/live/scene/name/block', self._scene_name_block)\n self.add_callback('/live/clip/name/block', self._clip_name_block)\n self.add_callback('/live/track/name/block', self._track_name_block)\n self.add_callback('/live/track/input/block', self._track_input_block)\n self.add_callback('/live/clip/delete/all', self._delete_all_clips)\n 
self.add_callback('/live/midi/note', self._midi_note)\n        self.add_callback('/live/midi/cc', self._midi_cc)\n        self.add_callback('/live/midi/controller', self._midi_cc)\n        self.add_callback('/live/midi/pgm', self._midi_pgm)\n        self.add_callback('/live/midi/program', self._midi_pgm)\n\n        self.add_function_callback('/live/scenes', self._lo2_on_scene_list_changed)\n\n    \n    def send_midi(self, midi_event_bytes):\n        \"\"\"Script -> Live\n        Use this function to send MIDI events through Live to the _real_ MIDI devices\n        that this script is assigned to.\n        \"\"\"\n        self.log_message(\"midibytes\",midi_event_bytes)\n        self.__c_instance.send_midi(midi_event_bytes)\n    \n    def _create_scene(self):\n        #obj = SceneComponent if self._scene_count == -1 else self.scene_component_type\n        sc = self.scene_component_type(num_slots=self._num_tracks, tracks_to_use_callback=self.tracks_to_use, id=self._scene_count)\n        \n        self._scene_count += 1\n        return sc\n    \n    \n    def on_scene_list_changed(self):\n        self._reassign_scenes()\n    \n    \n    def _reassign_scenes(self):\n        self.log_message('reassigning scenes')\n        diff = len(self.song().scenes) - len(self._scenes)\n        \n        if diff > 0:\n            for i in range(diff):\n                self._scenes.append(self._create_scene())\n        \n        if diff < 0:\n            for i in range(len(self._scenes)-1, len(self.song().scenes)-1, -1):\n                # the list attribute is _scenes; _scene was a typo\n                self._scenes[i].disconnect()\n                self._scenes.remove(self._scenes[i])\n        \n        for i,sc in enumerate(self._scenes):\n            sc.set_scene(self.song().scenes[i])\n\n    \n    \n    # Listeners\n    def _lo2_on_scene_list_changed(self):\n        if len(self.song().scenes) != self._scenes_count:\n            self.send('/live/scenes', len(self.song().scenes))\n            self._scenes_count = len(self.song().scenes)\n\n\n    def _lo2_on_selected_scene_changed(self):\n        idx = list(self.song().scenes).index(self.song().view.selected_scene)\n        self.send('/live/scene/select', idx)\n\n\n\n    # Scene Callbacks\n    def _scene_name_block(self, msg, src):\n        \"\"\" Gets block of scene names\n        \"\"\"\n        b = []\n        for i in range(msg[2], msg[2]+msg[3]):\n            if i < len(self._scenes):\n                s = self._scenes[i]\n                # list.append takes a single argument; extend with the index/name pair\n                b.extend([i, s.scene_name])\n            else:\n                b.extend([i, ''])\n\n        self.send('/live/scene/name/block', b)\n    \n    \n    def _scene_selected(self, msg, src):\n        \"\"\" Selects a scene to view\n        /live/scene/selected (int track) \"\"\"\n        if self.has_arg(msg):\n            if msg[2] < len(self.song().scenes):\n                self.song().view.selected_scene = self.song().scenes[msg[2]]\n        else:\n            idx = list(self.song().scenes).index(self.song().view.selected_scene)\n            self.send('/live/scene/selected', idx)\n\n\n\n\n\n    # Clip Callbacks\n    def _clip_name_block(self, msg, src):\n        \"\"\" Gets a block of clip names\n        \"\"\"\n        b = []\n        for i in range(msg[2], msg[2]+msg[3]):\n            if i < len(self._scenes):\n                s = self._scenes[i]\n                for j in range(msg[4], msg[4]+msg[5]):\n                    if j < len(s._clip_slots):\n                        c = s.clip_slots(j)\n                        b.extend([i, j, c.clip_name])\n                    else:\n                        b.extend([i, j, ''])\n            else:\n                # j would be unbound here; emit empty names for the whole requested range\n                for j in range(msg[4], msg[4]+msg[5]):\n                    b.extend([i, j, ''])\n        \n        self.send('/live/clip/name/block', b)\n\n    # Track Callbacks    \n    \n    def _track_name_block(self, msg, src):\n        \"\"\"\n        Gets a block of track names\n        \"\"\"\n        fullList = False\n        if msg[3] == 0:\n            msg[3] = len(self._channel_strips) - msg[2]\n        #if msg[2] == 0:\n            #self.track_names = []\n            #fullList = True\n        #b = []\n        for i in range(msg[2], msg[2]+msg[3]):\n            if i < len(self._channel_strips):\n                t = self.channel_strip(i)\n                #b.append(t.track_name)\n                self.send('/live/track/name', i, t.track_name)\n                #if fullList:\n                #    self.track_names.append(t.track_name)\n            #else:\n                #b.append('')\n        #self.log_message(self.track_names)\n        #self.send('/live/track/name/block', b)  # b is no longer built; names are sent per track above\n\n\n    def 
_track_input_block(self, msg, src):\n \"\"\" creates a block of tracks and sets their names if given\n \"\"\"\n \n try:\n main_track = int(msg[2],10)\n except:\n self.log_message(\"ib\",self.track_id_from_name(msg[2]))\n main_track = self.track_id_from_name(msg[2])\n i = main_track \n for input_id in msg[3:]:\n i += 1 #for now this relies on group tracks\n self.log_message(\"input block\",i,main_track)\n\n try:\n input_id = int(input_id)\n if input_id < len(self.song().tracks[i].input_routings):\n self.log_message(\"input by #\",input_id,self.song().tracks[i].current_input_routing)\n self.song().tracks[i].current_input_routing = self.song().tracks[i].input_routings[input_id]\n except:\n if 0 <= i < len(self.song().tracks):\n lc_routes = [str(route_name).lower() for route_name in self.song().tracks[i].input_routings]\n try:\n self.log_message(\"input by name\",i,input_id.lower(),lc_routes.index(input_id.lower()),lc_routes)\n self.song().tracks[i].current_input_routing = self.song().tracks[i].input_routings[lc_routes.index(str(input_id).lower())]\n except ValueError:\n self.log_message(\"input by name: name not found\",i,self.song().tracks[i].name,input_id,lc_routes)\n\n\n\n\n def _delete_all_clips(self, msg, src):\n tn = [track.name.lower() for track in self.song().tracks]\n save_names = [save_name.lower() for save_name in msg[3:]]\n self.log_message(\"save names:\",save_names,msg)\n for scene in self.song().scenes:\n if not scene.is_empty:\n for clipslot in scene.clip_slots:\n if clipslot.has_clip and str(clipslot.canonical_parent.name).lower() not in save_names:\n clipslot.delete_clip()\n self.log_message(\"deleting clip\",clipslot.canonical_parent.name,\"#\",scene.name)\n \n def _midi_pgm(self, msg, src):\n midi_pgm = int(msg[2])\n midi_ch = 1\n if len(msg) > 3:\n midi_ch = int(msg[3])\n midi_msg = (191 + midi_ch, midi_pgm)\n self.log_message(\"midi_msg\",midi_msg)\n self.send_midi(midi_msg)\n\n def _midi_cc(self, msg, src):\n midi_cc = int(msg[2])\n cc_val = 127\n try:\n cc_val = int(msg[3])\n except:\n pass\n midi_ch = 1\n if len(msg) > 4:\n midi_ch = int(msg[len(msg) - 1])\n midi_msg = (175 + midi_ch, midi_cc, cc_val)\n self.log_message(\"midi_msg\",midi_msg)\n self.send_midi(midi_msg)\n\n def _midi_note(self, msg, src):\n note_dict = {\n \"c\" : 0,\n \"c#\" : 1,\n \"db\" : 1,\n \"d\" : 2,\n \"d#\" : 3,\n \"eb\" : 3,\n \"e\" : 4,\n \"f\" : 5,\n \"f#\" : 6,\n \"gb\" : 6,\n \"g\" : 7,\n \"g#\" : 8,\n \"ab\" : 8,\n \"a\" : 9,\n \"a#\" : 10,\n \"bb\" : 10,\n \"b\" : 11 \n }\n \n try:\n midi_note = int(msg[2])\n except ValueError:\n try:\n octave = int(msg[2][-1])\n msg[2] = msg[2][0:-1]\n except ValueError:\n octave = 0\n midi_note = note_dict[msg[2].lower()] + octave * 12\n midi_ch = 1\n note_vel = 127\n if len(msg) > 3:\n note_vel = int(msg[3])\n if len(msg) > 4:\n midi_ch = int(msg[4])\n midi_msg = (143 + midi_ch, midi_note, note_vel)\n self.log_message(\"midi_msg\",midi_msg)\n self.send_midi(midi_msg)\n\n def _mid_rec_quant(self, msg, src):\n AVAILABLE_QUANTIZATION = [Live.Song.Quantization.q_no_q,\n Live.Song.Quantization.q_8_bars,\n Live.Song.Quantization.q_4_bars,\n Live.Song.Quantization.q_2_bars,\n Live.Song.Quantization.q_bar,\n Live.Song.Quantization.q_quarter,\n Live.Song.Quantization.q_eight,\n Live.Song.Quantization.q_sixtenth]\n \n self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_eight\n 
self._update_quantization_state()","sub_path":"LO2SessionComponent.py","file_name":"LO2SessionComponent.py","file_ext":"py","file_size_in_byte":9911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595582326","text":"# coding=utf-8\nimport time\nimport sys\n\nimport re\n\nimport itchat\nfrom itchat.content import *\n\n\nsend_config = {\n 'MUST_KEYWORDS': ['推拿'],\n\n 'SHOULD_KEYWORDS': [],\n\n 'percent': 100,\n\n 'content': [\n {\n 'msg': [\n {\n 'type': PICTURE,\n 'content': 'groupsend/tuina_guanggao_04.png',\n },\n {\n 'type': TEXT,\n 'content': '【今日小儿推拿课程】点击上图免费学习。没有看到图片的家长可以,在微信搜索“天天教你推拿小技巧”关注微信公众号,点击菜单【每日课程】免费获取!'\n },\n ]\n },\n ]\n}\n\nsend_groups = []\nmedias_cache = {}\nmedias_count = {}\nsend_switch = True\nin_sending = False\n\n\ndef show_chatrooms(core):\n print('show all chatrooms')\n chatrooms = core.get_chatrooms()\n if chatrooms is None:\n print('chatrooms is None')\n return\n\n if len(send_groups) > 0:\n return\n\n must_keywords = send_config['MUST_KEYWORDS']\n should_keywords = send_config['SHOULD_KEYWORDS']\n\n for chatroom in chatrooms:\n try:\n nickname = chatroom['NickName']\n username = chatroom['UserName']\n isowner = chatroom['IsOwner']\n except Exception as e:\n print(chatroom)\n print(e)\n continue\n\n need_send = True\n if len(must_keywords) > 0:\n for keywords in must_keywords:\n if keywords not in nickname:\n need_send = False\n break\n if not need_send:\n print('not need send to {0}, {1}'.format(nickname, username))\n continue\n\n # need_send = False\n # for keywords in should_keywords:\n # if nickname in keywords or keywords is '*':\n # need_send = True\n # break\n\n number = get_group_number(nickname)\n\n #500, 1000, 1500, 2000, 2500\n # if not isowner or not need_send or number is None or int(number) > 3000 or int(number) < 1500:\n if not isowner or not need_send:\n # if not need_send:\n print('not need send to {0}, {1}, isOwner: {2}, number: {3}'.format(nickname, username, isowner, number))\n continue\n\n print('---- send msg to group: {0}, {1}, isOwner: {2}, number: {3}'.format(nickname, username, isowner, number))\n send_groups.append(chatroom)\n\n\ndef get_group_number(nickname):\n rs = '\\d+班'\n l = re.search(r'%s' % rs, nickname)\n if l:\n number = l.group()\n l = re.search(r'\\d+', number)\n if l:\n return int(l.group())\n\n\ndef send_msg_to_group(group_info, content_arr):\n user_name = group_info['UserName']\n\n # itchat.set_chatroom_name(user_name, '骗子群')\n\n print('send msg to group: {0}'.format(group_info['NickName']))\n for item in content_arr:\n content_type = item['type']\n content = item['content']\n\n if content_type == TEXT:\n print('send text: {0}'.format(content))\n\n if send_switch:\n itchat.send_msg(content, group_info['UserName'])\n\n elif content_type == PICTURE:\n\n print('send image {0}'.format(content))\n\n mid = medias_cache.get(content, None)\n mid_count = medias_count.get(content, 0)\n if mid_count >= 10:\n medias_count[content] = 0\n mid = None\n elif mid_count == 0:\n medias_count[content] = 0\n\n if mid is None:\n print('upload image {0}'.format(content))\n r = itchat.upload_file(content, isPicture=True)\n if r:\n mid = r['MediaId']\n else:\n print('upload image failed {0}'.format(content))\n continue\n\n medias_cache[content] = mid\n\n medias_count[content] += 1\n\n if send_switch:\n itchat.send_image(content, toUserName=group_info['UserName'], mediaId=mid)\n\n time.sleep(1)\n\n return True\n\n\ndef send_groups_msg():\n\n send_count = len(send_groups)\n if send_count == 
0:\n return\n\n print('send groups count: {0}'.format(send_count))\n\n def get_send_msg_arr(n, send_count):\n\n contents = send_config['content']\n n = 0\n percent = send_config['percent']\n for msg_item in contents:\n cnt = int(n * percent / 100.0 * send_count)\n\n if send_count <= cnt:\n # print json.dumps(msg_item)\n return msg_item['msg']\n n += 1\n\n return contents[len(contents) - 1]['msg']\n\n n = 0\n for group_info in send_groups:\n msg_arr = get_send_msg_arr(n, send_count)\n\n for i in range(1):\n send_msg_to_group(group_info, msg_arr)\n\n time.sleep(10)\n\n\ndef schedule(core):\n print('schedule ---- init: {0}'.format(core.wx_init))\n\n if core.wx_init:\n if len(send_groups) > 0:\n return\n\n show_chatrooms(core)\n\n send_groups_msg()\n\n time.sleep(2)\n\n\ndef main(hotStorageDir=None):\n itchat.auto_login(hotReload=True, statusStorageDir=hotStorageDir)\n itchat.run(debug=True, blockThread=True, schedule=schedule)\n\n\nif __name__ == '__main__':\n is_sending = False\n statusStorageDir = None\n if len(sys.argv) > 1:\n statusStorageDir = sys.argv[1]\n print('statusStorageDir {0}'.format(statusStorageDir))\n main(statusStorageDir)\n\n","sub_path":"multi_group_send.py","file_name":"multi_group_send.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90962767","text":"from openpyxl import Workbook\nfrom openpyxl.drawing.image import Image\n\nwb = Workbook()\n\n# default sheet 활성화\nsheet1 = wb.active\n\n# 데이터 추가\n# 제목 행\nsheet1.append(['이름', '생년월일', '이미지'])\n\n# 데이터 행 추가\nrows = [\n ['홍길동', '801020'],\n ['송혜교', '851115'],\n ['김지원', '860912'],\n ['남주혁', '880705'],\n\n]\n\nfor idx, row in enumerate(rows, 2):\n sheet1.append(row)\n img = Image(\"./data/tent1.jpg\")\n img.width = 30\n img.height = 30\n sheet1.add_image(img, 'C'+str(idx)) # C2, C3\n\n\nwb.save(\"./data/test3.xlsx\")","sub_path":"file/xls_write3.py","file_name":"xls_write3.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"21999895","text":"import re\nimport os\nimport os.path as op\nimport numpy as np\nimport csv\nimport nltk\nimport spacy\nfrom nltk.corpus import stopwords \nfrom collections import defaultdict\n\nnlp = spacy.load(\"en_core_web_sm\")\n\nmimir_dir = os.environ[\"MIMIR_DIR\"]\n\ndata_dir = op.join(mimir_dir, \"data\")\n\n\nstop_words = set(stopwords.words(\"english\"))\n\ndef remove_newline(text):\n\tnewline_trans = str.maketrans(\"\", \"\", \"\\n\") \n\treturn(text.translate(newline_trans))\n\ndef re_sub_newlines(text):\n\treturn (re.sub(r\"(?<=\\S)\\n(?=\\S)\", lambda x: \" \", text))\t\t\n\ndef newline_to_space(text):\n\tnewline_trans = str.maketrans(\"\\n\", \" \") \n\treturn(text.translate(newline_trans))\n\n#def sentence_tokenize(text): \n#\treturn (re.split(r'(?<=(?(Mr|Mrs|Ms|Dr|Sr|Jr|M|St|Cl|No)|[\\.A-Z]+)\"\n\tsplit_punct = \"[\\.?!]\"\n\tsplit_re = re.compile(r\"\" + abbreviations + \"?\" + \"(?P\" + split_punct + \"[\\s\\n]*)\")\n\tnew_text = re.sub(split_re, handle_abbreviations, new_text)\n\t\n\tnew_line = re.compile(r\"[\\s\\n]*\\n[\\s]*(😠)?\") # if we are at a new line, we will split anyway\n\tnew_text = re.sub(new_line, handle_newline, new_text)\n\tnew_text = new_text.translate(revert_punct)\t\n\n\t#print(new_text)\n\n\treturn(new_text.split(\"😠\"))\n\n\t\ndef csv_to_list(csv_file_path):\n\n\tline_list = []\n\t\n\twith open(csv_file_path, newline=\"\") as csvfile:\n\t\tcsvreader = 
csv.reader(csvfile)\n\t\tfor row in csvreader:\n\t\t\tline_list.append(row)\n\n\treturn(line_list)\n\ndef list_to_csv(csv_file_path, line_list):\n\t\n\twith open(csv_file_path, \"w+\") as csvfile:\n\t\tcsvwriter = csv.writer(csvfile)\n\t\tfor row in line_list:\n\t\t\tcsvwriter.writerow(row)\n\ndef load_or_create_object(numpy_filename: str, obj: object):\n\tif not numpy_filename.endswith(\".npy\"):\n\t\tnumpy_filename += \".npy\"\n\tif not op.exists(numpy_filename):\n\t\treturn obj\n\telse:\n\t\treturn np.load(numpy_filename, allow_pickle=True)[0]\t\n\n\ndef load_or_create(numpy_filename: str, create_function, save=False):\n\t\"\"\"Loads a numpy object if the path exists, or, if it doesn't, creates\n\t\tit with create_function. If save == True, also saves the object\"\"\"\n\n\tif not numpy_filename.endswith(\".npy\"):\n\t\tnumpy_filename += \".npy\"\n\t\n\tif not op.exists(numpy_filename):\n\t\toutput = create_function()\n\telse:\n\t\treturn np.load(numpy_filename, allow_pickle=True)[0]\t\n\t\n\tif save == True:\n\t\tif isinstance(output, np.ndarray):\n\t\t\tnp.save(numpy_filename, output)\n\t\telif isinstance(output, list):\n\t\t\tnp.save(numpy_filename, np.array(output))\n\t\telse:\n\t\t\tnp.save(numpy_filename, np.array([output]))\n\t\n\treturn(output)\n\ndef extract_text_from_gutenberg(text):\n\ttext = re.split(r\"\\*\\*\\*.*?START.*?PROJECT GUTENBERG EBOOK.*?\\*\\*\\*\", text)[1]\n\ttext = re.split(r\"\\*\\*\\*.*?END.*?PROJECT GUTENBERG EBOOK.*?\\*\\*\\*\", text)[0]\n\treturn(text)\n\ndef make_id_name_dict():\n\tid_name_dict = {}\n\tdocs_csv = op.join(mimir_dir, \"data\", \"documents.csv\")\n\tdocs_list = csv_to_list(docs_csv)\n\tfor line in docs_list[1:]:\n\t\ttry:\n\t\t\tif line[2] == \"gutenberg\":\n\t\t\t\tid_name_dict[line[0]] = line[6]\n\t\texcept:\n\t\t\tpass\n\treturn(id_name_dict)\n\ndef make_name_url_dict():\n\tid_name_dict = make_id_name_dict()\n\tname_url_dict = {}\n\tid_url_csv = op.join(mimir_dir, \"data\", \"id_url.csv\")\n\tid_url_list = csv_to_list(id_url_csv)\n\tfor line in id_url_list[1:]:\n\t\ttry:\n\t\t\tif line[1] == \"gutenberg\":\n\t\t\t\tname_url_dict[id_name_dict[line[0]]] = line[2]\n\t\texcept:\n\t\t\tpass\n\treturn name_url_dict\n\ndef levenshtein(input_sentence, target_sentence):\n\t#Calculates the minimum edit distance (Levenshtein distance) between two strings (or lists)\n\n\ttrellis = np.zeros((len(input_sentence) + 1, len(target_sentence) + 1))\n\ttrellis[:,0] = np.arange(len(input_sentence)+1)\n\ttrellis[0,:] = np.arange(len(target_sentence)+1)\n\n\tfor i in range(1, len(input_sentence) + 1):\n\t\tw_in = input_sentence[i-1]\n\t\tfor j in range(1, len(target_sentence) +1):\n\t\t\tw_t = target_sentence[j-1]\n\t\t\tif w_in == w_t:\n\t\t\t\tsquare_score = 0\n\t\t\telse:\n\t\t\t\tsquare_score = 1\n\t\t\ttrellis[i,j] = min(trellis[i-1,j], trellis[i-1,j-1], trellis[i,j-1]) +square_score\n\n\treturn(trellis[-1,-1])\n\n\ndef download_models():\n\tnltk.download('punkt')\n\tnltk.download('averaged_perceptron_tagger')\n\tnltk.download('maxent_ne_chunker')\n\tnltk.download('words')\n\ndef get_line_list_from_file(file):\n\twith open(file) as f:\n\t\tline_list = f.readlines()\n\t\n\treturn line_list\n\n\ndef remove_stopwords(sent:list):\n\tstop_words = set(stopwords.words(\"english\"))\n\treturn ([word for word in sent if word.lower() not in stop_words])\n\n\ndef tokenize(line):\n\ttokens = nltk.word_tokenize(line)\t \n\treturn tokens\n\n\ndef spacy_single_line(line, return_indices = False):\n\tdoc = nlp.make_doc(line)\n\tbeams = nlp.entity.beam_parse([doc], 
beam_width=16, beam_density=0.0001)\n\tentity_scores = defaultdict(float)\n\tparses = nlp.entity.moves.get_beam_parses(beams[0])\n\tfor score, ents in parses:\n\t#\tprint (score, ents)\n\t\tfor start, end, label in ents:\n\t\t\t# print (\"here\")\n\t\t\tentity_scores[(start, end, label)] += score\n\t#print ('entity_scores', entity_scores)\n\tbest_parse = parses[0][1]\n\tents = nlp(line).ents\n\ttokens = []\n\tfor ent in ents:\n\t\tscore = entity_scores[(ent.start, ent.end, ent.label_)]\n\t\tif return_indices == False:\n\t\t\ttokens.append(((ent.text, ent.label_),score))\n\t\telse:\n\t\t\ttokens.append(((ent.start, ent.end),(ent.text, ent.label_),score))\n\treturn(tokens)\n\t\ndef ntokens_spacy(line):\n\tdoc = nlp(line)\n\treturn(len(doc))\n\ndef spacy_get_entity_types(line_list):\n\tall_ents = []\n\tfor line in line_list:\n\t\tprocessed = nlp(line)\n\t\tfor ent in processed.ents:\n\t\t\tnew_entity = [ent.text,ent.label_]\n\t\t\tif new_entity not in all_ents:\n\t\t\t\tall_ents.append(new_entity)\n\treturn(all_ents)\n\n\ndef spacy_get_entity_tokens(line_list):\n\tall_ents = []\n\tfor line in line_list:\n\t\tall_ents += spacy_single_line(line)\n\treturn(all_ents)\n\n\ndef get_named_entities(tokens):\n\tentities = nltk.chunk.ne_chunk(nltk.pos_tag(tokens))\n\treturn entities\n\ndef ne_list_from_file(file_path):\n\tline_list = get_line_list_from_file(file_path)\n\tentity_list = list()\n\tfor line in line_list: #for each sentence in the input file \n\t\tline.strip(\"\\n\")\n\t\tt = tokenize(line) #get tokens \n\t\tentities = get_named_entities(t) # get entities tree\n\n\t\tfor e in entities:\n\t\t\tif isinstance(e, nltk.tree.Tree): #it's an entity\n\t\t\t\tent_string = \" \".join([leaf[0] for leaf in e.leaves()])\n\t\t\t\tent_tuple = (ent_string, e.label())\n\t\t\t\tif ent_tuple not in entity_list:\n\t\t\t\t\tentity_list.append(ent_tuple)\n\treturn(entity_list)\n\n\nif __name__ == \"__main__\":\n\tsents = \"\\n\".join(get_line_list_from_file(op.join(mimir_dir,\"preprocessed_data\",\"sentence_tokenized\",\"summaries\",\"train\", \"Anna Karenina.sents\")))\n\n\tspacy_single_line(sents)\n#\tspacy_single_line(\"This is a line with the name Harry in it. 
He went to Anatole France.\")\n\t#print(ne_list_from_file(op.join(mimir_dir,\"data\",\"nqa_summary_text_files\",\"train\", \"Anna Karenina\")))\n","sub_path":"qa/corpus_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624132147","text":"import struct\nimport threading\nimport os\nimport sys  # used throughout (sys.stdout.flush, sys.exit) but was never imported\nfrom socket import *\n\nclass MyClient:\n    def __init__(self, serverIp, serverPort):\n        ADDR = (serverIp, serverPort)\n        self.tcpCliSock = socket(AF_INET, SOCK_STREAM)\n        try:\n            self.tcpCliSock.connect(ADDR)\n        except:\n            print (\"The gate to the tiny world of warcraft is not ready.\")\n            sys.stdout.flush()\n            os._exit(2)\n        self.sendSwitch = {\"login\":self.login,\n                \"move\":self.move,\n                \"attack\":self.attack,\n                \"speak\":self.speak,\n                \"logout\":self.logout,\n                }\n        self.recvSwitch = {2:self.login_reply,\n                4:self.move_notify,\n                6:self.attack_notify,\n                8:self.speak_notify,\n                10:self.logout_notify,\n                11:self.invalid_state,\n                }\n\n        self.version = 4\n        self.isLogin = False\n        self.isLogout = False\n        self.name = \"\"\n        self.HP = 0\n        self.EXP = 0\n        self.x = 0\n        self.y = 0\n        self.nearbyPlayer = []\n        self.direction = {\"north\" : 0, \"south\" : 1, \"east\" : 2, \"west\" : 3}\n\n        #while True:\n\t    #self.sendMsg()\n\t    #self.recvMsg()\n\t    #if self.isLogout:\n\t\t#exit(0)\n\n        sendMessageThread = threading.Thread(target=self.sendMsg)\n        sendMessageThread.start()\n\n        receiveMessageThread = threading.Thread(target=self.recvMsg)\n        receiveMessageThread.start()\n\n\n    def sendMsg(self):\n        while True:\n            #print \"command>\",\n            line = raw_input()\n            arr = line.split(\" \",1)\n            try:\n                self.sendSwitch.get(arr[0],self.defaultSend)(arr)\n            except:\n                os._exit(2)\n            #sys.stdout.flush()\n\n    def login(self,arr):\n        content = arr[1]\n        if len(content) > 9:\n            print (\"!!The length of name is over 9.\")\n            return\n        elif not content.isalnum():\n            print (\"!!The name contains other special characters.\")\n            return\n        self.name = content\n        msg = struct.pack(\">BHB10sH\", self.version, 16, 1, content, 0)\n        self.tcpCliSock.sendall(msg)\n\n    def move(self,arr):\n        content = arr[1]\n        if not content in self.direction:\n            print (\"! Invalid direction:\",arr[1])\n            return\n        msg = struct.pack(\">BHBBBH\", self.version, 8, 3, self.direction[content], 0, 0)\n        self.tcpCliSock.sendall(msg)\n\n    def attack(self,arr):\n        if arr[1] in self.nearbyPlayer:\n            msg = struct.pack(\">BHB10sH\", self.version, 16, 5, arr[1], 0)\n            self.tcpCliSock.sendall(msg)\n        else:\n            print (\"The target is not visible\")\n\n    def speak(self,arr):\n        length = len(arr[1])\n        if length > 255:\n            print (\"! 
Invalid text message.\")\n return\n #if self.contain_unvisable_words(arr[1]):\n #print (\"Meteor is striking the world.\")\n #sys.stdout.flush()\n #os._exit(2)\n length = (length/4 + 1)*4\n msg = struct.pack(\">BHB\"+str(length)+\"s\",self.version,length+4,7,arr[1])\n self.tcpCliSock.sendall(msg)\n\n def logout(self,arr):\n msg = struct.pack(\">BHB\", self.version, 4, 9)\n self.tcpCliSock.sendall(msg)\n print (\"The gate to the tiny world of warcraft has disappeared.\")\n sys.stdout.flush()\n self.isLogout = True\n os._exit(1)\n\n def defaultSend(self,arr):\n print (\"!!Commands are login, move, attack, speak, logout.\")\n\n\n def recvMsg(self):\n while True:\n #if self.isLogout:\n #os._exit(1)\n data = self.tcpCliSock.recv(1024)\n if len(data) == 0:\n print (\"The gate to the tiny world of warcraft has disappeared.\")\n sys.stdout.flush()\n os._exit(2)\n while len(data)>0:\n #self.str_to_hex(data)\n version,length,kind,info = struct.unpack(\">BHB\"+str(len(data)-4)+\"s\",data)\n if version != self.version:\n print (\"Meteor is striking the world.\")\n sys.stdout.flush()\n os._exit(2)\n if length - 4 < len(info):\n data = info[length-4:len(info)]\n else:\n data = \"\"\n info = info[0:length-4]\n if len(info)%4 != 0:\n print (\"Meteor is striking the world.\")\n sys.stdout.flush()\n os._exit(2)\n self.recvSwitch.get(kind)(info)\n sys.stdout.flush()\n \n\n def login_reply(self,info): #2\n error_code,HP,EXP,X,Y,P = struct.unpack(\">BiiBBB\",info)\n if not self.isLogin:\n if error_code == 0x00:\n print (\"Welcome to the tiny world of warcraft.\")\n self.isLogin = True\n self.HP = HP\n self.EXP = EXP\n self.X = X\n self.Y = Y\n elif error_code == 0x01:\n print (\"A player with the same name is already in the game.\")\n #print \"command>login \",\n\n def move_notify(self,info): #4\n name,X,Y,HP,EXP = struct.unpack(\">10sBBii\",info)\n name = self.get_name(name)\n if not (0<=X<=100 and 0<=Y<=100):\n print (\"Meteor is striking the world.\")\n sys.stdout.flush()\n os._exit(2)\n if self.isLogin:\n if name == self.name:\n self.HP = HP\n self.EXP = EXP\n self.X = X\n self.Y = Y\n if self.X-5 < X < self.X+5 and self.Y-5 < Y < self.Y+5:\n if name != self.name and name not in self.nearbyPlayer:\n self.nearbyPlayer.append(name)\n print (name+\": location=(\"+str(X)+\",\"+str(Y)+\"), HP=\"+str(HP)+\", EXP=\"+str(EXP))\n #print \"command>move \",\n elif name in self.nearbyPlayer:\n self.nearbyPlayer.remove(name)\n\n def attack_notify(self,info): #6\n attacker,victim,damage,HP,p = struct.unpack(\">10s10sBi3s\",info)\n attacker = self.get_name(attacker)\n victim = self.get_name(victim)\n if attacker == self.name or victim == self.name or (attacker in self.nearbyPlayer and victim in self.nearbyPlayer):\n if victim == self.name:\n self.HP = HP\n if HP <= 0:\n print (attacker+\" killed \"+victim)\n if victim in self.nearbyPlayer:\n self.nearbyPlayer.remove(victim)\n if victim == self.name:\n self.nearbyPlayer = []\n else:\n print (attacker+\" damaged \"+victim+\" by \"+str(damage)+\". 
\"+victim+\"'s HP is now \"+str(HP))\n #print \"command>attack \",\n\n def speak_notify(self,info): #8\n length = len(info)-10\n name,words = struct.unpack(\">10s\"+str(length)+\"s\",info)\n name = self.get_name(name)\n if ord(words[len(words)-1]) == 0:\n words = words.strip(words[len(words)-1])\n if len(words)>255 or self.contain_unvisable_words(words):\n print (\"Meteor is striking the world.\")\n sys.stdout.flush()\n os._exit(2)\n print (name+\": \"+words)\n #print \"command>speak \",\n\n def logout_notify(self,info): #a\n length = len(info) - 10\n name,p = struct.unpack(\">10s\"+str(length)+\"s\",info)\n name = self.get_name(name)\n print (\"Player \"+name+\" has left the tiny world of warcraft\")\n #print \"command>logout \",\n\n def invalid_state(self,info):\n error_code,p = struct.unpack(\">B3s\",info)\n if error_code == 0x00:\n print (\"You must log in first.\")\n elif error_code == 0x01:\n print (\"You already logged in.\")\n\n\n def str_to_hex(self,s):\n for c in s:\n num = hex(ord(c)).replace('0x', '')\n if len(num) < 2:\n num = \"0\"+num\n print (num,)\n print (\"\")\n\n def get_name(self,name):\n name = name[0:name.index('\\0')]\n if len(name) > 9 or not name.isalnum():\n print (\"Meteor is striking the world.\")\n sys.stdout.flush()\n os._exit(2)\n return name\n \n def contain_unvisable_words(self,info):\n for c in info:\n if ord(c) < 32 or ord(c) > 126:\n return True\n return False\n\nif __name__ == \"__main__\":\n import getopt\n import sys\n\n server = \"127.0.0.1\"\n port = 12345\n\n try:\n opts, args = getopt.getopt(sys.argv[1:],\"s:p:\")\n except:\n pass\n\n for o,a in opts:\n if o == \"-s\":\n server = a\n elif o == \"-p\":\n port = int(a)\n\n client = MyClient(server,port)\n","sub_path":"我的代码/project1/project1代码与二进制文件/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30586583","text":"#! 
/Library/Frameworks/Python.framework/Versions/2.7/bin/python\n\nclass DiscountCalculator(object):\n\n def calculate(self, total, discount_amount, discount_type):\n if discount_type == 'percent':\n if discount_amount > 100:\n raise ValueError(\"Percentage discount cannot exceed 100%\")\n percentage_discount = float(discount_amount) / 100\n discount = float(total) * percentage_discount\n elif discount_type == 'absolute':\n if discount_amount > total:\n raise ValueError(\"Absolute discount cannot exceed order total\")\n discount = discount_amount\n else:\n raise ValueError(\"Invalid discount type\")\n return discount\n","sub_path":"discount_calculator.py","file_name":"discount_calculator.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535070360","text":"from __future__ import print_function\n\nimport sys\nimport signal\nimport warnings\nfrom multiprocessing import Pool\n\nclass MyPool:\n def __init__(self, processes = 1, warningFilter = \"default\"):\n self.warningFilter = warningFilter\n self.pool = Pool(processes, self.initWorker)\n self.results = []\n \n def applyAsync(self, f, args):\n r = self.pool.apply_async(f, args)\n self.results.append(r)\n \n def initWorker(self):\n return init_worker(self.warningFilter)\n \n def checkPool(self, printProgressEvery = -1):\n try:\n outputs = list()\n for res in self.results:\n outputs.append(res.get(timeout = 1000))\n if printProgressEvery > 0 and len(outputs) % printProgressEvery == 0:\n print(\" \", len(outputs),\"/\", len(self.results), \"%.2f\" % (float(len(outputs)) / len(self.results) * 100) + \"%\")\n self.pool.close()\n self.pool.join()\n return outputs\n except (KeyboardInterrupt, SystemExit):\n print(\"Caught KeyboardInterrupt, terminating workers\")\n self.pool.terminate()\n self.pool.join()\n sys.exit()\n except Exception:\n print(\"Caught Unknown exception, terminating workers\")\n self.pool.terminate()\n self.pool.join()\n sys.exit()\n\n\ndef init_worker(warningFilter):\n # set warningFilter for the child processes\n warnings.simplefilter(warningFilter)\n \n # causes child processes to ignore SIGINT signal and lets main process handle\n # interrupts instead (https://noswap.com/blog/python-multiprocessing-keyboardinterrupt)\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\ndef addOne(i):\n return i+1\n\ndef unitTest():\n pool = MyPool(4)\n for i in range(20):\n pool.applyAsync(addOne, [i])\n results = pool.checkPool()\n print(results)\n","sub_path":"triqler/multiprocessing_pool.py","file_name":"multiprocessing_pool.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33438655","text":"import matplotlib.pyplot as plt\r\nimport sys\r\nimport datetime\r\nimport matplotlib.dates as mdates\r\nimport matplotlib\r\nimport gc\r\n\r\nargs = sys.argv[1:]\r\nif (len(args) <= 0):\r\n sys.exit()\r\nnumber_of_dates = int(args[0])\r\ndates = args[1 : number_of_dates + 1]\r\nvalues_index = number_of_dates + 1\r\nnumber_of_values = int(args[values_index])\r\nvalues = args[values_index + 1: values_index + number_of_values + 1]\r\nvalues = [float(x.replace(',', '.')) for x in values]\r\n\r\ndates = [datetime.datetime.strptime(d, \"%d/%m/%Y\").date() for d in dates]\r\n\r\nx, y = dates,values\r\n\r\nmatplotlib.rcParams['toolbar'] = 
'None'\r\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\r\nplt.gca().xaxis.set_major_locator(mdates.DayLocator())\r\nplt.gcf().set_size_inches(8,6)\r\nplt.plot(x, y, lw = 2, color = '#ffd700')\r\nplt.title('Daily Earnings')\r\nplt.xlabel(\"Date\")\r\nplt.ylabel(\"Earnings\")\r\nplt.gcf().autofmt_xdate()\r\n\r\ngc.collect()\r\n\r\nplt.show()\r\n","sub_path":"App/PythonScripts/EarningsPlot(deprecated, do not use).py","file_name":"EarningsPlot(deprecated, do not use).py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2317213","text":"\"\"\"\nCONFIG = defaultdict()\n\nCONFIG['DATASET_PATH'] = './data/'\nCONFIG['TEST_SIZE'] = 0.2\nCONFIG['RANDOM_STATE'] = 25\nCONFIG['STRAT_TYPE'] = 'TARGET'\n\"\"\"\n\nfrom easydict import EasyDict as edict\n\n\n__C = edict()\ncfg = __C\n\n#\n# Dataset Config\n#\n__C.DATASETS = edict()\n__C.DATASETS.PATH = './data/'\n__C.DATASETS.FILENAME = 'application_train.csv'\n__C.DATASETS.IMPUTED_FILENAME = 'imputed_dataframe.csv'\n\n__C.CONST = edict()\n__C.CONST.IMPUTATION_ITERS = 50\n__C.CONST.TARGET_NAME = 'TARGET'\n__C.CONST.STRAT_TYPE = 'TARGET'\n__C.CONST.TEST_RATIO = 0.2\n__C.CONST.RANDOM_SEED = 25\n\n__C.ARGS = edict()\n__C.ARGS.EPOCHS = 20\n__C.ARGS.LR = 0.005\n__C.ARGS.TEST_BATCH_SIZE = 10\n__C.ARGS.BATCH_SIZE = 10\n__C.ARGS.LOG_INTERVAL = 1000\n__C.ARGS.SEED = 1","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602149210","text":"from pathlib import Path\n\nfrom mmcv.runner.dist_utils import master_only\nfrom mmcv.runner.hooks import HOOKS\nfrom mmcv.runner.hooks.logger.base import LoggerHook\n\nimport dvclive\n\n\n@HOOKS.register_module()\nclass DVCLiveLoggerHook(LoggerHook):\n \"\"\"Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str):\n Default None.\n If not None, after each epoch the model will\n be saved to {model_file}.\n interval (int): Logging interval (every k iterations).\n Default 10.\n If `by_epoch` is True, the value will be set to 0 in\n order to properly work with `dvclive`_.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`.\n Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used.\n Default: True.\n\n .. 
_dvclive:\n https://dvc.org/doc/dvclive\n \"\"\"\n\n def __init__(\n self,\n model_file=None,\n interval=1,\n ignore_last=True,\n reset_flag=False,\n by_epoch=True,\n ):\n self.model_file = model_file\n super().__init__(interval, ignore_last, reset_flag, by_epoch)\n\n @master_only\n def log(self, runner):\n tags = self.get_loggable_tags(runner)\n if tags:\n dvclive.set_step(self.get_iter(runner))\n for k, v in tags.items():\n dvclive.log(k, v)\n\n @master_only\n def after_train_epoch(self, runner):\n super().after_train_epoch(runner)\n if self.model_file is not None:\n runner.save_checkpoint(\n Path(self.model_file).parent,\n filename_tmpl=Path(self.model_file).name,\n create_symlink=False,\n )\n","sub_path":"dvclive/mmcv.py","file_name":"mmcv.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500704681","text":"# -*- coding: utf-8 -*-\n\nimport tkinter\nimport os\nfrom .client_window import ClientMainWindow\nfrom .. import PongLobbyServer\n\n\nclass entryWindow(tkinter.Tk):\n \n def __init__(self):\n super().__init__()\n\n self.server_thread = None\n self.client = None\n\n self.protocol('WM_DELETE_WINDOW', self.close_window)\n\n #sets window title\n self.title('Pong')\n #sets background color\n self.configure(background = 'black')\n #sets little windowicon\n self.call('wm', 'iconphoto', self._w, tkinter.PhotoImage(file=os.sep.join([os.path.dirname(__file__), 'pong_icon.png']), master=self))\n #cant resize the window\n self.resizable(width = False, height = False)\n \n #Title Label\n self.headerLabel = tkinter.Label(self, \n bg = 'black', \n fg = 'white', \n font = ('Comic Sans MS', 15), \n text = \"Menu\")\n self.headerLabel.grid(row = 1, column = 1)\n \n #just a placeholder\n self.placeholder1 = tkinter.Frame(self, bg = 'black', height = 30, width = 300)\n self.placeholder1.grid(row = 2, column = 1)\n \n \n #Button to start the server\n self.startServerBtn = tkinter.Button(self,\n bg = 'black',\n fg = 'red',\n bd = 2,\n font = ('Comic Sans MS', 15),\n text = 'Server: off',\n activeforeground = 'thistle3',\n activebackground = 'black',\n command = self.toggle_server)\n self.startServerBtn.grid(row = 3, column = 1)\n \n #just a placeholder\n self.placeholder2 = tkinter.Frame(self, bg = 'black', height = 30, width = 300)\n self.placeholder2.grid(row = 4, column = 1)\n \n #Button to start the client\n self.startClientBtn = tkinter.Button(self,\n bg = 'black',\n fg = 'red',\n bd = 2,\n font = ('Comic Sans MS', 15),\n text = 'Client: off',\n activeforeground = 'thistle3',\n activebackground = 'black',\n command = self.toggle_client)\n self.startClientBtn.grid(row = 5, column = 1)\n \n #just a placeholder\n self.placeholder2 = tkinter.Frame(self, bg = 'black', height = 30, width = 300)\n self.placeholder2.grid(row = 6, column = 1)\n\n def close_window(self):\n \"\"\"\n Closes the current window and all other Tkinter windows\n :return: None\n \"\"\"\n if self.client:\n self.toggle_client()\n\n if self.server_thread:\n self.toggle_server()\n\n self.destroy()\n\n def toggle_server(self):\n \"\"\"\n Updates the server button and starts or stops the current server\n :return: None\n \"\"\"\n if not self.server_thread:\n self.server_thread = PongLobbyServer.ServerThread()\n self.server_thread.start()\n self.startServerBtn.config(text='Server: on', fg='green')\n else:\n self.server_thread.stop()\n self.server_thread = None\n self.startServerBtn.config(text='Server: off', fg='red')\n\n def 
toggle_client(self):\n \"\"\"\n Updates the client button and starts or stops the current client\n :return: None\n \"\"\"\n if not self.client:\n self.startClientBtn.config(text='Client: on', fg='green')\n self.client = ClientMainWindow(self)\n self.client.mainloop()\n else:\n self.startClientBtn.config(text='Client: off', fg='red')\n self.client.close_window()\n self.client = None\n\n\ndef main():\n window = entryWindow()\n window.mainloop()\n\n","sub_path":"Lobby/Gui/entry_window.py","file_name":"entry_window.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"217190754","text":"import scrapy\n\nclass CraigsListSpider(scrapy.Spider):\n name = 'contactinfo'\n\n start_urls = ['http://phoenix.craigslist.org/reply/phx/sys/5932228379']\n\n def parse(self, response):\n yield {\n 'name': response.css('aside.reply-flap ul li p::text').extract_first().strip(),\n 'phone_number': response.css('.reply-tel-number::text').extract_first().strip()[2:],\n 'email': response.css('.reply-email-address a::attr(href)').extract_first()\n }\n","sub_path":"craigslistService/craigslistScrapy/spiders/getContactsCraigslistTest_spider.py","file_name":"getContactsCraigslistTest_spider.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524093374","text":"from pyspark.mllib.feature import HashingTF, IDF\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib.classification import NaiveBayes\nfrom pyspark.mllib.evaluation import MulticlassMetrics\nimport json\nimport nltk\nfrom pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName(\"Bayes TFIDF\")\nsc = SparkContext(conf=conf)\n\n\ndef get_labeled_review(x):\n return x.get('stars'), x.get('text')\n\n\ndef format_prediction(x):\n return \"actual: {0}, predicted: {1}\".format(x[0], float(x[1]))\n\n\ndef produce_tfidf(x):\n tf = HashingTF().transform(x)\n idf = IDF(minDocFreq=5).fit(tf)\n tfidf = idf.transform(tf)\n return tfidf\n\n# Load in reviews\nreviews = sc.textFile(\"gs://dataproc-3ba9e17b-802e-4fec-8f2d-4e0d4167cadb-us-central1/Datasets/yelp/review.json\")\n# Parse to json\njson_payloads = reviews.map(json.loads)\n# Tokenize and weed out bad data\nlabeled_data = (json_payloads.map(get_labeled_review)\n .filter(lambda x: x[0] and x[1])\n .map(lambda x: (float(x[0]), x[1]))\n .mapValues(nltk.word_tokenize))\nlabels = labeled_data.map(lambda x: x[0])\n\ntfidf = produce_tfidf(labeled_data.map(lambda x: x[1]))\nzipped_data = (labels.zip(tfidf)\n .map(lambda x: LabeledPoint(x[0], x[1]))\n .cache())\n\n# Do a random split so we can test our model on non-trained data\ntraining, test = zipped_data.randomSplit([0.7, 0.3])\n\n# Train our model\nmodel = NaiveBayes.train(training)\n\n# Use our model to predict\ntrain_preds = (training.map(lambda x: x.label)\n .zip(model.predict(training.map(lambda x: x.features))))\ntest_preds = (test.map(lambda x: x.label)\n .zip(model.predict(test.map(lambda x: x.features))))\n\n# Ask PySpark for some metrics on how our model predictions performed\ntrained_metrics = MulticlassMetrics(train_preds.map(lambda x: (x[0], float(x[1]))))\ntest_metrics = MulticlassMetrics(test_preds.map(lambda x: (x[0], float(x[1]))))\n\nwith open('output_discrete.txt', 'w+') as f:\n f.write(str(trained_metrics.confusionMatrix().toArray()) + '\\n')\n f.write(str(trained_metrics.precision()) + '\\n')\n 
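# same two writes again for the held-out test split: confusion matrix, then precision\n    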
f.write(str(test_metrics.confusionMatrix().toArray()) + '\\n')\n f.write(str(test_metrics.precision()) + '\\n')\n","sub_path":"MPs/MP5/bayes_tfidf.py","file_name":"bayes_tfidf.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315768026","text":"#!/usr/bin/env python3\n#_*_ coding : utf-8 _*_\n# Author : Pagliacii\n# Date : 2016.08.31\n# Descriptions : show-me-the-code 0008\n\nimport re\nimport urllib.request\n\ndef getcontent(url, replace=[\"\"]):\n response = urllib.request.urlopen(url)\n html = response.read().decode(\"utf-8\", errors=\"replace\")\n pattern = \"
(.*?)
\"\n page = re.findall(pattern, html, re.S)\n for i in range(len(page)):\n for j in range(len(replace)):\n page[i] = re.sub(replace[j], \"\", page[i])\n\n return page\n\nif __name__ == \"__main__\":\n url = \"http://www.quanxue.cn/CT_DaoJia/LaoZi/LaoZi83.html\"\n replace = [\"\", \"\", \"\\r\\n\"]\n print(getcontent(url, replace=replace))","sub_path":"2016/08/31/spiders.py","file_name":"spiders.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"170033581","text":"from xgboost import XGBRegressor\nfrom sklearn import svm\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neighbors import KNeighborsRegressor \nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor\nfrom sklearn.model_selection import RandomizedSearchCV, cross_val_score\nimport matplotlib.pyplot as plt\nfrom housing_preprocessing_raw import X_train, y_train, X_test, y_test, lasso_index_names\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport random\n\nrandom.seed(42)\n# Standardize input and outputs for certain models\nbest=X_train[lasso_index_names]\nscaled_x_fit=StandardScaler().fit(best)\nscaled_x=scaled_x_fit.transform(best)\n\n# Check performance of the Random Forest model with scaled x and log transformed y\nrf_model=RandomForestRegressor()\ncv_rf=cross_val_score(rf_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , cv_rf)\nprint(\"The average error is\", cv_rf.mean())\n\n# Hyperparameter tuning Random Forest\nrf_trees=[i*10 for i in range(1,21)]\nmin_leaf_samples=[i*2 for i in range(1,10)]\nmax_depth=[i*2 for i in range(1,25)]\nrf_params={'n_estimators':rf_trees,\n 'min_samples_leaf':min_leaf_samples,\n 'max_depth':max_depth}\n\nbest_rf=RandomizedSearchCV(rf_model, rf_params, n_iter=25, n_jobs=-1, cv=10, scoring='neg_mean_squared_error').fit(scaled_x, y_train)\n# Best Model according to Random Search\nbest_rf_model=RandomForestRegressor(**best_rf.best_params_)\n\n# Check performance of Random Search model with 10-fold CV\nbest_cv_rf=cross_val_score(best_rf_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\", best_cv_rf)\nprint(\"The average error is\", best_cv_rf.mean())\n\n# Check performance of the Support Vector model with scaled x and log transformed y\nsvr_model=svm.SVR()\ncv_svr=cross_val_score(svr_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , cv_svr)\nprint(\"The average error is\", cv_svr.mean())\n\n# Hyperparameter tuning Support Vector\nkern=['linear', 'poly', 'rbf', 'sigmoid']\nC=[0.01, 0.05, 0.1, 0.5, 1, 2, 5, 10]\nepsilon=[0.01, 0.1, 0.25, 0.5]\nsvr_params={'kernel':kern, 'C':C, 'epsilon': epsilon}\n\n# Best Model According to Random Search\nbest_svr=RandomizedSearchCV(svr_model, svr_params, n_iter=25, n_jobs=-1, cv=10, scoring='neg_mean_squared_error').fit(scaled_x, y_train)\nbest_svr_model=svm.SVR(**best_svr.best_params_)\n\n# Check performance of Random Search model with 10-fold CV\nbest_cv_svr=cross_val_score(best_svr_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\", best_cv_svr)\nprint(\"The average error is\", best_cv_svr.mean())\n\n# KNearestNeighbors Regressor\nknn_model=KNeighborsRegressor()\ncv_knn=cross_val_score(knn_model, scaled_x, y_train, 
cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , cv_knn)\nprint(\"The average error is\", cv_knn.mean())\n\n# Hyperparameter tuning KNN\nneighbors=[3,5,10,20]\nweights=['uniform', 'distance']\np=[1,2]\nknn_params={'n_neighbors':neighbors, 'weights':weights, 'p':p}\n\nbest_knn=RandomizedSearchCV(knn_model, knn_params, n_iter=25, n_jobs=-1, cv=10, scoring='neg_mean_squared_error').fit(scaled_x, y_train)\nbest_knn_model=KNeighborsRegressor(**best_knn.best_params_)\n\n# Validate performance of Random Search Model with 10-fold CV\nbest_cv_knn=cross_val_score(best_knn_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\", best_cv_knn)\nprint(\"The average error is\", best_cv_knn.mean())\n\n# Original XGB model with 10-fold CV\nxgb_regressor=XGBRegressor()\ncv_xgb_df=cross_val_score(xgb_regressor, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , cv_xgb_df)\nprint(\"The average error is\", cv_xgb_df.mean())\n\n# Hyperparameter tuning with random search\nbooster=[\"gbtree\", \"gblinear\", \"dart\"]\ngamma=[0,0.01,0.1,1]\nmax_delta_step=[0, 0.01, 0.1, 0.5, 1]\nreg_lambda=[0, 0.1, 0.5, 1]\nalpha=[0, 0.1, 0.5, 1]\nn_estimators=[10,50,100,200,500]\n\nrandom_grid={'booster': booster, 'gamma':gamma,\n 'max_delta_step':max_delta_step,\n 'reg_lambda':reg_lambda,\n 'alpha':alpha, 'n_estimators':n_estimators}\n\nbest_xgb=RandomizedSearchCV(xgb_regressor, random_grid, n_iter=25, n_jobs=-1, cv=10).fit(scaled_x, y_train)\nbest_xgb_model=XGBRegressor(**best_xgb.best_params_)\n\n# 10-fold cross validation for the new XGB model\nbest_xgb=cross_val_score(best_xgb_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , best_xgb)\nprint(\"The average error is\", best_xgb.mean())\n\n# 10-fold cross validation for the AdaBoost model\nada_model=AdaBoostRegressor()\ncv_ada=cross_val_score(ada_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , cv_ada)\nprint(\"The average error is\", cv_ada.mean())\n\n# Random Search for best parameters\nn_estimators=[10,50,100,200]\nlearning_rate=[0.01, 0.1, 0.5, 1, 2]\nloss=[\"linear\", \"square\", \"exponential\"]\nbase_estimator=[DecisionTreeRegressor(max_depth=3), DecisionTreeRegressor(max_depth=5), DecisionTreeRegressor(max_depth=7), DecisionTreeRegressor(max_depth=10)]\n\nrandom_grid={'n_estimators':n_estimators,\n 'learning_rate':learning_rate,\n 'loss':loss, 'base_estimator':base_estimator}\n\nbest_ada=RandomizedSearchCV(ada_model, random_grid, n_iter=25, n_jobs=-1, cv=5).fit(scaled_x, y_train)\nbest_ada_model=AdaBoostRegressor(**best_ada.best_params_)\n\n# 10-fold cross validation for the new AdalBoost model\nbest_ada_cv=cross_val_score(best_ada_model, scaled_x, y_train, cv=10, scoring='neg_mean_absolute_error')\nprint(\"The errors for each fold are\" , best_ada_cv)\nprint(\"The average error is\", best_ada_cv.mean())\n\n\n# Fitting the entire dataset\nfull_x_train=StandardScaler().fit(X_train)\nstandardized_full_train=full_x_train.transform(X_train)\nstandardized_full_test=full_x_train.transform(X_test)\n\n# Fitting models with training data\nbest_rf_model.fit(standardized_full_train, y_train)\nbest_xgb_model.fit(standardized_full_train, y_train)\nbest_ada_model.fit(standardized_full_train, y_train)\nbest_svr_model.fit(standardized_full_train, y_train)\nbest_knn_model.fit(standardized_full_train, 
y_train)\n\nrf_full=best_rf_model.predict(standardized_full_test)\nxgb_full=best_xgb_model.predict(standardized_full_test)\nada_full=best_ada_model.predict(standardized_full_test)\nsvr_full=best_svr_model.predict(standardized_full_test)\nknn_full=best_knn_model.predict(standardized_full_test)\n\n# Fitting on full features predictions\nrf_mse_full=mean_squared_error(y_test, np.exp(rf_full))\nrf_mae_full=mean_absolute_error(y_test, np.exp(rf_full))\nxgb_mse_full=mean_squared_error(y_test, np.exp(xgb_full))\nxgb_mae_full=mean_absolute_error(y_test, np.exp(xgb_full))\nada_mse_full=mean_squared_error(y_test, np.exp(ada_full))\nada_mae_full=mean_absolute_error(y_test, np.exp(ada_full))\nsvr_mse_full=mean_squared_error(y_test, np.exp(svr_full))\nsvr_mae_full=mean_absolute_error(y_test, np.exp(svr_full))\nknn_mse_full=mean_squared_error(y_test, np.exp(knn_full))\nknn_mae_full=mean_absolute_error(y_test, np.exp(knn_full))\n\n# Plotting the models\ntest_amts=[i for i in range(len(y_test))]\n\nmodels=[\"Random Forest\", \"XGBoost\", \"Adaboost\", \"SVR\", \"KNN\"]\nresults=[rf_mse_full, xgb_mse_full, ada_mse_full, svr_mse_full, knn_mse_full]\n\nfor i in range(5):\n    print(models[i], results[i])\n\nplt.figure(figsize=(15,15))\nplt.plot(models, results)\nplt.show()\n","sub_path":"model_testing_lasso.py","file_name":"model_testing_lasso.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"329418240","text":"#!/usr/bin/env python\r\n\r\nfrom vmthunder.instance import Instance\r\nfrom vmthunder.openstack.common import log as logging\r\nfrom vmthunder.drivers import dmsetup\r\n\r\nLOG = logging.getLogger(__name__)\r\n\r\n\r\nclass InstanceCommon(Instance):\r\n    def _create_snapshot(self, origin_path):\r\n        snapshot_name = self._snapshot_name()\r\n        snapshot_path = dmsetup.snapshot(origin_path, snapshot_name, self.snapshot_dev)\r\n        self.snapshot_path = snapshot_path\r\n        return snapshot_path\r\n\r\n    def _delete_snapshot(self):\r\n        snapshot_name = self._snapshot_name()\r\n        dmsetup.remove_table(snapshot_name)\r\n\r\n    def start_vm(self, origin_path):\r\n        LOG.debug(\"instanceCommon start vm according origin_path %s\" % origin_path)\r\n        self._create_snapshot(origin_path)\r\n        self.link_snapshot()\r\n        self.session.add_vm(self.vm_name)\r\n        return self.vm_name\r\n\r\n    def del_vm(self):\r\n        LOG.debug(\"come to instanceSnapCache to delete vm %s\" % self.vm_name)\r\n        self._delete_snapshot()\r\n        self.unlink_snapshot()\r\n        self.session.rm_vm(self.vm_name)\r\n        return self.vm_name","sub_path":"vmthunder/instancecommon.py","file_name":"instancecommon.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"172473451","text":"'''\nOutline:\ngroups, greedy matching, regex modifiers, and what the r prefix on a string does\n\nGroups:\n    (pattern)    () makes everything inside it match as a single unit\n    (?:pattern)  makes the () non-capturing\n    \\1  reuses the first group; \\number reuses the group with that number\n\nGreedy matching:\nGreedy mode: match as much as possible while the whole expression still matches.\nNon-greedy mode: match as little as possible while the whole expression still matches.\nGreedy quantifiers:\n    {m,n}   match m to n times\n    {m,}    match at least m times\n    *       match 0 or more times\n    +       match 1 or more times\nTo turn off greedy mode, append ? to the quantifier, e.g. {m,n}? *? +?\n\nRegex modifiers:\n    re.I\t\tignore case\n    re.M\t\tmulti-line matching (usually used with ^ so that each line is matched at its own start)\n    re.S\t\tsingle-line matching (i.e. . can also match a newline)\n\nWhat the r prefix on a string does:\n    r in front of a string, e.g. r'\\t', prevents \\ from being treated as an escape\n    e.g. when matching paths, such as r'C:\\music\\time'\n\n\nOnline regex testing tools:\n    http://tool.oschina.net/regex/\n    http://tool.chinaz.com/regex/\n    https://c.runoob.com/front-end/854\n'''\nimport re\n# group demo\nstring = 'AAitAAitAA'\nret = re.search(r'(AA)(it)\\1\\2\\1',string)\nprint(ret.group()) # result: AAitAAitAA\n\n# greedy matching demo\nstr1 = 'cccddd>'\n\nret1 = re.search('<(.*)>',str1)\nprint(ret1.group())\n# greedy result: cccddd>\nret2 = re.search('<(.*?)>',str1)\nprint(ret2.group())\n# non-greedy result:\n\n# regex modifiers\n\nstr2 = 'AaAaAa'\n\nret1 = re.search('a+',str2,re.I) \nprint(ret1.group())\n# case-insensitive result: AaAaAa\n","sub_path":"爬虫知识点整理/04三种文本解析之正则.py","file_name":"04三种文本解析之正则.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"542668818","text":"from pyipv8.ipv8.attestation.trustchain.database import TrustChainDB\nfrom pyipv8.ipv8.database import database_blob\n\n\nclass DAppCrowdTrustChainDatabase(TrustChainDB):\n\n    def __init__(self, working_directory, db_name):\n        super(DAppCrowdTrustChainDatabase, self).__init__(working_directory, db_name)\n        self.my_peer = None\n\n    def get_pending_review_requests(self, public_key):\n        \"\"\"\n        Get all pending review requests for/from a given public key.\n        \"\"\"\n        results = []\n        review_request_blocks = self._getall(u\"WHERE type='dappcrowd_review_request' AND (public_key = ? OR link_public_key = ?)\", (database_blob(public_key), database_blob(public_key)))\n        for review_request_block in review_request_blocks:\n            if not self.get_linked(review_request_block):\n                results.append(review_request_block)\n        return results\n\n    def is_verified_user(self, public_key):\n        \"\"\"\n        Return whether the user is verified or not (imported GitHub profile).\n        \"\"\"\n        blocks = self.get_blocks_with_type(block_type='devid_connection', public_key=public_key)\n        return len(blocks) > 0\n\n    def get_username(self, public_key):\n        \"\"\"\n        Return the username of a given public key, or unknown if he/she did not import an existing profile.\n        \"\"\"\n        blocks = self.get_blocks_with_type(block_type='devid_connection', public_key=public_key)\n        if not blocks:\n            return 'unknown'\n        return blocks[0].transaction['info']['username']\n\n    def get_detailled_user_info(self, public_key):\n        \"\"\"\n        Return a dictionary that contains information about a specific user.\n        \"\"\"\n        github_info = None\n        bitbucket_info = None\n\n        imported_profiles_blocks = self.get_blocks_with_type(block_type='devid_connection', public_key=public_key)\n        if imported_profiles_blocks:\n            for profile_block in imported_profiles_blocks:\n                if profile_block.transaction['platform'] == 'github' and not github_info:\n                    github_info = profile_block.transaction['info']\n                # TODO BITBUCKET\n\n        return {\n            \"public_key\": public_key.encode('hex'),\n            \"verified\": self.is_verified_user(public_key),\n            \"username\": self.get_username(public_key),\n            \"skills\": self.get_skills(public_key),\n            \"github_info\": github_info,\n            \"bitbucket_info\": bitbucket_info,\n            \"mid\": self.my_peer.mid.encode('hex')\n        }\n\n    def get_users_list(self):\n        \"\"\"\n        Return a list with information about all known users in the system.\n        \"\"\"\n        user_dicts = []\n        pub_keys = list(self.execute(\"SELECT DISTINCT public_key FROM blocks\"))\n        for tup_item in pub_keys:\n            pub_key = str(tup_item[0])\n            user_dicts.append({\n                \"public_key\": pub_key.encode('hex'),\n                \"verified\": self.is_verified_user(pub_key),\n                \"username\": self.get_username(pub_key),\n            })\n        return user_dicts\n\n    def 
get_num_endorsements(self, skill_block):\n \"\"\"\n Get the number of endorsements for a given skill block\n \"\"\"\n return len(self._getall(\"WHERE type='devid_skill' AND link_public_key = ? AND link_sequence_number = ?\", (database_blob(skill_block.public_key), skill_block.sequence_number)))\n\n def did_endorse_skill(self, skill_block):\n \"\"\"\n Return whether you endorsed this skill already or not\n \"\"\"\n if skill_block.public_key == self.my_peer.public_key.key_to_bin():\n return True\n\n return len(self._getall(\"WHERE type='devid_skill' AND public_key = ? AND link_public_key = ? AND link_sequence_number = ?\",\n (database_blob(self.my_peer.public_key.key_to_bin()), database_blob(skill_block.public_key), skill_block.sequence_number))) > 0\n\n def get_skills(self, public_key):\n \"\"\"\n Get all skills of a specific user.\n \"\"\"\n skills_list = []\n skill_blocks = self._getall(\"WHERE type='devid_skill' AND public_key = ? AND link_sequence_number = 0\", (database_blob(public_key),))\n for skill_block in skill_blocks:\n skills_list.append({\n \"name\": skill_block.transaction['name'],\n \"block_num\": skill_block.sequence_number,\n \"endorsements\": self.get_num_endorsements(skill_block),\n \"did_endorse\": self.did_endorse_skill(skill_block)\n })\n return skills_list\n","sub_path":"dappcrowd/tc_database.py","file_name":"tc_database.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507991509","text":"import pandas\nimport pandas as pd\nfrom fuzzywuzzy import fuzz\nfrom sklearn.metrics import roc_auc_score\n\nTRAIN_FILE = \"input/train.csv\"\nTEST_FILE = \"input/test.csv\"\n\nTRAIN_OUTPUT_FILE = \"features/fuzz_train.csv\"\nTEST_OUTPUT_FILE = \"features/fuzz_test.csv\"\n\ndef extract_features(df):\n features = pd.DataFrame()\n features['fuzz_qratio'] = df.apply(lambda r: fuzz.QRatio(str(r.question1), str(r.question2)), axis=1)\n features['fuzz_wratio'] = df.apply(lambda r: fuzz.WRatio(str(r.question1), str(r.question2)), axis=1)\n features['fuzz_partial_ratio'] = df.apply(lambda r: fuzz.partial_ratio(str(r.question1), str(r.question2)), axis=1)\n features['fuzz_partial_token_set_ratio'] = df.apply(lambda r: fuzz.partial_token_set_ratio(str(r.question1), str(r.question2)), axis=1)\n features['fuzz_partial_token_sort_ratio'] = df.apply(lambda r: fuzz.partial_token_sort_ratio(str(r.question1), str(r.question2)), axis=1)\n return features\n\n\ndef main():\n if TRAIN_FILE != \"\":\n print('loading training output...')\n dataframe = pd.read_csv(TRAIN_FILE)\n print('{0} training output loaded'.format(len(dataframe)))\n\n print('embedding training output...')\n features = extract_features(dataframe)\n\n print('fuzz_qratio AUC:', roc_auc_score(dataframe['is_duplicate'], features['fuzz_qratio']))\n print('fuzz_wratio AUC:', roc_auc_score(dataframe['is_duplicate'], features['fuzz_wratio']))\n print('fuzz_partial_ratio AUC:', roc_auc_score(dataframe['is_duplicate'], features['fuzz_partial_ratio']))\n print('fuzz_partial_token_set_ratio AUC:', roc_auc_score(dataframe['is_duplicate'], features['fuzz_partial_token_set_ratio']))\n print('fuzz_partial_token_sort_ratio AUC:', roc_auc_score(dataframe['is_duplicate'], features['fuzz_partial_token_sort_ratio']))\n\n features.to_csv(TRAIN_OUTPUT_FILE, index=False)\n\n if TEST_FILE != \"\":\n print('loading testing output...')\n dataframe = pd.read_csv(TEST_FILE)\n print('{0} testing output loaded'.format(len(dataframe)))\n\n print('embedding testing 
output...')\n features = extract_features(dataframe)\n\n features.to_csv(TEST_OUTPUT_FILE, index=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"feature_engineering/feature_extraction_fuzzy.py","file_name":"feature_extraction_fuzzy.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492456383","text":"def makeTree(tree,node):\n if isinstance(tree[1],list):\n makeTree(tree[1],node)\n if isinstance(tree[2],list):\n makeTree(tree[2],node)\n if isinstance(tree[1],int):\n if tree[1]==node[0]:\n tree[1]=node\n return\n if isinstance(tree[2],int):\n if tree[2]==node[0]:\n tree[2]=node\n return\ndef isSearchTree(tree):\n if(isinstance(tree,int)):\n return True\n if(isinstance(tree[2],list)and isinstance(tree[1],list)):\n if tree[1][0]>=tree[0] or tree[2][0]<=tree[0]:\n return False\n else:\n return isSearchTree(tree[1]) and isSearchTree(tree[2])\n elif(isinstance(tree[1],list) and isinstance(tree[2],int)):\n if tree[1][0]>=tree[0]:\n return False\n elif tree[2]!=0 and tree[2]<=tree[0]:\n return False\n else:\n return isSearchTree(tree[1])\n elif(isinstance(tree[2],list)and isinstance(tree[1],int)):\n if tree[2][0]<=tree[0]:\n return False\n elif tree[1]!=0 and tree[1]>=tree[0]:\n return False\n else:\n return isSearchTree(tree[2])\n else:\n if tree[2]!=0 and tree[2]<=tree[0]:\n return False\n elif tree[1]!=0 and tree[1]>=tree[0]:\n return False\n return True\ndef getNodesNum(tree):\n if isinstance(tree,int):\n if tree == 0:\n return 0\n else:\n return 1\n if tree[1]!=0 and tree[2]!=0:\n return 1+getNodesNum(tree[1])+getNodesNum(tree[2])\n if tree[1]!=0 and tree[2]==0:\n return 1+getNodesNum(tree[1])\n if tree[1]==0 and tree[2]!=0:\n return 1+getNodesNum(tree[2])\n if tree[1]==0 and tree[2]==0:\n return 1\ndef subTreeCanSearch(tree):\n if isSearchTree(tree):\n searchNodeNums.append(getNodesNum(tree))\n return\n else:\n if isSearchTree(tree[1]):\n searchNodeNums.append(getNodesNum(tree[1]))\n return\n else:\n subTreeCanSearch(tree[1])\n if isSearchTree(tree[2]):\n searchNodeNums.append(getNodesNum(tree[2]))\n return\n else:\n subTreeCanSearch(tree[2])\nline=input().split()\nnodeNum=int(line[0])\nroot=int(line[1])\ntree=input().split()\ntree=[int(x) for x in tree]\nfor i in range(nodeNum-1):\n line=input().split()\n line=[int(x) for x in line]\n makeTree(tree,line)\nsearchNodeNums=[]\nsubTreeCanSearch(tree)\nsearchNodeNums.sort(reverse=True)\nprint(searchNodeNums[0])","sub_path":"Code/CodeRecords/2318/60677/283095.py","file_name":"283095.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489092602","text":"import discord\nfrom discord import utils\nimport coc\nfrom coc import utils\nimport os\nimport psycopg2\n\n# import donnés\nconfig={\"Coc\":{\"mail\":os.environ.get(\"mail\"),\n \"password\":os.environ.get(\"password\")},\n \"Discord\":{\"token\":os.environ.get(\"Token\"),\n \"prefix\":os.environ.get(\"prefix\")},\n \"bddlink\":os.environ.get(\"DATABASE_URL\")\n }\nclan_tags=[\"#2PU29PYPR\",\"#29Q29PRY9\",\"#29U9YR0QP\",\"#2LL0UCY89\",\"#2LR9RP20J\",\"#2PYR2V202\",\"#2Y2UVR99P\",\"#2L0JQYUPU\",\"#2LLCPYV9P\",\"#2YU08J8UU\"]# mettre ça dans une bdd\ntagsJoueurs=[]\n\n\n# bot discord\nclass discordClient(discord.Client):\n async def on_message(self,message):\n if message.author.bot or message.channel.guild== None:\n return\n if not 
message.content.startswith(config[\"Discord\"][\"prefix\"]):\n return\n commande = message.content[1:]\n args = commande.split(' ')\n commande = args[0]\n if commande ==\"ping\":\n await message.channel.send(\"bien connecté\")\n if commande==\"trophés\":\n await message.channel.send(\"{0.name} est a {0.trophies} trophés\".format(await cocClient.get_player(args[1])))\n if commande == \"ajouter\"or commande == \"claim\" or commande == \"add\" :\n \"\"\"ajouter un tag a un discord, #coc+@xx#XXXX/me\"\"\"\n if len(args)!=3 and len(message.mentions)!=1:\n return await message.channel.send(\"nombre d'arguments incorect\")\n tag=utils.correct_tag(args[1])\n idDiscord=message.mentions[0].id\n try:\n player= await cocClient.get_player(tag)\n except coc.NotFound:\n return await message.channel.send(\"ce tag ne correspond a aucun joueur\")\n except coc.Maintenance:\n return await message.channel.send(\"maintenance en cour, a plus tard :)\")\n except coc.GatewayError:\n return await message.channel.send(\"une erreur inconue s'est produite dans la vérification du tag :(\")\n else:\n connectionBDD = psycopg2.connect(config[\"bddlink\"],sslmode='require')\n Curseur = connectionBDD.cursor()\n\n try:\n Curseur.execute(\"INSERT INTO nommage VALUES (%s,%s,%s,%s)\",(tag,idDiscord,player.name,player.town_hall))\n connectionBDD.commit()\n connectionBDD.close()\n except psycopg2.IntegrityError:\n await message.channel.send(\"déjà enregistré par le passé\")\n else:\n cocClient.add_player_updates(tag)\n await message.channel.send(\"operation réussie\")\n if commande == \"gc\":#couleur embed: #F6C471\n \"\"\"afficher tous les comptes associés a un joueur \"\"\"\n if len(message.mentions)!=1:# cas nb de tags incoherents \n return await message.channel.send(\"merci de tag __une__ personne\")\n idDiscord=message.mentions[0].id\n pseudo=message.mentions[0].display_name\n connectionBDD= psycopg2.connect(config[\"bddlink\"],sslmode='require')\n Curseur = connectionBDD.cursor()\n Curseur.execute(\"SELECT tagIG,PseudoIG FROM nommage where idDiscord = (%s)\",(str(idDiscord),))\n tags=[]\n for l in Curseur:\n tags.append(l[0])\n connectionBDD.commit()\n connectionBDD.close()\n if len(tags)==0:#cas pas dans bdd\n return await message.channel.send(\"pas encore de comptes associés\")\n rep=discord.Embed(colour=0xf6c471)\n rep.set_author(name=\"Profil de \"+str(pseudo))\n async for player in cocClient.get_players(tags):\n rep.add_field(name=player.name,value=\"<:HdvBot:884202091793506324> Hdv : {} \\n<:ExpBot:884202964896608266> Niveau : {} \\n<:TagBot:884204003070705754> Tag : {}\".format(player.town_hall,player.exp_level,player.tag))\n rep.set_image(url=\"https://media.discordapp.net/attachments/859386512129654794/884100318936330261/comptes_lie.png\")\n rep.set_thumbnail(url=message.mentions[0].avatar_url)\n rep.set_footer(text=\"créé par av#2616\",icon_url=\"https://cdn.discordapp.com/avatars/397116327887896576/93f6ce8dde153200b213ba4ec531dd8f.webp?size=128\")\n await message.channel.send(embed=rep)\n if commande == \"retirer\":\n \"\"\"retirer un tag associé a un joueur\"\"\"\n pass\n if commande== \"VL\" or commande== \"vl\":# probleme de recuperation de member a partir member.id\n \"\"\"commandes en SQL\"\"\"\n await message.channel.send(\"pas encore opé^^\")\n con= psycopg2.connect(config[\"bddlink\"],sslmode='require')\n cur=con.cursor()\n cur.execute(\"\"\"SELECT n.idDiscord,s.Perf,s.bi,s.one,s.black,s.Perfdips,s.bidips,s.onedips,s.blackdips,s.donne,s.recu FROM nommage n,scores s WHERE n.tagIG=s.tag AND s.th={} ORDER BY 
s.Perf ASC ,s.bi ASC\"\"\".format(args[1]))\n res=[]\n reponse=\"\"\"\n ```pseudo 3|2|1|0| dips: 3|2|1|0|| ratio\"\"\"\n \n for l in cur:\n res.append(l)\n member= await message.guild.fetch_member(int(l[0]))\n reponse+=\"\\n{} {}|{}|{}|{} |||| {}|{}|{}|{}|| {}\".format(member.display_name,l[1],l[2],l[3],l[4],l[5],l[6],l[7],l[8],l[9]/l[10] if l[10]!=0 else \"NA\")\n await message.channel.send(reponse+\"```\")\n con.close()\n\n\ndef main():\n # on recupere l'ensemble des tags liés a des joueurs enregistrés de l'empire\n con= psycopg2.connect(config[\"bddlink\"],sslmode='require')\n cur = con.cursor()\n cur.execute(\"SELECT tagIG FROM nommage\")\n for l in cur:\n tagsJoueurs.append(l[0])\n con.close()\n\n # connection client coc, non bloquant\n cocClient= coc.login(email=config[\"Coc\"][\"mail\"],\n password=config[\"Coc\"][\"password\"],\n client=coc.EventsClient)\n\n @cocClient.event# quand une attaque de guerre survient\n @coc.WarEvents.war_attack(tags=clan_tags)\n async def current_war_stats(attack, war):\n print(\"un attaque survint:\",attack.attacker.name,\"de\",attack.attacker.clan.name,\"a fait:\",attack.stars,\"étoiles\")\n if attack.attacker.clan.tag in clan_tags and attack.attacker.town_hall>=attack.defender.town_hall:# on controle qu'il est dans un de nos clans\n print(\"tag:\",attack.attacker_tag,\"etoiles:\",attack.stars,\"th\",attack.attacker.town_hall,attack.defender.town_hall,sep=\"\\n\\n\")\n ajouter_bdd(tag=attack.attacker_tag,\n etoiles=attack.stars,\n dips=attack.attacker.town_hall!=attack.defender.town_hall,\n th=attack.attacker.town_hall)# on ajoute a la bdd\n\n\n\n\n @cocClient.event \n @coc.ClanEvents.member_donations(tags=clan_tags)\n async def on_clan_member_donation(old,new):#TODO controller les odns négatifs\n print(\"on a \", old,\" qui a donné \",new.donations-old.donations,\"troupes dans \",old.clan)\n if new.donations deja dans Bdd; 0=>pas encore dans BDD\n for r in Curseur:\n nb=r[0]\n if not nb==1:\n Curseur.execute(\"INSERT INTO scores (tag,th) VALUES (%s,%s)\",(tag,th))\n connectionBDD.commit()\n if etoiles is not None and not dips:\n Curseur.execute(\"SELECT black,one,bi,Perf FROM scores WHERE tag=(%s) AND th=(%s)\",(tag,th))\n for r in Curseur:\n Score = list(r)\n Score[etoiles]+=1\n Curseur.execute(\"UPDATE scores SET black=(%s),one=(%s),bi=(%s),Perf=(%s) WHERE tag=(%s) AND th=(%s)\",(Score[0],Score[1],Score[2],Score[3],tag,th))\n elif etoiles is not None and dips:\n Curseur.execute(\"SELECT blackdips,onedips,bidips,Perfdips FROM scores WHERE tag=(%s) AND th=(%s)\",(tag,th))\n Score=[]\n for r in Curseur:\n Score = list(r)\n Score[etoiles]+=1\n Curseur.execute(\"UPDATE scores SET blackdips=(%s),onedips=(%s),bidips=(%s),Perfdips=(%s) WHERE tag=(%s) AND th=(%s)\",(Score[0],Score[1],Score[2],Score[3],tag,th))\n elif donne is not None:\n Curseur.execute(\"SELECT donne FROM scores WHERE tag=(%s) AND th=(%s)\",(tag,th))\n for r in Curseur:\n don = list(r)[0]\n don+=donne \n Curseur.execute(\"UPDATE scores SET donne=(%s) WHERE tag=(%s) AND th=(%s)\",(don,tag,th))\n elif recu is not None:\n Curseur.execute(\"SELECT recu FROM scores WHERE tag=(%s) AND th=(%s)\",(tag,th))\n anteRecu=0\n for r in Curseur:\n anteRecu = list(r)[0]\n anteRecu+=recu \n Curseur.execute(\"UPDATE scores SET recu=(%s) WHERE tag=(%s) AND th=(%s)\",(anteRecu,tag,th))\n connectionBDD.commit()\n connectionBDD.close()\n\n\n\nif __name__==\"__main__\":\n 
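# entry point: load the saved player tags and start the CoC/Discord clients\n    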
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"261828106","text":"import os\r\nimport csv\r\n\r\n# Path to collect data from the resources folder\r\nbudget_path=os.path.join('..','Pybank/Resources','budget_data.csv')\r\n\r\n# open the csv file\r\nwith open(budget_path,'r') as budget_csv:\r\n budget_file=csv.reader(budget_csv,delimiter=',')\r\n# skip the header row\r\n next(budget_file)\r\n\r\n# Create a list of month data and profit/loss data \r\n month=[]\r\n profit=[]\r\n for row in budget_file: \r\n month.append(row[0])\r\n profit.append(row[1])\r\n# Count the total number of months included in the dataset\r\n# Calculate The net total amount of \"Profit/Losses\" over the entire period\r\n monthcount=0\r\n nettotal=0\r\n monthcount = len(month)\r\n nettotal = sum(int(p) for p in profit)\r\n \r\n# To start the iteration again\r\n budget_csv.seek(0)\r\n# skip the header row again\r\n next(budget_file)\r\n def averagechange(profit):\r\n # The average of the changes in \"Profit/Losses\" over the entire period\r\n # Total change equals to (2nd value-1st value)+(3rd value-2nd value)+...+(last value-second last value)) equals to (last value-first value)\r\n # Total times of change equals to (month count -1), e.g. with in 2 months, there is 1 change\r\n lastvalue=float(profit[-1])\r\n firstvalue=float(profit[0])\r\n average=round((float(lastvalue-firstvalue))/(float(monthcount)-1),2)\r\n return average\r\n\r\n# To start the iteration again\r\n budget_csv.seek(0)\r\n# skip the header row again\r\n next(budget_file)\r\n# Creat a list to record increase/decrease in profit/loss\r\n listOfChanges = []\r\n#Calculate each period's change and add it to the list \r\n for p in range(len(profit)-1):\r\n def changes(profit):\r\n diff = int(profit[p+1])-int(profit[p])\r\n return diff\r\n listOfChanges.append(changes(profit)) \r\n\r\n# The greatest increase in profits (date and amount) over the entire period\r\n max_profit = max(listOfChanges)\r\n max_index=listOfChanges.index(max_profit)\r\n max_month=month[max_index+1]\r\n \r\n# The greatest decrease in losses (date and amount) over the entire period\r\n min_loss = min(listOfChanges)\r\n min_index=listOfChanges.index(min_loss)\r\n min_month=month[min_index+1]\r\n\r\n# Print out the results\r\n print (\"Financial Analysis\")\r\n print (\"----------------------------\")\r\n print (f'Total months: {monthcount}') \r\n print (f'Total: ${nettotal}')\r\n print (f'Average change: ${averagechange(profit)}') \r\n print (f'Greatest Increase in Profits: {max_month} (${max_profit})')\r\n print (f'Greatest Decrease in Profits: {min_month} (${min_loss})')\r\n \r\n \r\n# Specify the file to write to\r\noutput_path = os.path.join(\"AnalysisResult.txt\")\r\nwith open(output_path, 'w') as AnalysisResult:\r\n \r\n # Write contents\r\n print (\"Financial Analysis\", file=AnalysisResult)\r\n print (\"----------------------------\",file=AnalysisResult)\r\n print (f'Total months: {monthcount}',file=AnalysisResult) \r\n print (f'Total: ${nettotal}',file=AnalysisResult)\r\n print (f'Average change: {averagechange(profit)}',file=AnalysisResult) \r\n print (f'Greatest Increase in Profits: {max_month} (${max_profit})',file=AnalysisResult)\r\n print (f'Greatest Decrease in Profits: {min_month} (${min_loss})',file=AnalysisResult)\r\n\r\n \r\n\r\n","sub_path":"Python 
Homework/PyBank.py","file_name":"PyBank.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506462421","text":"#!/usr/bin/python3\nif __name__ == '__main__':\n import sys\n l = sys.argv\n size = len(l) - 1\n ar = \"argument\" if size is 1 else \"arguments\"\n dot = \".\" if size is 0 else \":\"\n print(\"{} {}\".format(size, ar + dot))\n for i in range(1, size + 1):\n print(\"{}: {}\".format(i, l[i]))\n","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634384632","text":"import operator\nfrom folium import Map, Marker, Icon, PolyLine, CircleMarker\nimport user_data\nimport kmeans\n\nclass MobilityMap:\n\n def __init__ (self, geopoints):\n self.geopoints = geopoints\n self.user_map = Map(geopoints[0], zoom_start=7)\n self.cluster_map = Map(geopoints[0], zoom_start=12)\n self.network_map = Map(geopoints[0], zoom_start=7)\n\n def map_points(self, color):\n for p in self.geopoints:\n marker = Marker(p)\n Icon(color=color).add_to(marker)\n marker.add_to(self.user_map)\n self.user_map.add_children(PolyLine(self.geopoints, color=color, weight=2, opacity=2))\n\n def map_clusters(self, interval_points, colors):\n for i in range(len(colors)):\n for point in interval_points[i]:\n marker = Marker(point)\n Icon(color=colors[i]).add_to(marker)\n marker.add_to(self.cluster_map)\n\n def circle_clusters(self, cluster_points, colors, radii, centroids):\n for i in range(len(cluster_points)):\n for point in cluster_points[i]:\n marker = Marker(point)\n Icon(color=colors[i]).add_to(marker)\n marker.add_to(self.network_map)\n\n CircleMarker(centroids[i], radius=1001*radii[i], fill_color=colors[i], fill_opacity=0.2).add_to(self.network_map)\n\n def draw_lines(self, transitions_frequency, centroids):\n for i in range(len(transitions_frequency.keys())):\n destinations_frequency = transitions_frequency[i]\n for j in range(len(destinations_frequency)):\n line = PolyLine((centroids[i], centroids[j]), color='orange', weight=destinations_frequency[j]*2, opacity=destinations_frequency[j])\n self.network_map.add_children(line)\n\n\n# ------------------------------------------------------------------------\n## For Q1 - Visualizing general mobility patterns of individual users\n\nuser158_geopoints = user_data.user158.geopoints\nuser158_map = MobilityMap(user158_geopoints)\nuser158_map.map_points('red')\n\nuser211_geopoints = user_data.user211.geopoints\nuser211_map = MobilityMap(user211_geopoints)\nuser211_map.map_points('orange')\n\nuser534_geopoints = user_data.user534.geopoints\nuser534_map = MobilityMap(user534_geopoints)\nuser534_map.map_points('purple')\n\n# To visualize, run --> user158_map.user_map\n# --> user211_map.user_map\n# --> user534_map.user_map\n# -------------------------------------------------------------------------\n\n\n# -------------------------------------------------------------------------\n## For Q3 - Mapping the KMeans Algorithm\n\n# def helper_method(time_clusters, user_map_instant):\n# first_time_interval = time_clusters[0]\n# first_colors = ['pink', 'red', 'darkred']\n\n# second_time_interval = time_clusters[1]\n# second_colors = ['lightgreen', 'green', 'darkgreen']\n\n# third_time_interval = time_clusters[2]\n# third_colors = ['lightblue', 'blue', 'darkblue']\n\n# user_map_instant.map_clusters(first_time_interval, 
first_colors)\n# user_map_instant.map_clusters(second_time_interval, second_colors)\n# user_map_instant.map_clusters(third_time_interval, third_colors)\n\n# user158_time_clusters = kmeans.user158_time_clusters\n# user211_time_clusters = kmeans.user211_time_clusters\n# user534_time_clusters = kmeans.user534_time_clusters\n\n# helper_method(user158_time_clusters, user158_map)\n# helper_method(user211_time_clusters, user211_map)\n# helper_method(user534_time_clusters, user534_map)\n\n# To visualize, --> user158_map.cluster_map\n# --> user211_map.cluster_map\n# --> user534_map.cluster_map\n\n# ------------------------------------------------------------------------\n\n\n\n# ---------------------------------------------------------------------------\n## For Q7 - Spatial Network Visualizing\n\n# - User158 -\n# user158_cluster_points = kmeans.user158_cluster_points\n# user158_colors = ['red', 'green', 'blue']\n# user158_centroids = kmeans.user158_km.centroids\n# user158_radii = kmeans.user158_km.maximum_radii\n# user158_transitions_frequency = kmeans.user158_transitions_frequency\n# user158_map.circle_clusters(user158_cluster_points, user158_colors, user158_radii, user158_centroids)\n# user158_map.draw_lines(user158_transitions_frequency, user158_centroids)\n\n# - User211 -\n# user211_cluster_points = kmeans.user211_cluster_points\n# user211_colors = ['orange', 'darkgreen', 'purple']\n# user211_centroids = kmeans.user211_km.centroids\n# user211_radii = kmeans.user211_km.maximum_radii\n# user211_transitions_frequency = kmeans.user211_transitions_frequency\n# user211_map.circle_clusters(user211_cluster_points, user211_colors, user211_radii, user211_centroids)\n# user211_map.draw_lines(user211_transitions_frequency, user211_centroids)\n\n# - User534 -\n# user534_cluster_points = kmeans.user534_cluster_points\n# user534_colors = ['darkorange', 'darkblue', 'lightgreen']\n# user534_centroids = kmeans.user534_km.centroids\n# user534_radii = kmeans.user534_km.maximum_radii\n# user534_transitions_frequency = kmeans.user534_transitions_frequency\n# user534_map.circle_clusters(user534_cluster_points, user534_colors, user534_radii, user534_centroids)\n# user534_map.draw_lines(user534_transitions_frequency, user534_centroids)\n\n# ----------------------------------------------------------------------------\n\n","sub_path":"mobility_map.py","file_name":"mobility_map.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227421769","text":"class Solution:\r\n def maxSubArray(self, nums):\r\n dp = [0 for i in range(len(nums))]\r\n if not nums:\r\n return 0\r\n dp[0] = nums[0]\r\n for k in range(1, len(nums)):\r\n dp[k] = max(nums[k], nums[k] + dp[k-1])\r\n print(dp)\r\n return max(dp)\r\n\r\n\r\nnums = [-2,1,-3,4,-1,2,1,-5,4]\r\nprint(Solution().maxSubArray(nums))\r\n","sub_path":"Leetcode_math_double/动态规划总结_1.py","file_name":"动态规划总结_1.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324637453","text":"from rlberry.envs.tests.test_env_seeding import get_env_trajectory, compare_trajectories\nfrom rlberry.envs import gym_make\nfrom rlberry.envs.classic_control import MountainCar\nfrom rlberry.stats import AgentStats, MultipleStats\nfrom rlberry.agents.kernel_based import RSUCBVIAgent\nfrom rlberry.agents.torch import A2CAgent\nimport gym\nimport pytest\n\n\n@pytest.mark.parametrize(\"env, agent_class\",\n [\n 
(MountainCar(), RSUCBVIAgent),\n ((gym_make, {'env_name': 'MountainCar-v0'}), RSUCBVIAgent),\n ((gym.make, {'id': 'MountainCar-v0'}), RSUCBVIAgent),\n (MountainCar(), A2CAgent),\n ((gym_make, {'env_name': 'MountainCar-v0'}), A2CAgent),\n ((gym.make, {'id': 'MountainCar-v0'}), A2CAgent)\n ])\ndef test_agent_stats_and_multiple_stats_seeding(env, agent_class):\n agent_stats = AgentStats(agent_class,\n env,\n init_kwargs={'n_episodes': 2, 'horizon': 10},\n n_fit=6,\n seed=3456)\n agent_stats_test = AgentStats(agent_class,\n env,\n init_kwargs={'n_episodes': 2, 'horizon': 10},\n n_fit=6,\n seed=3456)\n\n mstats = MultipleStats()\n mstats.append(agent_stats)\n mstats.append(agent_stats_test)\n mstats.run()\n\n stats1, stats2 = mstats.allstats\n\n for ii in range(2, agent_stats.n_fit):\n traj1 = get_env_trajectory(stats1.fitted_agents[ii-2].env, horizon=10)\n traj2 = get_env_trajectory(stats1.fitted_agents[ii-1].env, horizon=10)\n traj3 = get_env_trajectory(stats1.fitted_agents[ii].env, horizon=10)\n\n traj1_test = get_env_trajectory(stats2.fitted_agents[ii-2].env, horizon=10)\n traj2_test = get_env_trajectory(stats2.fitted_agents[ii-1].env, horizon=10)\n traj3_test = get_env_trajectory(stats2.fitted_agents[ii].env, horizon=10)\n\n assert not compare_trajectories(traj1, traj2)\n assert not compare_trajectories(traj1, traj3)\n assert not compare_trajectories(traj2, traj3)\n assert compare_trajectories(traj1, traj1_test)\n assert compare_trajectories(traj2, traj2_test)\n assert compare_trajectories(traj3, traj3_test)\n\n for ii in range(2, agent_stats.n_fit):\n rand1 = stats1.fitted_agents[ii-2].seeder.rng.integers(2**32)\n rand2 = stats1.fitted_agents[ii-1].seeder.rng.integers(2**32)\n rand3 = stats1.fitted_agents[ii].seeder.rng.integers(2**32)\n\n rand1_test = stats2.fitted_agents[ii-2].seeder.rng.integers(2**32)\n rand2_test = stats2.fitted_agents[ii-1].seeder.rng.integers(2**32)\n rand3_test = stats2.fitted_agents[ii].seeder.rng.integers(2**32)\n\n assert rand1 != rand2\n assert rand1 != rand3\n assert rand2 != rand3\n assert rand1 == rand1_test\n assert rand2 == rand2_test\n assert rand3 == rand3_test\n","sub_path":"rlberry/stats/tests/test_agent_stats_seeding.py","file_name":"test_agent_stats_seeding.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27258004","text":"import socket\nimport struct\nimport typing\n\nfrom modbus_tk import defines as cst\nfrom modbus_tk.exceptions import ModbusError\nfrom modbus_tk.modbus_tcp import TcpMaster\n\nfrom Agreement.FQ.skio import exception\nfrom Agreement.FQ.skio.define import IDev, IVar, T_Val, ValType, SigType\nfrom utils.WorkModels import PointModel\n\n_T_OUTPUT_VALUE = typing.Union[typing.Tuple[int], typing.List[int], int]\n\n\nclass SmPXIDev(IDev, TcpMaster):\n _slave: int = 1\n\n def __init__(self):\n IDev.__init__(self)\n TcpMaster.__init__(self)\n\n def setup(self, uri):\n host, port = uri.split(':')\n self._host = host\n self._port = int(port)\n\n def write(self, var: IVar, value: T_Val) -> T_Val:\n try:\n if var.val_type == ValType.B1:\n value = int(value)\n value = 1 if value > 0 else 0\n elif var.val_type in (ValType.F32, ValType.D64):\n value = float(value)\n else:\n value = int(value)\n\n # TODO: 应用在三门 PMS 项目\n # * 电流给百分比,\n # * 电压给百分比,\n # * 电阻输出 U=IR=0.000476*R 转换为百分比,\n # * 频率输出 HZ\n f_code, address, length = map(int, var.uri.split(':'))\n if var.eu == 'amps':\n value = float(value)\n value = (value - var.rlo) / (var.rhi - var.rlo)\n 
elif var.eu == 'volts':\n value = float(value)\n value = (value - var.rlo) / (var.rhi - var.rlo)\n elif var.eu == 'OHMS':\n value = float(value) * 0.000476 # TODO: A magic number\n value = (value - var.elo) / (var.ehi - var.elo)\n elif var.eu == 'HZ':\n value = float(value)\n except (TypeError, ValueError):\n raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')\n\n if f_code == cst.COILS:\n value = 1 if int(value) > 0 else 0\n self._cmd(f_code=cst.WRITE_SINGLE_COIL, address=address, output_value=value)\n return value\n elif f_code == cst.HOLDING_REGISTERS:\n if var.val_type == ValType.D64:\n output_value = struct.unpack(' T_Val:\n try:\n f_code, address, length = map(int, var.uri.split(':'))\n except TypeError:\n raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')\n\n data = self._cmd(f_code=f_code, address=address, quantity_of_x=length)\n if var.val_type == ValType.D64:\n return struct.unpack(' _T_OUTPUT_VALUE:\n try:\n return self.execute(\n self._slave,\n f_code,\n address,\n quantity_of_x=quantity_of_x,\n output_value=output_value,\n expected_length=expected_length\n )\n except (socket.timeout, ConnectionRefusedError, ConnectionResetError, OSError) as e:\n raise exception.SkError(exception.NETWORK_ERROR, e)\n except ModbusError as e:\n raise exception.SkError(exception.PROTOCOL_ERROR, e)\n\n\nclass SmHSLDev(SmPXIDev):\n data = []\n\n def setup(self, uri):\n super(SmHSLDev, self).setup(uri)\n\n self.data.clear()\n for model in PointModel.filter(PointModel.sig_type == SigType.HSL_BEAT.name):\n vt, length = model.val_type.split('*')\n vt, length = ValType[vt], int(length)\n beat = int(model.initial)\n\n data = [0] * length\n data[0] = beat\n self.data.append((model, data))\n\n def write(self, var: IVar, value: T_Val) -> T_Val:\n if var.sig_type == SigType.HSL_BEAT:\n for model, data in self.data:\n if model.sig_name == var.name:\n value = super(SmHSLDev, self).write(var, value)\n data[0] = value\n return value\n elif var.sig_type == SigType.HSLO:\n try:\n port, index, bit = var.uri.split(':')\n port, index, bit = int(port), int(index), int(bit)\n except TypeError:\n raise exception.SkError(exception.VAR_CFG_ERROR, f'{var} config error')\n\n value = self.__set(port, index, bit, flag=value)\n self.__push(port)\n return value\n else:\n raise exception.SkError(exception.UNSUPPORTED_TYPE,\n f'{self.__class__.__name__} not support {var.val_type}')\n\n def read(self, var: IVar) -> T_Val:\n raise exception.SkError(exception.UNSUPPORTED_TYPE,\n f'{self.__class__.__name__} not support {var.val_type}')\n\n def __set(self, port, index, bit, flag):\n try:\n flag = int(flag)\n flag = 1 if flag > 0 else 0\n except ValueError:\n flag = 0\n if 0 <= bit < 16:\n bit = bit + 16\n else:\n bit = bit - 16\n\n offset = port // 2 + 1 + index\n _, data = self.data[port // 2]\n byte = data[offset]\n op = 1\n assert 0 <= bit < 32\n op <<= bit\n if flag == 1:\n byte = byte | op\n if flag == 0:\n byte = byte & ~op\n data[offset] = byte\n return flag\n\n def __push(self, port):\n var, data = self.data[port // 2]\n _, address, _ = var.uri.split(':')\n address = int(address)\n\n output_value = struct.unpack(\n f'<{len(data) * 2}H',\n struct.pack(f'<{len(data)}I', *data)\n )\n self._cmd(f_code=cst.WRITE_MULTIPLE_REGISTERS, address=address, output_value=output_value)\n","sub_path":"Agreement/FQ/skio/protocol/pms/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"170444034","text":"import datetime\nimport calendar\nimport os\nimport csv\nfrom xlsxwriter.workbook import Workbook\n\nfrom django.conf import settings\n\n\nclass ReportGenerator(object):\n\n def __init__(self, file_name, border=False):\n self.file_name = file_name\n self.path_to_save = settings.STATIC_ROOT + \"/\" + file_name\n self.workbook = Workbook(self.path_to_save, {'constant_memory': True})\n self.row_num = 0\n\n # define style formats for header and data\n self.header_format = self.workbook.add_format()\n self.header_format.set_bg_color('yellow')\n self.header_format.set_bold()\n\n self.plain_format = self.workbook.add_format()\n if border:\n self.header_format.set_border()\n self.plain_format.set_border()\n\n # add a worksheet and set excel sheet column headers\n self.sheet = self.workbook.add_worksheet()\n\n def write_details(self, *args):\n \"\"\" used to write report details like report name, date of report\"\"\"\n for arg in args:\n self.sheet.write(self.row_num, 5, arg, self.header_format)\n self.row_num += 1\n self.row_num += 1\n\n def write_header(self, col_heads):\n \"\"\" write report headers \"\"\"\n col_count = len(col_heads)\n self.sheet.set_column(self.row_num, col_count, 12) # set column width\n # add filename and set save file path\n for col, name in enumerate(col_heads):\n self.sheet.write(self.row_num, col, name, self.header_format)\n self.row_num += 1\n\n def removeNonAscii(self, str):\n return \"\".join(i for i in str if ord(i)<128)\n\n def manual_sheet_close(self):\n self.workbook.close()\n return self.file_name\n\n def write_body(self, body):\n \"\"\" used to write the body of the report\"\"\"\n for row in body:\n for col, val in enumerate(row):\n value = val.encode('ascii', 'ignore') if isinstance(val, str) else str(val)\n self.sheet.write_string(self.row_num, col, value) #, self.plain_format)\n #self.sheet.write(self.row_num, col, value, self.plain_format)\n self.row_num += 1\n\n self.workbook.close()\n return self.file_name\n\n def write_matrix(self, body):\n \"\"\" used to write the body of the report\"\"\"\n for row in body:\n for col, val in enumerate(row):\n value = val.encode('ascii', 'ignore') if isinstance(val, str) else str(val)\n self.sheet.write_string(self.row_num, col, value) #, self.plain_format)\n #self.sheet.write(self.row_num, col, value, self.plain_format)\n self.row_num += 1\n\n def write_row(self, row):\n for col, val in enumerate(row):\n value = val.encode('ascii', 'ignore') if isinstance(val, str) else str(val)\n #self.sheet.write(self.row_num, col, val, self.plain_format)\n self.sheet.write_string(self.row_num, col, value)\n self.row_num += 1\n\n def current_month_range(self):\n month = datetime.date.today().month\n year = datetime.date.today().year\n day = datetime.date.today().day\n\n month_range = calendar.monthrange(year, month)\n\n start_date = datetime.datetime(year, month, 1).strftime('%Y-%m-%d')\n end_date = datetime.datetime(year, month, day).strftime('%Y-%m-%d')\n return (start_date, end_date)\n\n def last_month_range(self):\n month = datetime.date.today().month - 1\n month = 12 if month == 0 else month\n\n year = datetime.date.today().year\n year = year - 1 if month == 12 else year\n\n month_range = calendar.monthrange(year, month)\n start_date = datetime.datetime(year, month, 1).strftime('%Y-%m-%d')\n end_date = datetime.datetime(year, month, month_range[1]).strftime('%Y-%m-%d')\n return (start_date, end_date)\n\n\nclass CSVReportGenerator(object):\n\n def __init__(self, file_name):\n self.file_name = 
file_name\n self.base_path = settings.FILE_UPLOAD_TEMP_DIR + '/reports/'\n self.full_path = self.base_path + self.file_name\n file_path, extension = os.path.splitext(self.full_path)\n self.compressed_file_path = file_path + '.zip'\n self.open_file = open(self.full_path, \"wb\")\n self.mywriter = csv.writer(self.open_file)\n\n def write_row(self, row):\n data = [val.encode('ascii', 'ignore') if isinstance(val, str) else str(val) for val in row]\n self.mywriter.writerow(data)\n\n def get_file_path(self):\n return self.full_path\n\n def compress_file(self):\n os.system('cd {0} && zip -j {1} {2}'.format(self.base_path, self.compressed_file_path, self.full_path))\n return self.compressed_file_path","sub_path":"hp_backend/backend/report_api.py","file_name":"report_api.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192183988","text":"from django.contrib.auth.signals import user_logged_in, user_logged_out\nfrom django.dispatch import receiver\n\nfrom .models import Session\n\n\n@receiver(user_logged_in, dispatch_uid='id_capture_login')\ndef capture_login(sender, **kwargs):\n session = Session.objects.for_request(kwargs['request'], kwargs['user'])\n session.record('login')\n\n\n@receiver(user_logged_out, dispatch_uid='id_capture_logout')\ndef capture_logout(sender, **kwargs):\n session = Session.objects.for_request(kwargs['request'], kwargs['user'])\n session.record('logout')\n","sub_path":"hs_tracking/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"437268656","text":"#encoding=utf-8\r\nimport re\r\nimport sys\r\ndefault_encoding = 'utf-8'\r\nif sys.getdefaultencoding() != default_encoding:\r\n reload(sys)\r\n sys.setdefaultencoding(default_encoding)\r\n\r\n#Note: calculating the frequency that word_x and word_y co-occur\r\n\r\nf=open(\"szguba.txt\").readlines()\r\nwordlist=open(\"uncertain_dict.txt\").readlines() #the file is saved by 'ANSI' style\r\nwordlist1=open(\"pessimist_dict.txt\").readlines() #pessimist sentiment words are saved in 'pessimist_dict.txt'.\r\ntotal = f.__len__()\r\npxy=open(\"P(x,y)1.txt\",\"w\")\r\nfor word in wordlist:\r\n word=word.strip().decode('gbk', 'utf-8')\r\n newwordlist=[]\r\n for doc in f:\r\n doc=doc.strip().decode('gbk', 'utf-8')\r\n if re.search(word,doc):\r\n newwordlist.append(doc)\r\n\r\n for word1 in wordlist1:\r\n word1=word1.strip().decode('gbk', 'utf-8')\r\n count=0\r\n for doc in newwordlist:\r\n if re.search(word1,doc):\r\n count+=1\r\n\r\n if count==0:\r\n percent = format(float(count+1) / (total+2), '.6f')\r\n else:\r\n percent = format(float(count) / total, '.6f')\r\n\r\n pxy.write(word)\r\n pxy.write(word1)\r\n pxy.write(',')\r\n pxy.write(percent)\r\n pxy.write('\\r\\n')\r\npxy.close()\r\n","sub_path":"Code_4.py","file_name":"Code_4.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49697184","text":"import random\nfrom time import time\nfrom matplotlib import pyplot as plt\n\n\n\n\ndef weighted_choice(choices):\n total = sum(w for c, w in choices)\n r = random.uniform(0, total)\n upto = 0\n for c, w in choices:\n upto += w\n if upto >= r:\n return c\n\n assert False, \"Shouldn't get here\"\n\n\ndef draw_plot(array, filename):\n plt.clf()\n plt.plot(array)\n plt.savefig(filename)\n\n\nt = time()\n\n\ndef timestamp(i, 
delta):\n    global t\n    dt = time() - t\n    t = time()\n    print(\"iteration {} done in {} sec, causing change in values = {}\".format(i, dt, delta))","sub_path":"Tutorial2/Gambler/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"480585157","text":"def get_data(workbook, sheet_name, ps_number,):\r\n    mastersheet = workbook.get_sheet_by_name(\"mastersheet\")\r\n    for r in range(1, sheet_name.max_row + 1):\r\n        # to check the value in column 1\r\n        if (sheet_name.cell(row=r, column=1).value == ps_number):\r\n            # to traverse through the columns\r\n            for c in range(2, sheet_name.max_column + 1):\r\n                # to get all the values\r\n                print(sheet_name.cell(row=1, column=c).value, sheet_name.cell(row=r, column=c).value)\r\n                mastersheet.append([sheet_name.cell(row=1, column=c).value, sheet_name.cell(row=r, column=c).value])\r\n    workbook.save(\"student.xlsx\")\r\n\r\nfrom openpyxl import load_workbook\r\nworkbook = load_workbook(\"student.xlsx\")\r\nsheet_list = workbook.get_sheet_names()\r\nif 'mastersheet' in workbook.sheetnames:\r\n    remove_mastersheet = workbook['mastersheet']\r\n    workbook.remove(remove_mastersheet)\r\nworkbook.create_sheet(\"mastersheet\")\r\nps_number = int(input(\"enter ps number\"))\r\nfor i in range(0, len(sheet_list)):\r\n    sheetName = workbook.get_sheet_by_name(sheet_list[i])\r\n    get_data(workbook, sheetName, ps_number)\r\n\r\n\r\n","sub_path":"practice _programs/problem_notextended.py","file_name":"problem_notextended.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512181357","text":"#import libraries\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#importing the data set\r\n\r\n\r\n\r\ndata = pd.read_csv('Data.csv')\r\n#we need to get all the independent variable data- use iloc to grab all the rows, and all but the last column\r\n\r\nX = data.iloc[:,:-1].values\r\nY = data.iloc[:, 3].values\r\n\r\n\r\n\r\n#this library allows us to deal with the missing numbers. Lots of classes and methods useful for machine learning preprocessing\r\n\r\nfrom sklearn.preprocessing import Imputer\r\n#create an object of the class, our missing values are NaN, we want to take the average of the column\r\n# and we want the column mean so axis is 0\r\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\r\n# we fit the model to the X data set, we want all the rows, and the second and 3rd column\r\nimputer = imputer.fit(X[:, 1:3])\r\n# we want to take those mean values and apply them to the NaN that are in the respective columns\r\nX[:, 1:3] = imputer.transform(X[:, 1:3]) \r\n\r\n# encoding the categorical data\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\n#create variable and call the label encoder class-- we then select the column needed \r\nLabelEncoder_X = LabelEncoder()\r\n#we apply fit_transform to all the rows and the column we need- in this case country- it will encode the categories numerically- we put X[:, 0] at the beginning because we want to put the encoded values back into the array\r\nX[:, 0]= LabelEncoder_X.fit_transform(X[:, 0])\r\nX\r\n#note- the equation would make you believe that spain and germany have a higher value than france(0). This is not true. 
We need to do dummy encoding to make them equal\r\n#call categorical_features on the categorical column(country)\r\nonehotencoder = OneHotEncoder(categorical_features=[0])\r\nX = onehotencoder.fit_transform(X).toarray()\r\nX\r\n#we apply the same logic to the Y variable (purchased). Since it is only 1 column, we do not need to specify like we did in X\r\nLabelEncoder_Y = LabelEncoder()\r\nY= LabelEncoder_Y.fit_transform(Y)\r\n\r\n\r\n# we need to split data into training and testing set. The model will learn from the training set and then test it on the testing set\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)\r\n\r\n\r\n#we need to scale our features to ensure they are evenly represented in the model. As salary is many times larger than age, we don't want that to dominate our model. \r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nsc_X = StandardScaler()\r\n# we want to fit the object to the training set, and then transform it\r\nX_train = sc_X.fit_transform(X_train)\r\n# we don't need to fit on the test set because it is already fit in the training set\r\nX_test = sc_X.transform(X_test)\r\n#you don't necessarily need to scale your dummy variables, although in certain circumstances you may need/want to\r\n# we don't need to scale our y value because it is categorical and a value between 0-1. In some regression models and others, you will need to scale your Y as well. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Data Preprocessing/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"398138978","text":"import random\n\nprint(\"Name please\")\nname = input()\nprint(\"Welcome! \" + name + \"! 
This is an RPS game!\" + \" Choose one of them!\")\n\nchoices = ['R', 'P', 'S']\ncomputer = random.choice(choices)\nplayer = input('R,P,S!:')\n\nprint('computer had', computer)\n\nif player == computer:\n    print(\"Draw\")\nelif (player=='S' and computer=='P' or \n    player=='R' and computer=='S' or\n    player=='P' and computer=='R'):\n    print('Win')\nelse:\n    print(\"Lose\")","sub_path":"Rock_paper_scissor.py","file_name":"Rock_paper_scissor.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"139059600","text":"import sys\nimport copy\nimport numpy as np\n\nclass Read_field:\n    def __init__(self, input_file):\n        #Open FIELD file\n        self.field_file = open(input_file, 'r')\n\n        #extract lines of FIELD file\n        self.lines = self.field_file.readlines()\n\n        #strip linebreak from strings in lines\n        self.lines = [line.rstrip('\\n') for line in self.lines]\n        self.lines = [line.split() for line in self.lines]\n\n        #identify number of atom types\n        for line in self.lines:\n            for i in range(len(line)):\n                if (line[i] == \"molecular\") and (line[i+1] == \"types\"):\n                    self.numatomtypes = int(line[i+2])\n        #identify element types and number of each element type\n        self.atomlist=[]\n        for i in range(len(self.lines)):\n            if len(self.lines[i]) == 0:\n                pass\n            else:\n                if (self.lines[i][0] == \"nummols\"):\n                    atomprops=[]\n                    atomprops.append(self.lines[i+2][0]) #append atom type\n                    atomprops.append(int(self.lines[i][1])) #append nummols\n                    self.atomlist.append(atomprops)\n    \nfield=Read_field(\"FIELD\")\nprint(field.numatomtypes)\nprint(field.atomlist)\n\ntotnum = 0\n\nfor atom in field.atomlist:\n    totnum = totnum + atom[1]\n\nprint(totnum)\n\nf=open(\"ICOORD\", \"r\")\n\nf.readline()\nf.readline()\n\nicoord = np.empty((totnum+1,30),dtype=object)\nqn_list = np.zeros((totnum+1,1),dtype=int)\nqn_dist = np.zeros(6,dtype=int)\nfor i in range(totnum):\n    line=f.readline().split()\n    gnum=int(line[0])\n    icoord[gnum][0] = line[1]\n    icoord[gnum][1] = int(line[2])\n    for j in range(int(line[2])):\n        icoord[gnum][2+j]=int(line[3+j])\nfor i in range(1,totnum+1):\n    if (icoord[i][0] != 'Si'):\n        continue\n    qcount = 0 \n    coord = icoord[i][1]\n    for j in range(coord):\n        bnum = icoord[i][2+j]\n        if icoord[bnum][0] == 'O':\n            for k in range(icoord[bnum][1]):\n                cnum = icoord[bnum][2+k]\n                if (cnum != i) and (icoord[cnum][0] == 'Si'):\n                    qcount=qcount+1\n    qn_list[i]=qcount\n    qn_dist[qcount]=qn_dist[qcount]+1 \n\nprint(icoord[1])\nprint(icoord[1540])\nprint(icoord[194])\n\nprint(qn_list[54])\nprint(qn_dist)\n\nqnout = open(\"qn_dist.out\", \"w\")\n\nfor i in range(len(qn_dist)):\n    info=\"{:2d} {:10d}\\n\".format(i,qn_dist[i])\n    qnout.write(info)\n\n##Open rdf_all.dat produced by bash script from RDFDAT\n#rdf_file = open(\"rdf_all.dat\",'r')\n#lines=rdf_file.readlines()\n#lines = [line.split() for line in lines]\n#rdf_file.close()\n#\n#pair_atoms=[]\n#curpair=['','']\n#cnt=0\n#for atom in lines[0]:\n#    if cnt==0 and atom!=\"#\":\n#        curpair[0]=atom\n#        cnt=1\n#    elif cnt==1:\n#        curpair[1]=atom\n#        pair_atoms.append(curpair.copy())\n#        cnt=0\n#\n#Natom = copy.deepcopy(field.atomlist)\n##Define total number of atoms\n#Ntot=0\n#for i in range(len(Natom)):\n#    Ntot=Ntot+Natom[i][1]\n#\n##Define c1c2b1b2 for each pair\n#coln=[]\n#\n#for pair in pair_atoms:\n#    for element in Natom:\n#        if pair[0]==element[0]:\n#            N1=element[1]\n#        if pair[1]==element[0]:\n#            N2=element[1]\n#    if pair[0]==pair[1]:\n#        A=1.0\n#    if pair[0]!=pair[1]:\n#        A=2.0\n#    c1c2=A*float(N1*N2)/(float(Ntot)*float(Ntot))\n#    
for element in listb:\n# if pair[0]==element[0]:\n# b1=element[1]\n# if pair[1]==element[0]:\n# b2=element[1]\n# b1b2= b1*b2\n# coln.append(c1c2*b1b2)\n#\n##Define Gr as Sum_ij (c_i*c_j*b_i*b_j*(g_ij(r)-1)) \n#\n#TotGr=[]\n#\n#for i in range(1,len(lines)):\n# Gr=0.0\n# for j in range(1,len(lines[i])):\n# Gr=Gr+(float(lines[i][j])-1.0)*coln[(j-1)]\n# TotGr.append([float(lines[i][0]),Gr])\n#\n#outfile=open('Grtot.dat','w')\n#\n#for rpos in TotGr:\n# info=\"%16.8f %16.12f\\n\" % (rpos[0],rpos[1])\n# outfile.write(info)\n#\n#outfile.close()\n#\n##Define normalisation term for Gdash where Gdash-1=Gr/(Sum_ijc_i*b_i)^2 from Keen JAC (2000)\n#\n#sumbici = 0.0\n#\n#for atomspec in Natom:\n# for element in listb:\n# if atomspec[0]==element[0]:\n# bici=(float(atomspec[1])/float(Ntot))*element[1]\n# sumbici = sumbici + bici\n# \n#normfactor=sumbici**(-2)\n#print(\"(Sum_i bi*ci)^-2 = %16.8f\" % (normfactor))\n#Grdash = copy.deepcopy(TotGr)\n#for i in range(len(TotGr)):\n# Grdash[i][1]=(TotGr[i][1]*normfactor)+1.0\n#\n#outfile2=open('Grdash.dat','w')\n#\n#for rpos in Grdash:\n# info=\"%16.8f %16.12f\\n\" % (float(rpos[0]),rpos[1])\n# outfile2.write(info)\n#\n#outfile2.close()\n#\n##grdata=pd.read_csv(\"Grtot.dat\",header=None,delim_whitespace=True)\n#gr=np.array(TotGr)\n#gr=np.transpose(gr)\n#print(gr)\n#rlist=gr[0]\n#Gr=gr[1]\n#\n#Qlist=np.arange(0.1,45.1,0.05)\n#\n#QiQlist=[]\n#FQlist=[]\n#SQlist=[]\n#\n#for Q in Qlist:\n# QiQlist.append(QiQ(Q,Gr,rlist,rho))\n# FQlist.append(QiQ(Q,Gr,rlist,rho)/Q)\n# SQlist.append((normfactor*QiQ(Q,Gr,rlist,rho)/Q)+1.0)\n#\n#Qoutfile=open('Qi_tot.dat','w')\n#FQoutfile=open('FQ_tot.dat','w')\n#SQoutfile=open('SQ_tot.dat','w')\n#\n#\n#for i in range(len(Qlist)):\n# info=\"%16.8f %16.12f\\n\" % (Qlist[i],QiQlist[i])\n# Qoutfile.write(info)\n#\n#Qoutfile.close()\n#\n#for i in range(len(Qlist)):\n# info=\"%16.8f %16.12f\\n\" % (Qlist[i],FQlist[i])\n# FQoutfile.write(info)\n#\n#FQoutfile.close()\n#\n#for i in range(len(Qlist)):\n# info=\"%16.8f %16.12f\\n\" % (Qlist[i],SQlist[i])\n# SQoutfile.write(info)\n#\n#SQoutfile.close()\n","sub_path":"qn_dist/qn_dist.py","file_name":"qn_dist.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14811231","text":"import logging\n\nfrom flask_restplus import fields as restplus_fields\nfrom flask_restplus.utils import merge\n\nfrom catalant.api.core.schema import ma\n\n_logger = logging.getLogger(__name__)\n\n\nclass APIModelProxy(object):\n \"\"\"A proxy for a Swagger API model class.\n Contains the name of the Swagger model.\n \"\"\"\n def __init__(self, name):\n self.__apidoc__ = dict(name=name)\n\n\ndef create_restplus_fields_from_marshmallow_schema(schema):\n \"\"\"Create a dict of field names to Restplus fields from a Marshmallow schema.\n :param schema: A Marshmallow :class:`Schema` instance.\n :return: A dict of field names to Restplus fields.\n \"\"\"\n return {k: create_restplus_field_from_marshmallow_field(v) for k, v in schema._update_fields().iteritems()}\n\n\ndef create_restplus_field_from_marshmallow_field(field, _nested_field_name=None):\n \"\"\"Create a Restplus field from a Marshmallow field.\n The Restplus field contains the API documentation and metadata necessary to generate the Swagger documentation.\n :param field: A Marshmallow :class:`Field` instance.\n :param _nested_field_name: A derived name for the nested field -- for internal use only.\n :return: A Restplus field derived from its base `Raw` class.\n \"\"\"\n def 
field_kwargs(f, *attrs):\n kwargs = {}\n for attr in attrs:\n if isinstance(attr, tuple):\n a, k = attr\n else:\n a = k = attr\n try:\n value = getattr(f, a)\n except AttributeError:\n value = f.metadata.get(a)\n kwargs[k] = value\n return kwargs\n\n def merge_apidoc(from_field, to_field):\n if hasattr(from_field, '__apidoc__'):\n to_field.__apidoc__ = merge(getattr(to_field, '__apidoc__', {}), from_field.__apidoc__)\n\n if not hasattr(field, '__apidoc__'):\n field.__apidoc__ = {}\n\n restplus_field = None\n restplus_cls = None\n restplus_attrs = ['attribute', 'default', 'description', 'required'] # default attrs\n\n if isinstance(field, ma.Arbitrary):\n restplus_cls = restplus_fields.Arbitrary\n restplus_attrs += ['min', 'max']\n elif isinstance(field, (ma.ExternalId, ma.Html)):\n restplus_cls = restplus_fields.String\n elif isinstance(field, (ma.Select, ma.Enum)):\n restplus_cls = restplus_fields.String\n restplus_attrs += [('choices', 'enum')]\n elif isinstance(field, ma.Fixed):\n restplus_cls = restplus_fields.Fixed\n restplus_attrs += ('min', 'max', 'decimals')\n elif isinstance(field, ma.Float):\n restplus_cls = restplus_fields.Float\n restplus_attrs += ['min', 'max']\n elif isinstance(field, ma.FormattedString):\n restplus_cls = restplus_fields.FormattedString\n restplus_attrs += ['min', 'max', 'src_str']\n elif isinstance(field, ma.Integer):\n restplus_cls = restplus_fields.Integer\n restplus_attrs += ['min', 'max']\n elif isinstance(field, ma.List):\n container_field = field.container\n merge_apidoc(field, container_field)\n restplus_field = restplus_fields.List(\n create_restplus_field_from_marshmallow_field(container_field),\n **field_kwargs(field, *restplus_attrs))\n elif isinstance(field, ma.Nested):\n nested = field.nested\n # Marshmallow supports self-referential nested fields by using the `self` identifier.\n if nested == 'self':\n # In this case, the nested field's schema class will be the same as its parent class\n nested = field.parent.__class__\n _nested_field_name = _nested_field_name or nested.resolve_schema_name()\n\n # Support Swagger array type documentation\n field.__apidoc__ = merge(field.__apidoc__, {'as_list': field.many})\n\n nested_proxy = APIModelProxy(_nested_field_name or field.schema_name)\n restplus_cls = restplus_fields.Nested\n restplus_attrs = ('attribute', 'default', 'allow_null') # set attrs explicitly\n restplus_field = restplus_fields.Nested(nested=nested_proxy, **field_kwargs(field, *restplus_attrs))\n elif isinstance(field, ma.Raw):\n restplus_cls = restplus_fields.Raw\n elif isinstance(field, ma.Url):\n restplus_cls = restplus_fields.Url\n restplus_attrs += ('endpoint', 'absolute', 'scheme')\n elif isinstance(field, (ma.Function, ma.Method)):\n if field.return_field is not None:\n return_field = field.return_field\n merge_apidoc(field, return_field)\n _nested_field_name = field.return_field.schema.resolve_schema_name(field.schema_view) \\\n if isinstance(field.return_field, ma.Nested) else None\n return create_restplus_field_from_marshmallow_field(return_field, _nested_field_name)\n restplus_cls = restplus_fields.Raw\n else:\n try:\n field_name = field.__class__.__name__\n restplus_cls = getattr(restplus_fields, field_name)\n except AttributeError:\n # logger.warn(\n # \"Unable to convert Marshmallow field to Restplus field. 
Unsupported type: {}\".format(field_name))\n restplus_cls = restplus_fields.String\n\n if restplus_field is None:\n restplus_field = restplus_cls(**field_kwargs(field, *restplus_attrs)) if restplus_cls else None\n\n # Denote deprecated fields\n if 'deprecated' in field.metadata:\n field.__apidoc__['deprecated'] = field.metadata['deprecated']\n\n # Include additional documentation metadata based on field type\n if isinstance(field, ma.Enum):\n field.__apidoc__['enum_cls'] = field.enum_cls\n elif isinstance(field, ma.Html):\n field.__apidoc__['has_html'] = True\n\n # Copy Swagger documentation over to restplus field\n if hasattr(field, '__apidoc__'):\n restplus_field.__apidoc__ = field.__apidoc__\n\n return restplus_field\n","sub_path":"catalant/api/core/swagger/restplus_utils.py","file_name":"restplus_utils.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396137626","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 8 03:43:49 2017\n\n@author: shiha\n\"\"\"\n\"\"\"\nWrite a program that outputs the string representation of numbers from 1 to n.\nBut for multiples of three it should output “Fizz” instead of the number and for\nthe multiples of five output “Buzz”. For numbers which are multiples of both three \nand five output “FizzBuzz”.\n\"\"\"\nclass Solution(object):\n def fizzBuzz(self, n):\n List=[]\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n for i in range(1,n+1):\n if (i%3==0)and(i%5==0):\n List.append('FizzBuzz')\n elif(i%3==0):\n List.append('Fizz')\n elif(i%5==0):\n List.append('Buzz')\n else:\n List.append(str(i))\n return List \n \n \nif __name__ == '__main__':\n my_solution=Solution()\n n=15\n out=my_solution.fizzBuzz(n)\n print(out)\n ","sub_path":"fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"510906740","text":"x = ['veera',10,20,30]\nfor i in x:\n print(i)\n\nx ='SUNITHA'\nfor i in x:\n print(i,end=\"\") # print on the same line\n\nfor i in range(11,20,1): # range of values starting point, ending point, and range\n print(i)","sub_path":"ForloopExample.py","file_name":"ForloopExample.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52837268","text":"import sympy\nfrom sympy import *\nfrom sympy_utils.printing import * \n\na,b,c,t,d = symbols('a b c t d')\nx = symbols(\"x\")\n\n# Funnels\npsi_1 = (-a*t**2+b)*exp(-t)\npsi_1_diff = psi_1.diff(t)\npsi_1_diff2 = psi_1_diff.diff(t)\n\npsi_2 = exp(-d*t)*(-exp(-t)-c) \npsi_2_diff = psi_2.diff(t)\npsi_2_diff2 = psi_2_diff.diff(t)\n\n# Rhos\nrho_1 = 0.5*psi_1**2 - 0.5*x**2\nrho_2 = 0.5*x**2 - 0.5*psi_2**2\nrho = rho_1*rho_2\n\n# Control\nnum = rho_2*psi_1**2 + rho_1*psi_2**2\nden = rho_2*(psi_1**2*(1+d) - x**2) + rho_1*(psi_2**2*(1+d)*-x**2)\ngain = num/ den\nctl = -x *gain\n\nrho_dot = psi_1*psi_1_diff * rho_1 + psi_2*psi_2_diff*rho_2 + x*ctl*(rho_1 - rho_2)\n\n\n#print_math(\"psi_1\", psi_1)\n#print_math(\"psi_1_diff\", psi_1_diff)\n#print_math(\"psi_2\", psi_2)\n#print_math(\"psi_2_diff\", psi_2_diff)\n\n# Robustness Conditions\ncond_1b = d*psi_2*psi_2_diff + psi_1**2\ncond_2b = d*psi_1*psi_1_diff + psi_2**2\n\n#print_math(\"cond_1b\", simplify(cond_1b))\n#print_math(\"cond_2b\", simplify(cond_2b))\n\n\nprint_octave(\"psi_1\", psi_1)\nprint_octave(\"psi_1_diff\", 
psi_1_diff)\nprint_octave(\"psi_1_diff2\", psi_1_diff2)\nprint_octave(\"psi_2\", psi_2)\nprint_octave(\"psi_2_diff\", psi_2_diff)\nprint_octave(\"psi_2_diff2\", psi_2_diff2)\n\n\nprint_octave(\"gain\", gain)\nprint_octave(\"ctl\", ctl)\nprint_octave(\"rho_dot\", rho_dot)\n\n\n","sub_path":"funnels/funnel_3.py","file_name":"funnel_3.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522220567","text":"class decision_node:\n def __init__(self, column_idx=-1, value=None, results=None, true_node=None, false_node=None):\n self.column_idx = column_idx\n self.value = value\n self.results = results\n self.true_node = true_node\n self.false_node = false_node\n\n\n# 切分为两部分\ndef divide_set(rows, column, value):\n if isinstance(value, int) or isinstance(value, float):\n split_func = lambda row: row[column] >= value\n else:\n split_func = lambda row: row[column] == value\n\n set_first = [row for row in rows if split_func(row)]\n set_second = [row for row in rows if not split_func(row)]\n\n return (set_first, set_second)\n\n\n# 统计相同行的数量\ndef unique_counts(rows):\n results = {}\n for row in rows:\n temp_row = row[len(row) - 1]\n if temp_row not in results:\n results[temp_row] = 0\n results[temp_row] += 1\n return results\n\n\n# 基尼不纯度\ndef gini_impurity(rows):\n rows_num = len(rows)\n counts = unique_counts(rows)\n impurity = 0\n\n for count1 in counts:\n p1 = float(counts[count1] / rows_num)\n for count2 in counts:\n if count1 == count2:\n continue\n p2 = float(counts[count2] / rows_num)\n impurity += p1 * p2\n\n return impurity\n\n\n# 熵\ndef entropy(rows):\n from math import log\n log2 = lambda x: log(x) / log(2)\n results = unique_counts(rows)\n ent = 0.0\n rows_num = len(rows)\n for row in results.keys():\n p = float(results[row]) / rows_num\n ent -= (p * log2(p))\n return ent\n\n\n# 建立决策树\ndef build_trees(rows, score_func=entropy):\n if len(rows) == 0:\n return decision_node()\n\n current_score = score_func(rows)\n\n best_gain = 0.0\n best_criteria = None\n best_sets = None\n rows_num = len(rows)\n\n column_max_idx = len(rows[0]) - 1\n\n for column_idx in range(0, column_max_idx):\n column_values = {}\n for row in rows:\n column_values[row[column_idx]] = 1\n\n for value in column_values.keys():\n (set1, set2) = divide_set(rows, column_idx, value)\n\n # 计算信息增益\n p = len(set1) / rows_num\n gain = current_score - p * score_func(set1) - (1 - p) * score_func(set2)\n if gain > best_gain and len(set1) > 0 and len(set2) > 0:\n best_gain = gain\n best_criteria = (column_idx, value)\n best_sets = (set1, set2)\n\n if best_gain > 0:\n true_node = build_trees(best_sets[0])\n false_node = build_trees(best_sets[1])\n return decision_node(\n column_idx=best_criteria[0], value=best_criteria[1],\n true_node=true_node, false_node=false_node)\n else:\n return decision_node(results=unique_counts(rows))\n\n\ndef print_tree(tree, indent=''):\n if tree.results != None:\n print(indent + str(tree.results))\n else:\n print(indent + 'Idx(' + str(tree.column_idx) + '):', str(tree.value) + '? 
')\n print(indent + 'T->')\n print_tree(tree.true_node, indent + '\\t')\n print(indent + 'F->')\n print_tree(tree.false_node, indent + '\\t')\n\n\ndef classify(observation, tree_node):\n if tree_node.results != None:\n return tree_node.results\n else:\n v = observation[tree_node.column_idx]\n node = None\n if isinstance(v, int) or isinstance(v, float):\n if v >= tree_node.value:\n node = tree_node.true_node\n else:\n node = tree_node.false_node\n else:\n if v == tree_node.value:\n node = tree_node.true_node\n else:\n node = tree_node.false_node\n return classify(observation, node)\n","sub_path":"c7/DecisionTrees.py","file_name":"DecisionTrees.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243361485","text":"from rest_framework import serializers\n\n\nclass CommentSerializer(serializers.Serializer):\n postId = serializers.IntegerField()\n id = serializers.IntegerField()\n name = serializers.CharField()\n email = serializers.EmailField()\n body = serializers.CharField()\n\n class Meta:\n fields = ['postId', 'id', 'name', 'email', 'body']\n\n\nclass PostSerializer(serializers.Serializer):\n userId = serializers.IntegerField()\n id = serializers.IntegerField()\n title = serializers.CharField()\n body = serializers.CharField()\n comments = CommentSerializer(many=True)\n\n class Meta:\n fields = ['user_id', 'comment_id', 'title', 'body', 'comments']\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637446555","text":"#!/usr/bin/env python3\n''' simile compiler ''' \n''' by Richard A. Benson '''\nimport sys, os\nimport emit, out, util, fn, enums as e\n\nclass s:\n ''' state '''\n pass\n\ndef init (t, long_len = 8):\n if t:\n s.line_n = 0\n s.fn_cur = None\n if not emit.init (long_len):\n return False\n else:\n pass\n\n return True\n\ndef main ():\n if len (sys.argv) > 1:\n res_ = init (True, int(sys.argv [1]))\n else:\n res_ = init (True)\n if not res_:\n out.error ('init failed')\n return -1\n\n lines = sys.stdin.readlines ()\n res_ = process (lines)\n if not res_:\n out.error ('process (pass 1) failed at line ' + str(s.line_n + 1))\n return -2\n init (False)\n\n out.s.output_ = True\n if len (sys.argv) > 1:\n res_ = init (True, int(sys.argv [1]))\n else:\n res_ = init (True)\n res_ = process (lines)\n if not res_:\n out.error ('process (pass 2) failed at line ' + str(s.line_n + 1))\n return -3\n init (False)\n\n return 0\n\ndef process (ls):\n cmdd = {\n 'def': (1, 0, do_def),\n 'ret': (0, 0, do_ret),\n 'end': (0, 0, do_end),\n 'call': (1, 0, do_call),\n 'if': (1, 1, do_if),\n 'else': (0, 0, do_else),\n 'endif': (0, 0, do_endif),\n 'while': (1, 1, do_while),\n 'wend': (0, 0, do_wend),\n 'add': (1, 1, do_add),\n 'sub': (1, 1, do_sub),\n 'mul': (1, 1, do_mul),\n 'div': (1, 1, do_div),\n 'res': (1, 1, do_res),\n 'set': (2, 2, do_set),\n 'addto': (2, 2, do_addto),\n 'subfrom': (2, 2, do_subfrom),\n 'multo': (2, 2, do_multo),\n 'divfrom': (2, 2, do_divfrom)\n }\n\n for s.line_n, s_raw in enumerate(ls):\n if not s_raw:\n continue\n\n # strip beginning and ending whitespaces\n s_stp = s_raw.strip ()\n if not s_stp:\n continue\n\n # split at unquoted spaces \n s_spl = util.q_split (s_stp)\n keyword1 = s_spl [0]\n args = s_spl [1:]\n args_n = len (args)\n\n # ignore\n if keyword1[0] == '#':\n continue\n else:\n try:\n _c = cmdd [keyword1]\n 
except:\n out.error ('\"' + keyword1 + '\" keyword unknown')\n return False\n\n if args_n < _c [0]:\n out.arg_n_error (keyword1, _c [0], _c [1])\n return False\n\n elif _c [1] and args_n > _c [1]:\n out.arg_n_error (keyword1, _c [0], _c [1])\n return False\n\n if (not s.fn_cur) and keyword1 != 'def':\n out.error ('outside function')\n return False\n\n res_ = _c [2](args)\n if not res_:\n return False\n\n return True\n\ndef do_def (args):\n if s.fn_cur:\n out.error ('nested function')\n return False\n\n _fn = fn.Fn (args [0], args [1:], emit.s.long_len)\n if not _fn:\n return False\n\n s.fn_cur = _fn\n\n res_ = emit.emit (s.fn_cur, e.EMIT_DEF, args [0])\n return res_\n\ndef do_ret (args):\n if not s.fn_cur.flow_n:\n s.fn_cur.flow_ret_t = True\n if len(args):\n res_ = emit.emit (s.fn_cur, e.EMIT_RET, args [0])\n else:\n res_ = emit.emit (s.fn_cur, e.EMIT_RET, None)\n return res_\n\ndef do_end (args):\n if s.fn_cur.flow_n:\n out.error ('unclosed flow control')\n return False\n\n res_ = emit.emit (s.fn_cur, e.EMIT_END, '')\n s.fn_cur = None\n return res_\n\ndef do_call (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_CALL, args [0], val2=args [1:])\n return res_\n\ndef do_if (args):\n s.fn_cur.flow_cur += [[e.FLOW_IF, s.fn_cur.flow_if_n]]\n s.fn_cur.flow_if_n += 1\n s.fn_cur.flow_n += 1\n res_ = emit.emit (s.fn_cur, e.EMIT_IF, args [0])\n return res_\n\ndef do_else (args):\n if (not s.fn_cur.flow_n) or\\\n (s.fn_cur.flow_cur [s.fn_cur.flow_n - 1][0] != e.FLOW_IF):\n out.error ('outside flow control')\n return False\n\n res_ = emit.emit (s.fn_cur, e.EMIT_ELSE, None)\n return res_\n\ndef do_endif (args):\n if (not s.fn_cur.flow_n) or\\\n (s.fn_cur.flow_cur [s.fn_cur.flow_n - 1][0] != e.FLOW_IF):\n out.error ('outside flow control')\n return False\n\n res_ = emit.emit (s.fn_cur, e.EMIT_ENDIF, None)\n\n s.fn_cur.flow_cur.pop ()\n s.fn_cur.flow_n -= 1\n return res_\n\ndef do_while (args):\n s.fn_cur.flow_cur += [[e.FLOW_WHILE, s.fn_cur.flow_while_n]]\n s.fn_cur.flow_while_n += 1\n s.fn_cur.flow_n += 1\n res_ = emit.emit (s.fn_cur, e.EMIT_WHILE, args [0])\n return res_\n\ndef do_wend (args):\n if (not s.fn_cur.flow_n) or\\\n (s.fn_cur.flow_cur [s.fn_cur.flow_n - 1][0] != e.FLOW_WHILE):\n out.error ('outside flow control')\n return False\n\n res_ = emit.emit (s.fn_cur, e.EMIT_WEND, None)\n\n s.fn_cur.flow_cur.pop ()\n s.fn_cur.flow_n -= 1\n return res_\n\ndef do_add (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_ADD, args [0]);\n return res_\n\ndef do_sub (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_SUB, args [0]);\n return res_\n\ndef do_mul (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_MUL, args [0]);\n return res_\n\ndef do_div (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_DIV, args [0]);\n return res_\n\ndef do_res (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_RES, args [0]);\n return res_\n\ndef do_set (args): \n res_ = emit.emit (s.fn_cur, e.EMIT_SET, args [0], val2 = args [1])\n return res_\n\ndef do_addto (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_ADDTO, args [0], val2 = args [1])\n return res_\n\ndef do_subfrom (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_SUBFROM, args [0], val2 = args [1])\n return res_\n\ndef do_multo (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_MULTO, args [0], val2 = args [1])\n return res_\n\ndef do_divfrom (args):\n res_ = emit.emit (s.fn_cur, e.EMIT_DIVFROM, args [0], val2 = args [1])\n return res_\n\nif __name__ == '__main__':\n sys.exit 
(main())\n\n","sub_path":"simile.py","file_name":"simile.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"483461491","text":"# Using bit operator\n\n\ndef unique_char_bit(input_string):\n    # Initialize the checker variable\n    checker = 0\n    # Loop through each character\n    for i in range(len(input_string)):\n        # Get the ascii or unicode value\n        ascii_value = ord(input_string[i])\n        print(1 << ascii_value)\n        # If the & is greater than 0, the character is repeating, return false.\n        if (checker & 1 << ascii_value) > 0:\n            return False\n        # update the checker.\n        checker |= (1 << ascii_value)\n    return True\n\n# Driver Call\nprint(unique_char_bit(\"abcc\"))\n","sub_path":"src/arrays/unique_chars_solution_6.py","file_name":"unique_chars_solution_6.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532787907","text":"import sys\r\nimport os\r\nimport wka_utils \r\nfrom employee_database import EmployeeDB\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass TransferRateAnalysis:\r\n\r\n    def __init__(self, employee_database):\r\n        self.empDB = employee_database\r\n    \r\n    def calcTransferRateForManagerTeam(self, manager_name) -> dict:\r\n        teamDict = dict()\r\n        if manager_name in self.empDB.getManagerList():\r\n            for employee in self.empDB.getEmployeeListForManager(manager_name):\r\n                teamDict[employee] = self.calcTransferRateForEmployee(employee)\r\n            return teamDict\r\n        # if we get to here we didn't find manager name\r\n        raise Exception('Could not find manager: ' + manager_name)\r\n\r\n    def calcTransferRateForEmployee(self, employee_name) -> list:\r\n        for mgr in self.empDB.getManagerList():\r\n            if employee_name in self.empDB.getEmployeeListForManager(mgr):\r\n                # return list of 1 month, 3 month, 6 month, and 12 month xfer rates\r\n                return self.calcTransferRates(mgr, employee_name)\r\n        # if we get to here we didn't find employee name\r\n        raise Exception('Could not find employee: ' + employee_name)\r\n\r\n\r\n    ######################################\r\n    # Internal methods\r\n    ######################################\r\n\r\n    def calcTransferRates(self, manager_name, employee_name) -> list:\r\n        # transfer rate is direct charge hours + internal order hours divided by 40 (for every week included)\r\n        dateList = self.empDB.getDateList()\r\n        overhead = self.empDB.getEmployeeLaborTypeData(employee_name, wka_utils.OVERHEAD_TOTAL_LABEL)\r\n        internal_order = self.empDB.getEmployeeLaborTypeData(employee_name, wka_utils.INTERNAL_ORDER_TOTAL_LABEL)\r\n        direct_charge = self.empDB.getEmployeeLaborTypeData(employee_name, wka_utils.DIRECT_CHARGE_TOTAL_LABEL)\r\n\r\n        xferList = list()\r\n        # calc 1 month xfer rate (4 weeks)\r\n        numerator = 0.0\r\n        for i in range(-4,0):\r\n            numerator += internal_order[i] + direct_charge[i]\r\n        xferList.append(numerator/(40.0 * 4))\r\n\r\n\r\n        #calc 3 month xfer rate (12 weeks)\r\n        for i in range(-12,-4):\r\n            numerator += internal_order[i] + direct_charge[i]\r\n        xferList.append(numerator/(40.0 * 12))\r\n\r\n        #calc 6 month xfer rate (26 weeks)\r\n        for i in range(-26,-12):\r\n            numerator += internal_order[i] + direct_charge[i]\r\n        xferList.append(numerator/(40.0 * 26))\r\n        \r\n        #calc full 12 month xfer rate (52 weeks)\r\n        for i in range(-52,-26):\r\n            numerator += internal_order[i] + direct_charge[i]\r\n        xferList.append(numerator/(40.0 * 52))\r\n        \r\n        # return the list of transfer rates\r\n        return 
xferList\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n from argparse import ArgumentParser\r\n \r\n parser = ArgumentParser()\r\n parser.add_argument(\"-m\", \"--manager\", help=\"Partial manager name from CSV file\")\r\n parser.add_argument(\"-e\", \"--employee\", help=\"Partial employee name from CSV file\")\r\n parser.add_argument(\"csv_file\", help=\"CSV file with weekly actuals\")\r\n args = parser.parse_args()\r\n \r\n if args.employee and args.manager:\r\n print('Choose either manager or employee, not both')\r\n \r\n try:\r\n from wka_utils import parseWeeklyActualsCsvFile\r\n status, employeeDB = parseWeeklyActualsCsvFile(args.csv_file)\r\n if status == False:\r\n print(\"parseWeeklyActualsCsvFile() failed\")\r\n exit()\r\n xferAnalysis = TransferRateAnalysis(employeeDB)\r\n stats = None\r\n \r\n if args.manager:\r\n matchingMgrs = [s for s in employeeDB.getManagerList() if args.manager in s]\r\n for mgr in matchingMgrs:\r\n stats = xferAnalysis.calcTransferRateForManagerTeam(mgr)\r\n print(\"\\n\\nTeam: {0}:\".format(mgr))\r\n print(\"{0:<25s}{1:>11s}{2:>11s}{3:>11s}{4:>11s}\".format('Name', '1 month', '3 month', '6 month', '12 month'))\r\n print(\"-\" * 70)\r\n for emp in stats:\r\n print(\"{0:<25s}{1:>11.1f}{2:>11.1f}{3:>11.1f}{4:>11.1f}\".format(emp, (100*stats[emp][0]), (100*stats[emp][1]), (100*stats[emp][2]), (100*stats[emp][3])))\r\n else:\r\n matchingEmps = [s for s in employeeDB.getEmployeeList() if args.employee in s]\r\n print(\"\\n{0:<25s}{1:>11s}{2:>11s}{3:>11s}{4:>11s}\".format('Name', '1 month', '3 month', '6 month', '12 month'))\r\n print(\"-\" * 70)\r\n for emp in matchingEmps:\r\n stats = xferAnalysis.calcTransferRateForEmployee(emp)\r\n print(\"{0:<25s}{1:>11.1f}{2:>11.1f}{3:>11.1f}{4:>11.1f}\".format(emp, (100*stats[0]), (100*stats[1]), (100*stats[2]), (100*stats[3])))\r\n except Exception as e:\r\n print(e)\r\n","sub_path":"TransferRateTool/xfer_rate_tool.py","file_name":"xfer_rate_tool.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141630418","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport argparse\nimport os\nimport shutil\n\nSAMPLE_RATIOS = np.arange(0.1, 1.0, 0.1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_users', type=int, required=True)\n parser.add_argument('--num_items', type=int, required=True)\n parser.add_argument('--model_dir', required=True)\n args = parser.parse_args()\n\n #sed '1p;2p;3p;15p;624962q;d' item_weights.csv > foo.csv\n num_users = args.num_users\n num_items = args.num_items\n for ratio in SAMPLE_RATIOS:\n print('Running ' + str(ratio))\n\n user_output_dir = args.model_dir + '-' + str(ratio) + '-users'\n if not os.path.exists(user_output_dir):\n os.makedirs(user_output_dir)\n\n with open('%s/user_weights.csv' % args.model_dir) as infile, open(\n '%s/user_weights.csv' % user_output_dir, 'w') as outfile:\n sampled_user_ids = set(\n np.random.choice(\n num_users, int(num_users * ratio), replace=False))\n for line_num, line in enumerate(infile):\n if line_num in sampled_user_ids:\n print(line, end='', file=outfile)\n shutil.copyfile('%s/item_weights.csv' % args.model_dir,\n '%s/item_weights.csv' % user_output_dir)\n #for user_id_chunk in chunks(sampled_user_ids, 100):\n # sed_user_str = 'p;'.join(str(val) for val in user_id_chunk) + 'q;d'\n # cmd = [\n # 'sed', sed_user_str, '%s/user_weights.csv' % 
args.model_dir,\n # ]\n # print('Running' + str(cmd))\n # subprocess.call(cmd, stdout=outfile)\n\n item_output_dir = args.model_dir + '-' + str(ratio) + '-items'\n if not os.path.exists(item_output_dir):\n os.makedirs(item_output_dir)\n\n with open('%s/item_weights.csv' % args.model_dir) as infile, open(\n '%s/item_weights.csv' % item_output_dir, 'w') as outfile:\n sampled_item_ids = set(\n np.random.choice(\n num_items, int(num_items * ratio), replace=False))\n for line_num, line in enumerate(infile):\n if line_num in sampled_item_ids:\n print(line, end='', file=outfile)\n shutil.copyfile('%s/user_weights.csv' % args.model_dir,\n '%s/user_weights.csv' % item_output_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/sample_model.py","file_name":"sample_model.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84528611","text":"import os\nimport shutil\nfrom collections import namedtuple\n\nimport numpy\n\n# from ws.RLAgents.E_SelfPlay.model_mgt import model_mgt\nfrom ws.RLAgents.E_SelfPlay.model_mgt import model_mgt\nfrom ws.RLAgents.E_SelfPlay.play.greedy_player_mgt import greedy_player_mgt\nfrom ws.RLAgents.E_SelfPlay.play.animated_player_mgt import animated_player_mgt\nfrom ws.RLAgents.E_SelfPlay.play.random_player_mgt import random_player_mgt\nfrom ws.RLAgents.E_SelfPlay.train.playground_mgt import playground_mgt\nfrom ws.RLAgents.E_SelfPlay.search.monte_carlo_tree_search_mgt import monte_carlo_tree_search_mgt\nfrom ws.RLAgents.E_SelfPlay.train.training_mgt import training_mgt\nfrom ws.RLEnvironments.self_play_games.othello.game_mgt import game_mgt\n\n\nfrom ws.RLUtils.monitoring.tracing.tracer import tracer\n\ndef agent_mgt(app_info, common_functions):\n game_mgr = game_mgt(app_info.BOARD_SIZE)\n\n\n training_mgr = training_mgt(game_mgr, app_info)\n\n\n @tracer(app_info, verboscity= 4)\n def fn_train():\n training_mgr.fn_execute_training_iterations()\n return agent_mgr\n\n @tracer(app_info)\n def fn_test_against_human():\n fn_human_player_policy = lambda g: animated_player_mgt(g)\n fn_test(app_info, fn_human_player_policy, verbose=True, NUM_TEST_GAMES=2)\n return agent_mgr\n\n @tracer(app_info, verboscity= 4)\n def fn_test_against_random():\n fn_random_player_policy = lambda g: random_player_mgt(g)\n fn_test(app_info, fn_random_player_policy, NUM_TEST_GAMES=app_info.NUM_TEST_GAMES)\n return agent_mgr\n\n @tracer(app_info, verboscity= 4)\n def fn_test_against_greedy():\n fn_random_player_policy = lambda g: greedy_player_mgt(g)\n fn_test(app_info, fn_random_player_policy, NUM_TEST_GAMES=app_info.NUM_TEST_GAMES)\n return agent_mgr\n\n def fn_test(app_info, fn_player_policy, verbose=False, NUM_TEST_GAMES=2):\n system_nn = model_mgt(game_mgr, app_info.RESULTS_PATH_)\n if not system_nn.fn_load_model():\n return\n\n system_mcts = monte_carlo_tree_search_mgt(app_info, system_nn, game_mgr,)\n fn_system_policy = lambda state: numpy.argmax(system_mcts.fn_get_policy(state, do_random_selection=False))\n fn_contender_policy = fn_player_policy(game_mgr)\n playground = playground_mgt(fn_system_policy, fn_contender_policy, game_mgr,\n fn_display=game_mgt(app_info.BOARD_SIZE).fn_display,\n )\n system_wins, system_losses, draws = playground.fn_play_games(NUM_TEST_GAMES, verbose=verbose)\n\n app_info.trace_mgr.fn_write(f'wins:{system_wins} losses:{system_losses} draws:{draws}')\n\n @tracer(app_info, verboscity= 4)\n def fn_reset():\n if os.path.exists(app_info.RESULTS_PATH_):\n 
shutil.rmtree(app_info.RESULTS_PATH_)\n        return agent_mgr\n\n    agent_mgr = namedtuple('_',\n                           [\n                               'fn_reset',\n                               'fn_train',\n                               'fn_test_against_human',\n                               'fn_test_againt_random',\n                               'fn_test_against_greedy',\n                               'fn_change_args',\n                               'fn_show_args',\n                               'fn_archive_log_file',\n                               'app_info'\n                           ]\n                           )\n    agent_mgr.fn_reset = fn_reset\n    agent_mgr.fn_train = fn_train\n    agent_mgr.fn_test_against_human = fn_test_against_human\n    agent_mgr.fn_test_against_random = fn_test_against_random\n    agent_mgr.fn_test_against_greedy = fn_test_against_greedy\n    agent_mgr.fn_change_args = common_functions.fn_change_args\n    agent_mgr.fn_show_args = common_functions.fn_show_args\n    agent_mgr.fn_archive_log_file = common_functions.fn_archive_log_file\n    agent_mgr.APP_INFO = app_info\n    return agent_mgr\n","sub_path":"ws/RLAgents/E_SelfPlay/agent_mgt.py","file_name":"agent_mgt.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"320173336","text":"import base64\n\nfrom Crypto import Random\nfrom Crypto.Cipher import DES3\n\n\nclass MethodDES3:\n\n    key = b'Sixteen byte key'\n\n    def __init__(self, iv=None):\n        self.initial_vector = iv\n\n    @staticmethod\n    def encrypt_ecb(text):\n        des = DES3.new(MethodDES3.key, DES3.MODE_ECB)\n        text = pad(text, 16)\n        encrypted = des.encrypt(text)\n        return base64.b64encode(encrypted)\n\n    @staticmethod\n    def decrypt_ecb(text):\n        text = base64.b64decode(text)\n        des = DES3.new(MethodDES3.key, DES3.MODE_ECB)\n        decrypted = des.decrypt(text)\n        decrypted = remove_padding(decrypted)\n        return decrypted\n\n    @staticmethod\n    def encrypt_cbc(text, iv=None):\n        if iv is None:\n            iv = Random.new().read(DES3.block_size)\n\n        des = DES3.new(MethodDES3.key, DES3.MODE_CBC, iv)\n        text = pad(text, 16)\n        encrypted = des.encrypt(text)\n        return base64.b64encode(iv + encrypted)\n\n    @staticmethod\n    def decrypt_cbc(text):\n        text = base64.b64decode(text)\n        iv = text[0:DES3.block_size]\n        text = text[DES3.block_size:]\n        des = DES3.new(MethodDES3.key, DES3.MODE_CBC, iv)\n        decrypted = des.decrypt(text)\n        decrypted = remove_padding(decrypted)\n        return decrypted\n\n    @staticmethod\n    def test_all():\n        line_break = \"\\n=============================MODE=============================\\n\"\n        text = raw_input(\"Text to encrypt: \")  # \"This is a confidential string\"\n\n        des = MethodDES3()\n\n        print(\"Given;\\n\\t-key\\t\\t\\t\\t'\" + str(MethodDES3.key) + \"'\\n\\t-Text\\t\\t\\t\\t'\" + str(text) + \"'\")\n\n        raw_input(\"Continue...\")\n        print(line_break.replace(\"MODE\", \"Mode ECB\"))\n        for i in range(3):\n            encrypted = des.encrypt_ecb(text)\n            print(\"Encrypted: \" + encrypted)\n            print(\"Encrypted Length: \" + str(len(encrypted)))\n            print(\"Decrypted: \" + des.decrypt_ecb(encrypted))\n            print(\"\")\n\n        raw_input(\"Continue...\")\n        print(line_break.replace(\"MODE\", \"Mode CBC With Random Initial Vector\"))\n        for i in range(3):\n            encrypted = des.encrypt_cbc(text)\n            print(\"Encrypted: \" + encrypted)\n            print(\"Encrypted Length: \" + str(len(encrypted)))\n            print(\"Decrypted: \" + des.decrypt_cbc(encrypted))\n            print(\"\")\n\n\n\ndef pad(text, buffer_size):\n    number_of_blank_space = buffer_size - len(text) % buffer_size\n    character_by_that_value = chr(number_of_blank_space)\n    filler = number_of_blank_space * character_by_that_value\n    return text + filler\n\n\ndef remove_padding(text):\n    last_char = text[len(text) - 1:]\n    number_of_blank_space = ord(last_char)\n    length_of_text = len(text) - number_of_blank_space\n    removed_filter = text[:length_of_text]\n    return 
removed_filter\n\n","sub_path":"python/encryption/MethodDES3.py","file_name":"MethodDES3.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468164496","text":"from matplotlib.image import imread\nimport numpy as np\nimport pandas\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nfrom problem1.classify_knn import knn_classifier\nfrom problem1.best_k import plot_k_accuracy\n\n\ndef load_images(path):\n img = imread(path)\n images = []\n for i in range(34):\n for j in range(33):\n m = img[i * 16:(i + 1) * 16, j * 16:(j + 1) * 16]\n if not (j == 32 and i >= 12):\n images.append(np.squeeze(m.T.reshape(-1)))\n return images\n\n\ndef get_data():\n trains = pandas.DataFrame()\n tests = pandas.DataFrame()\n for i in range(1, 6):\n image_data = load_images(f\"images/usps_{i}.jpg\")\n df = pandas.DataFrame(image_data)\n train_df = pandas.DataFrame(image_data[0:int(0.5 * len(image_data))])\n train_df[\"label\"] = i\n test_df = pandas.DataFrame(image_data[int(0.5 * len(image_data)):])\n test_df[\"label\"] = i\n df.to_csv(path_or_buf=f\"results/data/{i}.csv\")\n\n trains = pandas.concat([trains.copy(), train_df])\n tests = pandas.concat([tests.copy(), test_df])\n return trains, tests\n\n\ndef problem3_1():\n trains, tests = get_data()\n prediction = knn_classifier(k=10, x_train=trains.iloc[:, :-1], y_train=trains[\"label\"], x_test=tests.iloc[:, :-1])\n result_df = pandas.DataFrame({\n \"true\": tests[\"label\"],\n \"prediction\": prediction\n })\n info = {\n \"confusion_matrix\": str(confusion_matrix(y_true=tests[\"label\"], y_pred=prediction)),\n \"accuracy\": accuracy_score(y_true=tests[\"label\"], y_pred=prediction)\n }\n result_df.to_csv(path_or_buf=\"results/prediction.csv\", index=False)\n with open(\"results/accuracy.json\", \"w\") as f:\n f.write(str(info))\n\n\ndef problem3_2():\n trains, tests = get_data()\n raw_count = tests[0].count() + 1\n validation_data = tests.iloc[:int(raw_count * 0.1), :]\n test_data = tests.iloc[int(raw_count * 0.1):, :]\n best_k = plot_k_accuracy(x_train_data=trains.iloc[:, :-1], y_train_data=trains[\"label\"],\n validation_data_x=validation_data.iloc[:, :-1], validation_data_y=validation_data[\"label\"],\n path=\"results/k_plot.jpg\")\n\n prediction = knn_classifier(k=best_k, x_train=trains.iloc[:, :-1], y_train=trains[\"label\"],\n x_test=test_data.iloc[:, :-1])\n result_df = pandas.DataFrame({\n \"true\": test_data[\"label\"],\n \"prediction\": prediction\n })\n info = {\n \"confusion_matrix\": str(confusion_matrix(y_true=test_data[\"label\"], y_pred=prediction)),\n \"accuracy\": accuracy_score(y_true=test_data[\"label\"], y_pred=prediction)\n }\n result_df.to_csv(path_or_buf=\"results/prediction.csv\", index=False)\n with open(\"results/accuracy_with_best_k.json\", \"w\") as f:\n f.write(str(info))\n\n\nif __name__ == '__main__':\n problem3_1()\n problem3_2()\n","sub_path":"problem3/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289021124","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"mnist/\",one_hot=True)\npixels = 28*28\nnums = 10\n\nx = tf.placeholder(tf.float32,[None,pixels],name = \"x\")\ny_ = tf.placeholder(tf.float32,[None,nums], name=\"y\")\n\ndef weight_variable(name,shape):\n W_init = 
tf.truncated_normal(shape,stddev=0.1)\n W = tf.Variable(W_init,name=\"W_\"+name)\n return W\n\ndef bias_variable(name,size):\n b_init = tf.constant(0.1,shape=[size])\n b = tf.Variable(b_init,name=\"b_\"+name)\n return b\n\ndef conv2d(x,W):\n return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding=\"SAME\")\n\n\ndef max_pool(x):\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\")\n\n# create 32 filters of size 5 * 5 * 1\n# convolution layer\nwith tf.name_scope('conv1') as scope:\n W_conv1 = weight_variable('conv1',[5,5,1,32])\n b_conv1 = bias_variable('conv1',32)\n x_image = tf.reshape(x,[-1,28,28,1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1)+b_conv1)\n\nwith tf.name_scope('pool1') as scope:\n h_pool1 = max_pool(h_conv1)\n\nwith tf.name_scope('conv2') as scope:\n W_conv2 = weight_variable('conv2',[5,5,32,64])\n b_conv2 = bias_variable('conv2',64)\n h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)\n\nwith tf.name_scope('pool2') as scope:\n h_pool2 = max_pool(h_conv2)\n\nwith tf.name_scope('fully_connected') as scope:\n n = 7 * 7*64\n W_fc = weight_variable('fc',[n,1024])\n b_fc = bias_variable('fc',1024)\n h_pool2_flat = tf.reshape(h_pool2,[-1,n])\n h_fc = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc)+b_fc)\n\nwith tf.name_scope('dropout') as scope:\n keep_prob = tf.placeholder(tf.float32)\n h_fc_drop = tf.nn.dropout(h_fc, keep_prob)\n\nwith tf.name_scope('readout') as scope:\n W_fc2 = weight_variable('fc2',[1024,10])\n b_fc2 = bias_variable('fc2',10)\n y_conv = tf.nn.softmax(tf.matmul(h_fc_drop,W_fc2) + b_fc2)\n\nwith tf.name_scope('loss') as scope:\n cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))\nwith tf.name_scope('training') as scope:\n optimizer = tf.train.AdamOptimizer(1e-4)\n train_step = optimizer.minimize(cross_entropy)\n\nwith tf.name_scope('predict') as scope:\n predict_step = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(predict_step,tf.float32))\n\ndef set_feed(images, labels, prob):\n return {x:images,y_:labels,keep_prob:prob}\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n tw = tf.summary.FileWriter(\"log_dir\", graph=sess.graph)\n test_fd = set_feed(mnist.test.images, mnist.test.labels,1)\n for step in range(1000):\n batch = mnist.train.next_batch(50)\n fd = set_feed(batch[0],batch[1],0.5)\n _, loss = sess.run([train_step, cross_entropy],feed_dict=fd)\n if step % 100 == 0:\n acc = sess.run(accuracy,feed_dict = test_fd)\n print(\"step=\",step,\"loss=\",loss,\"acc=\",acc)\n acc = sess.run(accuracy,feed_dict=test_fd)\n print(\"accuracy =\",acc)\n\n\n\n\n","sub_path":"ch5/ch5-7/mnist-deep.py","file_name":"mnist-deep.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"116262313","text":"#simpleRNN: 85.53%\n#LSTM: 86.24% (no overfitting observed; further training should do even better)\nfrom keras.layers import LSTM,Embedding,Dense\nfrom keras.models import Sequential\nfrom keras.datasets import imdb\nfrom keras.preprocessing import sequence\nimport numpy as np\n# save np.load\nnp_load_old = np.load\n\n# modify the default parameters of np.load\nnp.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)\n\nmax_features = 10000\nmaxlen = 500 # much longer sequences are used this time\nbatch_size = 32\n\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nx_train1 = sequence.pad_sequences(x_train, maxlen=maxlen)# shorter sentences are zero-padded at the front; a parameter can switch the padding to the end\nx_test1 = sequence.pad_sequences(x_test,maxlen=maxlen)\n\nmodel = Sequential([\n Embedding(max_features,32),\n 
LSTM(32,return_sequences=False),\n Dense(1,activation='sigmoid')\n])\nmodel.summary()\n\nmodel.compile(optimizer='rmsprop',loss='binary_crossentropy',\n metrics=['acc'])\n\nmodel.fit(x_train1, y_train, epochs=2, batch_size=batch_size, validation_data=(x_test1, y_test))","sub_path":"Chapter6 Practice/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"442472877","text":"# -*- coding: utf-8 -*-\nfrom datetime import date\n\nfrom django.db import models, migrations\n\n\n# copied from the model because migrations don't allow usage of model methods\ndef workday_count(abs_range, apps):\n Holiday = apps.get_model(\"planner\", \"Holiday\")\n holidays = (Holiday.objects.filter(day__gte=abs_range.begin, day__lt=abs_range.end)\n .values('day').distinct())\n return (abs_range.end - abs_range.begin).days - holidays.count()\n\n\ndef count_total_workdays(apps, schema_editor):\n Absence = apps.get_model(\"planner\", \"Absence\")\n AbsenceRange = apps.get_model(\"planner\", \"AbsenceRange\")\n for absence in Absence.objects.all():\n absence.total_workdays = 0\n for abs_range in AbsenceRange.objects.filter(absence=absence):\n absence.total_workdays += workday_count(abs_range, apps)\n absence.save()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('planner', '0005_absence_total_workdays'),\n ]\n\n operations = [\n migrations.RunPython(count_total_workdays)\n ]\n","sub_path":"planner/migrations/0006_manual_count_workdays.py","file_name":"0006_manual_count_workdays.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"557526684","text":"import lutorpy\n\nimport numpy as np\n\nrequire(\"tripletSelection\")\nrequire(\"torch\")\nembeddings = torch.DoubleTensor(12,3)\nnumImages = 10\nnumPerClass = [5,5]\nprint(numPerClass)\npeoplePerBatch = 2\nalpha = 1\nembSize = 3\ncuda = False\ntrip = triplets(embeddings, numImages, numPerClass, peoplePerBatch, alpha, embSize, cuda)\n\na = trip[0][0].asNumpyArray()\np = trip[0][1].asNumpyArray()\nn = trip[0][2].asNumpyArray()\nids = trip[1].asNumpyArray()\n\n\nprint(a,p,n,ids)\n\n","sub_path":"test_lupa.py","file_name":"test_lupa.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"565061670","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 2 10:31:42 2018\r\nProblem 3:\r\n1. Fetch the weather data (hometown) as a dict by reusing the networking code\r\n2. Print the temperature temp, the weather description and the pressure pre\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n\r\nimport urllib.request as r# import the networking toolkit, aliased as r\r\nurl='http://api.openweathermap.org/data/2.5/weather?q=chongqing&mode=json&units=metric&lang=zh_cn&APPID=6a67ed641c0fda8b69715c43518b6996'\r\ndata=r.urlopen(url).read().decode('utf-8')\r\n# convert the str into a dict\r\nimport json\r\ndata=json.loads(data)\r\nprint('Chongqing temperature: {}'.format(data['main']['temp']))\r\nprint('Chongqing pressure: '+str(data['main']['pressure']))\r\nprint('Weather description: '+str(data['weather'][0]['description']))\r\n## or\r\nd=data['weather']\r\nprint('Weather description: '+str(d[0]['description']))","sub_path":"联网获取数据.py","file_name":"联网获取数据.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"313520633","text":"# -*- coding: utf-8 -*-\nfrom PySide import QtGui, QtCore\n\n\ndef remove_invalid_clipboard_data():\n oldMimeData = QtGui.qApp.clipboard().mimeData()\n 
newMimeData = QtCore.QMimeData()\n for format in oldMimeData.formats():\n if 'text/uri-list' in format:\n continue\n data = oldMimeData.data(format)\n newMimeData.setData(format, data)\n clipboard = QtGui.qApp.clipboard()\n clipboard.blockSignals(True)\n clipboard.setMimeData(newMimeData)\n clipboard.blockSignals(False)\n QtGui.qApp.clipboard().dataChanged.connect(remove_invalid_clipboard_data)","sub_path":"miraLibs/mayaLibs/remove_invalid_clipboard_data.py","file_name":"remove_invalid_clipboard_data.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"623926137","text":"#!/usr/bin/env python\nimport gzip\nimport sys\n\n\ndef main(argv):\n inFile = argv[1]\n outbc1 = argv[2] + \"_bc1.fq\"\n outbc2 = argv[2] + \"_bc2.fq\"\n outbc3 = argv[2] + \"_bc3.fq\"\n type_bc = argv[3]\n bc1 = open(outbc1, \"w\")\n bc2 = open(outbc2, \"w\")\n bc3 = open(outbc3, \"w\")\n\n count = 0\n with gzip.open(inFile, \"rt\") as read2:\n\n for line in read2:\n count += 1\n if count % 2 == 0:\n b1 = \"\"\n b2 = \"\"\n b3 = \"\"\n if type_bc == \"30\":\n b1 = line[100:110] + \"\\n\"\n b2 = line[110:120] + \"\\n\"\n b3 = line[120:130] + \"\\n\"\n elif type_bc == \"42\":\n b1 = line[100:110] + \"\\n\"\n b2 = line[116:126] + \"\\n\"\n b3 = line[132:142] + \"\\n\"\n elif type_bc == \"54\":\n b1 = line[100:110] + \"\\n\"\n b2 = line[116:126] + \"\\n\"\n b3 = line[144:154] + \"\\n\"\n bc1.write(b1)\n bc2.write(b2)\n bc3.write(b3)\n else:\n bc1.write(line)\n bc2.write(line)\n bc3.write(line)\n bc1.close()\n bc2.close()\n bc3.close()\n read2.close()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"extract_stlfr_bc.py","file_name":"extract_stlfr_bc.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"493488817","text":"# Write a Python program to find the second smallest number in a list.\nimport random\n\n# x = [random.randint(-100, 100) for _ in range(10)]\nx = [1, 1, 1, 1]\n\nminm = None\nprev_min = None\nfor current in x:\n if minm is None or current < minm:\n prev_min = minm\n minm = current\n elif (prev_min is None and current != minm) or minm < current < prev_min:\n prev_min = current\n\nprint(x)\nprint(prev_min, minm)\n\n\n\n\n","sub_path":"lists/w3resource.list.27.py","file_name":"w3resource.list.27.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"356737655","text":"#!/usr/bin/env vpython3\n# Copyright 2014 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Simple client for the Gerrit REST API.\n\nExample usage:\n ./gerrit_client.py -j /tmp/out.json -f json \\\n -u https://chromium.googlesource.com/chromium/src/+log\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport logging\nimport os\nimport sys\nimport tarfile\nimport time\n\ntry:\n from urllib import urlencode\n import urlparse\nexcept ImportError: # pragma: no cover\n from urllib.parse import urlencode\n import urllib.parse as urlparse\n\nDEPOT_TOOLS = os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,\n os.pardir))\nsys.path.insert(0, DEPOT_TOOLS)\n\nfrom gerrit_util import CreateHttpConn, ReadHttpResponse, ReadHttpJsonResponse\n\n\ndef reparse_url(parsed_url, query_params):\n return urlparse.ParseResult(\n scheme=parsed_url.scheme,\n netloc=parsed_url.netloc,\n path=parsed_url.path,\n params=parsed_url.params,\n fragment=parsed_url.fragment,\n query=urlencode(query_params, doseq=True))\n\n\ndef gitiles_get(parsed_url, handler, attempts):\n # This insanity is due to CreateHttpConn interface :(\n host = parsed_url.netloc\n path = parsed_url.path\n if parsed_url.query:\n path += '?%s' % (parsed_url.query, )\n\n retry_delay_seconds = 1\n attempt = 1\n while True:\n try:\n return handler(CreateHttpConn(host, path))\n except Exception as e:\n if attempt >= attempts:\n raise\n logging.exception('Failed to perform Gitiles operation: %s', e)\n\n # Retry from previous loop.\n logging.error('Sleeping %d seconds before retry (%d/%d)...',\n retry_delay_seconds, attempt, attempts)\n time.sleep(retry_delay_seconds)\n retry_delay_seconds *= 2\n attempt += 1\n\n\ndef fetch_log_with_paging(query_params, limit, fetch):\n \"\"\"Fetches log, possibly requesting multiple pages to do so.\n\n Args:\n query_params (dict): Parameters to use in the request.\n limit (int): Page size.\n fetch (function): Function to use to make the requests.\n\n Returns:\n Dict with key \"log\", whose value is a list of commits.\n \"\"\"\n # Log api returns {'log': [list of commits], 'next': hash}.\n last_result = fetch(query_params)\n commits = last_result['log']\n while last_result.get('next') and len(commits) < limit:\n query_params['s'] = last_result.get('next')\n last_result = fetch(query_params)\n # The first commit in `last_result` is not necessarily the parent of the\n # last commit in result so far! This is because log command can be done on\n # one file object, for example:\n # https://gerrit.googlesource.com/gitiles/+log/1c21279f337da8130/COPYING\n # Even when getting log for the whole repository, there could be merge\n # commits.\n commits.extend(last_result['log'])\n # Use 'next' field (if any) from `last_result`, but commits aggregated\n # from all the results. 
This essentially imitates paging with at least\n # `limit` page size.\n last_result['log'] = commits\n logging.debug(\n 'fetched %d commits, next: %s.', len(commits),\n last_result.get('next'))\n return last_result\n\n\ndef main(arguments):\n parser = create_argparser()\n args = parser.parse_args(arguments)\n\n if args.extract_to and args.format != \"archive\":\n parser.error('--extract-to requires --format=archive')\n if not args.extract_to and args.format == \"archive\":\n parser.error('--format=archive requires --extract-to')\n\n if args.extract_to:\n # make sure it is absolute and ends with '/'\n args.extract_to = os.path.join(os.path.abspath(args.extract_to), '')\n os.makedirs(args.extract_to)\n\n parsed_url = urlparse.urlparse(args.url)\n if not parsed_url.scheme.startswith('http'):\n parser.error('Invalid URI scheme (expected http or https): %s' % args.url)\n\n query_params = {}\n if parsed_url.query:\n query_params.update(urlparse.parse_qs(parsed_url.query))\n # Force the format specified on command-line.\n if query_params.get('format'):\n parser.error('URL must not contain format; use --format command line flag '\n 'instead.')\n query_params['format'] = args.format\n\n kwargs = {}\n accept_statuses = frozenset([int(s) for s in args.accept_statuses.split(',')])\n if accept_statuses:\n kwargs['accept_statuses'] = accept_statuses\n\n # Choose handler.\n if args.format == 'json':\n def handler(conn):\n return ReadHttpJsonResponse(conn, **kwargs)\n elif args.format == 'text':\n # Text fetching will pack the text into structured JSON.\n def handler(conn):\n # Wrap in a structured JSON for export to recipe module.\n return {\n 'value': ReadHttpResponse(conn, **kwargs).read() or None,\n }\n elif args.format == 'archive':\n # Archive fetching hooks result to tarfile extraction. This implementation\n # is able to do a streaming extraction operation without having to buffer\n # the entire tarfile.\n def handler(conn):\n ret = {\n 'extracted': {\n 'filecount': 0,\n 'bytes': 0,\n },\n 'skipped': {\n 'filecount': 0,\n 'bytes': 0,\n 'names': [],\n }\n }\n fileobj = ReadHttpResponse(conn, **kwargs)\n with tarfile.open(mode='r|*', fileobj=fileobj) as tf:\n # monkeypatch the TarFile object to allow printing messages and\n # collecting stats for each extracted file. 
extractall makes a single\n # linear pass over the tarfile, which is compatible with\n # ReadHttpResponse; other naive implementations (such as `getmembers`)\n # do random access over the file and would require buffering the whole\n # thing (!!).\n em = tf._extract_member\n def _extract_member(tarinfo, targetpath):\n if not os.path.abspath(targetpath).startswith(args.extract_to):\n print('Skipping %s' % (tarinfo.name,))\n ret['skipped']['filecount'] += 1\n ret['skipped']['bytes'] += tarinfo.size\n ret['skipped']['names'].append(tarinfo.name)\n return\n print('Extracting %s' % (tarinfo.name,))\n ret['extracted']['filecount'] += 1\n ret['extracted']['bytes'] += tarinfo.size\n return em(tarinfo, targetpath)\n tf._extract_member = _extract_member\n tf.extractall(args.extract_to)\n return ret\n\n if args.log_start:\n query_params['s'] = args.log_start\n\n def fetch(query_params):\n parsed_url_with_query = reparse_url(parsed_url, query_params)\n result = gitiles_get(parsed_url_with_query, handler, args.attempts)\n if not args.quiet:\n logging.info('Read from %s: %s', parsed_url_with_query.geturl(), result)\n return result\n\n if args.log_limit:\n if args.format != 'json':\n parser.error('--log-limit works with json format only')\n result = fetch_log_with_paging(query_params, args.log_limit, fetch)\n else:\n # Either not a log request, or don't care about paging.\n # So, just return whatever is fetched the first time.\n result = fetch(query_params)\n\n with open(args.json_file, 'w') as json_file:\n json.dump(result, json_file)\n return 0\n\n\ndef create_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-j', '--json-file',\n help='Path to json file for output.')\n parser.add_argument(\n '--extract-to',\n help='Local path to extract archive url. Must not exist.')\n parser.add_argument(\n '-f', '--format', required=True, choices=('json', 'text', 'archive'))\n parser.add_argument(\n '-u', '--url', required=True,\n help='Url of gitiles. For example, '\n 'https://chromium.googlesource.com/chromium/src/+refs. '\n 'Insert a/ after domain for authenticated access.')\n parser.add_argument(\n '-a', '--attempts', type=int, default=1,\n help='The number of attempts to make (with exponential backoff) before '\n 'failing. If several requests are to be made, applies per each '\n 'request separately.')\n parser.add_argument(\n '-q', '--quiet', action='store_true',\n help='Suppress file contents logging output.')\n parser.add_argument(\n '--log-limit', type=int, default=None,\n help='Follow gitiles pages to fetch at least this many commits. By '\n 'default, first page with unspecified number of commits is fetched. '\n 'Only for https://<host>/<project>/+log/... gitiles request.')\n parser.add_argument(\n '--log-start',\n help='If given, continue fetching log by paging from this commit hash. '\n 'This value can typically be taken from json result of previous '\n 'call to log, which returns next page start commit as \"next\" key. '\n 'Only for https://<host>/<project>/+log/... 
gitiles request.')\n parser.add_argument(\n '--accept-statuses', type=str, default='200',\n help='Comma-separated list of Status codes to accept as \"successful\" '\n 'HTTP responses.')\n return parser\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n logging.getLogger().setLevel(logging.INFO)\n sys.exit(main(sys.argv[1:]))\n","sub_path":"third_party/depot_tools/recipes/recipe_modules/gitiles/resources/gerrit_client.py","file_name":"gerrit_client.py","file_ext":"py","file_size_in_byte":9167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468345262","text":"\"\"\"\ncreate a label\n\nanchor = 'nw', 'n', 'ne', 'w', 'center', 'e', 'sw', 's', 'se'\nanchor = tk.N, tk.W, ....\n\"\"\"\n\nimport tkinter as tk\n# from tkinter import *\n\nroot = tk.Tk()\nroot.title('Python GUI - label')\n\nwinw= 800\nwinh= 450\nposx=300\nposy=200\nroot.geometry(f'{winw}x{winh}+{posx}+{posy}')\nroot.config(bg='#ddff77')\n\n# create a label\nmytext = 'My Text Label 4'\nlabel1 = tk.Label(root, text=mytext,\n width=30, height=7,\n bg='red', fg='#ffffff',\n anchor='s')\nlabel1.pack()\n\n# create a label\nmytext = 'My Text Label 4'\nlabel2 = tk.Label(root, text=mytext,\n width=30, height=7,\n bg='blue', fg='#ffffff',\n anchor=tk.W)\nlabel2.pack()\n\nroot.mainloop()\n","sub_path":"py210116f_python3a/day04_210206/sample/label_4_anchor.py","file_name":"label_4_anchor.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465604041","text":"import os\n\nfrom kivy.lang import Builder\nfrom kivy.metrics import dp\nfrom kivy.uix.image import AsyncImage\nfrom kivy.graphics import Color, Rectangle\nfrom kivymd.uix.bottomnavigation import MDBottomNavigationItem\nfrom kivymd.uix.imagelist import SmartTile\nfrom kivy.uix.scrollview import ScrollView\nfrom kivymd.uix.gridlayout import MDGridLayout\nfrom kivymd.uix.button import MDFloatingActionButton\nfrom kivymd.uix.filemanager import MDFileManager\n\nfrom config import Config\nfrom src.images_provider import ImagesProvider\n\n\nBuilder.load_file(f\"{Config.TEMPLATES_DIR}/imagecollectiontab.kv\")\n\n\nclass ImageCell(SmartTile):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.box_color = (0, 0, 0, 0)\n\nclass ImageGrid(MDGridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.padding = (dp(2), dp(2))\n self.spacing = dp(4)\n\n def get_free_cell(self):\n for image in self.images:\n if not image.source:\n return image\n return\n\n def add_image_cells(self):\n for image in self.images:\n self.add_widget(image)\n\n\nclass ThreeHorizontalImagesGrid(ImageGrid):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 3\n self.rows = 1\n self.size_hint = (1, 0.083)\n self.images = (ImageCell(), ImageCell(), ImageCell())\n self.add_image_cells()\n\n\nclass ThreeImagesBlockGrid(ImageGrid):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 2\n self.rows = 1\n self.size_hint = (1, 0.16)\n\n\nclass TwoVerticalImagesGrid(ImageGrid):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n self.rows = 2\n self.size_hint = (0.33, 0.083)\n self.images = (ImageCell(), ImageCell())\n self.add_image_cells()\n\n\nclass BigImageGrid(ImageGrid):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n self.rows = 1\n self.size_hint = (0.66, 0.16)\n self.images = (ImageCell(),)\n self.add_image_cells()\n\n\nclass 
BlockOfImages(ImageGrid):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._current_grid = None\n\n self._first_row_grid = None\n self._middle_block_grid = None\n self._middle_small_images_grid = None\n self._middle_big_image_grid = None\n self._last_row_grid = None\n\n self.cols = 1\n self.size_hint = (1, 1.1)\n\n self.images = []\n self._make_new_grid()\n\n def _to_next_grid(self):\n if self._current_grid == self._first_row_grid:\n self._current_grid = self._middle_small_images_grid\n elif self._current_grid == self._middle_small_images_grid:\n self._current_grid = self._middle_big_image_grid\n elif self._current_grid == self._middle_big_image_grid:\n self._current_grid = self._last_row_grid\n elif self._current_grid == self._last_row_grid:\n self._make_new_grid()\n\n def get_free_cell(self):\n if self._last_row_grid.children[0].source:\n return\n image = self._current_grid.get_free_cell()\n if not image:\n self._to_next_grid()\n image = self._current_grid.get_free_cell()\n return image\n\n def _make_new_grid(self):\n self._first_row_grid = ThreeHorizontalImagesGrid()\n self._last_row_grid = ThreeHorizontalImagesGrid()\n\n self._middle_block_grid = ThreeImagesBlockGrid()\n self._middle_small_images_grid = TwoVerticalImagesGrid()\n self._middle_big_image_grid = BigImageGrid()\n\n self._middle_block_grid.add_widget(self._middle_small_images_grid)\n self._middle_block_grid.add_widget(self._middle_big_image_grid)\n\n self.add_widget(self._first_row_grid)\n self.add_widget(self._middle_block_grid)\n self.add_widget(self._last_row_grid)\n\n self._current_grid = self._first_row_grid\n\n\nclass ImageGridBuilder(MDGridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.blocks = [BlockOfImages(), BlockOfImages(), BlockOfImages()]\n self._idx = 0\n self._current_block = self.blocks[self._idx]\n self.cols = 1\n self.size_hint = (1, 3.3)\n\n for block in self.blocks:\n self.add_widget(block)\n\n def _to_next_block(self):\n self._idx += 1\n self._current_block = self.blocks[self._idx]\n\n def add_image(self, source):\n image = self._current_block.get_free_cell()\n if not image:\n self._to_next_block()\n image = self._current_block.get_free_cell()\n image.source = source\n\n\nclass ImageChooser(MDFileManager):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.preview = True\n self.exit_manager = self.exit\n self.external_storage = os.getenv('EXTERNAL_STORAGE')\n self.images_folder = os.path.join(self.external_storage, \"Pictures\")\n\n def select_path(self, path):\n ImageCollectionTab.image_collection.builder.add_image(path)\n self.exit()\n\n def exit(self, *args):\n self.close()\n\n def open(self):\n self.show(self.images_folder)\n\n\nclass ImageCollection(MDGridLayout):\n def __init__(self, **kwargs):\n super().__init__(cols=1, **kwargs)\n\n self.__next_image_index = 0\n\n self.add_image_button = MDFloatingActionButton(\n icon=\"plus\",\n on_release=self.load_images\n )\n\n self.scroll_view = ScrollView(size_hint=(1, 1))\n\n self.builder = ImageGridBuilder()\n\n self.scroll_view.add_widget(self.builder)\n self.add_widget(self.scroll_view)\n self.add_widget(self.add_image_button)\n\n def load_images(self, touch):\n # ImageCollectionTab.image_chooser.open()\n self.images_links = ImagesProvider.load_images()\n for link in self.images_links:\n self.builder.add_image(source=link)\n\n\n\nclass ImageCollectionTab(MDBottomNavigationItem):\n \"\"\"Tab that contains personal information.\"\"\"\n\n image_chooser = None\n image_collection = None\n 
x_size = None\n\n def __init__(self, **kwargs):\n super().__init__(name=\"img_collection\", text=\"Images\",\n icon=\"image-frame\", **kwargs)\n\n ImageCollectionTab.x_size = self.size[0]\n ImageCollectionTab.image_collection = ImageCollection()\n ImageCollectionTab.image_chooser = ImageChooser()\n\n self.add_widget(ImageCollectionTab.image_collection)\n","sub_path":"lab6/src/ui/imagecollectiontab.py","file_name":"imagecollectiontab.py","file_ext":"py","file_size_in_byte":6599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563215805","text":"import logging\nfrom typing import Optional, Any\n\nfrom homeassistant.components.fan import FanEntity, SUPPORT_SET_SPEED\nfrom meross_iot.cloud.devices.humidifier import GenericHumidifier, SprayMode\nfrom meross_iot.manager import MerossManager\nfrom meross_iot.meross_event import DeviceOnlineStatusEvent, HumidifierSpryEvent\n\nfrom .common import DOMAIN, MANAGER, AbstractMerossEntityWrapper, cloud_io, HA_FAN\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass MerossSmartHumidifier(FanEntity, AbstractMerossEntityWrapper):\n \"\"\"\n At the time of writing, Homeassistant does not offer any specific device implementation that we can extend\n for implementing the smart humidifier. We'll exploit the fan entity to do so\n \"\"\"\n\n def __init__(self, device: GenericHumidifier):\n super().__init__(device)\n self._id = device.uuid\n self._device_name = device.name\n\n # Device state\n self._humidifier_mode = None\n self._is_on = None\n self._is_online = self._device.online\n\n def parse_spry_mode(self, spry_mode):\n if spry_mode == SprayMode.OFF:\n return False, self._humidifier_mode\n elif spry_mode == SprayMode.INTERMITTENT:\n return True, SprayMode.INTERMITTENT\n elif spry_mode == SprayMode.CONTINUOUS:\n return True, SprayMode.CONTINUOUS\n else:\n raise ValueError(\"Unsupported spry mode.\")\n\n def device_event_handler(self, evt):\n if isinstance(evt, DeviceOnlineStatusEvent):\n _LOGGER.info(\"Device %s reported online status: %s\" % (self._device.name, evt.status))\n if evt.status not in [\"online\", \"offline\"]:\n raise ValueError(\"Invalid online status\")\n self._is_online = evt.status == \"online\"\n elif isinstance(evt, HumidifierSpryEvent):\n self._is_on, self._humidifier_mode = self.parse_spry_mode(evt.spry_mode)\n else:\n _LOGGER.warning(\"Unhandled/ignored event: %s\" % str(evt))\n\n self.schedule_update_ha_state(False)\n\n @cloud_io\n def update(self):\n state = self._device.get_status(True)\n self._is_online = self._device.online\n\n if self._is_online:\n self._is_on, self._humidifier_mode = self.parse_spry_mode(self._device.get_spray_mode())\n\n def force_state_update(self, ui_only=False):\n if not self.enabled:\n return\n\n force_refresh = not ui_only\n self.schedule_update_ha_state(force_refresh=force_refresh)\n\n async def async_added_to_hass(self) -> None:\n self._device.register_event_callback(self.device_event_handler)\n\n async def async_will_remove_from_hass(self) -> None:\n self._device.unregister_event_callback(self.device_event_handler)\n\n @property\n def available(self) -> bool:\n return self._is_online\n\n @property\n def is_on(self) -> bool:\n return self._is_on\n\n @property\n def speed(self) -> Optional[str]:\n if self._humidifier_mode is None:\n return None\n return self._humidifier_mode.name\n\n @property\n def supported_features(self) -> int:\n return 0 | SUPPORT_SET_SPEED\n\n @property\n def speed_list(self) -> list:\n \"\"\"Get the list of available 
speeds.\"\"\"\n return [e.name for e in SprayMode if e != SprayMode.OFF]\n\n @cloud_io\n def set_speed(self, speed: str) -> None:\n mode = SprayMode[speed]\n self._device.set_spray_mode(mode)\n\n @cloud_io\n def set_direction(self, direction: str) -> None:\n # Not supported\n pass\n\n @cloud_io\n def turn_on(self, speed: Optional[str] = None, **kwargs) -> None:\n # Assume the user wants to trigger the last mode\n mode = self._humidifier_mode\n # If a specific speed was provided, override the last mode\n if speed is not None:\n mode = SprayMode[speed]\n # Otherwise, assume we want intermittent mode\n if mode is None:\n mode = SprayMode.INTERMITTENT\n\n self._device.set_spray_mode(mode)\n\n @cloud_io\n def turn_off(self, **kwargs: Any) -> None:\n self._device.set_spray_mode(SprayMode.OFF)\n\n @property\n def name(self) -> Optional[str]:\n return self._device_name\n\n @property\n def device_info(self):\n return {\n 'identifiers': {(DOMAIN, self._id)},\n 'name': self._device_name,\n 'manufacturer': 'Meross',\n 'model': self._device.type + \" \" + self._device.hwversion,\n 'sw_version': self._device.fwversion\n }\n\n @property\n def should_poll(self) -> bool:\n \"\"\"\n This device handles stat update via push notification\n :return:\n \"\"\"\n return False\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n def sync_logic():\n\n fan_devices = []\n manager = hass.data[DOMAIN][MANAGER] # type:MerossManager\n\n # Add smart humidifiers\n humidifiers = manager.get_devices_by_kind(GenericHumidifier)\n for humidifier in humidifiers:\n h = MerossSmartHumidifier(device=humidifier)\n fan_devices.append(h)\n hass.data[DOMAIN][HA_FAN][h.unique_id] = h\n\n return fan_devices\n\n devices = await hass.async_add_executor_job(sync_logic)\n async_add_entities(devices)\n","sub_path":"custom_components/meross_cloud/fan.py","file_name":"fan.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"344136239","text":"\"\"\"Tests for `mbed_targets.mbed_tools.list_all`.\"\"\"\nfrom unittest import mock, TestCase\nfrom click.testing import CliRunner\n\nfrom mbed_targets.mbed_tools.list_all import list_all\nfrom mbed_targets.mbed_targets import MbedTargets, MbedTarget\n\n\nclass TestListAll(TestCase):\n \"\"\"Tests for list_all cli command.\"\"\"\n\n @mock.patch(\"mbed_targets.mbed_tools.list_all.MbedTargets\", spec_set=MbedTargets)\n def test_outputs_target_data(self, MbedTargets):\n \"\"\"Invoking a command lists board type for all targets.\"\"\"\n MbedTargets.return_value = [\n mock.Mock(spec_set=MbedTarget, board_type=\"foo\"),\n mock.Mock(spec_set=MbedTarget, board_type=\"bar\"),\n ]\n\n runner = CliRunner()\n result = runner.invoke(list_all)\n\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.output, \"foo\\nbar\\n\")\n","sub_path":"tests/mbed_tools/test_list_all.py","file_name":"test_list_all.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"652650798","text":"#Import and Transform Data \ndef import_files(data_dir):\n from torchvision import datasets, models, transforms\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n #Define transforms for datasets\n train_transforms =transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), 
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n valid_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]) \n test_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])]) \n #Import data and apply transforms \n train_data = datasets.ImageFolder(data_dir + '/train',transform=train_transforms) \n valid_data = datasets.ImageFolder(data_dir + '/valid', transform=valid_transforms)\n test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) \n \n return train_data, valid_data, test_data\n#Function for loading data\ndef data_load(train_data,valid_data,test_data):\n import torch\n from torch import nn, optim\n from torch.optim import lr_scheduler\n from torch.autograd import Variable \n\n#Define data loaders\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\n validloader = torch.utils.data.DataLoader(valid_data, batch_size=64, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_data, batch_size=64)\n return trainloader, validloader,testloader\n#Function for Label Mapping\ndef label_map():\n import json\n with open('ImageClassifier/cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n return cat_to_name\n\n#function for image processing\ndef process_image(image_path):\n from PIL import Image\n import numpy as np\n img = Image.open(image_path)\n width,height=img.size\n \n #Resize image based on smallest side\n if width > height:\n img=img.resize((int((256/height)*width),256))\n else:\n img=img.resize((256,int((256/width)*height)))\n\n #Set dimensions to crop\n left = (img.width-224)/2\n bottom = (img.height-224)/2\n right = left + 224\n top = bottom + 224\n \n #crop image\n img = img.crop((left, bottom, right, \n top))\n \n #Normalise image and transpose colour channel\n img = np.array(img)/255\n mean = np.array([0.485, 0.456, 0.406]) \n std = np.array([0.229, 0.224, 0.225]) \n img = (img - mean)/std\n img = img.transpose((2, 0, 1))\n \n \n return img\n \n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112874643","text":"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\n\nclass LateFusionEncoder(nn.Module):\n\n @staticmethod\n def add_cmdline_args(parser):\n parser.add_argument_group('Encoder specific arguments')\n parser.add_argument('-img_feature_size', default=4096,\n help='Channel size of image feature')\n parser.add_argument('-img_spatial_size', default=14,\n help='JSON file with image paths and vocab')\n parser.add_argument('-embed_size', default=300,\n help='Size of the input word embedding')\n parser.add_argument('-rnn_hidden_size', default=512,\n help='Size of the multimodal embedding')\n parser.add_argument('-num_layers', default=2,\n help='Number of layers in LSTM')\n parser.add_argument('-max_history_len', default=60,\n help='Size of the multimodal embedding')\n return parser\n\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n\n self.word_embed = nn.Embedding(opt.vocab_size, opt.embed_size, padding_idx=0)\n self.hist_rnn = nn.LSTM(opt.embed_size, opt.rnn_hidden_size, opt.num_layers,\n batch_first=True, dropout=opt.dropout)\n 
self.ques_rnn = nn.LSTM(opt.embed_size, opt.rnn_hidden_size, opt.num_layers,\n batch_first=True, dropout=opt.dropout)\n\n # fusion layer\n fusion_size = opt.img_feature_size + opt.rnn_hidden_size * 2\n self.fusion = nn.Linear(fusion_size, opt.rnn_hidden_size)\n\n def forward(self, img, ques, hist):\n # repeat image feature vectors to be provided for every round\n img = img.view(-1, 1, self.opt.img_feature_size)\n img = img.repeat(1, self.opt.num_rounds, 1)\n img = img.view(-1, self.opt.img_feature_size)\n\n # embed questions\n ques = ques.view(-1, ques.size(2))\n ques_embed, _ = self.ques_rnn(self.word_embed(ques), None)\n # pick the last time step (final question encoding)\n ques_embed = ques_embed[:, -1, :]\n\n # embed history\n hist = hist.view(-1, hist.size(2))\n hist_embed, _ = self.hist_rnn(self.word_embed(hist), None)\n hist_embed = hist_embed[:, -1, :]\n\n fused_vector = torch.cat((img, ques_embed, hist_embed), 1)\n if self.opt.dropout > 0:\n fused_vector = F.dropout(fused_vector, self.opt.dropout,\n training=self.opt.training)\n\n fused_embedding = F.tanh(self.fusion(fused_vector))\n return fused_embedding\n","sub_path":"visdial/models/encoders/lf.py","file_name":"lf.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"303758073","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\npyinformer is a library to send desktop notifications on Mac OS X\n\n\n\"\"\"\n\n\n\n__title__ = 'pyinformer'\n__version__ = '0.1'\n__author__ = 'Shyam Satyaprasad'\n__license__ = 'MIT'\n\n# imports\nimport sys\nfrom sys import platform as _platform\n\nv = sys.version_info\nif v[:2] < (2, 7):\n raise ImportError('This package requires Python version 2.7')\ndel v\n\n\nif _platform == \"linux\" or _platform == \"linux2\":\n raise OSError(\"This package doesn't support this platform\")\nelif _platform == \"win32\":\n raise OSError(\"This package doesn't support this platform\")\nelif _platform == \"darwin\":\n from pyinformer import Notifications\n\n__all__ = (sys, _platform)\n","sub_path":"pyinformer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"391955604","text":"'''\nExercise 1 \nDownload the code in this chapter from http://thinkpython2.com/code/polygon.py.\nDraw a stack diagram that shows the state of the program while executing circle(bob, radius). You can do the arithmetic by hand or add print statements to the code.\n\nFigure 4.1: Turtle flowers.\nExercise 2 \nWrite an appropriately general set of functions that can draw flowers as in Figure 4.1.\nSolution: http://thinkpython2.com/code/flower.py, also requires http://thinkpython2.com/code/polygon.py.\n\n\nFigure 4.2: Turtle pies.\nExercise 3 \nWrite an appropriately general set of functions that can draw shapes as in Figure 4.2.\nSolution: http://thinkpython2.com/code/pie.py.\n\nExercise 4 \nThe letters of the alphabet can be constructed from a moderate number of basic elements, like vertical and horizontal lines and a few curves. Design an alphabet that can be drawn with a minimal number of basic elements and then write functions that draw the letters.\nYou should write one function for each letter, with names draw_a, draw_b, etc., and put your functions in a file named letters.py. 
You can download a “turtle typewriter” from http://thinkpython2.com/code/typewriter.py to help you test your code.\n\nYou can get a solution from http://thinkpython2.com/code/letters.py; it also requires http://thinkpython2.com/code/polygon.py.\n\nExercise 5 \nRead about spirals at http://en.wikipedia.org/wiki/Spiral; then write a program that draws an Archimedian spiral (or one of the other kinds). Solution: http://thinkpython2.com/code/spiral.py.\n\n----\n\nI didn't do all of that but I made a cute shape I like. \n\n'''\n\nimport turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"#421C52\")\n\ndef draw_square(some_turtle):\n for i in range(0,4):\n some_turtle.forward(200)\n some_turtle.right(90)\n \ndef draw_circle_with_square():\n purple_turtle = turtle.Turtle()\n purple_turtle.shape(\"circle\")\n purple_turtle.speed(9)\n purple_turtle.pensize(3)\n purple_turtle.hideturtle()\n for i in range(1,37):\n if i % 2 == 0:\n purple_turtle.color(\"white\")\n else:\n purple_turtle.color(\"#732C7B\")\n draw_square(purple_turtle)\n purple_turtle.right(10)\n\n#def draw_circle():\n#\tturtle_two = turtle.Turtle()\n#\tturtle_two.shape(\"arrow\")\n#\tturtle_two.color(\"white\")\n#\tturtle_two.circle(100)\n#\t\n#def draw_triangle():\n#\tturtle_three = turtle.Turtle()\n#\tturtle_three.shape(\"turtle\")\n#\tturtle_three.color(\"black\")\n#\tturtle_three.backward(100)\n#\tturtle_three.left(60)\n#\tturtle_three.forward(100)\n#\tturtle_three.right(120)\n#\tturtle_three.forward(100)\n\ndraw_circle_with_square()\n#draw_square()\n#draw_circle()\n#draw_triangle()\n\nwindow.exitonclick()","sub_path":"students/sheree/session_01/homework/TP2-CH4-Exercises.py","file_name":"TP2-CH4-Exercises.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526733030","text":"#!python2.7\n'''\nCreated on 23 Apr 2013\n\n@author: flexsys\n'''\n\nimport xml.etree.ElementTree as ET\nimport paramiko\nimport pymongo as mongo\nimport time\nfrom datetime import datetime, timedelta\nfrom lib.dbtools import connections\nfrom pandas import *\nimport pandas as pd\nfrom lib.data.pyData import convertStr\nimport pytz\nimport lib.dbtools.read_dataset as read_dataset\nfrom lib.logger import *\nfrom lib.io.toolkit import *\nfrom bson.json_util import default\nfrom lib.dbtools.get_repository import get_symbol6_from_ticker\nfrom lib.io.smart_converter import *\nfrom paramiko import ssh_exception\nfrom pickle import TRUE\nfrom django.utils.datetime_safe import strftime\nlogging.getLogger(\"paramiko\").setLevel(logging.WARNING)\nfrom lib.tca import mapping\nfrom lib.io.fix import FixTranslator \nfrom import_FIX import DatabasePlug\n\nclass DatabaseUpdate(DatabasePlug):\n def update_exclude_auction(self):\n to_return = []\n self.io = \"I\"\n \n logging.info('---------------------------------') \n logging.info('-------- Update Algo Orders -------')\n logging.info('---------------------------------')\n for s in self.conf:\n logging.info('Get data from server: ' + str(s))\n self.server = self.conf[s]\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(self.server['ip_addr'], \n username = self.server['list_users']['flexsys']['username'], \n password = self.server['list_users']['flexsys']['passwd'])\n for day in self.dates:\n logging.info('-------- %s -------' % day)\n \n self.logPath = './logs/trades/%s/FLINKI_%s%s%s.fix' %(day, self.source, day, self.io)\n \n \n \n cmd = \"prt_fxlog %s 3\" % self.logPath\n 
(stdin, stdout_grep, stderr) = ssh.exec_command(cmd)\n i = 0\n for line in stdout_grep:\n try:\n d = self.fix_translator.line_translator(line)\n p_cl_ord_id = day + d[\"ClOrdID\"] + s.replace('02','01')\n #print \"Avant = \" + str(list(self.client[self.database]['AlgoOrders'].find({\"p_cl_ord_id\" : p_cl_ord_id}))[0][\"ExcludeAuction\"]) + \" - Apres = \" + str(d[\"ExcludeAuction\"])\n self.client[self.database]['AlgoOrders'].update({\"p_cl_ord_id\" : p_cl_ord_id}, \n {\"$set\" : {\"ExcludeAuction\" : d[\"ExcludeAuction\"]}})\n i = i + 1\n except IndexError:\n pass\n except KeyError:\n pass\n except :\n get_traceback()\n logging.info('Update %d data '% i)\n ssh.close()\n \n \nif __name__ == '__main__':\n from lib.dbtools.connections import Connections\n Connections.change_connections(\"dev\") \n \n database_server = 'MARS'\n database = 'Mars'\n environment = 'dr'\n source = 'CLNT1'\n date = last_business_day(datetime.now())\n limit = datetime(year = 2013, month = 10, day = 8)\n dates = []\n l_date = []\n \n while date > limit:\n dates.append(datetime.strftime(date, \"%Y%m%d\"))\n date = last_business_day(date)\n\n# dates = [date]\n\n DatabaseUpdate(database_server = database_server, \n database = database,\n environment = environment, \n source = source, \n dates = dates,\n mode = \"write\").update_exclude_auction()\n ","sub_path":"projects/DMAlgo/src/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"5806210","text":"import sfml as sf\n\n\nclass scoreTable(object):\n def __init__(self, font, font_size, set_middle, left_pos, right_pos, engine):\n\n self.engine = engine\n\n self.font = font\n self.font_size = font_size\n\n self.left_pos = left_pos\n self.right_pos = right_pos\n\n self.left_score = 0\n self.right_score = 0\n\n self.set_middle = set_middle\n\n self.left_score_text = sf.Text(str(self.left_score), font, font_size)\n self.right_score_text = sf.Text(str(self.right_score), font, font_size)\n\n if set_middle:\n self.left_score_text.position = left_pos - self.left_score_text.global_bounds.size/2.\n self.right_score_text.position = right_pos - self.right_score_text.global_bounds.size/2.\n else:\n self.left_score_text.position = left_pos\n self.right_score_text.position = right_pos\n\n\n def check_for_win(self):\n if self.left_score == self.engine.game_settings.score_to or \\\n self.right_score == self.engine.game_settings.score_to:\n return True\n else:\n return False\n\n def left_scored(self):\n self.left_score += 1\n self.left_score_text = sf.Text(str(self.left_score), self.font, self.font_size)\n\n if self.set_middle:\n self.left_score_text.position = self.left_pos - self.left_score_text.global_bounds.size/2.\n\n else:\n self.left_score_text.position = self.left_pos\n\n def right_scored(self):\n self.right_score += 1\n self.right_score_text = sf.Text(str(self.right_score), self.font, self.font_size)\n\n if self.set_middle:\n self.right_score_text.position = self.right_pos - self.right_score_text.global_bounds.size/2.\n\n else:\n self.right_score_text.position = self.right_pos\n\n def get_left_score_text(self):\n return self.left_score_text\n\n def get_right_score_text(self):\n return self.right_score_text\n\n def get_scores(self):\n return self.left_score, 
self.right_score\n\n\n\n\npass","sub_path":"gameutils/scoretable.py","file_name":"scoretable.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585560385","text":"from functools import partial\nfrom multiprocessing import Process\nimport multiprocessing\nimport threading\n\nimport NoDaemonProcess as ndp\nimport Shared\nimport product_downloader as prdl\nimport product_meta as pm\nfrom log import get_logger\nfrom utils import rdm_sleep\n\nlogger = get_logger()\n''' Library of communicating processes over a shared object\n index: list of objects\n'''\n\n\nclass download_decorator(object):\n\n def __init__(self, target):\n self.target = target\n\n def __call__(self, *args):\n \"\"\"\n :param args: {'product': '', 'tasks': [{'bands': [], 'index': ''},], 'indices_expr': {'index': 'expr',}}\n :return:\n \"\"\"\n product = args[0]\n task = args[1]\n bands = task['bands']\n index = task['index']\n index_expr = args[2]\n s3conf = args[3]\n whoaim(\"a process assigned to bands %s from %s\" % (bands, product))\n self._register_and_download_bands(product, bands, s3conf)\n\n return partial(self.target, index=index, index_expr=index_expr)\n # return self.target(pm.get_meta_from_prod(self.product), self.params)\n\n def _register_and_download_bands(self, product, bands, s3conf):\n valid_index = [key for key in bands if key not in Shared.shared.dict]\n for v in valid_index:\n Shared.shared.write(v, False)\n logger.info('Registered in shared object: %s' % ','.join(bands))\n logger.info('The shared object: %s' % Shared.shared.dict)\n rdm_sleep()\n\n Shared.shared.dict[\"nbproc\"] += -1\n if Shared.shared.dict[\"nbproc\"] == 0:\n # Shared.shared.write('Init', False)\n self._run_download_manager(product, s3conf)\n\n # barrier until bands are downloaded\n while not all(Shared.shared.dict[k] for k in bands):\n logger.debug('Keys found for @%s, %s', multiprocessing.current_process().name,\n [k for k in bands if Shared.shared.dict[k]])\n rdm_sleep(1)\n logger.debug('The shared object after barrier: %s' % Shared.shared.dict)\n\n def _run_download_manager(self, product, s3conf):\n\n def create_download_threads(bands_loc, metadata_loc):\n whoaim(\"the download manager process for metadata and bands %s for prod %s.\" % (bands_loc, product))\n object_list = [k for k in Shared.shared.dict.keys() if k in bands_loc.keys()]\n logger.info(\"Bands selected: %s for prod %s\", object_list, product)\n meta = threading.Thread(target=prdl.get_product_metadata,\n args=(metadata_loc, s3conf))\n\n bands = threading.Thread(target=prdl.get_product_data,\n args=(bands_loc, s3conf, object_list))\n meta.start()\n meta.join() # Can be optimized\n bands.start()\n bands.join()\n\n def download_manager():\n bands_loc, metadata_loc = prdl.init(s3conf, product)\n create_download_threads(bands_loc, metadata_loc)\n\n downlad_manager_daemon = Process(target=download_manager)\n downlad_manager_daemon.daemon = True\n downlad_manager_daemon.start()\n downlad_manager_daemon.join()\n\n\ndef whoaim(id):\n logger.info(\"I'm running on CPU #%s and I am %s\" % (multiprocessing.current_process(), id))\n\n\ndef main(proc_func, args, s3conf):\n \"\"\"\n :param proc_func: processing function\n :param args: {'product': '', 'tasks': [{'bands': [], 'index': ''},], 'indices_expr': {'index': 'expr',}}\n :return:\n \"\"\"\n nbproc = len(args['tasks'])\n Shared.shared.write(\"nbproc\", nbproc)\n pool = ndp.MyPool(nbproc)\n prod_endpoint = 
pm.get_meta_from_prod(args['product'])\n\n def proc_func_runner(_proc_func):\n return _proc_func(prod_endpoint)\n\n res = []\n for task in args['tasks']:\n logger.info('Starting async daemon for task: %s' % task)\n res.append(pool.apply_async(\n download_decorator(proc_func),\n args=(args['product'], task, args['indices_expr'][task['index']], s3conf),\n callback=proc_func_runner))\n\n pool.close()\n pool.join()\n","sub_path":"proc_runner.py","file_name":"proc_runner.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"626883285","text":"#!/usr/bin/env python3\n\n\"\"\" This module is used to rename every symbol that is defined in the given code base.\nIt works on LLVM IR code.\n\"\"\"\n\nimport re\nimport json\n\ndef detect_names(src, sub):\n \"\"\" This function detects every relevant symbol inside the file of given filename src\n and it returns a mapping of the original symbol to the new symbol that is\n derived by the given function sub.\n \"\"\"\n\n mapping = dict()\n\n catchall = re.compile(\"(?:@(?P<variable_name>\\\\S+) = \"\n \"(?!internal|private|appending|external).*|\"\n \"define (?!internal|private)[^@]*\"\n \"@(?P<function_name>[^(\\\"]+|\\\"[^\\\"]*\\\")\\\\(.*)[\\n\\r]*\")\n\n with open(src) as fd_src:\n for line in fd_src:\n match = re.match(catchall, line)\n\n if match:\n if match.group(\"variable_name\") is not None:\n name = match.group(\"variable_name\")\n mapping['@' + name] = '@' + sub(name)\n elif match.group(\"function_name\") is not None:\n name = match.group(\"function_name\")\n mapping['@' + name] = '@' + sub(name)\n\n return mapping\n\ndef substitute(dest, src, mapping):\n \"\"\" This function substitutes every given symbol s in mapping.keys() by its\n associated substitution in mapping[s] inside the given file src and\n writes the result to the given file named dest.\n \"\"\"\n\n with open(src) as fin:\n content = fin.read()\n\n with open(dest, 'w') as fout:\n regex_match_symbols = re.compile('|'.join(map(re.escape, mapping)))\n\n for line in content.split('\\n'):\n substitution = regex_match_symbols.sub(lambda match: mapping[match.group(0)], line)\n print(substitution, file=fout)\n\ndef rename(dest, src, prefix):\n \"\"\" This function renames every relevant symbol inside the file of given filename src\n by adding the given prefix to every symbol. It writes back the result to the file of\n given filename dest.\n \"\"\"\n\n sub = lambda f: \"{0}{2}_{1}\".format(*re.match(r\"([_]*)(.*)\", f).groups(), prefix)\n\n mapping = detect_names(src, sub)\n substitute(dest, src, mapping)\n return mapping\n\ndef main():\n \"\"\" This function is called if this script should be run standalone. 
\"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(description='Rename Symbols in LLVM IR')\n parser.add_argument('-p', '--prefix', help='prefix that should be added to every symbol')\n parser.add_argument('-i', '--input', help='path to input file (content in LLVM IR)')\n parser.add_argument('-o', '--output', help='path to output file (writes LLVM IR)')\n parser.add_argument('-m', '--write-mapping', help='write mapping as json to file')\n parser.add_argument('-v', '--verbose', help='print rename mapping to stdout')\n args = parser.parse_args()\n\n mapping = rename(args.output, args.input, args.prefix)\n\n if args.write_mapping:\n with open(args.write_mapping, 'w') as f:\n json.dump(mapping, f, indent=4)\n\n if args.verbose:\n for name_old, name_new in mapping.items():\n print(name_old, \"-->\", name_new)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sputnik/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"367028865","text":"import fengkong\r\n\r\ndef sm_zx_fs(code,lotteryNumber):\r\n a,b,c = code.split('|')[0],code.split('|')[1],code.split('|')[2]\r\n x,y,z = lotteryNumber.split(',')[0],lotteryNumber.split(',')[1],lotteryNumber.split(',')[2]\r\n if x in a and y in b and z in c:\r\n print('win')\r\n else:print('no win')\r\n\r\ndef sm_zx_hz(code,lotteryNumber):\r\n x,y,z = lotteryNumber.split(',')[0],lotteryNumber.split(',')[1],lotteryNumber.split(',')[2]\r\n sum = int(x)+int(y)+int(z)\r\n print(sum)\r\n if ',' in code:\r\n codeList = code.split(',')\r\n if str(sum) in codeList:\r\n print('win')\r\n else:print('no win')\r\n else:\r\n codeList = code\r\n if str(sum) in codeList:\r\n print('win')\r\n else:print('no win')\r\n\r\ndef sm_zux_z3(code,lotteryNumber):\r\n lotteryNumberList = lotteryNumber.split(',')\r\n a = list(set(lotteryNumberList))\r\n if len(a) == 2 and a[0] in code and a[1] in code:\r\n print('win')\r\n else:print('no win')\r\n\r\ndef sm_zux_z6(code,lotteryNumber):\r\n lotteryNumberList = lotteryNumber.split(',')\r\n a = list(set(lotteryNumberList))\r\n if len(a) == 3 and a[0] in code and a[1] in code and a[2] in code:\r\n print('win')\r\n else:print('no win')\r\n\r\ndef sm_zux_hz(code,lotteryNumber):\r\n lotteryNumberList = lotteryNumber.split(',')\r\n intlotteryNumber = map(int,lotteryNumberList)\r\n Sum = sum(intlotteryNumber)\r\n a = list(set(lotteryNumberList))\r\n if len(a) != 1:\r\n if str(Sum) in code:\r\n print('win')\r\n else:print('no win')\r\n else:print('no win')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# losing order: 201707310X2JgZX15I20001\r\n# winning order: 201707310X2JgUv15I30001\r\n\r\n# lottery = 'MC3D'\r\n# order = '[{\"method\":\"sm_zx_fs\",\"code\":\"0|1|2\",\"nums\":\"1\",\"piece\":\"1\",\"price\":\"2\",\"odds\":\"1848\",\"point\":\"0\",\"amount\":\"2\"}]'\r\n# F = fengkong.Fengkong('hiro1101','hiro1101')\r\n# orderId = F.add_order(lottery,F.get_issue(lottery),order)\r\n# res = F.get_bonus2('201707310X2JgZX15I20001')\r\n# code = res['code']\r\n# lotteryNumber = res['lotteryNumber']\r\n#\r\n# sm_zux_hz(code,lotteryNumber)\r\nfengkong.to_md5('hiro1101')\r\n","sub_path":"python_case/testpy/MC3D/IfWin.py","file_name":"IfWin.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"376118142","text":"\n# coding: utf-8\n\n# In[23]:\n\n\nimport pandas as pd\nimport string\n#opening both the files and\ndef number_g_b():\n with open(\"./namesGirls.txt\") as f:\n 
lines_g=f.read().splitlines()\n with open(\"./namesBoys.txt\") as f:\n lines_b=f.read().splitlines()\n #print(lines_g)\n #print(lines_b)\n df=pd.DataFrame({\"End Letter\": list(string.ascii_lowercase),\"Number_of_Girls\":0,\"Number_of_Boys\":0})\n #print(df)\n #to check the ascii value of the first letter\n #print(\"The ASCII value of 'a' is\",ord(\"a\"))\n for g in lines_g:\n #taking the last character and assigning it to charg\n charg=g[-1]\n index=ord(charg)-97\n df.loc[index,'Number_of_Girls']=df.loc[index,'Number_of_Girls']+1\n for b in lines_b:\n #taking the last character and assigning it to charg\n charb=b[-1]\n index=ord(charb)-97\n df.loc[index,\"Number_of_Boys\"]=df.loc[index,\"Number_of_Boys\"]+1\n print(df)\n \nnumber_g_b()\n\n","sub_path":"girls_boys.py","file_name":"girls_boys.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591054354","text":"import requests,sys,time,os\nfrom bs4 import BeautifulSoup\nfrom comutils import *\nclass tieba(object):\n\thtml=''\n\ttot_page=2\n\turl=''\n\tusername=''\n\tdef __init__(self,username):\n\t\tself.url=r'http://tieba.baidu.com/f/search/ures?ie=utf-8&un=%s&pn='%username\n\t\tself.username=username\n\t# 获取每一页的发言\n\tdef get_comment(self,pn):\n\t\tpage_cmt=[]\n\t\tself.html=get_web(self.url+str(pn),encoding='GBK')\n\t\tsoup=BeautifulSoup(self.html,\"html5lib\")\n\t\ttry:\n\t\t\tposts=soup.find_all('div',class_='s_post')\n\t\t\tfor s in posts:\n\t\t\t\ta_tag=s.find('span').find('a')\n\t\t\t\ttitle=a_tag.text.replace(\"回复:\",'')\n\t\t\t\turl='http://tieba.baidu.com'+a_tag['href']\n\t\t\t\tcomment=s.find('div').text.strip()\n\t\t\t\tcomment_time=s.find_all('font')[-1].text\n\t\t\t\tpage_cmt.append({'comment':comment,'title':title,'time':comment_time,'url':url})\n\t\t\t# get the max page\n\t\t\tif pn==1:\n\t\t\t\thref=soup.find('div',class_='pager-search').find_all('a')[-1]['href']\n\t\t\t\tt=href.rindex('=')\n\t\t\t\tself.tot_page=int(href[t+1:])\n\t\t\t\tprint(\"tot_page= %s\"%self.tot_page)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tself.tot_page=pn\n\t\tif page_cmt==[]:\n\t\t\tself.tot_page=pn\n\t\treturn page_cmt\n\t\t# 获取所有发言\n\tdef get_all_comment(self,json_path):\n\t\tpage=1\n\t\tcomment,cur,last=[],[],[]\n\t\tsleep_time=3\n\t\twhile(page')\n\t\t\tfor i,s in enumerate(data):\n\t\t\t\ttitle=s.get('title')\n\t\t\t\tcmt,ctime,url=s.get('comment'),s.get('time'),s.get('url')\n\t\t\t\tfp.write('
\\\n\t\t\t\t\t%s\\\n\t\t\t\t\t\\\n\t\t\t\t\t%s--%s %s %s%s\\\n\t\t\t\t\t
'%(cmt,i+1,L,url,title,ctime))\n\t\t\tfp.write('')\nif __name__ == '__main__':\n\tusername='月光'\n\tt=tieba(username)\n\tt.start()","sub_path":"baidu/baidu_tieba/tieba.py","file_name":"tieba.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59088146","text":"from django.forms import ModelForm\nfrom django import forms\nfrom .models import Line\n\n\nclass OrderLine(ModelForm):\n OPTIONS = (\n ('',''),\n ('02','02'),\n ('03','03'),\n ('04','04'),\n ('05','05'),\n ('06','06'),\n )\n stat = forms.ChoiceField(choices=OPTIONS)\n \n class Meta:\n model = Line \n fields = ['order_number','customer_number', 'customer_name', 'cat', 'ship_days', 'travel_days',\n 'order_total', 'estimated_costs', 'line_number','part_number','description','whse','alloc','qty_order','qty_avail','required_date',\n 'promised_date','unit_price','ship_tolerance_min','ship_tolerance_max','uos','price_book','stat']\n\n","sub_path":"orders/formsLine.py","file_name":"formsLine.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"229992325","text":"'''\nPurpose :Market Risk feed files\nDepartment and Desk :IT\nRequester: :Natalie Austin\nDeveloper :Douglas Finkel / Henk Nel\nCR Number :264536\n'''\n\nimport ael, string, acm, MR_MainFunctions\n\nInsL = []\n\n# OPENFILE ##########################################################################################################\n \ndef OpenFile(temp,FileDir,Filename,*rest):\n filename = FileDir + Filename\n outfile = open(filename, 'w')\n outfile.close()\n\n del InsL[:]\n InsL[:] = []\n\n return filename\n\n# OPENFILE ##########################################################################################################\n\ndef vol_bm_mat(ins):\n dt = ael.date_today()\n \n if ins.instype in ('Cap', 'Floor'):\n per = ins.legs()[0].rolling_period\n elif ins.instype == 'Option':\n per = ins.und_insaddr.legs()[0].end_period\n else:\n per = ''\n \n if per == '':\n return ael.date('1899-12-31').to_string('%y%m%d') + '_0d'\n else: \n return dt.add_period(per)\n\n\ndef vol_bm_exp(ins):\n dt = ael.date_today()\n \n if ins.instype in ('Cap', 'Floor'):\n per = ins.legs()[0].end_period \n elif ins.instype == 'Option':\n per = ins.exp_period\n else:\n per = ''\n \n if per == '':\n return ael.date('1899-12-31').to_string('%y%m%d') + '_0d'\n else: \n return dt.add_period(per)\n\n# WRITE - FILE ######################################################################################################\n\ndef Write(v,FileDir,Filename,*rest): \n filename = FileDir + Filename \n if (v.seqnbr) not in InsL:\n InsL.append(v.seqnbr)\n outfile = open(filename, 'a')\n \n relativeStrikePriceFromAbsolute = acm.GetFunction('relativeStrikePriceFromAbsolute', 4)\n cs = acm.Calculations().CreateStandardCalculationsSpaceCollection()\n\n volsurfacedict = {}\n #points in vol surface\n #acm.FInstrument['USD/CAP/1Y/RFWD-2'].Inspect()\n for points in v.points():\n \n if points.insaddr:\n ins = acm.FInstrument[points.insaddr.insaddr]\n ins_calc = ins.Underlying().Calculation()\n und_price = ins_calc.TheoreticalPrice(cs)\n\n \n if points.insaddr.strike_type in ('Rel Frw', 'Rel Spot'):\n Strike = points.insaddr.strike_price\n else:\n Strike = round(relativeStrikePriceFromAbsolute(und_price, ins.StrikePrice(), 3, False).Number(), 1)\n \n if str(Strike) == '1.#QNAN':\n Strike = points.strike\n \n GnVolMnyTrmSf0AXS = 
ael.date_today().days_between(vol_bm_mat(points.insaddr))\n GnVolMnyTrmSf1AXS = ael.date_today().days_between(vol_bm_exp(points.insaddr))\n GnVolMnyTrmSfNODE = v.vol_get(ins.StrikePrice(), vol_bm_exp(points.insaddr), vol_bm_mat(points.insaddr), 1)\n \n else:\n \n #print v.vol_name, points.strike, v.vol_type\n Strike = points.strike\n GnVolMnyTrmSf0AXS = MR_MainFunctions.CurveDays(points.undmat_period)\n GnVolMnyTrmSf1AXS = MR_MainFunctions.CurveDays(points.exp_period)\n GnVolMnyTrmSfNODE = points.volatility\n \n #Creating dictionary to hold intial 3D vol surface values\n \n if volsurfacedict.has_key(Strike):\n volsurfacedict[Strike][GnVolMnyTrmSf0AXS, GnVolMnyTrmSf1AXS] = GnVolMnyTrmSfNODE\n else:\n volsurfacedict[Strike]={}\n volsurfacedict[Strike][GnVolMnyTrmSf0AXS, GnVolMnyTrmSf1AXS] = GnVolMnyTrmSfNODE\n \n \n for Strike in volsurfacedict.keys():\n \n #Base record\n BAS\t = 'BAS'\n HeaderName =\t'Volatility - Term/TermSPEC'\n OBJECT = 'Volatility - Term/TermSPEC'\n TYPE = 'Volatility - Term/Term'\n \n IDENTIFIER\t = v.vol_name + '_TT_' + str(Strike)\n NAME = v.vol_name + '_TT_' + str(Strike)\n \n ActiveFLAG = 'TRUE'\n \n CurveUnitCAL\t= '' \n CurveUnitDAYC = MR_MainFunctions.DayCountFix('Act/365') \n CurveUnitPERD = 'annual'\t\n CurveUnitUNIT\t= '%'\n \n DatumDATE\t = MR_MainFunctions.Datefix(acm.Time().DateNow())\n OriginOffsetNB\t= '0'\n \n RelativeCurveFLAG\t= 'TRUE'\n \n if v.vol_type == 'Benchmark Spread Call/Put':\n StateProcFUNC = '@spread'\n else: \n StateProcFUNC = ''\n \n TimeEvolutionFUNC\t= '@Constant' \n FunctionIdFLAG\t= 'TRUE' \n \n GenVolSTSfExt0FLAG\t= 'FALSE'\n GenVolSTSfExt1FLAG\t= 'FALSE'\n GenVolStrTrmSf0SIN = '@Linear'\n GenVolStrTrmSf1SIN = '@Linear'\n \n outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'%(BAS, HeaderName, OBJECT, TYPE, IDENTIFIER, NAME, ActiveFLAG, CurveUnitCAL, CurveUnitDAYC, CurveUnitPERD, CurveUnitUNIT, DatumDATE, OriginOffsetNB, RelativeCurveFLAG, StateProcFUNC, TimeEvolutionFUNC, FunctionIdFLAG, GenVolSTSfExt0FLAG, GenVolSTSfExt1FLAG, GenVolStrTrmSf0SIN, GenVolStrTrmSf1SIN))\n\n # Roll Over Function Parameters\n BASFLAG = 'rm_ro'\n Volatility = 'Volatility - Term/TermSPEC : Function Parameters'\n ATTRIBUTE = 'Function Parameters'\n OBJECT = 'Volatility - Term/TermSPEC'\n FunctionParamsVAL = ''\n if FunctionParamsVAL != '':\n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, FunctionParamsVAL))\n \n # Roll Over Procedure Parameter\n BASFLAG = 'rm_ro'\n Volatility = 'Volatility - Term/TermSPEC : Procedure Parameter'\n ATTRIBUTE = 'Procedure Parameter'\n OBJECT = 'Volatility - Term/TermSPEC'\n \n if v.und_vol_seqnbr:\n ProcedureParamXREF = v.und_vol_seqnbr.vol_name + '_TT_0.0'\n else:\n ProcedureParamXREF = ''\n \n if ProcedureParamXREF != '':\n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, ProcedureParamXREF)) \n\n # Build volatility surface\n for Key in volsurfacedict[Strike].keys():\n \n # Roll Over Generic Volatility Term Term Surface\n \n BASFLAG = 'rm_ro'\n Volatility = 'Volatility - Term/TermSPEC : Generic Volatility Term Term Surface'\n ATTRIBUTE = 'Generic Volatility Term Term Surface'\n OBJECT = 'Volatility - Term/TermSPEC'\n\n GnVolMnyTrmSf0AXS = Key[0] \n GnVolMnyTrmSf1AXS = Key[1]\n GnVolMnyTrmSfNODE = volsurfacedict[Strike][GnVolMnyTrmSf0AXS, GnVolMnyTrmSf1AXS]\n\n outfile.write('%s,%s,%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, GnVolMnyTrmSf0AXS, GnVolMnyTrmSf1AXS, GnVolMnyTrmSfNODE))\n\n\n \n BAS\t = 'BAS'\n HeaderName 
=\t'Volatility - Moneyness/Term/TermSPEC'\n OBJECT = 'Volatility - Moneyness/Term/TermSPEC'\n TYPE = 'Volatility - Moneyness/Term/Term'\n IDENTIFIER\t = v.vol_name\n NAME = v.vol_name\n CurveUnitDAYC = MR_MainFunctions.DayCountFix('Act/365') \n CurveUnitPERD = 'annual'\n CurveUnitCAL\t= '' \n RelativeCurveFLAG\t= 'TRUE'\n OriginOffsetNB\t= '0'\n TimeEvolutionFUNC\t= '@Constant' \n FunctionIdFLAG\t= 'TRUE' \n DatumDATE\t = MR_MainFunctions.Datefix(acm.Time().DateNow())\n CurveFUNC = '@term/term 3D'\n MoneynessTypeENUM = 'S - K'\n\n outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'%(BAS, HeaderName, OBJECT, TYPE, IDENTIFIER, NAME, CurveUnitDAYC, CurveUnitPERD, CurveUnitCAL, RelativeCurveFLAG, OriginOffsetNB, TimeEvolutionFUNC, FunctionIdFLAG, DatumDATE, CurveFUNC, MoneynessTypeENUM))\n \n for Strike in sorted(volsurfacedict.keys()):\n \n BASFLAG = 'rm_ro'\n Volatility = 'Volatility - Moneyness/Term/TermSPEC : Procedure Parameters'\n ATTRIBUTE = 'Procedure Parameter'\n OBJECT = 'Volatility - Moneyness/Term/TermSPEC'\n ProcedureParamXREF = v.vol_name + '_TT_' + str(Strike)\n \n if ProcedureParamXREF != '':\n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, ProcedureParamXREF))\n \n BASFLAG = 'rm_ro'\n Volatility = 'Volatility - Moneyness/Term/TermSPEC : Function Parameters'\n ATTRIBUTE = 'Function Parameters'\n OBJECT = 'Volatility - Moneyness/Term/TermSPEC'\n FunctionParamsVAL = Strike\n \n if FunctionParamsVAL != '':\n outfile.write('%s,%s,%s,%s,%s\\n'%(BASFLAG, Volatility, ATTRIBUTE, OBJECT, FunctionParamsVAL))\n \n outfile.close()\n\n return 'Success'\n \n# WRITE - FILE ######################################################################################################\n\n\n\n\n","sub_path":"Python modules/MR_Vol_Moneyness_TermTerm_Curve.py","file_name":"MR_Vol_Moneyness_TermTerm_Curve.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594031229","text":"import airflow.utils.dates\nfrom airflow.models import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\ndag = DAG(\n dag_id=\"exercise2\",\n default_args={\"owner\": \"Schuberg Philis\", \"start_date\": airflow.utils.dates.days_ago(5)},\n schedule_interval=None,\n)\n\nprint_execution_date = PythonOperator(\n task_id=\"print_execution_date\", python_callable=lambda: print(\"{{ execution_date }}\"), dag=dag\n)\n\nwait1 = BashOperator(task_id=\"wait1\", bash_command=\"sleep 1\", dag=dag)\nwait5 = BashOperator(task_id=\"wait5\", bash_command=\"sleep 5\", dag=dag)\nwait10 = BashOperator(task_id=\"wait10\", bash_command=\"sleep 10\", dag=dag)\n\ntheend = DummyOperator(task_id=\"theend\", dag=dag)\n\nprint_execution_date >> [wait1, wait5, wait10] >> theend\n","sub_path":"dags/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29103277","text":"import os\nimport glob\nimport torch\nimport pickle\nimport numpy as np\nfrom utils.spectral_graph_partition import *\n\n__all__ = ['GraphData']\n\n\nclass GraphData(object):\n\n def __init__(self, config, split='train'):\n assert split == 'train' or split == 'dev' or split == 'test', \"no such split\"\n self.split = split\n self.config = config\n self.seed = config.seed\n self.npr = 
np.random.RandomState(self.seed)\n self.data_path = config.dataset.data_path\n self.num_edgetype = config.dataset.num_edge_type\n self.model_name = config.model.name\n self.use_eigs = True if hasattr(config.model, 'num_eig_vec') else False\n if self.use_eigs:\n self.num_eigs = config.model.num_eig_vec\n\n if self.model_name == 'GraphSAGE':\n self.num_sample_neighbors = config.model.num_sample_neighbors\n\n self.train_data_files = glob.glob(\n os.path.join(self.data_path, 'synthetic_train_*.p'))\n self.dev_data_files = glob.glob(\n os.path.join(self.data_path, 'synthetic_dev_*.p'))\n self.test_data_files = glob.glob(\n os.path.join(self.data_path, 'synthetic_test_*.p'))\n\n self.num_train = len(self.train_data_files)\n self.num_dev = len(self.dev_data_files)\n self.num_test = len(self.test_data_files)\n self.num_graphs = self.num_train + self.num_dev + self.num_test\n\n def __getitem__(self, index):\n if self.split == 'train':\n return pickle.load(open(self.train_data_files[index], 'rb'))\n elif self.split == 'dev':\n return pickle.load(open(self.dev_data_files[index], 'rb'))\n else:\n return pickle.load(open(self.test_data_files[index], 'rb'))\n\n def __len__(self):\n if self.split == 'train':\n return self.num_train\n elif self.split == 'dev':\n return self.num_dev\n else:\n return self.num_test\n\n def collate_fn(self, batch):\n \"\"\"\n Collate function for mini-batch\n N.B.: we pad all samples to the maximum of the mini-batch\n \"\"\"\n assert isinstance(batch, list)\n\n data = {}\n batch_size = len(batch)\n node_size = [bb['node_feat'].shape[0] for bb in batch]\n batch_node_size = max(node_size) # value -> N\n pad_node_size = [batch_node_size - nn for nn in node_size]\n \n # pad feature: shape (B, N, D)\n data['node_feat'] = torch.stack([\n torch.from_numpy(\n np.pad(\n bb['node_feat'], ((0, pad_node_size[ii]), (0, 0)),\n 'constant',\n constant_values=0.0)) for ii, bb in enumerate(batch)\n ]).float()\n \n # binary mask: shape (B, N)\n data['node_mask'] = torch.stack([\n torch.from_numpy(\n np.pad(\n np.ones(node_size[ii]), (0, pad_node_size[ii]),\n 'constant',\n constant_values=0.0)) for ii, bb in enumerate(batch)\n ]).byte()\n\n # label: shape (B, O)\n data['label'] = torch.cat(\n [torch.from_numpy(bb['label']) for bb in batch], dim=0).float()\n\n if self.model_name == 'GPNN':\n #########################################################################\n # GPNN\n # N.B.: one can perform graph partition offline to speed up\n ######################################################################### \n # graph Laplacian of multi-graph: shape (B, N, N, E)\n L_multi = np.stack(\n [\n np.pad(\n bb['L_multi'], ((0, pad_node_size[ii]),\n (0, pad_node_size[ii]), (0, 0)),\n 'constant',\n constant_values=0.0) for ii, bb in enumerate(batch)\n ],\n axis=0)\n\n # graph Laplacian of simple graph: shape (B, N, N, 1)\n L_simple = np.stack(\n [\n np.expand_dims(\n np.pad(\n bb['L_simple_4'], (0, pad_node_size[ii]),\n 'constant',\n constant_values=0.0),\n axis=3) for ii, bb in enumerate(batch)\n ],\n axis=0)\n\n L = np.concatenate([L_simple, L_multi], axis=3)\n data['L'] = torch.from_numpy(L).float()\n\n # graph partition\n L_cluster, L_cut = [], []\n\n for ii in range(batch_size):\n node_label = spectral_clustering(L_simple[ii, :, :, 0], self.config.model.num_partition)\n \n # Laplacian of clusters and cut\n L_cluster_tmp, L_cut_tmp = get_L_cluster_cut(L_simple[ii, :, :, 0], node_label)\n\n L_cluster += [L_cluster_tmp]\n L_cut += [L_cut_tmp]\n\n data['L_cluster'] = torch.from_numpy(np.stack(L_cluster, 
axis=0)).float()\n data['L_cut'] = torch.from_numpy(np.stack(L_cut, axis=0)).float()\n elif self.model_name == 'GraphSAGE':\n #########################################################################\n # GraphSAGE\n #########################################################################\n # N.B.: adjacency mat of GraphSAGE is asymmetric\n nonempty_mask = np.zeros((batch_size, batch_node_size, 1))\n nn_idx = np.zeros((batch_size, batch_node_size, self.num_sample_neighbors,\n self.num_edgetype + 1))\n\n for ii in range(batch_size):\n for jj in range(self.num_edgetype + 1):\n if jj == 0:\n tmp_L = batch[ii]['L_simple_4']\n else:\n tmp_L = batch[ii]['L_multi'][:, :, jj - 1]\n\n for nn in range(tmp_L.shape[0]):\n nn_list = np.nonzero(tmp_L[nn, :])[0]\n\n if len(nn_list) >= self.num_sample_neighbors:\n nn_idx[ii, nn, :, jj] = self.npr.choice(\n nn_list, size=self.num_sample_neighbors, replace=False)\n nonempty_mask[ii, nn] = 1\n elif len(nn_list) > 0:\n nn_idx[ii, nn, :, jj] = self.npr.choice(\n nn_list, size=self.num_sample_neighbors, replace=True)\n nonempty_mask[ii, nn] = 1\n\n data['nn_idx'] = torch.from_numpy(nn_idx).long()\n data['nonempty_mask'] = torch.from_numpy(nonempty_mask).float()\n elif self.model_name == 'GAT':\n #########################################################################\n # GAT\n #########################################################################\n # graph Laplacian of multi-graph: shape (B, N, N, E)\n L_multi = np.stack(\n [\n np.pad(\n bb['L_multi'], ((0, pad_node_size[ii]),\n (0, pad_node_size[ii]), (0, 0)),\n 'constant',\n constant_values=0.0) for ii, bb in enumerate(batch)\n ],\n axis=0)\n\n # graph Laplacian of simple graph: shape (B, N, N, 1)\n L_simple = np.stack(\n [\n np.expand_dims(\n np.pad(\n bb['L_simple_4'], (0, pad_node_size[ii]),\n 'constant',\n constant_values=0.0),\n axis=3) for ii, bb in enumerate(batch)\n ],\n axis=0)\n\n L = np.concatenate([L_simple, L_multi], axis=3)\n\n # trick of graph attention networks\n def adj_to_bias(adj, sizes, nhood=1):\n nb_graphs = adj.shape[0]\n mt = np.empty(adj.shape)\n for g in range(nb_graphs):\n mt[g] = np.eye(adj.shape[1])\n for _ in range(nhood):\n mt[g] = np.matmul(mt[g], (adj[g] + np.eye(adj.shape[1])))\n for i in range(sizes[g]):\n for j in range(sizes[g]):\n if mt[g][i][j] > 0.0:\n mt[g][i][j] = 1.0\n return -1e9 * (1.0 - mt)\n\n L_new = []\n for ii in range(batch_size):\n L_new += [\n np.transpose(\n adj_to_bias(\n np.transpose(L[ii, :, :, :], (2, 0, 1)),\n [batch_node_size] * L.shape[3]), (1, 2, 0))\n ]\n\n data['L'] = torch.from_numpy(np.stack(L_new, axis=0)).float()\n else:\n #########################################################################\n # All other models\n ######################################################################### \n # graph Laplacian of multi-graph: shape (B, N, N, E)\n L_multi = torch.stack([\n torch.from_numpy(\n np.pad(\n bb['L_multi'], ((0, pad_node_size[ii]),\n (0, pad_node_size[ii]), (0, 0)),\n 'constant',\n constant_values=0.0)) for ii, bb in enumerate(batch)\n ]).float()\n\n # graph Laplacian of simple graph: shape (B, N, N, 1)\n L_simple_key = 'L_simple_4'\n if self.model_name == 'DCNN':\n L_simple_key = 'L_simple_7'\n elif self.model_name in ['ChebyNet']:\n L_simple_key = 'L_simple_6'\n\n if self.model_name == 'ChebyNet':\n L_simple = torch.stack([\n torch.from_numpy(\n np.expand_dims(\n np.pad(\n -bb[L_simple_key], (0, pad_node_size[ii]),\n 'constant',\n constant_values=0.0),\n axis=3)) for ii, bb in enumerate(batch)\n ]).float()\n else:\n L_simple = 
torch.stack([\n torch.from_numpy(\n np.expand_dims(\n np.pad(\n bb[L_simple_key], (0, pad_node_size[ii]),\n 'constant',\n constant_values=0.0),\n axis=3)) for ii, bb in enumerate(batch)\n ]).float()\n\n data['L'] = torch.cat([L_simple, L_multi], dim=3)\n\n # eigenvalues & eigenvectors of simple graph\n if self.use_eigs:\n eigs, eig_vecs = [], []\n for ii, bb in enumerate(batch):\n pad_eigs_len = self.num_eigs - len(bb['D_simple'])\n eigs += [\n bb['D_simple'][:self.num_eigs] if pad_eigs_len <= 0 else np.pad(\n bb['D_simple'], (0, pad_eigs_len),\n 'constant',\n constant_values=0.0)\n ]\n\n # pad eigenvectors\n pad_eig_vec = np.pad(\n bb['V_simple'], ((0, pad_node_size[ii]), (0, 0)),\n 'constant',\n constant_values=0.0)\n\n eig_vecs += [\n pad_eig_vec[:, :self.num_eigs] if pad_eigs_len <= 0 else np.pad(\n pad_eig_vec, ((0, 0), (0, pad_eigs_len)),\n 'constant',\n constant_values=0.0)\n ]\n\n data['D'] = torch.stack([torch.from_numpy(ee) for ee in eigs]).float()\n data['V'] = torch.stack(\n [torch.from_numpy(vv) for vv in eig_vecs]).float()\n\n return data\n","sub_path":"dataset/graph_data.py","file_name":"graph_data.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442915923","text":"import tkinter as tk\nimport tkinter.font as tkFont\nimport config\nfrom config import HEIGHT, WIDTH, FONT10\nfrom db import data\n\nfrom circular_button import CircularButton\nfrom text_properties import TextProperties\nfrom user_info import UserInfoFrame\n\n\nclass _NavBar(tk.Frame):\n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n self.config(bg=config.COLOR)\n\n self.title = tk.Label(\n self,\n fg=config.BG, bg=config.COLOR,\n text=\"Alumnos\",\n font=(\"Arial\", 16))\n\n self.title.place(\n anchor=tk.NW,\n x=72, y=38-24\n )\n\n\nclass _BodyFrame(tk.Frame):\n def __init__(self, controller, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n self.controller = controller\n\n self.config(bg=config.BG)\n self.add_userbtn()\n\n self.title_2 = tk.Label(\n self,\n fg=config.COLOR, bg=config.BG,\n text=\"Personas\",\n font=(\"Arial\", 14))\n\n self.title_2.place(\n anchor=tk.NW,\n x=18, y=14\n )\n\n self.user_list = tk.Frame(self, width=WIDTH, height=491, bg=config.BG)\n self.user_list.place(\n anchor=tk.NW,\n x=0, y=45\n )\n\n def update(self):\n for child in self.user_list.winfo_children():\n child.destroy()\n py = 0\n for user in data[\"users\"]:\n card = UserInfoFrame(self.user_list, self.controller, py, user)\n card.place(\n anchor=tk.NW,\n x=0, y=py\n )\n py += 72\n\n self.add_userbtn()\n\n def add_userbtn(self):\n text = TextProperties(\n fill=\"white\",\n text=\"+\",\n anchor=\"c\",\n font=\"Consolas 20\"\n )\n\n self.add_user = CircularButton(\n self, 28,\n config.COLOR, config.COLOR,\n lambda: self.controller.show(\"create_user\"),\n text)\n\n self.add_user.place(\n anchor=tk.NW,\n x=286, y=HEIGHT-56-81\n )\n\n\nclass _NoUsersFrame(tk.Frame):\n def __init__(self, controller, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n self.controller = controller\n\n tk.Label(self, text=\"NO HAY ALUMNOS\",\n font=tkFont.Font(family='Arial', size=30)).place(\n anchor=tk.CENTER,\n relx=0.5, rely=0.5\n )\n\n self.config(bg=config.BG)\n self.add_userbtn()\n\n def add_userbtn(self):\n text = TextProperties(\n fill=\"white\",\n text=\"+\",\n anchor=\"c\",\n font=\"Consolas 20\"\n )\n\n self.add_user = CircularButton(\n self, 28,\n config.COLOR, config.COLOR,\n 
lambda: self.controller.show(\"create_user\"),\n text)\n\n self.add_user.place(\n anchor=tk.NW,\n x=286, y=HEIGHT-56-81\n )\n\n\nclass UserListFrame(tk.Frame):\n def __init__(self, controller, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n self.controller = controller\n\n self.nav_bar = _NavBar(self)\n self.nav_bar.place(anchor=tk.NW, width=WIDTH, height=56)\n\n self.body = _BodyFrame(controller, self)\n self.body.place(\n anchor=tk.NW,\n width=WIDTH,\n height=HEIGHT-56,\n y=56\n )\n\n self.no_users = _NoUsersFrame(controller, self)\n self.no_users.place(\n anchor=tk.NW,\n width=WIDTH,\n height=HEIGHT-56,\n y=56\n )\n\n def update(self):\n if data[\"users\"]:\n self.body.tkraise()\n self.body.update()\n else:\n self.no_users.tkraise()\n","sub_path":"P8 Usuarios/user_list_page.py","file_name":"user_list_page.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"196434317","text":"#===============================================================================\n# INFO (__doc__)\n#===============================================================================\n\"\"\"\nAuthor : Adam Chun Wai Fok\n\nDate : AUG 2014\n\nDescription : this is the base class of all metadata node\n\nNote :\n\n\"\"\"\n\n#===============================================================================\n# IMPORT\n#===============================================================================\nimport pymel.core as pm\nimport logging\n\nimport lib.globals as globals\nimport tool.autoRig.utils_autoRig as utils ; reload(utils)\n\n#===============================================================================\n# GLOBALS\n#===============================================================================\nLOG_LEVEL = globals.LOG_LEVEL or logging.INFO\n\n#===============================================================================\n# LOGGER\n#===============================================================================\nlogger = logging.getLogger(__name__)\nlogger.setLevel(LOG_LEVEL)\nlogger.setLevel(logging.DEBUG)\n \n#===============================================================================\n# CLASS\n#===============================================================================\nclass MetaNode(pm.nt.Network):\n \"\"\" this is an example of how to create your own subdivisions of existing nodes. \"\"\"\n \n # display the type of metadata class\n metaClassID = \"MetaNode\"\n\n @classmethod\n def _isVirtual( cls, obj, name ):\n \"\"\"\n PyMEL code should not be used inside the callback, only API and maya.cmds.\n \"\"\"\n fn = pm.api.MFnDependencyNode(obj)\n try:\n if fn.hasAttribute('metaClass'):\n plug = fn.findPlug('metaClass')\n if plug.asString() == cls.metaClassID:\n return True\n return False\n except:\n pass\n return False\n\n @classmethod\n def _preCreateVirtual(cls, **kwargs ):\n \"\"\"\n This class method is called prior to node creation and gives you a\n chance to modify the kwargs dictionary that is passed to the creation\n command. 
If it returns two dictionaries, the second is used passed\n as the kwargs to the postCreate method\n \n this method must be a classmethod or staticmethod\n \n \"\"\"\n #= STANDARD ARGUMENTS ==================================================\n if 'name' not in kwargs and 'n' not in kwargs:\n # if no name is passed, then use the joint Id as the name.\n kwargs['name'] = cls.metaClassID\n \n if 'n' in kwargs:\n kwargs['name'] = kwargs.pop('n')\n \n postKwargs = {}\n #=======================================================================\n\n #= CUSTOM ARGUMENTS ====================================================\n kwargs, postKwargs = cls._preCreateVirtual_subMetaNode(kwargs, postKwargs)\n #=======================================================================\n\n return kwargs, postKwargs\n \n @classmethod\n def _preCreateVirtual_subMetaNode(cls, kwargs, postKwargs ):\n return kwargs, postKwargs \n \n @classmethod\n def _postCreateVirtual(cls, newNode, **kwargs ):\n \"\"\"\n This method is called after creating the new node, and gives you a\n chance to modify it. The method is passed the PyNode of the newly\n created node, and the second dictionary returned by the preCreate, if\n it returned two items. You can use PyMEL code here, but you should\n avoid creating any new nodes.\n \n this method must be a classmethod or staticmethod\n \"\"\"\n \n #= STANDARD ATTRIBUTES =================================================\n # add the identifying attribute. the attribute name will be set on subclasses of this class\n newNode.addAttr( 'metaClass', dt='string')\n newNode.metaClass.set(cls.metaClassID)\n newNode.metaClass.lock()\n \n # metaChildren\n newNode.addAttr('metaChildren', at=\"message\", multi=True, indexMatters=False)\n \n # metaParent\n newNode.addAttr('metaParent', at='message')\n #=======================================================================\n\n #= CUSTOM ATTRIBUTES ===================================================\n cls._postCreateVirtual_subMetaNode(newNode, kwargs)\n #=======================================================================\n \n @classmethod\n def _postCreateVirtual_subMetaNode(cls, newNode, kwargs):\n return \n\n @property \n def proxy(self):\n result = pm.listConnections(\"%s.%s\"%(self.name(), \"proxy\"))\n if result:\n return result[0]\n else:\n return None\n \n @property\n def rig(self):\n result = pm.listConnections(\"%s.%s\"%(self.name(), \"rig\"))\n if result:\n return result[0]\n else:\n return None\n \n @property\n def metaMaster(self):\n result = self.list_metaRelatives(types=[\"Master_MetaRig\"])\n \n if result:\n return result[0]\n else :\n return None\n\n @property\n def metaRoot(self):\n result = self.list_metaRelatives(types=[\"MetaRoot\"])\n \n if result:\n return result[0]\n else :\n return None \n \n #===========================================================================\n # INTERNAL FUNCTIONS\n #===========================================================================\n def _get_metaParents(self):\n return self.metaParent.listConnections()\n\n def _get_allMetaParents(self, allMetaParents=[]):\n\n for parent in self._get_metaParents():\n parent._get_allMetaParents(allMetaParents=allMetaParents)\n allMetaParents.append(parent)\n \n return allMetaParents\n \n def _get_metaChildren(self, types=[]):\n if not types:\n return self.metaChildren.listConnections()\n \n else:\n return [child for child in self.metaChildren.listConnections() if child.metaClass.get() in types]\n \n def _get_allMetaChildren(self, allMetaChildren=[]):\n for child 
in self._get_metaChildren():\n child._get_allMetaChildren(allMetaChildren=allMetaChildren)\n allMetaChildren.append(child)\n \n return allMetaChildren \n #===========================================================================\n # FUNCTIONS \n #===========================================================================\n def add_metaChild(self, node):\n if utils.isMetaNode(node):\n self.metaChildren.append(node.metaParent)\n #pm.connectAttr(node.metaParent, self.metaChildren, na=True)\n \n def add_metaChildren(self, nodes):\n for node in nodes:\n self.add_metaChild(node)\n \n def list_metaChildren(self, types=[]):\n all_meta_children = self._get_allMetaChildren(allMetaChildren=[])\n \n if not types:\n return all_meta_children\n else:\n return [child for child in all_meta_children if child.metaClass.get() in types]\n\n def list_metaParents(self, types=[]):\n all_meta_parents = self._get_allMetaParents(allMetaParents=[])\n \n if not types:\n return all_meta_parents\n else:\n return [parent for parent in all_meta_parents if parent.metaClass.get() in types]\n \n def list_metaRelatives(self, types=[]):\n \n result = []\n result.extend( self.list_metaChildren(types=types) )\n result.extend( self.list_metaParents(types=types) )\n \n return result","sub_path":"tool/_archives/20141023/autoRig_aug26/metadata/metaNodes/metaNode.py","file_name":"metaNode.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604136043","text":"# parliaments to be analyzed\n\nfrom Party import Party\nfrom Parliament import Parliament\n\n\nSEJM_V = Parliament([\n Party(\"Prawo i Sprawiedliwość\", 155),\n Party(\"Platforma Obywatelska\", 133),\n Party(\"Samoobrona Rzeczpospolitej Polskiej\", 56),\n Party(\"Sojusz Lewicy Demokratyznej\", 55),\n Party(\"Liga Polskich Rodzin\", 34),\n Party(\"Polskie Stronnictwo Ludowe\", 25),\n Party(\"Niezrzeszeni\", 2)])\n\nSEJM_VI = Parliament([\n Party(\"Platforma Obywatelska\", 209),\n Party(\"Prawo i Sprawiedliwość\", 166),\n Party(\"Lewica\", 53),\n Party(\"Polskie Stronnictwo Ludowe\", 31),\n Party(\"Niezrzeszeni\", 1)])\n\nSEJM_VII = Parliament([\n Party(\"Platforma Obywatelska\", 207),\n Party(\"Prawo i Sprawiedliwość\", 138),\n Party(\"Ruch Palikota\", 41),\n Party(\"Polskie Stronnictwo Ludowe\", 28),\n Party(\"Sojusz Lewicy Demokratycznej\", 26),\n Party(\"Solidarna Polska\", 18),\n Party(\"Niezrzeszeni\", 2)])\n\nSEJM_VIII = Parliament([\n Party(\"Prawo i Sprawiedliwość\", 215),\n Party(\"Platforma Obywatelska\", 138),\n Party(\"Kukiz'15\", 42),\n Party(\"Nowoczesna\", 28),\n Party(\"Polskie Stronnictwo Ludowe\", 16),\n Party(\"Polska Razem\", 12),\n Party(\"Solidarna Polska\", 8),\n Party(\"Niezrzeszeni\", 1)])\n\nSEJM_IX = Parliament([\n Party(\"Prawo i Sprawiedliwość\", 200),\n Party(\"Platforma Obywatelska\", 134),\n Party(\"Lewica\", 49),\n Party(\"Koalicja Polska\", 30),\n Party(\"Porozumienie\", 18),\n Party(\"Solidarna Polska\", 17),\n Party(\"Konfederacja\", 11),\n Party(\"Niezrzeszeni\", 1)])\n","sub_path":"parliaments.py","file_name":"parliaments.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101037220","text":"import matplotlib\nmatplotlib.use('Agg')\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom cStringIO import StringIO\nimport pandas as pd\nimport sys; import os\nsys.path.insert(0, os.path.abspath('..')) # need to do to get 
analyst.utility\nfrom analysis.utility import get_zip_code_dic,get_data_from_dic\nfrom matplotlib import pyplot as plt\n\ndef make_plot(data):\n \"\"\"\n Returns the fig for the plot of the data\n \"\"\"\n data = pd.DataFrame(data)\n data = data.rename(index={12: '12+'})\n ax = data.plot(kind='bar',legend=False,edgecolor = \"none\",facecolor = '#30a2da')\n ax.set_xlabel(\"Number of Months on Market\")\n ax.set_ylabel(\"Likelihood\")\n ax.spines[\"right\"].set_color('none')\n for tic in ax.xaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n ax.spines['top'].set_color('none')\n for tic in ax.yaxis.get_major_ticks():\n tic.tick2On = False\n fig = ax.get_figure()\n return fig\n\nclass MakePlot(object):\n \"\"\"\n \"\"\"\n \n def __init__(self):\n \"\"\"\n \"\"\"\n self._clf = pickle.load(open('../data/random_forest_classifier.p','rb'))\n self._zipcode_data = get_zip_code_dic()\n \n def get_plot(self,data):\n \"\"\"\n Given data plot distribution\n \"\"\"\n data = self._get_probas(data)\n fig = make_plot(data[0])\n ## Encode image to png in base64\n io = StringIO()\n fig.savefig(io, format='png',bbox_inches='tight', pad_inches=0)\n data = io.getvalue().encode('base64')\n return data\n\n\n\n def _get_probas(self,data):\n \"\"\"\n Given data as a dic with some input data\n use clf to predict model class probality\n then return the probalities\n \"\"\"\n\n features = []\n lst = ['sqft','bedrooms','bathrooms','sold','zipcode','photos_num']\n for key in lst:\n features.append(data[key])\n \n #add in features from the zip data \n features += get_data_from_dic(self._zipcode_data,data['zipcode'])\n\n return self._clf.predict_proba(features)\n \n","sub_path":"flask_app/makeGraph.py","file_name":"makeGraph.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155141990","text":"### second app url ##\nfrom django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index), # similar to @app.route('/') in flask\n url(r'^add/(?P\\d+)$', views.add),\n url(r'^cart$', views.cart),\n url(r'^shop$', views.shop),\n url(r'^single_product$', views.single_product),\n url(r'^contact$', views.contact),\n url(r'^blog$', views.blog),\n url(r'^login$', views.login),\n\turl(r'^userreg$', views.register_user),\n\turl(r'^userlog$', views.login_user),\n\turl(r'^address$', views.address),\n\turl(r'^success$', views.success),\n\turl(r'^clear$', views.clear),\n\n]","sub_path":"apps/second_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644713365","text":"## Your name: PRASHANT TOTEJA\n## The option you've chosen: OPTION 2\n\n# Put import statements you expect to need here!\n\nimport unittest\nimport itertools\nimport collections\nimport requests\nimport tweepy\n#import twitter_info -- twitter_info needed in folder in order for this line to run\nimport json\nimport sqlite3\n\n\n\n#Create your caching set up:\n\n\n#--------calling APIs---------\n\n#Define a function Tweet() that takes an input string and returns a dictionary of 20+ tweets on that input\n\n\n#Define a function called Twitterusers() that takes a user screen name that is found in any given tweet and returns a dictionary of all that user's info\n#Hint: use api.get_user() to get the dictionary of user info\n\n#Define a function called OMDB() that takes and input of a movie name and returns a dictionary of all of that movie's info from the OMDB API\n\n\n\n\n#---------Movie Class------------\n\n#Define a class Movie that will have the info representing any given movie\n\n\n#Create the __init__ constructor here:\n\n\n#Define a __str__ method within the movie class that returns a readable output for the user that gives us the name of the movie, by whom it was directed, and the IMDB rating it received\n\n\n#Define a method within the movie class called listactors() that returns a list of actors that were in that movie\n\n\n#Define a method within the movie class called numlanguages() that returns the number of languages that were in that movie\n\n\n\n#---------------------------------\n\n\n#Create 3 sql tables: Tweets, Users, Movies \n#Tweets will have the following columns: text, tweet_id(primary key), username(reference the user table), movie_search, num_fav, num_retweets\n\n#Users will have the following columns: user_id(primary key), username, num_fav \n\n#Movies will have the following columns: movie_id (primary key), title, director, num_languages, IMDB_rating, top_actor\n\n#Load your info into the database and create two queries, one utilizing JOIN INNER, on your new table\n\n\n#Find the frequencies of all the actors across all of the movie we iterate over. Save the dictionary in a variable called actor_frequency\n\n \n#Did you find anything interesting? 
Create a quick summary about your findings and write it into a .txt file for your users!\n\n\n\n\n\n\n\n\n\n\n\n#---------Tests--------------\n\n# Write your test cases here.\n\nclass Sqltask(unittest.TestCase):\n\tdef test_users1(self):\n\t\tconn = sqlite3.connect('finalproject_tweets.db')\n\t\tcur = conn.cursor()\n\t\tcur.execute('SELECT * FROM Users');\n\t\tresult = cur.fetchall()\n\t\tself.assertTrue(len(result) >= 2, \"There should be 2 or more distinct users in the User table!!\")\n\t\tconn.close()\n\n\tdef test_users2(self):\n\t\tconn = sqlite3.connect('finalproject_tweets.db')\n\t\tcur = conn.cursor()\n\t\tcur.execute('SELECT * FROM Tweets');\n\t\tresult = cur.fetchall()\n\t\tself.assertTrue(len(result[0]) == 6, \"Testing that there are 6 columns in the Tweets table\")\n\t\tconn.close()\n\nclass Moretests(unittest.TestCase):\n\tdef test_tweeter(self):\n\t\tself.assertTrue(type(tweeter('Tom Cruise')), type({\"hi\",\"bye\"}), \"Testing that the tweeter function returns a type dictionary of tweets\")\n\n\tdef test_twitterusers(self):\n\t\tself.assertTrue(type(twitterusers('Tome Cruise')), type({\"hi\",\"bye\"}), \"Testing that the twitterusers function returns a type dictionary of users\")\n\tdef test_Movie1(self):\n\t\tx = omdb('Titanic')\n\t\tmymovie = Movie(dict = x)\n\t\tself.assertTrue(type(mymovie.listactors()), type([]), \"Testing that the listactors function returns a type list when called on a movie\")\n\tdef test_Movie2(self):\n\t\tx = omdb('Titanic')\n\t\tmymovie = Movie(dict = x)\n\t\tself.assertTrue(type(mymovie.numlanguages()), type(1), \"Testing that the numlanguages function returns a type integer when called on a movie\")\n\tdef test_actor_frequency(self):\n\t\tself.assertEqual(type(actor_frequency),type({}),\"Testing that mostcommon_actor across inputed movies is of type dictionary\")\n\n\n\tdef test_movielist(self):\n\t\tself.assertEqual(len(movielist) >= 3,\"Testing that we will be running the ombd function on a list that contains 3 or more movie names\")\n\n\n\n\n## Remember to invoke all your tests...\n\nif __name__ == \"__main__\":\n\tunittest.main(verbosity = 2)","sub_path":"206_project_plan.py","file_name":"206_project_plan.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125472180","text":"from HTMLParser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n print(\"Start : \" + tag)\n if (attrs):\n for i in xrange(len(attrs)):\n sub = '-> ' + str(attrs[i][0])\n\n for j in range(1,len(attrs[i])):\n sub += ' > ' + str(attrs[i][j])\n print(sub)\n\n def handle_endtag(self, tag):\n print(\"End : \" + tag)\n\n def handle_startendtag(self, tag, attrs):\n print(\"Empty : \" + tag)\n if (attrs):\n for i in xrange(len(attrs)):\n sub = '-> ' + str(attrs[i][0])\n\n for j in range(1,len(attrs[i])):\n sub += ' > ' + str(attrs[i][j])\n print(sub)\n\nn = int(raw_input())\ns = ''\n\nfor _ in xrange(n):\n s += raw_input()\n\nparser = MyHTMLParser()\nparser.feed(s)\n","sub_path":"python/regex-and-parsing/html-parser-part-1.py","file_name":"html-parser-part-1.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"98146949","text":"from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom books import views\n\nurlpatterns = [\n path('books/book//', views.BookViewSet.as_view({'get': 'findBookByTitle'})),\n 
path('books/banner/', views.BannerViewSet.as_view({'get': 'findBanners'})),\n path('books/order/', views.MallOrderViewSet.as_view({'get': 'findOrder'})),\n path('books/orderList/', views.OrderItemViewSet.as_view({'get': 'findOrderList'})),\n path('books/orderAddress/', views.OrderAddressViewSet.as_view({'get': 'findOrderAddress'})),\n path('books/userCollection/', views.UserCollectionViewSet.as_view({'get': 'findUserCollection'})),\n path('books/user/', views.UserViewSet),\n path('books/address/', views.AddressViewSet),\n path('books/token/', views.TokenViewSet),\n path('books/goods/', views.GoodsViewSet),\n path('books/category/', views.CategoryViewSet),\n path('books/cart/', views.CartViewSet),\n\n # 首页\n path('goods/home/', views.HomeViewSet.as_view({'get': 'getHome'})),\n # 搜索\n path('search/', views.SerchViewSet.as_view({'get': 'search'})),\n # 注册\n path('user/register', views.RegisterViewSet.as_view({'post': 'register'})),\n # 登录\n path('user/login', views.LoginViewSet.as_view({'post': 'login'})),\n # 查询用户信息\n path('user/userInfo/', views.UserInfoViewSet.as_view({'post': 'userInfo'})),\n # 更新用户信息\n path('user/updateUserInfo', views.UpdateUserInfoViewSet.as_view({'post': 'updateUserInfo'})),\n # 用户收藏列表\n path('user/collectionList', views.CollectionListViewSet.as_view({'post': 'collectionList'})),\n # 商品模块,分类商品列表\n path('goods/goodsList/', views.GoodsListViewSet.as_view({'get': 'goodsList'})),\n # 单个商品详情\n path('goods/goodsDetails', views.HomeViewSet.as_view({'post': 'getGoodsDetails'})),\n # 查询商品是否已收藏\n path('user/queryCollection', views.UserCollectionViewSet.as_view({'post': 'queryCollection'})),\n # 加入到购物车\n path('u-action/addToShopCart', views.CartViewSet.as_view({'post': 'addToShopCart'})),\n # 查询购物车数据\n path('user/checkShopCart', views.checkShopCartViewSet.as_view({'post': 'ShopCart'})),\n # 地址列表\n path('user/addressList', views.AddressListViewSet.as_view({'post': 'AddressList'})),\n # 获取订单列表\n path('user/orderList', views.orderListViewSet.as_view({'post': 'getOrderList'})),\n # 编辑收货地址\n path('u-action/editAddress', views.editAddressViewSet.as_view({'post': 'editAddress'})),\n # 默认地址\n path('user/defAddress', views.getdefAddressViewSet.as_view({'post': 'defAddress'})),\n # 删除购物车商品\n path('u-action/delCartGoods', views.delCartGoodsViewSet.as_view({'post': 'delCartGoods'})),\n # 商品收藏、取消\n path('u-action/collection', views.isCollectionsViewSet.as_view({'post': 'is_collection'})),\n # 删除收货地址\n path('u-action/delAddress', views.delAddressViewSet.as_view({'post': 'delAddress'})),\n # 提交订单\n path('u-action/submitOrder', views.submitOrderViewSet.as_view({'post': 'submitOrder'})),\n # 分类\n path('category-search', views.CategorySearchViewSet.as_view({'post': 'categorySearch'})),\n]\n\nrouter = DefaultRouter() # 括号不要忘了 ,不然执行不了\nrouter.register(r\"banner\", views.BannerViewSet)\nrouter.register(r\"books\", views.BookViewSet)\nrouter.register(r\"order\", views.MallOrderViewSet)\nrouter.register(r\"orderList\", views.OrderItemViewSet)\nrouter.register(r\"orderAddress\", views.OrderAddressViewSet)\nrouter.register(r\"userCollection\", views.UserCollectionViewSet)\nrouter.register(r\"user\", views.UserViewSet)\nrouter.register(r\"address\", views.AddressViewSet)\nrouter.register(r\"token\", views.TokenViewSet)\nrouter.register(r\"goods\", views.GoodsViewSet)\nrouter.register(r\"category\", views.CategoryViewSet)\nrouter.register(r\"cart\", views.CartViewSet)\n\nurlpatterns += router.urls\n\n# 
print(router.urls)\n","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154929655","text":"from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as f:\n DESCRIPTION = f.read()\n\nsetup(name=\"pymagnitude-lite\",\n version=\"0.1.143\",\n description=\"Magnitude fork that only supports Word2Vec, GloVe and fastText embeddings\",\n long_description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/neuml/magnitude\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"fasteners>=0.14.1\",\n \"lz4>=1.0.0\",\n \"numpy>=1.14.0\",\n \"xxhash>=1.0.1\"\n ] \n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502294023","text":"# Temperature Converter\n# Param Sandhu - 301118847\n\ndef menu():\n print(\"\\n1. Celsius to Fahrenheit\")\n print(\"2. Fahrenheit to Celsius\")\n print(\"3. Exit\")\n pick = int(input(\"Enter a Choice: \"))\n return pick\n\ndef toCelsius(f):\n return int((f - 32) / 1.8)\n\ndef toFahrenheit(c):\n return int(c * 1.8 + 32)\n\ndef main():\n choice = menu()\n while choice != 3:\n if choice == 1:\n # convert C to F\n c = eval(input(\"Enter Temperature in Celsius: \"))\n print(str(c) + \"C = \" + str(toFahrenheit(c)) + \"F\")\n elif choice == 2:\n # convert F to C\n f = eval(input(\"Enter Temperature in Fahrenheit: \"))\n print(str(f) + \"F = \" + str(toCelsius(f)) + \"C\")\n else:\n print(\"Invalid Entry\")\n choice = menu()\n\n\n\nmain()\n","sub_path":"TempConverter.py","file_name":"TempConverter.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533232672","text":"import torch\nimport numpy as np\nimport random\nfrom data import getTrainingValidationTestingData\nfrom model import Net\n# from common import *\nfrom criterion import DepthLoss\nimport util\n\n# from util import evaluate_model, make_plot, config\n\ntorch.manual_seed(42)\nnp.random.seed(42)\nrandom.seed(42)\n\n\ndef main(device=torch.device('cuda:0')):\n \"\"\"Train CNN and show training plots.\"\"\"\n # Data loaders\n \"\"\"\n if check_for_augmented_data(\"./data\"):\n tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(\n task=\"target\", batch_size=config(\"cnn.batch_size\"), augment=True\n )\n else:\n tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(\n task=\"target\",\n batch_size=config(\"cnn.batch_size\"),\n )\n \"\"\"\n # pathname = \"data/nyu_depth.zip\"\n pathname = \"data/nyu_small.zip\"\n tr_loader, va_loader, te_loader = getTrainingValidationTestingData(pathname,\n batch_size=util.config(\"unet.batch_size\"))\n\n # Model\n model = Net()\n\n # TODO: define loss function, and optimizer\n learning_rate = util.config(\"unet.learning_rate\")\n criterion = DepthLoss(0.1)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n number_of_epoches = 10\n #\n\n # print(\"Number of float-valued parameters:\", util.count_parameters(model))\n\n # Attempts to restore the latest checkpoint if exists\n print(\"Loading unet...\")\n model, start_epoch, stats = util.restore_checkpoint(model, util.config(\"unet.checkpoint\"))\n\n # axes = utils.make_training_plot()\n\n # Evaluate the 
randomly initialized model\n # evaluate_epoch(\n # axes, tr_loader, va_loader, te_loader, model, criterion, start_epoch, stats\n # )\n # loss = criterion()\n\n # initial val loss for early stopping\n # prev_val_loss = stats[0][1]\n\n running_va_loss = []\n running_va_acc = []\n running_tr_loss = []\n running_tr_acc = []\n # TODO: define patience for early stopping\n # patience = 1\n # curr_patience = 0\n #\n tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)\n acc, loss = util.evaluate_model(model, va_loader, device)\n running_va_acc.append(acc)\n running_va_loss.append(loss)\n running_tr_acc.append(tr_acc)\n running_tr_loss.append(tr_loss)\n\n # Loop over the entire dataset multiple times\n # for epoch in range(start_epoch, config('cnn.num_epochs')):\n epoch = start_epoch\n # while curr_patience < patience:\n while epoch < number_of_epoches:\n # Train model\n util.train_epoch(tr_loader, model, criterion, optimizer)\n tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)\n va_acc, va_loss = util.evaluate_model(model, va_loader, device)\n running_va_acc.append(va_acc)\n running_va_loss.append(va_loss)\n running_tr_acc.append(tr_acc)\n running_tr_loss.append(tr_loss)\n # Evaluate model\n # evaluate_epoch(\n # axes, tr_loader, va_loader, te_loader, model, criterion, epoch + 1, stats\n # )\n\n # Save model parameters\n util.save_checkpoint(model, epoch + 1, util.config(\"unet.checkpoint\"), stats)\n\n # update early stopping parameters\n \"\"\"\n curr_patience, prev_val_loss = early_stopping(\n stats, curr_patience, prev_val_loss\n )\n \"\"\"\n\n epoch += 1\n print(\"Finished Training\")\n # Save figure and keep plot open\n # utils.save_training_plot()\n # utils.hold_training_plot()\n util.make_plot(running_tr_loss, running_tr_acc, running_va_loss, running_va_acc)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":".history/train_20210418211518.py","file_name":"train_20210418211518.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"414481840","text":"from .transform.base import TransformNoDataIndependentRandomness, Pipe\nimport os\n\n\nclass Pipe_GetWorkingDir(Pipe):\n KEY_OUT_working_dir = 'working_dir'\n def before_keys_assertions(self, **data):\n pass\n def after_keys_assertions(self, **data):\n assert len(data.keys())==1\n self.static_assertions__working_dir(data[self.KEY_OUT_working_dir])\n @classmethod\n def static_assertions__working_dir(cls, working_dir):\n assert os.path.isdir(working_dir), working_dir\n\n\nclass GetWorkingDir(TransformNoDataIndependentRandomness):\n PIPE_DEFAULT = Pipe_GetWorkingDir\n\n def _init_(self, **config):\n self.pardir = os.path.dirname(__file__)\n assert os.path.isdir(self.pardir), self.pardir\n\n self.working_dname = 'working_dir'\n\n def transform(self):\n working_dir = os.path.join(self.pardir, self.working_dname)\n\n if not os.path.isdir(working_dir):\n os.mkdir(working_dir)\n return {\n self.PIPE_DEFAULT.KEY_OUT_working_dir: working_dir,\n }\n\n\nif __name__ == '__main__':\n tp = GetWorkingDir(**{})\n out = tp(**{})\n print(out)\n","sub_path":"mscdiploma/mappings.py","file_name":"mappings.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54598079","text":"x = 0\ny = 400\nradius = 25\n\ndef setup():\n size(800, 800)\n\ndef draw():\n global x\n \n background(255)\n fill(0)\n \n #moving across screen\n #if x < width:\n # x = x + 
5\n \n x = mouseX\n \n for yPos in range(0, height, 75):\n ellipse(x, yPos, radius*2, radius*2)","sub_path":"forLoopToDrawMultiples/forLoopToDrawMultiples.pyde","file_name":"forLoopToDrawMultiples.pyde","file_ext":"pyde","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181663375","text":"import pytest\nimport logging\n\n\nfrom ocs_ci.ocs.constants import OPENSHIFT_IMAGE_REGISTRY_NAMESPACE\nfrom ocs_ci.ocs.exceptions import UnexpectedBehaviour\nfrom ocs_ci.ocs.registry import (\n validate_registry_pod_status,\n image_pull, image_push, image_list_all, image_rm,\n check_image_exists_in_registry\n)\nfrom ocs_ci.framework.testlib import E2ETest, workloads\nfrom ocs_ci.utility.svt import svt_setup, svt_cleanup\nfrom tests import disruption_helpers\nfrom tests.sanity_helpers import Sanity\n\nlog = logging.getLogger(__name__)\nIMAGE_URL = 'docker.io/library/busybox'\n\n\n@workloads\nclass TestRegistryPodRespin(E2ETest):\n \"\"\"\n Test to run svt workload for pushing\n images to registry and with Ceph pods respin\n \"\"\"\n\n @pytest.fixture(autouse=True)\n def init_sanity(self):\n \"\"\"\n Initialize Sanity instance\n\n \"\"\"\n self.sanity_helpers = Sanity()\n\n @pytest.fixture(autouse=True)\n def teardown(self, request):\n \"\"\"\n Clean up svt\n\n \"\"\"\n self.image_path = None\n\n def finalizer():\n log.info(\"Remove image from registry\")\n image_rm(registry_path=self.image_path, image_url=IMAGE_URL)\n log.info(\"Calling svt cleanup\")\n assert svt_cleanup(), \"Failed to cleanup svt\"\n request.addfinalizer(finalizer)\n\n @pytest.mark.parametrize(\n argnames=[\n \"pod_name\", \"iterations\"\n ],\n argvalues=[\n pytest.param(\n *['mon', 5], marks=pytest.mark.polarion_id(\"OCS-1797\")\n ),\n pytest.param(\n *['osd', 5], marks=pytest.mark.polarion_id(\"OCS-1798\")\n ),\n pytest.param(\n *['mgr', 5], marks=pytest.mark.polarion_id(\"OCS-1799\")\n ),\n pytest.param(\n *['mds', 5], marks=pytest.mark.polarion_id(\"OCS-1790\")\n )\n ]\n )\n def test_registry_respin_pod(self, pod_name, iterations):\n \"\"\"\n Test registry workload when backed by OCS respin of ceph pods\n \"\"\"\n\n # Respin relevant pod\n log.info(f\"Respin Ceph pod {pod_name}\")\n disruption = disruption_helpers.Disruptions()\n disruption.set_resource(resource=f'{pod_name}')\n disruption.delete_resource()\n\n # Start SVT workload for pushing images to registry\n svt_setup(iterations=iterations)\n\n # Image pull and push to registry\n image_pull(image_url=IMAGE_URL)\n self.image_path = image_push(\n image_url=IMAGE_URL, namespace=OPENSHIFT_IMAGE_REGISTRY_NAMESPACE\n )\n\n # List the images in registry\n img_list = image_list_all()\n log.info(f\"Image list {img_list}\")\n\n # Check either image present in registry or not\n validate = check_image_exists_in_registry(image_url=IMAGE_URL)\n if not validate:\n raise UnexpectedBehaviour(\"Image URL not present in registry\")\n\n # Validate image registry pods\n validate_registry_pod_status()\n\n # Validate cluster health ok and all pods are running\n self.sanity_helpers.health_check()\n","sub_path":"tests/e2e/registry/test_registry_pod_respin.py","file_name":"test_registry_pod_respin.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255398035","text":"import os\nfrom time import sleep\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\n\n\ndef find_dim0(x):\n dim0=np.size(x,axis=0)\n\n return 
dim0\n\n\ndef find_dim1(x):\n dim1=np.size(x,axis=1)\n return dim1\n\n# Remove unlabeled data\n# wafermap = pd.read_pickle(\"./LSWMD.pkl\")\n# print(wafermap.info())\n# wafermap['failureNum'] = wafermap.failureType\n# wafermap['trainTestNum'] = wafermap.trianTestLabel\n# mapping_type = {'Center':0,'Donut':1,'Edge-Loc':2,'Edge-Ring':3,'Loc':4,'Random':5,'Scratch':6,'Near-full':7,'none':8}\n# mapping_traintest = {'Training':0,'Test':1}\n# wafermap = wafermap.replace({'failureNum':mapping_type, 'trainTestNum':mapping_traintest})\n# df_withlabel = wafermap[(wafermap['failureNum']>=0) & (wafermap['failureNum']<=8)]\n# df_withlabel = df_withlabel.reset_index()\n# df_withlabel = df_withlabel.drop(columns=['dieSize', 'lotName', 'waferIndex'])\n# df_withlabel.to_pickle(\"wafermap_withlabel.pkl\")\n\n# Remove images that are too small\n# wafermap_withlabel = pd.read_pickle(\"./wafermap_withlabel.pkl\")\n# wafermap_withlabel = wafermap_withlabel.drop(columns=['index'])\n# print(wafermap_withlabel.info())\n# wafermap_withlabel['waferMapDim0']=wafermap_withlabel.waferMap.apply(find_dim0)\n# wafermap_withlabel['waferMapDim1']=wafermap_withlabel.waferMap.apply(find_dim1)\n# sub_df = wafermap_withlabel.loc[wafermap_withlabel['waferMapDim0'] >= 26]\n# sub_df = wafermap_withlabel.loc[wafermap_withlabel['waferMapDim1'] >= 26]\n# sub_df = sub_df.reset_index()\n# print(sub_df.info())\n# sub_df.to_pickle(\"wafermap_withlabel_big.pkl\")\n\n# Convert data into pictures\ndf = pd.read_pickle(\"./wafermap_withlabel_big.pkl\")\n# df = df.drop(columns=['index'])\nprint(df.info())\ndf_withlabel =df.reset_index()\n# df_withlabel = df[(df['failureNum']>=0) & (df['failureNum']<=8)]\n# df_withlabel =df_withlabel.reset_index()\n# df_withpattern = df[(df['failureNum']>=0) & (df['failureNum']<=7)]\n# df_withpattern = df_withpattern.reset_index()\n# df_nonpattern = df[(df['failureNum']==8)]\n# print(df_withlabel.shape[0], df_withpattern.shape[0], df_nonpattern.shape[0])\nclass_text_num = {'Center': 0, 'Donut': 0, 'Edge-Loc': 0, 'Edge-Ring': 0, 'Loc': 0, 'Random': 0, 'Scratch': 0,\n 'Near-full': 0, 'none': 0}\nclass_train_num = {'Center': 0, 'Donut': 0, 'Edge-Loc': 0, 'Edge-Ring': 0, 'Loc': 0, 'Random': 0, 'Scratch': 0,\n 'Near-full': 0, 'none': 0}\n\nfor index, row in df_withlabel.iterrows():\n usage = str(row['trianTestLabel'][0][0])\n img_dir = str(row['failureType'][0][0])\n if not os.path.isdir(usage+'/'+img_dir):\n os.makedirs(usage+'/'+img_dir)\n # if usage == 'Test':\n # img = Image.fromarray(row['waferMap'] * 127).resize((28, 28)).convert('L')\n # img.save('./'+usage+'/'+img_dir+'/'+str(class_text_num[img_dir]) + '.png')\n # class_text_num[img_dir] += 1\n # elif usage == 'Training':\n # img = Image.fromarray(row['waferMap'] * 127).resize((28, 28)).convert('L')\n # img.save('./'+usage+'/'+img_dir+'/'+str(class_train_num[img_dir]) + '.png')\n # class_train_num[img_dir] += 1\n\n if usage == 'Test':\n color = np.array([(row['waferMap'] == 2) * 240 + (row['waferMap'] == 1) * 94, (row['waferMap'] == 2) * 168 +\n (row['waferMap'] == 1) * 204, (row['waferMap'] == 2) * 60 + (row['waferMap'] == 1) * 204])\n color = np.swapaxes(color, 2, 0)\n img = Image.fromarray(color.astype(np.uint8), 'RGB').resize((28, 28))\n img.save('./'+usage+'/'+img_dir+'/'+str(class_text_num[img_dir]) + '.png')\n class_text_num[img_dir] += 1\n elif usage == 'Training':\n color = np.array([(row['waferMap'] == 2)*240 + (row['waferMap'] == 1)*94, (row['waferMap'] == 2)*168 +\n (row['waferMap'] == 1)*204, (row['waferMap'] == 2)*60 + (row['waferMap'] == 
1)*204])\n            color = np.swapaxes(color, 2, 0)\n            img = Image.fromarray(color.astype(np.uint8), 'RGB').resize((28, 28))\n            img.save('./'+usage+'/'+img_dir+'/'+str(class_train_num[img_dir]) + '.png')\n            class_train_num[img_dir] += 1\n\n","sub_path":"Data-Driven E2E Latency Estimation/1.Latency Sample Generation/1)Profiling and Modeling/(2)Services/b.Defect Detection/Train/1.auto-cnn/forauto-cnnTrain.py","file_name":"forauto-cnnTrain.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127752294","text":"import pytest\nfrom brownie import FundMe, network, accounts, exceptions\nfrom scripts.helpers import LOCAL_BLOCKCHAINS_ENVIRONMENTS, get_account\nfrom scripts.deploy import deploy\n\n\ndef test_can_fund_and_withdraw():\n    account = get_account()\n    fund_me = deploy()\n    entrance_fee = fund_me.getEntranceFee() + 100\n    tx = fund_me.fund({\n        'from': account,\n        'value': entrance_fee,\n    })\n    tx.wait(1)\n    assert fund_me.addressToAmountFunded(account.address) == entrance_fee\n    tx = fund_me.withdraw({'from': account})\n    tx.wait(1)\n    assert fund_me.addressToAmountFunded(account.address) == 0\n\n\ndef test_only_owner_can_withdraw():\n    if network.show_active() not in LOCAL_BLOCKCHAINS_ENVIRONMENTS:\n        pytest.skip('only for local testing')  # pytest.skip raises, so no return is needed after it\n    fund_me = deploy()\n    bad_actor = accounts.add()\n    with pytest.raises(exceptions.VirtualMachineError):\n        fund_me.withdraw({'from': bad_actor})\n","sub_path":"tests/test_fund_me.py","file_name":"test_fund_me.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605410170","text":"'''\n****************************************************************************\n*   Program       lesson_5.py                                              *\n*   Author        Myles Wilkerson                                          *\n*   Date          March 31, 2021                                           *\n*   Source        Realpython https://realpython.com/python-sql-libraries/#sqlite *\n*   Description:                                                           *\n*       This program is used to introduce Geniuses to using a              *\n*       database with Structured Query Language (SQL). 
The program            *\n*       imports the sqlite3 module which allows you to create              *\n*       and interact with an SQL Database                                  *\n*                                                                          *\n*       - The create_connection function is passed the                     *\n*         path of the SQLite database file then it connects                *\n*         the app to an existing SQLite3 database named hgp_pods           *\n*         or if it's not present it creates the database file              *\n*                                                                          *\n*       - The execute_query function is passed the connection and          *\n*         the query to implement; the create_staff_member_table query      *\n*         and the add_staff_member query                                   *\n*                                                                          *\n*       - The execute_read_query function is passed the connection         *\n*         and the display_staff_member query                               *\n****************************************************************************\n\n'''\n\nimport sqlite3\nfrom sqlite3 import Error\n\n############### Function Definitions *******************\ndef create_connection(path):\n    connection = None\n    try:\n        connection = sqlite3.connect(path)\n        print(\"Connection to SQLite DB successful\")\n    except Error as e:\n        print(f\"The error '{e}' occurred\")\n\n    return connection\n\ndef execute_query(connection, query): #Runs a write query; nothing is expected in return\n    cursor = connection.cursor()\n    try: #Try executing the query\n        cursor.execute(query)\n        connection.commit()\n        print(\"Query executed successfully\") #If the try succeeds, the success message is printed\n    except Error as e: #If the try is unsuccessful, the error message is printed\n        print(f\"The error '{e}' occurred\")\n\n\ndef execute_read_query(connection, query): #Tells the database to do something and expects something in return\n    cursor = connection.cursor()\n    result = None\n    try:\n        cursor.execute(query)\n        result = cursor.fetchall()\n        return result\n    except Error as e:\n        print(f\"The error '{e}' occurred\")\n\n\n################### Connect to / Create the SQLite3 Database File *********************\nconnection = create_connection(\"/Users/thehiddengeniusproject/Hodari/assignment_27/oak8_pods.sqlite5\")\n\n\n########################## Create staff table variable query ################\ncreate_staff_member_table_query = \"\"\"\nCREATE TABLE IF NOT EXISTS staff (\n    id INTEGER PRIMARY KEY AUTOINCREMENT,\n    name TEXT NOT NULL,\n    cell TEXT NOT NULL,\n    position TEXT NOT NULL\n);\n\"\"\"\n\n\n#the lines above create the table\n\n#################### Execute query to create staff table #################\nexecute_query(connection, create_staff_member_table_query) #inputs something into the database and doesn't expect a return result\n\ncreate_pod_leader_table_query = \"\"\"\nCREATE TABLE IF NOT EXISTS leader (\n    id INTEGER PRIMARY KEY AUTOINCREMENT,\n    name TEXT NOT NULL,\n    cell TEXT NOT NULL,\n    position TEXT NOT NULL\n);\n\"\"\"\n\n\nexecute_query(connection, create_pod_leader_table_query)\n\ncreate_pod_member_table_query = \"\"\"\nCREATE TABLE IF NOT EXISTS member (\n    id INTEGER PRIMARY KEY AUTOINCREMENT,\n    name TEXT NOT NULL,\n    cell TEXT NOT NULL,\n    position TEXT NOT NULL\n);\n\"\"\"\n\n\nexecute_query(connection, create_pod_member_table_query)\n\n################# Create insert query to add staff members to staff table #######\nadd_staff_members_query = \"\"\"\nINSERT INTO\n    staff (name,cell,position)\nVALUES\n    ('Baba','510.205.0980','Senior Innovation Educator'),\n    ('Brandon','111.111.1111', 'Executive Director'),\n    ('Hodari','(510) 435-2594','Curriculum Lead'),\n    ('Akeeem','(415) 684-0505','Programs Director');\n\"\"\"\n\n#the lines above add rows to the table\n\n#################### Execute insert staff members query ##################\nexecute_query(connection, add_staff_members_query)\n\nadd_pod_leader_query = \"\"\"\nINSERT INTO\n    leader (name, 
cell, position)\nVALUES\n    ('Richard','111.111.1111','Pod Leader'),\n    ('Andrew','111.111.1111','Pod Leader'),\n    ('Jacore','111.111.1111','Pod Leader'),\n    ('Aris','111.111.1111','Pod Leader'),\n    ('Gabriel','111.111.1111','Pod Leader');\n\"\"\"\n\nexecute_query(connection, add_pod_leader_query)\n\nadd_pod_member_query = \"\"\"\nINSERT INTO\n    member (name, cell, position)\nVALUES\n    ('Myles','111.111.1111','Pod Member'),\n    ('Kymari','111.111.1111','Pod Member'),\n    ('Gaelan','111.111.1111','Pod Member'),\n    ('David','111.111.1111','Pod Member'),\n    ('Emmanuel','111.111.1111','Pod Member'),\n    ('Josiah','111.111.1111','Pod Member'),\n    ('Glenn','111.111.1111','Pod Member'),\n    ('Hyab','111.111.1111','Pod Member'),\n    ('Maurice','111.111.1111','Pod Member'),\n    ('Milan','111.111.1111','Pod Member'),\n    ('Morris','111.111.1111','Pod Member'),\n    ('Moussa','111.111.1111','Pod Member'),\n    ('Malick','111.111.1111','Pod Member'),\n    ('Prince','111.111.1111','Pod Member'),\n    ('Ronin','111.111.1111','Pod Member');\n\"\"\"\n\nexecute_query(connection, add_pod_member_query)\n\n########################### Display staff_member Query #####################\ndisplay_staff_query = \"SELECT * from staff\" #This line and the next return the results as a list of tuples (an immutable list per row)\nstaff = execute_read_query(connection, display_staff_query) #execute_read_query returns the rows as a list,\n                                                            #which is looped over below so each row can\n                                                            #be printed\n\ndisplay_leader_query = \"SELECT * from leader\"\nleader = execute_read_query(connection, display_leader_query)\n\n\ndisplay_member_query = \"SELECT * from member\"\nmember = execute_read_query(connection, display_member_query)\n\nfor user in staff: #loop through the results and print each row\n    print(user)\n\nprint(\"\\n\")\n\nfor user in leader:\n    print(user)\n\nprint(\"\\n\")\n\nfor user in member:\n    print(user)\n\nprint(\"\\n\")\n\nexecute_query(connection,'drop table staff') #DROP TABLE removes the table itself (schema and all rows)\nexecute_query(connection,'drop table leader')\nexecute_query(connection,'drop table member')\n","sub_path":"assignment_27/lesson_5.py","file_name":"lesson_5.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354176113","text":"from sqlalchemy import Column, Integer, String, orm, ForeignKey\nfrom sqlalchemy.orm import relationship\n\nfrom Model.util import log_message, change_file_time\nfrom Model.EdgeModel.SQLite.base import (\n    BaseSession,\n    BaseSQLiteClass,\n    BaseSQliteHandler,\n    BaseAttribute,\n    OTHER,\n    DT_MICRO,\n    DT_MILLI_ZEROED_MICRO,\n    DT_WEBKIT,\n    DT_STRING\n)\n\nID = \"ID\"\nURL = \"Url\"\nTITLE = \"Name\"\nLASTVISITED = \"Zuletzt besucht\"\nLASTVISITEDNONE = \"Zuletzt besucht (Null)\"\nVISITED = \"Besucht am\"\nADDEDAT = \"Hinzugefügt am\"\nLASTMODIFIED = \"Geändert am\"\nCONTENT = \"Inhalt\"\nFILE = \"Datei\"\nSTARTTIME = \"Startzeit\"\nENDTIME = \"Endzeit\"\n\n\nclass Urls(BaseSession, BaseSQLiteClass):\n    __tablename__ = \"urls\"\n\n    id = Column(\"id\", Integer, primary_key=True)\n    url = Column(\"url\", String)\n    title = Column(\"title\", String)\n    last_visited_timestamp = Column(\"last_visit_time\", Integer) # Webkit\n\n\nclass Visits(BaseSession, BaseSQLiteClass):\n    __tablename__ = \"visits\"\n\n    id = Column(\"id\", Integer, primary_key=True)\n    url_id = Column(\"url\", Integer, ForeignKey(\"urls.id\"))\n    from_visit = Column(\"from_visit\", Integer)\n    visit_timestamp = Column(\"visit_time\", 
Integer) # Webkit\n place = relationship(\"Urls\")\n\n @orm.reconstructor\n def init(self):\n self.is_date_changed = False\n self.attr_list = []\n self.attr_list.append(BaseAttribute(URL, OTHER, self.place.url))\n self.attr_list.append(BaseAttribute(TITLE, OTHER, self.place.title))\n self.attr_list.append(BaseAttribute(VISITED, DT_WEBKIT, self.visit_timestamp))\n self.attr_list.append(\n BaseAttribute(LASTVISITED, DT_WEBKIT, self.place.last_visited_timestamp)\n )\n \n \n def reload_attributes(self):\n self.attr_list = []\n self.attr_list.append(BaseAttribute(URL, OTHER, self.place.url))\n self.attr_list.append(BaseAttribute(TITLE, OTHER, self.place.title))\n self.attr_list.append(BaseAttribute(VISITED, DT_WEBKIT, self.visit_timestamp))\n self.attr_list.append(\n BaseAttribute(LASTVISITED, DT_WEBKIT, self.place.last_visited_timestamp)\n )\n \n\n def update(self, delta):\n if not delta:\n log_message(\"Kein Delta erhalten in History\", \"error\")\n return\n visited_safe = self.visit_timestamp\n last_visited_safe = self.place.last_visited_timestamp\n for attr in self.attr_list:\n if attr.name == LASTVISITED:\n is_bigger, addi_delta = attr.check_new_bigger(self.attr_list[2].value, delta)\n if visited_safe == last_visited_safe or is_bigger:\n try:\n if is_bigger:\n attr.change_date(addi_delta-delta)\n else:\n attr.change_date(delta)\n attr.date_to_timestamp()\n self.place.last_visited_timestamp = attr.timestamp\n except:\n log_message(\"Fehler bei Update in Visits/History für \" + attr.name, \"error\")\n continue\n self.is_date_changed = True\n elif attr.name == VISITED:\n try:\n attr.change_date(delta)\n attr.date_to_timestamp()\n self.visit_timestamp = attr.timestamp\n except:\n log_message(\"Fehler bei Update in Visits/History für \" + attr.name, \"error\")\n continue\n self.is_date_changed = True\n\n\nclass Download(BaseSession, BaseSQLiteClass):\n __tablename__ = \"downloads\"\n\n id = Column(\"id\", Integer, primary_key=True)\n target_path = Column(\"target_path\", String)\n start_time = Column(\"start_time\", Integer) #Webkit\n end_time = Column(\"end_time\", Integer) #Webit\n last_modified = Column(\"last_modified\", String) #Tue, 26 Jan 2021 13:11:34 GMT\n referrer = Column(\"referrer\", String)\n\n @orm.reconstructor\n def init(self):\n self.is_date_changed = False\n self.attr_list = []\n self.attr_list.append(BaseAttribute(FILE, OTHER, self.target_path))\n self.attr_list.append(BaseAttribute(URL, OTHER, self.referrer))\n self.attr_list.append(BaseAttribute(STARTTIME, DT_WEBKIT, self.start_time))\n self.attr_list.append(BaseAttribute(ENDTIME, DT_WEBKIT, self.end_time))\n self.attr_list.append(BaseAttribute(LASTMODIFIED, DT_STRING, self.last_modified))\n\n def update(self, delta):\n if not delta:\n log_message(\"Kein Delta erhalten in Download\", \"error\")\n return\n \n change_file_time(self.target_path, delta)\n\n for attr in self.attr_list:\n if attr.name == STARTTIME:\n try:\n attr.change_date(delta)\n attr.date_to_timestamp()\n self.start_time = attr.timestamp\n except:\n log_message(\"Fehler bei Update in Downloads für \" + attr.name, \"error\")\n continue\n self.is_date_changed = True\n if attr.name == ENDTIME:\n try:\n attr.change_date(delta)\n attr.date_to_timestamp()\n self.end_time = attr.timestamp\n except:\n log_message(\"Fehler bei Update in Downloads für \" + attr.name, \"error\")\n continue\n self.is_date_changed = True\n elif attr.name == LASTMODIFIED:\n try:\n attr.change_date(delta)\n attr.date_to_timestamp()\n self.last_modified = attr.timestamp\n except:\n 
log_message(\"Fehler bei Update in Downloads für \" + attr.name, \"error\")\n continue\n self.is_date_changed = True\n\n\nclass HistoryHandler(BaseSQliteHandler):\n def __init__(\n self,\n profile_path: str,\n file_name: str = \"History\",\n logging: bool = False,\n ):\n super().__init__(profile_path, file_name, logging)\n\n\nclass VisitsHandler(HistoryHandler):\n name = \"Visits\"\n\n attr_names = [ID, URL, TITLE, LASTVISITED, VISITED]\n\n def get_all_id_ordered(self):\n history = self.session.query(Visits).order_by(Visits.id).all()\n return history\n \n\n\nclass DownloadHandler(HistoryHandler):\n name = \"Downloads\"\n\n attr_names = [FILE, URL, STARTTIME, ENDTIME, LASTMODIFIED]\n\n def get_all_id_ordered(self):\n query = self.session.query(Download).order_by(Download.id)\n return query.all()\n","sub_path":"src/Model/EdgeModel/SQLite/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"93727309","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nclass Fund:\n def retrievefund():\n html=urlopen(\"http://fund.chinastock.com.cn/jjpj/fundAction.do?methodCall=queryFundByParamsYh\")\n bsObj=BeautifulSoup(html,\"lxml\")\n rates=bsObj.findAll(\"td\",{\"style\":\"font-weight:bold;\"})\n return rates[0:1847]\n\nif __name__=='__main__':\n Fund.retrievefund()\n ","sub_path":"fund.py","file_name":"fund.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520750978","text":"from face_detector import FaceDetector\nfrom mark_detector import MarkDetector\n\nimport numpy as np\nimport cv2\nimport imutils\nfrom imutils.video import VideoStream \nimport time\n\ndef get_headpose(h, w, landmarks_2d, landmarks_3d, lm_2d_index):\n f = w # column size = x axis length (focal length)\n u0, v0 = w / 2, h / 2 # center of image plane\n camera_matrix = np.array(\n [[f, 0, u0],\n [0, f, v0],\n [0, 0, 1]], dtype = np.float64\n )\n \n # Assuming no lens distortion\n dist_coeffs = np.zeros((4,1)) \n\n # Filter Landmark \n coords = []\n for i in lm_2d_index:\n coords += [[landmarks_2d[i,0], landmarks_2d[i,1]]]\n landmarks_2d = np.array(coords, dtype = np.float64)\n # Find rotation, translation\n (_, rotation_vector, translation_vector) = cv2.solvePnP(landmarks_3d, landmarks_2d, camera_matrix, distCoeffs = None)\n return rotation_vector, translation_vector, camera_matrix, dist_coeffs #rvec, tvec, cm, dc\n\ndef draw_front_box(image, color, rvec, tvec, cm, dc, b = 10.0):\n\n h, w, c = image.shape\n fs = ((h + w) / 2) / 500\n ls = round(fs * 2) \n\n box = np.array([ #(horizontal, vertical, z)\n ( b, b , b), #upper left (ul)\n ( b, -b , b), #bottom left (bl)\n (-b, b , b), #upper right (ur)\n (-b, -b , b), #bottom right (br)\n ])\n\n # Draw from ul-ur > \n box_lines_seq = np.array([\n (0, 2), (2,3), (3,1), (1,0)\n ])\n\n (projected_box, _) = cv2.projectPoints(box, rvec, tvec, cm, dc)\n pbox = projected_box[:, 0]\n for p in box_lines_seq:\n p1 = pbox[p[0]].astype(int) #point 1\n p2 = pbox[p[1]].astype(int) #point 2\n p1, p2 = tuple(p1), tuple(p2)\n\n cv2.line(image, p1, p2, color, ls)\n\ndef draw_front_box_corner(image, color, rvec, tvec, cm, dc, scale = 4, b = 10.0):\n\n h, w, c = image.shape\n fs = ((h + w) / 2) / 500\n ls = round(fs * 2) \n\n box = np.array([ #(horizontal, vertical, z)\n ( b, b , b), #upper left (ul)\n ( b, -b , b), #bottom left (bl)\n (-b, b , b), #upper right 
(ur)\n (-b, -b , b), #bottom right (br)\n ])\n\n # Draw from ul-ur > \n box_lines_seq = np.array([\n (0, 2), (2,3), (3,1), (1,0)\n ])\n\n\n (projected_box, _) = cv2.projectPoints(box, rvec, tvec, cm, dc)\n pbox = projected_box[:, 0]\n for p in box_lines_seq:\n p1 = pbox[p[0]].astype(int) #point 1\n p2 = pbox[p[1]].astype(int) #point 2\n\n extra_len = (((p1 - p2) / scale)).astype(int)\n\n p1_3 = p1 - extra_len #point 1.3 \n p1_6 = p2 + extra_len #point 1.6\n\n p1, p1_3, p1_6, p2 = tuple(p1), tuple(p1_3), tuple(p1_6), tuple(p2)\n \n cv2.line(image, p1, p1_3, color, ls)\n cv2.line(image, p1_6, p2, color, ls)\n\n\ndef main():\n\n # Initiate Class for Face & Mark Detector\n face_detector = FaceDetector()\n mark_detector = MarkDetector()\n\n # Landmark 3D for projection and landmark 2d index of corresponding mark\n landmarks_3d = np.array([\n [ 0.000000, 0.000000, 6.763430], # 33 nose bottom edge\n [ 6.825897, 6.760612, 4.402142], # 17 left brow left corner\n [ 1.330353, 7.122144, 6.903745], # 21 left brow right corner\n [-1.330353, 7.122144, 6.903745], # 22 right brow left corner\n [-6.825897, 6.760612, 4.402142], # 26 right brow right corner\n [ 5.311432, 5.485328, 3.987654], # 36 left eye left corner\n [ 1.789930, 5.393625, 4.413414], # 39 left eye right corner\n [-1.789930, 5.393625, 4.413414], # 42 right eye left corner\n [-5.311432, 5.485328, 3.987654], # 45 right eye right corner\n [ 2.005628, 1.409845, 6.165652], # 31 nose left corner\n [-2.005628, 1.409845, 6.165652], # 35 nose right corner\n [ 2.774015, -2.080775, 5.048531], # 48 mouth left corner\n [-2.774015, -2.080775, 5.048531], # 54 mouth right corner\n [ 0.000000, -3.116408, 6.097667], # 57 mouth central bottom corner\n [ 0.000000, -7.415691, 4.070434] # 8 chin corner\n ], dtype=np.double)\n lm_2d_index = [33, 17, 21, 22, 26, 36, 39, 42, 45, 31, 35, 48, 54, 57, 8]\n\n # Define color for facebox\n color = (244, 134, 66)\n\n # Initiate Video Streaming\n vs = cv2.VideoCapture(1 + cv2.CAP_DSHOW)\n time.sleep(2.0)\n\n while True:\n _, frame = vs.read()\n frame = imutils.resize(frame, width=800, height=600)\n (H,W) = frame.shape[:2]\n\n frame = cv2.flip(frame, 1) # Flip if using Webcam\n faceboxes = face_detector.extract_square_facebox(frame)\n\n if faceboxes is not None:\n for facebox in faceboxes:\n face_img = frame[facebox[1]: facebox[3],\n facebox[0]: facebox[2]]\n\n marks = mark_detector.detect_marks(face_img)\n marks *= facebox[2] - facebox[0]\n marks[:, 0] += facebox[0]\n marks[:, 1] += facebox[1]\n\n rvec, tvec, cm, dc = get_headpose(h = H, w = W, landmarks_2d= marks, landmarks_3d= landmarks_3d, lm_2d_index = lm_2d_index)\n\n draw_front_box(frame, color, rvec, tvec, cm, dc)\n\n cv2.imshow(\"3D Face Box\", frame)\n # writer.write(frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"580791358","text":"anya = {'Секретные материалы': 'фантастика', 'Карточный домик': 'драма', 'Рик и Морти': 'фантастика'}\nolya = {'Клан Сопрано': 'криминал', '24': 'драма', 'Во все тяжкие': 'криминал', 'Карточный домик': 'драма'}\nnastya = {'Ведьмак': 'фэнтази', 'Игра престолов': 'фэнтази'}\nsveta = {'Черное зеркало': 'фантастика', 'Карточный домик': 'драма', 'Рик и Морти': 'фантастика'}\n\na = len(list(set(anya) & set(olya)))\nb = len(list(set(anya) & 
set(nastya)))\nc = len(list(set(anya) & set(sveta)))\n\nif a > b and a > c:\n print('У Ани с Олей больше всего любимых сериалов')\nelif b > a and b > c:\n print('У Ани с Настей больше всего любимых сериалов')\nelse:\n print('У Ани со Светой больше всего любимых сериалов')\n\n\n","sub_path":"4-th_hw_part_2.py","file_name":"4-th_hw_part_2.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253895614","text":"from __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport argparse\nimport copy\n\nplt.ion() # interactive mode\n\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport pathlib\nfrom model import Net\n\nfrom IPython import embed\n\nfrom tqdm import tqdm\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\nprint(torch.cuda.is_available())\ntorch.cuda.set_device(0)\n\nparser = argparse.ArgumentParser(description='PyTorch GAIL')\n\nparser.add_argument('--aug', '-aug', type=int, default=0)\nparser.add_argument('--k', '-k', type=int, default=1)\nparser.add_argument('--file_path', '-sfp', type=str, default='saved_mlp.pt')\nargs = parser.parse_args()\n\ndef evaluateModelOnValidationSet():\n\n\t# Create a pytorch dataset\n\tdata_dir = pathlib.Path('./data/tiny-imagenet-200')\n\t# image_count = len(list(data_dir.glob('**/*.JPEG')))\n\tCLASS_NAMES = np.array([item.name for item in (data_dir / 'train').glob('*')])\n\t# print('Discovered {} images'.format(image_count))\n\n\tassert(len(CLASS_NAMES) == 200)\n\n\t# Create the validation data generator\n\tbatch_size = 64\n\tim_height = 64\n\tim_width = 64\n\n\tnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n\t\t\t\t\t\t\t\t\t std=[0.229, 0.224, 0.225])\n\n\t# Should data augmentation be performed on the training data?\n\tif args.aug == 1:\n\t\tprint(\"Validation with Aug\")\n\t\tvalidation_data_transforms = transforms.Compose([\n\t\t\t# transforms.ColorJitter(brightness = 1, contrast = 1, saturation = 1, hue = [-0.2,0.2]),\n\t\t\t# transforms.RandomAffine(degrees = 20, translate = [0.2, 0.2], scale = None, shear = [-5,5]),\n\t\t\ttransforms.RandomGrayscale(p = 0.15),\n\t\t\ttransforms.RandomHorizontalFlip(p = 0.35),\n\t\t\ttransforms.RandomVerticalFlip(p = 0.35),\n\t\t\ttransforms.RandomRotation(degrees = 5),\n\t\t\ttransforms.RandomPerspective(p = 0.2),\n\n\n\t\t\ttransforms.ToTensor(),\n\t\t\tnormalize\n\t\t])\n\telse:\n\t\tprint(\"Validation without Aug\")\n\t\tvalidation_data_transforms = transforms.Compose([\n\t\t\ttransforms.ToTensor(),\n\t\t\tnormalize\n\t\t])\n\n\tdataPathString = './data/tiny-imagenet-200'\n\n\t# Create the validation data generator\n\tvalidation_set = torchvision.datasets.ImageFolder(dataPathString + '/val/data', validation_data_transforms)\n\tvalidation_loader = torch.utils.data.DataLoader(validation_set, batch_size = batch_size,\n\t\t\t\t\t\t\t\t\t\t shuffle = True, num_workers = 1, pin_memory = True)\n\n\t# Size of Validation Data Set\n\tvalidationDataLength = len(validation_set)\n\tassert(validationDataLength == 10000)\n\n\t# Load Model and its Weights\n\tckpt = torch.load(args.file_path)\n\tmodel = Net(200, im_height, 
im_width)\n\tmodel.load_state_dict(ckpt['net'])\n\tmodel = model.cuda()\n\n\t# Put the model in evaluation mode (to test on validation data)\n\tmodel.eval()\n\n\trunning_loss = 0.0\n\trunning_corrects = 0\n\t# Loop through validation batches\n\tfor idx, (inputs, targets) in enumerate(tqdm(validation_loader)):\n\n\t\tinputs = inputs.to(device)\n\t\ttargets = targets.to(device)\n\n\t\t# Run the model on the validation batch\n\t\toutputs = model(inputs)\n\n\t\t# Get validation loss and validation accuracy on this batch\n\t\tcriterion = nn.CrossEntropyLoss()\n\t\tloss = criterion(outputs, targets)\n\t\t_, preds = torch.max(outputs, 1)\n\n\t\t# Keep track of running statistics on validation loss and accuracy\n\n\t\tvalues, indices = outputs.topk(args.k)\n\n\t\trunning_loss += loss.item() * inputs.size(0)\n\t\tfor i in range(len(targets.data)):\n\t\t\tif targets.data[i].cpu().item() in indices[i]:\n\t\t\t\trunning_corrects += 1\n\n\tvalidationLoss = running_loss / validationDataLength\n\tvalidationAccuracy = running_corrects / validationDataLength\n\n\treturn validationLoss, validationAccuracy\n\n\nif __name__ == '__main__':\n\tvalidationLoss, validationAccuracy = evaluateModelOnValidationSet()\n\tprint(\"validationLoss is: \" + str(validationLoss))\n\tprint(\"validationAccuracy is: \" + str(validationAccuracy))\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530667459","text":"def isperfect(n):\n    factors = [1]\n    for i in range(2, n):\n        if i * i > n:\n            break\n        if n % i == 0:\n            factors.append(i)\n            if i != n // i: #use integer division and avoid double-counting the square root\n                factors.append(n // i)\n    return sum(factors) == n\n\ndef find(n):\n    count = 0\n    for i in range(2, n+1):\n        if isperfect(i):\n            count += 1\n    return count\n\n\nwhile True:\n    try:\n        n = int(input())\n        print(find(n))\n    except (EOFError, ValueError): #stop at end of input or on a non-integer line\n        break","sub_path":"count_perfect.py","file_name":"count_perfect.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345327046","text":"from sklearn import model_selection, preprocessing, metrics, svm\nfrom sklearn import decomposition\nfrom scipy.special import softmax\nimport pandas as pd\nimport numpy as np\n\nfrom thundersvm import *\n\nimport json\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Training SVM with Image and Text Features')\nparser.add_argument('--normalize', type=int, default=1,\n                    help='0,1')\nparser.add_argument('--vfeat', type=str, default='feats',\n                    help='feats | logits | feats_fc6 | feats_fc7')\nparser.add_argument('--vmodel', type=str, default='resnet152',\n                    help='resnet50 | resnet101')\nparser.add_argument('--tmodel', type=str, default='bertbase',\n                    help='bertbase | bertweet | covidbert | arabert | arabicbert')\nparser.add_argument('--ttype', type=str, default='clean',\n                    help='clean | raw')\nparser.add_argument('--tfeat', type=str, default='sumavg',\n                    help='sumavg | catavg | last2 | last')\nparser.add_argument('--vtype', type=str, default='imgnet',\n                    help='imgnet | plc | hybrid | t4sa')\nparser.add_argument('--dset', type=str, default='clef_en',\n                    help='clef_en | clef_ar | med')\nparser.add_argument('--split', type=int, default=0,\n                    help='0-4')\nparser.add_argument('--gpu_id', type=int, default=0,\n                    help='0,1,2,3')\n\nargs = parser.parse_args()\n\n\ndef get_best_svm_model(feature_vector_train, label_tr, feature_vector_valid, label_vl):\n    param_grid = [{'kernel':'rbf', 'C': np.logspace(-1, 1, 15),\n                   
'gamma': np.logspace(-2, 1, 15)}]\n\n pca_list = [1.00,0.99,0.98,0.97,0.96,0.95]\n best_acc = 0.0\n best_model = 0\n best_fsc = 0.0\n best_pca_nk = 0\n temp_xtrain = feature_vector_train\n temp_xval = feature_vector_valid\n for pca_nk in pca_list:\n print(pca_nk)\n if pca_nk != 1.0:\n pca = decomposition.PCA(n_components=pca_nk).fit(temp_xtrain)\n feature_vector_train = pca.transform(temp_xtrain)\n feature_vector_valid = pca.transform(temp_xval)\n for params in param_grid:\n for C in params['C']:\n for gamma in params['gamma']:\n # Model with different parameters\n model = SVC(C=C, gamma=gamma, kernel=params['kernel'], random_state=42, \n class_weight='balanced', gpu_id=args.gpu_id, max_iter=1000)\n\n # fit the training dataset on the classifier\n model.fit(feature_vector_train, label_tr)\n\n preds = model.predict(feature_vector_valid)\n # predict the acc on validation dataset\n acc = metrics.accuracy_score(label_vl, preds)\n fsc = metrics.f1_score(label_vl, preds, average='weighted')\n\n if round(acc,4) >= round(best_acc,4):\n best_acc = acc\n best_model = model\n best_pca_nk = pca_nk\n best_fsc = fsc\n\n return best_acc, best_fsc, best_pca_nk, best_model\n\n\n\nvmodel = args.vmodel\ntmodel = args.tmodel\nsplit = args.split\ndset = args.dset\nvfeat = args.vfeat\ntfeat = args.tfeat\nvtype = args.vtype\nttype = args.ttype\n\ndata_loc = 'data/%s/splits/'%(args.dset)\n\ntr_df = pd.read_csv(data_loc+'train_%d.txt'%(split), header=None)\nvl_df = pd.read_csv(data_loc+'val.txt', header=None)\nte_df = pd.read_csv(data_loc+'test_%d.txt'%(split), header=None)\n\ntest_idxs = np.array([idx for idx in te_df[0]])\n\nprint(dset, vtype, vmodel, vfeat, ttype, tmodel, tfeat)\n\nimg_dict = json.load(open('features/image/%s_%s_%s.json'%(dset, vtype, vmodel), 'r'))\n\nif dset != 'clef_ar':\n text_dict = json.load(open('features/text/%s_%s/%s.json'%(dset, tmodel, tfeat), 'r'))\nelse:\n text_dict = json.load(open('features/text/%s_%s_%s/%s.json'%(dset, tmodel, ttype, tfeat), 'r'))\n\n\nft_train = np.column_stack((np.array([img_dict[vfeat][str(idx)] for idx in tr_df[0]]),\n np.array([text_dict[str(idx)] for idx in tr_df[0]])))\nft_val = np.column_stack((np.array([img_dict[vfeat][str(idx)] for idx in vl_df[0]]),\n np.array([text_dict[str(idx)] for idx in vl_df[0]])))\nft_test = np.column_stack((np.array([img_dict[vfeat][str(idx)] for idx in te_df[0]]),\n np.array([text_dict[str(idx)] for idx in te_df[0]])))\n\nlab_train = tr_df[1].to_numpy()\nlab_val = vl_df[1].to_numpy()\nlab_test = te_df[1].to_numpy()\n\nif args.normalize:\n ft_train = preprocessing.normalize(ft_train, axis=1)\n ft_val = preprocessing.normalize(ft_val, axis=1)\n ft_test = preprocessing.normalize(ft_test, axis=1)\n\nprint(ft_train.shape, ft_val.shape, ft_test.shape)\n\naccuracy, f1_score, best_pca_nk, classifier = get_best_svm_model(ft_train, lab_train, ft_val, lab_val)\n\nif best_pca_nk != 1.0:\n pca = decomposition.PCA(n_components=best_pca_nk).fit(ft_train)\n ft_train = pca.transform(ft_train)\n ft_val = pca.transform(ft_val)\n ft_test = pca.transform(ft_test)\n\ntest_preds = classifier.predict(ft_test)\nval_preds = classifier.predict(ft_val)\ntrain_preds = classifier.predict(ft_train)\n\nprint(\"SVM %s, Split-%d, %s-%s-%s-%s-%s\"%(dset, split, vtype, vmodel, vfeat, ttype, tmodel))\nprint(\"PCA No. 
Components: %.2f, Dim: %d, SV: %d\"%(best_pca_nk, ft_val.shape[1], len(classifier.support_)))\nprint(\"C: %.3f, Gamma: %.3f, kernel: %s\\n\"%(classifier.C, classifier.gamma, classifier.kernel))\nprint(\"Train Accuracy: %.4f, Train F1-Score: %.4f\"%(round(metrics.accuracy_score(lab_train, train_preds),4),\n round(metrics.f1_score(lab_train, train_preds, average='weighted'),4)))\n# print(metrics.confusion_matrix(lab_train, train_preds, labels=[0,1]))\nprint(\"Val Accuracy: %.4f, Val F1-Score: %.4f\"%(round(accuracy,4), round(f1_score,4)))\n# print(metrics.confusion_matrix(lab_val, val_preds, labels=[0,1]))\nprint(\"Test Accuracy: %.4f, Test F1-Score: %.4f\"%(\n round(metrics.accuracy_score(lab_test, test_preds),4),\n round(metrics.f1_score(lab_test, test_preds, average='weighted'),4)))\n# print(metrics.confusion_matrix(lab_test, test_preds, labels=[0,1]))\nprint('\\n')","sub_path":"svm_training/svm_imgText.py","file_name":"svm_imgText.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"385265208","text":"# -*- coding: utf-8 -*-\r\n#This file is being created to perform the entire BTM 'optimal' k estimation process\r\n#I'm beginning by copying/pasting 'determine_optK.py' into this file\r\n#Overall, this is program: 39\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport nltk, os, sys, email, re\r\nimport keras\r\nfrom sklearn.cluster import KMeans \r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport random\r\n\r\nrandom.seed(1)\r\nnp.random.seed(2)\r\n\r\n#Read in the data: Below line of code will need to be reconfigured for your filepath\r\ncompany_data = pd.read_excel('C:\\\\Users\\\\Pendl\\\\OneDrive\\\\Documents\\\\TwitterProject\\\\DoyleData\\\\Amazon_Dec_1_2020.xlsx')\r\ncompany_name = company_data.iloc[0, company_data.columns.get_loc(\"Author Name\")]\r\n\r\n#Remove retweets from the company account, as they aren't technically company account tweets\r\npatternDel = \"^RT @\"\r\nfilter1 = company_data[\"Content\"].str.contains(patternDel)\r\ncompany_tweets = company_data[~filter1].copy()\r\n#company_tweets2 = company_data[~filter1].copy()\r\n\r\n#Remove/replace 'smart' apostrophes and quotation marks with standard keyboard equivalents:\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"’\", \"'\") #replace closing smart apostrophes with regular apostrophe\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"‘\", \"'\") #replace opening smart apostrophes with regular apostrophe\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"“\", \"\\\"\") #replace opening smart quotes with regular quotes\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"”\", \"\\\"\") #replace closing smart quotes with regular quotes\r\n\r\n#Examine tweets after removing/replacing 'smart' apostrophes and quotes:\r\n#print(company_tweets[\"Content\"].head(5))\r\n\r\n#Remove apostrophes followed by 's' and replace with nothing (Disney's becomes Disney):\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"'s\", \"\")\r\n\r\n#Remove remaining apostrophes and replace with nothing (convert I'm to Im and such):\r\ncompany_tweets[\"Content\"] = company_tweets[\"Content\"].str.replace(r\"'\", \"\")\r\n\r\n#Perform standardization on the textual contents of the company's tweets:\r\n#No longer keep newline chars in text, replace double spaces with spaces, now keeping hashtag symbols 
themselves\r\ndef standardize_text(df, text_field):\r\n    df[text_field] = df[text_field].str.replace(r\"\\.\", \"\") #remove/replace periods w/ nothing (the dot must be escaped or the regex matches every character). Should now count acronyms as one word\r\n    df[text_field] = df[text_field].str.replace(r\"&\", \"and\") #replace ampersands with 'and'\r\n    df[text_field] = df[text_field].str.replace(r\"http\\S+\", \"\") #remove links and replace w/ nothing\r\n    df[text_field] = df[text_field].str.replace(r\"http\", \"\") #ensure all links have been removed\r\n    df[text_field] = df[text_field].str.replace(r\"@\\S+\", \"\") #remove @username mentions and replace with nothing\r\n    df[text_field] = df[text_field].str.replace(r\"[^A-Za-z0-9(),!?#@\\'\\`\\\"\\_]\", \" \")#Remove/replace anything that's not a capital/lowercase letter, number, parentheses, comma, or any of the listed symbols with a space\r\n    df[text_field] = df[text_field].str.replace(r\"@\", \"at\") #replace any remaining '@' symbols with 'at'\r\n    df[text_field] = df[text_field].str.lower() #convert all remaining text to lowercase\r\n    #remove double spaces and replace with single space\r\n    df[text_field] = df[text_field].str.replace(r\"\\s+\", \" \")\r\n    return df\r\n\r\ntextual_tweets = standardize_text(company_tweets, \"Content\")\r\n\r\n#Examine tweets after standardization has been performed:\r\n#print(textual_tweets[\"Content\"].head(5))\r\n\r\n#Perform lemmatization on the textual contents of the tweets:\r\n##! Code for this function derived from the following link: https://www.machinelearningplus.com/nlp/lemmatization-examples-python/\r\nfrom textblob import TextBlob, Word\r\n\r\ndef lem_with_postag(df, text_field):\r\n    tag_dict = {\"J\": 'a',\r\n                \"N\": 'n',\r\n                \"V\": 'v',\r\n                \"R\": 'r'}\r\n    output = []\r\n    for tweet in df[text_field]:\r\n        sent = TextBlob(tweet)\r\n        words_and_tags = [(w, tag_dict.get(pos[0], 'n')) for w, pos in sent.tags]\r\n        lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags]\r\n        lemTweet = \" \".join(lemmatized_list)\r\n        output.append(lemTweet)\r\n    return output\r\n\r\ntextual_tweets[\"Content\"] = lem_with_postag(textual_tweets, \"Content\")\r\n#print(textual_tweets[\"Content\"].head(5))\r\n\r\n#Removing tweets that weren't originally in English\r\nEnglish_tweets = textual_tweets[textual_tweets[\"Language\"] == \"en\"]\r\n\r\n#Removing rows with no text left inside them\r\nfilter1 = English_tweets[\"Content\"] != \"\"\r\ncleanGlish_tweets = English_tweets[filter1]\r\n\r\n#Remove stop words from the data:\r\nfrom nltk.corpus import stopwords\r\nstop_words = set(stopwords.words(\"english\"))\r\n\r\n \r\n##Expand on the initial set of stopwords:\r\nstop_words2 = pd.DataFrame(stop_words)\r\nstop_words2[\"Words\"] = stop_words\r\nadd_stopwords = stop_words2[\"Words\"].str.replace(r\"'\", \"\") #replace apostrophes in initial set of stopwords with nothing\r\n\r\n#Add the newly created stopwords to the original set:\r\nfor word in add_stopwords:\r\n    if word not in stop_words:\r\n        stop_words.add(word)\r\n    \r\n#These words need to be added manually to the set of stopwords:\r\nstop_words.add(\"wed\")\r\nstop_words.add(\"us\")\r\n#Lemmatization, for some reason, converts \"us\" to \"u\". 
Therefore, \"u\" should be added as a stopword as well (for lemmatized versions)\r\nstop_words.add(\"u\")\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nvectorizer = TfidfVectorizer(stop_words=stop_words)\r\n\r\n##Filter out tweets w/ less than 3 words after stop word removal:\r\ndef clean_tokenize(df, text_field, stop_set):\r\n output = []\r\n for tweet in df[text_field]:\r\n clean_toks = []\r\n for tok in tweet:\r\n if tok not in stop_set:\r\n clean_toks.append(tok)\r\n output.append(clean_toks)\r\n return output\r\n\r\n\r\nfrom nltk.tokenize import RegexpTokenizer\r\n \r\ntokenizer = RegexpTokenizer(r'\\w+')\r\n \r\ncleanGlish_tweets[\"tokens\"] = cleanGlish_tweets[\"Content\"].apply(tokenizer.tokenize)\r\ncleanGlish_tweets[\"clean_tokens\"] = clean_tokenize(cleanGlish_tweets, \"tokens\", stop_words)\r\n\r\n#Filter out tweets with less than 3 words:\r\ncleanGlish_tweets[\"num_words\"] = [len(token) for token in cleanGlish_tweets[\"clean_tokens\"]]\r\ncleanGlish_tweets2 = cleanGlish_tweets[cleanGlish_tweets[\"num_words\"] >= 3].copy()\r\n\r\n#Extract the remaining textual contents of tweets:\r\n#clean_tokens = cleanGlish_tweets2[\"clean_tokens\"]\r\n#Doesn't hurt to examine some of them:\r\nprint(cleanGlish_tweets2[\"clean_tokens\"].head(5))\r\n#print(\"Break\")\r\n\r\n#x = vectorizer.fit_transform(clean_tokens)\r\n#x = vectorizer.fit_transform(cleanGlish_tweets2[\"clean_tokens\"])\r\n#x = vectorizer.fit_transform(str(clean_tokens))\r\n#clean_tokens = [clean_tokens]\r\n#x = vectorizer.fit_transform(clean_tokens)\r\n#x = vectorizer.fit_transform(str(clean_tokens))\r\n#x = vectorizer.fit_transform(cleanGlish_tweets2[\"clean_tokens\"].str)\r\ncleanGlish_tweets2[\"clean_tokens\"] = [\" \".join(tok) for tok in cleanGlish_tweets2[\"clean_tokens\"].values]\r\nprint(cleanGlish_tweets2[\"clean_tokens\"].head(5))\r\n#print(\"Break\")\r\nclean_tweets = cleanGlish_tweets2[\"clean_tokens\"]\r\nx = vectorizer.fit_transform(clean_tweets)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import silhouette_score\r\n\r\nsum_squared_dists = []\r\nkm_silh = []\r\n#Considering I have yet to see a best k greater than 13, I'm reducing K's range from (2, 21) to (2, 16)\r\nK = range(2, 16)\r\n\r\nfor k in K:\r\n km = KMeans(n_clusters=k, max_iter=200, n_init=10)\r\n km = km.fit(x)\r\n preds = km.predict(x)\r\n silh = silhouette_score(x, preds)\r\n sum_squared_dists.append(km.inertia_)\r\n km_silh.append(silh)\r\n \r\nplt.plot(K, sum_squared_dists, 'bx-')\r\nplt.xlabel('k')\r\nplt.ylabel('Sum of squared distances')\r\nplt.title('%s Elbow Method for Optimal k' % company_name)\r\nplt.show()\r\n#######################################################################\r\n#See if silhouette scores are better for determining optimal k\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler = MinMaxScaler()\r\n#Actually, think this can all be done above as well\r\n\r\nplt.figure(figsize=(7,4))\r\nplt.title(\"%s Silhouette Scores\" % company_name)\r\nplt.scatter(x=[i for i in range(2,16)],y=km_silh,s=150,edgecolor='k')\r\nplt.grid(True)\r\nplt.xlabel(\"Number of clusters\",fontsize=6)\r\nplt.ylabel(\"Silhouette score\",fontsize=6)\r\nplt.xticks([i for i in range(2,16)],fontsize=8)\r\nplt.yticks(fontsize=8)\r\nplt.show()\r\n\r\nprint(\"\\nSilhouette scores:\")\r\nfor val in km_silh:\r\n print(val)\r\n\r\n#Function to calculate percent change in silhouette scores\r\n#Code derived from: 
https://stackoverflow.com/questions/30926840/how-to-check-change-between-two-values-in-percent\r\ndef get_change(current, previous):\r\n    if current == previous:\r\n        return 0\r\n    try:\r\n        return ((current - previous) / previous) * 100.0\r\n    except ZeroDivisionError:\r\n        return -1000\r\n\r\n#Calculate percent changes:\r\nchanges = [0]\r\nfor i in range(len(km_silh) - 1):\r\n    j = i + 1\r\n    change = get_change(km_silh[j], km_silh[i])\r\n    changes.append(change)\r\n\r\n#Examine percent changes:\r\nprint(\"\\nPercent changes:\")\r\nfor val in changes:\r\n    print(val)\r\n    \r\n#Determine which k values are suitable for testing:\r\npotential_k = []\r\n\r\nfor i in range(len(changes)):\r\n    if changes[i] < 1 and i != 0: #if the silhouette score decreased, or only increased by less than 1% (and it's not the first obs, which always has 0% increase)\r\n        k = i + 1 # + 1 instead of 2 because we want to grab the value before the decrease (or insignificant increase)\r\n        potential_k.append(k)\r\n\r\nprint(\"For %s, the k values to be tested are:\" % company_name)\r\nprint(potential_k)\r\n\r\n\r\n##############################################################################################################################################\r\n#BTM online training:\r\n\r\n#Bring in the vectorizer to be used for BTM and supply pre-defined stopwords\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nvec = CountVectorizer(stop_words=stop_words)\r\n\r\n#Vectorize the tweets:\r\nX = vec.fit_transform(cleanGlish_tweets2[\"Content\"]).toarray()\r\n\r\n#Get the vocabulary and the biterms from the tweets:\r\nfrom biterm.utility import vec_to_biterms, topic_summuary\r\n\r\nvocab = np.array(vec.get_feature_names())\r\nbiterms = vec_to_biterms(X)\r\n\r\n#Create a BTM and pass the biterms to train it, per k value in potential_k:\r\nfrom biterm.btm import oBTM\r\nimport time\r\nbest_k = []\r\nbest_coherence = []\r\n\r\n#Function to perform online BTM training\r\ndef speedyBTM(num_top, vocabulary, b_terms):\r\n    btm = oBTM(num_topics=num_top, V=vocabulary) #create the btm object\r\n    start_time = time.time()\r\n    for i in range(0, len(b_terms), 100): #process chunks of 100 texts\r\n        biterms_chunk = b_terms[i:i + 100] #use the b_terms parameter, not the global biterms\r\n        btm.fit(biterms_chunk, iterations=50)\r\n    topics = btm.transform(b_terms)\r\n    end_time = time.time()\r\n    run_time = end_time - start_time\r\n    print(\"For k = %s topics..\" % num_top)\r\n    print(\"BTM online took %s seconds to train\" % run_time)\r\n    #Examine topic coherence scores:\r\n    print(\"\\nTopic Coherence:\")\r\n    topic_summuary(btm.phi_wz.T, X, vocab, 10)  \r\n\r\n\r\ntotal_start = time.time()\r\n#Train a BTM model on each potential k:\r\nfor val in potential_k:\r\n    speedyBTM(val, vocab, biterms)\r\n    \r\ntotal_end = time.time()\r\ntotal_time = total_end - total_start\r\nprint(\"For %s, total BTM estimation run-time was %s\" % (company_name, total_time))\r\n#Unfortunately, I don't know how to save average topic coherence within speedyBTM\r\n#Results will have to be inspected manually to determine which value of k produced the best average topic coherence\r\n","sub_path":"estimate_BTM.py","file_name":"estimate_BTM.py","file_ext":"py","file_size_in_byte":11774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75918070","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport xlrd\nimport pymysql\n\nconn = pymysql.connect(host='10.55.23.168', port=33062, user='root', passwd=\"1234\", db='rma')\ncur = conn.
cursor()\n\n# 20210113 于真\n#path = 'D:/RMA需求/COPQ/累計資料/2021 月圖 美金_公版_0113.xlsx'\npath = 'D:/RMA需求/COPQ/累計資料/2021 月圖 美金_公版_0312.xlsx'\n\ndata = xlrd.open_workbook(path)\nsheet_raw = data.sheet_by_name('實績版')\nprint(\" row num:\", sheet_raw.nrows,\",col num:\", sheet_raw.ncols)\n\n# From the Excel sheet, take the last month that has a value and treat it as the current month => yearmonthLog\nyearmonthLog = 202101\n#yearmonthLog = '0'\nfor row in range(1, sheet_raw.nrows):\n    yearmonth = str(sheet_raw.cell(row,0).value).replace('.0','')\n    \n    copq = sheet_raw.cell(row,6).value\n    #print(yearmonth,' ',yearmonthLog,' copq:',copq)\n    if copq!='' and copq > 0 and yearmonth not in ('2021改善前','2020實績') and int(yearmonth) > int(yearmonthLog):\n        yearmonthLog = yearmonth\nprint('yearmonthLog:',yearmonthLog)\n\nif sheet_raw.nrows > 0:\n    #cur.execute(\"delete from rma.copq_accumulation_actachievement_w13 where yearmonthLog=%s\",(yearmonthLog)) \n    #cur.execute(\"delete from rma.copq_accumulation_actachievement_2021\") \n    cur.execute(\"delete from rma.copq_accumulation_actachievement_2021 where yearmonthLog=%s\",(yearmonthLog)) \n    for row in range(1, sheet_raw.nrows): \n        yearmonth = str(sheet_raw.cell(row,0).value).replace('.0','')\n        #if yearmonth == '2019實績' : \n        #    continue;\n        app = sheet_raw.cell(row,2).value\n        area = sheet_raw.cell(row,3).value\n        m2copq_target = sheet_raw.cell(row,4).value\n        copq_target = sheet_raw.cell(row,5).value\n        copq = sheet_raw.cell(row,6).value\n        m2copq = sheet_raw.cell(row,7).value\n        provision = 0 if sheet_raw.cell(row,8).value == '' else sheet_raw.cell(row,8).value\n        purge = 0 if sheet_raw.cell(row,9).value == '' else sheet_raw.cell(row,9).value  \n        mcr = 0 if sheet_raw.cell(row,10).value == '' else sheet_raw.cell(row,10).value  \n        #print('yearmonth:',yearmonth,' app:',app,',area:',area,',m2copq_target:',m2copq_target,',copq_target:',copq_target,',copq:',copq,',m2copq:',m2copq,',provision:',provision,',purge:',purge)\n        print('yearmonth:',yearmonth,',provision:',provision,',purge:',purge,',mcr:',mcr)\n        cur.execute(\"insert ignore into rma.copq_accumulation_actachievement_2021(yearmonthLog,yearmonth,app,area,m2copq_target,copq_target,copq,m2copq,provision,`purge`,mcr)values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(yearmonthLog,yearmonth,app,area,m2copq_target,copq_target,copq,m2copq,provision,purge,mcr))\ncur.execute(\"commit\") \n","sub_path":"python/m2copq_accumulation_2021.py","file_name":"m2copq_accumulation_2021.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315792284","text":"from django.conf.urls import patterns, include, url\n#from django.contrib import admin\nfrom settings import STATIC_ROOT,MEDIA_ROOT\nfrom views import login, logout, index\n\nurlpatterns = patterns('',\n    url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': STATIC_ROOT}),\n    url(r'^file/(?P<path>.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT}),\n    \n    url(r'^$', login),\n#    url(r'^$', index),\n    url(r'^login/$', login, name='login'),\n    url(r'^logout/$', logout, name='logout'),\n    url(r'^index/$', index, name='index'),\n    \n    url(r'^product/', include('apps.product.urls')),\n    \n)\n","sub_path":"announcement/announcement/announcement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237222168","text":"class RingBuffer:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.storage = [None] * capacity\n        self.pointer = 0\n\n    
def append(self, item):\n        # nothing can be stored if there is no capacity\n        if self.capacity == 0:\n            return\n\n        # overwrite the current (oldest) slot, then advance the pointer,\n        # wrapping around to the start once the end is reached\n        self.storage[self.pointer] = item\n        self.pointer = (self.pointer + 1) % self.capacity\n\n    def get(self):\n        # return only the slots that have been filled\n        true_values = [i for i in self.storage if i is not None]\n        return true_values","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186140362","text":"\"\"\"Module for handling symlinks on windows\"\"\"\n\nimport ctypes.wintypes\nimport errno\nimport os\nimport struct\n\nFSCTL_GET_REPARSE_POINT = 0x900a8\nFILE_ATTRIBUTE_REPARSE_POINT = 0x0400\nGENERIC_READ = 0x80000000\nOPEN_EXISTING = 3\nINVALID_HANDLE_VALUE = ctypes.wintypes.HANDLE(-1).value\nINVALID_FILE_ATTRIBUTES = 0xFFFFFFFF\nFILE_FLAG_REPARSE_BACKUP = 0x2200000\n\n\ndef _errcheck_link(value, func, args):  # pylint: disable=W0613\n    \"\"\"Checks CreateSymbolicLinkW and CreateHardLinkW result\"\"\"\n    # The windows api returns nonzero if the call was successful\n    if value != 0:\n        return\n\n    last_error = ctypes.windll.kernel32.GetLastError()\n    # Somehow CreateSymbolicLinkW and CreateHardLinkW return zero\n    # and the last error is 2 (The system cannot find the file specified)\n    # but the link is created successfully\n    # it seems like a bug in the WinAPI\n    if last_error == 0 or last_error == 2:\n        return\n    if last_error == 183:\n        raise OSError(errno.EEXIST,\n                      \"Cannot create a file when that file already exists\",\n                      args[0])\n\n\n# pylint: disable=C0103\nDeviceIoControl = ctypes.windll.kernel32.DeviceIoControl\nDeviceIoControl.argtypes = [\n    ctypes.wintypes.HANDLE,                 # HANDLE hDevice\n    ctypes.wintypes.DWORD,                  # DWORD dwIoControlCode\n    ctypes.wintypes.LPVOID,                 # LPVOID lpInBuffer\n    ctypes.wintypes.DWORD,                  # DWORD nInBufferSize\n    ctypes.wintypes.LPVOID,                 # LPVOID lpOutBuffer\n    ctypes.wintypes.DWORD,                  # DWORD nOutBufferSize\n    ctypes.POINTER(ctypes.wintypes.DWORD),  # LPDWORD lpBytesReturned\n    ctypes.wintypes.LPVOID                  # LPOVERLAPPED lpOverlapped\n]\nDeviceIoControl.restype = ctypes.wintypes.BOOL\n\n# pylint: disable=C0103\nCreateSymbolicLinkW = ctypes.windll.kernel32.CreateSymbolicLinkW\nCreateSymbolicLinkW.argtypes = [\n    ctypes.c_wchar_p,  # LPTSTR lpSymlinkFileName\n    ctypes.c_wchar_p,  # LPTSTR lpTargetFileName\n    ctypes.c_uint32    # DWORD dwFlags\n]\n\nCreateSymbolicLinkW.restype = ctypes.wintypes.BOOLEAN\nCreateSymbolicLinkW.errcheck = _errcheck_link\n\n# pylint: disable=C0103\nCreateHardLinkW = ctypes.windll.kernel32.CreateHardLinkW\nCreateHardLinkW.argtypes = [\n    ctypes.c_wchar_p,  # LPCTSTR lpFileName\n    ctypes.c_wchar_p,  # LPCTSTR lpExistingFileName\n    ctypes.c_void_p    # LPSECURITY_ATTRIBUTES lpSecurityAttributes\n]\nCreateHardLinkW.restype = ctypes.wintypes.BOOL\nCreateHardLinkW.errcheck = _errcheck_link\n\n\ndef _islink(path):\n    \"\"\"Gets whether the specified path is a symlink\"\"\"\n    if not os.path.isdir(path):\n        return False\n\n    if not isinstance(path, str):\n        path = str(path)\n\n    attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n    if attributes == INVALID_FILE_ATTRIBUTES:\n        return False\n\n    return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0\n\n\ndef device_io_control(hDevice, ioControlCode, input_buffer, output_buffer):\n    \"\"\"Sends a control code directly to a specified device driver,\n    causing the corresponding device to 
perform the corresponding operation\"\"\"\n    if input_buffer:\n        input_size = len(input_buffer)\n    else:\n        input_size = 0\n\n    if isinstance(output_buffer, int):\n        output_buffer = ctypes.create_string_buffer(output_buffer)\n\n    output_size = len(output_buffer)\n    assert isinstance(output_buffer, ctypes.Array)\n    bytesReturned = ctypes.wintypes.DWORD()\n\n    status = DeviceIoControl(hDevice, ioControlCode, input_buffer, input_size,\n                             output_buffer, output_size, bytesReturned, None)\n\n    if status != 0:\n        return output_buffer[:bytesReturned.value]\n    else:\n        return None\n\n\ndef _readlink(path):\n    \"\"\" Windows readlink implementation. \"\"\"\n    is_unicode = isinstance(path, str)\n\n    if not is_unicode:\n        path = str(path)\n\n    if not _islink(path):\n        raise OSError(errno.EINVAL, \"Invalid argument\", path)\n\n    # Open the file correctly depending on the string type.\n    hfile = ctypes.windll.kernel32.CreateFileW(path, GENERIC_READ, 0, None,\n                                               OPEN_EXISTING,\n                                               FILE_FLAG_REPARSE_BACKUP, None)\n\n    if hfile == INVALID_HANDLE_VALUE:\n        raise OSError(errno.ENOENT, \"No such file or directory\", path)\n\n    # MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16384 = (16*1024)\n    data_buffer = device_io_control(hfile, FSCTL_GET_REPARSE_POINT, None,\n                                    16384)\n    ctypes.windll.kernel32.CloseHandle(hfile)\n\n    # Minimum possible length (assuming length of the target is bigger than 0)\n    if not data_buffer or len(data_buffer) < 9:\n        raise OSError(errno.ENOENT, \"No such file or directory\", path)\n\n    # typedef struct _REPARSE_DATA_BUFFER {\n    #   ULONG ReparseTag;\n    #   USHORT ReparseDataLength;\n    #   USHORT Reserved;\n    #   union {\n    #       struct {\n    #           USHORT SubstituteNameOffset;\n    #           USHORT SubstituteNameLength;\n    #           USHORT PrintNameOffset;\n    #           USHORT PrintNameLength;\n    #           ULONG Flags;\n    #           WCHAR PathBuffer[1];\n    #       } SymbolicLinkReparseBuffer;\n    #       struct {\n    #           USHORT SubstituteNameOffset;\n    #           USHORT SubstituteNameLength;\n    #           USHORT PrintNameOffset;\n    #           USHORT PrintNameLength;\n    #           WCHAR PathBuffer[1];\n    #       } MountPointReparseBuffer;\n    #       struct {\n    #           UCHAR DataBuffer[1];\n    #       } GenericReparseBuffer;\n    #   } DUMMYUNIONNAME;\n    # } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;\n\n    SymbolicLinkReparseFormat = 'LHHHHHHL'\n    SymbolicLinkReparseSize = struct.calcsize(SymbolicLinkReparseFormat)\n\n    # Only handle SymbolicLinkReparseBuffer\n    # pylint: disable=W0612\n    (tag, dataLength, reserver, SubstituteNameOffset, SubstituteNameLength,\n     PrintNameOffset, PrintNameLength,\n     Flags) = struct.unpack(SymbolicLinkReparseFormat,\n                            data_buffer[:SymbolicLinkReparseSize])\n\n    start = SubstituteNameOffset + SymbolicLinkReparseSize\n    actualPath = \\\n        data_buffer[start: start + SubstituteNameLength].decode(\"utf-16\")\n\n    index = actualPath.find(\"\\0\")\n    if index > 0:\n        actualPath = actualPath[:index]\n\n    if actualPath.startswith(\"\\\\??\"):\n        actualPath = actualPath[4:]\n\n    if not is_unicode:\n        return str(actualPath)\n\n    return actualPath\n\n\ndef _link(filename, existing_filename):\n    \"\"\"link(filename, existing_filename)\n    Creates a hard link named filename pointing to existing_filename\"\"\"\n    CreateHardLinkW(filename, existing_filename, 0)\n\n\ndef _symlink(source, link_name):\n    \"\"\"symlink(source, link_name)\n    Creates a symbolic link pointing to source named link_name\"\"\"\n    flags = 0\n\n    if source is not None and os.path.isdir(source):\n        flags = 1\n\n    CreateSymbolicLinkW(link_name, source, flags)\n\n\ndef _unlink(path):\n    \"\"\"Remove (delete) the file path.\"\"\"\n    if os.path.isdir(path):\n        os.rmdir(path)\n    else:\n        os.remove(path)\n\n\nos.symlink = _symlink\nos.link = 
_link\nos.readlink = _readlink\nos.path.islink = _islink\nos.unlink = _unlink\n","sub_path":"treadmill/syscall/winsymlink.py","file_name":"winsymlink.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84961373","text":"import yaml\n\n\nclass ConfigurationValueObject:\n    def __init__(self):\n        self.general_configuration = None\n        self.calibration_configuration = None\n        self.matching_configuration = None\n        self.reconstruction_configuration = None\n        self.presentation_configuration = None\n\n    @staticmethod\n    def build_configuration_from_file(file):\n        with open(file, 'r') as configuration_file:\n            try:\n                configuration = yaml.load(configuration_file, Loader=yaml.UnsafeLoader)\n\n                configuration_value_object = ConfigurationValueObject()\n                configuration_value_object.general_configuration = GeneralConfigurationValueObject(\n                    configuration['parameters']['general']\n                )\n                configuration_value_object.calibration_configuration = CalibrationConfigurationValueObject(\n                    configuration['parameters']['calibration']\n                )\n                configuration_value_object.matching_configuration = MatchingConfigurationValueObject(\n                    configuration['parameters']['matching']\n                )\n                configuration_value_object.reconstruction_configuration = ReconstructionConfigurationValueObject(\n                    configuration['parameters']['reconstruction']\n                )\n                configuration_value_object.presentation_configuration = PresentationConfigurationValueObject(\n                    configuration['parameters']['presentation']\n                )\n\n                return configuration_value_object\n\n            except yaml.YAMLError as exc:\n                print(exc)\n\n\nclass GeneralConfigurationValueObject:\n    def __init__(self, parameters):\n        self.sets_folder = parameters['sets_folder']\n\n\nclass CalibrationConfigurationValueObject:\n    def __init__(self, parameters):\n        self.type = parameters['type']\n\n\nclass MatchingConfigurationValueObject:\n    def __init__(self, parameters):\n        self.type = parameters['type']\n\n\nclass ReconstructionConfigurationValueObject:\n    def __init__(self, parameters):\n        self.type = parameters['type']\n\n\nclass PresentationConfigurationValueObject:\n    def __init__(self, parameters):\n        self.type = parameters['type']\n","sub_path":"code/src/vision/presentation/value_objects/configuration_value_object.py","file_name":"configuration_value_object.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616935079","text":"import pymysql as ps\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n# Connect to the DB\nconn = ps.connect(host='localhost',\n                  user='root',\n                  password='',\n                  db='bicycle',\n                  charset='utf8')\ncurs = conn.cursor()\n# Load the CSV file and drop unused columns\ndef loadcsv(fileName):\n    cs = pd.read_csv(fileName,header=None, skiprows=1,\n                     names=[\"Number\",'Code',\"rentalOffice\",\"OfficeEname\",\n                            \"esripk\",'latitude','longitude']\n                     )\n    del cs['esripk']\n\n    return cs\n# Save the CSV data to the DB\ndef insertData():\n    cs = loadcsv(r\"../data/csv/seouldata.csv\")\n    print(cs.head())\n    # The loaded CSV file is held as a DataFrame at this point\n    engine = create_engine(\"mysql+pymysql://root:\"+\"\"+\n                           \"@localhost:3306/bicycle?charset=utf8\",\n                           encoding='utf-8')\n    encon = engine.connect()\n    cs.to_sql(name='seoulData',con=engine,if_exists='append',index=False)\n    encon.close()\n\ninsertData()","sub_path":"Rental 
Bicycle/MYSQL/dataLoad_SeoulData.py","file_name":"dataLoad_SeoulData.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33444559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 31 15:32:47 2020\n\n@author: eo\n\"\"\"\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Add local path\n\nimport os\nimport sys\n\ndef find_path_to_local(target_folder = \"local\"):\n \n # Skip path finding if we successfully import the dummy file\n try:\n from local.dummy import dummy_func; dummy_func(); return\n except ImportError:\n print(\"\", \"Couldn't find local directory!\", \"Searching for path...\", sep=\"\\n\")\n \n # Figure out where this file is located so we can work backwards to find the target folder\n file_directory = os.path.dirname(os.path.abspath(__file__))\n path_check = []\n \n # Check parent directories to see if we hit the main project directory containing the target folder\n prev_working_path = working_path = file_directory\n while True:\n \n # If we find the target folder in the given directory, add it to the python path (if it's not already there)\n if target_folder in os.listdir(working_path):\n if working_path not in sys.path:\n tilde_swarm = \"~\"*(4 + len(working_path))\n print(\"\\n{}\\nPython path updated:\\n {}\\n{}\".format(tilde_swarm, working_path, tilde_swarm))\n sys.path.append(working_path)\n break\n \n # Stop if we hit the filesystem root directory (parent directory isn't changing)\n prev_working_path, working_path = working_path, os.path.dirname(working_path)\n path_check.append(prev_working_path)\n if prev_working_path == working_path:\n print(\"\\nTried paths:\", *path_check, \"\", sep=\"\\n \")\n raise ImportError(\"Can't find '{}' directory!\".format(target_folder))\n \nfind_path_to_local()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Imports\n\nimport cv2\nimport numpy as np\n\nfrom time import perf_counter\n\nfrom local.lib.ui_utils.cli_selections import Resource_Selector\n\nfrom local.offline_database.file_database import launch_dbs, close_dbs_if_missing_data\nfrom local.offline_database.object_reconstruction import Smooth_Hover_Object_Reconstruction as Obj_Recon\nfrom local.offline_database.object_reconstruction import Hover_Mapping\nfrom local.offline_database.object_reconstruction import create_trail_frame_from_object_reconstruction\nfrom local.offline_database.snapshot_reconstruction import median_background_from_snapshots\nfrom local.offline_database.classification_reconstruction import create_objects_by_class_dict, get_ordered_object_list\n\nfrom local.lib.ui_utils.local_ui.windows_base import Simple_Window\n\nfrom local.eolib.utils.cli_tools import Datetime_Input_Parser as DTIP\n\nfrom local.lib.file_access_utils.supervised_labels import load_all_supervised_labels, get_svlabel_topclass_label\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define classes\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define functions\n\n# .....................................................................................................................\n\ndef width_height_pair_func(obj_recon_ref, frame_idx):\n \n (x1, 
y1), (x2, y2) = obj_recon_ref.get_box_tlbr(frame_idx)\n width_norm = (x2 - x1)\n height_norm = (y2 - y1)\n \n return (width_norm, height_norm)\n\n# .....................................................................................................................\n\ndef get_data_pair_list(obj_by_class_dict, data_pair_func, print_feedback = True):\n \n # Initialize output. Should contain keys for each object id, storing all width/height samples in a list\n obj_datapair_lists_dict = {}\n \n # Start timing and provide feedback\n t1 = perf_counter()\n if print_feedback:\n print(\"\", \"Generating data pair lists...\", sep = \"\\n\")\n \n # Loop over every object (of every class) and get all data pair samples\n for each_class_label, each_obj_dict in obj_by_class_dict.items():\n \n # Loop over all object ids\n for each_obj_id, each_obj_recon in each_obj_dict.items():\n \n # Get the start/end frame index of each object\n start_idx = each_obj_recon.start_idx\n end_idx = each_obj_recon.end_idx\n \n # Grab every data pair for each object id\n new_datapair_list = [data_pair_func(each_obj_recon, each_idx) for each_idx in range(start_idx, end_idx)]\n obj_datapair_lists_dict[each_obj_id] = new_datapair_list\n \n # End timing and provide final feedback\n t2 = perf_counter()\n if print_feedback:\n print(\" Done! Took {:.0f} ms\".format(1000 * (t2 - t1)))\n \n return obj_datapair_lists_dict\n\n# .....................................................................................................................\n\ndef draw_one_wh_list(display_frame, wh_list, line_color = (0, 100, 100), line_thickness = 1):\n \n # Get frame scaling so we can draw width/height maps back onto the frame\n frame_height, frame_width = display_frame.shape[0:2]\n frame_scale = np.float32((frame_width - 1, frame_height - 1))\n \n # For clarity\n is_closed = False\n \n # Convert the width/height listings to an array and scale to pixels, so we can draw it into the frame\n wh_array = np.float32(wh_list)\n wh_as_px = np.int32(np.round(wh_array * frame_scale))\n cv2.polylines(display_frame, [wh_as_px], is_closed, line_color, line_thickness, cv2.LINE_AA)\n \n return display_frame\n\n# .....................................................................................................................\n\ndef draw_all_wh_list(obj_wh_lists_dict, frame_side_length = 300, line_color = (0, 100, 100), line_thickness = 1):\n \n # Create a blank frame to draw in to\n display_frame = np.zeros((frame_side_length, frame_side_length, 3), dtype=np.uint8)\n \n # Draw every width/height list into the frame\n for each_obj_id, each_wh_list in obj_wh_lists_dict.items():\n \n sv_label = get_svlabel_topclass_label(sv_labels_dict, each_obj_id)\n \n # Skip over the ignores\n if sv_label == \"ignore\":\n continue\n \n # Hard-code line colors\n line_color = (0, 255, 255)\n if sv_label == \"pedestrian\":\n line_color = (0, 255, 0)\n if sv_label == \"vehicle\":\n line_color = (255, 255, 0)\n \n draw_one_wh_list(display_frame, each_wh_list, line_color, line_thickness)\n \n return display_frame\n\n# .....................................................................................................................\n# .....................................................................................................................\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Make selections\n\nenable_debug_mode = True\n\n# Create selector so we can access existing report 
data\nselector = Resource_Selector()\n\n# Select data to run\nlocation_select, location_select_folder_path = selector.location(debug_mode = enable_debug_mode)\ncamera_select, _ = selector.camera(location_select, debug_mode = enable_debug_mode)\n\n# For convenience\npathing_args = (location_select_folder_path, camera_select)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Catalog existing data\n\nsnap_db, obj_db, class_db = launch_dbs(*pathing_args,\n \"snapshots\", \"objects\", \"classifications\")\n\n# Catch missing data\nclose_dbs_if_missing_data(snap_db, error_message_if_missing = \"No snapshot data in the database!\")\nclose_dbs_if_missing_data(obj_db, error_message_if_missing = \"No object trail data in the database!\")\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Ask user for time window\n\n# Get the maximum range of the data (based on the snapshots, because all we can show)\nearliest_datetime, latest_datetime = snap_db.get_bounding_datetimes()\n\n# Ask the user for the range of datetimes to use for selecting data\nuser_start_dt, user_end_dt = DTIP.cli_prompt_start_end_datetimes(earliest_datetime, latest_datetime,\n print_help_before_prompt = False,\n debug_mode = enable_debug_mode)\n\n# Provide feedback about the selected time range\nDTIP.print_start_end_time_range(user_start_dt, user_end_dt)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Create background frame\n\n# Ask database for several snapshot images, so we can 'average' them to make a background frame for display\nbg_frame = median_background_from_snapshots(snap_db, user_start_dt, user_end_dt, 10)\nframe_height, frame_width = bg_frame.shape[0:2]\nframe_wh = (frame_width, frame_height)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Load object data\n\n# Get object metadata from the server\nobj_metadata_generator = obj_db.load_metadata_by_time_range(user_start_dt, user_end_dt)\n\n# Create dictionary of 'reconstructed' objects based on object metadata\nobj_dict = Obj_Recon.create_reconstruction_dict(obj_metadata_generator,\n frame_wh,\n user_start_dt,\n user_end_dt)\n\n# Organize objects by class label -> then by object id (nested dictionaries)\nobj_id_list, obj_by_class_dict, obj_id_to_class_dict = create_objects_by_class_dict(class_db, obj_dict)\n\n# Get an ordered list of the objects for drawing\nordered_obj_list = get_ordered_object_list(obj_id_list, obj_by_class_dict, obj_id_to_class_dict)\n\n# Generate trail hover mapping, for quicker mouse-to-trail lookup\nhover_map = Hover_Mapping(obj_by_class_dict)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Create initial images\n\n# Generate the background display frame, containing all object trails\ntrails_background = create_trail_frame_from_object_reconstruction(bg_frame, ordered_obj_list)\n\n\n#%%\n\nsv_labels_dict = load_all_supervised_labels(*pathing_args, obj_id_list)\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Interaction loop\n\n# Close any previously open windows\ncv2.destroyAllWindows()\n\n# Set up parameter comparison windows\nwidth_height_lists_dict = 
get_data_pair_list(obj_by_class_dict, width_height_pair_func)\nparameter_frame = draw_all_wh_list(width_height_lists_dict)\nparam_window = Simple_Window(\"Width vs Height\")\nparam_window.move_corner_pixels(800, 50)\nparam_window.imshow(parameter_frame)\n\n# Set up main display window\ndisp_window = Simple_Window(\"Display\")\ndisp_window.move_corner_pixels(50, 50)\nprint(\"\", \"Press Esc to close\", \"\", sep=\"\\n\")\n\nwhile True:\n \n # Make clean copies of the frames to display, so we don't muddy up the originals\n display_frame = trails_background.copy()\n \n # Show final display\n winexist = disp_window.imshow(display_frame)\n if not winexist:\n break\n \n # Break on esc key\n keypress = cv2.waitKey(50)\n if keypress == 27:\n break\n\n\n# Some clean up\ncv2.destroyAllWindows()\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Scrap\n\n\n","sub_path":"after_database/configuration_utilities/classifier/by_size_(WIP).py","file_name":"by_size_(WIP).py","file_ext":"py","file_size_in_byte":11893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194032068","text":"\n\nfrom ..utils import Object\n\n\nclass ChatMember(Object):\n \"\"\"\n A user with information about joining/leaving a chat \n\n Attributes:\n ID (:obj:`str`): ``ChatMember``\n\n Args:\n user_id (:obj:`int`):\n User identifier of the chat member \n inviter_user_id (:obj:`int`):\n Identifier of a user that invited/promoted/banned this member in the chat; 0 if unknown\n joined_chat_date (:obj:`int`):\n Point in time (Unix timestamp) when the user joined a chat \n status (:class:`telegram.api.types.ChatMemberStatus`):\n Status of the member in the chat \n bot_info (:class:`telegram.api.types.botInfo`):\n If the user is a bot, information about the bot; may be nullCan be null even for a bot if the bot is not a chat member\n\n Returns:\n ChatMember\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = \"chatMember\"\n\n def __init__(self, user_id, inviter_user_id, joined_chat_date, status, bot_info, **kwargs):\n \n self.user_id = user_id # int\n self.inviter_user_id = inviter_user_id # int\n self.joined_chat_date = joined_chat_date # int\n self.status = status # ChatMemberStatus\n self.bot_info = bot_info # BotInfo\n\n @staticmethod\n def read(q: dict, *args) -> \"ChatMember\":\n user_id = q.get('user_id')\n inviter_user_id = q.get('inviter_user_id')\n joined_chat_date = q.get('joined_chat_date')\n status = Object.read(q.get('status'))\n bot_info = Object.read(q.get('bot_info'))\n return ChatMember(user_id, inviter_user_id, joined_chat_date, status, bot_info)\n","sub_path":"pytglib/api/types/chat_member.py","file_name":"chat_member.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498858788","text":"__author__ = 'Chris_Daigle'\nimport pandas as pd\nimport numpy as np\nfrom time import time\nfrom sklearn.metrics import fbeta_score, accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\n\ndef eng_pipe(data):\n \"\"\"\n Pipeline for completing all transformations\n :param data: pandas dataframe\n \"\"\"\n # Separate, 2.1 in DE\n Y = data['income']\n X = data.drop('income', axis=1)\n\n # One-hot-encoding, 2.2 in DE\n factors = ['age', 'workclass', 'education_level', 'education_num',\n 'marital_status', 'occupation', 'relationship', 'race',\n 
'sex', 'capital_gain', 'capital_loss', 'hours_per_week',\n 'native_country']\n X = pd.get_dummies(X[factors], drop_first=True) # Remove the base case and make dummies\n Y = (Y == '>50K').apply(lambda x: x * 1) # Assign positive class\n\n # Logarithmic transform of skewed factors, 2.2.2 in DE\n skewed = ['capital_gain', 'capital_loss']\n X_log_transformed = pd.DataFrame(data=X).copy()\n X_log_transformed[skewed] = X[skewed].apply(lambda x: np.log(x + 1))\n\n # Normalization, 2.2.4 in DE\n scaler = MinMaxScaler(feature_range=(0, 1)) # default=(0, 1)\n numerical = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']\n X_log_minmax = pd.DataFrame(data=X_log_transformed).copy()\n X_log_minmax[numerical] = scaler.fit_transform(X_log_transformed[numerical])\n\n # Final data:\n X_trans = X_log_minmax\n\n # Split, 2.3 in DE\n X_train, X_test, y_train, y_test = train_test_split(X_trans, Y, random_state=0, test_size=0.2, stratify=Y)\n\n return X_trans, X_train, X_test, y_train, y_test\n\n\ndef full_train_predict(learner, x_train, y_train, x_test, y_test):\n \"\"\"\n Pipeline to train, predict, and score algorithms\n\n :param learner: the learning algorithm to be trained and predicted on\n :param x_train: features training set\n :param y_train: income training set\n :param x_test: features testing set\n :param y_test: income testing set\n\n :return results: f-0.5 score, 0.5 chosen for high precision, avoiding false positives\n \"\"\"\n results = {}\n\n # Fitting\n start = time() # Get start time\n learner.fit(x_train, y_train) # Train model\n end = time() # Get end time\n results['train_time'] = end - start # Calculate the training time\n\n # Predicting\n start = time() # Get start time\n predictions_test = learner.predict(x_test)\n predictions_train = learner.predict(x_train)\n end = time() # Get end time\n results['pred_time'] = end - start # Calculate the total prediction time\n\n # Scoring\n results['acc_train'] = accuracy_score(y_train, predictions_train) # Training accuracy\n results['acc_test'] = accuracy_score(y_test, predictions_test) # Testing accuracy\n results['f_train'] = fbeta_score(y_train, predictions_train, beta=0.5) # Training F-0.5 score\n results['f_test'] = fbeta_score(y_test, predictions_test, beta=0.5) # Testing F-0.5 score\n\n # User feedback\n learner_name = learner.__class__.__name__,\n sample_size = len(x_train)\n factor_size = len(x_train.columns)\n print(\"{} trained on {} samples over {} factors.\".format(learner_name, sample_size, factor_size))\n\n return results\n","sub_path":"assets/ml/sup_charity/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84517943","text":"from typing import List\n\nfrom plenum.common.exceptions import InvalidClientRequest, \\\n UnauthorizedClientRequest\nfrom plenum.common.txn_util import reqToTxn, isTxnForced\nfrom plenum.server.ledger_req_handler import LedgerRequestHandler\nfrom plenum.common.constants import TXN_TYPE, NAME, VERSION, FORCE, DATA\nfrom indy_common.auth import Authoriser\nfrom indy_common.constants import POOL_UPGRADE, START, CANCEL, SCHEDULE, ACTION, POOL_CONFIG, NODE_UPGRADE\nfrom indy_common.roles import Roles\nfrom indy_common.transactions import IndyTransactions\nfrom indy_common.types import Request\nfrom indy_node.persistence.idr_cache import IdrCache\nfrom indy_node.server.upgrader import Upgrader\nfrom indy_node.server.pool_config import PoolConfig\n\n\nclass 
ConfigReqHandler(LedgerRequestHandler):\n write_types = {POOL_UPGRADE, NODE_UPGRADE, POOL_CONFIG}\n\n def __init__(self, ledger, state, idrCache: IdrCache,\n upgrader: Upgrader, poolManager, poolCfg: PoolConfig):\n super().__init__(ledger, state)\n self.idrCache = idrCache\n self.upgrader = upgrader\n self.poolManager = poolManager\n self.poolCfg = poolCfg\n\n def doStaticValidation(self, request: Request):\n identifier, req_id, operation = request.identifier, request.reqId, request.operation\n if operation[TXN_TYPE] == POOL_UPGRADE:\n self._doStaticValidationPoolUpgrade(identifier, req_id, operation)\n elif operation[TXN_TYPE] == POOL_CONFIG:\n self._doStaticValidationPoolConfig(identifier, req_id, operation)\n\n def _doStaticValidationPoolConfig(self, identifier, reqId, operation):\n pass\n\n def _doStaticValidationPoolUpgrade(self, identifier, reqId, operation):\n action = operation.get(ACTION)\n if action not in (START, CANCEL):\n raise InvalidClientRequest(identifier, reqId,\n \"{} not a valid action\".\n format(action))\n if action == START:\n schedule = operation.get(SCHEDULE, {})\n force = operation.get(FORCE)\n force = str(force) == 'True'\n isValid, msg = self.upgrader.isScheduleValid(\n schedule, self.poolManager.getNodesServices(), force)\n if not isValid:\n raise InvalidClientRequest(identifier, reqId,\n \"{} not a valid schedule since {}\".\n format(schedule, msg))\n\n # TODO: Check if cancel is submitted before start\n\n def validate(self, req: Request):\n status = None\n operation = req.operation\n typ = operation.get(TXN_TYPE)\n if typ not in [POOL_UPGRADE, POOL_CONFIG]:\n return\n origin = req.identifier\n try:\n originRole = self.idrCache.getRole(origin, isCommitted=False)\n except BaseException:\n raise UnauthorizedClientRequest(\n req.identifier,\n req.reqId,\n \"Nym {} not added to the ledger yet\".format(origin))\n if typ == POOL_UPGRADE:\n currentVersion = Upgrader.getVersion()\n targetVersion = req.operation[VERSION]\n if Upgrader.compareVersions(currentVersion, targetVersion) < 0:\n # currentVersion > targetVersion\n raise InvalidClientRequest(\n req.identifier,\n req.reqId,\n \"Upgrade to lower version is not allowed\")\n\n trname = IndyTransactions.POOL_UPGRADE.name\n action = operation.get(ACTION)\n # TODO: Some validation needed for making sure name and version\n # present\n txn = self.upgrader.get_upgrade_txn(\n lambda txn: txn.get(\n NAME,\n None) == req.operation.get(\n NAME,\n None) and txn.get(VERSION) == req.operation.get(VERSION),\n reverse=True)\n if txn:\n status = txn.get(ACTION, None)\n\n if status == START and action == START:\n raise InvalidClientRequest(\n req.identifier,\n req.reqId,\n \"Upgrade '{}' is already scheduled\".format(\n req.operation.get(NAME)))\n elif typ == POOL_CONFIG:\n trname = IndyTransactions.POOL_CONFIG.name\n action = None\n status = None\n r, msg = Authoriser.authorised(\n typ, originRole, field=ACTION, oldVal=status, newVal=action)\n if not r:\n raise UnauthorizedClientRequest(\n req.identifier, req.reqId, \"{} cannot do {}\".format(\n Roles.nameFromValue(originRole), trname))\n\n def apply(self, req: Request, cons_time):\n txn = reqToTxn(req, cons_time)\n (start, _), _ = self.ledger.appendTxns([txn])\n return start, txn\n\n def commit(self, txnCount, stateRoot, txnRoot, ppTime) -> List:\n committedTxns = super().commit(txnCount, stateRoot, txnRoot, ppTime)\n for txn in committedTxns:\n # Handle POOL_UPGRADE or POOL_CONFIG transaction here\n # only in case it is not forced.\n # If it is forced then it was handled 
earlier\n # in applyForced method.\n if not isTxnForced(txn):\n self.upgrader.handleActionTxn(txn)\n self.poolCfg.handleConfigTxn(txn)\n return committedTxns\n\n def applyForced(self, req: Request):\n if req.isForced():\n txn = reqToTxn(req)\n self.upgrader.handleActionTxn(txn)\n self.poolCfg.handleConfigTxn(txn)\n","sub_path":"indy_node/server/config_req_handler.py","file_name":"config_req_handler.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6913318","text":"'''\n@author Maifeng\n\n'''\nfrom BP.Myplot import Myplot\nfrom BP.bp_nn import NN\nfrom BP.quantify import Quantify\nfrom tools.setting import *\nfrom tools.mongoDB import Mymongo\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n'''\nError between the values computed by the neural network and the values given by the theoretical formula\n\n\n# rUbV8: ReLU activation, 5 layers, hidden layers ReLU, output layer ReLU\n# EBtbL: ReLU activation, 6 layers, hidden layers ReLU, output layer ReLU\n# 6vg2k: ReLU activation, 7 layers, hidden layers ReLU, output layer ReLU\n\n'''\n\ndef analysis(hide):\n id_weight = '6vg2k'\n bp_values,real_values = get_bp_news(USERNAME, id_weight,200)\n error = list()\n for i in range(len(bp_values)):\n error.append(math.fabs(bp_values[i] - real_values[i]))\n\n # print(error)\n plot(bp_values, real_values,'../data/bp/error_{hide}'.format(hide=str(hide)), hide)\n\ndef get_bp_news(username, id_weight, data_num):\n\n # Initialize mongo\n mongo = Mymongo(MONGO_URL, MONGO_DATABASE)\n mongo.set_open_client()\n\n # Fetch the data\n\n datas = list(mongo.set_find(MONGO_FORM_NEWS))[1000:1000+data_num]\n\n # Fetch the user\n user = mongo.set_find_one(MONGO_FORM_USER, {'username':username})\n\n # Fetch the weights\n history_ws = user['history_w']\n input_w = []\n output_w = []\n for history_w in history_ws:\n if id_weight == history_w['id']:\n input_w = history_w['input_w']\n output_w = history_w['output_w']\n\n # Initialize the neural network\n BP_NN = NN(INPUT_LAYER, HIDE_LAYER, OUTPUT_LAYER)\n # print('input_w:---')\n # for in_w in input_w:\n # print(in_w)\n # print('output_w:---')\n # for out_w in output_w:\n # print(out_w)\n # Load the weights\n BP_NN.add_weight(input_w, output_w)\n input_param = []\n output_param = []\n real_values = []\n combine = []\n test_pats = []\n # Build the test data\n for news in datas:\n input_param = []\n output_param = []\n combine = []\n date = news['date']\n click_num = news['click_num']\n title = news['title']\n content = news['content']\n # Initialize the quantifier\n quantify = Quantify()\n quantify.transform_date(date)\n quantify.transform_click(click_num)\n quantify.transform_words(title,content)\n relevant_result = quantify.relevant(T=T, C=C, F=F)\n real_values.append(relevant_result)\n # print(relevant_result)\n # Collect the quantified parameter values\n output_param.append(relevant_result)\n input_param.append(quantify.weight_date)\n input_param.append(quantify.weight_click)\n input_param.append(quantify.weight_word)\n # Assemble the neural network input/output pair\n combine.append(input_param)\n combine.append(output_param)\n test_pats.append(combine)\n\n # Run the computation\n BP_NN.test(test_pats)\n hope_values = list()\n # Rebuild the results\n hope_value = BP_NN.result\n for i,news in enumerate(datas):\n news['hope_value'] = hope_value[i][0]\n hope_values.append(hope_value[i][0])\n\n\n return hope_values,real_values\n\ndef plot(bp_values,real_values,name,hide):\n error = list()\n for i in range(len(bp_values)):\n error.append(math.fabs(bp_values[i] - real_values[i]))\n\n x = np.linspace(1,len(error),len(error))\n plot = Myplot()\n plot.set_linewidth(1.5)\n plot.set_plot_xylabel('测试数量','误差')\n plot.set_title('误差评估', 'right')\n plot.set_figure(dpi=80,len=16,wei=10)\n # plot.add_plot(x,bp_values,'bp_values',marker='_',linestyle=':')\n # 
plot.add_plot(x,real_values,'real_values',marker='|',linestyle=':')\n plot.add_plot(x,error,'{hide}节点'.format(hide=str(hide)),marker='v')\n plot.save_plot(name)\n plot.show()\n\nif __name__ == '__main__':\n analysis()","sub_path":"BP/analysis_error.py","file_name":"analysis_error.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651747809","text":"from web3.method import (\r\n Method,\r\n default_root_munger,\r\n)\r\n\r\n\r\ndef admin_start_params_munger(module, host='localhost', port='8546', cors='', apis='vns,net,web3'):\r\n return (host, port, cors, apis)\r\n\r\n\r\naddPeer = Method(\r\n \"admin_addPeer\",\r\n mungers=[default_root_munger],\r\n)\r\n\r\n\r\ndatadir = Method(\r\n \"admin_datadir\",\r\n mungers=None,\r\n)\r\n\r\n\r\nnodeInfo = Method(\r\n \"admin_nodeInfo\",\r\n mungers=None,\r\n)\r\n\r\n\r\npeers = Method(\r\n \"admin_peers\",\r\n mungers=None,\r\n)\r\n\r\n\r\nsetSolc = Method(\r\n \"admin_setSolc\",\r\n mungers=[default_root_munger],\r\n)\r\n\r\n\r\nstartRPC = Method(\r\n \"admin_startRPC\",\r\n mungers=[admin_start_params_munger],\r\n)\r\n\r\n\r\nstartWS = Method(\r\n \"admin_startWS\",\r\n mungers=[admin_start_params_munger],\r\n)\r\n\r\n\r\nstopRPC = Method(\r\n \"admin_stopRPC\",\r\n mungers=None,\r\n)\r\n\r\n\r\nstopWS = Method(\r\n \"admin_stopWS\",\r\n mungers=None,\r\n)\r\n","sub_path":"web3/_utils/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96732336","text":"class Solution(object):\n def transpose(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n h, w = len(A), len(A[0])\n ret = [list() for _ in range(w)]\n for i in range(h):\n for j in range(w):\n ret[j].append(A[i][j])\n return ret\n","sub_path":"normal/868_transpose_matrix.py","file_name":"868_transpose_matrix.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467959971","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom . 
import views, adminviews\n\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index, name=\"index\"),\n url(r'^blog/$', views.index, name=\"index\"),\n url(r\"^blog/login/$\", views.login_way, name=\"login\"),\n url(r\"^blog/logout/$\", views.logout_way, name=\"logout\"),\n url(r'^blog/archive/(?P\\d+)/(?P\\d+)/$', views.archive, name=\"archive\"),\n url(r'^blog/tags/(?P\\w+)/$', views.tags, name=\"tags\"),\n url(r'^blog/search/$', views.search, name=\"search\"),\n url(r'^blog/friends/$', views.friends, name=\"friends\"),\n url(r'^blog/opensource/$', views.opensource, name=\"opensource\"),\n url(r'^blog/catalog/(?P\\w+)/$', views.catalog, name=\"catalog\"),\n url(r'^blog/about/$', TemplateView.as_view(template_name='blog/about.html'), name=\"about\"),\n url(r'^blog/(?P[-\\w]+)/$', views.detail, name=\"detail\"),\n url(r'^blog/admin/$', adminviews.admin, name=\"admin_index\"),\n url(r'^blog/admin/entry/list/$', adminviews.admin_entry_list, name=\"entry_list\"),\n url(r'^blog/admin/entry/update/(?P\\d+)/$', adminviews.admin_entry_update, name=\"entry_update\"),\n url(r'^blog/admin/entry/create/$', adminviews.admin_entry, name=\"entry\"),\n url(r'^blog/admin/entry/content/update/$', adminviews.admin_entry_content_update, name=\"content_update\"),\n url(r'^blog/admin/entry/attr/(?P\\d+)/update/$', adminviews.admin_entry_attr_update, name=\"entry_attr\"),\n url(r'^blog/admin/catalog/$', adminviews.admin_add_catalog, name=\"admin_catalog\"),\n url(r'^blog/admin/catalog/(?P\\d+)/del$', adminviews.admin_del_catalog, name=\"admin_catalog_del\"),\n url(r'^blog/admin/tag/$', adminviews.admin_add_tag, name=\"admin_tag\"),\n url(r'^blog/admin/tag/(?P\\d+)/del$', adminviews.admin_del_tag, name=\"admin_tag_del\"),\n url(r'^blog/admin/report/$', adminviews.log_report, name=\"report\")\n)\n","sub_path":"cblog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11971739","text":"import numpy as np\n\naccs = []\naucs = []\nfor i in range(20):\n with open(f'./log/{i+1}.log', 'r') as f:\n acc, auc = f.readlines()[-2:]\n accs.append(float(acc))\n aucs.append(float(auc))\n\nwith open(f'./log/all.log', 'w') as f:\n f.write(f'acc mean: {np.mean(accs)}\\n')\n f.write(f'acc std: {np.std(accs)}\\n')\n f.write(f'auc mean: {np.mean(aucs)}\\n')\n f.write(f'auc std: {np.std(aucs)}')\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636391176","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 01 00:50:48 2017\r\n\r\n@author: xiaojian\r\n\"\"\"\r\nimport numpy as np\r\nimport datetime as dt\r\nimport pandas as pd\r\nfrom datetime import datetime, timedelta\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nimport numpy as np\r\nfrom datetime import datetime,timedelta\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\nfrom netCDF4 import Dataset\r\n\r\n#plt.axis([-70.94,-70.0,41.52659,42.562711])\r\nlat=np.linspace(41.8,42.4,10)\r\nlon=np.linspace(-70.15,-70.75,10)\r\nlo=[]\r\nla=[]\r\nfor a in np.arange(len(lon)):\r\n for b in np.arange(len(lat)):\r\n lo.append(lon[a])\r\n 
la.append(lat[b])\r\n\r\ntt2012=np.load('tt2012.npy')\r\nh2012=np.load('h2012.npy')\r\nfig=plt.figure(figsize=(14,10))\r\naxes1=fig.add_subplot(2,2,1)\r\naxes2=fig.add_subplot(2,2,2)\r\n\r\naxes3=fig.add_subplot(2,2,3)\r\n\r\ndian=['A','B','C','D','E','F','G','H','I','J']\r\n\r\nFN='necscoast_worldvec.dat'\r\nCL=np.genfromtxt(FN,names=['lon','lat'])\r\naxes1.plot(CL['lon'],CL['lat'])\r\naxes1.axis([-70.94,-70.0,41.52659,42.562711])\r\naxes1.scatter(lon,lat,s=5,color='red')\r\nfor a in np.arange(len(lon)):\r\n axes1.text(lon[a]-0.05,lat[a],dian[a])\r\naxes1.xaxis.tick_top() \r\naxes1.set_xlabel('a')\r\naxes2.set_xlabel('b')\r\naxes3.set_xlabel('c')\r\naxes2.set_title('2012')\r\naxes3.set_title('2013')\r\n\r\ntime2012=np.load('time2012.npy')\r\ntemp2012=np.load('temp2012.npy')\r\nttt1=[]\r\nfor a in np.arange(len(time2012)):\r\n \r\n ttt1.append('''%s/%s'''%(time2012[a].month,time2012[a].day))\r\nfor a in np.arange(len(temp2012)):\r\n data2010s1=pd.Series(temp2012[a],index=list(ttt1))\r\n data2010s1.plot(linestyle='-',ax=axes2,label=dian[a])\r\n###########################################################\r\n###########################################################\r\ntime2013=np.load('time2013.npy')\r\ntemp2013=np.load('temp2013.npy')\r\nttt=[]\r\nfor a in np.arange(len(time2013)):\r\n \r\n ttt.append('''%s/%s'''%(time2013[a].month,time2013[a].day))\r\nfor a in np.arange(len(temp2013)):\r\n data2010s=pd.Series(temp2013[a],index=list(ttt))\r\n data2010s.plot(linestyle='-',ax=axes3,label=dian[a])\r\n\r\naxes2.legend()\r\naxes2.set_ylim([0,14])\r\n#axes[1].plot([time2013[0],time2013[-1]],[10,10],color='black')\r\n#axes[1].set_ylabel('number')\r\naxes2.set_ylabel('temperature(Degrees Celsius)')\r\n############################################################\r\naxes3.legend()\r\naxes3.set_ylim([0,14])\r\n#axes[1].plot([time2013[0],time2013[-1]],[10,10],color='black')\r\n#axes[1].set_ylabel('number')\r\naxes3.set_ylabel('temperature(Degrees Celsius)')\r\n\r\nplt.savefig('zx2013',dpi=300)\r\n","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308043321","text":"import hashlib\nimport os\nimport pathlib\nimport re\nimport stat\nimport typing as tp\nimport zlib\n\nfrom pyvcs.refs import update_ref\nfrom pyvcs.repo import repo_find\n\n# import os\n\n\ndef hash_object(data: bytes, fmt: str, write: bool = False) -> str:\n header = fmt + f\" \" + f\"{len(data)}\\0\"\n sha = hashlib.sha1(header.encode() + data).hexdigest()\n if write:\n wd = pathlib.Path(\".\", os.environ[\"GIT_DIR\"], \"objects\", sha[0:2])\n pathlib.Path(wd).mkdir(parents=True, exist_ok=True)\n filename = sha[2:]\n f = open(pathlib.Path(wd, filename), \"wb\")\n if isinstance(data, bytes):\n store = header.encode() + data\n filec = zlib.compress(header.encode() + data)\n f.write(filec)\n f.close()\n return sha\n\n\ndef resolve_object(obj_name: str, gitdir: pathlib.Path) -> tp.List[str]:\n wd = pathlib.Path(gitdir, \"objects\", obj_name[0:2])\n files = []\n for (dirpath, dirnames, filenames) in os.walk(wd):\n for i in filenames:\n if obj_name[2:] == i[:3]:\n files.append(\"\".join([obj_name[0:2], i]))\n if len(files) > 0:\n return files\n else:\n raise Exception(f\"Not a valid object name {obj_name}\")\n\n\ndef find_object(obj_name: str, gitdir: pathlib.Path) -> str:\n # PUT YOUR CODE HERE\n ...\n\n\ndef read_object(sha: str, gitdir: pathlib.Path) -> tp.Tuple[str, bytes]:\n wd = 
pathlib.Path(gitdir, \"objects\", sha[0:2])\n filename = sha[2:]\n f = open(pathlib.Path(wd, filename), \"rb\")\n cont = zlib.decompress(f.read())\n templ = cont.split(b\" \")\n typec = templ[0].decode()\n del templ[0]\n templ_joined = b\" \".join(templ)\n cont = templ_joined.split(b\"\\x00\", maxsplit=1)[1]\n\n return typec, cont\n\n\ndef read_tree(data: bytes) -> tp.List[tp.Tuple[int, str, str]]:\n ans = []\n while data:\n before_sha_ind = data.index(b\"\\00\")\n mode, name = data[:before_sha_ind].decode().split(\" \")\n sha = data[before_sha_ind + 1 : before_sha_ind + 21]\n ans.append((int(mode), sha.hex(), name))\n data = data[before_sha_ind + 21 :]\n return ans\n\n\ndef cat_file(obj_name: str, pretty: bool = True) -> None:\n # PUT YOUR CODE HERE\n typec, cont = read_object(\n obj_name, pathlib.Path(\".\", os.environ.get(\"GIT_DIR\", default=\".git\"))\n )\n if pretty and (typec == \"blob\" or typec == \"commit\"):\n print(cont.decode())\n else:\n tree = read_tree(cont)\n for i in tree:\n if i[0] == 40000:\n obj_type = \"tree\"\n else:\n obj_type = \"blob\"\n print(f\"{i[0]:06}\", obj_type, i[1] + \"\\t\" + i[2])\n\n\ndef find_tree_files(tree_sha: str, gitdir: pathlib.Path) -> tp.List[tp.Tuple[str, str]]:\n ...\n\n\ndef commit_parse(raw: bytes, start: int = 0, dct=None):\n comm_dict: tp.Dict[str, tp.Any]\n comm_dict = {\"message\": []}\n for row in raw.decode().split(\"\\n\"):\n if row.startswith((\"parent\", \"committer\", \"author\", \"tree\")):\n name, val = row.split(\" \", maxsplit=1)\n comm_dict[name] = val\n else:\n comm_dict[\"message\"].append(row)\n return comm_dict\n","sub_path":"homework04/pyvcs/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464295025","text":"# It's recommended to run this with `python3 -I not_impl_gen.py`, to make sure\n# that nothing in your global Python environment interferes with what's being\n# extracted here.\n\nimport pkgutil\nimport os\nimport sys\nimport warnings\n\nsys.path = list(\n filter(\n lambda path: \"site-packages\" not in path and \"dist-packages\" not in path,\n sys.path,\n )\n)\n\n\ndef attr_is_not_inherited(type_, attr):\n \"\"\"\n returns True if type_'s attr is not inherited from any of its base classes\n \"\"\"\n\n bases = type_.__mro__[1:]\n\n return getattr(type_, attr) not in (getattr(base, attr, None) for base in bases)\n\n\ndef gen_methods(header, footer, output):\n objects = [\n bool,\n bytearray,\n bytes,\n complex,\n dict,\n float,\n frozenset,\n int,\n list,\n memoryview,\n range,\n set,\n str,\n tuple,\n object,\n ]\n\n output.write(header.read())\n output.write(\"expected_methods = {\\n\")\n\n for obj in objects:\n output.write(f\" '{obj.__name__}': ({obj.__name__}, [\\n\")\n output.write(\n \"\\n\".join(\n f\" {attr!r},\"\n for attr in dir(obj)\n if attr_is_not_inherited(obj, attr)\n )\n )\n output.write(\"\\n ]),\" + (\"\\n\" if objects[-1] == obj else \"\\n\\n\"))\n\n output.write(\"}\\n\\n\")\n output.write(footer.read())\n\ndef get_module_methods(name):\n with warnings.catch_warnings():\n # ignore warnings caused by importing deprecated modules\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n try:\n return set(dir(__import__(name))) if name not in (\"this\", \"antigravity\") else None\n except ModuleNotFoundError:\n return None\n\n\ndef gen_modules(header, footer, output):\n output.write(header.read())\n\n modules = dict(\n map(\n lambda mod: (\n mod.name,\n # 
check name b/c modules listed have side effects on import,\n # e.g. printing something or opening a webpage\n get_module_methods(mod.name)\n ),\n pkgutil.iter_modules(),\n )\n )\n\n print(\n f\"\"\"\ncpymods = {modules!r}\nlibdir = {os.path.abspath(\"../Lib/\")!r}\n\"\"\",\n file=output,\n )\n\n output.write(footer.read())\n\n\ngen_funcs = {\"methods\": gen_methods, \"modules\": gen_modules}\n\n\nfor name, gen_func in gen_funcs.items():\n gen_func(\n header=open(f\"generator/not_impl_{name}_header.txt\"),\n footer=open(f\"generator/not_impl_{name}_footer.txt\"),\n output=open(f\"snippets/whats_left_{name}.py\", \"w\"),\n )\n\n","sub_path":"tests/not_impl_gen.py","file_name":"not_impl_gen.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34295261","text":"import pdb\nimport os\n\ndef CreateDbg():\n path = \"./dbg.txt\"\n if os.path.exists(path):\n os.remove(path)\n with open(path, \"w\") as f:\n f.write(\"0\")\n\ndef DeleteDbg():\n path = \"./dbg.txt\"\n if os.path.exists(path):\n os.remove(path)\n\ndef SetBreak(num):\n with open(\"./dbg.txt\", \"w\") as f:\n f.write(str(num))\n\ndef DebugBreak(num):\n with open(\"./dbg.txt\") as f:\n dbg = f.readline().strip()\n if int(dbg) == num:\n pdb.set_trace()\n\ndef foo():\n print(1)\n print(2)\n DebugBreak(0)\n print(3)\n print(4)\n DebugBreak(2)\n print(5)\n print(6)\n DebugBreak(3)\n print(7)\n print(8)\n\n\n\nif __name__ == \"__main__\":\n CreateDbg()\n foo()\n DeleteDbg()\n","sub_path":"debug_helper.py","file_name":"debug_helper.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114615977","text":"import game_func\n\n\ndef play():\n\n # Full body structure of the game, with its corresponding functions\n print(\"LET'S GO!\")\n\n while True:\n # set board to empty strings and ask player for preferred symbol\n theBoard = [' '] * 10\n player1_marker, player2_marker = game_func.player_input()\n \n # Randomly select a player to go first\n turn = game_func.choose_first()\n print(str(turn), \"will go first.\")\n play_game = input(\"Are you ready to play? y or n? \")\n \n # Ask the player whether they are ready to play or not\n if play_game == 'y':\n game_on = True\n else:\n game_on = False\n\n \n while game_on:\n \n # Proceed with this branch if Player 1 gets to go first\n if turn == 'Player 1':\n \n game_func.display_board(theBoard)\n position = game_func.player_choice(theBoard)\n game_func.place_marker(theBoard, player1_marker, position)\n\n if game_func.win_check(theBoard, player1_marker):\n game_func.display_board(theBoard)\n print('Congratulations! You have won the game!')\n game_on = False\n else:\n if game_func.full_board_check(theBoard):\n game_func.display_board(theBoard)\n print('The game is a draw!')\n break\n else:\n turn = 'Player 2'\n\n else:\n\n # Proceed with this branch if it's Player 2's turn\n game_func.display_board(theBoard)\n position = game_func.player_choice(theBoard)\n game_func.place_marker(theBoard, player2_marker, position)\n\n if game_func.win_check(theBoard, player2_marker):\n game_func.display_board(theBoard)\n print('Player 2 has won!')\n game_on = False\n else:\n if game_func.full_board_check(theBoard):\n game_func.display_board(theBoard)\n print('The game is a tie!')\n break\n else:\n turn = 'Player 1'\n \n # Ask the players whether they want to play again or not!\n if not game_func.replay():\n break \n\n\n# start the game\nplay()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242052148","text":"import logging\nimport os\nimport time\n\nimport gym\nimport numpy as np\nfrom rl_mdatos.utils.agent import discretize_state, get_tensorboard_writter, state_action_to_tuple\nfrom rl_mdatos.utils.misc import FPS, LOGS_DIR, TRAINED_AGENTS_DIR, VIDEOS_DIR\nfrom rl_mdatos.utils.q_table import (\n create_discrete_q_table,\n create_q_table,\n deterministic_q_table,\n epsilon_greedy_q_table,\n load_q_table,\n save_q_table,\n)\n\n\nclass Sarsa:\n \"\"\"\n :param env: (string) environment registered in gym\n :param params: (dict) hyperparameters specific for a given environment\n :param discrete: (bool) whether to discretize the state or not\n \"\"\"\n\n def __init__(self, env, params, discrete=False):\n logging.debug(\"Creating Sarsa object\")\n\n self.env = env\n self.discrete = discrete\n if discrete:\n self.buckets = params[\"buckets\"]\n self.lower_bounds = params[\"lower_bounds\"]\n self.upper_bounds = params[\"upper_bounds\"]\n\n self.discount_rate = params[\"discount_rate\"]\n self.episodes = params[\"episodes\"]\n self.epsilon = params[\"epsilon\"]\n self.epsilon_rate = params[\"epsilon_rate\"]\n self.learning_rate = params[\"learning_rate\"]\n # terminal_states is only specified for some environments\n if \"terminal_states\" in params:\n self.terminal_states = params[\"terminal_states\"]\n\n def train(self, progress_bar):\n self.writer, log_dir = get_tensorboard_writter(self.env.spec.id, \"Sarsa\")\n logging.info(f\"Training Sarsa agent in {self.env.spec.id}\")\n logging.info(f\"Logging training results in {os.path.normpath(log_dir)}\")\n self.create_q_table()\n for ep in range(self.episodes):\n state = self.reset()\n action = epsilon_greedy_q_table(self.q_table, state, self.epsilon, self.env.action_space)\n self.epsilon *= self.epsilon_rate\n episode_reward = []\n done = False\n while not done:\n new_state, reward, done, _ = self.make_step(action)\n episode_reward.append(reward)\n next_action = epsilon_greedy_q_table(self.q_table, new_state, self.epsilon, self.env.action_space)\n self.epsilon *= self.epsilon_rate\n self.update_q_table(state, new_state, action, next_action, reward, done)\n state = new_state\n action = next_action\n self.writer.add_scalar(\"mean_episode_reward\", np.mean(episode_reward), ep)\n self.writer.add_scalar(\"total_episode_reward\", np.sum(episode_reward), ep)\n progress_bar.update(ep)\n save_q_table(self.q_table, self.env.spec.id, \"sarsa\")\n\n def create_q_table(self):\n if self.discrete:\n self.q_table = create_discrete_q_table(self.buckets, 
self.env.action_space.n)\n else:\n self.q_table = create_q_table(self.env.observation_space.n, self.env.action_space.n, self.terminal_states)\n\n def run_agent(self, episodes, record=False):\n \"\"\"\n :param episodes: (int)\n :param record: (bool)\n \"\"\"\n if record:\n # TODO there's an issue going on and currently videos are not recorded correctly\n # https://github.com/openai/gym/issues/1925\n # As a workaround, videos can be recorded by installing gym from source (pip install -e gym)\n logging.info(f\"Recording video\")\n video_dir = os.path.join(VIDEOS_DIR, self.env.spec.id, \"Sarsa\")\n self.env = gym.wrappers.Monitor(self.env, video_dir, video_callable=lambda episode_id: True, force=True)\n\n logging.info(f\"Running Sarsa agent\")\n self.q_table = load_q_table(self.env.spec.id, \"sarsa\")\n for ep in range(episodes):\n state = self.reset()\n episode_reward = []\n done = False\n while not done:\n try:\n self.env.render()\n time.sleep(1.0 / FPS)\n except NotImplementedError:\n pass\n action = deterministic_q_table(self.q_table, state)\n new_state, reward, done, _ = self.make_step(action)\n episode_reward.append(reward)\n state = new_state\n logging.info(f\"Episode {ep + 1}\")\n logging.info(f\"Total reward: {np.sum(episode_reward)}\")\n logging.info(f\"Mean reward: {np.mean(episode_reward)} \\n\")\n\n def reset(self):\n state = self.env.reset()\n if self.discrete:\n state = discretize_state(state, self.buckets, self.lower_bounds, self.upper_bounds)\n\n return state\n\n def make_step(self, action):\n new_state, reward, done, _ = self.env.step(action)\n if self.discrete:\n new_state = discretize_state(new_state, self.buckets, self.lower_bounds, self.upper_bounds)\n\n return new_state, reward, done, _\n\n def update_q_table(\n self,\n previous_state,\n current_state,\n previous_action,\n current_action,\n reward,\n done,\n ):\n \"\"\"\n Environments can have one or more observation parameters but only one action, thus state parameter types\n can vary.\n\n :param previous_state: (int/np.ndarray/tuple)\n :param current_state: (int/np.ndarray/tuple)\n :param previous_action: (int)\n :param current_action: (int)\n :param reward: (float)\n :param done: (bool)\n \"\"\"\n index_previous = state_action_to_tuple(previous_state, previous_action)\n previous_state_action_value = self.q_table[index_previous]\n if done:\n current_state_action_value = 0.0\n else:\n index_current = state_action_to_tuple(current_state, current_action)\n current_state_action_value = self.q_table[index_current]\n td_error = reward + self.discount_rate * current_state_action_value - previous_state_action_value\n\n previous_state_action_value += self.learning_rate * td_error\n self.q_table[index_previous] = previous_state_action_value\n","sub_path":"rl_mdatos/algos/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":6120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155577395","text":"import os\nimport re\nimport random\n\nfrom fwk.config import Config\nfrom util.path import absolute_path\nfrom util.lang import to_bool\n\n\nclass Subjects:\n\n @classmethod\n def create_list_from_config(cls):\n\n path = absolute_path(os.path.expanduser(Config.config['DATABASE']['local_processing_directory']))\n\n subjects = cls.list_from(path)\n\n if Config.config.has_option('SUBJECTS', 'shuffle'):\n if to_bool(Config.config['SUBJECTS']['shuffle']):\n random.shuffle(subjects)\n\n if Config.config.has_option('SUBJECTS', 'max_subjects'):\n max_subjects = 
int(Config.config['SUBJECTS']['max_subjects'])\n subjects = subjects[:max_subjects]\n\n if Config.config.has_option('SUBJECTS', 'percent_train'):\n percent_train = int(Config.config['SUBJECTS']['percent_train'])\n\n num_subjects = len(subjects)\n num_train = int(percent_train/100 * num_subjects)\n num_test = num_subjects - num_train\n\n train_subjects = subjects[:num_train]\n test_subjects = subjects[num_train:num_train + num_test]\n else:\n train_subjects = subjects\n test_subjects = []\n\n return train_subjects, test_subjects\n\n @staticmethod\n def list_from(path):\n abs_path = os.path.expanduser(os.path.join(path, 'HCP_1200_tensor'))\n if not os.path.isdir(abs_path):\n abs_path = path\n\n files = sorted(os.listdir(abs_path))\n subject_pattern = '[0-9]{6}'\n subjects = [file for file in files if bool(re.match(subject_pattern, file))]\n\n model = Config.get_option('DATABASE', 'model', None)\n file_names = {'dti': 'dti_tensor.nii.gz', 'odf': 'odf.nii.gz', 'fa': 'dti_FA.nii.gz', 'odf_mrtrix': 'WM_FODs.nii.gz'}\n\n def url_for_subject(subject):\n registration = Config.get_option('DATABASE', 'registration', None)\n return os.path.join(abs_path, subject, registration, file_names[model])\n\n fitted_subjects = [subject for subject in subjects if os.path.isfile(url_for_subject(subject))]\n\n return fitted_subjects\n\n @staticmethod\n def _partition(subjects, idxs):\n\n partitioned = []\n for idx in idxs:\n partitioned.append(subjects[:idx])\n del(subjects[:idx])\n\n return partitioned\n","sub_path":"dataset/hcp/subjects.py","file_name":"subjects.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92712882","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom plyfile import PlyData, PlyElement\nimport numpy as np\nimport pandas as pd\n\n\n\nplydata = PlyData.read('1.ply')\nprint(type(plydata))\ndata = plydata.elements[0].data # read the data\ndata_pd = pd.DataFrame(data) # convert to a DataFrame, since a DataFrame can parse structured data\ndata_np = np.zeros(data_pd.shape, dtype=np.float64) # initialize the array that stores the data\nproperty_names = data[0].dtype.names # read the property names\nfor i, name in enumerate(property_names): # read the data property by property, which guarantees a consistent data type\n data_np[:, i] = data_pd[name]\nprint(data_np)\nprint(data_np.shape)\n","sub_path":"laser/read_ply.py","file_name":"read_ply.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"552621895","text":"import numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation\r\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten\r\nfrom keras.optimizers import SGD, Adam\r\nfrom keras.utils import np_utils\r\nfrom keras.datasets import mnist\r\n\r\n\r\ndef load_data():\r\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n # number = 10000\r\n # x_train = x_train[0:number]\r\n # y_train = y_train[0:number]\r\n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\r\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\r\n x_train = x_train.astype('float32')\r\n x_test = x_test.astype('float32')\r\n\r\n # Convert the class vectors to binary class matrices\r\n y_train = np_utils.to_categorical(y_train, 10)\r\n y_test = np_utils.to_categorical(y_test, 10)\r\n\r\n x_train = x_train / 255\r\n x_test = x_test / 255\r\n\r\n return (x_train, y_train), (x_test, y_test)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Load the data: x_train [60000, 784]\r\n # x_test [10000, 784]\r\n # y_train [60000, 10] one-hot style encoding\r\n # y_test [10000, 10] same as above\r\n (x_train, y_train), (x_test, y_test) = load_data()\r\n\r\n # Define the network structure\r\n model = Sequential()\r\n\r\n # Add the input layer and the first hidden layer\r\n # 784 inputs; the first hidden layer has 500 neurons with ReLU activation\r\n model.add(Dense(input_dim=28 * 28, units=500, activation='relu'))\r\n # Add the second hidden layer: 500 neurons with ReLU activation\r\n model.add(Dense(units=500, activation='relu'))\r\n # Define the output layer with ten output nodes\r\n model.add(Dense(units=10, activation='softmax'))\r\n\r\n # Set the compile configuration\r\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n # Train the model\r\n model.fit(x_train, y_train, batch_size=64, epochs=50)\r\n\r\n # Evaluate the model and print the accuracy\r\n result = model.evaluate(x_test, y_test)\r\n print('Test Acc:', result[1])\r\n","sub_path":"数字识别.py","file_name":"数字识别.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135473145","text":"# pylint: disable=invalid-name\nimport ctypes\nimport ctypes.util\nimport os\nfrom typing import Union\n\npid_t = ctypes.c_int\nuid_t = ctypes.c_uint32\ngid_t = ctypes.c_uint32\n\n\n_libc = None\n\n\ndef load_libc() -> ctypes.CDLL:\n global _libc # pylint: disable=global-statement\n\n if _libc is None:\n libc_path = ctypes.util.find_library(\"c\")\n if libc_path is None:\n raise RuntimeError(\"Could not find libc; is your system statically linked?\")\n\n _libc = ctypes.CDLL(libc_path, use_errno=True)\n\n return _libc\n\n\ndef build_oserror(\n eno: int,\n filename: Union[str, bytes, None] = None,\n filename2: Union[str, bytes, None] = None,\n) -> OSError:\n return OSError(eno, os.strerror(eno), filename, None, filename2)\n","sub_path":"psutil_extra/_ffi.py","file_name":"_ffi.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457012603","text":"#!/usr/bin/env python\n\"\"\"\nFunctions with the main logic needed to build the docs\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nfrom doc_builder import sys_utils\n\ndef get_build_dir(build_dir=None, repo_root=None, version=None):\n \"\"\"Return a string giving the path to the build directory.\n\n If build_dir is specified, simply use that.\n\n Otherwise, repo_root must be given. 
If version is also given, then\n the build directory will be:\n os.path.join(repo_root, \"versions\", version).\n If version is not given, then determine version by getting the\n current git branch; then use the above path specification.\n\n Error-checking on directory existence:\n - If build_dir is given, then no error checking is done\n - Otherwise, we ensure that repo_root/versions exists\n - If version is not given, then we also ensure that\n repo_root/versions/version exists, for the determined version.\n \"\"\"\n\n if build_dir is not None:\n if repo_root is not None:\n raise RuntimeError(\"Cannot specify both build-dir and repo-root\")\n if version is not None:\n raise RuntimeError(\"Cannot specify both build-dir and version\")\n return build_dir\n\n if repo_root is None:\n raise RuntimeError(\"Must specify either build-dir or repo-root\")\n\n if version is None:\n version_explicit = False\n branch_found, version = sys_utils.git_current_branch()\n if not branch_found:\n raise RuntimeError(\"Problem determining version based on git branch; \"\n \"set --version on the command line.\")\n else:\n version_explicit = True\n\n build_dir_no_version = os.path.join(repo_root, \"versions\")\n if not os.path.isdir(build_dir_no_version):\n raise RuntimeError(\"Directory {} doesn't exist\".format(build_dir_no_version))\n build_dir = os.path.join(build_dir_no_version, version)\n if not version_explicit:\n if not os.path.isdir(build_dir):\n message = \"\"\"\nDirectory {build_dir} doesn't exist yet.\nIf this is where you really want to build the documentation, rerun adding the\ncommand-line argument '--doc-version {version}'\"\"\".format(build_dir=build_dir,\n version=version)\n raise RuntimeError(message)\n\n return build_dir\n\ndef get_build_command(build_dir, build_target, num_make_jobs):\n \"\"\"Return a string giving the build command.\n\n Args:\n - build_dir: string giving path to directory in which we should build\n - build_target: string: target for the make command (e.g., \"html\")\n - num_make_jobs: int: number of parallel jobs\n \"\"\"\n builddir_arg = \"BUILDDIR={}\".format(build_dir)\n build_command = [\"make\", builddir_arg, \"-j\", str(num_make_jobs), build_target]\n return build_command\n","sub_path":"doc_builder/build_commands.py","file_name":"build_commands.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511189468","text":"import pygame, sys \nfrom pygame.locals import *\npygame.init()\n\nimport random\n\nbasicFont = pygame . font . SysFont ( None , 48)\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255 , 0 , 0)\nGREEN = (100, 225 , 0)\nBLUE = (0 , 0 , 255)\nYELLOW = (225,225,0)\nLIME = (75,255,75)\nTURCHESE = (0,128,228)\nARANCIO = (255,122,0)\nMARRONE = (155,85,0)\nCIANO = (0,225.225)\nGIALLINO = (255,155,255)\nROSA = (255,155,255)\nVERDESCOURO = (10,100,20)\nALTROAZZURRO = (100,155,255)\nROSAPASTELLO = (255,196,208)\nBLUPETROLIO = (5,145,180)\nVERDEPETROLIO = (0,128,128)\nROSA2 = (255,226,238)\nVIOLA = (150,5,175)\n\n\nWINDOW_WIDTH = 1300\nWINDOW_HEIGHT = 800\nwindowSurface = pygame . display . set_mode (( WINDOW_WIDTH , WINDOW_HEIGHT ) , 0 , 32)\npygame . display . set_caption (' ')\n\n\nX_POSITION = 300\nY_POSITION = 100\nPLAYER_WIDTH = 50\nPLAYER_HEIGHT = 50\nplayer = pygame . 
Rect ( X_POSITION , Y_POSITION , PLAYER_WIDTH , PLAYER_HEIGHT )\n\n\nfoodSize = 20\nfood = pygame.Rect (random.randint(0, WINDOW_WIDTH - foodSize), random.randint(0, WINDOW_HEIGHT - foodSize), foodSize, foodSize)\nprint ( food .x , food . y )\n\nwater = pygame.Rect(random.randint(0, WINDOW_WIDTH - foodSize), random.randint(0, WINDOW_HEIGHT - foodSize), foodSize, foodSize)\n\nmoveLeft = False\nmoveRight = False\nmoveUp = False\nmoveDown = False\n\nMOVE_SPEED = 6\nMOVE_SLOW = 1\n\nstartTime = pygame . time . get_ticks ()\n\n\nbackgroundImage = pygame . image . load ('background.png ')\nplayerImage = pygame . image . load ('pacman.png ')\nfoodImage = pygame . image . load ('pizza.png ')\n\nbackgroundStretchedImage = pygame . transform . scale ( backgroundImage , ( WINDOW_WIDTH , WINDOW_HEIGHT ) )\nplayerStretchedImage = pygame . transform . scale ( playerImage , ( PLAYER_WIDTH , PLAYER_HEIGHT ) )\nfoodStretchedImage = pygame . transform . scale ( foodImage , ( foodSize , foodSize ) )\n\nwimdowSurfaceRectangle=windowSurface.get_rect()\n\n\nwhile True:\n for event in pygame . event . get () :\n if event . type == QUIT:\n pygame.quit()\n sys.exit()\n if event . type == KEYDOWN :\n if event . key == K_LEFT or event.key==K_1:\n moveRight = False\n moveLeft = True\n if event . key == K_RIGHT or event.key==K_3:\n moveLeft = False\n moveRight = True\n if event . key == K_UP or event.key==K_5:\n moveDown = False\n moveUp = True\n if event . key == K_DOWN or event.key==K_2:\n moveUp = False\n moveDown = True\n if event . type == KEYUP :\n if event . key == K_ESCAPE or event.key==K_q:\n pygame . quit ()\n sys . exit ()\n if event . key == K_LEFT :\n moveLeft = False\n if event . key == K_RIGHT :\n moveRight = False\n if event . key == K_UP :\n moveUp = False\n if event . key == K_DOWN :\n moveDown = False\n if event . type == MOUSEMOTION :\n print ( event . pos [0] , event . pos [1])\n if event . type == MOUSEBUTTONUP :\n foodSize=foodSize+5\n food=pygame.Rect(food.x, food.y, foodSize, foodSize)\n foodStretchedImage=pygame.transform.scale(foodImage,(foodSize, foodSize))\n if event . type == MOUSEBUTTONDOWN : \n if event . button == 1:\n print ('Hai premuto il tasto sinistro del mouse ')\n elif event . button == 3:\n print ('Hai premuto il tasto destro del mouse ')\n else :\n print (\"Hai premuto il tasto \", event . button )\n\n\n if moveDown and player . bottom < WINDOW_HEIGHT :\n player . top = player . top + MOVE_SPEED\n print ('moving down', player.top,player.bottom, player.right, player.left)\n if moveUp and player . top > 0:\n player . top = player . top - MOVE_SPEED\n print ('moving up', player.top,player.bottom, player.right, player.left)\n if moveLeft and player.left>0:\n player . 
left = player.left - MOVE_SPEED\n print ('moving left', player.top,player.bottom, player.right, player.left)\n if moveRight and player.right.mobile_button').click()\ntime.sleep(1)\nmenu2 = driver.find_element_by_css_selector('.menu>.mat-menu-trigger>.warranty').click()\ntime.sleep(1)\nbalance_on_Compound = driver.find_element_by_link_text('Check profit on Compound').click()\nnew_window = driver.window_handles[1]\ncurrent_window = driver.current_window_handle\ndriver.switch_to.window(new_window)\nassert driver.current_url == \"https://compound.finance/governance/address/0x0BCbAb2FeCC30B7341132B4Ebb36d352E035f1bD\"\ndisplay.stop()\ndriver.quit()\n","sub_path":"2.profit_on_Compound.py","file_name":"2.profit_on_Compound.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127367469","text":"\nimport requests\nfrom lxml import html\n\n\ndef Get_children(link_p, name_c):\n \"\"\"\n функция\n \"\"\"\n page_req = requests.get(link_p)\n page = html.fromstring(page_req.text)\n page.make_links_absolute('http://rus-linux.net')\n #print(name_c.split(' ')[0])\n #print(page.body.xpath(\".//h1[contains(text(),'%s')]\" % name_c.split(' ')[0])[0].getparent().tag)\n try:\n return page.body.xpath(\".//h1[contains(text(),'%s')]\" % name_c.split(' ')[0])[0].getparent().getchildren()\n except (IndexError):\n print('index Error')\n try:\n return page.body.xpath(\".//h1[text()='%s']\" % name_c.split(' ')[0])[0].getparent().getchildren()##############\n except (IndexError):\n print('index Error')\n return page.body.xpath(\".//h1[contains(text(),'%s')]\" % name_c.split(' ')[1])[0].getparent().getchildren()\n\n\ndef Loop_Chart(childs, lin, bool1 = True, bool2 = False, bool3 = False):\n\n in_tag = bool3 #конец статьи в tag\n jamp = bool2 # проверка на тег h2\n hr_bool = bool1\n\n end_pod_chart = False # проверка на второй тег h2\n new_link_bool = False # проверка на конец\n NG_bool = False\n\n new_link = ''\n\n ii = 0\n #print(len(childs))\n\n for pod_chart in childs:\n ii += 1\n #print(ii)\n\n if len(childs) == ii and jamp:\n Loop_Chart(pod_chart.getchildren(), lin, bool2=True)\n break\n\n if len(childs) == ii:\n if end_pod_chart:\n break\n print('end')\n #jamp = True\n Loop_Chart(childs, lin, bool1=False, bool2=True)\n continue\n\n if end_pod_chart and pod_chart.tag == 'hr':\n #print('hr2')\n pod_charts2 = Get_children(new_link, name_chart) ####################\n Loop_Chart(pod_charts2, lin)\n #print(new_link)\n new_link_bool, end_pod_chart = False, False\n break\n\n if pod_chart.tag == 'hr' and not hr_bool:\n #print('not hr')\n hr_bool = True\n continue\n\n if new_link_bool:\n #print('link')\n if len(pod_chart.xpath(\".//a\")) == 0:\n #print('link = 0')\n new_link_bool, end_pod_chart = False, False\n break\n\n new_link = pod_chart.xpath(\"(.//a)[last()]\")[0].values()[0] ##################[last()]\n if new_link == lin :\n break\n if new_link == 'http://rus-linux.net/MyLDP/BOOKS/Architecture-Open-Source-Applications/index.html':\n break\n if new_link == 'http://www.vistrails.org':\n print(new_link)\n new_link_bool = False\n jamp = True\n else:\n print(new_link)\n new_link_bool = False\n end_pod_chart = True\n continue\n\n if pod_chart.tag == 'h2' or pod_chart.tag == 'h3':\n #print('h2')\n jamp = True\n elif not jamp and not end_pod_chart:\n #print(\"jamp\")\n continue\n\n if pod_chart.tag == 'hr' and hr_bool:############\n\n if name_chart == 'NGINX' and not NG_bool:\n NG_bool = True\n continue\n #print('hr')\n 
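# A compact sketch of the XPath-fallback idea used by Get_children above: try
# the same candidate selectors in order and return the first hit. The helper
# name find_heading_children is illustrative, not part of the original module;
# it assumes page_body is an lxml element, as in Get_children.
def find_heading_children(page_body, name):
    words = name.split(' ')
    candidates = [
        ".//h1[contains(text(),'%s')]" % words[0],
        ".//h1[text()='%s']" % words[0],
    ]
    if len(words) > 1:
        candidates.append(".//h1[contains(text(),'%s')]" % words[1])
    for xpath in candidates:
        hits = page_body.xpath(xpath)
        if hits:
            return hits[0].getparent().getchildren()
    return []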
new_link_bool = True\n jamp = False\n continue\n\n if NG_bool:\n #print('NG')\n NG_bool = False\n continue\n\n if jamp:\n #print('in')\n root.append(pod_chart)\n continue\n\n\nf_save = open('Arh.html', 'bw')\n\nfirst = requests.get('http://rus-linux.net/MyLDP/BOOKS/Architecture-Open-Source-Applications/index.html')\n\nparsed_body = html.fromstring(first.text)\n\nroot = html.fromstring('')\n\n#t1 = parsed_body.body.xpath(\".//img[@alt = 'Обложка 1-го тома']\")[0].getparent().xpath(\".//table/tbody/tr/td[2]/a\") #1 and n uninclude\n\nparsed_body.make_links_absolute('http://rus-linux.net')\n\nlink_charts = parsed_body.body.xpath(\".//table/tbody/tr/td[2]/a\")\n\n\n\n#print((len(root)))\n\n\nfor link_chart in link_charts:\n\n if len(link_chart.getchildren()) == 0:\n continue\n\n name_chart = link_chart.getchildren()[0].text\n if not name_chart[0].istitle():\n name_chart = name_chart.upper()\n print(name_chart)\n print((list(link_chart.values())))\n pod_charts = Get_children(link_chart.values()[0], name_chart)\n Loop_Chart(pod_charts, list(link_chart.values())[0])\n\nf_save.write(html.tostring(root))\n\nf_save.close()\n","sub_path":"open sourse/Arh.py","file_name":"Arh.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185415056","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom hrtencent.items import HrtencentItem\n\nclass HrtencentspiderSpider(scrapy.Spider):\n name = 'hrtencentSpider'\n allowed_domains = ['tencent.com']\n offset=0\n url=\"https://hr.tencent.com/position.php?lid=&tid=&start=\"\n surl=url+str(offset)\n start_urls = [surl,]\n\n\n def parse(self, response):\n for node in response.xpath('//tr[@class=\"even\"]|//tr[@class=\"odd\"]'):\n item=HrtencentItem()\n\n nameLength=len(node.xpath(\"./td[1]/a/text()\"))\n item['name']=node.xpath(\"./td[1]/a/text()\").extract()[0] if nameLength>0 else '空'\n\n classifyLength=len(node.xpath(\"./td[2]/text()\"))\n item['classify']=node.xpath(\"./td[2]/text()\").extract()[0] if classifyLength>0 else '空'\n\n numLength=len(node.xpath(\"./td[3]/text()\"))\n item['num']=node.xpath(\"./td[3]/text()\").extract()[0] if numLength>0 else '空'\n\n placeLength=len(node.xpath(\"./td[4]/text()\"))\n item['place'] = node.xpath(\"./td[4]/text()\").extract()[0] if placeLength> 0 else '空'\n\n applytimeLength=len(node.xpath(\"./td[5]/text()\"))\n item['applytime']=node.xpath(\"./td[5]/text()\").extract()[0] if applytimeLength> 0 else '空'\n\n yield item\n\n\n if self.offset <=3290:\n self.offset+=10\n self.surl=self.url+str(self.offset)\n yield scrapy.Request(self.surl,callback=self.parse)\n\n","sub_path":"hrtencent/spiders/hrtencentSpider.py","file_name":"hrtencentSpider.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"179726719","text":"# MyCart Module\n\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nimport sys\nfrom sqlalchemy.sql import func\n\nclass MyCart:\n\n db_url='mysql://root:root@localhost:3306/project'\n engine=create_engine(db_url)\n Base=declarative_base()\n\n class User(Base):\n __tablename__='user'\n id=Column('uid',Integer,primary_key=True)\n uname=Column('uname',String(35))\n password=Column('password',String(35))\n cart=relationship('Cart',uselist=False)\n\n\n class Admin(Base):\n __tablename__='admin'\n id=Column('aid',Integer,primary_key=True)\n 
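# Side note on the declarative models being defined here: relationship('Cart',
# uselist=False) on User makes user.cart a scalar one-to-one attribute rather
# than a list. A minimal, self-contained sketch of that pattern -- the Owner
# and Basket names are illustrative, not the module's own:
from sqlalchemy import Column, Integer, ForeignKey, create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Owner(Base):
    __tablename__ = 'owner'
    id = Column(Integer, primary_key=True)
    basket = relationship('Basket', uselist=False)  # scalar, not a list

class Basket(Base):
    __tablename__ = 'basket'
    id = Column(Integer, primary_key=True)
    owner_id = Column(Integer, ForeignKey('owner.id'))

engine = create_engine('sqlite:///:memory:')  # throwaway in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Owner(id=1), Basket(id=1, owner_id=1)])
session.commit()
print(session.query(Owner).get(1).basket.id)  # -> 1, via the one-to-one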
uname=Column('uname',String(35))\n password=Column('password',String(35))\n cate=relationship('Category',uselist=False)\n prod=relationship('Product',uselist=False)\n\n class Category(Base):\n __tablename__='category'\n id=Column('cid',Integer,primary_key=True)\n cname=Column('cname',String(35))\n admin=Column(Integer,ForeignKey('admin.aid'))\n\n class Product(Base):\n __tablename__='product'\n id=Column('pid',Integer,primary_key=True)\n pname=Column('pname',String(35))\n amount=Column('amount',Integer())\n description=Column('description',String(35))\n admin=Column(Integer,ForeignKey('admin.aid'))\n\n\n\n class Cart(Base):\n __tablename__='cart'\n user=Column(Integer,ForeignKey('user.uid'))\n id=Column('cartid',Integer,primary_key=True)\n pname=Column('pname',String(35))\n amount=Column('amount',Integer())\n\n\n Base.metadata.create_all(engine)\n\n Session=sessionmaker(bind=engine)\n session=Session()\n\n \n\n def user_register(self):\n try:\n\n # Insert \n uid=int(input('Enter user id :'))\n uname=input('Enter user username :')\n pwd=input('Enter user password :')\n\n insData=self.User(id=uid,uname=uname,password=pwd)\n \n self.session.add(insData)\n self.session.commit()\n print('\\nUser record is inserted...')\n except ValueError as e:\n print('\\nPlease enter only number...')\n except Exception as e:\n print(e)\n finally:\n self.session.close()\n \n def admin_register(self):\n try:\n\n # Insert \n uid=int(input('Enter admin id :'))\n uname=input('Enter admin username :')\n pwd=input('Enter admin password :')\n \n insData=self.Admin(id=uid,uname=uname,password=pwd)\n \n self.session.add(insData)\n self.session.commit()\n print('\\nAdmin record is inserted...')\n except ValueError as e:\n print('\\nPlease enter only number...')\n except Exception as e:\n print(e)\n finally:\n self.session.close()\n\n def add_category(self):\n try:\n \n disData=self.session.query(self.Category).all()\n category_list=[]\n for data in disData:\n category_list.append(data.cname)\n \n print('Category List :',category_list,'\\n')\n\n # Insert \n cid=int(input('Enter category id :'))\n cname=input('Enter category name :')\n\n adminData=self.session.query(self.Admin).all()\n \n admin_list=[]\n for data in adminData:\n admin_list.append(data.id)\n print('\\nAdmin List :',admin_list,'\\n')\n\n admin=int(input('Select admin :'))\n insData=self.Category(id=cid,cname=cname,admin=admin)\n \n self.session.add(insData)\n self.session.commit()\n print('\\nCategory record is inserted...')\n \n except ValueError as e:\n print('\\nPlease enter only number...')\n except Exception as e:\n print(e)\n finally:\n self.session.close()\n\n\n def add_product(self):\n try:\n \n disData=self.session.query(self.Product).all()\n product_list=[]\n for data in disData:\n product_list.append(data.pname)\n \n print('Product List :',product_list,'\\n')\n\n # Insert \n pid=int(input('Enter product id :'))\n pname=input('Enter product name :')\n amount=int(input('Enter amount :'))\n desc=input('Enter description :')\n \n adminData=self.session.query(self.Admin).all()\n admin_list=[]\n for data in adminData:\n admin_list.append(data.id)\n print('\\nAdmin List :',admin_list,'\\n')\n\n admin=int(input('Select admin id :'))\n insData=self.Product(id=pid,pname=pname,amount=amount,description=desc,admin=admin)\n \n self.session.add(insData)\n self.session.commit()\n print('\\nCategory record is inserted...')\n \n except ValueError as e:\n print('\\nPlease enter only number...')\n except Exception as e:\n print(e)\n finally:\n 
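# The prompt/insert/commit/close cycle above is repeated almost verbatim by
# user_register, admin_register, add_category and add_product. A hedged
# refactoring sketch -- commit_or_report is an illustrative name, not part of
# this module -- that also keeps the session usable after a failed insert:
def commit_or_report(session, obj):
    try:
        session.add(obj)
        session.commit()
        return True
    except Exception as exc:
        session.rollback()  # undo the failed flush so the session stays usable
        print(exc)
        return False
    finally:
        session.close()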
self.session.close()\n\n\n\n def add_to_cart(self):\n try:\n \n\n # Display user\n userData=self.session.query(self.User).all()\n user_list=[]\n for data in userData:\n user_list.append(data.id)\n print('\\nUser List :',user_list,'\\n')\n\n user=int(input('Select user id :'))\n \n while True: \n print('\\n1: Add Cart \\23: Exit')\n chk=int(input('\\nEnter your chooice :'))\n \n if(chk==1):\n\n # Insert \n cartid=int(input('\\nEnter cart id :'))\n\n # Cart Name\n disData=self.session.query(self.Cart).all()\n cart_list=[]\n for data in disData:\n cart_list.append(data.pname)\n \n print('Cart List :',cart_list,'\\n')\n\n\n\n # Display product name\n prodData=self.session.query(self.Product).all()\n prod_dict={}\n for data in prodData:\n prod_dict[data.pname]= data.amount\n print('\\nProduct List :',prod_dict,'\\n')\n \n pname=input('Enter product name :')\n\n amount=int(input('Enter product amount :'))\n \n \n insData=self.Cart(user=user,id=cartid,pname=pname,amount=amount)\n \n self.session.add(insData)\n self.session.commit()\n print('\\n Add into cart...')\n elif(chk==2):\n sys.exit()\n else:\n print(\"Enter valid number....\")\n except ValueError as e:\n print('\\nPlease enter only number...')\n except Exception as e:\n print(e)\n finally:\n self.session.close()\n\n\n def total_bill(self):\n try:\n\n total_product=self.session.query(self.Cart).count()\n print('Total Product :',total_product)\n\n total=self.session.query(self.Cart).all()\n\n bill=0\n for amt in total:\n bill +=amt.amount\n \n print('Total bill is :',bill)\n \n\n # Truncate table\n\n self.session.query(self.Cart).delete()\n self.session.commit()\n except Exception as e:\n print(e)\n finally:\n self.session.close()\n","sub_path":"MyCart.py","file_name":"MyCart.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"638602480","text":"import pymel.core as pm\n\ndef scaleControl(x=True,y=True,z=True):\n\t'''\n\tUsage:\n\t\tscaleControl()\n\t'''\n\tif len(pm.ls(sl=True))==0:\n\t\tpm.error('select something numbnuts!')\n\tpm.promptDialog(m='Enter new scale value')\n\tval = float(pm.promptDialog(q=True))\n\tfor transform in pm.ls(sl=True,type='transform'):\n\t\tshapes = transform.getShapes()\n\t\tcvs = []\n\t\tfor shape in shapes:\n\t\t\tcvs = cvs + pm.ls(shape.cv[0:], fl=True)\n\t\tpm.scale(cvs, val*x, val*y, val*z, pivot=transform.getRotatePivot(ws=True), r=True)","sub_path":"maya/_tests_/scaleControl.py","file_name":"scaleControl.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494712152","text":"# -*- coding: utf-8 -*-\nfrom coplay import models\nfrom coplay.models import Discussion, Feedback, LikeLevel, Decision, Task\nfrom django import forms\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.mail.message import EmailMessage\nfrom django.forms.extras.widgets import SelectDateWidget\nfrom django.http.response import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.views import generic\n\nMAX_MESSAGE_INPUT_CHARS = 900\n\n# Create your views here.\ndef root(request):\n return render(request, 'coplay/co_play_root.html', {'rtl': 'dir=\"rtl\"'})\n \n\nclass IndexView(generic.ListView):\n model = Discussion\n template_name = 
'coplay/discussion_list.html'\n context_object_name = 'latest_discussion_list'\n \n\n def get_queryset(self):\n return Discussion.objects.order_by('-locked_at')\n \n \nclass AddFeedbackForm(forms.Form):\n content = forms.CharField(max_length=MAX_MESSAGE_INPUT_CHARS, widget=forms.Textarea(attrs= { 'rows': '3'}))\n feedbabk_type = forms.ChoiceField( choices=Feedback.FEEDBACK_TYPES)\n\n\nclass UpdateDiscussionForm(forms.Form):\n description = forms.CharField(max_length=MAX_MESSAGE_INPUT_CHARS, widget=forms.Textarea(attrs= {'rows': '3'}))\n \n \nclass AddDecisionForm(forms.Form):\n content = forms.CharField(max_length=MAX_MESSAGE_INPUT_CHARS, label='', widget=forms.Textarea(attrs= { 'rows': '3', 'class': 'form-control'}))\n\n\nclass VoteForm(forms.Form):\n value = forms.ChoiceField(widget = forms.RadioSelect, choices=LikeLevel.level)\n\n\nclass AddTaskForm(forms.Form):\n goal_description = forms.CharField(max_length=MAX_MESSAGE_INPUT_CHARS, label='', widget=forms.Textarea(attrs= { 'rows': '3', 'class': 'form-control'}))\n target_date = forms.DateTimeField( widget = SelectDateWidget)\n \nclass UpdateTaskForm(forms.Form):\n status_description = forms.CharField(max_length=MAX_MESSAGE_INPUT_CHARS, widget=forms.Textarea(attrs= {'rows': '3'}))\n\n\ndef discussion_details(request, pk):\n try:\n discussion = Discussion.objects.get(id=int(pk))\n except Discussion.DoesNotExist:\n return HttpResponseRedirect('coplay_root')\n \n list_encourage =discussion.feedback_set.all().filter( feedbabk_type = Feedback.ENCOURAGE ).order_by( \"-created_at\")\n list_cooperation =discussion.feedback_set.all().filter( feedbabk_type = Feedback.COOPERATION).order_by( \"-created_at\")\n list_intuition =discussion.feedback_set.all().filter( feedbabk_type = Feedback.INTUITION ).order_by( \"-created_at\")\n list_advice =discussion.feedback_set.all().filter( feedbabk_type = Feedback.ADVICE ).order_by( \"-created_at\")\n list_decision =discussion.decision_set.all().order_by( \"-created_at\") \n list_tasks =discussion.task_set.all().order_by( \"target_date\")\n like_levels = LikeLevel.level \n \n vote_form = None\n feedback_form = None\n description_form = None\n add_decision_form = None\n add_task_form = None\n if request.user.is_authenticated(): \n if discussion.is_active(): \n if request.user == discussion.owner:\n description_form = UpdateDiscussionForm()\n add_decision_form = AddDecisionForm()\n else:\n feedback_form =AddFeedbackForm()\n vote_form = VoteForm()\n \n add_task_form = AddTaskForm()\n \n page_name = u'עוזרים ב '+ discussion.title \n \n return render(request, 'coplay/discussion_detail.html', \n { 'discussion' : discussion , \n 'list_encourage' : list_encourage , \n 'list_cooperation': list_cooperation, \n 'list_intuition' : list_intuition ,\n 'list_advice' : list_advice ,\n 'list_decision' : list_decision ,\n 'list_tasks' : list_tasks ,\n 'feedback_form' : feedback_form ,\n 'description_form': description_form,\n 'add_decision_form': add_decision_form,\n 'vote_form' : vote_form ,\n 'add_task_form' : add_task_form ,\n 'like_levels' : like_levels,\n 'page_name' : page_name })\n\n\n\n\nclass NewDiscussionForm(forms.Form):\n title = forms.CharField(max_length=200, widget=forms.Textarea(attrs= { 'rows': '1', 'cols': '50'}))\n description = forms.CharField(max_length= MAX_MESSAGE_INPUT_CHARS, widget=forms.Textarea)\n\n\n \n@login_required\ndef add_discussion(request):\n if request.method == 'POST': # If the form has been submitted...\n form = NewDiscussionForm(request.POST) # A form bound to the POST data\n if 
form.is_valid(): # All validation rules pass\n # Process the data in form.cleaned_data# Process the data in form.cleaned_data\n user = request.user\n \n discussions_list = Discussion.objects.all().filter(owner =user, title = form.cleaned_data['title'])\n if discussions_list.count() != 0:\n return render(request, 'coplay/message.html', \n { 'message' : 'כבר קיים עבורך דיון באותו נושא',\n 'rtl': 'dir=\"rtl\"'})\n \n \n new_discussion = Discussion(owner = user ,\n title = form.cleaned_data['title'] ,\n description = form.cleaned_data['description'])\n new_discussion.clean()\n new_discussion.save()\n return HttpResponseRedirect(new_discussion.get_absolute_url()) # Redirect after POST\n else:\n form = NewDiscussionForm() # An unbound form\n\n\n return render(request, 'coplay/new_discussion.html', {\n 'form': form,\n 'rtl' : 'dir=\"rtl\"'\n })\n\n\n\ndef send_html_message( subject, html_content, from_email, to_list):\n msg = EmailMessage(subject, html_content, from_email, to_list)\n msg.content_subtype = \"html\" # Main content is now text/html\n msg.send()\n\n\ndef discussion_email_updates(discussion, subject, logged_in_user):\n\n attending_list = discussion.get_attending_list(True)\n html_message = render_to_string(\"coplay/email_discussion_update.html\", { 'ROOT_URL': 'www.kuterless.org.il', \n 'discussion': discussion })\n \n for attensdent in attending_list:\n if attensdent.email and attensdent != logged_in_user:\n send_html_message(subject, html_message, 'do-not-reply@kuterless.org.il', [attensdent.email])\n\n\ndef discussion_task_email_updates(task, subject, logged_in_user):\n\n attending_list = task.parent.get_attending_list(True)\n \n html_message = render_to_string(\"coplay/email_task_update.html\", { 'ROOT_URL': 'www.kuterless.org.il', \n 'task': task })\n \n for attensdent in attending_list:\n if attensdent.email and attensdent != logged_in_user:\n send_html_message(subject, html_message, 'do-not-reply@kuterless.org.il', [attensdent.email])\n\n\n\n \n \n\n\n@login_required\ndef update_discussion(request, pk):\n try:\n discussion = Discussion.objects.get(id=int(pk))\n except Discussion.DoesNotExist:\n return render(request, 'coplay/message.html', \n { 'message' : 'הדיון איננו קיים',\n 'rtl': 'dir=\"rtl\"'})\n \n \n \n if request.method == 'POST': # If the form has been submitted...\n form = UpdateDiscussionForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n # Process the data in form.cleaned_data# Process the data in form.cleaned_data\n user = request.user\n if user == discussion.owner:\n discussion.update_description( form.cleaned_data['description'] )\n discussion_email_updates(discussion, 'עידכון מטרות בפעילות שבהשתתפותך', request.user)\n \n return HttpResponseRedirect(discussion.get_absolute_url()) # Redirect after POST\n return render(request, 'coplay/message.html', \n { 'message' : 'רק בעל הדיון מורשה לעדכן אותו',\n 'rtl': 'dir=\"rtl\"'})\n \n \n return render(request, 'coplay/message.html', \n { 'message' : ' לא הוזן תיאור חדש או שהוזן תיאור ארוך מדי ',\n 'rtl': 'dir=\"rtl\"'})\n \n \n\n@login_required\ndef delete_discussion(request, pk):\n try:\n discussion = Discussion.objects.get(id=int(pk))\n except Discussion.DoesNotExist:\n return render(request, 'coplay/message.html', \n { 'message' : 'הדיון איננו קיים',\n 'rtl': 'dir=\"rtl\"'})\n \n user = request.user \n if user == discussion.owner:\n discussion.delete()\n return HttpResponseRedirect('discussions_list') # Redirect to discussions list\n \n return render(request, 
'coplay/message.html', \n                  { 'message'    :  'רק בעל הדיון מורשה למחוק אותו',\n                   'rtl': 'dir=\"rtl\"'})\n    \n\n    \n@login_required\ndef add_feedback(request, pk):\n    if request.method == 'POST': # If the form has been submitted...\n        form = AddFeedbackForm(request.POST) # A form bound to the POST data\n        if form.is_valid(): # All validation rules pass\n            # Process the data in form.cleaned_data\n            user = request.user\n            try:\n                discussion = Discussion.objects.get(id=int(pk))\n            except Discussion.DoesNotExist:\n                return HttpResponse('Discussion not found')\n            if user != discussion.owner and form.cleaned_data['feedbabk_type'] and form.cleaned_data['content']:\n                discussion.add_feedback( user, form.cleaned_data['feedbabk_type'] , form.cleaned_data['content'])\n                discussion_email_updates(discussion, 'התקבלה תגובה חדשה בפעילות שבהשתתפותך', request.user)\n                \n                \n            return HttpResponseRedirect(discussion.get_absolute_url()) # Redirect after POST\n        return render(request, 'coplay/message.html', \n                  { 'message'    :  'לא הזנת תגובה',\n                   'rtl': 'dir=\"rtl\"'})\n    return HttpResponse('Request NA')\n\n@login_required\ndef add_decision(request, pk):\n    if request.method == 'POST': # If the form has been submitted...\n        form = AddDecisionForm(request.POST) # A form bound to the POST data\n        if form.is_valid(): # All validation rules pass\n            # Process the data in form.cleaned_data\n            try:\n                discussion = Discussion.objects.get(id=int(pk))\n            except Discussion.DoesNotExist:\n                return HttpResponse('Discussion not found')\n            user = request.user\n            if user == discussion.owner:\n                decisions_list = Decision.objects.all().filter( content = form.cleaned_data['content'], parent = discussion)\n                if decisions_list.count() != 0:\n                    return render(request, 'coplay/message.html', \n                  { 'message'    :  'כבר רשומה עבורך החלטה באותו נושא',\n                   'rtl': 'dir=\"rtl\"'})\n                    \n                discussion.add_decision( form.cleaned_data['content'] )\n                discussion_email_updates(discussion, 'התקבלה התלבטות חדשה בפעילות שבהשתתפותך', request.user)\n                \n                \n                \n            else:\n                return HttpResponse('Forbidden access')\n            return HttpResponseRedirect(discussion.get_absolute_url()) # Redirect after POST\n        else:\n            return render(request, 'coplay/message.html', \n                  { 'message'    :  'בחר אחת מהאפשרויות',\n                   'rtl': 'dir=\"rtl\"'})\n    return HttpResponseRedirect('coplay_root') # Redirect after POST\n    \n@login_required\ndef vote(request, pk):\n    if request.method == 'POST': # If the form has been submitted...\n        form = VoteForm(request.POST) # A form bound to the POST data\n        if form.is_valid(): # All validation rules pass\n            # Process the data in form.cleaned_data\n            try:\n                decision = Decision.objects.get(id=int(pk))\n            except Decision.DoesNotExist:\n                return render(request, 'coplay/message.html', \n                  { 'message'    :  'משימה לא ידועה',\n                   'rtl': 'dir=\"rtl\"'})\n            user = request.user\n            if user != decision.parent.owner:\n                decision.vote( user, int(form.cleaned_data['value']) )\n            return HttpResponseRedirect( decision.parent.get_absolute_url()) # Redirect after POST\n        return render(request, 'coplay/message.html', \n                  { 'message' : 'Please select a vote value'})\n        \n        \n    return HttpResponse('Forbidden request not via form')\n    \n    \n    \n\n@login_required\ndef add_task(request, pk):\n    if request.method == 'POST': # If the form has been submitted...\n        form = AddTaskForm(request.POST) # A form bound to the POST data\n        if form.is_valid(): # All validation rules pass\n            # Process the data in 
form.cleaned_data\n user = request.user\n try:\n discussion = Discussion.objects.get(id=int(pk))\n except Discussion.DoesNotExist:\n return HttpResponse('Discussion not found')\n target_date = form.cleaned_data['target_date']\n if target_date <= timezone.now():\n return render(request, 'coplay/message.html', \n { 'message' : 'תאריך היעד חייב להיות בעתיד' + str(target_date),\n 'rtl': 'dir=\"rtl\"'})\n \n tasks_list = Task.objects.all().filter(responsible =user, goal_description = form.cleaned_data['goal_description'], parent = discussion)\n if tasks_list.count() != 0:\n return render(request, 'coplay/message.html', \n { 'message' : 'כבר רשומה עבורך משימה באותו נושא',\n 'rtl': 'dir=\"rtl\"'})\n \n new_task = discussion.add_task( user, \n form.cleaned_data['goal_description'] ,\n form.cleaned_data['target_date'] )\n \n discussion_task_email_updates(new_task, 'נוספה משימה חדשה בפעילות שבהשתתפותך', request.user)\n \n \n return HttpResponseRedirect(new_task.get_absolute_url()) # Redirect after POST\n\n return HttpResponseRedirect('coplay_root') # Redirect after POST\n\n \n \ndef task_details(request, pk):\n try:\n task = Task.objects.get(id=int(pk))\n except Task.DoesNotExist:\n return render(request, 'coplay/message.html', \n { 'message' : 'משימה שאיננה קיימת',\n 'rtl': 'dir=\"rtl\"'})\n \n close_possible = False\n update_task_form = None \n \n \n \n \n if request.user.is_authenticated(): \n user = request.user \n if task.get_status() == task.STARTED:\n if user == task.responsible:\n update_task_form = UpdateTaskForm()\n else:\n close_possible = True\n \n \n return render(request, 'coplay/task_detail.html', \n { 'task' : task ,\n 'update_task_form' : update_task_form,\n 'close_possible' : close_possible,\n 'rtl' : 'dir=\"rtl\"',\n 'page_name': u'המשימה:'+ task.goal_description })\n \n \n \n \n@login_required \ndef update_task_description(request, pk): \n if request.method == 'POST': # If the form has been submitted...\n form = UpdateTaskForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n # Process the data in form.cleaned_data# Process the data in form.cleaned_data\n try:\n task = Task.objects.get(id=int(pk))\n except Task.DoesNotExist:\n return HttpResponse('Task not found')\n user = request.user\n if user == task.responsible:\n task.update_status_description( form.cleaned_data['status_description'] )\n discussion_task_email_updates(task, 'עודכנה משימה בפעילות שבהשתתפותך. 
יתכן ואפשר לסגור את המשימה', request.user)\n\n return HttpResponseRedirect(task.get_absolute_url()) # Redirect after POST\n \n return HttpResponseRedirect('coplay_root') # Redirect after POST\n \n \n@login_required \ndef close_task(request, pk): \n try:\n task = Task.objects.get(id=int(pk))\n except Task.DoesNotExist:\n return HttpResponse('Task not found')\n user = request.user\n if user != task.responsible:\n task.close( user )\n discussion_task_email_updates(task, 'הושלמה משימה בפעילות שבהשתתפותך', request.user)\n \n \n return HttpResponseRedirect(task.get_absolute_url()) # Redirect after POST\n \n\ndef get_discussions_lists():\n sorted_discussions_by_inverse_locket_at_list = Discussion.objects.all().order_by( \"-locked_at\")\n sorted_discussions_by_locket_at_list = Discussion.objects.all().order_by( \"locked_at\")\n \n active_discussions_by_urgancy_list = []\n locked_discussions_by_relevancy_list = []\n \n for discussion in sorted_discussions_by_inverse_locket_at_list:\n if not discussion.is_active():\n locked_discussions_by_relevancy_list.append(discussion)\n\n for discussion in sorted_discussions_by_locket_at_list:\n if discussion.is_active():\n active_discussions_by_urgancy_list.append(discussion)\n \n \n return active_discussions_by_urgancy_list, locked_discussions_by_relevancy_list\n\ndef get_tasks_lists():\n for task in Task.objects.all():\n task.refresh_status()\n open_tasks_list_by_urgancy_list = Task.objects.all().filter( status = Task.STARTED).order_by(\"target_date\")\n closed_tasks_list_by_relevancy_list = Task.objects.all().filter( status = Task.CLOSED).order_by(\"-closed_at\")\n missed_tasks_list_by_relevancy_list = Task.objects.all().filter( status = Task.MISSED).order_by(\"-target_date\")\n \n return open_tasks_list_by_urgancy_list, closed_tasks_list_by_relevancy_list, missed_tasks_list_by_relevancy_list\n\n \ndef get_user_fullname_or_username(user):\n full_name = user.get_full_name()\n if full_name:\n return full_name\n return user.username\n \ndef user_coplay_report(request, username = None):\n if username:\n try:\n user = User.objects.get(username = username)\n except User.DoesNotExist:\n return HttpResponse('User not found')\n else:\n user = request.user\n \n \n if user == request.user:\n page_name = u'הפעילות שלי '\n else:\n page_name = u'הפעילות של ' + get_user_fullname_or_username(user)\n \n open_tasks_list_by_urgancy_list, closed_tasks_list_by_relevancy_list, missed_tasks_list_by_relevancy_list = get_tasks_lists()\n \n active_discussions_by_urgancy_list, locked_discussions_by_relevancy_list = get_discussions_lists()\n \n user_s_open_tasks_list = []\n other_users_open_tasks_list = []\n missed_tasks_list = []\n user_closed_tasks_list = []\n \n for task in open_tasks_list_by_urgancy_list:\n if task.responsible == user:\n user_s_open_tasks_list.append(task)\n else:\n discussion = task.parent\n if user in discussion.get_attending_list(include_owner = True):\n other_users_open_tasks_list.append(task)\n\n for task in missed_tasks_list_by_relevancy_list:\n discussion = task.parent\n if user in discussion.get_attending_list(include_owner = True):\n missed_tasks_list.append(task)\n \n for task in closed_tasks_list_by_relevancy_list:\n if task.responsible == user:\n user_closed_tasks_list.append(task)\n \n \n user_discussions_active = []\n user_discussions_locked = []\n \n for discussion in active_discussions_by_urgancy_list:\n if user in discussion.get_attending_list(include_owner = True):\n user_discussions_active.append(discussion)\n \n for discussion in 
locked_discussions_by_relevancy_list:\n if user in discussion.get_attending_list(include_owner = True):\n user_discussions_locked.append(discussion)\n\n \n \n return render(request, 'coplay/coplay_report.html', \n { 'tasks_open_by_increased_time_left' : user_s_open_tasks_list ,\n 'tasks_others_open_by_increased_time_left': other_users_open_tasks_list, \n 'discussions_active_by_increase_time_left' : user_discussions_active ,\n 'discussions_locked_by_increase_locked_at' : user_discussions_locked ,\n 'tasks_closed_by_reverse_time' : user_closed_tasks_list , \n 'tasks_missed_by_reverse_time': missed_tasks_list,\n 'applicabale_user' : user ,\n 'page_name' : page_name })\n\n","sub_path":"kuterless/coplay/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42488334","text":"# Analyze.py\n\nfn = \"furan_test_vib.spm\"\ndata = []\nwith open(fn)as f:\n\tfor line in f.readlines()[30:50]:\n\t\trow = line.split()[1:3]\n\t\tfloatrow = [float(r) for r in row]\n\t\tif floatrow[-1] > 1: data.append(floatrow)\n\noutfile = \"results.csv\"\nwith open(outfile, 'w') as f:\n\tf.write(\"Frequency (cm^-1),Intensity\\n\")\n\tfor d in data: f.write(\"{},{}\\n\".format(round(d[0]),round(d[1])))\n","sub_path":"jaguar/furan/furan_test/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379056835","text":"prices = {\n'ACME': 45.23,\n'AAPL': 612.78,\n'IBM': 205.55,\n'HPQ': 37.20,\n'FB': 10.75}\n\nprint(prices.keys())\nprint(prices.values())\n\na=[\"ala\", \"ma\", \"kota\"]\nc=[\" kot \", \" ma \", \" ale \"]\n\nb=zip(a,c)\nprint(list(b))\n\nmin_price=min(zip(prices.values(),prices.keys()))\nmax_price= max(zip(prices.values(),prices.keys()))\nsorterd_dict = sorted(zip(prices.values()),reverse=True)\n\n\nprint(min_price)\nprint(max_price)\nprint(sorterd_dict)\n\n# Istnieją dwa słowniki i programista chce się dowiedzieć, jakie wspólne dane się w nich znajdują\n\npracownicy = {\n \"Dominik\":110,\n \"Bartek\":220,\n \"Karolina\":120\n}\n\ndlugi = {\n \"Dominik\":110,\n \"Anna\":120\n}\nprint(dlugi.keys() & pracownicy.keys())\nprint(pracownicy.keys()-dlugi.keys())\n\nprint(pracownicy.items() & dlugi.items())\n\n","sub_path":"dziewiaty_czer_jeden.py","file_name":"dziewiaty_czer_jeden.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286826606","text":"#!/usr/bin/env python3\nimport operator\n\nop = {\n '+': operator.add, \n '-': operator.sub,\n '*': operator.mul,\n '/': operator.floordiv,\n '^': operator.ipow,\n '&': operator.and_,\n '|': operator.or_,\n 'xor' : operator.ixor,\n}\n\ndef rotate(l, n):\n return l[n:] + l[:n]\n\ndef calculate(arg):\n # Stack for calculator\n stack = []\n tokens = arg.split()\n\n # process tokens \n for token in tokens:\n try:\n value = int(token)\n stack.append(value)\n \n except ValueError: \n if(token == \"rl\"):\n stack = rotate(stack, 1)\n else:\n val2 = int(stack.pop())\n val1 = int(stack.pop())\n \n if(token != '!'):\n func = op[token]\n \n result = func(val1, val2)\n stack.append(str(result))\n \n return int(stack[0])\n \n\ndef main():\n while True:\n print (calculate(input('rpn calc>')))\n \n\nif __name__ == '__main__':\n 
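# Worked example for calculate() above, reading tokens left to right:
#   "3 4 +"       -> stack [3] -> [3, 4] -> pop 4, pop 3 -> push 3 + 4 -> 7
#   "2 10 ^"      -> operator.ipow(2, 10) -> 1024
#   "10 4 - 6 *"  -> 10 - 4 = 6, then 6 * 6 -> 36
# Caveat as written: a leading '!' token pops two values but never assigns
# func, so it raises NameError; a '!' after another operator silently reuses
# that operator's func, and any other unlisted token raises KeyError.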
main()\n\n","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199935852","text":"# speech-and-text-recognition\nimport speech_recognition as sr\nimport pyaudio\nfrom gtts import gTTS\nimport os\nimport win32com.client as wincl\nimport win32api, sys, os\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n print(\"Speak Anything :\")\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print(\"You said : {}\".format(text))\n tts = gTTS(text, lang='en')\n tts.save(\"good.mp3\")\n os.system(\"mpg321 good.mp3\")\n speak = wincl.Dispatch(\"SAPI.SpVoice\")\n speak.Speak(text)\n except:\n print(\"Sorry could not recognize what you said\")\n \n\n\n\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419323704","text":"import matplotlib.pyplot as plt\n\ndef set_size(width,hratio=1, fraction=1, subplots=(1, 1)):\n\t\n\twidth_pt = width\n\t# Width of figure (in pts)\n\tfig_width_pt = width_pt * fraction\n\t# Convert from pt to inches\n\tinches_per_pt = 1 / 72.27\n\n\tgolden_ratio = (5**.5 - 1) / 2\n\n\t# Figure width in inches\n\tfig_width_in = fig_width_pt * inches_per_pt\n\t# Figure height in inches\n\tfig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])*hratio\n\n\treturn (fig_width_in, fig_height_in)","sub_path":"plot_tools.py","file_name":"plot_tools.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192296644","text":"#!/usr/bin/env python3\n\"\"\"\nGeorgia Institute of Technology - CS1301\nHW05 - Tuples and Modules\n\"\"\"\n\n__author__ = \"\"\" Brandon Wei \"\"\"\n__collab__ = \"\"\" I worked on the homework assignment alone, and referred to\n https://www.pythoncentral.io/how-to-sort-a-list-tuple-or-object-with-sorted-in-python/\"\"\"\n\nimport math\nimport calendar\n\"\"\"\nFunction name: six_flags\nParameters: a list of tuples\nReturns: a tuple\n\"\"\"\n\ndef six_flags(rides):\n solution = ()\n sorted_rides = sorted(rides, key = getKey)\n for tuple in sorted_rides:\n solution += tuple[0],\n return solution\n\n#use comparator to sort\ndef getKey(i):\n return i[1]\n\"\"\"\nFunction name: medical_center\nParameters: a list of tuples, a tuple\nReturns: a tuple\n\"\"\"\n\ndef medical_center(clinics, location):\n min = -1\n closest_clinic = clinics[0]\n for clinic in clinics:\n distance = math.sqrt((clinic[1] - location[0])**2 + (clinic[2] - location[1])**2)\n if(distance < min or min < 0):\n min = distance\n closest_clinic = clinic\n return (closest_clinic[0], round(min, 2))\n\n\n\"\"\"\nFunction name: caffeinated\nParameters: a list of tuples, a list of strings\nReturn value: a tuple of strings\n\"\"\"\n\ndef caffeinated(drinks_list, flavors_list):\n solution = ()\n for i in range(len(flavors_list)):\n flavors_list[i] = flavors_list[i].lower()\n\n for drinks in drinks_list:\n if(drinks[1].lower() in flavors_list and drinks[2].lower() in flavors_list):\n solution += drinks[0],\n return solution\n\n\"\"\"\nFunction name: study_abroad\nParameters: a list of tuples, an int (0-6 inclusive)\nReturns: a list of tuples\n\"\"\"\n\ndef study_abroad(dates, day):\n solution = []\n for date in dates:\n if(calendar.monthrange(date[0], date[1])[0] != day):\n solution += (date[1], date[0]),\n return 
solution\n\n\"\"\"\nFunction name: simplify\nParameters: a list of strings\nReturns: a list\n\"\"\"\n\ndef simplify(fractions):\n solution = []\n for fraction in fractions:\n nums = fraction.split(\"/\")\n factor = math.gcd(int(nums[0]), int(nums[1]) )\n solution += (factor, str(int(int(nums[0])/factor)) + \"/\" + str(int(int(nums[1])/factor))),\n return solution\n\n\n\n\n\n\n\n","sub_path":"HW05.py","file_name":"HW05.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420882034","text":"from flask import Blueprint, request, jsonify\n\nfrom . import db\n\nbp = Blueprint(\"tasks\", \"tasks\", url_prefix=\"\")\n\n@bp.route('/mytasks', methods=[\"GET\",\"POST\"])\ndef myTasks():\n if request.method == \"POST\":\n data = request.get_json()\n email = data['email']\n conn = db.get_db()\n cursor = conn.cursor()\n cursor.execute(f\"SELECT id FROM users WHERE email = '{email}'\")\n u_id = cursor.fetchone()[0]\n cursor.execute(f\"SELECT id, task ,t_date, t_time, description FROM tasks WHERE user_id = {u_id}\")\n tasks = cursor.fetchall()\n return jsonify(dict(tasks = [dict(id = id, task = task, t_date = t_date, t_time = t_time, description = description) for id, task, t_date, t_time, description in tasks]))\n\n@bp.route('/addtask', methods=[\"GET\",\"POST\"])\ndef addTask():\n if request.method == \"POST\":\n data = request.get_json()\n task = data['task']\n date = str(data['task_date'].split('T',1)[0])\n time = str(data['task_time'])\n email = data['email']\n description = data['description']\n conn = db.get_db()\n cursor = conn.cursor()\n cursor.execute(f\"SELECT id FROM users WHERE email = '{email}';\")\n id = cursor.fetchone()[0]\n cursor.execute(f\"INSERT INTO tasks VALUES(DEFAULT, {id}, '{task}', '{date}', '{time}', '{description}')\")\n conn.commit()\n conn.close()\n return {'status': True}","sub_path":"backend/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636129275","text":"from django.conf import settings\nfrom datadog import initialize\noptions = {\n 'api_key':settings.DATADOG_API_KEY,\n 'app_key':settings.DATADOG_APP_KEY\n}\ninitialize(**options)\nfrom datadog import api\nfrom hipnotify import Room\nfrom django.conf import settings\n\nclass Message(object):\n \"\"\"Generic Message MixIn.\n \"\"\"\n systems = []\n \n def __init__(self, action, username, objs, tags=None):\n object_type = objs[0].__class__.__name__.lower()\n operation = \"save\"\n\n if tags is None:\n tags = []\n\n for obj in objs:\n tags.append('object:%s' % obj.name)\n\n tags.append('object_type:%s' % object_type)\n tags.append('application:devault')\n\n self.title = '%s performed' % action\n self.text = '%s performed by %s' % (action, username)\n self.tags = tags\n self.objs = objs\n self.username = username\n if settings.DATADOG_API_KEY:\n self.systems.append(\"send_message_datadog\")\n if settings.HIPCHAT_TOKEN:\n self.systems.append(\"send_message_hipchat\")\n\n def send_message_hipchat(self):\n room = Room(settings.HIPCHAT_TOKEN, \n settings.HIPCHAT_ROOM_ID)\n message = '%s %s %s %s' % (self.username, self.title, self.text, ' '.join(self.tags))\n room.notify(message)\n\n def send_message_datadog(self):\n api.Event.create(title=self.title, \n text=self.text, tags=self.tags)\n\n def send_message(self):\n for system in self.systems:\n getattr(self, 
system)()\n\n\n\n","sub_path":"devault/devault/notifications/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165287834","text":"# -*- coding:utf-8 -*-\n# author:yufeixu\n# datetime:2018/9/27 15:26\n# software: PyCharm\n\nimport numpy as np # fast operations on structured arrays\nimport pandas as pd # data analysis and processing\nimport matplotlib.pyplot as plt # plotting\nfrom sklearn import datasets # machine learning library\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef euclidean_distance():\n    vec1 = np.array([1, 2, 3])\n    vec2 = np.array([4, 5, 6])\n    # Euclidean distance\n    print(np.sqrt(np.sum(np.square(vec1 - vec2))))\n\n\ndef load_datasets():\n    # dataset labels: 0-setosa, 1-versicolor, 2-virginica\n    scikit_iris = datasets.load_iris()\n    # convert to a pandas DataFrame to make the data easier to inspect\n    iris = pd.DataFrame(data=np.c_[scikit_iris['data'], scikit_iris['target']],\n                        columns=np.append(scikit_iris.feature_names, ['y']))\n    # print(iris.head(2))\n    # print(iris.isnull().sum())\n    # print(iris.groupby('y').count())\n\n    return scikit_iris, iris\n\nif __name__ == '__main__':\n    # euclidean_distance()\n    scikit_iris, iris = load_datasets()\n    # train the model on all features\n    X = iris[scikit_iris.feature_names]\n    # label\n    y = iris['y']\n    # split into train and test sets\n    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)\n    # step 1: choose the model\n    # K=15\n    knn = KNeighborsClassifier(n_neighbors=15)\n    # step 2: fit X, y\n    knn.fit(X_train, y_train)\n    # step 3: predict on new data\n    y_pred_on_train = knn.predict(X_train)\n    y_pred_on_test = knn.predict(X_test)\n    # print(metrics.accuracy_score(y_train, y_pred_on_train))\n\n    print('accuracy: {}'.format(metrics.accuracy_score(y_test, y_pred_on_test)))","sub_path":"abu_ml-master/rewrite/ch1_Acquaintance/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441971005","text":"# -*- coding: utf-8 -*-\n\n\n\nfrom al4ea.al_modules import *\nimport networkx as nx\nimport pandas as pd\n\n\ndef construct_graph(data_dir, edge_mode):\n    # construct graph\n    kg1, kg2, _ = read_kgs_n_links(data_dir)\n    g = nx.Graph()\n    g.add_nodes_from(kg1.entities_list)\n    self_ent_pair = [(ent, ent) for ent in kg1.entities_list]\n    triples_list = kg1.relation_triples_list\n\n    if edge_mode == \"add_inverse\" or edge_mode == \"add_inverse_func\":\n        inv_triple_list = [(tri[2], \"inv_\" + tri[1], tri[0]) for tri in triples_list]\n        triples_list += inv_triple_list\n\n    if edge_mode == \"origin\":\n        ent_pairs = [(tri[0], tri[2]) for tri in triples_list]\n    else:\n        ent_pairs = [(tri[2], tri[0]) for tri in triples_list]\n\n    if edge_mode == \"basic_func\" or edge_mode == \"add_inverse_func\":\n        triple_df = pd.DataFrame(triples_list, columns=[\"head\", \"relation\", \"tail\"])\n        relation_types = triple_df[\"relation\"].unique()\n        rel2func_map = dict()\n        for rel in relation_types:\n            triples_of_rel = triple_df[triple_df[\"relation\"] == rel]\n            func = len(triples_of_rel[\"head\"].unique()) / len(triples_of_rel)\n            rel2func_map[rel] = func\n        all_pairs = ent_pairs + self_ent_pair\n        edge_weights = [rel2func_map[tri[1]] for tri in triples_list] + [1.0] * len(self_ent_pair)\n        # attach one weight per edge; add_edges_from(..., weight=edge_weights)\n        # would set the whole list as the weight of every edge\n        g.add_weighted_edges_from([(u, v, w) for (u, v), w in zip(all_pairs, edge_weights)])\n    else:\n        g.add_edges_from(ent_pairs + self_ent_pair)\n    return g\n\n\n\ndef measure_uncertainty(simi_mtx, topK=5, measure=\"entropy\"): # measure options: entropy, margin, variation_ratio, 
similarity\n sorted_simi_mtx = np.sort(simi_mtx, axis=-1)\n if measure == \"entropy\":\n topk_simi_mtx = sorted_simi_mtx[:, -topK:]\n prob_mtx = topk_simi_mtx / topk_simi_mtx.sum(axis=1, keepdims=True)\n uncertainty = - np.sum(prob_mtx*np.log2(prob_mtx), axis=1)\n elif measure == \"margin\":\n margin = sorted_simi_mtx[:, -1] - sorted_simi_mtx[:, -2]\n uncertainty = - margin # larger margin means small uncertainty\n uncertainty = uncertainty - uncertainty.min()\n elif measure == \"variation_ratio\":\n topk_simi_mtx = sorted_simi_mtx[:, -topK:]\n prob_mtx = topk_simi_mtx / topk_simi_mtx.sum(axis=1, keepdims=True)\n uncertainty = 1.0 - prob_mtx[:, -1]\n elif measure == \"similarity\":\n uncertainty = - sorted_simi_mtx[:, -1]\n else:\n raise Exception(\"unknown uncertainty measure\")\n return uncertainty\n\n\n","sub_path":"al4ea/strategies/strategy_util.py","file_name":"strategy_util.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556145778","text":"import smtplib\ndef send_email(user, pwd, recipient, subject, body):\n FROM = user\n TO = recipient if isinstance(recipient, list) else [recipient]\n SUBJECT = subject\n TEXT = body\n\n # Prepare actual message\n message = \"\"\"From: %s\\nTo: %s\\nSubject: %s\\n\\n%s\n \"\"\" % (FROM, \", \".join(TO), SUBJECT, TEXT)\n try:\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.login(user, pwd)\n server.sendmail(FROM, TO, message)\n server.close()\n print ('successfully sent the mail')\n except Exception as ex:\n print (\"failed to send mail, reason is \"+ str(ex))\n\n \nsend_email('concur@grabtaxi.com', 'tapunkaujkuvwdao', 'yang.shulin@hitachiconsulting.com', 's', 'testing')","sub_path":"Email/EmailFunction.py","file_name":"EmailFunction.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653492859","text":"from types import MethodType\nimport subprocess\nimport os\nimport uuid\nimport logging\n\nfrom Qt.QtWidgets import QAction\nfrom Qt.QtWidgets import QFileDialog\nfrom Qt import QtGui, QtCore\n\nfrom PyFlow.UI.Canvas.UINodeBase import UINodeBase\nfrom PyFlow.UI.Views.CodeEditor import CodeEditor\nfrom PyFlow.ConfigManager import ConfigManager\n\nlogger = logging.getLogger(None)\n\n\nINITIAL_CODE = \"\"\"\n\nfrom PyFlow.Core.Common import *\n\ndef prepareNode(node):\n node.createInputPin(pinName=\"inExec\", dataType=\"ExecPin\", foo=node.processNode)\n node.createOutputPin(pinName=\"outExec\", dataType=\"ExecPin\")\n node.createInputPin(pinName=\"a\", dataType=\"IntPin\", defaultValue=0, foo=None, structure=PinStructure.Single, constraint=None, structConstraint=None, supportedPinDataTypes=[], group=\"\")\n node.createInputPin(pinName=\"b\", dataType=\"IntPin\", defaultValue=0, foo=None, structure=PinStructure.Single, constraint=None, structConstraint=None, supportedPinDataTypes=[], group=\"\")\n node.createOutputPin(pinName=\"c\", dataType=\"IntPin\", defaultValue=0, structure=PinStructure.Single, constraint=None, structConstraint=None, supportedPinDataTypes=[], group=\"\")\n\n\ndef compute(node):\n a = node.getData(\"a\")\n b = node.getData(\"b\")\n node.setData(\"c\", a ** b)\n node[\"outExec\"].call()\n\n\"\"\"\n\n\nclass UIPythonNode(UINodeBase):\n watcher = QtCore.QFileSystemWatcher()\n\n def __init__(self, raw_node):\n super(UIPythonNode, self).__init__(raw_node)\n\n self.actionEdit = 
self._menu.addAction(\"Edit\")\n self.actionEdit.triggered.connect(self.onEdit)\n self._filePath = ''\n\n self.fileHandle = None\n self.currentEditorProcess = None\n self.actionExport = self._menu.addAction(\"Export\")\n self.actionExport.triggered.connect(self.onExport)\n self.actionImport = self._menu.addAction(\"Import\")\n self.actionImport.triggered.connect(self.onImport)\n\n def onExport(self):\n savePath, selectedFilter = QFileDialog.getSaveFileName(filter=\"Python node data (*.py)\")\n if savePath != \"\":\n with open(savePath, 'w') as f:\n f.write(self.nodeData)\n logger.info(\"{0} data successfully exported!\".format(self.getName()))\n\n def onImport(self):\n openPath, selectedFilter = QFileDialog.getOpenFileName(filter=\"Python node data (*.py)\")\n if openPath != \"\":\n with open(openPath, 'r') as f:\n dataString = f.read()\n self.tryApplyNodeData(dataString)\n\n def mouseDoubleClickEvent(self, event):\n super(UIPythonNode, self).mouseDoubleClickEvent(event)\n self.onEdit()\n\n @property\n def compute(self, *args, **kwargs):\n return self._rawNode.compute\n\n @compute.setter\n def compute(self, value):\n self._rawNode.compute = value\n\n @property\n def nodeData(self):\n return self._rawNode.nodeData\n\n def postCreate(self, jsonTemplate=None):\n super(UIPythonNode, self).postCreate(jsonTemplate)\n self.setHeaderHtml(self.getName())\n\n @nodeData.setter\n def nodeData(self, value):\n self._rawNode.nodeData = value\n\n def onFileChanged(self, path):\n uidStr = str(self.uid).replace(\"-\", \"\")\n if uidStr not in path:\n return\n\n if not os.path.exists(path):\n self._filePath = ''\n if self.fileHandle is not None:\n self.fileHandle.close()\n self.fileHandle = None\n return\n else:\n # open file handle if needed\n if self.fileHandle is None:\n self.fileHandle = open(path, 'r')\n\n # read code string\n self.fileHandle.seek(0)\n codeString = self.fileHandle.read()\n\n self.tryApplyNodeData(codeString)\n\n def tryApplyNodeData(self, dataString):\n try:\n self.nodeData = dataString\n # create wrappers\n for pin in self._rawNode.getOrderedPins():\n self._createUIPinWrapper(pin)\n self.updateNodeShape()\n self.updateNodeHeaderColor()\n self.setHeaderHtml(self.getName())\n except Exception as e:\n logger.warning(e)\n\n def shoutDown(self):\n if self.fileHandle is not None:\n self.fileHandle.close()\n\n def kill(self, *args, **kwargs):\n try:\n if self.fileHandle is not None:\n self.fileHandle.close()\n os.remove(self._filePath)\n except:\n pass\n super(UIPythonNode, self).kill()\n\n def onEdit(self):\n editCmd = ConfigManager().getPrefsValue(\"PREFS\", \"General/EditorCmd\")\n tempFilesDir = self.canvasRef().getApp().getTempDirectory()\n\n if self._filePath == \"\":\n # if no file assotiated - create one\n uidStr = str(self.uid).replace(\"-\", \"\")\n self._filePath = os.path.join(tempFilesDir, \"{}.py\".format(uidStr))\n\n if not os.path.exists(self._filePath):\n f = open(self._filePath, 'w')\n if self.nodeData == \"\":\n f.write(INITIAL_CODE)\n else:\n f.write(self.nodeData)\n f.close()\n\n filePathString = '\"{}\"'.format(self._filePath)\n editCmd = editCmd.replace(\"@FILE\", filePathString)\n\n # create file watcher\n if self._filePath not in self.watcher.files():\n UIPythonNode.watcher.addPath(self._filePath)\n\n try:\n UIPythonNode.watcher.fileChanged.disconnect(self.onFileChanged)\n except:\n pass\n\n result = UIPythonNode.watcher.fileChanged.connect(self.onFileChanged)\n self.currentEditorProcess = subprocess.Popen(editCmd)\n self.fileHandle = open(self._filePath, 
'r')\n","sub_path":"PyFlow/Packages/PyFlowBase/UI/UIPythonNode.py","file_name":"UIPythonNode.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496177218","text":"# Created by sarathkaul on 12/11/19\n\nimport requests\n\n_NEWS_API = \"https://newsapi.org/v2/everything?source=reuters&sortBy=top&apiKey=e444c2db750d4ae6b6650a96e95f25ed\"\n\n\ndef fetch_bbc_news(bbc_news_api_key: str) -> None:\n # fetching a list of articles in json format\n bbc_news_page = requests.get(_NEWS_API).json()\n # each article in the list is a dict\n for i, article in enumerate(bbc_news_page[\"articles\"], 1):\n print(f\"{i}.) {article['title']}\")\n\n\nif __name__ == \"__main__\":\n fetch_bbc_news(bbc_news_api_key=\"Iran\")\n","sub_path":"web_programming/fetch_bbc_news.py","file_name":"fetch_bbc_news.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"271833949","text":"import scipy.io as sio\nimport numpy as np\n\ndef load_data():\n data = sio.loadmat('data/YaleBCrop025.mat')\n img = data['Y']\n I = []\n Label = []\n for i in range(img.shape[2]):\n for j in range(img.shape[1]):\n temp = np.reshape(img[:, j, i], [42, 48])\n Label.append(i)\n I.append(temp)\n I = np.array(I)\n label = np.array(Label[:])\n Img = np.transpose(I, [0, 2, 1])\n Img = Img / 255\n Img = np.expand_dims(Img[:], 3)\n Img = np.transpose(Img, [0, 3, 1, 2])\n Img = Img[:640, :, :, :]\n label = label[:640]\n return Img, label","sub_path":"data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171041538","text":"from sklearn import metrics\nfrom operator import itemgetter\n\n\nclass ExternalEvaluation(object):\n \"\"\"A class to evaluate the clustering algorithm.\n\n At the moment, we deploy all methods implemented in scikit-learn [Pedregosa2011]_.\n The methods are briefly described here [scikit-learn-0.18]_.\n\n References\n ----------\n .. [Pedregosa2011] Pedregosa et al., Scikit-learn: Machine Learning in Python, JMLR 12, pp. 2825-2830, 2011.\n .. [scikit-learn-0.18] scikit-learn, Clustering performance evaluation, scikit-learn 0.18 Documentation.\n http://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation\n \"\"\"\n\n @staticmethod\n def set_cluster_label_id(graph, clusters, original_logs, analysis_dir):\n \"\"\"Get all logs per cluster, get most dominant cluster label, and write clustering result to file [Manning2008]_.\n\n Parameters\n ----------\n graph : graph\n Graph to be analyzed.\n clusters : dict[list]\n Dictionary contains sequence of nodes in all clusters.\n original_logs :\n List of event logs.\n analysis_dir : str\n Path to save the analysis result.\n\n References\n ----------\n .. [Manning2008] Christopher D. 
Manning, Prabhakar Raghavan & Hinrich Schutze, Evaluation of clustering,\n in Introduction to Information Retrieval, 2008, Cambridge University Press.\n http://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-clustering-1.html\n \"\"\"\n new_cluster_member_label = {} # store individiual cluster id for each cluster member\n dominant_cluster_labels = {} # store dominant cluster label from all clusters\n cluster_labels = ['accepted password', 'accepted publickey', 'authentication failure', 'check pass',\n 'connection closed', 'connection reset by peer', 'did not receive identification string',\n 'failed password', 'ignoring max retries', 'invalid user', 'pam adding faulty module',\n 'pam unable to dlopen', 'received disconnect', 'received signal',\n 'reverse mapping checking getaddrinfo', 'server listening', 'session closed',\n 'session opened', 'this does not map back to the address', 'unknown option',\n 'error connect', 'open failed', 'root login refused', 'bad protocol version identification',\n 'subsystem request', 'protocol major versions differ', 'failed none', 'expired password',\n 'unable open env file', 'dispatch protocol error', 'syslogin perform logout',\n 'corrupted mac', 'write ident string']\n max_cluster_id = len(cluster_labels) - 1\n\n for cluster_id, cluster in clusters.iteritems():\n logs_per_cluster = []\n label_counter = dict((cl, 0) for cl in cluster_labels)\n for c in cluster:\n # get all original_logs per cluster\n # for graph-based clustering\n if graph:\n members = graph.node[c]['member']\n for member in members:\n logs_per_cluster.append(original_logs[member])\n # for non graph-based clustering\n elif graph is None:\n logs_per_cluster.append(original_logs[c])\n\n # get dominant label in cluster\n for label in cluster_labels:\n for log in logs_per_cluster:\n if label in log.lower():\n label_counter[label] += 1\n\n # get most dominant cluster label\n dominant_label_counter = sorted(label_counter.items(), key=itemgetter(1), reverse=True)\n\n # if cluster label has already used\n if dominant_label_counter[0][0] in [labels[0] for labels in dominant_cluster_labels.values()]:\n # get existing counter\n existing_counter = 0\n existing_label = ''\n for ec in dominant_cluster_labels.values():\n if ec[0] == dominant_label_counter[0][0]:\n existing_counter = ec[1]\n existing_label = ec[0]\n\n # check for which one is more dominant\n if dominant_label_counter[0][1] > existing_counter:\n # get existing cluster with lower existing counter\n existing_cluster = \\\n dominant_cluster_labels.keys()[dominant_cluster_labels.values().index((existing_label,\n existing_counter))]\n for c in cluster:\n new_cluster_member_label[c] = cluster_labels.index(dominant_label_counter[0][0])\n # set old cluster to max_cluster_id + 1\n for c in existing_cluster:\n new_cluster_member_label[c] = max_cluster_id + 1\n\n else:\n for c in cluster:\n new_cluster_member_label[c] = max_cluster_id + 1\n # if cluster label has not used\n else:\n dominant_cluster_labels[frozenset(cluster)] = dominant_label_counter[0]\n for c in cluster:\n new_cluster_member_label[c] = cluster_labels.index(dominant_label_counter[0][0])\n\n analysis_result = {}\n if graph:\n # set new cluster label\n for node_id, new_label in new_cluster_member_label.iteritems():\n graph.node[node_id]['cluster'] = new_label\n\n # set new cluster id for each cluster member\n for node in graph.nodes_iter(data=True):\n members = node[1]['member']\n for member in members:\n analysis_result[member] = new_cluster_member_label[node[0]]\n elif graph 
is None:\n            for cluster in clusters:\n                for c in cluster:\n                    analysis_result[c] = new_cluster_member_label[c]\n\n        # write clustering result to file, sorted by log line id (clustering result for all members in a node)\n        # note: sorted() returns a new list, so it must be consumed here rather than called and discarded\n        fopen = open(analysis_dir, 'w')\n        for rowid, cluster_id in sorted(analysis_result.items(), key=itemgetter(0)):\n            cluster_label = 'undefined' if cluster_id > max_cluster_id else cluster_labels[cluster_id]\n            fopen.write(str(cluster_id) + '; ' + cluster_label + '; ' + original_logs[rowid])\n        fopen.close()\n\n    @staticmethod\n    def get_evaluated(evaluated_file):\n        """Get evaluated log file.\n\n        Parameters\n        ----------\n        evaluated_file : str\n            The evaluated log file.\n\n        Returns\n        -------\n        evaluation_labels : list\n            Labels for each row in evaluated files.\n        """\n        with open(evaluated_file, 'r') as ef:\n            evaluations = ef.readlines()\n\n        evaluation_labels = [evaluation.split(';')[0] for evaluation in evaluations]\n        return evaluation_labels\n\n    @staticmethod\n    def get_adjusted_rand(standard_file, prediction_file):\n        """Get adjusted rand index [Hubert1985]_.\n\n        Parameters\n        ----------\n        standard_file : str\n            The ground truth or standard filename.\n        prediction_file : str\n            The analyzed or predicted filename.\n\n        Returns\n        -------\n        adjusted_rand_index : float\n            Adjusted rand index.\n\n        References\n        ----------\n        .. [Hubert1985] Lawrence Hubert and Phipps Arabie. Comparing partitions.\n           Journal of Classification, 2(1):193-218, 1985.\n        """\n        standard_labels = ExternalEvaluation.get_evaluated(standard_file)\n        prediction_labels = ExternalEvaluation.get_evaluated(prediction_file)\n        adjusted_rand_index = metrics.adjusted_rand_score(standard_labels, prediction_labels)\n\n        return adjusted_rand_index","sub_path":"pygraphc-master/pygraphc/evaluation/ExternalEvaluation.py","file_name":"ExternalEvaluation.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127340416","text":"import numpy as np\nimport cv2\n\n# Load the mask image\nimagen = cv2.imread('esc_erosion.png', 0)\n\n# Create a 3x3 kernel of ones\nkernel = np.ones((3, 3), np.uint8)\n\n# Apply the morphological transformation: closing\ntransformacion = cv2.morphologyEx(imagen, cv2.MORPH_CLOSE, kernel)\n\n# Show the result and exit\ncv2.imshow('resultado', transformacion)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"closing.py","file_name":"closing.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38352632","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport unittest\nfrom unittest.mock import patch\nimport logging\nfrom configparser import ConfigParser\n\nfrom TaxonomyAbundance.TaxonomyAbundanceServer import MethodContext\nfrom TaxonomyAbundance.authclient import KBaseAuth as _KBaseAuth\nfrom installed_clients.WorkspaceClient import Workspace\n\nfrom TaxonomyAbundance.TaxonomyAbundanceImpl import TaxonomyAbundance\nfrom TaxonomyAbundance.error import *  # custom exceptions\nfrom TaxonomyAbundance import TAUtils\nfrom mocks import *  # mocks, upas ...\n\n\n######################################\n######################################\n######### TOGGLE PATCH ###############\n######################################\n###################################### \ndo_patch = True  # toggle this to turn on/off @patch decorators\n\nif do_patch:\n    patch_ = patch\n    patch_dict_ = 
patch.dict\n\nelse:\n patch_ = lambda *args, **kwargs: lambda f: f\n patch_dict_ = lambda *args, **kwargs: lambda f: f\n######################################\n######################################\n######################################\n######################################\n\n\nclass TaxonomyAbundanceTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n logging.info('setUpClass')\n\n token = os.environ.get('KB_AUTH_TOKEN', None)\n config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)\n cls.cfg = {}\n config = ConfigParser()\n config.read(config_file)\n for nameval in config.items('TaxonomyAbundance'):\n cls.cfg[nameval[0]] = nameval[1]\n # Getting username from Auth profile for token\n authServiceUrl = cls.cfg['auth-service-url']\n auth_client = _KBaseAuth(authServiceUrl)\n user_id = auth_client.get_user(token)\n # WARNING: don't call any logging methods on the context object,\n # it'll result in a NoneType error\n cls.ctx = MethodContext(None)\n cls.ctx.update({'token': token,\n 'user_id': user_id,\n 'provenance': [\n {'service': 'TaxonomyAbundance',\n 'method': 'please_never_use_it_in_production',\n 'method_params': []\n }],\n 'authenticated': 1})\n cls.wsURL = cls.cfg['workspace-url']\n cls.wsClient = Workspace(cls.wsURL)\n cls.serviceImpl = TaxonomyAbundance(cls.cfg)\n cls.scratch = cls.cfg['scratch']\n cls.callback_url = os.environ['SDK_CALLBACK_URL']\n suffix = int(time.time() * 1000)\n cls.wsName = \"test_ContigFilter_\" + str(suffix)\n ret = cls.wsClient.create_workspace({'workspace': cls.wsName}) # noqa\n\n @classmethod\n def tearDownClass(cls):\n\n logging.info('tearDownClass')\n\n if hasattr(cls, 'wsName'):\n cls.wsClient.delete_workspace({'workspace': cls.wsName})\n print('Test workspace was deleted')\n\n def shortDescription(self):\n '''Override unittest using test*() docstrings in lieu of test*() method name in output summary'''\n return None\n\n\n @patch('TaxonomyAbundance.TAUtils.DataFileUtil', new=lambda *a, **k: get_mock_dfu('moss-amp_standardizedTax'))\n @patch_('TaxonomyAbundance.TaxonomyAbundanceImpl.KBaseReport', new=lambda *a, **k: get_mock_kbr())\n def test(self):\n ret = self.serviceImpl.run_TaxonomyAbundance(\n self.ctx, {\n 'workspace_name': self.wsName,\n 'amplicon_matrix_ref': moss_amp_AmpMat,\n 'attri_mapping_ref': moss_amp_colAttrMap,\n 'threshold': 0.005,\n 'meta_group': 'Field name (informal classification)',\n })\n\n ret = self.serviceImpl.run_TaxonomyAbundance(\n self.ctx, {\n 'workspace_name': self.wsName,\n 'amplicon_matrix_ref': moss_amp_AmpMat,\n 'attri_mapping_ref': moss_amp_colAttrMap,\n 'threshold': 0.005,\n 'meta_group': '',\n })\n\n\n @patch('TaxonomyAbundance.TAUtils.DataFileUtil', new=lambda *a, **k: get_mock_dfu('moss-amp_standardizedTax'))\n @patch_('TaxonomyAbundance.TaxonomyAbundanceImpl.KBaseReport', new=lambda *a, **k: get_mock_kbr())\n def test_local_data(self):\n '''\n Don't un-patch since the `parsed_user_taxonomy` doesn't exist on the remote version\n '''\n logging.info('test_your_method')\n\n # with grouping\n ret = self.serviceImpl.run_TaxonomyAbundance(\n self.ctx, {\n 'workspace_name': self.wsName,\n 'amplicon_matrix_ref': moss_amp_AmpMat,\n 'attri_mapping_ref': moss_amp_colAttrMap,\n 'threshold': 0.005,\n 'meta_group': 'Field name (informal classification)',\n })\n\n # without grouping\n ret = self.serviceImpl.run_TaxonomyAbundance(\n self.ctx, {\n 'workspace_name': self.wsName,\n 'amplicon_matrix_ref': moss_amp_AmpMat,\n 'attri_mapping_ref': moss_amp_colAttrMap,\n 'threshold': 0.005,\n 'meta_group': 
'',\n            })\n\n        # without attribute mapping, with grouping\n        ret = self.serviceImpl.run_TaxonomyAbundance(\n            self.ctx, {\n                'workspace_name': self.wsName,\n                'amplicon_matrix_ref': moss_amp_AmpMat,\n                'attri_mapping_ref': None,\n                'threshold': 0.005,\n                'meta_group': 'Field name (informal classification)',\n            })\n\n        # without attribute mapping or grouping\n        ret = self.serviceImpl.run_TaxonomyAbundance(\n            self.ctx, {\n                'workspace_name': self.wsName,\n                'amplicon_matrix_ref': moss_amp_AmpMat,\n                'attri_mapping_ref': None,\n                'threshold': 0.005,\n                'meta_group': '',\n            })\n\n    @patch_('TaxonomyAbundance.TaxonomyAbundanceImpl.KBaseReport', new=lambda *a, **k: get_mock_kbr())\n    def test_remote_data(self):\n        ret = self.serviceImpl.run_TaxonomyAbundance(\n            self.ctx, {\n                'workspace_name': self.wsName,\n                'amplicon_matrix_ref': secret_wRDP_AmpMat,\n                'attri_mapping_ref': None,\n                'threshold': 0.005,\n                'meta_group': '',\n            })\n    \n\n    @patch_('TaxonomyAbundance.TaxonomyAbundanceImpl.KBaseReport', new=lambda *a, **k: get_mock_kbr())\n    def test_no_taxonomy(self):\n        with self.assertRaises(ObjectException) as cm:\n            ret = self.serviceImpl.run_TaxonomyAbundance(\n                self.ctx, {\n                    'workspace_name': self.wsName,\n                    'amplicon_matrix_ref': moss_amp_AmpMat,\n                    'attri_mapping_ref': moss_amp_colAttrMap,\n                    'threshold': 0.005,\n                    'meta_group': 'Field name (informal classification)',\n                })\n        logging.info(str(cm))\n\n\nrun_tests = ['test']\nlocal_tests = ['test_local_data']\nCI_tests = ['test_remote_data']\nprod_tests = ['test_no_taxonomy']\n\n\nfor key, value in TaxonomyAbundanceTest.__dict__.copy().items():\n    if type(key) == str and key.startswith('test') and callable(value):\n        if key not in run_tests:\n            delattr(TaxonomyAbundanceTest, key)\n            pass\n","sub_path":"test/TaxonomyAbundance_server_test.py","file_name":"TaxonomyAbundance_server_test.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199316161","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\ndef add_data(path):\n\tf = open(path)\n\tarr = []\n\tfor line in f:\n\t\tx,y,z = line.split()\n\t\tarr.append([float(x), float(y), float(z)])\n\tf.close()\n\treturn arr\n\n\npath = 'output/static_array.out'\ndata = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ndata['Type'] = 'Static'\ndata['Number'] = 1\n\npath = 'output/module_array.out'\ntmp = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ntmp['Type'] = 'Module'\ntmp['Number'] = 1\ndata = pd.concat([data, tmp])\n\npath = 'output/module_array4.out'\ntmp = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ntmp['Type'] = 'Module'\ntmp['Number'] = 4\ndata = pd.concat([data, tmp])\n\npath = 'output/static_array4.out'\ntmp = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ntmp['Type'] = 'Static'\ntmp['Number'] = 4\ndata = pd.concat([data, tmp])\n\npath = 'output/module_array8.out'\ntmp = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ntmp['Type'] = 'Module'\ntmp['Number'] = 8\ndata = pd.concat([data, tmp])\n\n\npath = 'output/static_array8.out'\ntmp = pd.DataFrame(add_data(path), columns = ['Size', 'Time', 'Error'])\ntmp['Type'] = 'Static'\ntmp['Number'] = 8\ndata = pd.concat([data, tmp])\n\n\ncolours = sns.color_palette('colorblind')\n\n# Scaling with array size\ndata_number = data.loc[data['Number']==8].groupby(['Type'])\n\nfig, ax = plt.subplots()\ncount = 0\nfor key, grp in data_number:\n\tgrp.plot(kind='scatter', x='Size', 
y='Time',\\\n\t\t\t\tyerr='Error', color=colours[count], ax = ax, loglog=True)\n\tgrp.plot(kind='line', x='Size', y='Time', label = key, \\\n\t\t\t\tyerr='Error', color=colours[count], ax = ax, loglog=True)\t\t\n\n\tcount += 1\n\n# plt.xlim((0,65))\nplt.title('Scaling with array size\\n(Num of arrays 8)')\n# plt.show()\nplt.savefig('Array_size_scaling.png')\nplt.close()\n\n# Scaling with number of arrays\ndata_size = data.loc[data['Size']==64].groupby(['Type'])\n\nfig, ax = plt.subplots()\ncount = 0\nfor key, grp in data_size:\n\tgrp.plot(kind='scatter', x='Number', y='Time',\\\n\t\t\t\tyerr='Error', color=colours[count], ax = ax, loglog=True)\n\tgrp.plot(kind='line', x='Number', y='Time', label = key, \\\n\t\t\t\tyerr='Error', color=colours[count], ax = ax, loglog=True)\t\t\n\n\tcount += 1\n\n# plt.xlim((0,10))\nplt.title('Scaling with number of arrays\\n(Array size = 64)')\n# plt.show()\nplt.savefig('Num_array_scaling.png')\nplt.close()\n\n","sub_path":"scaling.py","file_name":"scaling.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105792243","text":"#!/usr/bin/python\n\nimport logging\nfrom functools import lru_cache\nfrom collections import namedtuple, defaultdict\nfrom heapq import heappop, heappush, heapify\n\nfrom advent import Advent\n\n"""\n--- Day 22: Mode Maze ---\nThis is it, your final stop: the year -483. It's snowing and dark outside; the\nonly light you can see is coming from a small cottage in the distance. You make\nyour way there and knock on the door.\n\nA portly man with a large, white beard answers the door and invites you inside.\nFor someone living near the North Pole in -483, he must not get many visitors,\nbut he doesn't act surprised to see you. Instead, he offers you some milk and\ncookies.\n\nAfter talking for a while, he asks a favor of you. His friend hasn't come back\nin a few hours, and he's not sure where he is. Scanning the region briefly, you\ndiscover one life signal in a cave system nearby; his friend must have taken\nshelter there. The man asks if you can go there to retrieve his friend.\n\nThe cave is divided into square regions which are either dominantly rocky,\nnarrow, or wet (called its type). Each region occupies exactly one coordinate in\nX,Y format where X and Y are integers and zero or greater. (Adjacent regions can\nbe the same type.)\n\nThe scan (your puzzle input) is not very detailed: it only reveals the depth of\nthe cave system and the coordinates of the target. However, it does not reveal\nthe type of each region. The mouth of the cave is at 0,0.\n\nThe man explains that due to the unusual geology in the area, there is a method\nto determine any region's type based on its erosion level. The erosion level of\na region can be determined from its geologic index. The geologic index can be\ndetermined using the first rule that applies from the list below:\n\n - The region at 0,0 (the mouth of the cave) has a geologic index of 0.\n - The region at the coordinates of the target has a geologic index of 0.\n - If the region's Y coordinate is 0, the geologic index is its X coordinate\n   times 16807.\n - If the region's X coordinate is 0, the geologic index is its Y coordinate\n   times 48271.\n - Otherwise, the region's geologic index is the result of multiplying the\n   erosion levels of the regions at X-1,Y and X,Y-1.\n\nA region's erosion level is its geologic index plus the cave system's depth, all\nmodulo 20183. 
Then:\n\n - If the erosion level modulo 3 is 0, the region's type is rocky.\n - If the erosion level modulo 3 is 1, the region's type is wet.\n - If the erosion level modulo 3 is 2, the region's type is narrow.\n\nFor example, suppose the cave system's depth is 510 and the target's coordinates\nare 10,10. Using % to represent the modulo operator, the cavern would look as\nfollows:\n\n - At 0,0, the geologic index is 0. The erosion level is (0 + 510) % 20183 =\n 510. The type is 510 % 3 = 0, rocky.\n - At 1,0, because the Y coordinate is 0, the geologic index is 1 * 16807 =\n 16807. The erosion level is (16807 + 510) % 20183 = 17317. The type is 17317 %\n 3 = 1, wet.\n - At 0,1, because the X coordinate is 0, the geologic index is 1 * 48271 =\n 48271. The erosion level is (48271 + 510) % 20183 = 8415. The type is 8415 % 3\n = 0, rocky.\n - At 1,1, neither coordinate is 0 and it is not the coordinate of the target,\n so the geologic index is the erosion level of 0,1 (8415) times the erosion\n level of 1,0 (17317), 8415 * 17317 = 145722555. The erosion level is\n (145722555 + 510) % 20183 = 1805. The type is 1805 % 3 = 2, narrow.\n - At 10,10, because they are the target's coordinates, the geologic index is\n 0. The erosion level is (0 + 510) % 20183 = 510. The type is 510 % 3 = 0,\n rocky.\n\nDrawing this same cave system with rocky as ., wet as =, narrow as |, the mouth\nas M, the target as T, with 0,0 in the top-left corner, X increasing to the\nright, and Y increasing downward, the top-left corner of the map looks like\nthis:\n\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\nBefore you go in, you should determine the risk level of the area. For the the\nrectangle that has a top-left corner of region 0,0 and a bottom-right corner of\nthe region containing the target, add up the risk level of each individual\nregion: 0 for rocky regions, 1 for wet regions, and 2 for narrow regions.\n\nIn the cave system above, because the mouth is at 0,0 and the target is at\n10,10, adding up the risk level of all regions with an X coordinate from 0 to 10\nand a Y coordinate from 0 to 10, this total is 114.\n\nWhat is the total risk level for the smallest rectangle that includes 0,0 and\nthe target's coordinates?\n\n\n--- Part Two ---\nOkay, it's time to go rescue the man's friend.\n\nAs you leave, he hands you some tools: a torch and some climbing gear. You\ncan't equip both tools at once, but you can choose to use neither.\n\nTools can only be used in certain regions:\n\n - In rocky regions, you can use the climbing gear or the torch. You cannot\n use neither (you'll likely slip and fall).\n - In wet regions, you can use the climbing gear or neither tool. You cannot\n use the torch (if it gets wet, you won't have a light source).\n - In narrow regions, you can use the torch or neither tool. You cannot use\n the climbing gear (it's too bulky to fit).\n\nYou start at 0,0 (the mouth of the cave) with the torch equipped and must reach\nthe target coordinates as quickly as possible. The regions with negative X or Y\nare solid rock and cannot be traversed. 
The fastest route might involve entering\nregions beyond the X or Y coordinate of the target.\n\nYou can move to an adjacent region (up, down, left, or right; never diagonally)\nif your currently equipped tool allows you to enter that region. Moving to an\nadjacent region takes one minute. (For example, if you have the torch equipped,\nyou can move between rocky and narrow regions, but cannot enter wet regions.)\n\nYou can change your currently equipped tool or put both away if your new\nequipment would be valid for your current region. Switching to using the\nclimbing gear, torch, or neither always takes seven minutes, regardless of\nwhich tools you start with. (For example, if you are in a rocky region, you can\nswitch from the torch to the climbing gear, but you cannot switch to neither.)\n\nFinally, once you reach the target, you need the torch equipped before you can\nfind him in the dark. The target is always in a rocky region, so if you arrive\nthere with climbing gear equipped, you will need to spend seven minutes\nswitching to your torch.\n\nFor example, using the same cave system as above, starting in the top left\ncorner (0,0) and moving to the bottom right corner (the target, 10,10) as\nquickly as possible, one possible route is as follows, with your current\nposition marked X:\n\n Initially:\n X=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Down:\n M=.|=.|.|=.|=|=.\n X|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Right:\n M=.|=.|.|=.|=|=.\n .X=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Switch from using the torch to neither tool:\n M=.|=.|.|=.|=|=.\n .X=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Right 3:\n M=.|=.|.|=.|=|=.\n .|=|X|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Switch from using neither tool to the climbing gear:\n M=.|=.|.|=.|=|=.\n .|=|X|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Down 7:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..X==..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n 
||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Right:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..=X=..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Down 3:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||.X.|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Right:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||..X|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Down:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.X..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Right 4:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===T===||\n =|||...|==..|=.|\n =.=|=.=..=X||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Up 2:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===X===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\n Switch from using the climbing gear to the torch:\n M=.|=.|.|=.|=|=.\n .|=|=|||..|.=...\n .==|....||=..|==\n =.|....|.==.|==.\n =|..==...=.|==..\n =||.=.=||=|=..|=\n |.=.===|||..=..|\n |..==||=.|==|===\n .=..===..=|.|||.\n .======|||=|=.|=\n .===|=|===X===||\n =|||...|==..|=.|\n =.=|=.=..=.||==|\n ||=|=...|==.=|==\n |=.=||===.|||===\n ||.|==.|.|.||=||\n\nThis is tied with other routes as the fastest way to reach the target: 45\nminutes. 
In it, 21 minutes are spent switching tools (three times, seven\nminutes each) and the remaining 24 minutes are spent moving.\n\nWhat is the fewest number of minutes you can take to reach the target?\n\n\"\"\"\n\n\nROCKY = 0\nWET = 1\nNARROW = 2\n\nTORCH = 1\nNAKED = 2\nCLIMB = 3\n\ncombos = [\n (ROCKY, CLIMB), (ROCKY, TORCH),\n (WET, CLIMB), (WET, NAKED),\n (NARROW, TORCH), (NARROW, NAKED),\n]\n\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger()\n\n\nclass State(namedtuple('State', ['time', 'gear', 'x', 'y'])):\n def pos(self):\n return (self.x, self.y)\n\n\nclass Cave(object):\n def __init__(self, depth, target):\n self.depth = depth\n self.target = target\n e, t = self.erosion(0)\n self.data = {\n (0, 0): (0, e, t)\n } # (x, y) => (geo, ero, rocktype\n self.maxx = 0\n self.maxy = 0\n p = int(max(self.target) * 1.5)\n self.extend(p, p)\n\n\n def fill(self, topleft, bottomright):\n \"\"\"calculate values for given area.\n Assumes that values to the left and above already exist\n \"\"\"\n tx, ty = topleft\n bx, by = bottomright\n for y in range(ty, by + 1):\n for x in range(tx, bx + 1):\n idx = self.geoindex(x, y)\n ero, typ = self.erosion(idx)\n self.data[(x, y)] = (idx, ero, typ)\n\n def extend(self, x, y):\n \"\"\"\n +----+---+\n |....| A |\n +----+---+\n | B | C |\n +----+---+\n \"\"\"\n log.debug('extending from (%d, %d) to (%d, %d)',\n self.maxx, self.maxy, x, y)\n # first fill A\n if x > self.maxx:\n self.fill((self.maxx, 0), (x, self.maxy))\n # next B\n if y > self.maxy:\n self.fill((0, self.maxy), (self.maxx, y))\n # and now C\n if x > self.maxx and y > self.maxy:\n self.fill((self.maxx, self.maxy), (x, y))\n self.maxx = max(self.maxx, x)\n self.maxy = max(self.maxy, y)\n\n def rocktype(self, x, y):\n if x > self.maxx or y > self.maxy:\n self.extend(x, y)\n _, _, result = self.data[(x, y)]\n return result\n\n def erosion(self, geoindex):\n result = (geoindex + self.depth) % 20183\n rocktype = result % 3\n return result, rocktype\n\n def geoindex(self, x, y):\n if (x, y) == self.target:\n return 0\n if x == 0:\n return y * 48271\n if y == 0:\n return x * 16807\n geo_left, ero_left, _ = self.data[(x - 1, y)]\n geo_up, ero_up, _ = self.data[(x, y - 1)]\n return ero_left * ero_up\n\n def dump(self):\n t = {\n ROCKY: '.',\n WET: '=',\n NARROW: '|',\n }\n for y in range(self.maxy + 1):\n for x in range(self.maxx + 1):\n if (x, y) == self.target:\n print('T', end='') # noqa\n else:\n print(t[self.rocktype(x, y)], end='') # noqa\n print()\n\n\nclass Day(Advent):\n lines = True\n\n def prepare(self):\n super(Day, self).prepare()\n self.depth = int(self.data[0].split(': ')[1])\n self.target = tuple(\n [int(x) for x in self.data[1].split(': ')[1].split(',')])\n self.cave = Cave(self.depth, self.target)\n\n def solve1(self):\n cave = Cave(510, (10, 10))\n assert(cave.rocktype(0, 0) == ROCKY)\n assert(cave.rocktype(1, 0) == WET)\n assert(cave.rocktype(0, 1) == ROCKY)\n assert(cave.rocktype(1, 1) == NARROW)\n assert(cave.rocktype(10, 10) == ROCKY)\n\n tx, ty = self.target\n result = 0\n for x in range(tx + 1):\n for y in range(ty + 1):\n result += self.cave.rocktype(x, y)\n return result\n\n def neighbours(self, x, y):\n yield (x + 1, y)\n yield (x, y + 1)\n if x > 0:\n yield (x - 1, y)\n if y > 0:\n yield (x, y - 1)\n\n def options(self, state):\n \"\"\"yield neighbours, which we could enter with current gear\"\"\"\n rtype = self.cave.rocktype(state.x, state.y)\n for nx, ny in self.neighbours(state.x, state.y):\n nrtype= self.cave.rocktype(nx, ny)\n if (nrtype, state.gear) 
in combos:\n                yield State(state.time + 1, state.gear, nx, ny)\n\n    def solve2(self):\n        """\n        build a graph from the start, adding nodes containing the time to\n        reach them from the start.\n        maintain a priority queue of the leaf nodes (heap).\n        take the nearest leaf, expand its options and add them to the queue.\n        """\n        # initial state, entrance with a torch in our hand\n        state = State(time=0, gear=TORCH, x=0, y=0)\n        heap = [state]\n        best = {}  # (x, y, gear) => minutes\n        while heap:\n            state = heappop(heap)\n            if (state.x, state.y) == self.target and state.gear == TORCH:\n                # if gear is not a torch, there will be another leaf\n                # that already added the seven minutes to switch gear\n                return state.time\n            key = (state.x, state.y, state.gear)\n            rocktype = self.cave.rocktype(state.x, state.y)\n            if key in best and best[key] <= state.time:\n                continue  # better way already known\n            best[key] = state.time  # remember this solution\n            # changing gear adds seven minutes\n            for gear in TORCH, CLIMB, NAKED:\n                if gear == state.gear:\n                    continue  # that's the current gear, already in heap\n                    # but would not hurt, it's simply seven minutes slower\n                if (rocktype, gear) in combos:\n                    # changing to this gear is allowed, takes seven minutes\n                    nstate = State(state.time + 7, gear=gear, x=state.x, y=state.y)\n                    heappush(heap, nstate)\n            # movement adds one minute\n            for o in self.options(state):\n                heappush(heap, o)\n\n\nDay.main()\n","sub_path":"2018/22/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":17577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160777474","text":"import sys\nimport os\nimport PyPDF2\n\npdf_files = sys.argv[1:]  # renamed from 'input' to avoid shadowing the built-in\n\ndef pdf_combiner(pdf_list):\n\tmerger = PyPDF2.PdfFileMerger()\n\tfor pdf in pdf_list:\n\t\tmerger.append(pdf)\n\n\tmerger.write('merged.pdf')\n\npdf_combiner(pdf_files)","sub_path":"pdf-merger/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"454091697","text":"from django.http import JsonResponse\nfrom .models import TaskList, Task\n\ndef task_list(request):\n    tasks = TaskList.objects.all()\n    json_tasks = [c.to_json() for c in tasks]\n    return JsonResponse(json_tasks, safe=False)\n\n\ndef task_detail(request, pk, fk):\n    try:\n        task = Task.objects.filter(task_list_id = pk)[fk-1]\n    except IndexError as e:\n        # indexing the queryset raises IndexError (not DoesNotExist) when fk is out of range\n        return JsonResponse({'error': str(e)})\n    return JsonResponse(task.to_json())\n\n\ndef task(request, pk):\n    try:\n        task = Task.objects.filter(task_list_id = pk)\n    except TaskList.DoesNotExist as e:\n        return JsonResponse({'error': str(e)})\n    json_detail = [p.to_view() for p in task]\n    return JsonResponse(json_detail, safe=False)","sub_path":"todoback/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63339112","text":"\"\"\"\nAdd the following to iterm2_print_user_vars\n\niterm2_set_user_var awsProfile $AWSUME_PROFILE\n\"\"\"\n\nimport iterm2\n\nasync def main(connection):\n    component = iterm2.StatusBarComponent(\n        short_description='AWSume Profile',\n        detailed_description='The AWS profile as configured by awsume',\n        exemplar='AWSume Profile',\n        update_cadence=2,\n        identifier='com.ironhalo.iterm-statusbar.awsume-profile',\n        knobs=[],\n    )\n\n    @iterm2.StatusBarRPC\n    async def awsume_profile(knobs, profile=iterm2.Reference('user.awsProfile?')):\n        if profile:\n            return 
f'aws: {profile}'\n        else:\n            return ''\n\n    await component.async_register(connection, awsume_profile)\niterm2.run_forever(main)\n","sub_path":"awsume-profile.py","file_name":"awsume-profile.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492321873","text":"# Download files\n\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\n\nclass suite(unittest.TestCase):\n\n    def setUp(self): \n        chromeOptions=Options()\n        chromeOptions.add_experimental_option(\"prefs\", \n        {\n            \"download.default_directory\" : \"D:\\\\Automatizacion\",\n        })\n        self.driver = webdriver.Chrome(executable_path=r\"D:\\Automatizacion\\driverchrome\\chromedriver.exe\", chrome_options=chromeOptions)\n\n    def test_descargando_archivos(self):\n        driver = self.driver\n        driver.get(\"https://www.w3schools.com/tags/tryit.asp?filename=tryhtml5_a_download\")\n        time.sleep(3)\n        driver.switch_to.frame(driver.find_element_by_xpath(\"/html/body/div[7]/div[4]/div/div/iframe\"))\n        time.sleep(3)\n        driver.find_element_by_xpath(\"/html/body/p[2]/a/img\").click()\n        time.sleep(3)\n\n    def tearDown(self):  # Cleanup\n        self.driver.close()\n\nif __name__=='__main__':\n    unittest.main()","sub_path":"Descargar Archivos/DescargarArchivos.py","file_name":"DescargarArchivos.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"543462566","text":"# -*- coding: utf-8 -*- \n# @Time : 2021/4/29 13:53 \n# @Author : xu \n# @File : chat_client.py\nfrom __future__ import annotations\nfrom typing import IO\nimport asyncio\nimport sys\nimport contextlib\nimport aiofiles.threadpool\n\nfrom chat_streams import split_lines, write, handle_writes\n\nasync def handle_reads(reader: asyncio.StreamReader) -> None:\n    async for message in split_lines(reader):\n        text = message.decode()\n        print(f\"Received {text!r}\")\n        if text == \"quit\\n\":\n            break\n\nasync def stream_file_to_queue(file: IO[str], queue: asyncio.Queue[bytes]) -> None:\n    loop = asyncio.get_event_loop()\n    async for message in aiofiles.threadpool.wrap(file, loop=loop):\n        await queue.put(message.encode())\n\nasync def send_file(file: IO[str]) -> None:\n\n    write_queue: asyncio.Queue[bytes] = asyncio.Queue()\n\n    reader, writer = await asyncio.open_connection(\"127.0.0.1\", 8888)\n\n    read_handler = asyncio.create_task(handle_reads(reader))\n    write_handler = asyncio.create_task(handle_writes(writer, write_queue))\n    copy_handler = asyncio.create_task(stream_file_to_queue(file, write_queue))\n    done, pending = await asyncio.wait([read_handler, write_handler, copy_handler], return_when=asyncio.FIRST_COMPLETED)\n\n    print(\"Closing the connection\")\n    for task in pending:\n        task.cancel()\n        with contextlib.suppress(asyncio.CancelledError):\n            await task\n\nif __name__ == \"__main__\":\n\n    asyncio.run(send_file(sys.stdin))","sub_path":"note-taking/asyncio_chat/chat_client.py","file_name":"chat_client.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"429186365","text":"#coding=utf-8\nimport pytest\nfrom pages.bi.index.indexPage import IndexPage\nfrom pages.bi import headerPage\nfrom pages.login import loginPage\nfrom allure import MASTER_HELPER as helper\nfrom selenium import webdriver\nimport time\nimport os\n\n'''Test cases for the home page'''\n@helper.feature(\"Home page test suite\")\nclass 
TestIndex1():\n\n    @helper.step(\"Initialize the browser\")\n    def setup_class(self):\n        self.driver = webdriver.Chrome()\n        self.driver.maximize_window()\n        self.loginpage = loginPage.LoginPage(self.driver)\n        self.headerpage = headerPage.HeaderPage(self.driver)\n        self.indexpage = IndexPage(self.driver)\n\n\n        with helper.step(\"Log in to bi\"):\n            self.loginpage.bi_login(\"1000000\", \"dj123456\")\n\n\n    @helper.testcase(\"Case: view the purchasing module reports\")\n    def test_report_num(self):\n        '''\n        Check that the report count is displayed correctly after switching modules\n        '''\n        with helper.step(\"Enter the purchasing module\"):\n            self.indexpage.click_moudel(\"采购\")\n            assert self.indexpage.get_report_count() == 7\n\n\n\n    @helper.testcase(\"Case: check that switching regions works\")\n    def test_change_region(self):\n        '''After switching the city, check whether the reports are filtered'''\n        with helper.step(\"Switch region\"):\n            self.indexpage.click_moudel(\"全部\")\n            assert self.indexpage.get_report_count() == 49\n\n\n    @helper.step(\"Close the browser after the tests finish\")\n    def teardown_class(self):\n        self.indexpage.quit()\n\nif __name__ == '__main__':\n    pytest.main(['-s','test_index1.py'])\n\n","sub_path":"testcase/bi/test_index1.py","file_name":"test_index1.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211910542","text":"#program to check if a given string is an anagram of another given string.\ndef is_anagram(str1, str2):\n    list_str1 = list(str1)\n    list_str1.sort()\n    list_str2 = list(str2)\n    list_str2.sort()\n\n    return (list_str1 == list_str2)\n\nprint(is_anagram('anagram','nagaram'))\nprint(is_anagram('cat','rat'))","sub_path":"Darlington/phase-3/python challenge/day 93 solution/qtn2.py","file_name":"qtn2.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"640764700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 20 12:57:46 2020\n\n@author: Vasilica\n\"\"\"\n\nimport random\nimport itertools\nimport operator\nfrom math import sin, pow\n\nclass Entity:\n    def __init__(self, nrOfIndividuals, crossProb, mutationProb, dimIndividual, firstSet, secondSet):\n        self.__nrOfIndividuals = nrOfIndividuals\n        self.__crossProb = crossProb\n        self.__mutationProb = mutationProb\n        self.__dimIndividual = dimIndividual\n        self.__firstSet = firstSet\n        self.__secondSet = secondSet\n    \n    def getNrOfIndividuals(self):\n        return self.__nrOfIndividuals\n    \n    def getFirstSet(self):\n        return self.__firstSet\n    \n    def getSecondSet(self):\n        return self.__secondSet\n    \n    def getDimIndividual(self):\n        return self.__dimIndividual\n    \n    def getMutationProb(self):\n        return self.__mutationProb\n    \n    def getCrossProb(self):\n        return self.__crossProb\n    \n    def individual(self, length, firstSet, secondSet):\n        '''\n        Create a member of the population - an individual\n        \n        length: the number of genes (components)\n        firstSet: the first set of values\n        secondSet: the second set of values\n        '''\n        set = list(range(0, length)) \n        perm = list(itertools.permutations(set))\n        n = 2 * length\n        perm = perm[:n]\n        individ = []\n        for i in range(0, length):\n            auxSet = []\n            for j in range(0, length):\n                auxSet.append(firstSet[perm[i][j]])\n            individ.append(auxSet)\n        \n        for i in range(length, len(perm)):\n            auxSet = []\n            for j in range(0, length):\n                auxSet.append(secondSet[perm[i][j]])\n            individ.append(auxSet)\n        \n        return individ \n    \n    def population(self, count, length, firstSet, secondSet):\n        \"\"\"\n        Create a number of individuals (i.e. 
a population).\n \n count: the number of individuals in the population\n length: the number of values per individual\n firstSet: the first set of values\n secondSet: the second set of values\n \"\"\"\n population = []\n for x in range(count):\n individ = self.individual(length, firstSet, secondSet)\n random.shuffle(individ)\n population.append(individ)\n return population\n \n \n def fitness(self, individual):\n \"\"\"\n Determine the fitness of an individual. How many mistakes has the current \n individual\n individual: the individual to evaluate\n \"\"\"\n fitness = 0\n length = len(individual) // 2\n for k in range(len(individual)):\n for i in range(length):\n for j in range(i + 1, length):\n if individual[k][i] == individual[k][j]:\n fitness += 1\n \n for k in range(length):\n for i in range(length):\n for j in range(i + 1, length):\n if individual[i][k] == individual[j][k]:\n fitness += 1\n \n for k in range(length):\n for i in range(length, len(individual)):\n for j in range(i + 1, len(individual)):\n if individual[i][k] == individual[j][k]:\n fitness += 1\n \n return fitness\n \n def mutate(self, individual, pM, firstSet, secondSet): \n '''\n Performs a mutation on an individual with the probability of pM.\n \n individual:the individual to be mutated\n pM: the probability the mutation to occure\n firstSet: the first set of values\n secondSet: the second set of values\n '''\n if pM > random.random():\n length = len(individual) // 2\n set = list(range(0, length)) \n perm = list(itertools.permutations(set))\n if len(individual) > 1:\n p = random.randint(0, len(individual)-1)\n \n if p < length:\n aux = []\n for i in range(length):\n aux.append(firstSet[perm[p][i]])\n individual[p] = aux\n else:\n aux = []\n for i in range(length):\n aux.append(secondSet[perm[p][i]])\n individual[p] = aux\n \n return individual\n \n \n def crossover(self, parent1, parent2, pC):\n '''\n crossover between 2 parents\n '''\n child=[]\n n = len(parent1)\n t1 = random.randint(0, n - 2)\n t2 = random.randint(t1 + 1, n - 1)\n \n if pC > random.random():\n for i in range(t1):\n child.append(parent1[i])\n \n for i in range(t1, t2):\n child.append(parent2[i])\n \n for i in range(t2, n):\n child.append(parent1[i])\n \n return child\n else:\n return parent1\n \n \n \n \nclass Particle: \n def __init__(self, length, firstSet, secondSet):\n self.__length = length\n self.__firstSet = firstSet\n self.__secondSet = secondSet\n self._position = self.individual()\n self.evaluate()\n self.velocity = [ 0 for i in range(length)]\n \n #the memory of that particle\n self._bestPosition=self._position.copy()\n self._bestFitness=self._fitness\n \n def __str__(self):\n return str(self._position)\n \n def individual(self):\n '''\n Create a member of the population - an individual\n \n length: the number of genes (components)\n firstSet: the first set of values\n secondSet: the second set of values\n '''\n set = list(range(0, self.__length)) \n perm = list(itertools.permutations(set))\n random.shuffle(perm)\n n = 2 * self.__length\n perm = perm[:n]\n individ = []\n for i in range(0, self.__length):\n auxSet = []\n for j in range(0, self.__length):\n auxSet.append(self.__firstSet[perm[i][j]])\n individ.append(auxSet)\n \n for i in range(self.__length, len(perm)):\n auxSet = []\n for j in range(0, self.__length):\n auxSet.append(self.__secondSet[perm[i][j]])\n individ.append(auxSet)\n \n return individ \n \n def fit(self):\n \"\"\"\n Determine the fitness of an individual. 
How many mistakes has the current \n individual\n individual: the individual to evaluate\n \"\"\"\n fitness = 0\n length = len(self._position) // 2\n for k in range(len(self._position)):\n for i in range(length):\n for j in range(i + 1, length):\n if self._position[k][i] == self._position[k][j]:\n fitness += 1\n \n for k in range(length):\n for i in range(length):\n for j in range(i + 1, length):\n if self._position[i][k] == self._position[j][k]:\n fitness += 1\n \n for k in range(length):\n for i in range(length, len(self._position)):\n for j in range(i + 1, len(self._position)):\n if self._position[i][k] == self._position[j][k]:\n fitness += 1\n \n return fitness\n \n def evaluate(self):\n \"\"\" evaluates the particle \"\"\"\n self._fitness = self.fit()\n \n \n @property\n def position(self):\n \"\"\" getter for pozition \"\"\"\n return self._position\n\n @property\n def fitness(self):\n \"\"\" getter for fitness \"\"\"\n return self._fitness\n\n @property\n def bestPosition(self):\n \"\"\" getter for best pozition \"\"\"\n return self._bestPosition\n\n @property\n def bestFitness(self):\n \"\"\"getter for best fitness \"\"\"\n return self._bestFitness\n \n @position.setter\n def position(self, newPosition):\n self._position = newPosition.copy()\n # automatic evaluation of particle's fitness\n self.evaluate()\n # automatic update of particle's memory\n if (self._fitness < self._bestFitness):\n self._bestPosition = self._position\n self._bestFitness = self._fitness\n \n''' \ndef main():\n length = 3\n firstSet = [1, 2, 3]\n secondSet = [1, 2, 3]\n particle = Particle(length, firstSet, secondSet)\n print(particle.individual(length, firstSet, secondSet))\n \nmain()\n'''","sub_path":"lab3/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":8846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10190023","text":"import json\nfrom typing import Optional, List\nfrom aqt import mw\nfrom os import path\n\nfrom .config_types import StraightSetting\n\nfrom .utils import (\n safenav,\n safenav_preset,\n)\n\nSCRIPTNAME = path.dirname(path.realpath(__file__))\n\nwith open(path.join(SCRIPTNAME, '../../config.json'), encoding='utf-8') as config:\n config_default = json.load(config)\n\n SETTINGS_DEFAULT = config_default['settings']['1'][0]\n deck_default = SETTINGS_DEFAULT\n\n safenav_setting = safenav_preset(deck_default)\n\ndef serialize_setting(setting: StraightSetting) -> dict:\n return {\n 'deckConfName': setting.deck_conf_name,\n 'enableNotifications': setting.enable_notifications,\n 'straightLength': setting.straight_length,\n 'baseEase': setting.base_ease,\n 'stepEase': setting.step_ease,\n 'startEase': setting.start_ease,\n 'stopEase': setting.stop_ease,\n }\n\ndef deserialize_setting(deck_conf_name, setting_data, access_func = safenav_setting) -> StraightSetting:\n result = setting_data if type(setting_data) == StraightSetting else StraightSetting(\n deck_conf_name,\n access_func([setting_data], ['straightLength']),\n access_func([setting_data], ['enableNotifications']),\n access_func([setting_data], ['baseEase']),\n access_func([setting_data], ['stepEase']),\n access_func([setting_data], ['startEase']),\n access_func([setting_data], ['stopEase']),\n )\n\n return result\n\ndef deserialize_setting_with_default(deck_conf_name, settings) -> StraightSetting:\n found = filter(\n lambda v: safenav([v], ['deckConfName'], default='') == deck_conf_name,\n settings,\n )\n\n try:\n deck_deserialized = 
deserialize_setting(deck_conf_name, next(found))\n\n except StopIteration as e:\n deck_deserialized = deserialize_setting(deck_conf_name, deck_default)\n\n return deck_deserialized\n\ndef get_default_setting(deck_conf_name) -> StraightSetting:\n return deserialize_setting(deck_conf_name, deck_default)\n\ndef get_setting(col, deck_conf_name='Default') -> Optional[StraightSetting]:\n all_config = mw.addonManager.getConfig(__name__)\n setting = safenav(\n [all_config],\n ['settings', str(col.crt)],\n default=[],\n )\n\n return deserialize_setting_with_default(\n deck_conf_name,\n setting,\n )\n\ndef get_settings(col) -> List[StraightSetting]:\n all_config = mw.addonManager.getConfig(__name__)\n setting = safenav([all_config], ['settings', str(col.crt)], default=[])\n\n deck_settings = [\n get_setting(col, deck['name'], setting)\n for deck\n in col.decks.decks.values()\n ]\n\n return deck_settings\n\n# write config data to config.json\ndef write_settings(col, settings: List[StraightSetting]) -> None:\n serialized_settings = [\n serialize_setting(setting)\n for setting\n in settings\n ]\n\n all_config = mw.addonManager.getConfig(__name__)\n\n new_config = safenav([all_config], ['settings'], default={})\n new_config[str(col.crt)] = serialized_settings\n\n mw.addonManager.writeConfig(__name__, {\n 'settings': new_config,\n })\n\ndef write_setting(col, setting: StraightSetting) -> None:\n serialized_setting = serialize_setting(setting)\n\n all_config = mw.addonManager.getConfig(__name__)\n current_config = safenav(\n [all_config],\n ['settings', str(col.crt)],\n default=[],\n )\n\n try:\n idx = next(i for i,v in enumerate(current_config) if v['deckConfName'] == setting.deck_conf_name)\n current_config[idx] = serialized_setting\n\n except StopIteration:\n current_config.append(serialized_setting)\n\n new_config = safenav([all_config], ['settings'], default={})\n new_config[str(col.crt)] = current_config\n\n mw.addonManager.writeConfig(__name__, {\n 'settings': new_config,\n })\n\ndef rename_setting(col, old_name: str, new_name: str) -> None:\n all_config = mw.addonManager.getConfig(__name__)\n\n current_config = safenav(\n [all_config],\n ['settings', str(col.crt)],\n default=[],\n )\n\n found = filter(\n lambda v: safenav([v], [1, 'deckConfName'], default='') == old_name,\n enumerate(current_config),\n )\n\n try:\n conf_for_rename = next(found)\n current_config[conf_for_rename[0]]['deckConfName'] = new_name\n\n except StopIteration as e:\n pass\n\n new_config = safenav([all_config], ['settings'], default={})\n new_config[str(col.crt)] = current_config\n\n mw.addonManager.writeConfig(__name__, {\n 'settings': new_config,\n })\n\ndef remove_setting(col, conf_name: str) -> None:\n all_config = mw.addonManager.getConfig(__name__)\n\n current_config = safenav(\n [all_config],\n ['settings', str(col.crt)],\n default=[],\n )\n\n found = filter(\n lambda v: safenav([v], [1, 'deckConfName'], default='') == conf_name,\n enumerate(current_config),\n )\n\n try:\n conf_for_deletion = next(found)\n del current_config[conf_for_deletion[0]]\n\n except StopIteration as e:\n pass\n\n new_config = safenav([all_config], ['settings'], default={})\n new_config[str(col.crt)] = current_config\n\n mw.addonManager.writeConfig(__name__, {\n 'settings': new_config,\n })\n","sub_path":"src/lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10189179","text":"import re\n\n# change list of string 
from 'YYYY Mon DD' to 'DD-MM-YYYY'\n\nchange_list = {\n \"January\": \"01\",\n \"February\": \"02\",\n \"March\": \"03\",\n \"April\": \"04\",\n \"May\": \"05\",\n}\nlist_result = []\n\nname = input(\"Enter file name: \") # request file name\nhandler = open(name, \"r\") # open file for analysis\nf = open(\"result.txt\", \"w\") # create or recreate file result.txt\nfor row in handler: # analysis of each row\n match = re.search(r\"(\\d+)\\s([a-zA-Z]+)\\s(\\d+)\", row) # find pattern\n tmp_var = change_list[match.group(2)] # tmp_var will contain Mon value\n result_var = (\n match.group(3) + \"-\" + tmp_var + \"-\" + match.group(1)\n ) # create new row under result_var variable\n f.write(result_var + \"\\n\") # put new row to new file\nhandler.close()\nf.close()\n","sub_path":"re_formatting.py","file_name":"re_formatting.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110882696","text":"from copy import deepcopy\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db import models\nimport django_filters\nfrom django_filters.constants import EMPTY_VALUES\nfrom django_filters.utils import get_model_field, resolve_field\n\nfrom nautobot.dcim.forms import MACAddressField\nfrom nautobot.extras.models import Tag\nfrom nautobot.utilities.constants import (\n FILTER_CHAR_BASED_LOOKUP_MAP,\n FILTER_NEGATION_LOOKUP_MAP,\n FILTER_TREENODE_NEGATION_LOOKUP_MAP,\n FILTER_NUMERIC_BASED_LOOKUP_MAP,\n)\n\n\ndef multivalue_field_factory(field_class):\n \"\"\"\n Given a form field class, return a subclass capable of accepting multiple values. This allows us to OR on multiple\n filter values while maintaining the field's built-in validation. Example: GET /api/dcim/devices/?name=foo&name=bar\n \"\"\"\n\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)\n super(field_class, self).to_python(v)\n for v in value\n if v\n ]\n\n return type(\"MultiValue{}\".format(field_class.__name__), (NewField,), dict())\n\n\n#\n# Filters\n#\n\n\nclass MultiValueCharFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(forms.CharField)\n\n\nclass MultiValueDateFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(forms.DateField)\n\n\nclass MultiValueDateTimeFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(forms.DateTimeField)\n\n\nclass MultiValueNumberFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(forms.IntegerField)\n\n\nclass MultiValueBigNumberFilter(MultiValueNumberFilter):\n \"\"\"Subclass of MultiValueNumberFilter used for BigInteger model fields.\"\"\"\n\n\nclass MultiValueTimeFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(forms.TimeField)\n\n\nclass MACAddressFilter(django_filters.CharFilter):\n field_class = MACAddressField\n\n\nclass MultiValueMACAddressFilter(django_filters.MultipleChoiceFilter):\n field_class = multivalue_field_factory(MACAddressField)\n\n\nclass TreeNodeMultipleChoiceFilter(django_filters.ModelMultipleChoiceFilter):\n \"\"\"\n Filters for a set of Models, including all descendant models within a Tree. 
Example: [<Region: R1>, <Region: R2>]\n    """\n\n    def get_filter_predicate(self, v):\n        # Null value filtering\n        if v is None:\n            return {f\"{self.field_name}__isnull\": True}\n        return super().get_filter_predicate(v)\n\n    def filter(self, qs, value):\n        value = [node.get_descendants(include_self=True) if not isinstance(node, str) else node for node in value]\n        return super().filter(qs, value)\n\n\nclass NullableCharFieldFilter(django_filters.CharFilter):\n    """\n    Allow matching on null field values by passing a special string used to signify NULL.\n    """\n\n    def filter(self, qs, value):\n        if value != settings.FILTERS_NULL_CHOICE_VALUE:\n            return super().filter(qs, value)\n        qs = self.get_method(qs)(**{\"{}__isnull\".format(self.field_name): True})\n        return qs.distinct() if self.distinct else qs\n\n\nclass TagFilter(django_filters.ModelMultipleChoiceFilter):\n    """\n    Match on one or more assigned tags. If multiple tags are specified (e.g. ?tag=foo&tag=bar), the queryset is filtered\n    to objects matching all tags.\n    """\n\n    def __init__(self, *args, **kwargs):\n\n        kwargs.setdefault(\"field_name\", \"tags__slug\")\n        kwargs.setdefault(\"to_field_name\", \"slug\")\n        kwargs.setdefault(\"conjoined\", True)\n        kwargs.setdefault(\"queryset\", Tag.objects.all())\n\n        super().__init__(*args, **kwargs)\n\n\nclass NumericArrayFilter(django_filters.NumberFilter):\n    """\n    Filter based on the presence of an integer within an ArrayField.\n    """\n\n    def filter(self, qs, value):\n        if value:\n            value = [value]\n        return super().filter(qs, value)\n\n\nclass ContentTypeFilter(django_filters.CharFilter):\n    """\n    Allow specifying a ContentType by <app_label>.<model> (e.g. \"dcim.site\").\n    """\n\n    def filter(self, qs, value):\n        if value in EMPTY_VALUES:\n            return qs\n\n        try:\n            app_label, model = value.lower().split(\".\")\n        except ValueError:\n            return qs.none()\n        return qs.filter(\n            **{\n                f\"{self.field_name}__app_label\": app_label,\n                f\"{self.field_name}__model\": model,\n            }\n        )\n\n\nclass ContentTypeMultipleChoiceFilter(django_filters.MultipleChoiceFilter):\n    """\n    Allows multiple-choice ContentType filtering by <app_label>.<model> (e.g. \"dcim.site\").\n\n    Defaults to joining multiple options with \"AND\". Pass `conjoined=False` to\n    override this behavior to join with \"OR\" instead.\n\n    Example use on a `FilterSet`:\n\n        content_types = ContentTypeMultipleChoiceFilter(\n            choices=FeatureQuery('statuses').get_choices,\n        )\n    """\n\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"conjoined\", True)\n        super().__init__(*args, **kwargs)\n\n    def filter(self, qs, value):\n        """Filter on value, which should be list of content-type names.\n\n        e.g. `['dcim.device', 'dcim.rack']`\n        """\n        if not self.conjoined:\n            q = models.Q()\n\n        for v in value:\n            if self.conjoined:\n                qs = ContentTypeFilter.filter(self, qs, v)\n            else:\n                # Similar to the ContentTypeFilter.filter() call above, but instead of narrowing the query each time\n                # (a AND b AND c ...) 
we broaden the query each time (a OR b OR c ...).\n                # Specifically, we're mapping a value like ['dcim.device', 'ipam.vlan'] to a query like\n                # Q((field__app_label=\"dcim\" AND field__model=\"device\") OR (field__app_label=\"ipam\" AND field__model=\"VLAN\"))\n                try:\n                    app_label, model = v.lower().split(\".\")\n                except ValueError:\n                    continue\n                q |= models.Q(\n                    **{\n                        f\"{self.field_name}__app_label\": app_label,\n                        f\"{self.field_name}__model\": model,\n                    }\n                )\n\n        if not self.conjoined:\n            qs = qs.filter(q)\n\n        return qs\n\n\n#\n# FilterSets\n#\n\n\nclass BaseFilterSet(django_filters.FilterSet):\n    """\n    A base filterset which provides common functionality to all Nautobot filtersets\n    """\n\n    FILTER_DEFAULTS = deepcopy(django_filters.filterset.FILTER_FOR_DBFIELD_DEFAULTS)\n    FILTER_DEFAULTS.update(\n        {\n            models.AutoField: {\"filter_class\": MultiValueNumberFilter},\n            models.BigIntegerField: {\"filter_class\": MultiValueBigNumberFilter},\n            models.CharField: {\"filter_class\": MultiValueCharFilter},\n            models.DateField: {\"filter_class\": MultiValueDateFilter},\n            models.DateTimeField: {\"filter_class\": MultiValueDateTimeFilter},\n            models.DecimalField: {\"filter_class\": MultiValueNumberFilter},\n            models.EmailField: {\"filter_class\": MultiValueCharFilter},\n            models.FloatField: {\"filter_class\": MultiValueNumberFilter},\n            models.IntegerField: {\"filter_class\": MultiValueNumberFilter},\n            models.PositiveIntegerField: {\"filter_class\": MultiValueNumberFilter},\n            models.PositiveSmallIntegerField: {\"filter_class\": MultiValueNumberFilter},\n            models.SlugField: {\"filter_class\": MultiValueCharFilter},\n            models.SmallIntegerField: {\"filter_class\": MultiValueNumberFilter},\n            models.TimeField: {\"filter_class\": MultiValueTimeFilter},\n            models.URLField: {\"filter_class\": MultiValueCharFilter},\n            models.UUIDField: {\"filter_class\": MultiValueCharFilter},\n            MACAddressField: {\"filter_class\": MultiValueMACAddressFilter},\n        }\n    )\n\n    @staticmethod\n    def _get_filter_lookup_dict(existing_filter):\n        # Choose the lookup expression map based on the filter type\n        if isinstance(\n            existing_filter,\n            (\n                MultiValueDateFilter,\n                MultiValueDateTimeFilter,\n                MultiValueNumberFilter,\n                MultiValueTimeFilter,\n            ),\n        ):\n            lookup_map = FILTER_NUMERIC_BASED_LOOKUP_MAP\n\n        elif isinstance(existing_filter, (TreeNodeMultipleChoiceFilter,)):\n            # TreeNodeMultipleChoiceFilter only supports negation but must maintain the `in` lookup expression\n            lookup_map = FILTER_TREENODE_NEGATION_LOOKUP_MAP\n\n        # These filter types support only negation\n        elif isinstance(\n            existing_filter,\n            (\n                django_filters.ModelChoiceFilter,\n                django_filters.ModelMultipleChoiceFilter,\n                TagFilter,\n            ),\n        ):\n            lookup_map = FILTER_NEGATION_LOOKUP_MAP\n        # These filter types support only negation\n        elif existing_filter.extra.get(\"choices\"):\n            lookup_map = FILTER_NEGATION_LOOKUP_MAP\n        elif isinstance(\n            existing_filter,\n            (\n                django_filters.filters.CharFilter,\n                django_filters.MultipleChoiceFilter,\n                MultiValueCharFilter,\n                MultiValueMACAddressFilter,\n            ),\n        ):\n            lookup_map = FILTER_CHAR_BASED_LOOKUP_MAP\n\n        else:\n            lookup_map = None\n\n        return lookup_map\n\n    @classmethod\n    def get_filters(cls):\n        """\n        Override filter generation to support dynamic lookup expressions for certain filter types.\n\n        For specific filter types, new filters are created based on defined lookup expressions in\n        the form `<field_name>__<lookup_expr>`\n        """\n        filters = super().get_filters()\n\n        new_filters = {}\n        for existing_filter_name, existing_filter in filters.items():\n            # Loop over existing 
filters to extract metadata by which to create new filters\n\n            # If the filter makes use of a custom filter method or lookup expression skip it\n            # as we cannot sanely handle these cases in a generic manner\n            if existing_filter.method is not None or existing_filter.lookup_expr not in [\"exact\", \"in\"]:\n                continue\n\n            # Choose the lookup expression map based on the filter type\n            lookup_map = cls._get_filter_lookup_dict(existing_filter)\n            if lookup_map is None:\n                # Do not augment this filter type with more lookup expressions\n                continue\n\n            # Get properties of the existing filter for later use\n            field_name = existing_filter.field_name\n            field = get_model_field(cls._meta.model, field_name)\n\n            # Create new filters for each lookup expression in the map\n            for lookup_name, lookup_expr in lookup_map.items():\n                new_filter_name = \"{}__{}\".format(existing_filter_name, lookup_name)\n\n                try:\n                    if existing_filter_name in cls.declared_filters:\n                        # The filter field has been explicitly defined on the filterset class so we must manually\n                        # create the new filter with the same type because there is no guarantee the defined type\n                        # is the same as the default type for the field\n                        resolve_field(field, lookup_expr) # Will raise FieldLookupError if the lookup is invalid\n                        new_filter = type(existing_filter)(\n                            field_name=field_name,\n                            lookup_expr=lookup_expr,\n                            label=existing_filter.label,\n                            exclude=existing_filter.exclude,\n                            distinct=existing_filter.distinct,\n                            **existing_filter.extra,\n                        )\n                    else:\n                        # The filter field is listed in Meta.fields so we can safely rely on default behaviour\n                        # Will raise FieldLookupError if the lookup is invalid\n                        new_filter = cls.filter_for_field(field, field_name, lookup_expr)\n                except django_filters.exceptions.FieldLookupError:\n                    # The filter could not be created because the lookup expression is not supported on the field\n                    continue\n\n                if lookup_name.startswith(\"n\"):\n                    # This is a negation filter which requires a queryset.exclude() clause\n                    # Of course setting the negation of the existing filter's exclude attribute handles both cases\n                    new_filter.exclude = not existing_filter.exclude\n\n                new_filters[new_filter_name] = new_filter\n\n        filters.update(new_filters)\n        return filters\n\n\nclass NameSlugSearchFilterSet(django_filters.FilterSet):\n    \"\"\"\n    A base class for adding the search method to models which only expose the `name` and `slug` fields\n    \"\"\"\n\n    q = django_filters.CharFilter(\n        method=\"search\",\n        label=\"Search\",\n    )\n\n    def search(self, queryset, name, value):\n        if not value.strip():\n            return queryset\n        return queryset.filter(models.Q(name__icontains=value) | models.Q(slug__icontains=value))\n","sub_path":"nautobot/utilities/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":13588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60097369","text":"from django.http import HttpResponse\nfrom channels.handler import AsgiHandler\nfrom channels.sessions import channel_session\nfrom channels import Group\nfrom urlparse import urlparse\nfrom channels import Channel\nimport ast\nimport json\n# In consumers.py\n\ndef ws_add(message):\n    # Accept the connection\n    message.reply_channel.send({\"accept\": True})\n    # Add to the chat group\n    Group(\"chat\").add(message.reply_channel)\n\n@channel_session\ndef ws_message(message):\n    # ASGI WebSocket packet-received and send-packet message types\n    # both have a \"text\" key for their textual data.\n    payload = json.loads(message['text'])\n    dictpayload 
= ast.literal_eval(payload)\n    print(\"dictpayload\")\n    print(dictpayload)\n    Channel(\"chat.receive\").send(dictpayload)\n\n\n\ndef ws_disconnect(message):\n    Group(\"chat\").discard(message.reply_channel)\n\n\n\ndef we_draw(message):\n    print(\"here==!!!!!!\")\n    print(\"here==!!!!!!\")\n    print(\"here==!!!!!!\")\n    print(\"here==!!!!!!\")\n    # array = message.content['text']\n    print(message.content)\n    # array = message.content\n    \n    # print (type(array) == type({}) )\n    # arrayjson = json.loads(array)\n    arrayjson = message.content\n    print(arrayjson)\n    print(\"arrayjson\")\n    x = arrayjson['x']\n    y = arrayjson[\"y\"]\n    startX = arrayjson[\"startX\"]\n    startY = arrayjson[\"startY\"]\n    drawstyle = arrayjson[\"drawstyle\"]\n    curColor = arrayjson[\"curColor\"]\n    curRadius = arrayjson[\"curRadius\"]\n    Group(\"chat\").send({\n        \"text\": json.dumps({\n            \"draw\": \"candraw\",\n            \"x\": x,\n            \"y\": y,\n            \"startX\":startX,\n            \"startY\":startY,\n            \"drawstyle\":drawstyle,\n            \"curColor\":curColor,\n            \"curRadius\":curRadius,\n        }),\n    })","sub_path":"src/sprint1/wedraw/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447270523","text":"import h5py as h5\r\nimport theano\r\nimport json\r\n\r\nclass Compile():\r\n    def __init__(self, filePath):\r\n        self.group = h5.File(filePath, 'r')\r\n        #self.graph_flow = {}\r\n        #self.sort_by_graph()\r\n\r\n    def sort_by_graph(self):\r\n        nodes = self.group.keys()\r\n        for node in nodes:\r\n        \tname = node\r\n        \tlayer = node.split('_')[1]\r\n        \tif len(node.split('_')) > 3:\r\n        \t\tlayer = node.split('_')[2]\r\n        \tidx = node.split('_')[-1]\r\n        \tvalues = [name, layer, node]\r\n        \tself.graph_flow[int(idx)] = values\r\n","sub_path":"DeepInferenceLib/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338949244","text":"from django.conf.urls import url\n\nfrom proposals import views\n\nurlpatterns = [\n    url(r'^markdown/$', views.markdown_preview, name='markdown'),\n    url(r'^$', views.home, name='home'),\n    url(r'^conference/$', views.conference, name='conference'),\n    url(r'^talk/$', views.talk_list, name='list-talks'),\n    url(r'^talk/add/$', views.talk_edit, name='add-talk'),\n    url(r'^talk/edit/(?P<talk>[-\\w]+)$', views.talk_edit, name='edit-talk'),\n    url(r'^talk/vote/(?P<talk>[-\\w]+)/(?P<score>[-0-2]+)$', views.vote, name='vote'),\n    url(r'^talk/details/(?P<slug>[-\\w]+)$', views.TalkDetail.as_view(), name='show-talk'),\n    url(r'^talk/by-topic/(?P<topic>[-\\w]+)$', views.talk_list_by_topic, name='list-talks-by-topic'),\n    url(r'^topic/$', views.TopicList.as_view(), name='list-topics'),\n    url(r'^topic/add/$', views.TopicCreate.as_view(), name='add-topic'),\n    url(r'^topic/(?P<slug>[-\\w]+)/edit/$', views.TopicUpdate.as_view(), name='edit-topic'),\n    url(r'^speakers/$', views.SpeakerList.as_view(), name='list-speakers'),\n    url(r'^speaker/(?P<username>[\\w.@+-]+)$', views.user_details, name='show-speaker'),\n]\n","sub_path":"proposals/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135911147","text":"from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\n\nfrom django.template import loader\nfrom django.http import Http404\nfrom django.urls import reverse\nfrom django.db.models import F\nfrom 
django.views import generic\nfrom .models import Product, Image\n\n\ndef index(request):\n product_list=Product.objects.all()\n cart_sum = 0\n for product in product_list:\n cart_sum = cart_sum+product.in_cart_number\n context = {'product_list': product_list, 'cart_sum': cart_sum}\n return render(request, 'shop/index.html', context)\n\n\ndef bought(request):\n product_list=Product.objects.filter(in_cart_number__gte=1)\n total_price_dict={}\n for item in product_list:\n total_price_dict[item.id]=item.in_cart_number*item.official_price\n context = {'product_list': product_list, 'total_price_dict': total_price_dict}\n return render(request, 'shop/cart.html', context)\n\n\ndef add_to_cart(request):\n selected_prod = get_object_or_404(Product, pk=request.POST['product_bought'])\n\n product = get_object_or_404(Product, pk=selected_prod.id)\n product.in_cart_number = F('in_cart_number')+1\n product.save()\n return HttpResponseRedirect(reverse('shop:index'))\n","sub_path":"mysite/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"652300467","text":"import torch.nn as nn\r\nimport torch.utils.model_zoo as model_zoo\r\nimport torch\r\nimport numpy as np\r\nimport tqdm\r\n#import torch.utils.data as utils\r\nimport matplotlib.pyplot as plt\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms\r\nimport pandas as pd \r\nfrom PIL import Image\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nprint('Device:',device)\r\ndef print_network(model, name):\r\n num_params=0\r\n for p in model.parameters():\r\n num_params+=p.numel()\r\n print(name)\r\n print(model)\r\n print(\"The number of parameters {}\".format(num_params))\r\n\r\n__all__ = [\r\n 'VGG', 'vgg11', 'vgg11_bn',\r\n]\r\n\r\n\r\nmodel_urls = {\r\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\r\n}\r\n\r\n\r\nclass VGG(nn.Module):\r\n\r\n def __init__(self, features, num_classes=10, init_weights=True):\r\n super(VGG, self).__init__()\r\n self.features = features\r\n self.avgpool = nn.AdaptiveAvgPool2d((4, 4))\r\n #self.avgpool = nn.AdaptiveAvgPool2d((2, 2))\r\n self.classifier = nn.Sequential(\r\n nn.Linear(256 * 4 * 4, 1024),\r\n #nn.Linear(48 * 2 * 2, 192),\r\n nn.ReLU(True),\r\n nn.Dropout(),\r\n nn.Linear(1024, 256),\r\n #nn.Linear(192, 96),\r\n nn.ReLU(True),\r\n nn.Dropout(),\r\n nn.Linear(256, num_classes),\r\n #nn.Linear(96, num_classes),\r\n )\r\n if init_weights:\r\n self._initialize_weights()\r\n\r\n def forward(self, x, verbose=False):\r\n if verbose: \"Output Layer by layer\"\r\n if verbose: print(x.size())\r\n x = self.features(x)\r\n if verbose: print(x.size())\r\n x = self.avgpool(x)\r\n if verbose: print(x.size())\r\n x = x.view(x.size(0), -1)\r\n if verbose: print(x.size())\r\n x = self.classifier(x)\r\n if verbose: print(x.size())\r\n return x\r\n\r\n def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.Linear):\r\n nn.init.normal_(m.weight, 0, 0.01)\r\n nn.init.constant_(m.bias, 0)\r\n \r\n def training_params(self):\r\n self.optimizer = torch.optim.SGD(self.parameters(), lr=0.003, momentum=0.9, 
weight_decay=0.00001)\r\n self.Loss = nn.BCEWithLogitsLoss()\r\n\r\n\r\ndef make_layers(cfg, batch_norm=False):\r\n layers = []\r\n in_channels = 3\r\n for v in cfg:\r\n if v == 'M':\r\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\r\n else:\r\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\r\n if batch_norm:\r\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\r\n else:\r\n layers += [conv2d, nn.ReLU(inplace=True)]\r\n in_channels = v\r\n return nn.Sequential(*layers)\r\n\r\nclass CustomDatasetFromImages(Dataset):\r\n def __init__(self, csv_path,stage):\r\n \"\"\"\r\n Args:\r\n csv_path (string): path to csv file\r\n img_path (string): path to the folder where images are\r\n transform: pytorch transforms for transforms and tensor conversion\r\n \"\"\"\r\n # Transforms\r\n self.to_tensor = transforms.ToTensor()\r\n # Read the csv file\r\n self.data_info = pd.read_csv(csv_path, header=None)\r\n if stage == 'train':\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[1:162771, 0])\r\n # Second column is the labels\r\n self.label_arr = np.asarray(self.data_info.iloc[1:162771, 1:11]).astype(np.int32)\r\n self.label_arr = np.where(self.label_arr>0,1,0)\r\n # Calculate len\r\n self.data_len = 162770\r\n elif stage == 'val':\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[162771:182638, 0])\r\n # Second column is the labels\r\n self.label_arr = np.asarray(self.data_info.iloc[162771:182638, 1:11]).astype(np.int32)\r\n self.label_arr = np.where(self.label_arr>0,1,0)\r\n # Calculate len\r\n self.data_len = 19867\r\n \r\n elif stage == 'test':\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[182638:202600, 0])\r\n # Second column is the labels\r\n self.label_arr = np.asarray(self.data_info.iloc[182638:202600, 1:11]).astype(np.int32)\r\n self.label_arr = np.where(self.label_arr>0,1,0)\r\n # Calculate len\r\n self.data_len = 19962 \r\n \r\n\r\n def __getitem__(self, index):\r\n # Get image name from the pandas df\r\n single_image_name = self.image_arr[index]\r\n # Open image\r\n img_as_img = Image.open('img_align_celeba'+'/'+single_image_name)\r\n\r\n # Transform image to tensor\r\n img_as_tensor = self.to_tensor(img_as_img)\r\n \r\n\r\n # Get label(class) of the image based on the cropped pandas column\r\n single_image_label = self.label_arr[index]\r\n #single_image_label = self.to_tensor(single_image_label)\r\n \r\n return (img_as_tensor, single_image_label)\r\n\r\n def __len__(self):\r\n return self.data_len\r\n\r\ncfg = {\r\n 'A': [64, 'M', 128, 'M', 192, 'M', 256, 'M', 256, 'M'],\r\n #'A': [6, 'M', 12, 'M', 24, 24, 'M', 48, 48, 'M', 48, 48, 'M'],\r\n}\r\n\r\n\r\ndef vgg11(pretrained=False, **kwargs):\r\n \"\"\"VGG 11-layer model (configuration \"A\")\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n if pretrained:\r\n kwargs['init_weights'] = False\r\n model = VGG(make_layers(cfg['A']), **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\r\n return model\r\n\r\n\r\ndef vgg11_bn(pretrained=False, **kwargs):\r\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n if pretrained:\r\n kwargs['init_weights'] = False\r\n model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\r\n if pretrained:\r\n 
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))\r\n return model\r\n \r\n\r\ndef demo(data_loader, model):\r\n model.eval() \r\n for batch_idx, (data,_) in tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc=\"[DEMO]\"):\r\n data = data.to(device).requires_grad_(False)\r\n\r\n output = model(data)\r\n prediction = torch.where(output.data.cpu() > 0, torch.Tensor([1]), torch.Tensor([0]))\r\n\r\n res = (prediction[0][:]).long()\r\n plt.imshow(np.swapaxes(np.swapaxes(data.cpu()[0],0,1),1,2))\r\n plt.show()\r\n print('Prediction:',res)\r\n break\r\n\r\nif __name__=='__main__':\r\n epochs=16\r\n batch_size=25\r\n\r\n model = vgg11_bn()\r\n model.load_state_dict(torch.load('SelfArch16.pth',map_location='cuda:0'))\r\n model.to(device)\r\n #model.eval()\r\n celebA_images_test = CustomDatasetFromImages('annotations.csv',stage='test')\r\n celebA_loader_test = DataLoader(dataset=celebA_images_test,batch_size=1,shuffle=True)\r\n demo(celebA_loader_test, model) \r\n ","sub_path":"12-CNN/demo_ourVGG.py","file_name":"demo_ourVGG.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445385064","text":"#!/usr/bin/env python\nimport numpy as np\nimport cPickle, hickle\nimport matplotlib.pyplot as plt\nimport scipy.optimize \nimport scipy.odr\n\n# ENERGYLIST = [25, 50, 75, 100, 125] #[25, 30, 40, 50, 75, 100, 125, 148]\n# INFILELIST = ['temperatures_no_sensor/temperatureToT_col0_%d.p' % energy for energy in ENERGYLIST] # ['temperatureToT_col0_%d.p' % energy for energy in ENERGYLIST] \nINFILELIST = ['temperatureToT_16.p']\nOUTDIR = 'plotTemperatureToT'\nCUTTEMP = 0\nOFFSETTEMP = 1570\nPLOT = True\n\ndef main():\n for INFILE in INFILELIST:\n plotTemperature(INFILE)\n \ndef plotTemperature(tempDict, offsettemp=1570, plot=False, outdir=None):\n offsetList, offsetErrList = [], []\n slopeList, slopeErrList = [], []\n\n # Load data from file\n time_, energy_, temp_, tempErr_, ToT_, ToTErr_ = getData(tempDict)\n \n # Loop over energies\n for energy in sorted(list(set(energy_))):\n # Filter data by energies\n energyCond = (energy_ == energy)\n time, temp, tempErr, ToT, ToTErr = time_[energyCond], temp_[energyCond], tempErr_.T[energyCond].T, ToT_.T[energyCond].T, ToTErr_.T[energyCond].T\n \n # = Plot =\n if plot:\n figList, axList = [], []\n for i in range(16):\n fig, ax = plt.subplots(1, 2, figsize=(13, 5), gridspec_kw = {'width_ratios': [1, 1]}, sharey=True)\n\n # ax0 label\n ax[0].set_xlabel('Time (s)')\n ax[0].set_ylabel('Temperature (DAC)')\n\n # ax1 label\n ax[1].set_xlabel(r'$\\mu_\\mathrm{ToT}$')\n # ax[1].set_ylabel('Temperature (DAC)')\n\n # Temperature vs time\n ax[0].errorbar(time, temp, yerr=tempErr, marker='x', ls='', color='cornflowerblue')\n ax[0].axhline(y=offsettemp, ls='--')\n \n figList.append( fig ), axList.append( ax )\n\n # Fit\n try:\n popt, pcov = scipy.optimize.curve_fit(heating, time, temp, sigma=tempErr, p0=(600, 200, 1590, 1., 1., 1540, 1550))\n timeFit = np.linspace(min(time), max(time), 1000)\n # if PLOT:\n # ax[0].plot(timeFit, heating(timeFit, *popt), color='cornflowerblue', alpha=.7)\n except:\n pass\n\n # Plot for each pixel\n offset, slope = [], []\n offsetErr, slopeErr = [], []\n\n ToTFitList, poptList = [], []\n for i in range(len(ToT)):\n # print len(ToT[i]), ToT[i]\n # print len(temp), temp\n\n # Fit\n fitModel = scipy.odr.Model(linear)\n fitData = scipy.odr.RealData(ToT[i], temp, sx=ToTErr[i], sy=tempErr)\n odr = scipy.odr.ODR(fitData, fitModel, beta0=[1., 
1.])\n out = odr.run()\n popt, perr = out.beta, out.sd_beta\n poptList.append( popt )\n \n ToTFit = np.asarray( [np.min(ToT), np.max(ToT)] )\n ToTFitList.append( ToTFit )\n\n # if any(np.isnan(popt)):\n # continue\n # print 'popt', popt\n\n m, t = popt\n mErr, tErr = perr\n offset.append( (offsettemp - t) / m )\n offsetErr.append( 0 ) # np.sqrt((tErr/m)**2 + ((offsettemp - t)/m**2 * mErr)**2) )\n slope.append( m )\n slopeErr.append( 0 ) # mErr )\n \n offsetList.append( offset ), offsetErrList.append( offsetErr )\n slopeList.append( slope ), slopeErrList.append( slopeErr )\n \n if plot:\n for idx in range(len(axList)):\n ax, fig = axList[idx], figList[idx]\n \n minList, maxList = [], []\n for i in range(16):\n ax[1].errorbar(ToT[idx * 16 + i], temp, xerr=ToTErr[idx * 16 + i], yerr=tempErr, color=getColor('tab20', len(ToT) // 16, i % 16), marker='x', ls='')\n ax[1].plot(ToTFitList[idx + i], linear(poptList[idx * 16 + i], ToTFitList[idx * 16 + i]), color=getColor('tab20', len(ToT) // 16, i % 16), label=str(i % 16))\n \n minList.append( min(ToT[idx * 16 + i]) ), maxList.append( max(ToT[idx * 16 + i]) )\n plt.legend()\n\n ax[1].axhline(y=offsettemp, ls='--')\n ax[1].set_ylim(0.99 * min(temp), 1.01 * max(temp))\n ax[1].set_xlim(0.95 * min(minList), 1.05 * max(maxList))\n\n title = 'Column: %d, Energy: %.2f keV' % (idx, energy * 1.e-3) # INFILE.split('.')[0]\n fig.suptitle(title)\n # plt.tight_layout()\n if outdir:\n plt.savefig(outdir + '/%s.pdf' % title)\n plt.show()\n for fig in figList:\n plt.close(fig)\n \n offsetList, slopeList = np.asarray( offsetList ).T, np.asarray( slopeList ).T\n offsetErrList, slopeErrList = np.asarray( offsetErrList ).T, np.asarray( slopeErrList ).T\n # print slopeList\n\n # Slope plot\n # fig, ax = plt.subplots(2, 1, figsize=(5, 8), sharex=True)\n\n calibSlopeList, calibOffsetList = [], []\n for i in range(len(offsetList)):\n # ax[0].plot(ENERGYLIST, offsetList[i], color=getColor('tab20', len(ToT), i))\n # ax[1].plot(ENERGYLIST, slopeList[i], color=getColor('tab20', len(ToT), i))\n if plot:\n plt.errorbar(offsetList[i], 1./np.asarray(slopeList[i]), xerr=offsetErrList[i], yerr=np.asarray(slopeErrList[i]), color=getColor('tab20', len(ToT), i), marker='x', ls='')\n\n # Fit\n offsetList_ = np.asarray(offsetList[i])\n offsetList_ = offsetList_[~np.isnan(offsetList_)]\n slopeList_ = np.asarray(slopeList[i])\n slopeList_ = slopeList_[~np.isnan(slopeList_)]\n\n popt, perr = scipy.optimize.curve_fit(lambda x, m, t: m*x + t, offsetList_, 1./slopeList_)\n try:\n if plot:\n plt.plot(offsetList[i], popt[0]*offsetList_ + popt[1], color=getColor('tab20', len(ToT), i))\n except:\n pass\n \n calibSlopeList.append( popt[0] )\n calibOffsetList.append( popt[1] )\n\n if plot:\n plt.xlabel('Offset (ToT)')\n plt.ylabel('Slope (ToT/DAC)')\n plt.tight_layout()\n plt.show()\n \n # Histogram of slope and offset distribution\n plt.hist(calibSlopeList, bins=50)\n plt.xlabel('Slope (ToT/DAC)')\n plt.ylabel('Counts')\n plt.show()\n \n plt.hist(calibOffsetList, bins=50)\n plt.xlabel('Offset (ToT)')\n plt.ylabel('Counts')\n\n # Check goodness of fit\n meanListTotal, stdListTotal = [], []\n realMeanListTotal, realStdListTotal = [], []\n for energy in sorted(list(set(energy_))):\n # Filter data by energies\n energyCond = (energy_ == energy)\n time, temp, tempErr, ToT, ToTErr = time_[energyCond], temp_[energyCond], tempErr_.T[energyCond].T, ToT_.T[energyCond].T, ToTErr_.T[energyCond].T\n \n fig, ax = plt.subplots(1, 2, figsize=(12, 5), sharey=True)\n\n # Loop over pixels\n meanList, stdList = [], []\n 
realMeanList, realStdList = [], []\n \n # print ToT, calibSlopeList, calibOffsetList\n for i in range(len(ToT)):\n realToT = getRealToT(ToT[i], np.asarray(temp), offsettemp, calibSlopeList[i], calibOffsetList[i])\n # print realToT\n\n # ax.hist(realToT, color=getColor('tab20', 16, i))\n ax[0].plot(ToT[i], temp, color=getColor('tab20', 16, i % 16), alpha=.5)\n ax[1].plot(realToT, temp, color=getColor('tab20', 16, i % 16))\n\n # Calculate mean and std\n realMeanList.append( np.mean(realToT) ), realStdList.append( np.std(realToT) )\n meanList.append( np.mean(ToT[i]) ), stdList.append( np.std(ToT[i]) )\n \n meanListTotal.append( meanList )\n realMeanListTotal.append( realMeanList )\n stdListTotal.append( stdList )\n realStdListTotal.append( realStdList )\n\n ax[0].set_title('Before correction')\n ax[1].set_title('After correction')\n ax[0].set_xlabel(r'$\\mu_{\\mathrm{ToT}}$')\n ax[0].set_ylabel(r'Temperature (DAC)')\n ax[1].set_xlabel(r'$\\mu_{\\mathrm{ToT}}$')\n\n ax[0].set_xlim(0, 800)\n ax[1].set_xlim(0, 800)\n plt.tight_layout()\n plt.show()\n plt.clf()\n\n # Plot mean and std\n meanListTotal, stdListTotal = np.asarray(meanListTotal).T, np.asarray(stdListTotal).T\n realMeanListTotal, realStdListTotal = np.asarray(realMeanListTotal).T, np.asarray(realStdListTotal).T\n offsetList = np.asarray(offsetList)\n # print offsetList\n\n for i in range(16):\n figMeanStd, axMeanStd = plt.subplots(2, 1, figsize=(5, 5), gridspec_kw={'height_ratios': (3, 1)}, sharex=True)\n\n axMeanStd[0].plot(offsetList[i], realStdListTotal[i], marker='x')\n axMeanStd[0].plot(offsetList[i], stdListTotal[i], marker='x')\n axMeanStd[1].plot(offsetList[i], (stdListTotal[i] - realStdListTotal[i]) / stdListTotal[i], marker='x')\n\n axMeanStd[1].set_xlabel(r'ToT$_\\mathrm{offset}$')\n axMeanStd[0].set_ylabel(r'$\\sigma_\\mathrm{ToT}$')\n axMeanStd[1].set_ylabel(r'$(\\sigma_\\mathrm{ToT} - \\sigma_\\mathrm{ToT, real}) / \\sigma_\\mathrm{ToT}$')\n axMeanStd[1].axhline(y=0, color='k', ls='--', lw='.9', alpha=.5)\n\n axMeanStd[0].set_title('Pixel #%d' % i)\n plt.tight_layout()\n if outdir:\n plt.savefig(outdir + '/pixel%s.pdf' % i)\n plt.show()\n \n # Return calibrated slopes and offsets for all pixels. Included is the offset temperature\n # for which the calibration was performed. 
By inserting these values into getRealToT, a \n # transformation from a measured ToT value at a certain temperature to the corresponding\n # ToT value at the offset temperature is provided.\n outDict = {'slope': calibSlopeList, 'offset': calibOffsetList, 'Toffset': offsettemp}\n return outDict\n \ndef getData(d, cuttemp=0):\n # Get data\n temp, tempErr = np.asarray(d['temp']), np.asarray(d['tempErr'])\n ToT, ToTErr = np.asarray(d['ToT']), np.asarray(d['ToTErr'])\n # print ToT\n time = np.asarray(d['time'])\n \n # Old files do not have the energy key\n try:\n energy = np.asarray(d['energy'])\n except:\n energy = np.zeros(len(time))\n energy.fill(np.nan)\n\n # Get minimum dimension\n dim = min(len(temp), len(ToT), len(time))\n\n # Cut on temperature\n temp = temp[:dim]\n ToT, ToTErr = ToT[:dim][temp > cuttemp].T, ToTErr[:dim][temp > cuttemp].T\n time, energy = time[:dim][temp > cuttemp], energy[:dim][temp > cuttemp]\n tempErr, temp = tempErr[:dim][temp > cuttemp], temp[:dim][temp > cuttemp]\n\n return time, energy, temp, tempErr, ToT, ToTErr\n\ndef linear(p, x):\n m, t = p\n return m*x + t\n\ndef heating(x, tmax, toff, Tmax, tau1, tau2, offset1, offset2):\n return np.where(x <= toff, offset1, np.where(x <= tmax, (Tmax - offset1)*(1 - np.exp(-tau1*(x - toff))) + offset1, (Tmax - offset2)*np.exp(-tau2*(x - tmax)) + offset2))\n\ndef getRealToT(x, T, Toff, m, t):\n return -(t*(Toff - T) + x) / (m*(Toff - T) - 1)\n\ndef getColor(c, N, idx):\n import matplotlib as mpl\n cmap = mpl.cm.get_cmap(c)\n norm = mpl.colors.Normalize(vmin=0.0, vmax=N - 1)\n return cmap(norm(idx))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"dpx_func_python/plotTemperatureToT.py","file_name":"plotTemperatureToT.py","file_ext":"py","file_size_in_byte":11304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"647315095","text":"from collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n # if not root:\n # return 0\n # if root.left and root.right:\n # return 1 + min(self.minDepth(root.left), self.minDepth(root.right))\n # return 1 + max(self.minDepth(root.left), self.minDepth(root.right))\n if not root:\n return 0\n queue = deque([(root, 1)])\n while queue:\n node, depth = queue.popleft()\n if not node.left and not node.right:\n return depth\n for child in [node.left, node.right]:\n if not child:\n continue\n queue.append((child, depth + 1))\n","sub_path":"LeetCode31DaysChallenge-202010/Minimum Depth of Binary Tree.py","file_name":"Minimum Depth of Binary Tree.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30912951","text":"import os\nimport re\nimport json\nimport shutil\nimport tempfile\nimport subprocess\n\nimport click\n\n\nconfig_override_template = '''\\\nimport os\nimport sys\n\nsys.path.insert(0, %(theme_path)r)\n#__import__('pocoo_theme_support')\nsys.path[:] = [os.path.abspath(x) for x in sys.path]\n\n# Source the old file and ensure the paths are setup correctly afterwards\n_old_file = __file__\n__file__ = 'conf.py'\n_here = os.getcwd()\n_real_path = %(real_path)r\nos.chdir(_real_path)\nexecfile('conf.py')\nsys.path[:] = [os.path.abspath(x) for x in sys.path]\nos.chdir(_here)\nhtml_static_path = [os.path.join(_real_path, _x) for _x in 
html_static_path]\n__file__ = _old_file\n\nhtml_additional_pages = dict(globals().get('html_additional_pages') or {})\nhtml_additional_pages['404'] = '404.html'\n\n# Overrides\nhtml_favicon = None\nproject = %(project)r\nversion = %(version)r\n\ntemplates_path = []\nhtml_title = '%%s Documentation (%%s)' %% (project, version)\nhtml_theme = %(theme)r\nhtml_theme_options = {}\nhtml_theme_path = [%(theme_path)r]\nhtml_sidebars = %(sidebars)r\nhtml_context = %(context_vars)r\n\npygments_style = %(pygments_style)r\n'''\n\nnginx_template = '''\\\nlocation %(doc_url)s {\n alias %(doc_path)s;\n\n rewrite ^%(doc_url_escaped)s/?$ $(doc_url)s/latest/ redirect;\n\n set $doc_path _;\n if ($request_uri ~* \"^%(doc_url_escaped)s/latest(|/[^?]*?)$\") {\n set $doc_path $1;\n }\n if (-f /srv/websites/flask.pocoo.org/docs/0.10$doc_path/index.html) {\n return 302 /docs/0.10$doc_path;\n }\n if (-f /srv/websites/flask.pocoo.org/docs/dev$doc_path/index.html) {\n return 302 /docs/dev$doc_path;\n }\n}\n'''\n\n\ndef build_context_vars(this_version, config):\n versions = []\n warning = None\n\n for version in config['versions']:\n is_current = this_version == version['slug']\n versions.append({\n 'slug': version['slug'],\n 'title': version['title'],\n 'note': version.get('note'),\n 'is_current': is_current,\n })\n if is_current:\n warning = version.get('warning')\n\n return {\n 'documentation_versions': versions,\n 'documentation_version_warning': warning,\n }\n\n\ndef ensure_checkout(checkout_folder, repo_url):\n try:\n os.makedirs(checkout_folder)\n except OSError:\n pass\n\n url, branch = repo_url.rsplit('@', 1)\n if os.path.isdir(os.path.join(checkout_folder, '.git')):\n subprocess.Popen([\n 'git', 'fetch', 'origin',\n '%s:%s' % (branch, branch),\n '--update-head-ok',\n '--depth', '1',\n ], cwd=checkout_folder).wait()\n subprocess.Popen([\n 'git', 'reset', '--hard',\n ], cwd=checkout_folder).wait()\n subprocess.Popen([\n 'git', 'checkout', branch,\n ], cwd=checkout_folder).wait()\n else:\n subprocess.Popen([\n 'git', 'clone',\n '--depth', '1',\n '--branch', branch,\n url,\n checkout_folder\n ]).wait()\n\n\ndef build_version(config, version_config, output_folder, checkout_folder):\n version_checkout_folder = os.path.join(\n checkout_folder, str('%s-%s' % (config['id'],\n version_config['slug'])))\n\n ensure_checkout(version_checkout_folder, version_config['repo'])\n doc_source_path = os.path.join(version_checkout_folder,\n str(config['doc_path']))\n\n config_path = tempfile.mkdtemp(prefix='.versionoverlay')\n context_vars = build_context_vars(version_config['slug'], config)\n\n try:\n with open(os.path.join(config_path, 'conf.py'), 'w') as f:\n f.write(config_override_template % {\n 'project': config['name'],\n 'version': '.'.join(version_config['version'].split('.')[:2]),\n 'release': version_config['version'],\n 'real_path': os.path.abspath(doc_source_path),\n 'theme_path': config['theme_path'],\n 'theme': config.get('theme') or 'pocoo',\n 'pygments_style': config.get('pygments_style')\n or 'pocoo_theme_support.PocooStyle',\n 'sidebars': config.get('sidebars') or {},\n 'context_vars': context_vars,\n } + '\\n')\n\n # Make sure the checkout is added to the pythonpath before Sphinx\n # invokes as Sphinx itself depends on Jinja2 for instance.\n env = dict(os.environ)\n env['PYTHONPATH'] = os.path.abspath(version_checkout_folder)\n\n for builder in'dirhtml', 'json':\n subprocess.Popen([\n 'sphinx-build',\n '-d', os.path.join(doc_source_path, '.doctrees'),\n '-b', builder,\n '-c', config_path,\n '.',\n 
os.path.abspath(output_folder),\n ], cwd=doc_source_path, env=env).wait()\n finally:\n try:\n shutil.rmtree(config_path)\n except (OSError, IOError):\n pass\n\n\ndef load_config(ctx, param, filename):\n try:\n with open(filename) as f:\n cfg = json.load(f)\n except IOError as e:\n raise click.BadParameter('Could not load config: %s' % e)\n cfg['base_path'] = os.path.abspath(os.path.dirname(filename))\n cfg['theme_path'] = os.path.join(\n cfg['base_path'], cfg.get('theme_path', './themes'))\n return cfg\n\n\n@click.group()\ndef cli():\n \"\"\"A wrapper around sphinx-build.\"\"\"\n\n\n@cli.command()\n@click.option('--config', type=click.Path(), required=True,\n callback=load_config,\n help='The path to the documentation config file.')\n@click.option('--checkout-folder', type=click.Path(),\n default='checkouts')\n@click.option('--output', '-O', type=click.Path(), default=None,\n help='The path to the output folder.')\ndef build(config, checkout_folder, output):\n \"\"\"Builds all documentation.\"\"\"\n if output is None:\n output = 'build/%s' % str(config['id'])\n\n for version_cfg in config['versions']:\n build_version(config, version_cfg,\n os.path.join(output, str(version_cfg['slug'])),\n checkout_folder)\n\n\n@cli.command('nginx-config')\n@click.option('--config', type=click.Path(), required=True,\n callback=load_config,\n help='The path to the documentation config file.')\n@click.option('--url-prefix', default='/docs',\n help='The URL prefix for the documentation.')\n@click.option('--path', type=click.Path(),\n help='The path to the documentation on the filesystem.')\ndef nginx_config(url_prefix, path, config):\n \"\"\"Spits out an nginx config for the given project that is ready\n for inclusion. This is useful because the docs have links to the\n latest version of the docs but it requires webserver interaction\n to support that pseudo URL.\n \"\"\"\n escaped_prefix = re.escape(url_prefix)\n if path is None:\n path = os.path.abspath('build/%s' % str(config['id']))\n\n try_versions = []\n for version in config['versions']:\n t = version.get('type')\n if t == 'stable':\n try_versions.append((0, version['slug']))\n elif t == 'unstable':\n try_versions.append((1, version['slug']))\n try_versions.sort()\n\n w = click.echo\n\n w('location %s {' % url_prefix)\n w(' alias %s;' % path)\n w(' rewrite ^%s/?$ %s/latest/ redirect;' % (escaped_prefix, url_prefix))\n w()\n w(' set $doc_path XXX;')\n w(' if ($request_uri ~* \"^%s/latest(|/[^?]*?)$\") {' % escaped_prefix)\n w(' set $doc_path $1;')\n w(' }')\n for _, version in try_versions:\n w()\n w(' if (-f %s/%s$doc_path/index.html) {' % (path, version))\n w(' return 302 %s/%s$doc_path;' % (url_prefix, version))\n w(' }')\n w()\n w(' error_page 404 %s/404/index.html' % url_prefix)\n w('}')\n","sub_path":"docbuilder.py","file_name":"docbuilder.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37642739","text":"#!/usr/bin/env python\nimport os\nimport sys\nfrom fl0w3r import error, run\n'''reimplemented 20170617\n\nprogram based on cv.c:\n\nUsage: cv [input file name] [input data type] [samples]\n [lines] [bands] [output file name] [output data type]\n'''\nargs = sys.argv\nif len(args) != 4:\n msg = \"cv.py: Convert between ENVI data types. 
Updated 200905 by AR.\"\n    msg += \"\nUsage: cv [input file name] [outfile name] [output data type]\"\n    error(msg + \"\nheader file called [input file name].hdr needed\")\n\ninfile, outfile = args[1], args[2]\nouttype = int(float(args[3]))\ninphdr, outhdr = infile[:-4] + \".hdr\", outfile[:-4] + \".hdr\"\n\nflines = open(inphdr).readlines()\n\n# open output file\ng = open(outhdr, \"w\")\n\nsamples, lines, bands, old = 0, 0, 0, 0\nfor _l in flines:\n    line = _l.strip()\n\n    if(len(line.split(\"samples\")) == 2):\n        ll = line.split(\"samples\")[1].strip().strip(\"=\")\n        samples = int(ll)\n\n    if(len(line.split(\"lines\")) == 2):\n        ll = line.split(\"lines\")[1].strip().strip(\"=\")\n        lines = int(ll)\n\n    if(len(line.split(\"bands\")) == 2):\n        ll = line.split(\"bands\")[1].strip().strip(\"=\")\n        bands = int(ll)\n\n    if(len(line.split(\"data type\")) == 2):\n        # is data type line\n        ll = line.split(\"data type\")[1].strip().strip(\"=\")\n        old = int(ll)\n        g.write(\"data type = \" + str(outtype) + \"\\n\")\n    else:\n        # is some other kind of line (any kind)\n        g.write(line + \"\\n\")\n\ng.close()\n\nif(samples * lines * bands == 0):\n    error(\"Error reading samples lines or bands from header file.\\n\")\n\nrun(\"cv \" + infile + \" \" + str(old) + \" \" + str(samples) + \" \" + str(lines) +\n    \" \" + str(bands) + \" \" + outfile + \" \" + str(outtype))\n","sub_path":"src/cvn.py","file_name":"cvn.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"28441624","text":"\r\ndef main(file_input, file_output):\r\n    # read input\r\n    # n = number of players\r\n    # k = number of opponents each player faces\r\n    # value = list tuple of players' info\r\n    n, k, value, avgPlayerScore, minPlayerScore = readInput(file_input)\r\n\r\n    #check\r\n    print(\"avgScore\"+str(avgPlayerScore))\r\n    print(value)\r\n\r\n    # have a list of all opponent points for each player\r\n    # listOpponent = [] # [ [(score1, 1), (score2, 2)] , [(score2, 2) , (score3, 3) ...] 
, ...]\r\n # listSumScore = [] # [sum1, sum2, sum3, ...]\r\n listOpponent, listSumScore = initialize(value, k)\r\n i = 0\r\n checkedMaxMin = []\r\n\r\n #check\r\n print(listOpponent)\r\n # print(listSumScore)\r\n\r\n i = matchScheduling(n, k, listOpponent, listSumScore, avgPlayerScore, minPlayerScore, checkedMaxMin, i)\r\n\r\n # print(listOpponent)\r\n print(listSumScore)\r\n print(i)\r\n # print the list of opponents as a valid solution.\r\n writeOutput(listOpponent, file_output)\r\n\r\n\r\ndef stopCondition(n, listSumScore, maxIndex, minIndex, avgPlayerScore, minPlayerScore):\r\n isOver = False\r\n if n>100:\r\n if listSumScore[maxIndex]-listSumScore[minIndex] < ((avgPlayerScore/2) + 1 ):\r\n print(str(listSumScore[maxIndex]-listSumScore[minIndex])+\"why\"+str(avgPlayerScore/2)) \r\n isOver = True\r\n else:\r\n if listSumScore[maxIndex]-listSumScore[minIndex] < (minPlayerScore + 1 ):\r\n print(str(listSumScore[maxIndex]-listSumScore[minIndex])+\"why\"+str(avgPlayerScore/2)) \r\n isOver = True\r\n \r\n if listSumScore[maxIndex] == listSumScore[minIndex]:\r\n isOver = True\r\n return isOver\r\n\r\n\r\ndef repeating(listOpponent, index, opponent):\r\n for other in listOpponent[index]:\r\n if other[1] == opponent[1]:\r\n return True\r\n return False\r\n\r\n\r\ndef calPoint(point, newMaxScore, newMinScore, idealScore, listSumScore, maxIndex, minIndex):\r\n if newMaxScore > idealScore:\r\n point += listSumScore[maxIndex] - newMaxScore \r\n else:\r\n point += newMaxScore - listSumScore[minIndex]\r\n if newMinScore > idealScore:\r\n point += listSumScore[maxIndex] - newMinScore\r\n else:\r\n point += newMinScore - listSumScore[minIndex]\r\n return point\r\n\r\n\r\ndef matchScheduling(n, k, listOpponent, listSumScore, avgPlayerScore, minPlayerScore, checkedMaxMin, i):\r\n while (True):\r\n i+=1 #check number of max-min processed\r\n\r\n maxIndex, minIndex = maxMin(listSumScore,checkedMaxMin) # e.x. [10, 20, 30] min 0 max 2\r\n checkedMaxMin.insert(0,(maxIndex,minIndex))\r\n\r\n #check\r\n print(\"max\"+str(maxIndex)+\" \"+str(listSumScore[maxIndex])+\" \"+str(listSumScore[minIndex]))\r\n print(minIndex)\r\n\r\n #Condition to stop\r\n isFinish = stopCondition(n, listSumScore, maxIndex, minIndex, avgPlayerScore, minPlayerScore)\r\n if isFinish == True:\r\n break\r\n\r\n scoreDiff = listSumScore[maxIndex] - listSumScore[minIndex]\r\n idealScore = (listSumScore[maxIndex] + listSumScore[minIndex])/2\r\n # print(scoreDiff)\r\n\r\n bestDiff = -1\r\n indexOpponentMax = -1\r\n indexOpponentMin = -1\r\n # an array [ (score1, 1), ... 
]\r\n # listOpponent[maxIndex] is a list have highest sum of score [(score1, 1), (score2, 2)], opponentMax maybe (score1, 1)\r\n for opponentMax in listOpponent[maxIndex]:\r\n # print(\"check5\")\r\n if opponentMax[1] == (minIndex+1):\r\n continue\r\n if repeating(listOpponent, minIndex, opponentMax) == True:\r\n continue\r\n \r\n # print(\"check2\")\r\n for opponentMin in listOpponent[minIndex]:\r\n if opponentMin[1] == (maxIndex+1):\r\n continue\r\n if opponentMin[0] < opponentMax[0]:\r\n if (opponentMax[0] - opponentMin[0]) > scoreDiff:\r\n continue\r\n if repeating(listOpponent, maxIndex, opponentMin) == True:\r\n continue\r\n\r\n #new way to evaluate\r\n newMaxScore = listSumScore[maxIndex] - opponentMax[0] + opponentMin[0]\r\n newMinScore = listSumScore[minIndex] - opponentMin[0] + opponentMax[0]\r\n point = 0 #if newMaxScore > oldMaxScore, point is negative, bad effect, oldMaxScore -newMaxScore = score lost \r\n point = calPoint(point, newMaxScore, newMinScore, idealScore, listSumScore, maxIndex, minIndex)\r\n\r\n #need to check the affected nodes\r\n affectedPlayerMax = listOpponent[maxIndex][listOpponent[maxIndex].index(opponentMax)][1] - 1\r\n affectedPlayerMin = listOpponent[minIndex][listOpponent[minIndex].index(opponentMin)][1] - 1\r\n indexBeChangedMax = -1\r\n indexBeChangedMin = -1\r\n for opponent in listOpponent[affectedPlayerMax]:\r\n if opponent[1] == maxIndex+1:\r\n indexBeChangedMax = listOpponent[affectedPlayerMax].index(opponent)\r\n for opponent in listOpponent[affectedPlayerMin]:\r\n if opponent[1] == minIndex+1:\r\n indexBeChangedMin = listOpponent[affectedPlayerMin].index(opponent)\r\n\r\n if listOpponent[affectedPlayerMax][indexBeChangedMax][1] == affectedPlayerMin + 1:\r\n continue\r\n if listOpponent[affectedPlayerMin][indexBeChangedMin][1] == affectedPlayerMax + 1:\r\n continue\r\n \r\n if repeating(listOpponent, affectedPlayerMax, listOpponent[affectedPlayerMin][indexBeChangedMin]) == True:\r\n continue\r\n if repeating(listOpponent, affectedPlayerMin, listOpponent[affectedPlayerMax][indexBeChangedMax]):\r\n continue\r\n\r\n #new way to evaluate\r\n idealScoreAffected = (listSumScore[affectedPlayerMax] + listSumScore[affectedPlayerMin])/2\r\n newMaxScoreAffected = listSumScore[affectedPlayerMax] - listOpponent[affectedPlayerMax][indexBeChangedMax][0] + listOpponent[affectedPlayerMin][indexBeChangedMin][0]\r\n newMinScoreAffected = listSumScore[affectedPlayerMin] - listOpponent[affectedPlayerMin][indexBeChangedMin][0] + listOpponent[affectedPlayerMax][indexBeChangedMax][0]\r\n if listSumScore[affectedPlayerMax] > listSumScore[affectedPlayerMin]:\r\n point = calPoint(point, newMaxScoreAffected, newMinScoreAffected, idealScoreAffected, listSumScore, affectedPlayerMax, affectedPlayerMin)\r\n else:\r\n point = calPoint(point, newMaxScoreAffected, newMinScoreAffected, idealScoreAffected, listSumScore, affectedPlayerMin, affectedPlayerMax)\r\n\r\n if point <= 0:\r\n continue\r\n\r\n if bestDiff == -1:\r\n bestDiff = point\r\n indexOpponentMax = listOpponent[maxIndex].index(\r\n opponentMax)\r\n indexOpponentMin = listOpponent[minIndex].index(\r\n opponentMin)\r\n elif point > bestDiff:\r\n bestDiff = point\r\n indexOpponentMax = listOpponent[maxIndex].index(\r\n opponentMax)\r\n indexOpponentMin = listOpponent[minIndex].index(\r\n opponentMin)\r\n # print(str(bestDiff)+\" max \"+str(indexOpponentMax)+\" min \"+str(indexOpponentMin))\r\n if bestDiff != -1:\r\n\r\n # only change affected, not evaluate\r\n affectedPlayerMax = 
listOpponent[maxIndex][indexOpponentMax][1] - 1\r\n affectedPlayerMin = listOpponent[minIndex][indexOpponentMin][1] - 1\r\n\r\n indexBeChangedMax = -1\r\n indexBeChangedMin = -1\r\n\r\n for opponent in listOpponent[affectedPlayerMax]:\r\n if opponent[1] == maxIndex+1:\r\n indexBeChangedMax = listOpponent[affectedPlayerMax].index(\r\n opponent)\r\n\r\n for opponent in listOpponent[affectedPlayerMin]:\r\n if opponent[1] == minIndex+1:\r\n indexBeChangedMin = listOpponent[affectedPlayerMin].index(\r\n opponent)\r\n\r\n listSumScore[affectedPlayerMax] = listSumScore[affectedPlayerMax] - \\\r\n listOpponent[affectedPlayerMax][indexBeChangedMax][0] + \\\r\n listOpponent[affectedPlayerMin][indexBeChangedMin][0]\r\n listSumScore[affectedPlayerMin] = listSumScore[affectedPlayerMin] - \\\r\n listOpponent[affectedPlayerMin][indexBeChangedMin][0] + \\\r\n listOpponent[affectedPlayerMax][indexBeChangedMax][0]\r\n\r\n listSumScore[maxIndex] = listSumScore[maxIndex] - \\\r\n listOpponent[maxIndex][indexOpponentMax][0] + \\\r\n listOpponent[minIndex][indexOpponentMin][0]\r\n listSumScore[minIndex] = listSumScore[minIndex] - \\\r\n listOpponent[minIndex][indexOpponentMin][0] + \\\r\n listOpponent[maxIndex][indexOpponentMax][0]\r\n\r\n listOpponent[affectedPlayerMax][indexBeChangedMax], listOpponent[affectedPlayerMin][indexBeChangedMin] = listOpponent[\r\n affectedPlayerMin][indexBeChangedMin], listOpponent[affectedPlayerMax][indexBeChangedMax]\r\n\r\n listOpponent[minIndex][indexOpponentMin], listOpponent[maxIndex][indexOpponentMax] = listOpponent[\r\n maxIndex][indexOpponentMax], listOpponent[minIndex][indexOpponentMin]\r\n #print(listOpponent) \r\n return i \r\n\r\n\r\ndef writeOutput(listOpponent, file_output):\r\n f = open(file_output, 'w')\r\n for player in listOpponent:\r\n print(str(listOpponent.index(player)+1)+\": \", end=\" \")\r\n for opponent in player:\r\n f.write(str(opponent[1])+'\\n')\r\n print(str(opponent[1]), end=\" \")\r\n print(\" \")\r\n f.close()\r\n\r\n\r\ndef maxMin(listSumScore, usedList):\r\n max = listSumScore[0]\r\n min = listSumScore[0]\r\n\r\n maxIndex = 0\r\n minIndex = 0\r\n\r\n i = 0\r\n for score in listSumScore:\r\n isUsed = False\r\n if score >= max:\r\n for used in usedList:\r\n if used[0] == i:\r\n if used[1] == minIndex:\r\n isUsed = True\r\n break\r\n if isUsed == False: \r\n max = score\r\n maxIndex = i\r\n elif score <= min:\r\n for used in usedList:\r\n if used[1] == i:\r\n if used[0] == maxIndex:\r\n isUsed = True\r\n break\r\n if isUsed == False:\r\n min = score\r\n minIndex = i\r\n i += 1\r\n\r\n return maxIndex, minIndex\r\n\r\ndef readInput(file_input):\r\n f = open(file_input, 'r')\r\n\r\n firstline = f.readline()\r\n n, k = firstline.split(\" \")\r\n n = int(n)\r\n k = int(k)\r\n # number of players, number of opponents each player faces\r\n value = []\r\n\r\n avgPlayerScore = 0\r\n minPlayerScore = -1\r\n i = 1\r\n # add value as tuple (value, position) into a list\r\n for line in f:\r\n if i>n:\r\n break\r\n if minPlayerScore == -1:\r\n minPlayerScore = int(line)\r\n elif minPlayerScore > int(line):\r\n minPlayerScore = int(line)\r\n avgPlayerScore += int(line)\r\n value.append((int(line), i))\r\n i += 1\r\n avgPlayerScore /= n \r\n return n, k, value, avgPlayerScore, minPlayerScore\r\n\r\ndef initialize(list, k):\r\n listOpponent = []\r\n listSumScore = []\r\n for i in range(len(list)):\r\n j = 1\r\n sumOfOpponents = 0\r\n opponents = []\r\n half = k/2\r\n\r\n if k % 2 == 0:\r\n while j <= half:\r\n index = i+j\r\n if index > (len(list)-1):\r\n index = 
index - len(list)\r\n sumOfOpponents += list[index][0]\r\n opponents.append(list[index])\r\n\r\n index = i-j\r\n if index < 0:\r\n index = len(list) + index\r\n sumOfOpponents += list[index][0]\r\n opponents.append(list[index])\r\n\r\n j += 1\r\n\r\n else:\r\n # opposite node\r\n index = i+int(len(list)/2)\r\n if index > (len(list)-1):\r\n index = index - len(list)\r\n sumOfOpponents += list[index][0]\r\n opponents.append(list[index])\r\n\r\n while j <= half:\r\n # nearest next nodes\r\n index = i+j\r\n if index > (len(list)-1):\r\n index = index - len(list)\r\n sumOfOpponents += list[index][0]\r\n opponents.append(list[index])\r\n\r\n # nearest previous nodes\r\n index = i-j\r\n if index < 0:\r\n index = len(list) + index\r\n sumOfOpponents += list[index][0]\r\n opponents.append(list[index])\r\n\r\n j += 1\r\n\r\n listOpponent.append(opponents)\r\n listSumScore.append(sumOfOpponents)\r\n\r\n return listOpponent, listSumScore\r\n\r\n\r\nmain('input.txt', 'output.txt')\r\n","sub_path":"matchScheduling.py","file_name":"matchScheduling.py","file_ext":"py","file_size_in_byte":13456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411754040","text":"import os\nimport glob\nfrom time import time, sleep\nimport RPi.GPIO as GPIO\nimport csv\nimport pandas as pd\nfrom datetime import datetime\nfrom numpy.random import normal\n\n#Initialize GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nFMPIN = 6 #flow meter GPIO pin\nVPIN = 12 #valve GPIO pin\nHEPIN = 25 #heating element pin\nTSPIN = 23 #temperature sensor pin\nGPIO.setup(FMPIN, GPIO.IN, GPIO.PUD_UP) #setup flow meter pin as input\nGPIO.setup(VPIN, GPIO.OUT, initial=GPIO.LOW) #setup valve pin as output\nGPIO.add_event_detect(FMPIN, GPIO.RISING) #add rising edge detection\nGPIO.setup(HEPIN, GPIO.OUT, initial=GPIO.LOW) #setup heating element pin as output\nGPIO.setup(TSPIN, GPIO.IN, pull_up_down=GPIO.PUD_UP) #setup temp. sensor pin as input\n\n#Initialize temperature sensor\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28-00043e9dc3ff')[0]\ndevice_file = device_folder + '/w1_slave'\n\n#Define functions for reading from temperature sensor\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\ndef read_temp():\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp_f\n\n#Define function to draw water\ndef draw_water(target):\n if target <= 0:\n print('No draw for this hour')\n return()\n print ('Drawing %.2f gallon(s).' % target)\n volume = 0\n numPulses = 0\n start_time = time()\n GPIO.output(VPIN, GPIO.HIGH) #open valve\n while volume < target: #keep valve open until desired volume has passed\n if GPIO.event_detected(FMPIN):\n numPulses += 1 #Count pulses from flow meter\n volume = float(numPulses) / 476 #Calculate volume\n run_time = time()\n elapsed_time = run_time - start_time\n if elapsed_time > 50:\n print('Timeout Error.')\n break\n GPIO.output(VPIN, GPIO.LOW) #close valve\n print ('Volume drawn: %.2f gallon(s).' 
% volume)\n\n#Read csv file with daily usage profile (one column for hours, one for gallons)\ndp = pd.read_csv('DailyProfile.csv')\nhours = []\ngallons = []\nrow = 0\nwhile row < len(dp):\n hours.append(dp.get_value(row,'Hour '))\n gallons.append(dp.get_value(row,'gallons'))\n row += 1\n\nstate = 0 #Variable to mark heating element state (0 is off)\n#Enter main program loop\nwhile True:\n now = datetime.now() #Update date/time\n filename = 'WH_Data_' + str(now.month) + '-' + str(now.day) + '-' + str(now.year) + '.csv'\n if not os.path.isfile(filename):\n data = open(filename, 'w')\n data.write('Time,Temperature\\n')\n data.close\n print('Creating new data file for ' + str(now.month) + '-' + str(now.day) + '-' + str(now.year))\n \n hour = float(now.hour) + float(now.minute)/60.0\n date = str(now.month) + '-' + str(now.day) + '-' + str(now.year)\n \n #Read temperature sensor, and adjust heating element if too hot or cold\n temp = read_temp()\n if temp > 140 and state == 1:\n GPIO.output(HEPIN, GPIO.LOW)\n state = 0\n print('Temperature has exceeded 120 degrees - turning off heating element.')\n elif temp < 118 and state == 0:\n GPIO.output(HEPIN, GPIO.HIGH)\n state = 1\n print('Temperature is below 118 degrees - turning on heating element.')\n\n #Log data every minute\n if now.minute % 1 == 0 and now.second == 0:\n data = open(filename, 'a')\n data.write(str(now.hour) + ':' + str(now.minute) + ',' + str(temp) + '\\n')\n data.close\n print('Logging data: %.2f degrees Fahrenheit at %g:%g' % (temp,now.hour,now.minute)) \n sleep(1)\n \n #Draw water at the start of each hour\n for i in range(len(hours)):\n if hours[i] == now.hour and now.minute == 0 and now.second < 2:\n draw_amount = gallons[i]\n if draw_amount > 0:\n draw_amount = normal(draw_amount, draw_amount*0.25) #randomize draw over Gaussian distribution\n draw_water(draw_amount) #Draw scheduled volume for current hour\n sleep(2) #Wait two seconds to prevent draw_water call from repeating\n","sub_path":"LeightonProgams1/WH_Controller_1.3.py","file_name":"WH_Controller_1.3.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"36639534","text":"import numpy as np\r\nfrom scipy.optimize import fsolve\r\n\r\nfrom eos import VanDerWaalsEos, PengRobinsonEos, SoaveRedlichKwongEos\r\nfrom flash import ss_flash, flash_residual_function\r\nfrom stability import calculate_stability_test\r\nfrom utils import calculate_K_values_wilson\r\n\r\n\r\ndef input_properties_case_whitson_problem_18_PR():\r\n '''\r\n TEST PROBLEM PHASE BEHAVIOUR WHITSON PROBLEM 18 APPENDIX\r\n\r\n Methane, Butane and Decane (C1, C4 and C10).\r\n\r\n Properties for the Van der Waals Equation of State.\r\n\r\n '''\r\n temperature = (280.0 + 459.67) * 5.0 / 9.0\r\n pressure = 500.0 * 6894.75729\r\n\r\n critical_pressure = 6894.75729 * np.array([667.8, 550.7, 304.0]) # [atm]\r\n critical_temperature = (5.0 / 9.0) * np.array([343.0, 765.3, 1111.8]) # [K]\r\n acentric_factor = np.array([0.011500, 0.192800, 0.490200]) # [-]\r\n molar_mass = 0.001 * np.array([16.04, 58.12, 142.29]) # [g/mol]\r\n omega_a = 0.45724 * np.array([1.0, 1.0, 1.0]) # [-]\r\n omega_b = 0.07780 * np.array([1.0, 1.0, 1.0]) # [-]\r\n\r\n binary_interaction = np.array(\r\n [[0.000000, 0.000000, 0.000000],\r\n [0.000000, 0.000000, 0.000000],\r\n [0.000000, 0.000000, 0.000000]]\r\n )\r\n\r\n global_molar_fractions = np.array([0.5, 0.42, 0.08])\r\n\r\n return (pressure, temperature, global_molar_fractions,\r\n 
critical_pressure, critical_temperature, acentric_factor,\r\n molar_mass, omega_a, omega_b, binary_interaction)\r\n\r\n\r\n\r\n\r\ndef test_phase_equilibria():\r\n # Get input properties\r\n #props = input_properties_case_7_psudocomponents()\r\n props = input_properties_case_whitson_problem_18_PR()\r\n #props = input_properties_case_whitson_problem_18_SRK()\r\n #props = input_properties_case_whitson_problem_18_VDW()\r\n\r\n (pressure, temperature, global_molar_fractions,\r\n critical_pressure, critical_temperature, acentric_factor,\r\n molar_mass, omega_a, omega_b, binary_interaction) = props\r\n\r\n #temperature = 350.0 # [K]\r\n #pressure = 50.0 * 1e5 # [Pa]\r\n\r\n # Estimate initial K-values\r\n initial_K_values = calculate_K_values_wilson(\r\n pressure,\r\n temperature,\r\n critical_pressure,\r\n critical_temperature,\r\n acentric_factor\r\n )\r\n\r\n # Create EoS object and set properties\r\n #eos = VanDerWaalsEos, PengRobinsonEos, SoaveRedlichKwongEos\r\n eos = PengRobinsonEos(critical_pressure, critical_temperature, acentric_factor,\r\n omega_a, omega_b, binary_interaction)\r\n\r\n is_stable, K_values_est = calculate_stability_test(\r\n eos,\r\n pressure,\r\n temperature,\r\n global_molar_fractions,\r\n initial_K_values\r\n )\r\n\r\n print ('System is stable?', is_stable)\r\n print ('K_values estimates:', K_values_est)\r\n\r\n K_values_from_ss_flash, F_V, f_L = ss_flash(eos, pressure, temperature, global_molar_fractions, K_values_est, tolerance = 1.0e-1)\r\n\r\n fugacity_expected = np.array([294.397, 148.342, 3.02385]) * 6894.75729\r\n K_values_expected = np.array([6.65071, 0.890061, 0.03624])\r\n x_expected = np.array([0.08588, 0.46349, 0.45064])\r\n y_expected = np.array([0.57114, 0.41253, 0.01633])\r\n \r\n print ('K_values Successive Subst:', K_values_from_ss_flash)\r\n print ('Vapor molar fraction:', F_V)\r\n print ('\\n-----\\nFugacities obtained:', f_L)\r\n print ('Fugacities expected:', fugacity_expected)\r\n\r\n # Use estimates from Wilson's Equation!!!\r\n #x0 = np.append(initial_K_values, F_V) # It does not work!\r\n\r\n # Use estimates from stability test!!!\r\n #x0 = np.append(K_values_est, F_V) # It does not work!\r\n\r\n # Use estimates from successive substitutions!!!\r\n x0 = np.append(K_values_from_ss_flash, F_V) # Good estimate!\r\n\r\n result = fsolve(\r\n func=flash_residual_function,\r\n x0=x0,\r\n args=(temperature, pressure, eos, global_molar_fractions),\r\n )\r\n\r\n size = result.shape[0]\r\n K_values_newton = result[0:size-1]\r\n F_V = result[size-1]\r\n print ('K_values newton:', K_values_newton)\r\n print ('K_values expected:', K_values_expected)\r\n print ('Norm difference:', np.linalg.norm(K_values_expected - K_values_newton))\r\n print ('Vapor molar fraction:', F_V)\r\n\r\n assert np.allclose(K_values_newton, K_values_expected, rtol=0.01)\r\n assert np.allclose(fugacity_expected, f_L, rtol=0.1)\r\n\r\n","sub_path":"phase_equilibria/_test/test_phase_equilibria.py","file_name":"test_phase_equilibria.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321902041","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 6 10:24:27 2019\r\n\r\n@author: 45945\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('seaborn-whitegrid')\r\nimport numpy as np\r\nimport pickle\r\nimport re\r\nimport os\r\nimport math\r\n\r\ndef find_peaks(chromo,upper=200000):\r\n result=[]\r\n with open(\"Stadler_peaks.txt\") as f:\r\n lines=f.readlines()\r\n for line 
in lines:\r\n num=line.split()\r\n if num[0]==\"chr\"+str(chromo):\r\n if int(num[1])=1000 and lst[0]=1000 and lst[1]0: \r\n# count_sum[index]=float(count_sum[index]*2)/(s1+s2+count_sum[index])\r\n \r\n# index+=1\r\n \r\n #way 2 of calculating IS \r\n# count_sum=list(count_sum/np.mean(count_sum))\r\n# count_sum=np.log2(np.add(count_sum,0.1))3\r\n# print(len(count_sum))\r\n if count_sum==0:\r\n return 0\r\n else: \r\n return count_sum*2/(count_sum+s1+s2)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n num_region=[19719 ,18174, 15959 ,15563 ,15253 ,14951 ,15252 ,13173, 12407, 12999, 12184 ,12125 ,12028 ,12519 ,10349, 9831, 9527, 9077 ,6134 ,16664, 290, 0, 0, 0 ]\r\n box_length=20\r\n for ch in range(1,20):\r\n print(ch)\r\n peaks=find_peaks(ch,upper=200000)\r\n result=np.zeros((len(peaks),1171))\r\n for j in range(1171):\r\n contact_map=get_full_map(\"C:/study/HIC/ordered_adj/\"+str(j), ch,upper=20000)\r\n for i in range(len(peaks)):\r\n result[i,j]=find_sum(contact_map,20,peaks[i])\r\n # np.save(\"4_stages_contact.npy\",contact_map)\r\n # target_name=\"4_stages/chr\"+str(ch)+\"_stage\"+str(i)+\"_IS.npy\"\r\n # count_sum=find_sum(contact_map,box_length,total_length,adj_file,target_name) \r\n np.save(\"IS/\"+str(ch),result)\r\n\r\n","sub_path":"Insoluation Score/find_IS.py","file_name":"find_IS.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"347361895","text":"from __future__ import division, absolute_import\n\n__copyright__ = \"Copyright (C) 2013 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport six\nfrom six.moves import range\n\nimport numpy as np\nimport loopy as lp\nimport sympy as sp\nfrom sumpy.tools import KernelCacheWrapper\n\n\n__doc__ = \"\"\"\n\nExpansion-to-particle\n---------------------\n\n.. autoclass:: E2PBase\n.. autoclass:: E2PFromCSR\n.. autoclass:: E2PFromSingleBox\n\n\"\"\"\n\n\n# {{{ E2P base class\n\nclass E2PBase(KernelCacheWrapper):\n def __init__(self, ctx, expansion, kernels,\n options=[], name=None, device=None):\n \"\"\"\n :arg expansion: a subclass of :class:`sympy.expansion.ExpansionBase`\n :arg strength_usage: A list of integers indicating which expression\n uses which source strength indicator. 
This implicitly specifies the\n number of strength arrays that need to be passed.\n Default: all kernels use the same strength.\n \"\"\"\n\n if device is None:\n device = ctx.devices[0]\n\n self.ctx = ctx\n self.expansion = expansion\n self.kernels = kernels\n self.options = options\n self.name = name or self.default_name\n self.device = device\n\n self.dim = expansion.dim\n\n from sumpy.kernel import TargetDerivativeRemover\n tdr = TargetDerivativeRemover()\n for knl in kernels:\n assert tdr(knl) == expansion.kernel\n\n def get_loopy_insns_and_result_names(self):\n from sumpy.symbolic import make_sympy_vector\n bvec = make_sympy_vector(\"b\", self.dim)\n\n from sumpy.assignment_collection import SymbolicAssignmentCollection\n sac = SymbolicAssignmentCollection()\n\n coeff_exprs = [sp.Symbol(\"coeff%d\" % i)\n for i in range(len(self.expansion.get_coefficient_identifiers()))]\n value = self.expansion.evaluate(coeff_exprs, bvec)\n result_names = [\n sac.assign_unique(\"result_%d_p\" % i,\n knl.postprocess_at_target(value, bvec))\n for i, knl in enumerate(self.kernels)\n ]\n\n sac.run_global_cse()\n\n from sumpy.symbolic import kill_trivial_assignments\n assignments = kill_trivial_assignments([\n (name, expr)\n for name, expr in six.iteritems(sac.assignments)],\n retain_names=result_names)\n\n from sumpy.codegen import to_loopy_insns\n loopy_insns = to_loopy_insns(assignments,\n vector_names=set([\"b\"]),\n pymbolic_expr_maps=[self.expansion.get_code_transformer()],\n complex_dtype=np.complex128 # FIXME\n )\n\n return loopy_insns, result_names\n\n def get_kernel_scaling_assignment(self):\n from pymbolic.interop.sympy import SympyToPymbolicMapper\n sympy_conv = SympyToPymbolicMapper()\n return [lp.Assignment(id=None,\n assignee=\"kernel_scaling\",\n expression=sympy_conv(self.expansion.kernel.get_scaling()),\n temp_var_type=lp.auto)]\n\n def get_cache_key(self):\n return (type(self).__name__, self.expansion, tuple(self.kernels))\n\n# }}}\n\n\n# {{{ E2P to single box (L2P, likely)\n\nclass E2PFromSingleBox(E2PBase):\n default_name = \"e2p_from_single_box\"\n\n def get_kernel(self):\n ncoeffs = len(self.expansion)\n\n loopy_insns, result_names = self.get_loopy_insns_and_result_names()\n\n loopy_knl = lp.make_kernel(\n [\n \"{[itgt_box]: 0<=itgt_box tgt_ibox = target_boxes[itgt_box]\n <> itgt_start = box_target_starts[tgt_ibox]\n <> itgt_end = itgt_start+box_target_counts_nonchild[tgt_ibox]\n\n <> center[idim] = centers[idim, tgt_ibox] {id=fetch_center}\n\n \"\"\"] + [\"\"\"\n <> coeff{coeffidx} = \\\n src_expansions[tgt_ibox - src_base_ibox, {coeffidx}]\n \"\"\".format(coeffidx=i) for i in range(ncoeffs)] + [\"\"\"\n\n for itgt\n <> b[idim] = targets[idim, itgt] - center[idim] {dup=idim}\n\n \"\"\"] + loopy_insns + [\"\"\"\n\n result[{resultidx},itgt] = \\\n kernel_scaling * result_{resultidx}_p \\\n {{id_prefix=write_result}}\n \"\"\".format(resultidx=i) for i in range(len(result_names))\n ] + [\"\"\"\n end\n end\n \"\"\"],\n [\n lp.GlobalArg(\"targets\", None, shape=(self.dim, \"ntargets\"),\n dim_tags=\"sep,C\"),\n lp.GlobalArg(\"box_target_starts,box_target_counts_nonchild\",\n None, shape=None),\n lp.GlobalArg(\"centers\", None, shape=\"dim, naligned_boxes\"),\n lp.GlobalArg(\"result\", None, shape=\"nresults, ntargets\",\n dim_tags=\"sep,C\"),\n lp.GlobalArg(\"src_expansions\", None,\n shape=(\"nsrc_level_boxes\", ncoeffs), offset=lp.auto),\n lp.ValueArg(\"nsrc_level_boxes,naligned_boxes\", np.int32),\n lp.ValueArg(\"src_base_ibox\", np.int32),\n lp.ValueArg(\"ntargets\", np.int32),\n 
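# the \"...\" entry just below lets loopy infer the remaining, unspecified kernel arguments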
\"...\"\n ] + [arg.loopy_arg for arg in self.expansion.get_args()],\n name=self.name,\n assumptions=\"ntgt_boxes>=1\",\n silenced_warnings=\"write_race(write_result*)\",\n default_offset=lp.auto)\n\n loopy_knl = lp.fix_parameters(loopy_knl,\n dim=self.dim,\n nresults=len(result_names))\n\n loopy_knl = lp.tag_inames(loopy_knl, \"idim*:unr\")\n loopy_knl = self.expansion.prepare_loopy_kernel(loopy_knl)\n\n return loopy_knl\n\n def get_optimized_kernel(self):\n # FIXME\n knl = self.get_kernel()\n knl = lp.tag_inames(knl, dict(itgt_box=\"g.0\"))\n return knl\n\n def __call__(self, queue, **kwargs):\n \"\"\"\n :arg expansions:\n :arg target_boxes:\n :arg box_target_starts:\n :arg box_target_counts_nonchild:\n :arg centers:\n :arg targets:\n \"\"\"\n knl = self.get_cached_optimized_kernel()\n\n return knl(queue, **kwargs)\n\n# }}}\n\n\n# {{{ E2P from CSR-like interaction list\n\nclass E2PFromCSR(E2PBase):\n default_name = \"e2p_from_csr\"\n\n def get_kernel(self):\n ncoeffs = len(self.expansion)\n\n loopy_insns, result_names = self.get_loopy_insns_and_result_names()\n\n loopy_knl = lp.make_kernel(\n [\n \"{[itgt_box]: 0<=itgt_box tgt_ibox = target_boxes[itgt_box]\n <> itgt_start = box_target_starts[tgt_ibox]\n <> itgt_end = itgt_start+box_target_counts_nonchild[tgt_ibox]\n\n for itgt\n <> tgt[idim] = targets[idim,itgt]\n\n <> isrc_box_start = source_box_starts[itgt_box]\n <> isrc_box_end = source_box_starts[itgt_box+1]\n\n for isrc_box\n <> src_ibox = source_box_lists[isrc_box]\n \"\"\"] + [\"\"\"\n <> coeff{coeffidx} = \\\n src_expansions[src_ibox - src_base_ibox, {coeffidx}]\n \"\"\".format(coeffidx=i) for i in range(ncoeffs)] + [\"\"\"\n\n <> center[idim] = centers[idim, src_ibox] {dup=idim}\n <> b[idim] = tgt[idim] - center[idim] {dup=idim}\n\n \"\"\"] + loopy_insns + [\"\"\"\n end\n \"\"\"] + [\"\"\"\n result[{resultidx}, itgt] = result[{resultidx}, itgt] + \\\n kernel_scaling * simul_reduce(sum, isrc_box,\n result_{resultidx}_p) {{id_prefix=write_result}}\n \"\"\".format(resultidx=i) for i in range(len(result_names))] + [\"\"\"\n end\n end\n \"\"\"],\n [\n lp.GlobalArg(\"targets\", None, shape=(self.dim, \"ntargets\"),\n dim_tags=\"sep,C\"),\n lp.GlobalArg(\"box_target_starts,box_target_counts_nonchild\",\n None, shape=None),\n lp.GlobalArg(\"centers\", None, shape=\"dim, aligned_nboxes\"),\n lp.GlobalArg(\"src_expansions\", None,\n shape=(\"nsrc_level_boxes\", ncoeffs), offset=lp.auto),\n lp.ValueArg(\"src_base_ibox\", np.int32),\n lp.ValueArg(\"nsrc_level_boxes,aligned_nboxes\", np.int32),\n lp.ValueArg(\"ntargets\", np.int32),\n lp.GlobalArg(\"result\", None, shape=\"nresults,ntargets\",\n dim_tags=\"sep,C\"),\n lp.GlobalArg(\"source_box_starts, source_box_lists,\",\n None, shape=None, offset=lp.auto),\n \"...\"\n ] + [arg.loopy_arg for arg in self.expansion.get_args()],\n name=self.name,\n assumptions=\"ntgt_boxes>=1\",\n silenced_warnings=\"write_race(write_result*)\",\n default_offset=lp.auto)\n\n loopy_knl = lp.fix_parameters(loopy_knl,\n dim=self.dim,\n nresults=len(result_names))\n\n loopy_knl = lp.tag_inames(loopy_knl, \"idim*:unr\")\n loopy_knl = lp.prioritize_loops(loopy_knl, \"itgt_box,itgt,isrc_box\")\n loopy_knl = self.expansion.prepare_loopy_kernel(loopy_knl)\n\n return loopy_knl\n\n def get_optimized_kernel(self):\n # FIXME\n knl = self.get_kernel()\n knl = lp.tag_inames(knl, dict(itgt_box=\"g.0\"))\n return knl\n\n def __call__(self, queue, **kwargs):\n knl = self.get_cached_optimized_kernel()\n return knl(queue, **kwargs)\n\n# }}}\n\n# vim: 
foldmethod=marker\n","sub_path":"sumpy/e2p.py","file_name":"e2p.py","file_ext":"py","file_size_in_byte":11661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"611768201","text":"import multiprocessing as mp\nimport os\nimport pickle\nimport random\nimport subprocess\nimport time\nfrom os.path import join\nimport numpy as np\nimport c4_nn_agent as c4_nn\nimport connect4_base as cb\nimport monte_carlo as mc\nimport sys\n\n\"\"\"Main file through which the training loop runs, including self play games, comparison against past iterations, comparison to an\nobjective alpha beta (AB) engine (using fixed depth of 13 ply), and all necessary IO operations.\"\"\"\n\n\ndef play_AB(games, proc_id):\n \"\"\"Compare the strength of the self-learning neural network agent to a traditional AB engine by playing some games\"\"\"\n nn_wins = 0\n ab_wins = 0\n draws = 0\n NN = c4_nn.Connect4NN(\"./connect4_models\", device=proc_id) # load the most current version of the network\n\n for being_played in range(games): # play 'games' num of games\n winner = \"No one, yet\"\n previous_move = 4 # dummy move needed for the poorly written AB engine\n game_data = [] # save game data to watch them later\n# print(being_played) # to show progress\n game = cb.BoardState() # generate a starting position\n tree = mc.MCTS() # init game tree for neural network (NN)\n switch = bool(random.getrandbits(1)) # pick a random player to go first\n temperature = 1.75\n if switch:\n order = \"AB-NN\"\n else:\n order = \"NN-AB\"\n for n in range(64): # play out 64 moves\n if switch:\n output = subprocess.run( # get AB move\n [\"./connect_4_AB.exe\", cb.parse_board(game.board), \"W\", str(previous_move)],\n stdout=subprocess.PIPE)\n numeric_move = int(output.stdout.decode(\"utf-8\")) # get AB move\n if numeric_move in game.get_legal(): # make sure the AB move is legal, otherwise play a random legal move\n game.move(numeric_move)\n else:\n game.move(random.choice(game.get_legal()))\n if game.move_check(numeric_move) == 1: # if AB won, continue to the next game\n ab_wins += 1\n winner = \"AB\"\n break\n else:\n result, previous_move = play_turn(NN, tree, 1600, 80, 4.0, temperature, game, previous_move)\n if result: # if nn agent wins\n nn_wins += 1\n winner = \"NN\"\n break\n switch = not switch\n try: # try to get an idea of what moves the neural network was thinking about\n game_data.append((cb.BoardState(game.board), tree.get_policy(game), tree.get_eval(game)))\n except KeyError:\n game_data.append((cb.BoardState(game.board), \"AB played an unexpected move!\", \"Eval unknown!\"))\n if n == 7:\n temperature = 1e-5\n if n == 63: # draw\n winner = \"DR\"\n draws += 1\n\n game_name = join(\"c4_games\", f\"{order}(v{NN.get_step()})-game-{int(time.time() * 100.)}-{winner}.pickle\")\n with open(game_name, \"wb\") as f: # save the game\n pickle.dump(game_data, f)\n nn_score = 1.0 * nn_wins + 0.5 * draws\n ab_score = games - nn_score\n print(f\"Final score for NN: {nn_wins}-{draws}-{ab_wins} ({nn_score}/{games})\")\n print(f\"Final score for AB: {ab_wins}-{draws}-{nn_wins} ({ab_score}/{games})\")\n NN.kill()\n\n\ndef add_rewards(train_game, gamma=0.97):\n \"\"\"+ number if win, - number if lost, 0 if draw, assigning rewards so nn learns to predict q properly\"\"\"\n reward = 1.\n for sample in train_game[::-1]:\n sample[2] += reward / 2 # divide by 2 so its an average of improved q and actual score z\n reward *= -gamma\n\n\ndef deduplicate(train_data):\n \"\"\"takes duplicate boards in 
training data, and combines them into 1 sample by averaging both the reward and policy\"\"\"\n found = {}\n for sample in train_data:\n the_hash = hash(sample[0].tostring()) # keep a unique id for each board by its hash\n if the_hash not in found.keys():\n found[the_hash] = [0, sample] # times_seen, sample\n else:\n found[the_hash][0] += 1 # num of times the position has been seen\n found[the_hash][1][1] = ((found[the_hash][1][1] * found[the_hash][0]) + sample[1]) / (\n found[the_hash][0] + 1) # average policy\n found[the_hash][1][2] = ((found[the_hash][1][2] * found[the_hash][0]) + sample[2]) / (\n found[the_hash][0] + 1) # average reward\n return [unique_sample[1] for unique_sample in\n found.values()] # return a list of the deduplicated and averaged samples\n\n\ndef play_turn(agent, tree, search_iters, search_threads, c_val, temp, board_state, prev_move):\n \"\"\"Given a board position, nn agent, search tree, and simulation parameters, it plays a move on the board and returns a tuple of\n (game_result, move_played) where game_result = 1 if the move was a win, else 0\"\"\"\n tree.search_iter(board_state, agent, prev_move, iters=search_iters, threads=search_threads, c_explore=c_val)\n chosen_move = np.random.choice(8, p=tree.get_policy(board_state, temperature=temp))\n while chosen_move not in board_state.get_legal():\n chosen_move = np.random.choice(8, p=tree.get_policy(board_state, temperature=temp))\n board_state.move(chosen_move)\n if board_state.move_check(chosen_move) == 1:\n return 1, chosen_move # 1 corresponds to a win being detected\n return 0, chosen_move # 0 corresponds to nothing being detected\n\n\ndef compete_one_game(old, new, search_iters, search_threads, c_val):\n \"\"\"To compare the newly trained nn to the old version, we pit them against each other and see who wins\"\"\"\n old_tree = mc.MCTS() # old nn and new nn need to have separate trees because otherwise the old nn might get\n new_tree = mc.MCTS() # extra insight from the new version it might not have realized\n new_game = cb.BoardState() # generate a new board and pick a random player to go first\n switch = bool(random.getrandbits(1))\n started = \"with new playing as white,\" # display message so we can see if the new version is winning equal numbers of games\n if switch: # as both white and black (was an issue early in development)\n started = \"with new playing as black,\"\n current_temp = 1.75 # temperature starts at 1.75 for the first 7 moves (exploration) then the rest use an infinitesimal amount\n prev_move = 0 # dummy starting move for the monte carlo search\n for dummy in range(64):\n if switch: # if old nn's turn\n result, prev_move = play_turn(old, old_tree, search_iters, search_threads, c_val, current_temp, new_game,\n prev_move)\n if result:\n print(started, \"old one won one\")\n return -1\n else: # if new nn's turn\n result, prev_move = play_turn(new, new_tree, search_iters, search_threads, c_val, current_temp, new_game,\n prev_move)\n if result:\n print(started, \"new won!\")\n return 1 # 1 corresponds to the new nn winning the game\n switch = not switch\n if dummy == 7: # after 1st 7 exploratory moves, only only choose best ones by using \"infinitesimal\" temperature\n current_temp = 1e-5\n print(started, \"there was a draw?!\")\n return 0\n\n\ndef compete(old, new, c_val, games=51, search_iters=1000, search_threads=50):\n \"\"\"Compares the old and new models to see if the new one really learned anything\"\"\"\n old_score = 0.\n new_score = 0.\n for _ in range(games):\n game_result = 
compete_one_game(old, new, search_iters, search_threads, c_val)\n if game_result == -1:\n old_score += 1.\n elif game_result == 1:\n new_score += 1.\n else:\n old_score += 0.5\n new_score += 0.5\n return float(new_score) / games\n\n\ndef try_training(games_num, c_val, p_id=0, perf_list=None):\n \"\"\"After training data through self play has been created, train the nn on it to see if any improvements can be made\n after the nn has been actually trained on the data, compare this 'new' version to the old, untrained version to make sure we\n are actually learning the right things\"\"\"\n old_model = c4_nn.Connect4NN(\"./connect4_models\", device=p_id)\n new_model = c4_nn.Connect4NN(\"./connect4_models\",\n log_path=\"/tmp/tensorflow_logs/c4_new_head2\",\n device=p_id) # log some training info stuff\n training_data = [] # get training data from past game generation iters\n games_loc = os.fsencode(\n join(\"datasetts\", \"c4_train_games\")) # load all training data files in the c4_train_games folder\n for file in os.listdir(games_loc):\n filename = os.fsdecode(file)\n with open(join(\"datasetts\", \"c4_train_games\", filename), \"rb\") as f:\n training_data += list(pickle.load(f))\n\n training_data = deduplicate(training_data) # deduplicate and shuffle data\n random.shuffle(training_data)\n new_model.train_update(training_data,\n epochs=2) # train the new nn on the data, running over it 3 times for good measure\n print(\"Done training model, comparing trained new model to old model...\")\n new_performance = compete(old_model, new_model, c_val, games=games_num) # compare the trained and old version\n print(\"Done comparing to old model..\")\n if perf_list is None:\n if new_performance >= 0.55: # if the new version wins more than 55% of the games (assuming they are exactly equal, this happens over 51\n print(\n f\"New model won by {new_performance}!\") # games about 7% of the time by pure chance), then it becomes the new model and is saved\n new_model.save()\n new_model.kill()\n old_model.kill()\n else: # however if new made no significant improvement (didn't win more than 55%) then just keep the old version\n print(f\"Old model won by {1. - new_performance}\")\n old_model.save()\n new_model.kill()\n old_model.kill()\n else:\n perf_list.append(new_performance)\n if p_id == 0:\n while len(perf_list) != proc_num:\n time.sleep(0.08)\n new_performance = sum(perf_list) / proc_num\n if new_performance >= 0.55: # if the new version wins more than 55% of the games (assuming they are exactly equal, this happens over 51\n print(\n f\"New model won by {new_performance}!\") # games about 7% of the time by pure chance), then it becomes the new model and is saved\n new_model.save()\n new_model.kill()\n old_model.kill()\n else: # however if new made no significant improvement (didn't win more than 55%) then just keep the old version\n print(f\"Old model won by {1. 
- new_performance}\")\n old_model.save()\n new_model.kill()\n old_model.kill()\n\n\ndef generate(games_num, window, c_val, proc_id=0):\n \"\"\"Does one iteration of game generation (plays games_num games to generate training data), trains new_model on that\n and then if it does better than old_model, it new_model is saved, else old_model is saved\"\"\"\n agent = c4_nn.Connect4NN('./connect4_models', device=proc_id) # load self play agent\n global_training_data = [] # store training data\n global_tree = mc.MCTS() # tree that gets reused between games to improve simulations\n print(\"Getting training data...\")\n pct = 0.04\n pct_2 = 0.1\n for _ in range(games_num):\n if pct <= float(_) / games_num: # every 4% of 2048 ~= 82 games, save current training data\n print(\"#\", end='', flush=True)\n pct += 0.04\n with open(join(f\"train_progress\", f\"train_dump_{proc_id}_-{int(pct*1000)}.pickle\"), \"wb\") as f:\n pickle.dump(global_training_data, f)\n if pct_2 <= float(_) / games_num:\n global_tree = mc.MCTS() # emtpy out global tree, to save memory every so often and also given an indicator of progress\n pct_2 += 0.1\n print(\"|\", end='', flush=True)\n play_game(agent, global_tree, global_training_data,\n c_val) # actually generate the training data through self play\n agent.save() # save the current agent\n agent.kill() # free some memory\n window_size = 10\n try: # relocate old trainng data so it isn't deleted\n with open(join(\"datasetts\", \"c4_train_games\", f\"training_data_{proc_id}_-{window % window_size}.pickle\"), \"rb\") as f:\n move_this = pickle.load(f)\n with open(join(\"datasetts\", \"old_c4_games\", f\"training_data_{proc_id}_-{int(time.time()*1000)}.pickle\"), \"wb\") as f:\n pickle.dump(move_this, f)\n except FileNotFoundError:\n pass\n with open(join(\"datasetts\", \"c4_train_games\", f\"training_data_{proc_id}_-{window % window_size}.pickle\"), \"wb\") as f:\n pickle.dump(global_training_data, f)\n print(\"\\nDone getting training data, training model..\")\n\n\ndef play_game(agent, global_tree, global_training, c_val, search_iters=1000, search_threads=50):\n \"\"\"Generates 1 game's worth of self play training data with samples of the form [state (s), improved policy (pi), reward (z)]\"\"\"\n game = cb.BoardState()\n training = [] # training data for this particular game\n chosen_move = 0\n for dummy in range(64): # max number of turns per game\n global_tree.search_iter(game, agent, chosen_move, iters=search_iters, threads=search_threads)\n training.append(np.array([np.copy(game.board), global_tree.get_policy(game), global_tree.get_eval(game) / 2]))\n chosen_move = np.random.choice(8, p=global_tree.get_policy(game))\n while chosen_move not in game.get_legal(): # in theory this shouldn't be necessary\n chosen_move = np.random.choice(8, p=global_tree.get_policy(game))\n game.move(chosen_move)\n if game.move_check(chosen_move) == 1:\n add_rewards(training)\n global_training += training\n return\n global_training += training\n\n\ndef main():\n my_c = 4.0\n for cnt in range(40):\n if proc_num > 1:\n pool = mp.Pool(proc_num)\n manager = mp.Manager()\n\n args = [(1024, cnt, my_c, i) for i in range(proc_num)]\n pool.starmap(generate, args)\n pool.close()\n pool.join()\n\n pool = mp.Pool(proc_num)\n perf_list = manager.list()\n args = [(128//proc_num, my_c, i, perf_list) for i in range(proc_num)]\n pool.starmap(try_training, args)\n pool.close()\n pool.join()\n\n pool = mp.Pool(proc_num)\n args = [(25, i) for i in range(proc_num)]\n pool.map(play_AB, [25]*proc_num)\n pool.close()\n 
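# NB: play_AB takes (games, proc_id); pool.starmap(play_AB, args) would pass both, as the earlier phases do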
pool.join()\n else:\n p = mp.Process(target=try_training, args=(21, my_c,)) # try to train neural net on the games just played out\n p.start()\n p.join() # its a process to manage memory efficiently (so gpu memory can be cleared)\n p = mp.Process(target=play_AB, args=(3,)) # play 25 game matches against AB engine\n p.start()\n p.join()\n p = mp.Process(target=generate, args=(100, cnt, my_c,)) # do 768 games of self play\n p.start()\n p.join()\n print(f\"Total sessions completed: {cnt}\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n proc_num = int(sys.argv[1])\n else:\n proc_num = 1\n main()\n","sub_path":"connect4/self-learning/connect_4_A0.py","file_name":"connect_4_A0.py","file_ext":"py","file_size_in_byte":15836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"117866244","text":"from easy_table import EasyTable\n\n#This example shows how to use the various data functions\n#insertData, sortData, deleteData and updateData\n\n#Create the table and set the structure\ntable = EasyTable(\"Employees Table\")\ntable.setCorners(\"/\", \"\\\\\", \"\\\\\", \"/\")\ntable.setOuterStructure(\"|\", \"-\")\ntable.setInnerStructure(\"|\", \"-\", \"+\")\n\n#Set the table data\ntable.setData([\n {\"Employee ID\": 1, \"First Name\": \"Jim\", \"Last Name\": \"Johnson\"},\n {\"Employee ID\": 2, \"First Name\": \"Bill\", \"Last Name\": \"Smith\"}\n ])\n\n#Insert new rows into the table using a list of lists\ntable.insertData([[3, \"Tom\", \"Jones\"]])\n#Insert new rows into the table using a list of dicts\ntable.insertData([{\"Employee ID\": 4, \"First Name\": \"John\", \"Last Name\": \"Williams\"}])\n\n#Create a function to be used by sort data\n#All functions used by sortData take the row as a parameter\n#and return whatever will be the key for sorting\ndef getLastName(row):\n return row[\"Last Name\"]\n\n#Sort based off the last name using the function created above\ntable.sortData(getLastName)\n\n#Create a function to serve as the condition for deleting data\n#All functions used by deleteData take the row as a parameter\n#and return False if the row should be deleted and True if it should\n#be kept\ndef removeOddNumbers(row):\n if(row[\"Employee ID\"] % 2) == 0:\n return True\n else:\n return False\n\n#Delete all rows with an odd number Employee IDs using the function\n#created above\n#If no function is given all the data is deleted\ntable.deleteData(removeOddNumbers)\n\n#Create a function to serve as the condition for updating the data\n#All functions used by updateData as a condition take the row as a\n#parameter and return True if they should be updated\ndef startsWithJ(row):\n if(row[\"First Name\"].startswith(\"J\")):\n return True\n else:\n return False\n\n#Create a function to update the data\n#All functions used by updateData to provide the new value\n#take the row and return the row with the values updated\ndef uppercaseFirstName(row):\n row[\"First Name\"] = row[\"First Name\"].upper()\n return row\n\n#If the row has a first name that starts with 'J' then\n#update the first name to be all uppercase\n#If no function is provided for the condition then the\n#update will be done for every row\ntable.updateData(uppercaseFirstName, startsWithJ)\n\n#Display the table\ntable.displayTable()\n","sub_path":"examples/data_functions.py","file_name":"data_functions.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352199681","text":"import 
json\n\nfrom commonmark import commonmark as marked\nfrom django import template\nfrom django.template.base import Node\nfrom django.utils.safestring import mark_safe\nfrom urllib.parse import quote_plus\n\nfrom hipeac.functions import truncate_md\nfrom hipeac.models import get_cached_metadata\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef active(request, patterns):\n if patterns:\n for pattern in patterns.split(\",\"):\n try:\n if pattern == request.resolver_match.url_name:\n return \"active\"\n except Exception:\n return \"\"\n return \"\"\n\n\n@register.filter\ndef euro(value):\n # http://publications.europa.eu/code/en/en-370303.htm\n return mark_safe('EUR %s' % str(\"{0:,}\".format(value).replace(\",\", \" \")))\n\n\n@register.filter\ndef join_json(json_string, separator=\",\"):\n return mark_safe(separator.join(json.loads(json_string)))\n\n\n@register.filter\ndef markdown(text):\n return mark_safe(marked(text))\n\n\n@register.tag(name=\"markdown\")\ndef do_markdown(parser, token):\n nodelist = parser.parse((\"endmarkdown\",))\n parser.delete_first_token()\n bits = token.split_contents()\n if len(bits) > 1:\n raise template.TemplateSyntaxError(\"`markdown` tag requires exactly zero arguments\")\n return MarkdownNode(nodelist)\n\n\nclass MarkdownNode(Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n text = self.nodelist.render(context)\n return mark_safe(marked(text))\n\n\ndef metadata_output(ids, title, *, output_format: str = \"<li>{0}</li>\", maps=[str]):\n keys = [int(key) for key in ids.split(\",\")]\n metadata_items = get_cached_metadata()\n\n output = []\n output.append(f'<h5>{title}</h5><ul>')\n for metadata in [metadata_items[key] for key in keys if key in metadata_items]:\n output.append(output_format.format(*[m(metadata.value) for m in maps]))\n output.append(\"</ul>\")\n\n return mark_safe(\"\".join(output))\n\n\n@register.filter\ndef metadata_list(ids, title):\n if not ids:\n return \"\"\n return metadata_output(ids, title)\n\n\n@register.filter\ndef metadata_list_jobs(ids, title):\n if not ids:\n return \"\"\n return metadata_output(\n ids,\n title,\n output_format='<li><a href=\"{0}\">{1}</a></li>
',\n maps=[quote_plus, str],\n )\n\n\n@register.filter\ndef metadata_badges(ids, title):\n if not ids:\n return \"\"\n return metadata_output(ids, title, output_format='{0}')\n\n\n@register.filter\ndef metadata_badges_jobs(ids, title):\n if not ids:\n return \"\"\n return metadata_output(\n ids,\n title,\n output_format='{1}',\n maps=[quote_plus, str],\n )\n\n\n@register.filter\ndef truncate(text, limit=300, smart=True):\n if not text:\n return \"\"\n\n text = truncate_md(text, limit=limit)\n\n if smart:\n words = text.split(\" \")[:-1]\n return \" \".join(words) + \"...\"\n\n return f\"{text}...\"\n\n\nclass SpacelessNode(Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n return json.dumps(json.loads(self.nodelist.render(context).strip()))\n\n\n@register.tag\ndef spaceless_json(parser, token):\n nodelist = parser.parse((\"endspaceless_json\",))\n parser.delete_first_token()\n return SpacelessNode(nodelist)\n","sub_path":"hipeac/templatetags/hipeac.py","file_name":"hipeac.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59847565","text":"from src.vppNode.VppBoxNode import VppBoxNode\nfrom src import vppNode\nfrom src.vppNode.VppNode import VppNode\nfrom src.vppNode.VppBoxNode import VppBoxNode\nimport pandas as pd\n\nclass NodeResults:\n \"\"\"a class for creating output files based on final resutls of the optimizer.\n \"\"\"\n def __init__(self, node: VppNode) -> None:\n self.node = node\n self.dir_sp_p_da_buy = 'output/buy.xlsx'\n self.dir_sp_p_da_sell = 'output/sell.xlsx'\n self.dir_u_dg = 'output/u_dg.xlsx'\n self.dir_v_dg_su = 'output/v_dg_su.xlsx'\n self.dir_v_dg_sd = 'output/v_dg_sd.xlsx'\n self.dir_opt_res = 'output/ofv.xlsx'\n\n def to_excel(self, **kwargs):\n \"\"\"a method to create excel files based on output variables of VppBoxes\n (setpoints)\n Args:\n **kwargs: Arbitrary keyword arguments.\n buy (bool): True to create output/buy.xlsx\n sell (bool): True to create output/sell.xlsx\n u_dg (bool): True to create output/u_dg.xlsx\n v_dg_su (bool): True to create output/v_dg_su.xlsx\n v_dg_sd (bool): True to create output/v_dg_sd.xlsx\n opt (bool): True to create output/ofv.xlsx\n \"\"\"\n sb_box: VppBoxNode = self.node.get_junction(1)\n\n df_buy = pd.DataFrame(sb_box.sp_p_da_buy.values(), index=[0])\n df_sell = pd.DataFrame(sb_box.sp_p_da_sell.values(), index=[0])\n \n u_dg_row = []\n u_dg_cols = []\n\n v_dg_su_row = []\n v_dg_su_cols = []\n\n v_dg_sd_row = []\n v_dg_sd_cols = []\n\n indecies = []\n\n for key, box_node in self.node.junctionMp.getItems():\n if not box_node.dg_resources.is_empty():\n for key1, value1 in box_node.dg_resources.getItems():\n \n w1_sp_u_dg = value1.sp_u_dg[1]\n w1_sp_v_dg_su = value1.sp_v_dg_su[1]\n w1_sp_v_dg_sd = value1.sp_v_dg_sd[1]\n \n u_dg_row.append(list(w1_sp_u_dg.values()))\n u_dg_cols.append(list(w1_sp_u_dg.keys()))\n\n v_dg_su_row.append(list(w1_sp_v_dg_su.values()))\n v_dg_su_cols.append(list(w1_sp_v_dg_su.keys()))\n\n v_dg_sd_row.append(list(w1_sp_v_dg_sd.values()))\n v_dg_sd_cols.append(list(w1_sp_v_dg_sd.keys()))\n\n indecies.append(f\"{key},{key1}\")\n\n df_u_dg = pd.DataFrame(u_dg_row, columns=u_dg_cols[0], index=indecies)\n df_v_dg_su = pd.DataFrame(v_dg_su_row, columns=v_dg_su_cols[0], index=indecies)\n df_v_dg_sd = pd.DataFrame(v_dg_sd_row, columns=v_dg_sd_cols[0], index=indecies)\n df_ofv = pd.DataFrame(self.node.OFV, columns=list(range(1, len(self.node.OFV)+1)), index=[0])\n\n if 
kwargs.pop('buy', False):\n df_buy.to_excel(self.dir_sp_p_da_buy)\n if kwargs.pop('sell', False):\n df_sell.to_excel(self.dir_sp_p_da_sell)\n if kwargs.pop('u_dg', False):\n df_u_dg.to_excel(self.dir_u_dg)\n if kwargs.pop('v_dg_su', False):\n df_v_dg_su.to_excel(self.dir_v_dg_su)\n if kwargs.pop('v_dg_sd', False):\n df_v_dg_sd.to_excel(self.dir_v_dg_sd)\n if kwargs.pop('opt', False):\n df_ofv.to_excel(self.dir_opt_res)","sub_path":"src/Utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322688695","text":"import abc\nimport inspect\nimport json\nimport logging\nimport threading\nimport time\nimport zmq\n\n\nclass Injector:\n def __init__(self, ip=\"172.21.20.70\"):\n address = f\"tcp://{ip}:5555\"\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.DEALER)\n identity = b\"Injector\" # Setting a unique identity for the DEALER socket\n self.socket.setsockopt(zmq.IDENTITY, identity)\n self.socket.connect(address)\n\n def send_request(self, request):\n print(f\"Sending request: {request}\")\n request_bytes = json.dumps(request).encode('utf-8')\n self.socket.send(request_bytes)\n\n def recv(self):\n reply = self.socket.recv()\n print(f\"Received reply: {reply.decode()}\")\n return reply\n\n def close(self):\n self.socket.close()\n self.context.term()\n\n\nclass InjectionServer(threading.Thread):\n def __init__(self, logger=None, handler=None, riaps_port=None):\n super().__init__()\n\n if logger is None:\n # If no logger is provided, use the basicConfig logger\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n else:\n # Use the provided logger\n self.logger = logger\n\n self.handler = handler\n if riaps_port:\n self.riaps_port = riaps_port\n self.riaps_plug = riaps_port.setupPlug(self)\n self.logger.debug(f\"InjectionServer | riaps_plug: {self.riaps_plug} \"\n f\"plug identity: {self.riaps_plug.identity}\"\n f\"socket name: {riaps_port.instName}\")\n # self.poller.register(self.riaps_plug, zmq.POLLIN)\n\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.ROUTER)\n self.socket.bind(\"tcp://*:5555\")\n\n def run(self):\n self.logger.info(\"Server started. 
Listening for requests...\")\n poller = zmq.Poller()\n poller.register(self.socket, zmq.POLLIN)\n\n while True:\n self.logger.info(f\"Waiting for messages...{time.time()}\")\n events = dict(poller.poll(1000))\n if self.socket in events and events[self.socket] == zmq.POLLIN:\n identity, request_bytes = self.socket.recv_multipart()\n request = json.loads(request_bytes.decode('utf-8'))\n self.logger.info(f\"Received request from client {identity}: {request}\")\n if self.handler is not None:\n self.handler(request[\"function\"], request[\"patch\"])\n if self.riaps_plug is not None:\n self.riaps_plug.send_pyobj(request)\n\n def close(self):\n self.socket.close()\n self.context.term()\n\n\nclass TestInterface:\n def __init__(self, app, logger=None):\n\n self.app = app\n\n if not hasattr(self.app, \"function_patches\"):\n raise Exception(\"No function patches provided\")\n\n if logger is None:\n # If no logger is provided, use the basicConfig logger\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n else:\n # Use the provided logger\n self.logger = logger\n\n # self.test_server_thread = InjectionServer(logger=self.logger,\n # handler=self.patch_function)\n # self.test_server_thread.start()\n\n def revert_patch(self, function_name):\n original_function = getattr(self.app.__class__, function_name)\n setattr(self.app, function_name, original_function.__get__(self.app))\n\n def patch_function(self, function_name, new_function_name):\n\n if new_function_name == \"revert\":\n self.revert_patch(function_name)\n return\n\n self.logger.info(f\"Received patch request for function {function_name} with {new_function_name}\")\n original_function = getattr(self.app, function_name)\n new_function = self.app.function_patches[new_function_name]\n\n if not new_function:\n raise Exception(f\"Function {new_function} not found\")\n\n new_function = new_function.__get__(self.app)\n\n original_params = inspect.signature(original_function).parameters\n patched_params = inspect.signature(new_function).parameters\n\n if original_params == patched_params:\n self.logger.info(\"Patched function has the same parameters as the original function.\")\n setattr(self.app, function_name, new_function)\n else:\n self.logger.info(\n f\"{new_function_name} has {patched_params} while original {function_name} has {original_params}.\")\n\n\n\"\"\"\nCode below is for demonstration/testing purposes only\n\"\"\"\n\n\ndef run_server():\n server = InjectionServer()\n server.start()\n\n\ndef run_client():\n client = Injector()\n for request_number in range(5):\n request = {\"request_number\": request_number + 1}\n client.send_request(request)\n time.sleep(1) # Add a small delay between each request for demonstration purposes\n\n\nif __name__ == \"__main__\":\n import multiprocessing as mp\n\n server_process = mp.Process(target=run_server)\n client_process = mp.Process(target=run_client)\n\n server_process.start()\n client_process.start()\n\n server_process.join()\n client_process.join()\n","sub_path":"python-fabric/interface.test/example2_app/test_suite/testInterface.py","file_name":"testInterface.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574178448","text":"# -*- coding: utf-8 -*-\r\nfrom chatterbot.trainers import ListTrainer\r\nfrom chatterbot import ChatBot\r\nimport os\r\n\r\nbot = ChatBot('Teste')\r\nbot.set_trainer(ListTrainer)\r\n\r\nfor arqv 
in os.listdir('arqv'):\r\n chats = open('arqv/'+arqv, 'r').readlines()\r\n bot.train(chats)\r\n\r\nwhile True:\r\n resq = input('Você: ')\r\n resp = bot.get_response(resq)\r\n print('Bot: ' + str(resp))\r\n","sub_path":"ChatBot/main_bot.py","file_name":"main_bot.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599134705","text":"from django.shortcuts import render\nfrom products.models import Product, CaroPics\nfrom .models import Review\n\n\n# We're in producto views.\n\ndef productPage(request, product_id):\n try:\n producto = Product.objects.get(id=product_id)\n except Exception as e:\n raise e\n\n if request.method == 'POST' and request.user.is_authenticated and \\\n request.POST['content'].strip() != '':\n Review.objects.create(\n product=producto, user=request.user,\n content=request.POST['content'])\n\n this_url = request.path\n reviews = Review.objects.filter(product=producto,)\n caropics = CaroPics.objects.filter(product=producto)\n context = {\n 'producto': producto,\n 'reviews': reviews,\n 'caropics': caropics,\n 'this_url':this_url,\n\n }\n\n return render(request, 'producto/product.html', context)\n","sub_path":"producto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57282306","text":"# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy (c) 2017-2023\n# ryanss (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom holidays.countries.nicaragua import Nicaragua, NI, NIC\nfrom tests.common import TestCase\n\n\nclass TestNicaragua(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass(Nicaragua)\n\n def setUp(self):\n super().setUp()\n self.holidays_an = Nicaragua(subdiv=\"AN\")\n\n def test_country_aliases(self):\n self.assertCountryAliases(Nicaragua, NI, NIC)\n\n def test_2020(self):\n self.assertHoliday(\n \"2020-01-01\",\n \"2020-04-09\",\n \"2020-04-10\",\n \"2020-05-01\",\n \"2020-07-19\",\n \"2020-08-01\",\n \"2020-08-10\",\n \"2020-09-14\",\n \"2020-09-15\",\n \"2020-12-08\",\n \"2020-12-25\",\n )\n self.assertNoHoliday(self.holidays_an, \"2020-08-01\", \"2020-08-10\")\n\n def test_ni_holidays_1979(self):\n self.assertHoliday(\n \"1979-01-01\",\n \"1979-04-12\",\n \"1979-04-13\",\n \"1979-05-01\",\n \"1979-07-19\",\n \"1979-09-14\",\n \"1979-09-15\",\n \"1979-12-08\",\n \"1979-12-25\",\n )\n\n def test_pre_1979(self):\n self.assertNoHoliday(\"1978-07-19\")\n\n def test_l10n_default(self):\n def run_tests(languages):\n for language in languages:\n ni = Nicaragua(language=language)\n self.assertEqual(ni[\"2022-01-01\"], \"Año Nuevo\")\n self.assertEqual(ni[\"2022-12-25\"], \"Navidad\")\n\n run_tests((Nicaragua.default_language, None, \"invalid\"))\n\n self.set_language(\"en_US\")\n run_tests((Nicaragua.default_language,))\n\n def test_l10n_en_us(self):\n en_us = \"en_US\"\n\n ni = Nicaragua(language=en_us)\n self.assertEqual(ni[\"2022-01-01\"], \"New Year's Day\")\n self.assertEqual(ni[\"2022-12-25\"], \"Christmas\")\n\n self.set_language(en_us)\n for language in (None, en_us, \"invalid\"):\n ni = Nicaragua(language=language)\n 
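# with the locale set via set_language above, language=None and unsupported codes fall back to English names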
self.assertEqual(ni[\"2022-01-01\"], \"New Year's Day\")\n self.assertEqual(ni[\"2022-12-25\"], \"Christmas\")\n\n def test_l10n_uk(self):\n uk = \"uk\"\n\n ni = Nicaragua(language=uk)\n self.assertEqual(ni[\"2022-01-01\"], \"Новий рік\")\n self.assertEqual(ni[\"2022-12-25\"], \"Різдво Христове\")\n\n self.set_language(uk)\n for language in (None, uk, \"invalid\"):\n ni = Nicaragua(language=language)\n self.assertEqual(ni[\"2022-01-01\"], \"Новий рік\")\n self.assertEqual(ni[\"2022-12-25\"], \"Різдво Христове\")\n","sub_path":"tests/countries/test_nicaragua.py","file_name":"test_nicaragua.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397672681","text":"import time\nimport random\nfrom HangmanPhases import hangPhase\n\n\n# wordList\ndef wordList():\n with open('EnglishLanguage.txt') as word_file:\n valid_words = list(word_file.read().split())\n return valid_words\n\n\n# clearFunc\ndef clear(lines):\n count = 0\n while count != lines:\n count = count + 1\n print(\"\\n\")\n\n\n# AI-wordFunc\ndef AIWord():\n valid_words = wordList()\n time.sleep(2)\n length = len(wordList()) - 1\n while True:\n AIChoice = random.randint(0, length)\n word = valid_words[AIChoice]\n if len(word) > 2:\n break\n return word\n\n\n# wordSelectFunc\ndef wordSelect():\n valid_words = wordList()\n wordLen = 0\n while True:\n word = input(\"Input a word (at least 3 letters)\\n> \").lower()\n oneWord = 1\n for i in word:\n if i == \" \":\n print(\"Please input only one word, without spaces.\")\n oneWord = 0\n break\n wordLen = wordLen + 1\n if wordLen < 3:\n print(\"\\nThat word is too short.\")\n continue\n if word in valid_words:\n pass\n else:\n print(\"That word is not an English word.\\n...\")\n time.sleep(2)\n continue\n if oneWord == 1:\n break\n\n print(f\"OK. The word is '{word}'.\\n...\")\n time.sleep(3)\n clear(100)\n return word\n\n\n# letterGuessFunc\ndef letterGuess():\n valid_letters = \"abcdefghijklmnopqrstuvwxyz-\"\n \n mistakes = 0\n letterLen = 0\n letters = \"\"\n remaining = \"\"\n totalLetters = \"\"\n wordLetters = \"\"\n\n for i in word:\n if i in wordLetters:\n continue\n else:\n wordLetters = wordLetters + i\n \n while mistakes < 10:\n check = 0\n chances = 10 - mistakes\n remaining = \"\"\n valid = 0\n print(f\"{hangPhase(mistakes)}\\n\")\n for i in word:\n if i in letters:\n remaining = remaining + f\"{i} \"\n else:\n remaining = remaining + \"_ \"\n print(f\"Word:\\n{remaining}\\n\\nYou have {chances} guesses remaining.\\nGuess a letter:\")\n guess = input(\"> \").lower()\n if len(guess) != 1:\n print(\"Please guess only one letter.\")\n time.sleep(2)\n continue\n for i in valid_letters:\n if i == guess:\n valid = 1\n break\n if valid == 0:\n print(\"Please only use letters and hyphens (-).\")\n time.sleep(2)\n clear(50)\n continue\n for i in word:\n if guess in totalLetters:\n print(f\"You have already guessed {guess}. Please try again.\")\n check = 2\n time.sleep(2)\n break\n elif guess == i:\n print(f\"You guessed {guess}. That is a correct guess!\\n\")\n letters = letters + guess\n check = 1\n time.sleep(2)\n break\n if check == 0:\n print(f\"You guessed {guess}. That is INCORRECT :(\\n\")\n mistakes = mistakes + 1\n time.sleep(2)\n totalLetters = totalLetters + guess\n if len(letters) == len(wordLetters):\n break\n clear(50)\n\n return chances, mistakes\n\n\n# resultFunc\ndef result():\n if mistakes == 10:\n print(f\"{hangPhase(10)}\\n\\nOH NO you got hung :(. 
The word was '{word}'.\")\n else:\n clear(50)\n print(f\"{hangPhase(11)}\\n\\nCONGRATULATIONS!!!!!\\n\"\n f\"You guessed '{word}' correctly. You had {chances} incorrect guesses left.\")\n\n\n# run\nwhile True:\n play = input(\"Would you like to play?\\n> \").lower()\n if \"y\" in play:\n pass\n else:\n print(\"OK, exiting now...\")\n time.sleep(2)\n break\n while True:\n opponent = input(\"Would you like to play against the computer or a friend?\\n> \").lower()\n if \"computer\" in opponent:\n print(\"OK, initializing computer...\\n\")\n time.sleep(2)\n word = AIWord()\n break\n elif \"friend\" in opponent:\n print(\"OK, please choose a word.\\n\")\n word = wordSelect()\n break\n else:\n print(\"Please answer 'computer' or 'friend':\")\n chances, mistakes = letterGuess()\n result()\n input(\"PRESS ENTER TO CONTINUE\")\n clear(50)\n","sub_path":"MinorTasks/Hangman/Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35330910","text":"\"\"\"Modulo feito para lidar com arquivos que devem ser armazenados no banco de dados.\r\nA classe principal a ser usada eh vzPtsFile, pois ela agrupa a classe File (responsavel por dados do BD)\r\ne a classe vzStorageFile (responsavel por dados no sistema de arquivos).\r\n\r\n\"\"\"\r\nfrom pts.vzComponents import *\r\nimport os\r\nfrom datetime import datetime\r\nfrom settings import FILE_STORAGE_PATH, DATE_FORMAT\r\nimport mimetypes\r\nfrom utils import convert_file_name\r\nfrom vzThumbnail import save_full,delete_thumb\r\nfrom settings import IMAGE_FULL_FOLDER,IMAGE_SMALL_FOLDER\r\n\r\nDEFAULT_TYPE_ID = 1\r\nACTIVE_TYPE_STATUS_ID = 1\r\nIMAGE_PREVIEW_SIZE = 480\r\nIMAGE_SMALL_PREVIEW_SIZE=96\r\n\r\nclass File(vzMultipleComponent):\r\n\r\n table_name = \"file\"\r\n\r\n name=\"file\"\r\n\r\n type=\"single\"\r\n\r\n fields = {\"id\":[\"hidden\",False,False,\"file.file_id\"],\r\n \"name\":[\"text\",True,False,\"file.file_name\"],\r\n \"type\":[\"select\",True,False,\"file_type.file_type_id\"],\r\n \"status\":[\"select\",True,False,\"file_status.file_status_id\"],\r\n \"date created\":[\"data\",True,False,\"file.file_date_created\"],\r\n \"date modified\":[\"data\",False,False,\"file.file_date_modified\"],\r\n }\r\n\r\n list_field = [\"id\",\"name\",\"type\",\"status\",\"date created\",\"date modified\"]\r\n\r\n select_unique_id_sql = \"SELECT * FROM file WHERE file_id = %s\"\r\n\r\n count_sql = \"SELECT COUNT(file_id) FROM file\"\r\n\r\n delete_sql = \"DELETE FROM file WHERE file_id = %s\"\r\n\r\n delete_task_files_sql = \"DELETE FROM task_x_file WHERE task_id = %s AND file_id = %s \"\r\n\r\n select_by_name_sql = \"SELECT * FROM file WHERE file_name = %s\"\r\n\r\n select_by_task_sql = \"\"\"SELECT file.file_id, file.file_name, file.file_type_id, file.file_status_id FROM file, task_x_file\r\n WHERE file.file_id = task_x_file.file_id AND task_id = %s\"\"\"\r\n\r\n def add(self,params_dict,id=None):\r\n params_c = params_dict.copy()\r\n params_c['file_name'] = self.calculate_name(params_c['file_name'])\r\n params_c['file_date created'] = datetime.now().strftime(DATE_FORMAT)\r\n return super(File,self).add(params_c,id)\r\n\r\n def calculate_name(self,name):\r\n return convert_file_name(name)\r\n \r\n def select_by_name(self,name):\r\n return mysql_db.execute(self.select_by_name_sql,name).fetchall()[0]\r\n\r\n #nao devia estar nessa classe, mas na task_file_manager, na forma de filtro\r\n def select_by_task(self,task_id):\r\n return 
mysql_db.execute(self.select_by_task_sql,task_id).fetchall()\r\n \r\n def main_delete(self,task_id,file_id):\r\n mysql_db.execute(self.delete_task_files_sql,(task_id,file_id))\r\n second = mysql_db.execute(self.delete_sql,file_id)\r\n return second.rowcount\r\n\r\n#classe que se comunica com as classes responsaveis pelos meta-dados\r\n#e pelo storage\r\nclass vzPtsFile(object):\r\n \"\"\" Esta classe lida tanto com os meta-dados armazenados no banco de dados,\r\n quanto com os dados do arquivo armazenados no sistema de arquivos.\r\n para abrir o arquivo para leitura:\r\n v = vzPtsFile() \r\n v.open('r',file_id=ID)\r\n arquivo = v.get_file() # retorna um objeto file\r\n para abrir o arquivo para escrita:\r\n v = vzPtsFile()\r\n v.open('w',name='ALGO')\r\n v.write(dados)\r\n v.close()\r\n ou\r\n v.open('w',file_id=ID)\r\n ira sobrescrever um arquivo ja existente.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.comp_file = File()\r\n self.vzfile = None\r\n \r\n def open(self,mode,name=None,file_id=None,filetype=DEFAULT_TYPE_ID):\r\n if mode == 'w':\r\n return self._open_write(name,file_id,filetype=filetype)\r\n elif mode =='r':\r\n return self._open_read(file_id)\r\n else:\r\n raise ValueError(\"modo admite apenas 'w' ou 'r'\")\r\n\r\n def _open_write(self,name=None,file_id=None,filetype=DEFAULT_TYPE_ID):\r\n #o arquivo ja existe\r\n if file_id:\r\n self.db_file = self.comp_file.select_unique_id(file_id)[0]\r\n #abre o arquivo para escrita\r\n #precisa fazer o processamento do novo arquivo, e marcar a data de modificacao\r\n #no banco de dados.\r\n self.vzfile = vzStorageFile(name=self.db_file[1],file_id=self.db_file[0])\r\n self.vzfile.open('w')\r\n elif name:\r\n #precisa criar uma nova entrada no banco de dados, e marcar a data de criacao.\r\n params = self.build_add_param(name,filetype)\r\n new_id = self.comp_file.add(params)\r\n self.db_file = self.comp_file.select_unique_id(new_id)\r\n self.vzfile = vzStorageFile(name=name,file_id=new_id)\r\n self.vzfile.open('w')\r\n else:\r\n raise TypeError(\"Precisa ter um id ou um nome.\")\r\n\r\n def _open_read(self,file_id):\r\n self.db_file = self.comp_file.select_unique_id(file_id)[0]\r\n self.vzfile = vzStorageFile(name=self.db_file[1],file_id=self.db_file[0])\r\n self.vzfile.open('r')\r\n\r\n def build_add_param(self,name,filetype):\r\n return {\r\n 'file_name':name,\r\n 'file_type':filetype,\r\n 'file_status':ACTIVE_TYPE_STATUS_ID,\r\n }\r\n \r\n def close(self):\r\n self.vzfile.close()\r\n return self.db_file\r\n\r\n def write(self,data):\r\n self.vzfile.write(data)\r\n \r\n def get_mime_type(self):\r\n #Todos arquivos de download estao sendo tratados como bin/application\r\n #para que o browser nao abra os arquivos, mas abra um dialogo de 'salvar como'\r\n return \"application\"\r\n \r\n def get_file(self):\r\n return self.vzfile.f\r\n\r\ndef delete_file(task_id,file_id):\r\n f= File()\r\n file_list = f.select_unique_id(file_id)\r\n vzfile = vzStorageFile(file_id=file_list[0][0],name=file_list[0][1])\r\n vzfile.delete()\r\n row = f.main_delete(task_id,file_id)\r\n return row\r\n\r\n#classe responsavel por trabalhar com o storage de dados\r\n#constroi uma abstracao sobre file, para nao precisar se \r\n#trabalhar com file diretamente em todas funcoes\r\nclass vzStorageFile(object):\r\n\r\n def __init__(self,name,file_id,subpath=None):\r\n self.subpath = subpath\r\n self.name = self.calculate_name(name,file_id)\r\n self.path = self.get_path(self.name)\r\n\r\n def get_path(self,name,subpath=None):\r\n if self.subpath:\r\n return 
os.path.join(os.path.join(FILE_STORAGE_PATH,self.subpath),name)\r\n else:\r\n return os.path.join(FILE_STORAGE_PATH,name)\r\n \r\n def calculate_name(self,name,file_id):\r\n return str(file_id)+convert_file_name(name)\r\n \r\n def open(self,mode):\r\n if mode == 'w':\r\n self._open_write()\r\n elif mode =='r':\r\n self._open_read()\r\n else:\r\n raise ValueError(\"modo admite apenas 'w' ou 'r'\")\r\n\r\n #vai abrir o arquivo para escrita, deve retornar o nome do arquivo\r\n #e manter em self.f o arquivo aberto.\r\n def _open_write(self):\r\n self.f = open(self.path,'wb')\r\n\r\n def _open_read(self):\r\n self.f = open(self.path,'rb')\r\n\r\n def write(self,data):\r\n self.f.write(data)\r\n \r\n def close(self):\r\n if self.f:\r\n self.f.close()\r\n if self._is_image():\r\n self.save_full_aux()\r\n \r\n def _is_image(self):\r\n l_name = self.name.split('.')\r\n ext = l_name[-1]\r\n if ext.upper() in ('JPG','GIF','PNG','BMP'):\r\n return True\r\n else:\r\n return False\r\n \r\n def save_full_aux(self):\r\n save_full(self.path,IMAGE_PREVIEW_SIZE,IMAGE_FULL_FOLDER,'vzfile_%s'%self.name)\r\n save_full(self.path,IMAGE_SMALL_PREVIEW_SIZE,IMAGE_SMALL_FOLDER,'vzfile_%s'%self.name)\r\n\r\n def delete(self):\r\n os.remove(self.path)\r\n if self._is_image():\r\n try:\r\n delete_thumb(IMAGE_FULL_FOLDER,'vzfile_%s'%self.name)\r\n except:\r\n pass\r\n try:\r\n delete_thumb(IMAGE_SMALL_FOLDER,'vzfile_%s'%self.name)\r\n except:\r\n pass\r\n","sub_path":"vzFile.py","file_name":"vzFile.py","file_ext":"py","file_size_in_byte":8030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541449429","text":"import sys\nsys.setrecursionlimit(10 ** 7)\n\nMOD = 10**9 + 7\nN, K = map(int,input().split())\n\nedge = [[] for _ in range(N)]\n\nfor _ in range(N-1) :\n fromNode, toNode = map(int,input().split())\n\n fromNode -= 1\n toNode -= 1\n\n edge[fromNode].append(toNode)\n edge[toNode].append(fromNode)\n\n# 深さ優先探索\ndef bfs(nowNode, fromNode) :\n colorNum = K # 使用可能な色の数\n if fromNode == -1 : # 根の場合\n colorNum -= 1\n else :\n colorNum -= 2\n\n if len(edge[nowNode]) - 1 > K : # つながっている頂点数 - 1(親) == K までOK\n return 0\n\n caseNum = 1\n for nextNode in edge[nowNode] :\n if nextNode == fromNode :\n continue\n caseNum *= colorNum\n colorNum -= 1\n caseNum = caseNum % MOD\n\n for nextNode in edge[nowNode] :\n if nextNode == fromNode :\n continue\n caseNum *= bfs(nextNode, nowNode)\n caseNum = caseNum % MOD\n\n return caseNum\n\nans = bfs(0, -1) * K\nans = ans % MOD\nprint(ans)\n\n","sub_path":"AtCoder/abc/133e.py","file_name":"133e.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122077556","text":"from bs4 import BeautifulSoup\n\nfrom tests.src import env\n\n\ndef test_past_alerts_page():\n template = env.get_template(\"src/past-alerts.html\")\n content = template.render()\n html = BeautifulSoup(content, 'html.parser')\n assert html.select_one('h1').text.strip() == \"Past alerts\"\n","sub_path":"tests/src/test_past_alerts.py","file_name":"test_past_alerts.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72175897","text":"from __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\ndef block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n # Inception-Resnet-A\n \"\"\"Builds the 35x35 
resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(\n tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(\n tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(\n tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')\n mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n\n\ndef block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n # Inception-Resnet-B\n \"\"\"Builds the 17x17 resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],\n scope='Conv2d_0b_1x7')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],\n scope='Conv2d_0c_7x1')\n mixed = tf.concat([tower_conv, tower_conv1_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n\n\ndef block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n # Inception-Resnet-C\n \"\"\"Builds the 8x8 resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],\n scope='Conv2d_0b_1x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],\n scope='Conv2d_0c_3x1')\n mixed = tf.concat([tower_conv, tower_conv1_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n\n\ndef reduction_a(net, k, l, m, n):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,\n scope='Conv2d_0b_3x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,\n stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)\n return net\n\n\ndef reduction_b(net):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = 
slim.conv2d(tower_conv1, 256, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat([tower_conv_1, tower_conv1_1,\n tower_conv2_2, tower_pool], 3)\n return net\n\n\ndef inference(images, is_training=True,\n bottleneck_layer_size=512, global_depthwise_conv=False, weight_decay=5e-4, reuse=None):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],\n weights_initializer=slim.initializers.xavier_initializer(\n uniform=False),\n weights_regularizer=slim.l2_regularizer(weight_decay),\n normalizer_fn=slim.batch_norm):\n return inception_resnet_v1(images, is_training=is_training,\n bottleneck_layer_size=bottleneck_layer_size,\n global_depthwise_conv=global_depthwise_conv, reuse=reuse)\n\n\ndef inception_resnet_v1(inputs, is_training=True,\n bottleneck_layer_size=512,\n global_depthwise_conv=False,\n reuse=None,\n scope='InceptionResnetV1'):\n \"\"\"Creates the Inception Resnet V1 model.\n Args:\n inputs: a 4-D tensor of size [batch_size, height, width, 3].\n bottleneck_layer_size:\n global_depthwise_conv:\n is_training: whether is training or not.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n Returns:\n logits: the logits outputs of the model.\n \"\"\"\n with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n with slim.arg_scope([slim.batch_norm], is_training=is_training):\n # 149 x 149 x 32\n net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n # 147 x 147 x 32\n net = slim.conv2d(net, 32, 3, padding='VALID',\n scope='Conv2d_2a_3x3')\n # 147 x 147 x 64\n net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')\n # 73 x 73 x 64\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_3a_3x3')\n # 73 x 73 x 80\n net = slim.conv2d(net, 80, 1, padding='VALID',\n scope='Conv2d_3b_1x1')\n # 71 x 71 x 192\n net = slim.conv2d(net, 192, 3, padding='VALID',\n scope='Conv2d_4a_3x3')\n # 35 x 35 x 256\n net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',\n scope='Conv2d_4b_3x3')\n\n # 5 x Inception-resnet-A\n net = slim.repeat(net, 5, block35, scale=0.17)\n\n # Reduction-A\n with tf.variable_scope('Mixed_6a'):\n net = reduction_a(net, 192, 192, 256, 384)\n\n # 10 x Inception-Resnet-B\n net = slim.repeat(net, 10, block17, scale=0.10)\n\n # Reduction-B\n with tf.variable_scope('Mixed_7a'):\n net = reduction_b(net)\n\n # 5 x Inception-Resnet-C\n net = slim.repeat(net, 5, block8, scale=0.20)\n\n net = block8(net, activation_fn=None)\n\n with tf.variable_scope('Logits'):\n # pylint: disable=no-member\n if global_depthwise_conv: # mobilefacenet\n net = slim.separable_conv2d(net, None, net.get_shape()[1:3], depth_multiplier=1,\n padding='VALID', scope='GdConv')\n else: # default of facenet\n net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',\n scope='AvgPool_1a_8x8')\n\n net = slim.flatten(net)\n\n net = slim.fully_connected(net, bottleneck_layer_size, 
activation_fn=None,\n scope='Bottleneck', reuse=False)\n\n return net\n\n\ndef preprocess_for_train(image):\n image = tf.image.per_image_standardization(image)\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_flip_left_right(image)\n\n return image\n\n\ndef preprocess_for_test(image):\n image = tf.image.per_image_standardization(image)\n\n return image\n\n\ndef preprocess(inputs, is_training, scope=None):\n # is_training is tensor\n with tf.name_scope(scope, 'InceptionResnetPrep'):\n inputs = tf.to_float(inputs)\n\n inputs = tf.cond(is_training,\n true_fn=lambda: tf.map_fn(\n lambda image: preprocess_for_train(image), inputs),\n false_fn=lambda: tf.map_fn(\n lambda image: preprocess_for_test(image), inputs))\n\n return inputs\n","sub_path":"python/nets/inception_resnet_v1.py","file_name":"inception_resnet_v1.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422097392","text":"import unittest\n\n'''\n# Anagram\n\nWrite a program that, given a word and a list of possible anagrams, selects the correct sublist.\n\nGiven `\"listen\"` and a list of candidates like `\"enlists\" \"google\"\n\"inlets\" \"banana\"` the program should return a list containing\n`\"inlets\"`.\n'''\n\n\ndef detect_anagrams(test_word, candidates):\n lower_test_word = test_word.lower()\n res_list = []\n\n for candi in candidates:\n if lower_test_word == candi.lower():\n continue\n if len(lower_test_word) != len(candi):\n continue\n\n lower_test_word_list = list(lower_test_word)\n lower_test_word_list.sort()\n lower_candi_list = list(candi.lower())\n lower_candi_list.sort()\n\n if lower_test_word_list == lower_candi_list:\n res_list.append(candi)\n\n return res_list\n\n\nclass AnagramTests(unittest.TestCase):\n def test_no_matches(self):\n self.assertEqual(\n [],\n detect_anagrams('diaper', 'hello world zombies pants'.split())\n )\n\n def test_detect_simple_anagram(self):\n self.assertEqual(\n ['tan'],\n detect_anagrams('ant', 'tan stand at'.split())\n )\n\n def test_detect_multiple_anagrams(self):\n self.assertEqual(\n ['stream', 'maters'],\n detect_anagrams('master', 'stream pigeon maters'.split())\n )\n\n def test_does_not_confuse_different_duplicates(self):\n self.assertEqual(\n [],\n detect_anagrams('galea', ['eagle'])\n )\n\n def test_eliminate_anagram_subsets(self):\n self.assertEqual(\n [],\n detect_anagrams('good', 'dog goody'.split())\n )\n\n def test_detect_anagram(self):\n self.assertEqual(\n ['inlets'],\n detect_anagrams('listen', 'enlists google inlets banana'.split())\n )\n\n def test_multiple_anagrams(self):\n self.assertEqual(\n 'gallery regally largely'.split(),\n detect_anagrams(\n 'allergy',\n 'gallery ballerina regally clergy largely leading'.split()\n )\n )\n\n def test_anagrams_are_case_insensitive(self):\n self.assertEqual(\n ['Carthorse'],\n detect_anagrams('Orchestra',\n 'cashregister Carthorse radishes'.split())\n )\n\n def test_same_word_isnt_anagram(self):\n self.assertEqual(\n [],\n detect_anagrams('banana', ['banana'])\n )\n\n self.assertEqual(\n [],\n detect_anagrams('go', 'go Go GO'.split())\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n\n # a=detect_anagrams('Orchestra','cashregister Carthorse radishes'.split())\n # print 
a\n","sub_path":"exam2.py","file_name":"exam2.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"509199810","text":"from scipy.spatial.distance import pdist, squareform\nfrom sklearn.manifold import MDS\n\nfrom .RANDOM_SEED import RANDOM_SEED\n\n\ndef scale_point_x_dimension_dimension(\n n_target_dimension,\n point_x_dimension=None,\n distance__point_x_point=None,\n distance_function=\"euclidean\",\n metric=True,\n n_init=int(1e3),\n max_iter=int(1e3),\n verbose=0,\n eps=1e-3,\n n_job=1,\n random_seed=RANDOM_SEED,\n):\n\n keyword_arguments = {\n \"n_components\": n_target_dimension,\n \"metric\": metric,\n \"n_init\": n_init,\n \"max_iter\": max_iter,\n \"verbose\": verbose,\n \"eps\": eps,\n \"n_jobs\": n_job,\n \"random_state\": random_seed,\n }\n\n if distance__point_x_point is None and not callable(distance_function):\n\n mds_ = MDS(dissimilarity=distance_function, **keyword_arguments)\n\n point_x_target_dimension = mds_.fit_transform(point_x_dimension)\n\n else:\n\n mds_ = MDS(dissimilarity=\"precomputed\", **keyword_arguments)\n\n if distance__point_x_point is None:\n\n distance__point_x_point = squareform(\n pdist(point_x_dimension, distance_function)\n )\n\n point_x_target_dimension = mds_.fit_transform(distance__point_x_point)\n\n return point_x_target_dimension\n","sub_path":"ccal/scale_point_x_dimension_dimension.py","file_name":"scale_point_x_dimension_dimension.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367877965","text":"def postorderTraversal(self, root: TreeNode) -> List[int]:\n # res = []\n # def postOrder(root):\n # if not root:\n # return False\n # postOrder(root.left)\n # postOrder(root.right)\n # res.append(root.val)\n # postOrder(root)\n # return res\n cur,stack,res = root,[],[]\n\n while cur or stack:\n while cur:\n res.append(cur.val)\n stack.append(cur)\n cur = cur.right\n temp = stack.pop()\n cur = temp.left\n \n return res[::-1] ","sub_path":"Week_02/Algo/binary_tree_postorder.py","file_name":"binary_tree_postorder.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171488252","text":"class Contact:\n def __init__(self, name, number):\n self.name = name\n self.number = number\n self.sent = 0\n self.recv = 0\n self.pend = 0\n\n def __str__(self):\n ret = self.name + \" (\" + str(self.sent) + \"/\" \n ret += str(self.recv) + \"/\" + str(self.pend) + \")\"\n return ret\n","sub_path":"common/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6510896","text":"from django.db import models\nfrom django.contrib.auth.admin import User\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.auth.models import BaseUserManager, AbstractBaseUser\nfrom django.shortcuts import get_object_or_404\nfrom paintstore.fields import ColorPickerField\nfrom datetime import datetime, timedelta\nimport calendar\nfrom django.utils import timezone\nimport datetime\n\nfrom django.forms import ModelForm, TextInput, DateInput\nfrom suit.widgets import EnclosedInput, SuitDateWidget, SuitSplitDateTimeWidget\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom constance import config\n\n\nclass 
Category(models.Model):\n\n class Meta:\n verbose_name_plural = \"categories\"\n\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=1200)\n color = ColorPickerField()\n\n def __str__(self):\n return self.name\n\n\ndef get_today():\n return datetime.datetime(timezone.now().year, timezone.now().month, timezone.now().day)\n\n\ndef get_coming_monday(date=get_today()):\n # coming monday's date\n coming_monday = date\n while coming_monday.weekday() != 0:\n coming_monday += datetime.timedelta(1)\n return coming_monday\n\n\n# Stores announcements for newsvine\n\n\nclass Announcement(models.Model):\n submitter = models.ForeignKey(User, db_index=True, related_name=\"announcement_submitter\", null=True, blank=True, default=None, on_delete=models.SET_NULL)\n approver = models.ForeignKey(User, db_index=True, related_name=\"announcement_approver\", null=True, blank=True, default=None, on_delete=models.SET_NULL)\n title = models.CharField(max_length=200, default='')\n body = models.TextField(max_length=1200, default='')\n publish_start_date = models.DateField('Date to start publishing', default=datetime.datetime.now)\n publish_end_date = models.DateField('Date to end publishing', default=get_coming_monday())\n category = models.ForeignKey(Category, default='', verbose_name='Audience', on_delete=models.SET_NULL, null=True, blank=True,)\n link = models.CharField(max_length=400, blank=True, default='')\n hidden = models.BooleanField(default=False)\n under_review = models.BooleanField(default=True)\n contact = models.CharField(max_length=200, blank=True, default='')\n\n def is_published(self):\n import datetime\n today = datetime.date.today()\n if self.publish_start_date <= today <= self.publish_end_date:\n return True\n return False\n\n def __str__(self):\n return '%s - %s: %s' % (self.publish_start_date, self.publish_end_date, self.title)\n\n# Stores event list\n\n\nclass Event(models.Model):\n # Display Override\n DEFAULT = 'default'\n HIDE = 'hide'\n SHOW = 'show'\n CHOICES = (\n (DEFAULT, 'Default: Displays 14 days in advance'),\n (HIDE, 'Hide Event'),\n (SHOW, 'Show Event'),\n )\n\n title = models.CharField(max_length=200)\n description = models.TextField(max_length=400, null=True, blank=True)\n date_start = models.DateField(default=datetime.datetime.now)\n date_end = models.DateField(blank=True, null=True)\n track_rsvp = models.BooleanField(default=False)\n display_override = models.CharField(max_length=30, choices=CHOICES, default=CHOICES[0][0], null=True, blank=True)\n\n def __str__(self):\n return '%s - %s to %s' % (self.title, self.date_start, self.date_end)\n\n\nclass Signup(models.Model):\n # Service Names\n NOTGOING = 'notgoing'\n INTERESTED = 'interested'\n GOING = 'going'\n CHOICES = (\n (NOTGOING, 'Not Going'),\n (INTERESTED, 'Interested'),\n (GOING, 'Going')\n )\n\n user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)\n event = models.ForeignKey(Event, db_index=True, on_delete=models.CASCADE)\n rsvp = models.CharField(max_length=30, choices=CHOICES, default=CHOICES[0][0])\n\n def __str__(self):\n return '%s - %s - %s' % (self.event, self.user, self.rsvp)\n\n\n# Stores announcements for newsvine\n\n\nclass OrderOfService(models.Model):\n # Service Names\n SUN_MORN_ENG = 'sunday-morning-english'\n CHOICES = (\n (SUN_MORN_ENG, 'Sunday Morning - English Service'),)\n\n date = models.DateField(default=datetime.datetime.now)\n text = models.TextField(default='', blank=True)\n service_name = models.CharField(\n max_length=200, choices=CHOICES, 
default=CHOICES[0][0])\n\n def is_upcoming(self):\n import datetime\n today = datetime.date.today()\n if today <= self.date:\n return True\n return False\n\n def is_print(self):\n import datetime\n # coming sunday's date\n coming_sunday = datetime.date.today()\n while coming_sunday.weekday() != 6:\n coming_sunday += datetime.timedelta(1)\n\n if coming_sunday == self.date:\n return True\n return False\n\n def num_of_lines(self):\n i = 0\n for line in self.text:\n if \"\\n\" in line:\n i += 1\n return i\n\n def get_absolute_url(self):\n return reverse('orderofservice_edit', kwargs={'pk': self.pk})\n\n def __str__(self):\n return '%s %s' % (self.date, self.service_name)\n\n\n# Add member details\nclass Profile(models.Model):\n M = 'm'\n F = 'f'\n GENDER_CHOICES = (\n (M, 'Male'),\n (F, 'Female'),\n )\n\n user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)\n first_name = models.CharField(\"First Name\", max_length=80, null=True, blank=True)\n last_name = models.CharField(\"Last Name\", max_length=80, null=True, blank=True)\n email = models.EmailField(\"Email Address\", max_length=254, null=True, blank=True)\n\n prefered_name = models.CharField(\"Preferred Name\", max_length=120, null=True, blank=True, help_text=\"We will use this name around the site instead of a combination of last+first name.\")\n maiden_name = models.CharField(\"Maiden Name\", max_length=80, null=True, blank=True)\n\n gender = models.CharField(\"Gender\", max_length=1, choices=GENDER_CHOICES, null=True, blank=True)\n\n date_record_updated = models.DateField(null=True, blank=True, default=datetime.datetime.now)\n\n # important dates\n date_of_birth = models.DateField(null=True, blank=True)\n date_of_marriage = models.DateField(null=True, blank=True)\n date_of_baptism = models.DateField(null=True, blank=True)\n date_of_death = models.DateField(null=True, blank=True)\n\n mobile_number = models.CharField(max_length=15, null=True, blank=True)\n home_number = models.CharField(max_length=15, null=True, blank=True)\n\n # address\n address_block = models.CharField(max_length=12, null=True, blank=True)\n address_street = models.CharField(max_length=140, null=True, blank=True)\n address_unit = models.CharField(max_length=12, null=True, blank=True)\n country = models.CharField(max_length=30, null=True, blank=True)\n postal_code = models.CharField(max_length=12, null=True, blank=True)\n\n # other details\n is_regular = models.BooleanField(default=True)\n is_member = models.BooleanField(default=False)\n\n def __str__(self):\n return '%s, %s %s' % (self.user, self.first_name, self.last_name)\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n if instance.email is None:\n return None\n else:\n editor_list = config.EDITOR_LIST.split()\n try:\n if any(instance.email in s for s in editor_list):\n instance.groups.add(Group.objects.get(name='editor'))\n except Group.DoesNotExist:\n pass\n\n contributor_list = config.CONTRIBUTOR_LIST.split()\n try:\n if any(instance.email in s for s in contributor_list):\n instance.groups.add(Group.objects.get(name='contributor'))\n except Group.DoesNotExist:\n pass\n\n email_matching = Profile.objects.filter(email=instance.email).first()\n if email_matching is not None:\n email_matching.user = instance\n email_matching.save()\n instance.first_name = email_matching.first_name\n instance.last_name = email_matching.last_name\n instance.save()\n else:\n Profile.objects.create(user=instance)\n\n\n@receiver(post_save, 
sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\n# family relationships\n\n\nclass Relationship(models.Model):\n MEMBER_RELATIONSHIP_CHOICES = (\n ('MO', 'Mother'),\n ('FA', 'Father'),\n ('BRO', 'Brother'),\n ('SIS', 'Sister'),\n ('SON', 'Son'),\n ('DAUG', 'Daughter'),\n ('GRMA', 'Grand Mother'),\n ('GRFA', 'Grand Father'),\n ('GRSON', 'Grand Son'),\n ('GRDAUG', 'Grand Daughter'),\n )\n\n user = models.OneToOneField(User, null=True, related_name='user_relationship', on_delete=models.SET_NULL)\n person = models.ForeignKey(User, null=True, related_name='person_relationship', on_delete=models.SET_NULL)\n relationship = models.CharField(max_length=10, choices=MEMBER_RELATIONSHIP_CHOICES)\n\n def __str__(self):\n return '%s, %s, %s' % (self.person, self.user, self.relationship)\n\n\nclass SundayAttendance(models.Model):\n submitter = models.ForeignKey(User, db_index=True, related_name=\"sunday_attendance_submitter\", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT)\n approver = models.ForeignKey(User, db_index=True, related_name=\"sunday_attendance_approver\", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT)\n date = models.DateField(default=datetime.datetime.now)\n english_congregation = models.PositiveSmallIntegerField(default=0)\n chinese_congregation = models.PositiveSmallIntegerField(default=0)\n childrens_church = models.PositiveSmallIntegerField(default=0)\n preschoolers = models.PositiveSmallIntegerField(default=0)\n nursery = models.PositiveSmallIntegerField(default=0)\n under_review = models.BooleanField(default=True)\n notes = models.TextField(max_length=300, null=True, blank=True)\n\n def __str__(self):\n return '%s' % (self.date)\n\n\nclass BuildingFundYearGoal(models.Model):\n name = models.TextField(max_length=80, null=True, blank=True)\n date = models.DateField(default=datetime.datetime.now)\n amount = models.DecimalField(max_digits=10, decimal_places=2, default='0')\n description = models.TextField(max_length=300, null=True, blank=True)\n\n def __str__(self):\n formatted_amount = '{:20,.2f}'.format(self.amount)\n return '%s - $%s' % (self.date, formatted_amount)\n\n\nclass BuildingFundYearPledge(models.Model):\n name = models.TextField(max_length=80, null=True, blank=True)\n date = models.DateField(default=datetime.datetime.now)\n amount = models.DecimalField(max_digits=10, decimal_places=2, default='0')\n description = models.TextField(max_length=300, null=True, blank=True)\n\n def __str__(self):\n formatted_amount = '{:20,.2f}'.format(self.amount)\n return '%s - $%s' % (self.date, formatted_amount)\n\n\nclass BuildingFundCollection(models.Model):\n date = models.DateField(default=datetime.datetime.now)\n building_fund_year_pledge = models.ForeignKey(BuildingFundYearPledge, on_delete=models.SET_DEFAULT, default=0)\n building_fund_year_goal = models.ForeignKey(BuildingFundYearGoal, on_delete=models.SET_DEFAULT, default=0)\n amount = models.DecimalField(max_digits=10, decimal_places=2, default='0')\n notes = models.TextField(max_length=300, null=True, blank=True)\n\n def __str__(self):\n formatted_amount = '{:20,.2f}'.format(self.amount)\n return '%s - $%s' % (self.date.strftime(\"%d/%m/%y\"), formatted_amount)\n\n\nclass WeeklyVerse(models.Model):\n date = models.DateField(default=get_coming_monday())\n verse = models.TextField(max_length=1200, default='')\n reference = models.CharField(max_length=40, default='')\n\n def __str__(self):\n return '%s - %s' % (self.date, self.reference)\n\n\nclass 
ExtendedGroup(Group):\n MONDAY = 0\n TUESDAY = 1\n WEDNESDAY = 2\n THURSDAY = 3\n FRIDAY = 4\n SATURDAY = 5\n SUNDAY = 6\n IRREGULAR = 7\n\n DAYS_OF_WEEK = (\n (MONDAY, 'Monday'),\n (TUESDAY, 'Tuesday'),\n (WEDNESDAY, 'Wednesday'),\n (THURSDAY, 'Thursday'),\n (FRIDAY, 'Friday'),\n (SATURDAY, 'Saturday'),\n (SUNDAY, 'Sunday'),\n (IRREGULAR, 'Irregular'),\n )\n\n COMMUNITY = 0\n MANAGEMENT = 1\n COMMITTEE = 2\n\n GROUP_TYPE = (\n (COMMUNITY, 'Community Group'),\n (MANAGEMENT, 'Management'),\n (COMMITTEE, 'Committee'),\n )\n\n leader = models.ManyToManyField(Profile, blank=True, related_name='leader_profile')\n group_type = models.IntegerField(choices=GROUP_TYPE)\n notes = models.TextField(max_length=300, null=True, blank=True)\n date_formed = models.DateField(default=datetime.datetime.now, null=True, blank=True)\n date_dissolved = models.DateField(null=True, blank=True)\n meeting_day = models.IntegerField(choices=DAYS_OF_WEEK, null=True, blank=True, help_text=\"Day of the week the group regularly meets if any\")\n meeting_time = models.TimeField(null=True, blank=True, help_text=\"Meeting time of the the group in 24hr format ie. 13:00 for 1pm\")\n active = models.BooleanField(default=True)\n member = models.ManyToManyField(Profile, blank=True, related_name='leader_member')\n\n def __str__(self):\n return '%s' % (self.name)\n\n\nclass GroupAttendance(models.Model):\n PRESENT = 0\n ABSENT = 1\n EXCUSED = 2\n UNKNOWN = 3\n\n ATTENDANCE_CHOICES = (\n (PRESENT, 'Present'),\n (ABSENT, 'Absent'),\n (EXCUSED, 'Excused'),\n (UNKNOWN, 'Unknown'),\n )\n\n person = models.ForeignKey(Profile, on_delete=models.SET_DEFAULT, default=3)\n group = models.ForeignKey(ExtendedGroup, on_delete=models.SET_DEFAULT, default=3)\n date = models.DateField(default=datetime.datetime.now)\n attendance = models.IntegerField(choices=ATTENDANCE_CHOICES)\n\n def __str__(self):\n return '%s , %s , %s , %s' % (self.person.first_name, self.group, self.date, self.attendance)\n","sub_path":"serf/newswire/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330615407","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 5 04:53:34 2017\n\n@author: kim\n\"\"\"\n\nimport h5py\nimport numpy as np\n\ntrain_filename = 'iris_data.hdf5'\n\nf = open(\"iris.txt\", 'r')\nline = f.readline()\nline = line.split(',')\ndata = [[line[0], line[1], line[2], line[3]]]\nline[4] = line[4][:-1]\nif line[4] == 'Iris-setosa' :\n labels = [0]\nelif line[4] == 'Iris-versicolor' :\n labels = [1]\nelif line[4] == 'Iris-virginica' :\n labels = [2]\n \nwhile True:\n line = f.readline()\n if (not line) or (line[0] == '\\n'): break\n\n line = line.split(',')\n data_val = [[line[0], line[1], line[2], line[3]]]\n line[4] = line[4][:-1]\n if line[4] == 'Iris-setosa' :\n label_val = [0]\n elif line[4] == 'Iris-versicolor' :\n label_val = [1]\n elif line[4] == 'Iris-virginica' :\n label_val = [2]\n\n data = np.append(data, data_val, axis=0)\n labels = np.append(labels, label_val, axis=0)\n \nf.close()\n\nnp_data = np.array(data)\nnp_labels = np.array(labels)\n\nwith h5py.File(train_filename, 'w') as f:\n f['data'] = np_data.astype(np.float32)\n f['label'] = np_labels.astype(np.float32)\n","sub_path":"examples/iris_recognition/create_hdf5_data.py","file_name":"create_hdf5_data.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
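The create_hdf5_data.py record above writes the iris features and labels to HDF5 but never verifies them. A minimal read-back sketch (file name and the 'data'/'label' dataset keys are taken from that script; the sanity check itself is illustrative, not part of the original):

import h5py
import numpy as np

# Reload the datasets written by create_hdf5_data.py.
with h5py.File('iris_data.hdf5', 'r') as f:
    data = np.array(f['data'])     # (150, 4) float32 feature matrix for the standard iris set
    labels = np.array(f['label'])  # class ids 0/1/2, stored as float32

print(data.shape, labels.shape)
print('samples per class:', np.bincount(labels.astype(int)))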
+{"seq_id":"369617259","text":"import numpy as np \r\nfrom general import *\r\nimport random\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\ntrain = getData('clean_train.txt')\r\nX, y = createMatrices(train)\r\ny = [int(y.loc[i]) for i in range(y.shape[0])]\r\nlr = LogisticRegression(penalty = 'l2', C = 10 ** 14) \r\nlr.fit(X, y)\r\ntest = getData('clean_test.txt')\r\npred_x, corr_y = createMatrices(test)\r\npred_y = lr.predict(pred_x)\r\nerror = 0\r\nfor i in range(pred_y.shape[0]):\r\n if pred_y[i] != int(corr_y.loc[i]):\r\n error += 1\r\nprint(error/pred_y.shape[0])\r\n\r\n\r\n\r\n\r\n","sub_path":"logisticRegression.py","file_name":"logisticRegression.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481695375","text":"\"\"\"Useful helpers for basic stuff like parsing module version.\"\"\"\n\nimport os\nimport re\n\n\ndef find_version(*file_paths):\n \"\"\"\n This pattern was modeled on a method from the Python Packaging User Guide:\n https://packaging.python.org/en/latest/single_source_version.html\n\n We read instead of importing so we don't get import errors if our code\n imports from dependencies listed in install_requires.\n \"\"\"\n base_module_file = os.path.join(*file_paths)\n with open(base_module_file) as f:\n base_module_data = f.read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n base_module_data, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n","sub_path":"napalm_base/system_helpers.py","file_name":"system_helpers.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"193929956","text":"def sing(n):\n if n == 1:\n objects = 'bottle'\n objects_minus_one = 'bottles'\n elif n == 2:\n objects = 'bottles'\n objects_minus_one = 'bottle'\n else:\n objects = 'bottles'\n objects_minus_one = 'bottles'\n\n if n > 0:\n print(str(n) + \" \" + objects + \" of beer on the wall, \" + str(n) + \" \" + objects + \" of beer.\")\n print(\"Take one down and pass it around, \" + str(n-1) + \" \" + objects_minus_one + \" of beer on the wall.\")\n print(\" \")\n elif n == 0:\n print(\"No more bottles of beer on the wall, no more bottles of beer.\")\n print(\"Go to the store and buy some more, 99 bottles of beer on the wall.\")\n else:\n print(\"Error: Wheres the booze?\")\nbottles = 99\n\nwhile bottles >= 0:\n sing(bottles)\n bottles -= 1\n","sub_path":"99Bottles.py","file_name":"99Bottles.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42415729","text":"\"\"\"\ndocstring needed\n\n:copyright: Copyright 2010-2013 by the Python lib9ML team, see AUTHORS.\n:license: BSD-3, see LICENSE for details.\n\"\"\"\nfrom ...componentclass.visitors.cloner import ComponentCloner\nfrom ..regimes import TimeDerivative, Regime, StateVariable\nfrom ..transitions import (\n OnCondition, OnEvent, Trigger, StateAssignment, OutputEvent)\nfrom ...ports import (\n AnalogSendPort, AnalogReceivePort, AnalogReducePort, EventSendPort,\n EventReceivePort)\n\n\nclass DynamicsCloner(ComponentCloner):\n\n def visit_componentclass(self, component_class, **kwargs):\n try:\n cls = component_class.core_type\n except AttributeError:\n cls = type(component_class)\n cc = cls(\n name=component_class.name,\n 
parameters=[p.accept_visitor(self, **kwargs)\n for p in component_class.parameters],\n analog_ports=[p.accept_visitor(self, **kwargs)\n for p in component_class.analog_ports],\n event_ports=[p.accept_visitor(self, **kwargs)\n for p in component_class.event_ports],\n regimes=[r.accept_visitor(self, **kwargs)\n for r in component_class.regimes],\n aliases=[\n a.accept_visitor(self, **kwargs)\n for a in component_class.aliases],\n state_variables=[\n s.accept_visitor(self, **kwargs)\n for s in component_class.state_variables],\n constants=[c.accept_visitor(self, **kwargs)\n for c in component_class.constants])\n self.copy_indices(component_class, cc)\n return cc\n\n def visit_regime(self, regime, **kwargs):\n r = Regime(\n name=regime.name,\n time_derivatives=[t.accept_visitor(self, **kwargs)\n for t in regime.time_derivatives],\n transitions=[t.accept_visitor(self, **kwargs)\n for t in regime.transitions])\n self.copy_indices(regime, r)\n return r\n\n def visit_statevariable(self, state_variable, **kwargs):\n return StateVariable(\n name=self.prefix_variable(state_variable.name, **kwargs),\n dimension=state_variable.dimension)\n\n def visit_analogreceiveport(self, port, **kwargs):\n return AnalogReceivePort(\n name=self.prefix_variable(port.name, **kwargs),\n dimension=port.dimension)\n\n def visit_analogreduceport(self, port, **kwargs):\n return AnalogReducePort(\n name=self.prefix_variable(port.name, **kwargs),\n dimension=port.dimension)\n\n def visit_analogsendport(self, port, **kwargs):\n return AnalogSendPort(\n name=self.prefix_variable(port.name, **kwargs),\n dimension=port.dimension)\n\n def visit_eventsendport(self, port, **kwargs):\n return EventSendPort(\n name=self.prefix_variable(port.name, **kwargs))\n\n def visit_eventreceiveport(self, port, **kwargs):\n return EventReceivePort(\n name=self.prefix_variable(port.name, **kwargs))\n\n def visit_outputevent(self, event_out, **kwargs):\n return OutputEvent(\n port_name=self.prefix_variable(event_out.port_name, **kwargs))\n\n def visit_stateassignment(self, assignment, **kwargs):\n prefix = kwargs.get('prefix', '')\n prefix_excludes = kwargs.get('prefix_excludes', [])\n\n lhs = self.prefix_variable(assignment.lhs, **kwargs)\n rhs = assignment.rhs_suffixed(suffix='', prefix=prefix,\n excludes=prefix_excludes)\n return StateAssignment(lhs=lhs, rhs=rhs)\n\n def visit_timederivative(self, time_derivative, **kwargs):\n prefix = kwargs.get('prefix', '')\n prefix_excludes = kwargs.get('prefix_excludes', [])\n\n dep = self.prefix_variable(time_derivative.variable,\n **kwargs)\n\n rhs = time_derivative.rhs_suffixed(suffix='', prefix=prefix,\n excludes=prefix_excludes)\n return TimeDerivative(variable=dep, rhs=rhs)\n\n def visit_trigger(self, trigger, **kwargs):\n prefix = kwargs.get('prefix', '')\n prefix_excludes = kwargs.get('prefix_excludes', [])\n rhs = trigger.rhs_suffixed(suffix='', prefix=prefix,\n excludes=prefix_excludes)\n return Trigger(rhs=rhs)\n\n def visit_oncondition(self, on_condition, **kwargs):\n oc = OnCondition(\n trigger=on_condition.trigger.accept_visitor(self, **kwargs),\n output_events=[e.accept_visitor(self, **kwargs)\n for e in on_condition.output_events],\n state_assignments=[s.accept_visitor(self, **kwargs)\n for s in on_condition.state_assignments],\n target_regime=on_condition.target_regime.name\n )\n self.copy_indices(on_condition, oc)\n return oc\n\n def visit_onevent(self, on_event, **kwargs):\n oe = OnEvent(\n src_port_name=self.prefix_variable(on_event.src_port_name,\n **kwargs),\n 
output_events=[e.accept_visitor(self, **kwargs)\n for e in on_event.output_events],\n state_assignments=[s.accept_visitor(self, **kwargs)\n for s in on_event.state_assignments],\n target_regime=on_event.target_regime.name\n )\n self.copy_indices(on_event, oe, **kwargs)\n return oe\n","sub_path":"nineml/abstraction/dynamics/visitors/cloner.py","file_name":"cloner.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507747484","text":"from __future__ import print_function, division\nimport numpy as np\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\n\nnum_epochs = 100\ntotal_series_length = 50000\ntruncated_backprop_length = 15\nstate_size = 4\nnum_classes = 2\necho_step = 3\nbatch_size = 5\nnum_batches = total_series_length//batch_size//truncated_backprop_length\n\n\n# Function to generate data:\ndef generateData():\n\tx = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))\n\ty = np.roll(x, echo_step)\n\ty[0:echo_step] = 0\n\tx = x.reshape((batch_size, -1)) \n\ty = y.reshape((batch_size, -1))\n\treturn (x, y)\n\n\n# Tensorflow placeholders for input, output and RNN state:\nbatchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])\nbatchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])\n\n# Tensorflow variables for weights and biases:\nW = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)\nb = tf.Variable(np.zeros((1,state_size)), dtype=tf.float32)\nW2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32) #output layer\nb2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32) #output layer\n\ninputs_series = tf.unstack(batchX_placeholder, axis=1) \n# This splits the batchX matrix into a list of batch_size column vectors, because we are unstacking along the column axis. The RNN will simultaneously be training on different parts in the time-series (each row is small piece from different place in the timeseries). Hence the name input_series -> not 1 sequence but series.\nlabels_series = tf.unstack(batchY_placeholder, axis=1)\n\ninit_state = tf.placeholder(tf.float32, [batch_size, state_size])\n# Because of the unstacking above and simulataneous training on batches of parts of the series, this requires us to save batch_size number of instances of states when propagating the RNN forward. Therefore, the initial state placeholder is of the size (batch_size x state_size). So we save one instance of the state per batch. \n\n# Forward pass\ncurrent_state = init_state\nstates_series = []\nfor current_input in inputs_series:\n\tcurrent_input = tf.reshape(current_input, [batch_size, 1]) # I think this should already be of the shape batch_size x 1\n\t# However, tf.unstack simply returns a list of tensor objects. So, I guess we assert the shape of the tensor object above.\n\tinput_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns\n\tnext_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition of the biases\n\tstates_series.append(next_state)\n\tcurrent_state = next_state\n\n'''\nNote about broadcasting in Numpy:\nThe size of the resulting array is the maximum size along each dimension of the input arrays.\nIn this code, addition by broadcasting is done in 2 places:\n\t1. to compute the next state\n\t2. to compute the logit \nSo dimension of,\n\t1. 
next_state is : batch_size x state_size -> (5 x 4)\n\t2. logit is : batch_size x num_classes -> (5 x 2)\nThe dimensions with size 1 are stretched or “copied” to match the other. So in case 1, (1x4) bias vector would become (5x4) matrix.\n'''\n\nlogits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition\npredictions_series = [tf.nn.softmax(logits) for logits in logits_series]\n# This prediction maybe to just visualize the output at every step in the sequence\n\nlosses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]\n# Here softmax is calculated internally before doing cross entropy loss calculation\n# Also, here I would guess losses would be a list where each elements is a (5 x 1) column vector. In all it would be a matrix of size (batch_size x truncated_backprop_length)\n\ntotal_loss = tf.reduce_mean(losses)\ntrain_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)\n\n# Visualization:\ndef plot(loss_list, predictions_series, batchX, batchY):\n\tplt.subplot(2, 3, 1)\n\tplt.cla()\n\tplt.plot(loss_list)\n\n\tfor batch_series_idx in range(5):\n\t\tone_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]\n\t\tsingle_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])\n\t\tplt.subplot(2, 3, batch_series_idx + 2)\n\t\tplt.cla()\n\t\tplt.axis([0, truncated_backprop_length, 0, 2])\n\t\tleft_offset = range(truncated_backprop_length)\n\t\tplt.bar(left_offset, batchX[batch_series_idx, :], width=1, color=\"blue\")\n\t\tplt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color=\"red\")\n\t\tplt.bar(left_offset, single_output_series * 0.3, width=1, color=\"green\")\n\t\n\tplt.draw()\n\tplt.pause(0.0001)\n\nwith tf.Session() as sess:\n\tsess.run(tf.initialize_all_variables())\n\tplt.ion()\n\tplt.figure()\n\tplt.show()\n\tloss_list = []\n\n\tfor epoch_idx in range(num_epochs):\n\t\tx,y = generateData()\n\t\t_current_state = np.zeros((batch_size, state_size))\n\n\t\tprint(\"New data, epoch\", epoch_idx)\n\n\t\tfor batch_idx in range(num_batches):\n\t\t\tstart_idx = batch_idx * truncated_backprop_length\n\t\t\tend_idx = start_idx + truncated_backprop_length\n\t\t\t\n\t\t\tbatchX = x[:,start_idx:end_idx]\n\t\t\tbatchY = y[:,start_idx:end_idx]\n\n\t\t\t_total_loss, _train_step, _current_state, _predictions_series = sess.run(\n\t\t\t\t[total_loss, train_step, current_state, predictions_series],\n\t\t\t\tfeed_dict={\n\t\t\t\tbatchX_placeholder:batchX,\n\t\t\t\tbatchY_placeholder:batchY,\n\t\t\t\tinit_state:_current_state\n\t\t\t\t})\n\n\t\t\tloss_list.append(_total_loss)\n\n\t\t\tif batch_idx%100 == 0:\n\t\t\t\tprint(\"Step\",batch_idx, \"Loss\", _total_loss)\n\t\t\t\tplot(loss_list, _predictions_series, batchX, batchY)\n\t\tplt.ioff()\n\t\tplt.show()\n\n","sub_path":"TensorFlowTutorials/RNN_tutorial_1_RNN.py","file_name":"RNN_tutorial_1_RNN.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545114283","text":"from flask import Flask, jsonify, render_template, request\nfrom modules import scraper\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef main():\n return render_template('index.html')\n\n\n@app.route('/scrape', methods=['POST'])\ndef scrape():\n url = request.form['url']\n v_url = scraper.verify_url(url)\n if v_url['result'] == 'true':\n url_type = scraper.classify_url(url)\n elif 
v_url['result'] == 'false':\n return jsonify({'error': v_url['message']})\n else:\n return jsonify({'error': 'something went wrong'})\n\n if url_type['type'] == 'category':\n return jsonify({'items': scraper.scrape_category(url)})\n if url_type['type'] == 'product':\n return jsonify(scraper.scrape_product(url))\n else:\n return jsonify({'error': url_type['message']})\n\n\nif __name__ == '__main__':\n app.run(host='localhost', port=1337, debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35305327","text":"from math import sqrt\nfrom math import pow\n\nimport unittest\n\nfrom prototype.utils.gps import latlon_offset\nfrom prototype.utils.gps import latlon_diff\nfrom prototype.utils.gps import latlon_dist\n\n\nclass GPSTest(unittest.TestCase):\n def test_latlong_offset(self):\n # UWaterloo 110 yards Canadian Football field from one end to another\n lat = 43.474357\n lon = -80.550415\n offset_N = 44.1938\n offset_E = 90.2336\n\n # Calculate football field GPS coordinates\n lat_new, lon_new = latlon_offset(lat, lon, offset_N, offset_E)\n debug = False\n if debug:\n print(\"lat new: \", lat_new)\n print(\"lon new: \", lon_new)\n\n self.assertTrue(abs(43.474754 - lat_new) < 0.0015)\n self.assertTrue(abs(-80.549298 - lon_new) < 0.0015)\n\n def test_latlon_diff(self):\n # UWaterloo 110 yards Canadian Football field from one end to another\n lat_ref = 43.474357\n lon_ref = -80.550415\n lat = 43.474754\n lon = -80.549298\n\n dist_N = 0.0\n dist_E = 0.0\n\n # Calculate football field distance\n dist_N, dist_E = latlon_diff(lat_ref, lon_ref, lat, lon)\n dist = sqrt(pow(dist_N, 2) + pow(dist_E, 2))\n debug = False\n if debug:\n print(\"distance north: \", dist_N)\n print(\"distance east: \", dist_E)\n\n # 110 yards is approx 100 meters\n self.assertTrue(abs(100.0 - dist) < 1.0)\n\n def test_latlon_dist(self):\n # UWaterloo 110 yards Canadian Football field from one end to another\n lat_ref = 43.474357\n lon_ref = -80.550415\n lat = 43.474754\n lon = -80.549298\n\n # Calculate football field distance\n dist = latlon_dist(lat_ref, lon_ref, lat, lon)\n debug = False\n if debug:\n print(\"distance: \", dist)\n\n # 110 yards is approx 100 meters\n self.assertTrue(abs(100.0 - dist) < 1.0)\n","sub_path":"prototype/tests/utils/test_gps.py","file_name":"test_gps.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369856359","text":"import pickle\nfrom pprint import pprint as pp\n\nfilepath = './Data/employees.dat'\n\nwith open(filepath, 'rb') as empfile:\n employees = pickle.load(empfile)\n\n\npp(employees)\n\nfor id, name in employees.items():\n print(id, name)\n\n ","sub_path":"Intermediate/Day6/6SerializationDemo/serializationdemo2.py","file_name":"serializationdemo2.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"207288369","text":"# coding:utf-8\nimport pandas as pd\nID = ''\njsonpath = ''\nsDatahomePath = ''\nsRslPath = ''\nsearch_time = ''\nexport_excel = pd.DataFrame({\"id\" : [], 'mean':[]})\noutput_format = '.tif' # available value: .png .tif\niDataProduct = 1\niCloulLevel = 9","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
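The GPS test record above exercises latlon_dist from prototype.utils.gps without showing its implementation. One plausible haversine formulation that satisfies the test's football-field tolerance (~100 m within 1 m); the earth-radius constant and the function body are assumptions for illustration, not the project's actual code:

from math import radians, sin, cos, asin, sqrt

EARTH_RADIUS_M = 6378137.0  # WGS-84 equatorial radius (assumed)

def latlon_dist(lat_ref, lon_ref, lat, lon):
    """Great-circle distance in meters between two lat/lon points (haversine)."""
    dlat = radians(lat - lat_ref)
    dlon = radians(lon - lon_ref)
    a = sin(dlat / 2.0) ** 2 + \
        cos(radians(lat_ref)) * cos(radians(lat)) * sin(dlon / 2.0) ** 2
    return 2.0 * EARTH_RADIUS_M * asin(sqrt(a))

# The football-field case from the test above: prints roughly 100.5 m.
print(latlon_dist(43.474357, -80.550415, 43.474754, -80.549298))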
+{"seq_id":"453544739","text":"import lirc\nimport logging\nimport time\nimport timeit\n\nclass Race:\n\n DELAY = 0.009\n WRITE_TIMEOUT = 0.028\n READ_TIMEOUT = 0.75\n\n def __init__(self):\n self.remote = \"carrera\"\n self.conn = None\n self.socket = \"/usr/local/var/run/lirc/lircd\"\n\n def __lirc_conn(self):\n return lirc.CommandConnection(socket_path=self.socket)\n\n def __find_sync(self):\n \"\"\"Waits for a blast from the lirc process and returns true if it's\n a syncing signal from the Carrera IR tower.\"\"\"\n sync = \"SYNC %s\" % self.remote\n\n try:\n msg = self.conn.readline(Race.READ_TIMEOUT)\n return sync in msg\n except Exception as e:\n logging.warn(\"Did not receive SYNC from %s, skipping.\" % self.remote)\n return False\n\n\n def __send(self, cmd):\n \"\"\"Attempt to send command to lirc via the socket.\"\"\"\n try:\n lirc.SendCommand(self.conn, self.remote, [cmd]).run(Race.WRITE_TIMEOUT)\n except lirc.client.TimeoutException:\n logging.warn(\"Send of %s to lirc timed out\" % cmd)\n except BrokenPipeError:\n logging.info(\"Refreshing lirc connection\")\n self.conn.close()\n self.conn = self.__lirc_conn()\n\n\n def run(self):\n try:\n self.conn = self.__lirc_conn()\n\n while True:\n if self.__find_sync():\n\n self.__send(\"P0S3L0\")\n self.__send(\"P1S3L0\")\n self.__send(\"P2S3L0\")\n self.__send(\"P3S3L0\")\n\n except KeyboardInterrupt:\n logging.warn(\"Terminating Race\")\n finally:\n if self.conn:\n self.conn.close()\n\n\nif __name__ == '__main__':\n\n r = Race()\n r.run()\n","sub_path":"laserdrift/websocket_race.py","file_name":"websocket_race.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"557478296","text":"#!/usr/bin/env python3\n# -*-coding: UTF-8 -*-\n# Author Frank\n\nimport os\n\nfrom tkinter import filedialog\n\n# Choose a folder via filedialog\ndirectory_path = filedialog.askdirectory(title='Please select an image folder', initialdir=r\"D:\\git_repository\\adam\\resource\\pic\")\n\npath_temp = os.path.split(os.path.realpath(__file__))[0]\nfile_path = path_temp + '/pic.py'\n\nwith open(file_path, 'w+') as file:\n file.write('')\n\nwith open(file_path, 'a') as file:\n file.write('import os')\n file.write('\\n')\n file.write('\\n')\n file.write(\"prefix = __file__.replace('pic.py', 'pic/')\\n\")\n\n# Walk all .bmp images under the selected folder\nprefix = ''\nfor root, dirs, files in os.walk(directory_path):\n if files:\n # root is each folder's absolute path; strip the leading part below to\n # get a path relative to the selected folder\n if root != directory_path:\n root = root.replace(directory_path + '\\\\', '')\n prefix = root + '/'\n prefix = prefix.replace('\\\\', '/')\n with open(file_path, 'a') as file:\n file.write('# ' + prefix + '\\n')\n else:\n with open(file_path, 'a') as file:\n file.write('# root\\n')\n for file_name in files:\n # variable: the variable name\n # prefix: the file path prefix\n # file_name: the file name\n if '.bmp' in file_name or '.jpg' in file_name:\n # strip the extension\n # variable = file_name.replace('.bmp' if '.bmp' in file_name else '.jpg', '')\n variable = file_name[:-4]\n # build the path assignment statement\n full_str = \"{0} = os.path.join(prefix, '{1}{2}')\\n\".format(variable, prefix, file_name)\n # write it to pic.py\n with open(file_path, 'a') as file:\n file.write(full_str)\n","sub_path":"resource/write_env2.py","file_name":"write_env2.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"455005273","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nimport utils\n\nfrom double_viewer import 
DoubleViewer\n\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n\n self.viewer = DoubleViewer(self)\n self.viewer.setUpdatesEnabled(True)\n # 'Load image' button\n self.btnLoad = QToolButton()\n self.btnLoad.setText('Load image')\n self.btnLoad.clicked.connect(self.loadImage)\n\n #alpha group\n self.radio_image = QRadioButton(\"Image\")\n self.radio_image.clicked.connect(self.viewer.contruct_visualization_image)\n self.radio_image.setChecked(True)\n self.radio_extract = QRadioButton(\"Extract\")\n self.radio_extract.clicked.connect(self.viewer.contruct_visualization_image)\n self.radio_alpha = QRadioButton(\"Alpha\")\n self.radio_alpha.clicked.connect(self.viewer.contruct_visualization_image)\n self.extract_slider = QSlider(Qt.Horizontal)\n self.extract_slider.setMinimum(0)\n self.extract_slider.setMaximum(100)\n self.extract_slider.setTickInterval(1)\n self.extract_slider.setValue(0)\n self.extract_slider.sliderReleased.connect(self.viewer.contruct_visualization_image)\n\n self.radio_image.setEnabled(False)\n self.radio_extract.setEnabled(False)\n self.radio_alpha.setEnabled(False)\n self.extract_slider.setEnabled(False)\n\n line_01 = QFrame()\n line_01.setFrameShape(QFrame.VLine)\n\n alpha_layout = QHBoxLayout()\n alpha_layout.addWidget(self.radio_image)\n alpha_layout.addWidget(self.radio_extract)\n alpha_layout.addWidget(self.extract_slider)\n alpha_layout.addWidget(line_01)\n alpha_layout.addWidget(self.radio_alpha)\n alpha_group = QGroupBox(\"Alpha\")\n alpha_group.setLayout(alpha_layout)\n\n # trimap group\n self.show_trimap_checkbox = QCheckBox(\"Show trimap\")\n self.show_trimap_checkbox.setChecked(True)\n self.show_trimap_checkbox.clicked.connect(self.viewer.contruct_visualization_image)\n\n line_02 = QFrame()\n line_02.setFrameShape(QFrame.VLine)\n\n self.brush_size_box = QSpinBox()\n self.brush_size_box.setMinimum(1)\n self.brush_size_box.setMaximum(100)\n self.brush_size_box.setValue(25)\n\n brush_size_changer = QHBoxLayout()\n brush_size_changer.addWidget(QLabel(\"Brush size: \"))\n brush_size_changer.addWidget(self.brush_size_box)\n\n line_03 = QFrame()\n line_03.setFrameShape(QFrame.VLine)\n\n self.brush_unk = QRadioButton(\"Unknown\")\n self.brush_unk.setChecked(True)\n self.brush_fg = QRadioButton(\"Object\")\n self.brush_bg = QRadioButton(\"Background\")\n\n brush_modes = QHBoxLayout()\n brush_modes.addWidget(QLabel(\"Brush mode: \"))\n brush_modes.addWidget(self.brush_unk)\n brush_modes.addWidget(self.brush_fg)\n brush_modes.addWidget(self.brush_bg)\n\n line_04 = QFrame()\n line_04.setFrameShape(QFrame.VLine)\n\n brush_clear = QPushButton(\"Clear\")\n brush_clear.clicked.connect(self.viewer.clear_trimap)\n brush_fill_object = QPushButton(\"Fill object\")\n brush_fill_object.clicked.connect(self.viewer.trimap_fill_object)\n self.brush_move_point = QCheckBox(\"Move point\")\n self.brush_move_point.setChecked(False)\n #brush_move_point = QPushButton(\"Move point\")\n #brush_move_point.clicked.connect(self.viewer.trimap_move_point)\n\n self.trimap_transp = QSlider(Qt.Horizontal)\n self.trimap_transp.setMinimum(0)\n self.trimap_transp.setMaximum(100)\n self.trimap_transp.setTickInterval(1)\n self.trimap_transp.setValue(50)\n self.trimap_transp.sliderReleased.connect(self.viewer.left_viewer.contruct_visualization_image)\n\n trimap_layout = QHBoxLayout()\n trimap_layout.addWidget(self.show_trimap_checkbox)\n trimap_layout.addWidget(QLabel(\"transparency\"))\n trimap_layout.addWidget(self.trimap_transp)\n trimap_layout.addWidget(line_02)\n 
trimap_layout.addLayout(brush_size_changer)\n trimap_layout.addWidget(line_03)\n trimap_layout.addLayout(brush_modes)\n trimap_layout.addWidget(line_04)\n trimap_layout.addWidget(brush_clear)\n trimap_layout.addWidget(brush_fill_object)\n #trimap_layout.addWidget(brush_move_point)\n trimap_layout.addWidget(self.brush_move_point)\n\n trimap_group = QGroupBox(\"Trimap\")\n trimap_group.setLayout(trimap_layout)\n\n # prediction group\n self.auto_predict = QCheckBox(\"Auto\")\n self.auto_predict.setChecked(False)\n\n\n predict = QPushButton(\"Predict\")\n predict.clicked.connect(self.viewer.predict)\n\n predict_layout = QHBoxLayout()\n predict_layout.addWidget(predict)\n predict_layout.addWidget(self.auto_predict)\n predict_group = QGroupBox(\"SmartTool\")\n predict_group.setLayout(predict_layout)\n\n\n #instruments panel\n instrument_layout = QHBoxLayout()\n #instrument_layout.setAlignment(Qt.AlignLeft)\n instrument_layout.addWidget(alpha_group)\n instrument_layout.addWidget(trimap_group)\n instrument_layout.addWidget(predict_group)\n instrument_layout.addWidget(self.btnLoad)\n instrument_layout.addStretch(10000)\n\n #self.viewer.photoClicked.connect(self.photoClicked)\n # Arrange layout\n VBlayout = QVBoxLayout()\n VBlayout.addLayout(instrument_layout)\n VBlayout.addWidget(self.viewer)\n\n self.setLayout(VBlayout)\n\n def loadImage(self):\n new_image_path = QFileDialog.getOpenFileName(self, \"Pick an image\")[0]\n if utils.check_image_path(str(new_image_path)):\n self.viewer.setPhoto(utils.load_image(new_image_path))\n self.viewer.update()\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n window = Window()\n window.showMaximized()\n sys.exit(app.exec_())\n","sub_path":"smtool_gui_interface/src/v2/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312862609","text":"import re\nfrom pathlib import Path\nfrom enum import Enum\n\nclass NotPathType(BaseException):\n \"\"\" Raised when a passed path isn't of pathlib.Path type \"\"\"\n pass\n\nclass NotCrawlStrategyType(BaseException):\n \"\"\" Raised when a passed strategy isn't an integer \"\"\"\n pass\n\nclass NotCrawlActionType(BaseException):\n \"\"\" Raised when a passed crawl action isn't an integer \"\"\"\n pass\n\nclass MatchExpressionNotString(BaseException):\n \"\"\" Raised when a match expression is not a string \"\"\"\n pass\n\nclass CrawlStrategy(Enum):\n SHALLOW, RECURSIVE, NO_LOCK = range(3)\n\nclass CrawlAction(Enum):\n ALL_PATHS, FILES, DIRECTORIES, PATTERN = range(4)\n\nclass Crawler:\n def __init__(self, path, strategy=CrawlStrategy.SHALLOW, action=CrawlAction.ALL_PATHS, match=None):\n self.set_path(path)\n self.set_strategy(strategy)\n self.set_action(action)\n if match is not None:\n self.set_match_expression(match)\n self.crawled_paths = None\n self.crawled_files = None\n self.crawled_directories = None\n self.crawled_matched_paths = None\n\n def set_path(self, path):\n if not (isinstance(path, Path) or type(path) is str):\n raise NotPathType\n self.root_path = Path(path)\n\n def set_strategy(self, strategy):\n if not (isinstance(strategy, int) or isinstance(strategy, Enum)):\n raise NotCrawlStrategyType\n self.strategy = strategy\n\n def set_action(self, action):\n if not (isinstance(action, int) or isinstance(action, Enum)):\n raise NotCrawlActionType\n self.action = action\n\n def set_match_expression(self, expression):\n if type(expression) is not str:\n raise 
MatchExpressionNotString\n self.match_expression = re.compile(expression)\n\n def __quick_flatten(self, list):\n return [item for sublist in list for item in sublist]\n\n def __flatten(self, list_to_flatten):\n flattened = []\n for item in list_to_flatten:\n if type(item) is list:\n subitems = self.__flatten(item)\n for sub in subitems:\n flattened.append(sub)\n else:\n flattened.append(item)\n return flattened\n\n def __directories_in(self, path_string, recursive=False):\n result = []\n dirs = [x for x in Path(path_string).iterdir() if x.is_dir()]\n in_subdirs = []\n if recursive:\n for dir in dirs:\n dirs_in_subdir = self.__directories_in(dir, recursive)\n for d in dirs_in_subdir:\n if type(d) is not list:\n in_subdirs.append(d)\n if len(in_subdirs) > 0:\n result = self.__flatten([dirs, in_subdirs])\n else:\n result = dirs\n return result\n\n def __files_in(self, path_string, recursive=False):\n result = []\n files = [x for x in Path(path_string).iterdir() if x.is_file()]\n in_subdirs = []\n if recursive:\n dirs = self.__directories_in(path_string, False)\n for dir in dirs:\n in_subdirs.append(self.__files_in(dir, recursive))\n if len(in_subdirs) > 0:\n result = self.__flatten([files, in_subdirs])\n else:\n result = files\n return result\n\n def crawl(self):\n self.crawled_paths = []\n self.crawled_files = []\n self.crawled_directories = []\n self.crawled_matched_paths = []\n # Determine if recursive based on strategy\n is_recursive = True if self.strategy in (CrawlStrategy.RECURSIVE,\n CrawlStrategy.NO_LOCK) else False\n # Get all files\n if self.action != CrawlAction.DIRECTORIES:\n self.crawled_files = self.__files_in(self.root_path, is_recursive)\n # Get all directories\n if self.action != CrawlAction.FILES:\n self.crawled_directories = self.__directories_in(self.root_path, is_recursive)\n # Get all paths (directories and files together)\n self.crawled_paths = self.__flatten([self.crawled_files, self.crawled_directories])\n # If pattern matching is enabled for this crawler\n if self.action is CrawlAction.PATTERN:\n # If crawler.match_expression is valid\n if self.match_expression is not None:\n for path in self.crawled_paths:\n if self.match_expression.search(str(path)):\n self.crawled_matched_paths.append(path)\n\n def paths(self):\n return self.crawled_paths\n\n def files(self):\n return self.crawled_files\n\n def directories(self):\n return self.crawled_directories\n\n def matches(self):\n return self.crawled_matched_paths\n","sub_path":"BuildTool/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"190783694","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport cherrypy\nimport errors\n\n# Authorizations\nADDRESS_CREATE = \"address.create\"\n\nOWN_ADDRESS_READ = \"own_address.read\"\nOWN_ADDRESS_EDIT = \"own_address.edit\"\nOWN_ADDRESS_DELETE = \"own_address.delete\"\n\nPUBLIC_ADDRESS_READ = \"public_address.read\"\nPUBLIC_ADDRESS_EDIT = \"public_address.edit\"\nPUBLIC_ADDRESS_DELETE = \"public_address.delete\"\n\nPRIVATE_ADDRESS_READ = \"private_address.read\"\nPRIVATE_ADDRESS_EDIT = \"private_address.edit\"\nPRIVATE_ADDRESS_DELETE = \"private_address.delete\"\n\n# All authorizations\nALL_AUTHORIZATIONS = {\n ADDRESS_CREATE,\n\n OWN_ADDRESS_READ,\n OWN_ADDRESS_EDIT,\n OWN_ADDRESS_DELETE,\n\n PUBLIC_ADDRESS_READ,\n PUBLIC_ADDRESS_EDIT,\n PUBLIC_ADDRESS_DELETE,\n\n PRIVATE_ADDRESS_READ,\n PRIVATE_ADDRESS_EDIT,\n PRIVATE_ADDRESS_DELETE\n}\n\n\ndef 
check_authorization(user, authorization):\n \"\"\"\n Checks if the user has the authorization.\n\n Raises an error if not.\n \"\"\"\n\n assert authorization in ALL_AUTHORIZATIONS\n\n # Check if user exists\n if user not in cherrypy.config[\"users\"]:\n raise errors.UserNotExistsError(user = user)\n\n # Get roles for authorization\n role_names = cherrypy.config[\"authorizations\"].get(authorization)\n if not role_names:\n raise errors.NotAuthorizedError(user = user, authorization = authorization)\n\n # Check usernames in roles\n for role_name in role_names:\n if user in cherrypy.config[\"roles\"][role_name]:\n break\n else:\n raise errors.NotAuthorizedError(user = user, authorization = authorization)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"application/common/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484441738","text":"\nimport sys\nimport logging\nfrom typing import List\nfrom os.path import join\n\nfrom pyspark import SparkContext, SQLContext\n\nfrom shared.log_utils import LogUtils\n\n\ndef analyze(sc, cfg):\n \"\"\"\n Run job \n :param sc: SparkContext\n :param cfg: app configuration\n :return: None\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Python version: {}'.format(sys.version))\n logger.info('Counting words...')\n\n # needs to be initialized (even if not used) to be able \n # to work with DataFrames\n sql_sc = SQLContext(sc)\n \n core_dir = cfg['hdfs']['core_dir']\n\n text_01 = (\n f'CORE’s mission is to aggregate all open access research outputs from '\n f'repositories and journals worldwide and make them available to the '\n f'public. In this way CORE facilitates free unrestricted access to '\n f'research for all.'\n )\n text_02 = (\n f'CORE harvests research papers from data providers from all over the' \n f'world including institutional and subject repositories, open access '\n f'and hybrid journal publishers.'\n )\n text_03 = (\n f'CORE currently contains 135,539,113 open access articles, from '\n f'thousands and over tens of thousands of journals, collected from '\n f'5,969 data providers around the world.'\n )\n text_04 = (\n f'CORE will supply data for the UK REF 2021 Open Access Policy Audit '\n f'to Research England. 
We provide advice and support to UK HEIs with '\n f'exposing their outputs data to CORE.'\n )\n\n texts = [text_01, text_02, text_03, text_04]\n words = sc.parallelize(texts).flatMap(lambda text: text.split())\n words = words.map(lambda word: (word, 1))\n counts = words.reduceByKey(lambda a, b: a + b)\n ordered = counts.sortBy(lambda pair: pair[1], ascending=False)\n ordered = ordered.toDF(['word', 'count'])\n LogUtils().describe_df(ordered, 'word_count')\n output_path = join(core_dir, 'test_job_output')\n logger.info(f'Storing results in {output_path}')\n ordered.coalesce(1).write.csv(output_path, mode='overwrite')\n","sub_path":"WP3/Task3.2/spark/jobs/word_count/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407923403","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='index'),\n path('message/',message,name='message'),\n path('send/',send, name='send'),\n path('getMessages/',getMessages, name='getMessages'),\n path('checkview//',checkview, name='checkview'),\n path('/',room, name='room'),\n]","sub_path":"chatMain/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364533192","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport numpy as np\nfrom PIL import Image, ImageTk\nimport cv2 as cv \nfrom encryptionfunctions import encrypt\n\n\nclass Root(Tk):\n def __init__(self):\n super(Root, self).__init__()\n self.title(\"Steganography IA\")\n self.minsize(640, 400)\n\n self.labelFrame = ttk.LabelFrame(self, text = \"Open File\")\n self.labelFrame.grid(column = 0, row = 1, padx = 20, pady = 20)\n\n self.button()\n\n\n def button(self):\n self.button = ttk.Button(self.labelFrame, text = \"Browse A File\",command = self.fileDialog)\n self.button.grid(column = 1, row = 1)\n\n\n def fileDialog(self):\n\n self.filename = filedialog.askopenfilename(initialdir = \"/\", title = \"Select A File\", filetype =\n ((\"image files\",\"*.jpg\"), (\"all files\",\"*.*\")) )\n self.label = ttk.Label(self.labelFrame, text = \"\")\n self.label.grid(column = 1, row = 2)\n self.label.configure(text = self.filename)\n\n img = Image.open(self.filename)\n self.img = np.array(img)\n encrypted = encrypt(self.img, \"Hello World\", 1)\n img = encrypted.resize((250, 250))\n photo = ImageTk.PhotoImage(img)\n\n self.label2 = Label(image=photo)\n self.label2.image = photo \n self.label2.grid(column=3, row=4)\n\nroot = Root()\nroot.mainloop()","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594566300","text":"from pathlib import Path\n\nimport numpy as np\nfrom joblib import dump, load\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom evaluate import evaluate\nfrom logistic_regression import LogisticRegressionVal\nfrom utils import createLogger, createDirs, loadClean, writeResults, \\\n preprocessClfParser\n\nINPUT_DIR = Path(r'../data/clean')\nOUTPUT_DIR = Path(r'../logs/models')\nLOG_DIR = Path(\"../logs/pipeline\")\n\nlogger = createLogger(LOG_DIR, 
\"lda_clf\")\nlogger.info(\"Logger created, logging to %s\" % LOG_DIR.absolute())\n\nK = np.arange(1, 31) * 5\n\n\ndef LDA(train_size, random_state):\n \"\"\"\n Classification pipeline with LDA preprocessing.\n\n Inputs:\n - train_size (int): number of training samples.\n - random_state (int): seed for random number generators\n\n Output:\n (None)\n \"\"\"\n subset = 'subset_%s' % train_size\n\n input_dir = INPUT_DIR / subset\n model_dir = OUTPUT_DIR / subset\n createDirs(model_dir)\n\n X_train, X_test, y_train, y_test = loadClean(input_dir)\n X_train_sub, X_val, y_train_sub, y_val = train_test_split(X_train, y_train,\n test_size=0.2,\n random_state=random_state)\n scaler = StandardScaler()\n\n best_params = []\n best_k, best_auc, best_acc = None, 0, 0\n\n for k in K:\n model_name = \"lda_%s.joblib\" % k\n try:\n lda = load(model_dir / model_name)\n # logger.info(\"\\t\\tk = %s, fitted LDA model loaded.\" % k)\n except:\n lda = LatentDirichletAllocation(n_components=k,\n doc_topic_prior=50 / k,\n topic_word_prior=0.01,\n n_jobs=-1,\n random_state=random_state)\n lda.fit(X_train)\n dump(lda, model_dir / model_name)\n\n X_train_ = scaler.fit_transform(lda.transform(X_train_sub))\n X_val_ = scaler.transform(lda.transform(X_val))\n\n clf_val = LogisticRegressionVal(X_train_, y_train_sub, X_val_, y_val,\n k, random_state=random_state)\n best_k, best_auc, best_acc, best_params = clf_val.tune(best_k, best_auc,\n best_acc,\n best_params)\n\n clf, file_name, header = clf_val.bestClassifier(best_params)\n lda.set_params(**{'n_components': best_k,\n 'doc_topic_prior': 50 / best_k})\n preprocess = make_pipeline(lda, scaler)\n tr_time, tr_metrics, test_time, test_metrics = evaluate(preprocess, clf,\n X_train, y_train,\n X_test, y_test)\n\n writeResults(file_name, header, 'lda',\n train_size, best_k, best_params,\n tr_time, tr_metrics, test_time, test_metrics)\n\n logger.info((\"\\tFor training size = %s, best number of topics k = %s \"\n \"best parameter grid: %s (train AUC: {:.3f}, train acc: {:.3f};\"\n \" test AUC: {:.3f}, test acc: {:.3f})\").\n format(tr_metrics[0], tr_metrics[1],\n test_metrics[0], test_metrics[1])\n % (train_size, best_k, best_params))\n\n\nif __name__ == '__main__':\n desc = (\"Apply LDA as a preprocessing step, grid search for best number of \"\n \"topics and hyperparameters.\")\n parser = preprocessClfParser(desc)\n args = parser.parse_args()\n\n if args.all:\n for train_size in np.linspace(1250, 25000, 20):\n LDA(int(train_size), args.random_state)\n else:\n LDA(int(args.train_size), args.random_state)\n","sub_path":"codes/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186088823","text":"import random\n\n# Assume arr is sorted.\ndef binary_search(arr, v):\n if len(arr) < 1:\n return None\n start = 0\n end = len(arr) - 1\n while start <= end:\n mid = start + (end - start) / 2\n if arr[mid] == v:\n return mid\n elif arr[mid] < v:\n start = mid + 1\n else:\n end = mid - 1\n return None\n\ndef test(arr, val):\n arr.sort()\n result = binary_search(arr, val)\n if result is None:\n print('Value not found.', val, arr)\n else:\n print('Value found.', result, arr[result], arr)\n\nif __name__ == '__main__':\n test(random.sample(range(1,100), 80), 37)\n","sub_path":"BinarySearch/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"547002454","text":"# -*- coding: utf-8 -*-\n\nfrom pyqtgraph.Qt import QtGui\nfrom koheron_slider import KoheronSlider\nfrom PyQt4.QtCore import SIGNAL, pyqtSignal\nimport numpy as np\nfrom scipy import signal\n\nclass WaveformList(QtGui.QWidget):\n def __init__(self, items = ['Sine', 'Triangle', 'Square']):\n super(WaveformList, self).__init__()\n layout = QtGui.QVBoxLayout()\n self.items = items\n self.list = []\n for item in self.items:\n self.list.append(QtGui.QRadioButton(item))\n for item in self.list:\n layout.addWidget(item)\n self.list[0].setChecked(True)\n self.setLayout(layout)\n \nclass DacWidget(QtGui.QWidget):\n \"\"\"\n This widget is used to control the DACs of a driver. \n \"\"\"\n \n data_updated_signal = pyqtSignal(int) \n \n def __init__(self, driver, index=0):\n super(DacWidget, self).__init__()\n \n self.n = driver.sampling.n\n self.fs = driver.sampling.fs\n \n self.index = index # used to track which DAC is related to the widget\n self.enable = False\n self.freq = 0\n self.mod_amp = 0\n self.waveform = 'Sine'\n self.data = np.zeros(self.n)\n\n # Layout\n self.layout = QtGui.QHBoxLayout()\n self.slider_layout = QtGui.QVBoxLayout()\n # DAC ON/OFF button\n self.button = QtGui.QPushButton('ON') \n self.button.setStyleSheet('QPushButton {color: green;}') \n self.button.setFixedWidth(80)\n self.button.setCheckable(True)\n # Waveform list\n self.waveform_list = WaveformList() \n # Sliders\n self.freq_slider = KoheronSlider(name = 'Modulation frequency (MHz) : ', max_slider = 1e-6*self.fs/2, step = 1e-6*self.fs/self.n, alpha = 1)\n self.mod_amp_slider = KoheronSlider(name = 'Modulation amplitude (u.a.) : ', max_slider = 1)\n # Add Widgets to Layout\n self.layout.addWidget(self.button)\n self.slider_layout.addWidget(self.mod_amp_slider)\n self.slider_layout.addWidget(self.freq_slider)\n self.layout.addWidget(self.waveform_list)\n self.layout.addLayout(self.slider_layout)\n self.setLayout(self.layout)\n \n self.button.clicked.connect(self.button_clicked)\n self.connect(self.freq_slider, SIGNAL(\"value(float)\"), self.change_freq)\n self.connect(self.mod_amp_slider, SIGNAL(\"value(float)\"), self.change_mod_amp)\n for i in range(len(self.waveform_list.list)):\n self.waveform_list.list[i].toggled.connect(self.update_data)\n \n def change_freq(self, value):\n value /= 1e-6*self.fs/self.n\n self.freq = np.floor(value)\n self.update_data()\n \n def change_mod_amp(self, value):\n self.mod_amp = value\n self.update_data()\n \n def button_clicked(self):\n self.enable = not self.enable \n if self.enable:\n self.update_data()\n self.button.setStyleSheet('QPushButton {color: red;}')\n self.button.setText('OFF')\n else:\n self.data = np.zeros(self.n)\n self.data_updated_signal.emit(self.index)\n self.button.setStyleSheet('QPushButton {color: green;}')\n self.button.setText('ON')\n self.data_updated_signal.emit(self.index)\n \n def update_data(self):\n if self.waveform_list.list[0].isChecked():\n self.waveform_index = 0 \n self.data = self.mod_amp * np.cos(2*np.pi* self.freq/self.n*np.arange(self.n))\n elif self.waveform_list.list[1].isChecked():\n self.waveform_index = 1\n self.data = self.mod_amp * signal.sawtooth(2*np.pi* self.freq/self.n*np.arange(self.n), width = 0.5) \n elif self.waveform_list.list[2].isChecked():\n self.waveform_index = 2\n self.data = self.mod_amp * signal.square(2*np.pi* self.freq/self.n*np.arange(self.n),duty=0.5)\n self.data_updated_signal.emit(self.index)\n \n 
\n","sub_path":"lase/gui/dac_widget.py","file_name":"dac_widget.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"268490938","text":"from pathlib import Path\n\nimport pytest\n\nfrom cg.store.models import Family\nfrom cg.models.cg_config import CGConfig\nfrom cgmodels.cg.constants import Pipeline\n\n\n@pytest.fixture(name=\"mutant_case\")\ndef fixture_mutant_case(cg_context: CGConfig, case_id: str, ticket_id: str, helpers) -> Family:\n \"\"\"Return mutant case\"\"\"\n case = helpers.add_case(\n store=cg_context.status_db,\n internal_id=case_id,\n name=ticket_id,\n data_analysis=Pipeline.SARS_COV_2,\n )\n return case\n\n\n@pytest.fixture(name=\"microsalt_case\")\ndef fixture_microsalt_case(cg_context: CGConfig, case_id: str, ticket_id: str, helpers) -> Family:\n \"\"\"Return mutant case\"\"\"\n case = helpers.add_case(\n store=cg_context.status_db,\n internal_id=case_id,\n name=ticket_id,\n data_analysis=Pipeline.MICROSALT,\n )\n return case\n\n\n@pytest.fixture(name=\"destination_path\")\ndef fixture_destination_path() -> Path:\n \"\"\"Retyrbs a dummy path.\"\"\"\n return Path(\"path\", \"to\", \"destination\")\n","sub_path":"tests/meta/rsync/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527624603","text":"import threading\nfrom threading import Thread\nfrom cv2.gapi.ie import params\nfrom selenium import webdriver\nimport pytest\nimport unittest\nimport time\nfrom Page.LoginPage import LoginPage\nfrom Page.Homepage import HomePage\nfrom Locator.Locator import Locator\nfrom Page.ByPage import ByPage\nimport cv2\n\n\nclass Login_Test(unittest.TestCase):\n # @pytest.fixture(params=[\"chrome\", \"firefox\"], scope=\"class\")\n def setUp(self):\n # if request.params == \"chrome\":\n self.driver = webdriver.Chrome(executable_path=\"G:\\selenium\\chromedriver.exe\")\n self.driver.get(\"http://automationpractice.com/index.php\")\n # elif request.params == \"firefox\":\n # self.driver = webdriver.Firefox(executable_path=\"G:\\selenium\\geckodriver-v0.29.0-win64 (1)\\geckodriver.exe\")\n # pass\n\n # @pytest.fixture(params=[\"chrome\", \"firefox\"], scope=\"class\")\n # def driver_init(request):\n # if request.param == \"chrome\":\n # self.driver = webdriver.Chrome(executable_path=\"G:\\selenium\\chromedriver.exe\")\n # self.driver.get(\"http://automationpractice.com/index.php\")\n # if request.param == \"firefox\":\n # self.driver = webdriver.Firefox(executable_path=\"G:\\selenium\\geckodriver-v0.29.0-win64 (1)\\geckodriver.exe\")\n\n # # request.cls.driver = web_driver\n # # yield\n # # web_driver.close()\n #\n # elif params == \"firefox\":\n # self.driver = webdriver.Firefox(executable_path=\"G:\\selenium\\geckodriver-v0.29.0-win64 (1)\\geckodriver.exe\")\n # self.driver = webdriver.Chrome(executable_path=\"G:\\selenium\\chromedriver.exe\")\n # self.driver = webdriver.Firefox(executable_path=\"G:\\selenium\\geckodriver-v0.29.0-win64 (1)\\geckodriver.exe\")\n # if params == \"chrome\":\n # self.driver = webdriver.Chrome(executable_path=\"G:\\selenium\\chromedriver.exe\")\n # elif params == \"firefox\":\n # self.driver = webdriver.Firefox(executable_path=\"G:\\selenium\\geckodriver-v0.29.0-win64 (1)\\geckodriver.exe\")\n # self.driver.get(\"http://automationpractice.com/index.php\")\n\n\n\n # TestCae01: Lỗi khi nhập 1 email adress không hợp lệ\n def 
login(self):\n        Login = LoginPage(self.driver)\n        Login.SignIn()\n        Login.Get_Email(\"abc123\")\n        Login.Buttun()\n        Messengerbox = self.driver.find_element_by_xpath(Locator.Mesenger01)\n        if Messengerbox is not None:\n            print('True')\n        else:\n            print('False')\n        self.driver.quit()\n\n\n\n    # Test 02: create an account\n    def test_Account(self):\n        Lg = LoginPage(self.driver)\n        Lg.SignIn()\n        # Lg.clear_em()\n        # time.sleep(2)\n        Lg.Get_Email(\"du58on6d655st6565dfgdg7g65@gmail.com\")\n        Lg.Buttun()\n        Lg.click_title()\n        Lg.first_name(\"Dương\")\n        Lg.set_last_name(\"phạm\")\n        # Lg.email(\"duong055689@gmail.com\")\n        Lg.passw(\"anhduong\")\n        Lg.date_birth()\n        Lg.Months()\n        Lg.years()\n        Lg.compaly1(\"Lqa\")\n        Lg.adrres(\"Nguyen co thach, Ha noi\")\n        Lg.adrres02(\"Hai duong, Binh giang\")\n        Lg.citys(\"Ha noi\")\n        Lg.state01()\n        Lg.zip(\"00000\")\n        Lg.thongtin(\"khong co gi\")\n        Lg.Phone(\"0978454157\")\n        Lg.m_phone(\"03569445\")\n        Lg.register()\n        # time.sleep(50)\n        my_account = self.driver.find_element_by_xpath(Locator.My_account)\n        if my_account is not None:\n            print('Registration successful')\n        else:\n            print('False')\n\n    # Testcase03 Submit a newsletter\n    def test_03(self):\n        Ts03 = HomePage(self.driver)\n        Ts03.Newslet(\"anc99b@gmail.com\")\n        Ts03.enter_em()\n\n        newsletter = self.driver.find_element_by_xpath(Locator.newlet)\n        if newsletter is not None:\n            print(\" Success\")\n        else:\n            print('false')\n\n    # test04: Submit a contact form\n    def test_04(self):\n        Ts04 = HomePage(self.driver)\n        Ts04.Conta()\n        Ts04.Head()\n        Ts04.E_mail(\"anmmc@gmail.com\")\n        Ts04.oder_pr(\"duong\")\n        Ts04.File(\"G:\\IMG_0948.JPG\")\n        Ts04.mess(\"khong\")\n        Ts04.send_click()\n        th = self.driver.find_element_by_xpath(Locator.th)\n        if th is not None:\n            print('Success')\n        else:\n            print('False')\n\n    # Test05\n    def test_05(self):\n        Ts05 = HomePage(self.driver)\n        Ts05.search(\"abcdl\")\n        Ts05.Scree(\"1.png\")\n        time.sleep(3)\n        Ts05.clea_sear()\n        Ts05.Scree(\"2.png\")\n        img1 = cv2.imread(\"1.png\")\n        img2 = cv2.imread(\"2.png\")\n\n    # test_06\n    def test_06(self):\n        Ts06 = HomePage(self.driver)\n        Ts06.search(\"Dress\")\n        time.sleep(5)\n        lt = Ts06.get_list()\n        for a in lt:\n            if 'Dress' in a:\n                print('true')\n                # self.assertTrue(True)\n            else:\n                print('false')\n                # self.assertTrue(False)\n        print(lt)\n\n    def test_07(self):\n        Ts07 = HomePage(self.driver)\n        Ts07.search(\"Dress\")\n        time.sleep(3)\n        Ts07.Click_ser()\n        a = Ts07.srt_lits_sp()\n        for i in a:\n            if 'Dress' in i:\n                print('true')\n                # self.assertTrue(True)\n            else:\n                print('false')\n                # self.assertTrue(False)\n\n    def test_08(self):\n        Ts08 = HomePage(self.driver)\n        Ts08.search(\"Dress\")\n        Ts08.Click_ser()\n        a = Ts08.srt_lits_sp()\n        b = len(a)\n        c = Ts08.get_te_sl()\n        b1 = str(b)\n        b2 = str(c)\n        # check that the number of listed products appears in the displayed total\n        if b1 in b2:\n            print('true')\n        else:\n            print('false')\n\n    # Prices are displayed for the products\n    def test_09(self):\n        Ts09 = HomePage(self.driver)\n        Ts09.search(\"Dress\")\n        Ts09.Click_ser()\n        Ts09.check_gia()\n\n    # Enter a wrong product; a wrong-product message is displayed\n    # def test_10(self):\n    #     Ts10 = HomePage(self.driver)\n    #     Ts10.search(\"dreSSSsss\")\n    #     Ts10.Click_ser()\n    #     Ts10.check_mes_sear()\n    #\n    # # Successful purchase\n    # def test_11(self):\n    #     Ts11 = ByPage(self.driver)\n    #     Ts11.click_sp()\n    #     Ts11.click_add_to_card()\n    #     time.sleep(3)\n    #     Ts11.Click_contiue()\n    #     Ts11.Clear_sl_sp()\n    #     Ts11.senkeys_sp(\"3\")\n    #     Ts11.click_add_to_card()\n    #     time.sleep(3)\n    #     Ts11.click_button_checkout()\n    #     Ts11.click_button_sumary()\n    #     Ts11.senkeys_id_email(\"duong055689@gmail.com\")\n    #     
Ts11.senkeys_password(\"anhduong1\")\n    #     Ts11.click_sign()\n    #     Ts11.click_process_Adress()\n    #     # Ts11.Click_buton_agree()\n    #     # time.sleep(600)\n    #     Ts11.Click_process_shiping()\n    #     time.sleep(3)\n    #     Ts11.Mesger()\n    #\n    # # Change the purchase information\n    # def test_12(self):\n    #     Ts_12 = ByPage(self.driver)\n    #     Ts_12.card_01()\n    #     Ts_12.click_cart()\n    #     time.sleep(3)\n    #     Ts_12.sl_sp(\"3\")\n    #     Ts_12.cler_sp()\n    #     Ts_12.check_out()\n    #     Ts_12.senkeys_id_email(\"duong055689@gmail.com\")\n    #     Ts_12.senkeys_password(\"anhduong1\")\n    #     Ts_12.click_sign()\n    #     Ts_12.click_process_Adress()\n    #     Ts_12.click_4()\n    #     Ts_12.click_button_check_out_shipping()\n    #     Ts_12.sum_sp()\n    #     Ts_12.click_button_pay()\n    #     Ts_12.Click_but_ton_cf()\n    #     Ts_12.check_m()\n    #     # time.sleep(300)\n    #     #\n    #     # time.sleep(1000)\n    #\n    # # Find items with a 20% promotion\n    # def test_13(self):\n    #     TS_13 = ByPage(self.driver)\n    #\n    # # Check the View larger feature, and the product name under the image\n    # def test_14(self):\n    #     Ts_14 = ByPage(self.driver)\n    #     # time.sleep(3)\n    #     Ts_14.Click_img()\n    #     text1 = Ts_14.text_img02()\n    #     Ts_14.Click_img2()\n    #     time.sleep(5)\n    #     Ts_14.img_img_height()\n    #     Ts_14.imgimg_03_height()\n    #     a = Ts_14.img_img_height()\n    #     b = Ts_14.imgimg_03_height()\n    #     if a == b:\n    #         print(\"False\")\n    #     else:\n    #         print(\"true\")\n    #     text2 = Ts_14.text_img1()\n    #     if text1 == text2:\n    #         print(\"true\")\n    #     else:\n    #         print(\"false\")\n    #\n    # # Add to cart with Quantity = 0\n    # def test_15(self):\n    #     Ts15 = ByPage(self.driver)\n    #     Ts15.Click_img()\n    #     Ts15.Clear_quantity()\n    #     Ts15.sen_keys_quantity(\"0\")\n    #     Ts15.click_add_to_card()\n    #     time.sleep(3)\n    #     Ts15.check_null()\n    #     # Add to cart with Quantity > 0\n    #     Ts15.Clear_quantity()\n    #     Ts15.sen_keys_quantity(\"1\")\n    #     Ts15.close_null()\n    #     Ts15.click_bt_add()\n    #\n    # # Share to Twitter\n    # def test_16(self):\n    #     driver = self.driver\n    #     Ts16 = ByPage(driver)\n    #     Ts16.Click_img()\n    #     # get the window handle after the window has opened\n    #     window_before = driver.window_handles[0]\n    #\n    #     # window_before_title = driver.title\n    #     # print(window_before_title)\n    #\n    #     Ts16.click_bt_twitter()\n    #     time.sleep(2)\n    #     # get the window handle after a new window has opened\n    #     window_after = driver.window_handles[1]\n    #\n    #     # switch on to new child window\n    #     driver.switch_to.window(window_after)\n    #     time.sleep(10)\n    #\n    #     # window_after_title = driver.title\n    #     # print(window_after_title)\n    #     # time.sleep(3)\n    #     Ts16.send_keys_tw(\"Dng74437507\")\n    #     Ts16.send_keys_pass_tw(\"anhduong1\")\n    #     Ts16.click_login_tw()\n    #\n    #     # driver.switch_to.window(window_before)\n    #     # time.sleep(500)\n    #\n    # # Comment\n    # def test_17(self):\n    #     Ts17 = ByPage(self.driver)\n    #     Ts17.Click_button_sn()\n    #     time.sleep(3)\n    #     Ts17.senkeys_id_email(\"duong055689@gmail.com\")\n    #     Ts17.senkeys_password(\"anhduong1\")\n    #     Ts17.click_sign()\n    #     time.sleep(3)\n    #     Ts17.Click_button_home()\n    #     Ts17.Click_img()\n    #     Ts17.Click_button_cmt()\n    #     Ts17.sen_keys_cmt_title(\"Bình luận\")\n    #     Ts17.sen_keys_cmt(\"Sản phẩm tốt\")\n    #     Ts17.click_bt_send()\n    #     Ts17.check_review()\n    #\n    # # Send to friend\n    #\n    # def test_18(self):\n    #     Ts18 = ByPage(self.driver)\n    #     Ts18.Click_img()\n    #     Ts18.click_sento_fr()\n    #     Ts18.senkeys_name(\"Dương\")\n    #     Ts18.senkey_email(\"Duong055689@gmail.com\")\n    #     Ts18.click_send_enail()\n    #     Ts18.check_sento_el()\n    #     # TODO\n    #\n    # # def test_action(self):\n    # #     browsers = [\"chrome\", \"firefox\"]\n    # #     for browser in browsers:\n    # #         Thread(target=self.login, args=(browser,)).start()\n    #\n\n\n    def 
tearDown(self):\n self.driver.quit()\n print('tearDown')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Tests/LoginTest.py","file_name":"LoginTest.py","file_ext":"py","file_size_in_byte":11028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"283771156","text":"from mrjob.job import MRJob\r\nfrom itertools import combinations\r\nfrom math import sqrt\r\n\r\nclass Step2(MRJob):\r\n\r\n def pairwise_items(self, user_id,values):\r\n\r\n values=eval(values.split('\\t')[1])\r\n item_count, item_sum, ratings = values\r\n for item1, item2, in combinations(ratings, 2):\r\n yield (item1[0], item2[0]), (item1[1], item2[1])\r\n\r\n def calculate_similarity(self, pair_key, lines):\r\n\r\n sum_xx, sum_xy, sum_yy, sum_x, sum_y, n = (0.0, 0.0, 0.0, 0.0, 0.0, 0)\r\n item_pair, co_ratings = pair_key, lines\r\n item_xname, item_yname = item_pair\r\n\r\n for item_x, item_y in co_ratings:\r\n sum_xx += item_x * item_x\r\n sum_yy += item_y * item_y\r\n sum_xy += item_x * item_y\r\n sum_y += item_y\r\n sum_x += item_x\r\n n += 1\r\n similarity = self.normalized_correlation(n, sum_xy, sum_x, sum_y, sum_xx, sum_yy)\r\n yield (item_xname, item_yname), (similarity, n)\r\n\r\n def steps(self):\r\n return [self.mr(mapper=self.pairwise_items,\r\n reducer=self.calculate_similarity),]\r\n\r\n def normalized_correlation(self,n,sum_xy,sum_x,sum_y,sum_xx,sum_yy):\r\n numerator = (n * sum_xy - sum_x * sum_y)\r\n denominator = sqrt(n * sum_xx - sum_x * sum_x) * sqrt(n * sum_yy - sum_y * sum_y)\r\n similarity = numerator / denominator\r\n\r\n return similarity\r\n\r\nif __name__ == '__main__':\r\n Step2.run()","sub_path":"step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103845965","text":"import unittest\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\nfrom ruc import random_gates, ruc_channel\n\n\nclass TestRandomCircuitFunctions(unittest.TestCase):\n\n def testRandomGatesAreUnitary(self):\n q = 2\n depth = 3\n gates = random_gates(q, depth)\n gates_as_matrices = [gate.transpose(2,0,1,3).reshape(2*[q**2]) for gate in gates]\n uudag = [gate @ gate.conj().transpose() for gate in gates_as_matrices]\n [assert_allclose(prod, np.identity(q**2, dtype=complex), atol=1e-14) for prod in uudag]\n\n def testGatesAreCContiguous(self):\n q = 2\n depth = 3\n gates = random_gates(q, depth)\n for gate in gates:\n assert(gate.flags['C_CONTIGUOUS'] == True)\n\n def testRucChannelTracePreserving(self):\n q = 2\n depth = 3\n random_matrix = np.random.rand(*2*[q**depth])\n random_matrix = (random_matrix + random_matrix.transpose()) / 2\n input_trace = np.trace(random_matrix)\n input_tensor = random_matrix.reshape(2*depth*[q])\n output_tensor = ruc_channel(input_tensor)\n output_trace = np.trace(output_tensor.reshape(2*[q**depth]))\n assert_almost_equal(input_trace, output_trace)\n\n def testRucChannelHermiticityPreserving(self):\n q = 2\n depth = 3\n random_matrix = np.random.rand(*2 * [q ** depth])\n random_matrix = (random_matrix + random_matrix.transpose()) / 2\n input_tensor = random_matrix.reshape(2 * depth * [q])\n output_tensor = ruc_channel(input_tensor)\n output_matrix = output_tensor.reshape(2 * [q ** depth])\n assert_almost_equal(output_matrix, output_matrix.conj().transpose())\n\n def testRucChannelPositive(self):\n \"\"\"\n Use Choi form to test for positivity\n \"\"\"\n 
pass\n\n","sub_path":"test_ruc.py","file_name":"test_ruc.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128525824","text":"import numpy as np\nfrom gym.spaces import prng\nimport random\nimport gym\nfrom option_space import OptionSpace\n\n\nclass Agent(object):\n def __init__(self):\n self.episode = 0\n self.accumulated_reward = 0\n self.gamma = 0.5\n\n def seed(self, seed):\n prng.seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n def set_environment(self, env):\n self.env = env\n\n # state space\n NUM_ACTIONS = env.action_space.n # (left, nothing, right)\n\n BINS = [0,0,0,0]\n BINS[0] = np.array([0.0])\n BINS[1] = np.array([0.0])\n BINS[2] = np.array([-0.62831853, -0.20943951, -0.06, 0.06, 0.20943951])\n BINS[3] = np.array([0.0, np.radians(50)])\n NUM_BUCKETS = (BINS[0].size, BINS[1].size, BINS[2].size, BINS[3].size)\n self.BINS = BINS\n self.NUM_BUCKETS = NUM_BUCKETS\n\n NUM_STATES = np.prod(NUM_BUCKETS)\n\n # option space\n actions_per_state = list()\n for state in range(NUM_STATES):\n actions_per_state.append(NUM_ACTIONS)\n self.option_space = OptionSpace(actions_per_state, 4)\n \n NUM_OPTIONS = self.option_space.num_options\n self.q_table = 0.0 * np.ones((NUM_STATES, NUM_OPTIONS ))\n self.q_table[NUM_BUCKETS[0]-1,:] = 0.0\n\n def learning_rate(self):\n # the learning rate scheme\n if self.episode < 100:\n return 0.9\n elif self.episode < 200:\n return 0.3\n else:\n return 0.3\n\n def exploration_rate(self):\n if self.episode < 50:\n return 0.8\n elif self.episode < 200:\n return 0.000\n else:\n return 0.000\n\n def train_episode(self):\n # -- setup --\n next_state_cont = self.env.reset()\n s = self.discretize(next_state_cont)\n s_next = s\n o = self.new_policy(s)\n done = False\n option_stack = list() # save the list of (s,o) pairs; once done\n # update inversed for compliance with semi-MDP\n\n # -- info about the trial\n options_executed = 0\n self.global_step = 0\n self.accumulated_reward = 0\n\n # -- execute episode --\n acc_reward = 0\n reward_sequence = list()\n actions = (\"left\",\"wait\",\"right\")\n alpha = self.learning_rate()\n gamma = self.gamma\n \n while not done:\n option = self.new_policy(s)\n s_next, reward, done, time_steps = self.execute_option(s,option)\n options_executed += 1\n\n # update option -- smdp-Q-learning on top level\n if not done:\n valid_options = self.option_space.options_in_state(s_next)\n update = reward + ((gamma ** time_steps) *\n np.amax(self.q_table[s_next,valid_options]))\n else:\n update = reward\n self.q_table[s, option] += alpha * (update - self.q_table[s,option])\n\n s = s_next\n print(\"Episode %d. 
Reward: %d Options: %d\" % (self.episode, self.accumulated_reward,\n                                                     options_executed))\n\n        self.episode += 1\n        return options_executed, self.global_step, self.accumulated_reward\n\n    def execute_option(self, s, option):\n        gamma = self.gamma\n        \n        done = False\n        total_time_steps = 0\n        discounted_reward = 0\n        while option is not None and not done:\n            \n            action = self.option_space.get_action(s,option)\n            s_next, reward, done, time_steps = self.execute_action(s,action)\n\n            discounted_reward += (gamma ** total_time_steps) * reward\n            total_time_steps += time_steps\n            \n            option = self.option_space.o_new(s,option)\n            s = s_next\n\n        # report the total number of primitive steps so callers discount correctly\n        return s_next, discounted_reward, done, total_time_steps\n\n    def execute_action(self, s, action):\n        # the idea is to execute an action until a state change is observed\n        # this turns the environment into a partially observable semi-MDP\n\n        if action is None:\n            # terminal action\n            s_next = s\n            reward = 0\n            time_steps = 0\n            done = False\n            return s_next, reward, done, time_steps\n\n        gamma = self.gamma\n        discounted_reward = 0\n        time_steps = 0\n        done = False\n        s_next = s\n        \n        while s == s_next and not done and time_steps < 3:\n            next_state_cont, reward, done, _ = self.env.step(action)\n            s_next = self.discretize(next_state_cont)\n            if self.episode > 1500:\n                self.env.render()\n\n            discounted_reward += (gamma ** time_steps) * reward\n            time_steps += 1\n            self.global_step += 1\n            self.accumulated_reward += reward\n\n        return s_next, discounted_reward, done, time_steps\n\n    def new_policy(self, s):\n        valid_options = self.option_space.options_in_state(s)\n        \n        if random.random() < self.exploration_rate():\n            return np.random.choice(valid_options,1)[0]\n        else:\n            option_idx = np.argmax(self.q_table[s,valid_options])\n            best_option = valid_options[option_idx]\n            return best_option\n\n    def discretize(self, state):\n        bucket_indice = []\n        for i in range(len(state)):\n            bucket_idx = max(0, np.digitize(state[i],self.BINS[i]) - 1)\n            bucket_indice.append(bucket_idx)\n\n        return np.ravel_multi_index(bucket_indice, self.NUM_BUCKETS)\n\ndef main(trial_idx):\n    # initialization data for up to 100 trials\n    random.seed(1337)\n    env_seed = [random.randint(10,1e6) for i in range(100)]\n    agent_seed = [random.randint(10,1e6) for i in range(100)]\n    \n    env = gym.make(\"CartPole-v0\")\n    env.seed(env_seed[trial_idx])\n    \n    agent = Agent()\n    agent.set_environment(env)\n    agent.seed(agent_seed[trial_idx])\n\n    options_executed = np.zeros(1000)\n    steps = np.zeros(1000)\n    accumulated_reward = np.zeros(1000)\n\n    for episode in range(1000):\n        ep_options_executed, ep_steps, acc_reward = agent.train_episode()\n\n        options_executed[episode] = ep_options_executed\n        steps[episode] = ep_steps\n        accumulated_reward[episode] = acc_reward\n\n    np.save(\"options_executed.npy\", options_executed)\n    np.save(\"steps.npy\", steps)\n    np.save(\"accumulated_reward.npy\", accumulated_reward)\n\nif __name__ == \"__main__\":\n    main(45)\n","sub_path":"PolePOMDP/OptionQPolePOMDP.py","file_name":"OptionQPolePOMDP.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"362833400","text":"import unittest\nimport os\n\nfrom utils import CellState as C\nfrom agent import Agent\nfrom scanner import *\n\n\ndef build_scanner(matrix, px, py, ox, oy, agent=None):\n    os.environ['MAP_FILE'] = 'test.map'\n    with open(os.environ['MAP_FILE'], 'w') as f:\n        f.write('\\n')\n    return {\n        'field_matrix': matrix,\n        'current_cell': (px, py),\n        
'orientation': (ox, oy),\n 'agent': agent if agent else Agent()\n }\n\n\ndef remove_test_map_file():\n os.remove(os.environ['MAP_FILE'])\n\n\nclass TestScanner(unittest.TestCase):\n\n def _test_visited(self, function, result, test_matrix):\n for case in test_matrix:\n matrix, px, py, ox, oy = case\n self.assertEqual(result,\n function(\n build_scanner(matrix, px, py, ox, oy)\n )\n )\n remove_test_map_file()\n\n def test_left_cell_visited(self):\n self._test_visited(left_cell_visited, True, (\n ([[1, 1]], 1, 0, 0, 1),\n ([[1], [1]], 0, 1, 1, 0),\n ([[1, 1]], 0, 0, 0, -1),\n ([[1], [1]], 0, 0, -1, 0)\n ))\n\n def test_left_cell_not_visited(self):\n self._test_visited(left_cell_visited, False, (\n ([[0, 1]], 1, 0, 0, 1),\n ([[0], [1]], 0, 1, 1, 0),\n ([[0, 1]], 1, 0, 0, -1),\n ([[1], [0]], 0, 0, -1, 0),\n ([[1]], 0, 0, 0, 1)\n ))\n\n def test_left_cell_not_visited_wall(self):\n self._test_visited(left_cell_visited, False, (\n ([[1]], 0, 0, 0, 1),\n ([[1]], 0, 0, 1, 0),\n ([[1]], 0, 0, 0, -1),\n ([[1]], 0, 0, -1, 0)\n ))\n\n def test_right_cell_visited(self):\n self._test_visited(right_cell_visited, True, (\n ([[C.EMPTY, C.EMPTY]], 0, 0, 0, 1),\n ([[C.EMPTY], [C.EMPTY]], 0, 0, 1, 0),\n ([[C.EMPTY, C.EMPTY]], 1, 0, 0, -1),\n ([[C.EMPTY], [C.EMPTY]], 0, 1, -1, 0)\n ))\n\n def test_right_cell_not_visited(self):\n self._test_visited(right_cell_visited, False, (\n ([[1, 0]], 0, 0, 0, 1),\n ([[1], [0]], 0, 0, 1, 0),\n ([[0, 1]], 1, 0, 0, -1),\n ([[0], [1]], 0, 1, -1, 0)\n ))\n\n def test_right_cell_not_visited_wall(self):\n self._test_visited(right_cell_visited, False, (\n ([[1]], 0, 0, 0, 1),\n ([[1]], 0, 0, 1, 0),\n ([[1]], 0, 0, 0, -1),\n ([[1]], 0, 0, -1, 0)\n ))\n\n def test_front_cell_visited(self):\n self._test_visited(front_cell_visited, True, (\n ([[1], [1]], 0, 1, 0, 1),\n ([[1, 1]], 0, 0, 1, 0),\n ([[1], [1]], 0, 0, 0, -1),\n ([[1, 1]], 1, 0, -1, 0)\n ))\n\n def test_front_cell_not_visited(self):\n self._test_visited(front_cell_visited, False, (\n ([[1, 1]], 0, 0, 0, 1),\n ([[1], [1]], 0, 0, 1, 0),\n ([[1, 1]], 1, 0, 0, -1),\n ([[1], [1]], 0, 1, -1, 0)\n ))\n\n def test_front_cell_not_visited_wall(self):\n self._test_visited(front_cell_visited, False, (\n ([[1]], 0, 0, 0, 1),\n ([[1]], 0, 0, 1, 0),\n ([[1]], 0, 0, 0, -1),\n ([[1]], 0, 0, -1, 0)\n ))\n\n\n def _test_mark_cell(self, marker, cell_init, cell_expect, success):\n scanner = build_scanner([[cell_init]], 0, 0, 0, 1)\n result = marker(scanner)\n self.assertEqual([[cell_expect]], scanner['field_matrix'])\n self.assertEqual(success, result)\n\n\n def test_fill_cell(self):\n self._test_mark_cell(mark_filled, 0, -1, True)\n self._test_mark_cell(mark_filled, 1, -1, True)\n self._test_mark_cell(mark_filled, -1, -1, False)\n\n\n def test_empty_cell(self):\n self._test_mark_cell(mark_empty, 0, 1, True)\n self._test_mark_cell(mark_empty, 1, 1, False)\n self._test_mark_cell(mark_empty, -1, 1, True)\n\n\n def test_orient_left_from_front(self):\n scanner = build_scanner([[-1]], 0, 0, 0, 1)\n orient_left(scanner)\n self.assertEqual((-1, 0), scanner['orientation'])\n\n\n def test_orient_right_from_front(self):\n scanner = build_scanner([[-1]], 0, 0, 0, 1)\n orient_right(scanner)\n self.assertEqual((1, 0), scanner['orientation'])\n\n\n def test_left_cell_empty_from_front(self):\n scanner = build_scanner([[1, -1]], 1, 0, 0, 1)\n self.assertEqual(True, left_cell_empty(scanner))\n\n\n def test_left_cell_not_empty_from_front(self):\n scanner = build_scanner([[0, -1]], 1, 0, 0, 1)\n self.assertEqual(False, left_cell_empty(scanner))\n\n\n def 
test_left_cell_not_empty_from_front_wall(self):\n scanner = build_scanner([[-1]], 0, 0, 0, 1)\n self.assertEqual(False, left_cell_empty(scanner))\n\n\n def test_create_scanner(self):\n expect = build_scanner([[C.AGENT]], 0, 0, 0, 1)\n self.assertEqual(expect, create_scanner(expect['agent']))\n","sub_path":"test_scanner.py","file_name":"test_scanner.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27982635","text":"import pickle\nimport numpy as np\n\n\ndef readmatrix(D):\n with open(\"TCC_21/matrix.ec\", 'r', encoding='latin1') as file:\n f = open('type.txt', 'w')\n count = 1\n for line in file:\n new_array = line[:-1].split()\n type_array = new_array[1].split(',')\n if count in D:\n # f.write(new_array[0] + ' ')\n for i in range(len(type_array)):\n if i != len(type_array) - 1:\n f.write(type_array[i] + ' ')\n else:\n f.write(type_array[i])\n if count != D[len(D) - 1]:\n f.write('\\n')\n count = count + 1\n print(count)\n f.close()\n # print(count)\n # D = pickle.load(file, encoding='latin1')\n\n\ndef readnonzero():\n with open(\"mat_21/nonzero_ec.dat\", 'rb') as file:\n D = pickle.load(file, encoding='latin1')\n # print(len(D))\n return D\n\n\nif __name__ == '__main__':\n A = readnonzero()\n # print(A)\n readmatrix(A)\n\n\n","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424298113","text":"#!/usr/bin/env python3\n\n# compByFolder\n# Invoke by: compByFolder \n# Takes all files in 2 designated folders and compares matching filenames' contents\n# Original Creation: 09NOV20\n# Released under Apache 2.0 License\n\nimport os # For checking file path and replacing file extension\nimport sys # For parsing input arguments\nimport filecmp # For comparing files\n\nif os.path.isdir(sys.argv[1]) and os.path.isdir(sys.argv[2]): # Check if dir paths are valid\n files1 = {f for f in os.listdir(sys.argv[1]) if os.path.isfile(os.path.join(sys.argv[1], f))} # List files in dir 1\n files2 = {f for f in os.listdir(sys.argv[2]) if os.path.isfile(os.path.join(sys.argv[2], f))} # List files in dir 2\n files1A = sorted(files1) # Alphabetize files in dir 1\n files2A = sorted(files2) # Alphabetize files in dir 2\n fileCt = 0\n\n if files1A == files2A: # Check if both directories match\n for i in range(len(files1A)): # Begin comparing files\n if not filecmp.cmp(os.path.join(sys.argv[1], files1A[i]), os.path.join(sys.argv[2], files2A[i])):\n print(\"File: \" + files1A[i] + \" has changes.\")\n fileCt += 1\n print(str(fileCt) + \" files checked.\")\n else: # If both dirs do NOT match, list diffs and then compare:\n both = sorted(files1 & files2) # List files present in both folders and sort files alphabetically\n notBoth = sorted(files1 ^ files2) # List files NOT present in both folders and sort files alphabetically\n print(str(len(notBoth)) + \" file(s) not in both:\\n\" + str(notBoth))\n for i in range(len(both)): # Begin comparing files\n if not filecmp.cmp(os.path.join(sys.argv[1], both[i]), os.path.join(sys.argv[2], both[i])):\n print(\"File: \" + both[i] + \" has changes.\")\n fileCt += 1\n print(str(fileCt) + \" files checked.\")\n\nelse:\n print(\"Error: Folder paths invalid. 
Exiting.\")\n","sub_path":"compByFolder.py","file_name":"compByFolder.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599668885","text":"import pprint\n\nfrom pyparsing import *\n\ndata = '''\n\n#Huawei Model S2700, S3300 and S5300\n\nsystem-view\n<%\n module: expect-output\n of-command: \"display traffic behavior user-defined [cid]\"\n to-contain: \"CIR\"\n at-position: 1\n to-equal: [cpe_cir]\n%>\n\n<%\n module: create-variable\n variable-name: traffic_policy_name\n command: \"display this | i traffic-policy\"\n keyword: \"traffic-policy\"\n position: 1\n%>\n\ntraffic-policy {traffic_policy_name} inbound\n<%\n module: expect-output\n of-command: \"y\"\n to-contain: \"Info: Save the configuration successfully\"\n%>\n\nquit\n\n\n'''\n\n\n#\n# Blank lines grammar\n#\ndef blankline_grammar():\n return LineStart().leaveWhitespace() + LineEnd()\n\n\n#\n# Comments grammar\n#\ndef comment_grammar():\n return ('#' + restOfLine).suppress()\n\n\n#\n# Start by describing grammar for commands\n#\ndef command_grammar():\n identifier = Word(alphas + '-_')\n line_with_quotes = quotedString.setParseAction(removeQuotes)\n plain_value = Word(alphanums + \"-_\")\n value_with_bracket = \"[\" + Word(alphas + \"-_\") + \"]\"\n attr_value = (line_with_quotes | plain_value | value_with_bracket) + ZeroOrMore(\",\").suppress()\n attribute = Group(identifier + Suppress(':') + attr_value)\n command_group = Suppress('<%') + Dict(OneOrMore(attribute))(\"data\") + Suppress('%>')\n command_group.ignore(pythonStyleComment)\n command_group.ignore(blankline_grammar())\n\n return command_group\n\n\nclass CreateVariableCommand:\n def __init__(self, attrs: dict, raw_command: str):\n self.name = attrs.get('variable-name')\n self.raw_command = raw_command\n\n def __str__(self):\n return \"CreateVariable -> name:{name} raw_command:{raw_command}\\n\".format(name=self.name,\n raw_command=self.raw_command)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass ExpectOutputCommand:\n def __init__(self, attrs: dict, raw_command: str):\n self.command = attrs.get('of-command')\n self.keyword = attrs.get('to-contain')\n self.raw_command = raw_command\n\n def __str__(self):\n return \"ExpectOutput -> command:'{command}' keyword:'{keyword}' raw_command:{raw_command}\\n\".format(\n command=self.command,\n keyword=self.keyword,\n raw_command=self.raw_command)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef extract_raw_command(original_content, start_location):\n end_location = original_content.find('>', start_location) + 1\n raw_specialized_command = original_content[start_location:end_location]\n return raw_specialized_command\n\n\ndef convert_to_specialized_command(content, loc, toks):\n class_attrs = {\n 'create-variable': CreateVariableCommand,\n 'expect-output': ExpectOutputCommand,\n }\n\n attrs = toks.data.asDict()\n klass = class_attrs.get(attrs.get('module'))\n raw_command = extract_raw_command(content, loc)\n return klass(attrs, raw_command)\n\n\n#\n# Describe how a plain line is\n#\ndef plain_grammar():\n return Word(printables + \" \")\n\n\nclass PlainCommand:\n def __init__(self, raw_command):\n self.raw_command = raw_command\n\n def __str__(self):\n return \"Plain -> content:{content}\\n\".format(content=self.raw_command)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef convert_to_plain_command(content, loc, toks):\n return PlainCommand(toks[0])\n\n\n#\n# Text file grammar\n#\ncommand = 
command_grammar().setParseAction(convert_to_specialized_command)\nplain = plain_grammar().setParseAction(convert_to_plain_command)\nstatement = command | plain\nbody = OneOrMore(statement)\nparser = body + StringEnd()\nparser.ignore(blankline_grammar())\nparser.ignore(comment_grammar())\n\n#\n# Text file parsing\n#\nresults = parser.parseString(data)\npprint.pprint(list(results))\n\n# Okay, this is where we start\nmydict = {\n    'a': 1,\n    'b': 2,\n    'c': 3,\n}\n\nwanted_keys = ['b', 'c']\n\nnew_dict = {i: mydict.pop(i) for i in wanted_keys}\nprint(new_dict)\nprint(mydict)\n","sub_path":"match2.py","file_name":"match2.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214531697","text":"import copy\nimport time\nimport os\nimport pathlib\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.backends import cudnn\n\nimport numpy as np\n\nfrom vision.trainings.abstract_training import AbstractTraining\nfrom vision.tasks.detection.ssd_qfgaohao.ssd.priors import priors_factory\nfrom vision.tasks.detection.ssd_qfgaohao import utils as box_utils\nfrom vision.tasks.detection.ssd_qfgaohao.ssd.predictor import Predictor\nfrom vision.tasks.detection.ssd_qfgaohao.misc import Timer\nfrom vision.tasks.detection.ssd_qfgaohao import measurements\n\nfrom vision.losses import losses\n\nfrom visualization.utils import plot_bboxes\nfrom torchvision.transforms import ToPILImage\nimport pdb\n\n\ndef get_training(model, logger, config):\n    return ROCKTraining(model, logger, config)\n\n\ndef group_annotation_by_class(dataset, indexes=None):\n    true_case_stat = {}\n    all_gt_boxes = {}\n    all_difficult_cases = {}\n\n    if indexes is None:\n        indexes = range(len(dataset))\n\n    for i in indexes:\n        image_id, annotation = dataset.get_annotation(i)\n        gt_boxes, classes, is_difficult = annotation\n        gt_boxes = torch.from_numpy(gt_boxes)\n        for i, difficult in enumerate(is_difficult):\n            class_index = int(classes[i])\n            gt_box = gt_boxes[i]\n            if not difficult:\n                true_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1\n\n            if class_index not in all_gt_boxes:\n                all_gt_boxes[class_index] = {}\n            if image_id not in all_gt_boxes[class_index]:\n                all_gt_boxes[class_index][image_id] = []\n            all_gt_boxes[class_index][image_id].append(gt_box)\n            if class_index not in all_difficult_cases:\n                all_difficult_cases[class_index]={}\n            if image_id not in all_difficult_cases[class_index]:\n                all_difficult_cases[class_index][image_id] = []\n            all_difficult_cases[class_index][image_id].append(difficult)\n\n    for class_index in all_gt_boxes:\n        for image_id in all_gt_boxes[class_index]:\n            all_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id])\n    for class_index in all_difficult_cases:\n        for image_id in all_difficult_cases[class_index]:\n            # convert the difficult flags (not the gt boxes) to a tensor\n            all_difficult_cases[class_index][image_id] = torch.tensor(all_difficult_cases[class_index][image_id])\n    return true_case_stat, all_gt_boxes, all_difficult_cases\n\n\ndef compute_average_precision_per_class(num_true_cases, gt_boxes, difficult_cases,\n                                        prediction_file, iou_threshold, use_2007_metric):\n\n    if isinstance(prediction_file, list):\n        lines = prediction_file\n    else:\n        with open(prediction_file) as f:\n            lines = f.readlines()\n        lines = [line.strip() for line in lines]\n    \n    image_ids = []\n    boxes = []\n    scores = []\n    for line in lines:\n        t = line.rstrip().split(\" \")\n        image_ids.append(int(t[0]))\n        scores.append(float(t[1]))\n        box = torch.tensor([float(v) for v 
in t[2:]]).unsqueeze(0)\n box -= 1.0 # convert to python format where indexes start from 0\n boxes.append(box)\n scores = np.array(scores)\n sorted_indexes = np.argsort(-scores)\n boxes = [boxes[i] for i in sorted_indexes]\n image_ids = [image_ids[i] for i in sorted_indexes]\n true_positive = np.zeros(len(image_ids))\n false_positive = np.zeros(len(image_ids))\n matched = set()\n for i, image_id in enumerate(image_ids):\n box = boxes[i]\n if image_id not in gt_boxes:\n false_positive[i] = 1\n continue\n\n gt_box = gt_boxes[image_id]\n ious = box_utils.iou_of(box, gt_box)\n max_iou = torch.max(ious).item()\n max_arg = torch.argmax(ious).item()\n if max_iou > iou_threshold:\n if difficult_cases[image_id][max_arg] == 0:\n if (image_id, max_arg) not in matched:\n true_positive[i] = 1\n matched.add((image_id, max_arg))\n else:\n false_positive[i] = 1\n else:\n false_positive[i] = 1\n\n true_positive = true_positive.cumsum()\n false_positive = false_positive.cumsum()\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / num_true_cases\n if use_2007_metric:\n return measurements.compute_voc2007_average_precision(precision, recall)\n else:\n return measurements.compute_average_precision(precision, recall)\n\n\nclass ROCKTraining(AbstractTraining):\n def __init__(self, model, logger, config):\n super(ROCKTraining, self).__init__(model, logger, config)\n\n get_priors_fn = priors_factory.get_priors(self.config.model_name)\n self.priors = get_priors_fn(self.config)\n self.predictor = Predictor(self.model,\n self.priors.to(self.device),\n center_variance=self.config.center_variance,\n size_variance=self.config.size_variance,\n nms_method=self.config.nms_method,\n nms_threshold=self.config.nms_threshold,\n candidate_size=200,\n sigma=self.config.sigma)\n\n def log_val_stats(self, val_regression_loss, val_classification_loss, val_loss):\n self.logger.scalar_summary('val/regression_loss', val_regression_loss, self.current_step)\n self.logger.scalar_summary('val/classification_loss', val_classification_loss, self.current_step)\n self.logger.scalar_summary('val/loss', val_loss, self.current_step)\n\n def log_mAP_stats(self, mAP, phase='val', IoU=0.5):\n self.logger.scalar_summary('%s/mAP@%f' % (phase, IoU), mAP, self.current_step) \n\n def log_iteration(self, loss, regression_loss, classification_loss,\n depth_estimation_loss=None, scenes_loss=None, normals_loss=None):\n print('Step [%d/%d], Loss: %.4f, Regression Loss: %.4f, Classification Loss: %.4f'\n % (self.current_step, self.total_steps, loss.data.item(), regression_loss.data.item(), classification_loss.data.item()))\n # ============ TensorBoard logging ============#\n # (1) Log the scalar values\n info = {\n 'loss': loss.data.item(),\n 'regression_loss': regression_loss.data.item(),\n 'classification_loss': classification_loss.data.item()\n }\n if depth_estimation_loss is not None:\n info['depth_estimation_loss'] = depth_estimation_loss.data.item()\n if scenes_loss is not None:\n info['scene_classification_loss'] = scenes_loss.data.item()\n if normals_loss is not None:\n info['surface_normals_loss'] = normals_loss.data.item()\n for tag, value in info.items():\n self.logger.scalar_summary(\"train/\" + tag, value, self.current_step + 1)\n\n def _train_one_epoch(self, dataloader, criterion, optimizer,\n depth_criterion=None, normals_criterion=None, classification_criterion=None):\n\n self.model.train(True)\n running_loss = 0.0\n running_regression_loss = 0.0\n running_classification_loss = 0.0\n\n if 
self.config.use_in_training_predictions:\n statistics = np.zeros((len(dataloader), 5))\n\n for i, data in enumerate(dataloader):\n batch_index = i\n indexes, images, depths, scene_types, bboxes, bboxes_labels, normals, masks = data\n if (self.current_step % self.config.log_frequency) == 0:\n to_draw_bboxes = box_utils.convert_locations_to_boxes(bboxes[0],\n self.priors,\n self.config.center_variance,\n self.config.size_variance)\n to_draw_bboxes = box_utils.center_form_to_corner_form(to_draw_bboxes)\n img_with_bboxes = plot_bboxes(images[0], to_draw_bboxes, bboxes_labels[0])\n self.logger.image_summary('transformed_image_with_bboxes', [img_with_bboxes], self.current_step)\n transform = ToPILImage()\n depth_image = transform(depths[0] / 10)\n normals_image = transform(normals[0])\n self.logger.image_summary('transformed_depth', [depth_image], self.current_step)\n self.logger.image_summary('transformed_surface_normals', [normals_image], self.current_step)\n images = images.to(self.device)\n depths = depths.to(self.device)\n bboxes = bboxes.to(self.device)\n bboxes_labels = bboxes_labels.to(self.device)\n normals = normals.to(self.device)\n scene_types = scene_types.to(self.device)\n masks = masks.to(self.device)\n\n optimizer.zero_grad()\n confidence, locations, rock_outs = self.model(images)\n regression_loss, classification_loss = criterion(confidence, locations, bboxes_labels, bboxes)\n regression_loss *= self.config.SSD_regression_weight\n classification_loss *= self.config.SSD_classification_weight\n loss = regression_loss + classification_loss\n\n depth_estimation_loss = None\n if depth_criterion is not None:\n depth_outs = rock_outs[1]\n depth_loss = depth_criterion(depth_outs * masks, depths * masks)\n depth_estimation_loss = (depth_loss) * self.config.ROCK_depth_weight \n loss = loss + depth_estimation_loss\n #depth_estimation_loss.backward()\n\n scenes_loss = None\n if classification_criterion is not None:\n scene_classification_outs = rock_outs[0]\n scenes_loss = classification_criterion(scene_classification_outs, scene_types)\n scenes_loss = scenes_loss * self.config.ROCK_classification_weight\n loss = loss + scenes_loss\n #scenes_loss.backward()\n\n normals_loss = None\n if normals_criterion is not None:\n normals_outs = rock_outs[2]\n normals_loss = normals_criterion(normals_outs * masks, normals * masks)\n normals_loss = normals_loss * self.config.ROCK_normals_weight\n loss = loss + normals_loss\n #normals_loss.backward()\n\n loss.backward()\n\n optimizer.step()\n\n if (i % self.config.log_frequency) == 0:\n self.log_iteration(loss, regression_loss, classification_loss,\n depth_estimation_loss, scenes_loss, normals_loss)\n\n running_loss += loss.item() * images.size(0)\n running_regression_loss += regression_loss.item() * images.size(0)\n running_classification_loss += classification_loss.item() * images.size(0)\n\n self.current_step += 1\n\n epoch_loss = running_loss / float(len(dataloader.dataset))\n epoch_regression_loss = running_regression_loss / float(len(dataloader.dataset))\n epoch_classification_loss = running_classification_loss / float(len(dataloader.dataset))\n\n print('{} Loss: {:.4f} Regression Loss: {} Classification Loss: {:.4f}'.format('train',\n epoch_loss,\n epoch_regression_loss,\n epoch_classification_loss))\n\n\n def _eval(self, dataloader, criterion, eval_dir='validation', verbose=False):\n self.model.eval()\n path = os.path.join(self.config.job_dir, self.config.job_name, eval_dir)\n eval_path = pathlib.Path(path)\n eval_path.mkdir(exist_ok=True)\n timer 
= Timer()\n\n        # Not using the dataloaders functions\n        dataset = dataloader.dataset\n        class_names = dataset.class_names\n\n        '''\n        true_case_stat: dict label -> absolute number of true cases\n        all_gb_boxes: dict label -> (dict sample_idx -> list of bboxes)\n        '''\n\n        true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(dataset)\n\n        '''\n        Results: list of results\n        results[i] is the result for the i-th image\n        results[i][j] corresponds to the j-th bbox predicted on the i-th image\n        results[i][j][0] = i\n        results[i][j][1] = label associated with the j-th bbox\n        results[i][j][2] = probability associated with the j-th bbox\n        results[i][j][3:] = the j-th bbox in corner form\n        '''\n        results = []\n\n        for i in range(len(dataset)):\n            timer.start(\"Load Image\")\n            image = dataset.get_image(i)\n            image = image.to(self.device)\n            if verbose:\n                print(\"Load Image: {:4f} seconds.\".format(timer.end(\"Load Image\")))\n            timer.start('Predict')\n            '''\n            boxes: list of bboxes the algorithm extracted from the image\n            labels: list of labels associated with each of the bboxes\n            probs: list of probabilities associated with each of the bboxes\n            '''\n            #pdb.set_trace()\n            boxes, labels, probs = self.predictor.predict(image) \n            #pdb.set_trace()\n            if verbose:\n                print(\"Prediction: {:4f} seconds.\".format(timer.end(\"Predict\")))\n            indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i\n            results.append(torch.cat([\n                indexes.reshape(-1, 1),\n                labels.reshape(-1, 1).float(),\n                probs.reshape(-1, 1),\n                boxes + 1.0  # matlab's indexes start from 1\n            ], dim=1))\n\n        # Concatenate the results into a single list\n        results = torch.cat(results)\n\n        for class_index, class_name in enumerate(class_names):\n            if class_index == 0: continue  # ignore background\n            prediction_path = eval_path / f\"det_test_{class_name}.txt\"\n            with open(prediction_path, \"w\") as f:\n                sub = results[results[:, 1] == class_index, :]\n                for i in range(sub.size(0)):\n                    # prob_box: probability and bbox\n                    prob_box = sub[i, 2:].numpy()\n                    image_id = dataset.ids[int(sub[i, 0])]\n                    # write image_id and prob_box to the file\n                    print(str(image_id) + \" \" + \" \".join([str(v) for v in prob_box]), file=f)\n\n        aps = []\n        if verbose:\n            print(\"\\n\\nAverage Precision Per-class:\")\n        for class_index, class_name in enumerate(class_names):\n            if class_index == 0:\n                continue\n            prediction_path = eval_path / f\"det_test_{class_name}.txt\"\n            try:\n                ap = compute_average_precision_per_class(\n                    true_case_stat[class_index],\n                    all_gb_boxes[class_index],\n                    all_difficult_cases[class_index],\n                    prediction_path,\n                    self.config.iou_threshold,\n                    self.config.use_2007_metric\n                )\n            except KeyError:\n                print('caught key error')\n                ap=0\n            aps.append(ap)\n            if verbose:\n                print(f\"{class_name}: {ap}\")\n\n        mAP = sum(aps)/len(aps)\n        self.log_mAP_stats(mAP)\n        print(f\"\\nAverage Precision Across All Classes:{mAP}\")\n        return mAP\n\n\n    def train_model(self, dataloaders, criterion, optimizer, scheduler, num_epochs, logging, checkpoint_path=None):\n\n        since = time.time()\n\n        best_val_mAP = float('-inf')\n        last_epoch = -1\n\n        dataset_sizes = {\n            x: len(dataloaders[x].dataset) for x in ['train', 'val']\n        }\n        logging.info('Dataset sizes: %s %d, %s %d' %\n                     ('train', dataset_sizes['train'], 'val', dataset_sizes['val']))\n\n        cudnn.benchmark = True\n\n        self.current_step = 0\n        self.total_steps = len(dataloaders['train']) * num_epochs\n\n        depth_criterion = None\n        if self.config.use_depth:\n            depth_criterion = losses.berHu_log()\n\n        normals_criterion = None\n        if 
self.config.use_normals:\n            normals_criterion = losses.rock_SNLoss()\n\n        classification_criterion = None\n        if self.config.use_scenes:\n            classification_criterion = nn.CrossEntropyLoss()\n\n        for epoch in range(last_epoch + 1, num_epochs):\n            logging.info('Epoch {}/{}, lr:{}'.format(epoch, num_epochs - 1, scheduler.get_lr()))\n            print('-' * 10)\n\n            scheduler.step()\n            self.logger.scalar_summary(\"learning_rate\", scheduler.get_lr()[-1], self.current_step)\n\n            self._train_one_epoch(dataloaders['train'], criterion,\n                                  optimizer,\n                                  depth_criterion=depth_criterion,\n                                  normals_criterion=normals_criterion,\n                                  classification_criterion=classification_criterion)\n\n            if (epoch % self.config.val_frequency) == 0:\n                val_mAP = self._eval(dataloaders['val'], criterion)\n\n            if checkpoint_path is not None:\n                logging.info('Saving checkpoint')\n                self.save_model(os.path.join(checkpoint_path, 'latest_checkpoint'))\n\n            if val_mAP > best_val_mAP:\n                best_val_mAP = val_mAP\n                logging.info('Best model')\n\n                if checkpoint_path is not None:\n                    logging.info('Saving best checkpoint')\n                    self.save_model(os.path.join(checkpoint_path, 'best_model'))\n\n    def eval_model(self, dataloader, logging):\n        self._eval(dataloader, None, eval_dir='predictions', verbose=True)\n","sub_path":"vision/trainings/rock_training.py","file_name":"rock_training.py","file_ext":"py","file_size_in_byte":18158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116874483","text":"# Import the following modules.\r\nimport urllib.parse \r\nimport requests \r\n \r\nmain_api = \"https://www.mapquestapi.com/directions/v2/route?\" # URL being accessed.\r\norig = \"Washington\" # Origin.\r\ndest = \"Baltimore\" # Destination.\r\nkey = \"mdJyZfldqu3Hor9DWw0aN1am6eBlwQ7k\" # The MapQuest API key.\r\nurl = main_api + urllib.parse.urlencode({\"key\": key, \"from\": orig, \"to\": dest}) # Build the url variable with the data above; the MapQuest directions API expects the from/to query parameters.\r\njson_data = requests.get(url).json() # Perform the request, creating the json variable.\r\nprint(json_data) # Check whether the request succeeded.\r\n\r\n#Jorge Alejandro Camacho Martínez.\r\n#GIR0142\r\n\r\n#Basic HTTP:\r\n\"\"\"In basic authentication, the client requests a URL that requires authentication.\r\nThe server asks the client to identify itself by sending a 401 Not Authorized code.\r\nThe client sends back the same request, but with the credentials as a base64-encoded string.\"\"\"\r\n\r\n#Open Authorization (OAuth):\r\n\"\"\"An open standard that enables simple authorization flows for websites or applications.\r\nIt lets a user of site A share their information on site A (the service provider) with site B (the consumer) without sharing their entire identity.\"\"\"\r\n\r\n#Token:\r\n\"\"\"An electronic device given to an authorized user of a computerized service to ease the authentication process.\"\"\"\r\n","sub_path":"unidad_2/ actividad-1/08_parse-json1-Jorge.py","file_name":"08_parse-json1-Jorge.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260853725","text":"import sys\r\n\r\n# f must be defined before the input loop below calls it\r\ndef f(r, e, c):\r\n    p = e - 
c\r\n if r > p : return \"do not advertise\"\r\n if p > r : return \"advertise\"\r\n if p == r : return \"does not matter\"","sub_path":"openkattis/nastyhacks.py","file_name":"nastyhacks.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31048919","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# BSD.\n# Copyright (c) 2012, y-p (repos: http://github.com/y-p)\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport subprocess\n\nimport sys,os,re,json,codecs\nimport logging\nimport urllib\n\nimport settings\nimport utils\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)-4s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n #\t\t filename='/tmp/myapp.log',\n #\t\t filemode='w'\n)\nlogger = logging.getLogger(\"mmm-scrape\")\n\nclass DocumentCache(object):\n def __init__(self, datadict):\n self.datadict = datadict\n self.populateLocalCache()\n\n def populateLocalCache(self):\n \"\"\"\n iterates over document entries in self.datadict, retrievs documents\n from net if not already cached, extracts text from documents if not already cached\n \"\"\"\n logger.info(\"Populating local document cache, retrieving from net as needed\")\n for (k, d) in sorted(self.datadict.iteritems()):\n basename = utils.get_base_name(d['url'])\n\n fullpath = os.path.join(settings.DATADIR, basename)\n fulltxtpath = os.path.join(settings.DATADIR, basename.split('.')[0] + \".txt\")\n if not os.path.exists(fulltxtpath) and not os.path.exists(fullpath):\n logger.info(\"Retrieving %s into %s\" % (d['url'], settings.DATADIR))\n with open(fullpath, \"wb\") as f:\n f.write(urllib.urlopen(d['url']).read())\n pass\n\n if not os.path.exists(fulltxtpath):\n cmd = \"pdftotext -f 1 -l 5 %s -\" % fullpath\n logger.info(\"converting %s to text\" % fullpath)\n\n p = subprocess.Popen(cmd.strip().split(' '), stdout=subprocess.PIPE)\n (contents, errf) = p.communicate()\n with codecs.open(fulltxtpath, \"wb\", encoding='utf-8') as f:\n f.write(contents.decode('utf-8'))\n\n if settings.DELETE_PDF_AFTER_EXTRACTION:\n os.unlink(fullpath)\n def sanitize_lines(self,lines):\n lines = [x.decode('utf-8') for x in lines]\n lines = [re.sub(u\"['`\\\"]\", \"\", x) for x in lines]\n lines = [re.sub(u\"[^א-ת\\d]\", 
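# --- A minimal, standalone Python 3 sketch (not from the original module) of the
# fetch-once/convert-once caching idiom that populateLocalCache() above implements.
# The URL handling, paths, and pdftotext flags here are illustrative assumptions.
import os
import subprocess
import urllib.request

def cached_text(url, datadir="data"):
    """Download a PDF once, convert it to text once, then reuse the .txt file."""
    os.makedirs(datadir, exist_ok=True)
    pdf_path = os.path.join(datadir, os.path.basename(url))
    txt_path = os.path.splitext(pdf_path)[0] + ".txt"
    if not os.path.exists(txt_path):
        if not os.path.exists(pdf_path):
            urllib.request.urlretrieve(url, pdf_path)  # fetch only if missing
        # convert only the first few pages, mirroring the original's -f 1 -l 5
        out = subprocess.run(["pdftotext", "-f", "1", "-l", "5", pdf_path, "-"],
                             capture_output=True, check=True)
        with open(txt_path, "wb") as f:
            f.write(out.stdout)
    with open(txt_path, encoding="utf-8") as f:
        return f.read()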
\" \", x) for x in lines]\n lines = [re.sub(u\"וו?עד\", u\"ועד\", x) for x in lines]\n lines = [re.sub(u\"[לב]ועד\", u\"ועד\", x) for x in lines]\n #\tlines = [re.sub(u\"\\sה\",u\" \",x ) for x in lines ]\n lines = [re.sub(u\"\\s+\", \" \", x) for x in lines]\n\n return lines\n\n def getDocumentLines(self, k):\n d = self.datadict[k]\n basename = utils.get_base_name(d['url'])\n fullpath = os.path.join(settings.DATADIR, basename)\n fulltxtpath = os.path.join(settings.DATADIR, basename.split('.')[0] + \".txt\")\n logger.debug(\"Loading cached text for %s from %s\" % (fullpath, fulltxtpath))\n with codecs.open(fulltxtpath, encoding='utf-8') as f:\n contents = f.read().encode('utf-8')\n\n lines = self.sanitize_lines(contents.split(\"\\n\"))\n return lines","sub_path":"DocumentCache.py","file_name":"DocumentCache.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"261226126","text":"\"\"\"\n@project: MECPaper\n@author: shuangcheng\n@e-mail: sunshuangcheng@bupt.edu.cn\n@file: algorithm\n\"\"\"\nimport numpy as np\nimport json\nimport pandas as pd\nr = 0.005\ndef deg(matrix):\n \"\"\"\n :param matrix: 图的邻接矩阵。\n :return: 返回一个度矩阵\n \"\"\"\n [rows, cols] = np.shape(matrix)\n print(rows, cols)\n # 理论上来讲,matrix应该是一个方阵,row=col\n if rows != cols:\n print(\"the row and col error rows is {}, and the cols is {}\".format(rows, cols))\n return None\n degree = []\n for row in range(rows):\n degree.append(np.sum(matrix[row]))\n degree = np.diag(degree)\n return degree\n\ndef adjacentMatrix():\n \"\"\"\n :return:返回邻接矩阵\n \"\"\"\n with open(\"../static/info.json\") as f:\n ORI = json.loads(f.read(), encoding=\"utf-8\")\n ORI = ORI.get(\"UU\")\n return ORI\n\ndef ELU(x):\n if x > 0:\n return x\n else:\n return np.exp(x) - 1\n\ndef dif(x1, x2):\n ans = x1 - x2\n if ans >= 0:\n return ans\n else:\n return 0\n\ndef GCNTest():\n # 第一步:依据构建的邻接矩阵实现状态和边的更新\n ObeNumber = 10\n ObeAppName = \"微信\"\n adjacent = adjacentMatrix()\n degMatrix = deg(adjacent)\n\n # 第二步: 构建状态矩阵\n DF = pd.read_csv(\"../dataSet/GroupOneTest.csv\")\n # 按照时间分片\n DF[\"starttime\"] = pd.to_datetime(DF[\"starttime\"])\n # 用于构建上下文\n CO = DF[DF[\"starttime\"] > pd.to_datetime(\"2018-3-14 8:00\")]\n CO = CO[CO[\"starttime\"] <= pd.to_datetime(\"2018-3-14 9:00\")]\n # 用于构建初始状态矩阵\n ORI = DF[DF[\"starttime\"] > pd.to_datetime(\"2018-3-14 9:00\")]\n ORI = ORI[ORI[\"starttime\"] <= pd.to_datetime(\"2018-3-14 10:00\")]\n\n # 用于构建结果状态矩阵\n SOUR = DF[DF[\"starttime\"] > pd.to_datetime(\"2018-3-14 10:00\")]\n SOUR = SOUR[SOUR[\"starttime\"] <= pd.to_datetime(\"2018-3-14 11:00\")]\n\n\n User = pd.read_csv(\"../static/UsedUser.csv\")\n User = User[\"user_id\"].to_list()\n userNumber = len(User) # 不出意外的话,此处应该是100\n ObeUsers = []\n count = 0\n state = np.zeros(shape=(1, userNumber))\n\n # 构建context\n for user in User:\n temp = CO[CO[\"user_id\"] == user][\"app_name\"].drop_duplicates().to_list()\n if ObeAppName in temp:\n row = User.index(user)\n state[0, row] = 0.5\n\n # 构建观测user\n while count < ObeNumber:\n a = np.random.randint(low=1, high=userNumber-1, size=1)\n b = User[a[0]]\n if b not in ObeUsers:\n ObeUsers.append(b)\n count += 1\n for ObeUser in ObeUsers:\n row = User.index(ObeUser) # 定位\n print(row)\n temp = ORI[ORI[\"user_id\"] == ObeUser][\"app_name\"].drop_duplicates().to_list()\n if ObeAppName in temp:\n state[0, row] = 1\n adjacent = np.array(adjacent)\n adjacent = np.reshape(adjacent, newshape=(100, 100))\n # print(\"shape is {} state shape is 
{}\".format(np.shape(degMatrix - adjacent), np.shape(state)))\n CT = r * (degMatrix - adjacent)\n predict = np.dot(CT, state.T)\n predict = np.reshape(predict, newshape=(1, 100))\n predict = state - predict\n print(\"predict is \", predict)\n newState = state.copy()\n for user in User:\n if user in ObeUsers:\n continue\n else:\n temp = ORI[ORI[\"user_id\"] == user][\"app_name\"].to_list()\n row = User.index(user)\n if ObeAppName in temp:\n newState[0, row] = 1\n print(\"new state is \", newState[:5])\n print(\"ORI state is \", state)\n\n\n\n pass\n\nif __name__ == \"__main__\":\n # print(np.random.randint(low=0, high=100-1, size=1)[0])\n GCNTest()\n # adjacent = adjacentMatrix()\n # degMatrix = deg(adjacent)\n # print(np.sum(adjacent[0]))\n # print(degMatrix[0,0])\n # print()\n # a = np.zeros(shape=(10, 10))\n # b = np.zeros(shape=(1, 10))\n # p = np.dot(a, b.T)\n # print(np.shape(p))\n pass\n","sub_path":"src/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"320223748","text":"#!/usr/bin/python3\n\nfrom time import sleep\n\nimport bluetooth\nimport threading\n\n# IS_MASTER = True\nIS_MASTER = False\nSERVER_MAC = 'CC:78:AB:50:B2:46'\nNUMBER_OF_ACTIONS = 5\n\n\ndef connect(server_mac, is_master=True):\n \"\"\"\n\n @param server_mac: MAC address of the master brick\n @param is_master: determines which brick acts as the server and waits for the other brick to connect\n @return: the communication socket between slave and master, the read socket file object and write socket file object\n \"\"\"\n port = 3\n if is_master:\n server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n server_sock.bind((server_mac, port))\n server_sock.listen(1)\n print('Listening...')\n client_sock, address = server_sock.accept()\n print(f'Accepted connection from {address}')\n return client_sock, client_sock.makefile('r'), client_sock.makefile('w')\n else:\n sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n print('Connecting...')\n sock.connect((server_mac, port))\n print('Connected to ', server_mac)\n return sock, sock.makefile('r'), sock.makefile('w')\n\n\ndef disconnect(sock):\n \"\"\"\n wrapper for closing sockets or socket file objects\n @param sock: socket or file object to be closed\n \"\"\"\n sock.close()\n\n\ndef run(server_mac, is_master=True, number_of_actions=5):\n \"\"\"\n main function of the application. 
Starts the Bluetooth threads, does work in the main thread.\n After the main thread has finished \"Doing something\", it closes the Bluetooth threads and disconnects the sockets.\n @param server_mac: MAC address of the master brick\n @param is_master: determines which brick acts as the server and waits for the other brick to connect\n @param number_of_actions: number of integers to exchange before shutting down\n \"\"\"\n sock, sock_in, sock_out = connect(server_mac, is_master)\n listener = threading.Thread(target=start_listening if is_master else listen,\n args=(sock_in, sock_out, number_of_actions))\n listener.start()\n for i in range(number_of_actions):\n print(f'[{str(i)}] Doing something...')\n sleep(1)\n listener.join()\n disconnect(sock_in)\n disconnect(sock_out)\n disconnect(sock)\n\n\ndef start_listening(sock_in, sock_out, number_of_actions):\n \"\"\"\n Function that writes the first message (i.e., this should be done by the master) before going to the listen function\n @param sock_in: socket file object used for reading data\n @param sock_out: socket file object used for writing data\n @param number_of_actions: number of integers expected to be sent and received\n \"\"\"\n i = 1\n sock_out.write(f'{str(i)}\\n')\n sock_out.flush()\n print(f'Sent {str(i)}')\n listen(sock_in, sock_out, number_of_actions)\n\n\ndef listen(sock_in, sock_out, number_of_actions):\n \"\"\"\n Function that waits for a message from the other party, increments the received integer and sends the result\n @param sock_in: socket file object used for reading data\n @param sock_out: socket file object used for writing data\n @param number_of_actions: number of integers expected to be sent and received\n \"\"\"\n print('Now listening...')\n stop_listening = False\n while not stop_listening:\n data = int(sock_in.readline())\n print(f'Received {str(data)}')\n if data == -1:\n print('Received stop from other party')\n break\n elif data == number_of_actions:\n print('Received last message, sending stop to other party')\n stop_listening = True\n data = -1\n else:\n data += 1\n sleep(1)\n sock_out.write(f'{str(data)}\\n')\n sock_out.flush()\n print(f'Sent {str(data)}')\n\n\nrun(SERVER_MAC, IS_MASTER, NUMBER_OF_ACTIONS)\n","sub_path":"examples/Bluetooth/bluetoothSlave.py","file_name":"bluetoothSlave.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"100521101","text":"\"\"\"Exercise: Functions and Objects Exercise-3\nImplement a function that converts the given testList = [1, -4, 8, -9]\ninto [1, 16, 64, 81]\"\"\"\n\n\ndef apply_to_each(l_a, f_a):\n    \"\"\":param l_a: the list of elements\n    :param f_a: the function applied to each element of l_a\n    :return: the list with f_a applied to each element\"\"\"\n    for i in range(len(l_a)):\n        l_a[i] = f_a(l_a[i])\n    return l_a\ndef square(l_a):\n    \"\"\"returns the square of a single element\"\"\"\n    return l_a*l_a\ndef main():\n    \"\"\"reads the input list, applies square to each element and prints the result\"\"\"\n    data = input()\n    data = data.split()\n    list1 = []\n    for j in data:\n        list1.append(int(j))\n    print(apply_to_each(list1, square))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"CSPP1/cspp1-practice/m9/Functions and Objects Exercise-3/functions_and_objects_3.py","file_name":"functions_and_objects_3.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
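# --- A hedged aside (not from the original exercise) on apply_to_each() above:
# in idiomatic Python the same element-wise transform is usually written without
# manual index bookkeeping. Both sketches below are equivalent to the fixed loop.
def apply_to_each_comprehension(values, func):
    """Return a new list instead of mutating the input."""
    return [func(v) for v in values]

def apply_to_each_inplace(values, func):
    """Mutate in place, mirroring the exercise's contract."""
    for i, v in enumerate(values):
        values[i] = func(v)
    return values

assert apply_to_each_comprehension([1, -4, 8, -9], lambda v: v * v) == [1, 16, 64, 81]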
+{"seq_id":"332886291","text":"import math\n\nclass Box(object):\n \"\"\"A box is defined by a length, width and height and an optional edge fillet radius.\n Attributes\n ----------\n length: float\n The length of the box (x).\n width: float\n The width of the box (y).\n height: float\n The height of the box (z).\n radius: float\n The radius of the edge fillet.\n Examples\n --------\n >>> from compas_vol.primitives import Box\n >>> box = Box(2,4,5)\n \"\"\"\n\n def __init__(self, length=3.0, width=2.0, height=1.0, radius=0.0):\n self._l = None\n self._w = None\n self._h = None\n self._r = None\n self.l = length\n self.w = width\n self.h = height\n self.r = radius\n\n @classmethod\n def box_from_edge(cls, edge):\n box = cls(edge, edge, edge, 0.0)\n return box\n\n # ==========================================================================\n # descriptors\n # ==========================================================================\n\n @property\n def l(self):\n \"\"\"float: The length of the box.\"\"\"\n return self._l\n\n @l.setter\n def l(self, l):\n self._l = float(l)\n\n @property\n def w(self):\n \"\"\"float: The width of the box.\"\"\"\n return self._w\n\n @w.setter\n def w(self, w):\n self._w = float(w)\n\n @property\n def h(self):\n \"\"\"float: The height of the box.\"\"\"\n return self._h\n\n @h.setter\n def h(self, h):\n self._h = float(h)\n\n @property\n def r(self):\n \"\"\"float: The radius of the edge fillet.\"\"\"\n return self._r\n\n @r.setter\n def r(self, r):\n self._r = float(r)\n \n # ==========================================================================\n # distance function\n # ==========================================================================\n\n def get_distance(self, x, y, z):\n dx = abs(x) - (self._l / 2.0 - self._r)\n dy = abs(y) - (self._w / 2.0 - self._r)\n dz = abs(z) - (self._h / 2.0 - self._r)\n inside = max(dx, max(dy, dz)) - self._r\n dx = max(dx, 0)\n dy = max(dy, 0)\n dz = max(dz, 0)\n if inside + self._r < 0:\n return inside\n else:\n corner = math.sqrt(dx*dx + dy*dy + dz*dz) - self._r\n return corner\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n b = Box(25,20,15,7)\n for y in range(-15,15):\n s = ''\n for x in range(-30,30):\n d = b.get_distance(x*0.5,y,0)\n if d<0:\n s += 'x'\n else:\n s += '.'\n print(s)\n","sub_path":"src/compas_vol/old/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"438391118","text":"import argparse\nimport os\nimport zipfile\n\nparser = argparse.ArgumentParser(prog='unzip')\nparser.add_argument('zipfile')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('output', nargs='?', default='.')\ngroup.add_argument('-l', '--list', action='store_true', default=False)\nparser.add_argument('-f', '--force', action='store_true', default=False)\nargs = parser.parse_args()\n\nzp = zipfile.ZipFile(args.zipfile, 'r')\n\nif args.list:\n for f in zp.namelist():\n print(f)\n\nelse:\n if not args.force:\n for f in zp.namelist():\n f = os.path.join(args.output, f)\n\n if os.path.exists(f):\n raise FileExistsError('File exists: %s' % f)\n\n zp.extractall(args.output)\n","sub_path":"source/unzip.py","file_name":"unzip.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"379388128","text":"import pandas as pd\r\nimport numpy as 
np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef preprocess():\r\n train = pd.read_csv('Q:\\\\QlvC\\\\DataSets\\\\Influence\\\\train.csv')\r\n test = pd.read_csv('Q:\\\\QlvC\\\\DataSets\\\\Influence\\\\test.csv')\r\n target = train['Choice']\r\n train = train.drop(['Choice'], axis = 1)\r\n X = train.values\r\n y = target.values\r\n X_train, X_test, y_train, y_test = train_test_split(train.values, target.values, test_size=0.2)\r\n\r\n return X_train, X_test, y_train, y_test\r\ndef preprocess2():\r\n train = pd.read_csv('Q:\\\\QlvC\\\\DataSets\\\\Influence\\\\train.csv')\r\n test = pd.read_csv('Q:\\\\QlvC\\\\DataSets\\\\Influence\\\\test.csv')\r\n target = train['Choice']\r\n train = train.drop(['Choice'], axis=1)\r\n X = train.values\r\n y = target.values\r\n return X, y\r\n\r\n\r\n\r\n","sub_path":"ML/Influence/Influence_Preprocess.py","file_name":"Influence_Preprocess.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"188785172","text":"import datacube\nimport time\n\nfrom datetime import datetime\n\ndef dc_find_datasets(date_range, product='landsat_8_USARD', cloud_threshold = 20, fill_threshold = 30 ):\n\n\n dc = datacube.Datacube(app = 'my_app', config = '../datacube.conf')\n\n dss = dc.find_datasets(product=product, time=date_range, measurements=['red',])\n\n clear_datasets = []\n for item in dss:\n if float(item.metadata_doc['cloud_cover']) < cloud_threshold:\n if float(item.metadata_doc['fill']) < fill_threshold:\n clear_datasets.append(item)\n \n print(\"Number of Pretty Scenes\", len(clear_datasets))\n return clear_datasets\n\ndef compute_tile_chip_span(h,v,ch,cv,datasets):\n \n citem = datasets[0]\n citem.bounds\n\n l = citem.bounds.left\n\n b = citem.bounds.bottom\n\n r = citem.bounds.right\n \n t = citem.bounds.top\n\n x1 = l + (ch * 100) * 30\n x2 = l + ((ch + 1) * 100 - 1) * 30\n y1 = t - (cv * 100) * 30\n y2 = t - ((cv + 1) * 100 - 1) * 30\n return x1,x2,y1,y2\n\ndef dc_load_tile_chip(h,v,ch,cv,datasets,measurements,product='landsat_8_USARD'):\n\n x1,x2,y1,y2 = compute_tile_chip_span(h,v,ch,cv,datasets)\n dc = datacube.Datacube(app = 'dc_helper', config = '../datacube.conf')\n ds2 = dc.load(product=product, datasets=datasets, measurements=measurements,\n x=(x1,x2),y=(y1,y2), crs='epsg:5072',\n output_crs = 'epsg:5072',\n resolution = (-30,30))\n return (ds2)\n\n","sub_path":"noteLib/noteLib/dc_helper.py","file_name":"dc_helper.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441813323","text":"import pytest\nimport random\nimport mock\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom rest_framework.test import APIClient\nfrom reservation_test.models import Room, Reservation, Occupancy, Guest\n\nuser_count = 1\nguest_count = 1\n\n\ndef create_guest(request, db):\n \"\"\"Creates a guest.\"\"\"\n global guest_count\n guest = Guest.objects.create(\n first_name='test_first_name{}'.format(guest_count),\n last_name='test_last_name{}'.format(guest_count),\n birth_date=datetime(1985, 3, 2),\n email='test{}@example.com'.format(guest_count)\n )\n\n def fin():\n guest.delete()\n\n request.addfinalizer(fin)\n guest_count += 1\n return guest\n\n\nguest = pytest.fixture(create_guest)\nguest2 = pytest.fixture(create_guest)\n\n\ndef create_user(request, db):\n \"\"\"Creates a Django User.\"\"\"\n 
global user_count\n    test_password = 'test_password{}'.format(user_count)\n    user = User.objects.create_user(\n        username='test_username{}'.format(user_count),\n        email='test{}@example.com'.format(user_count),\n        password=test_password\n    )\n    user.password_plaintext = test_password\n\n    def fin():\n        user.delete()\n\n    request.addfinalizer(fin)\n    user_count += 1\n    return user\n\n\nuser = pytest.fixture(create_user)\nuser2 = pytest.fixture(create_user)\n\n\n@pytest.fixture\ndef client():\n    \"\"\"Creates a rest framework APIClient for testing endpoints.\"\"\"\n    return APIClient()\n\n\n@pytest.fixture\ndef client_logged_in(client, user):\n    \"\"\"Creates a rest framework APIClient and logs it in with the user fixture's credentials.\"\"\"\n    client.login(username=user.username,\n                 password=user.password_plaintext)\n    return client\n\n\n@pytest.fixture\ndef admin(user):\n    \"\"\"Creates an admin superuser\"\"\"\n    user.is_superuser = True\n    user.is_staff = True\n    user.save()\n    return user\n\n\n@pytest.fixture\ndef admin_logged_in(admin, client):\n    \"\"\"Creates an admin superuser and logs the client in with the admin's credentials.\"\"\"\n    client.login(username=admin.username,\n                 password=admin.password_plaintext)\n    return client\n\n\ndef create_reserved_room(request, db):\n    \"\"\"Fixture for creating reserved reservation_test.Room.\"\"\"\n    room = Room.objects.create(\n        number=random.randint(1, 101),\n        available=False\n    )\n\n    def fin():\n        room.delete()\n\n    request.addfinalizer(fin)\n    return room\n\n\nreserved_room = pytest.fixture(create_reserved_room)\nreserved_room2 = pytest.fixture(create_reserved_room)\n\n\ndef create_vacant_room(request, db):\n    \"\"\"Fixture for creating vacant reservation_test.Room.\"\"\"\n    room = Room.objects.create(\n        number=random.randint(1, 101),\n        available=True,\n        number_of_beds=2\n    )\n\n    def fin():\n        room.delete()\n\n    request.addfinalizer(fin)\n    return room\n\n\nvacant_room = pytest.fixture(create_vacant_room)\nvacant_room2 = pytest.fixture(create_vacant_room)\n\n\ndef create_past_reservation(request, db, vacant_room, guest):\n    \"\"\"Fixture for creating reservation_test.Reservation.\"\"\"\n    with mock.patch('django.utils.timezone.now') as mock_now:\n        three_days_ago = timezone.make_aware(datetime.now() - timedelta(days=3))\n        two_days_ago = timezone.make_aware(datetime.now() - timedelta(days=2))\n        reservation = Reservation.objects.create(\n            guest=guest,\n            check_in_date=three_days_ago,\n            check_out_date=two_days_ago\n        )\n\n    def fin():\n        reservation.delete()\n\n    request.addfinalizer(fin)\n    return reservation\n\n\npast_reservation = pytest.fixture(create_past_reservation)\n\n\n
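# --- A hypothetical, self-contained sketch (not part of the original suite) of
# the factory-function pattern used throughout this conftest: one plain function
# registered under several fixture names, each with finalizer-driven cleanup.
def _create_tag(request, db):
    tag = {"label": "vip"}            # stand-in for a model instance

    def fin():
        tag.clear()                   # the finalizer runs at test teardown

    request.addfinalizer(fin)
    return tag

# Registering the same callable twice yields two independent fixture names:
# tag = pytest.fixture(_create_tag)
# tag2 = pytest.fixture(_create_tag)
#
# def test_two_tags(tag, tag2):
#     assert tag is not tag2          # separate instances per fixture name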
def create_ongoing_reservation(request, db, reserved_room, guest):\n    \"\"\"Fixture for creating reservation_test.Reservation.\"\"\"\n    # with mock.patch('django.utils.timezone.now') as mock_now:\n    two_days_ago = timezone.make_aware(datetime.now() - timedelta(days=2))\n    plus_five_days = timezone.make_aware(datetime.now() + timedelta(days=5))\n    reservation = Reservation.objects.create(\n        guest=guest,\n        check_in_date=two_days_ago,\n        check_out_date=plus_five_days\n    )\n\n    def fin():\n        reservation.delete()\n\n    request.addfinalizer(fin)\n    return reservation\n\n\nongoing_reservation = pytest.fixture(create_ongoing_reservation)\n\n\ndef create_future_reservation(request, db, vacant_room, guest):\n    \"\"\"Fixture for creating reservation_test.Reservation.\"\"\"\n    with mock.patch('django.utils.timezone.now') as mock_now:\n        three_days_from_now = timezone.make_aware(datetime.now() + timedelta(days=3))\n        five_days_from_now = timezone.make_aware(datetime.now() + timedelta(days=5))\n        reservation = Reservation.objects.create(\n            guest=guest,\n            check_in_date=three_days_from_now,\n            check_out_date=five_days_from_now\n        )\n\n    def fin():\n        reservation.delete()\n\n    request.addfinalizer(fin)\n    return reservation\n\n\nfuture_reservation = pytest.fixture(create_future_reservation)\n\n\ndef create_ongoing_occupancy(request, db, reserved_room):\n    \"\"\"Fixture for creating reservation_test.Occupancy.\"\"\"\n    # with mock.patch('django.utils.timezone.now') as mock_now:\n    two_days_ago = timezone.make_aware(datetime.now() - timedelta(days=2))\n    plus_five_days = timezone.make_aware(two_days_ago + timedelta(days=5))\n    occupancy = Occupancy.objects.create(\n        active=True,\n        room=reserved_room,\n        from_date=two_days_ago,\n        to_date=plus_five_days\n    )\n\n    def fin():\n        occupancy.delete()\n\n    request.addfinalizer(fin)\n    return occupancy\n\n\nongoing_occupancy = pytest.fixture(create_ongoing_occupancy)\n\n\ndef create_past_occupancy(request, db, vacant_room):\n    \"\"\"Fixture for creating reservation_test.Occupancy.\"\"\"\n    with mock.patch('django.utils.timezone.now') as mock_now:\n        five_days_ago = timezone.make_aware(datetime.now() - timedelta(days=5))\n        two_days_ago = timezone.make_aware(datetime.now() - timedelta(days=2))\n        occupancy = Occupancy.objects.create(\n            active=False,\n            room=vacant_room,\n            from_date=five_days_ago,\n            to_date=two_days_ago\n        )\n\n    def fin():\n        occupancy.delete()\n\n    request.addfinalizer(fin)\n    return occupancy\n\n\npast_occupancy = pytest.fixture(create_past_occupancy)\n","sub_path":"reservation_test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"308079697","text":"import pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nimport time\n\napi_key = '8OSSWAF34NN90RZF'\n\nts = TimeSeries(key=api_key, output_format='pandas')\ndata, meta_data = ts.get_intraday(symbol='MSFT', interval= '1min', outputsize='full')\n# print(data)\n\ni = 1\n#while i==1:\n#    data, meta_data = ts.get_intraday(symbol='MSFT', interval= '1min', outputsize='full')\n#    data.to_excel(\"output.xlsx\")\n#    time.sleep(60)#seconds, every minute we should get the info put into excel doc\n\nclose_data = data['4. 
close']\npercent_change = close_data.pct_change()\nprint(percent_change)\n\nlast_change = percent_change[-1]\nif abs(last_change) > 0.05:\n print('MSFT Alert:' + str(last_change))","sub_path":"Desktop/Lambda/AlphaScreener/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"285069699","text":"import os\n# version\nversion = \"0.4\"\n\n# setting here\ninput_format = [\"wav\",\"flac\"]\noutput_format = \"mp3\"\n\nos.system('mkdir result')\noutput_dir = os.getcwd()+\"/result/\"\n\nfor file in os.listdir():\n for ext in input_format:\n if file.endswith(ext):\n output_filename=file.replace(ext,output_format)\n args = \"ffmpeg -i \\\"\"+file+\"\\\" -b:a 320k \\\"\"+output_dir+output_filename+\"\\\"\"\n print (args)\n os.system(args)","sub_path":"cmusic.py","file_name":"cmusic.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497687341","text":"from migen.fhdl.std import *\nfrom migen.fhdl.module import FinalizeError\nfrom migen.genlib.misc import optree\nfrom migen.genlib import roundrobin\nfrom migen.bus.transactions import *\nfrom migen.sim.generic import Proxy\n\n(SLOT_EMPTY, SLOT_PENDING, SLOT_PROCESSING) = range(3)\n\nclass Slot(Module):\n\tdef __init__(self, aw, time):\n\t\tself.time = time\n\t\tself.state = Signal(2)\n\t\tself.we = Signal()\n\t\tself.adr = Signal(aw)\n\t\tif self.time:\n\t\t\tself.mature = Signal()\n\t\t\n\t\tself.allocate = Signal()\n\t\tself.allocate_we = Signal()\n\t\tself.allocate_adr = Signal(aw)\n\t\tself.process = Signal()\n\t\tself.call = Signal()\n\t\n\t\t###\n\n\t\tself.sync += [\n\t\t\tIf(self.allocate,\n\t\t\t\tself.state.eq(SLOT_PENDING),\n\t\t\t\tself.we.eq(self.allocate_we),\n\t\t\t\tself.adr.eq(self.allocate_adr)\n\t\t\t),\n\t\t\tIf(self.process, self.state.eq(SLOT_PROCESSING)),\n\t\t\tIf(self.call, self.state.eq(SLOT_EMPTY))\n\t\t]\n\t\tif self.time:\n\t\t\tcounter = Signal(max=self.time+1)\n\t\t\tself.comb += self.mature.eq(counter == 0)\n\t\t\tself.sync += [\n\t\t\t\tIf(self.allocate,\n\t\t\t\t\tcounter.eq(self.time)\n\t\t\t\t).Elif(counter != 0,\n\t\t\t\t\tcounter.eq(counter - 1)\n\t\t\t\t)\n\t\t\t]\n\nclass Port(Module):\n\tdef __init__(self, hub, base, nslots):\n\t\tself.hub = hub\n\t\tself.base = base\n\t\tself.submodules.slots = [Slot(self.hub.aw, self.hub.time) for i in range(nslots)]\n\t\t\n\t\t# request issuance\n\t\tself.adr = Signal(self.hub.aw)\n\t\tself.we = Signal()\n\t\tself.stb = Signal()\n\t\t# tag_issue is created by finalize()\n\t\tself.ack = Signal()\n\t\t\n\t\t# request completion\n\t\tself.call = Signal()\n\t\t# tag_call is created by finalize()\n\t\tself.dat_r = Signal(self.hub.dw)\n\t\tself.dat_w = Signal(self.hub.dw)\n\t\tself.dat_wm = Signal(self.hub.dw//8)\n\n\tdef do_finalize(self):\n\t\tnslots = len(self.slots)\n\t\tif nslots > 1:\n\t\t\tself.tag_issue = Signal(max=nslots)\n\t\tself.tag_call = Signal(self.hub.tagbits)\n\n\t\t# allocate\n\t\tfor s in self.slots:\n\t\t\tself.comb += [\n\t\t\t\ts.allocate_we.eq(self.we),\n\t\t\t\ts.allocate_adr.eq(self.adr)\n\t\t\t]\n\t\tchoose_slot = None\n\t\tneeds_tags = len(self.slots) > 1\n\t\tfor n, s in reversed(list(enumerate(self.slots))):\n\t\t\tchoose_slot = If(s.state == SLOT_EMPTY,\n\t\t\t\ts.allocate.eq(self.stb),\n\t\t\t\tself.tag_issue.eq(n) if needs_tags else None\n\t\t\t).Else(choose_slot)\n\t\tself.comb += choose_slot\n\t\tself.comb += 
self.ack.eq(optree(\"|\", \n\t\t\t[s.state == SLOT_EMPTY for s in self.slots]))\n\n\t\t# call\n\t\tself.comb += [s.call.eq(self.get_call_expression(n))\n\t\t\tfor n, s in enumerate(self.slots)]\n\t\n\tdef get_call_expression(self, slotn=0):\n\t\tif not self.finalized:\n\t\t\traise FinalizeError\n\t\treturn self.call \\\n\t\t\t& (self.tag_call == (self.base + slotn))\n\nclass Hub(Module):\n\tdef __init__(self, aw, dw, time=0):\n\t\tself.aw = aw\n\t\tself.dw = dw\n\t\tself.time = time\n\n\t\tself.ports = []\n\t\tself._next_base = 0\n\t\tself.tagbits = 0\n\t\t\n\t\tself.call = Signal()\n\t\t# tag_call is created by do_finalize()\n\t\tself.dat_r = Signal(self.dw)\n\t\tself.dat_w = Signal(self.dw)\n\t\tself.dat_wm = Signal(self.dw//8)\n\t\n\tdef get_port(self, nslots=1):\n\t\tif self.finalized:\n\t\t\traise FinalizeError\n\t\tnew_port = Port(self, self._next_base, nslots)\n\t\tself._next_base += nslots\n\t\tself.tagbits = bits_for(self._next_base-1)\n\t\tself.ports.append(new_port)\n\t\tself.submodules += new_port\n\t\treturn new_port\n\t\n\tdef do_finalize(self):\n\t\tself.tag_call = Signal(self.tagbits)\n\t\tfor port in self.ports:\n\t\t\tself.comb += [\n\t\t\t\tport.call.eq(self.call),\n\t\t\t\tport.tag_call.eq(self.tag_call),\n\t\t\t\tport.dat_r.eq(self.dat_r)\n\t\t\t]\n\t\tself.comb += [\n\t\t\tself.dat_w.eq(optree(\"|\", [port.dat_w for port in self.ports])),\n\t\t\tself.dat_wm.eq(optree(\"|\", [port.dat_wm for port in self.ports]))\n\t\t]\n\t\n\tdef get_slots(self):\n\t\tif not self.finalized:\n\t\t\traise FinalizeError\n\t\treturn sum([port.slots for port in self.ports], [])\n\nclass Tap(Module):\n\tdef __init__(self, hub, handler=print):\n\t\tself.hub = hub\n\t\tself.handler = handler\n\t\tself.tag_to_transaction = dict()\n\t\tself.transaction = None\n\t\n\tdef do_simulation(self, s):\n\t\thub = Proxy(s, self.hub)\n\t\t\n\t\t# Pull any data announced in the previous cycle.\n\t\tif isinstance(self.transaction, TWrite):\n\t\t\tself.transaction.data = hub.dat_w\n\t\t\tself.transaction.sel = ~hub.dat_wm\n\t\t\tself.handler(self.transaction)\n\t\t\tself.transaction = None\n\t\tif isinstance(self.transaction, TRead):\n\t\t\tself.transaction.data = hub.dat_r\n\t\t\tself.handler(self.transaction)\n\t\t\tself.transaction = None\n\t\t\n\t\t# Tag issue. 
Transaction objects are created here\n\t\t# and placed into the tag_to_transaction dictionary.\n\t\tfor tag, slot in enumerate(self.hub.get_slots()):\n\t\t\tif s.rd(slot.allocate):\n\t\t\t\tadr = s.rd(slot.allocate_adr)\n\t\t\t\twe = s.rd(slot.allocate_we)\n\t\t\t\tif we:\n\t\t\t\t\ttransaction = TWrite(adr)\n\t\t\t\telse:\n\t\t\t\t\ttransaction = TRead(adr)\n\t\t\t\ttransaction.latency = s.cycle_counter\n\t\t\t\tself.tag_to_transaction[tag] = transaction\n\t\t\n\t\t# Tag call.\n\t\tif hub.call:\n\t\t\ttransaction = self.tag_to_transaction[hub.tag_call]\n\t\t\ttransaction.latency = s.cycle_counter - transaction.latency + 1\n\t\t\tself.transaction = transaction\n\nclass Initiator(Module):\n\tdef __init__(self, generator, port):\n\t\tself.generator = generator\n\t\tself.port = port\n\t\tself.done = False\n\t\tself._exe = None\n\t\n\tdef _execute(self, s, generator, port):\n\t\twhile True:\n\t\t\ttransaction = next(generator)\n\t\t\ttransaction_start = s.cycle_counter\n\t\t\tif transaction is None:\n\t\t\t\tyield\n\t\t\telse:\n\t\t\t\t# tag phase\n\t\t\t\ts.wr(port.adr, transaction.address)\n\t\t\t\tif isinstance(transaction, TWrite):\n\t\t\t\t\ts.wr(port.we, 1)\n\t\t\t\telse:\n\t\t\t\t\ts.wr(port.we, 0)\n\t\t\t\ts.wr(port.stb, 1)\n\t\t\t\tyield\n\t\t\t\twhile not s.rd(port.ack):\n\t\t\t\t\tyield\n\t\t\t\tif hasattr(port, \"tag_issue\"):\n\t\t\t\t\ttag = s.rd(port.tag_issue)\n\t\t\t\telse:\n\t\t\t\t\ttag = 0\n\t\t\t\ttag += port.base\n\t\t\t\ts.wr(port.stb, 0)\n\t\t\t\t\n\t\t\t\t# data phase\n\t\t\t\twhile not (s.rd(port.call) and (s.rd(port.tag_call) == tag)):\n\t\t\t\t\tyield\n\t\t\t\tif isinstance(transaction, TWrite):\n\t\t\t\t\ts.wr(port.dat_w, transaction.data)\n\t\t\t\t\ts.wr(port.dat_wm, ~transaction.sel)\n\t\t\t\t\tyield\n\t\t\t\t\ts.wr(port.dat_w, 0)\n\t\t\t\t\ts.wr(port.dat_wm, 0)\n\t\t\t\telse:\n\t\t\t\t\tyield\n\t\t\t\t\ttransaction.data = s.rd(port.dat_r)\n\t\t\t\ttransaction.latency = s.cycle_counter - transaction_start - 1\n\t\n\tdef do_simulation(self, s):\n\t\tif not self.done:\n\t\t\tif self._exe is None:\n\t\t\t\tself._exe = self._execute(s, self.generator, self.port)\n\t\t\ttry:\n\t\t\t\tnext(self._exe)\n\t\t\texcept StopIteration:\n\t\t\t\tself.done = True\n\nclass TargetModel:\n\tdef __init__(self):\n\t\tself.last_slot = 0\n\t\n\tdef read(self, address):\n\t\treturn 0\n\t\n\tdef write(self, address, data, mask):\n\t\tpass\n\t\n\t# Round-robin scheduling.\n\tdef select_slot(self, pending_slots):\n\t\tif not pending_slots:\n\t\t\treturn -1\n\t\tself.last_slot += 1\n\t\tif self.last_slot > max(pending_slots):\n\t\t\tself.last_slot = 0\n\t\twhile self.last_slot not in pending_slots:\n\t\t\tself.last_slot += 1\n\t\treturn self.last_slot\n\nclass Target(Module):\n\tdef __init__(self, model, hub):\n\t\tself.model = model\n\t\tself.hub = hub\n\t\tself._calling_tag = -1\n\t\tself._write_request_d = -1\n\t\tself._write_request = -1\n\t\tself._read_request = -1\n\t\n\tdef do_simulation(self, s):\n\t\tslots = self.hub.get_slots()\n\t\t\n\t\t# Data I/O\n\t\tif self._write_request >= 0:\n\t\t\tself.model.write(self._write_request,\n\t\t\t\ts.rd(self.hub.dat_w), s.rd(self.hub.dat_wm))\n\t\tif self._read_request >= 0:\n\t\t\ts.wr(self.hub.dat_r, self.model.read(self._read_request))\n\t\t\t\n\t\t# Request pipeline\n\t\tself._read_request = -1\n\t\tself._write_request = self._write_request_d\n\t\tself._write_request_d = -1\n\t\t\n\t\t# Examine pending slots and possibly choose one.\n\t\t# Note that we do not use the SLOT_PROCESSING state here.\n\t\t# Selected slots are immediately 
called.\n\t\tpending_slots = set()\n\t\tfor tag, slot in enumerate(slots):\n\t\t\tif tag != self._calling_tag and s.rd(slot.state) == SLOT_PENDING:\n\t\t\t\tpending_slots.add(tag)\n\t\tslot_to_call = self.model.select_slot(pending_slots)\n\t\t\n\t\t# Call slot.\n\t\tif slot_to_call >= 0:\n\t\t\tslot = slots[slot_to_call]\n\t\t\ts.wr(self.hub.call, 1)\n\t\t\ts.wr(self.hub.tag_call, slot_to_call)\n\t\t\tself._calling_tag = slot_to_call\n\t\t\tif s.rd(slot.we):\n\t\t\t\tself._write_request_d = s.rd(slot.adr)\n\t\t\telse:\n\t\t\t\tself._read_request = s.rd(slot.adr)\n\t\telse:\n\t\t\ts.wr(self.hub.call, 0)\n\t\t\tself._calling_tag = -1\n","sub_path":"migen/bus/asmibus.py","file_name":"asmibus.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"322193887","text":"import logging\nfrom logging import handlers\nfrom .filepublic import filepath\n\n\nclass MyLogger:\n    def get_level(self, level_str):\n        level = {\n            'debug': logging.DEBUG,\n            'info': logging.INFO,\n            'warn': logging.WARNING,\n            'error': logging.ERROR\n        }\n        level_str = level_str.lower()\n        return level.get(level_str)\n\n    def __init__(self, file_name, level='info', backCount=5, when='D'):\n        logger = logging.getLogger()  # instantiate a logger object first (think of it as setting up the office)\n        logger.setLevel(self.get_level(level))  # set the logging level\n        cl = logging.StreamHandler()  # handler responsible for console output\n        bl = handlers.TimedRotatingFileHandler(filename=file_name, when=when, interval=1, backupCount=backCount,\n                                               encoding='utf-8')\n        fmt = logging.Formatter('%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n        cl.setFormatter(fmt)  # set the log format for console output\n        bl.setFormatter(fmt)  # set the log format written to the file\n        logger.addHandler(cl)\n        logger.addHandler(bl)\n        self.logger = logger\n\n\npath = filepath('logs', 'atp.logs')  # build the absolute path of the log file\natp_log = MyLogger(path, 'info').logger  # instantiate once here so callers don't have to instantiate it again\n","sub_path":"common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"345554435","text":"#!/usr/bin/env python\n\"\"\" \nUseful basic math tools for localization from angles.\n\"\"\"\n\nfrom math import atan2, pi, sqrt, sin, cos\nimport numpy as np\n\n# helper used by the from_X_to_Y_pi converters below\ndef change_angles(method, theta, tol=1e-10):\n    try:\n        theta_new = np.zeros(theta.shape)\n        for i, thet in enumerate(theta):\n            try:\n                # theta is vector\n                theta_new[i] = method(thet, tol)\n            except:\n                # theta is matrix\n                for j, th in enumerate(thet):\n                    try:\n                        theta_new[i, j] = method(th, tol)\n                    except:\n                        # theta is tensor\n                        for k, t in enumerate(th):\n                            theta_new[i, j, k] = method(t, tol)\n        return theta_new\n    except:\n        return method(theta, tol)\n\n\ndef from_0_to_pi(theta):\n    def from_0_to_pi_scalar(theta, tol):\n        theta = from_0_to_2pi(theta)\n        theta = min(theta, 2 * pi - theta)\n        assert theta >= 0 and theta <= pi, \"{} not in [0, pi]\".format(theta)\n        return theta\n    return change_angles(from_0_to_pi_scalar, theta)\n\n\ndef from_0_to_2pi(theta, tol=1e-10):\n    def from_0_to_2pi_scalar(theta, tol):\n        theta = theta % (2 * pi)\n        # eliminate numerical issues of % function\n        if abs(theta - 2 * pi) < tol:\n            theta = theta - 2 * pi\n        if theta < 0:\n            theta = 2 * pi + theta\n        assert theta >= 0 and theta <= 2 * \\\n            pi, \"{} not in [0, 2pi]\".format(theta)\n        return theta\n    return change_angles(from_0_to_2pi_scalar, theta, tol)\n\n\ndef get_absolute_angle(Pi, Pj):\n    \"\"\" Get absolute angle of edge from Pi to Pj, as seen at Pi, clockwise direction. 
\"\"\"\n if (Pi == Pj).all():\n return 0\n y = Pj[1] - Pi[1]\n x = Pj[0] - Pi[0]\n theta_ij = atan2(y, x)\n return from_0_to_2pi(theta_ij)\n\n\ndef get_inner_angle(Pk, Pij):\n \"\"\" Get inner angle from point Pk to pair of points Pij, between 0 and pi. \"\"\"\n theta_ki = get_absolute_angle(Pk, Pij[0])\n theta_kj = get_absolute_angle(Pk, Pij[1])\n theta = abs(theta_ki - theta_kj)\n return from_0_to_pi(theta)\n\n\ndef rmse_2pi(x, xhat):\n \"\"\" Calcualte rmse between vector or matrix x and xhat, ignoring modulo of 2pi.\"\"\"\n real_diff = from_0_to_pi(x - xhat)\n np.square(real_diff, out=real_diff)\n sum_ = np.sum(real_diff)\n return sqrt(sum_ / len(x))\n\n\ndef get_point(theta_ik, theta_jk, Pi, Pj):\n \"\"\" Calculate coordinates of point Pk given two points Pi, Pj and inner angles. \n\n :param theta_ik: Inner angle at Pi to Pk.\n :param theta_jk: Inner angle at Pj to Pk.\n :param Pi: Coordinates of point Pi.\n :param Pj: Coordinates of point Pj.\n\n :return: Coordinate of point Pk.\n\n \"\"\"\n A = np.array([[sin(theta_ik), -cos(theta_ik)],\n [sin(theta_jk), -cos(theta_jk)]])\n B = np.array([[sin(theta_ik), -cos(theta_ik), 0, 0],\n [0, 0, sin(theta_jk), -cos(theta_jk)]])\n p = np.r_[Pi, Pj]\n Pk = np.linalg.solve(A, np.dot(B, p))\n return Pk\n\n\ndef get_theta_tensor(theta, corners, N):\n \"\"\" Convert vectorized thetas to tensor form. \"\"\"\n theta_tensor = np.zeros([N, N, N])\n for k, idx in enumerate(corners):\n theta_tensor[int(idx[0]), int(idx[1]), int(idx[2])] = theta[k]\n theta_tensor[int(idx[0]), int(idx[2]), int(idx[1])] = theta[k]\n return theta_tensor\n\n\ndef get_index(corners, Pk, Pij):\n \"\"\" Get index mask corresponding to angle at corner Pk with Pi, Pj. \"\"\"\n angle1 = [Pk, Pij[0], Pij[1]]\n angle2 = [Pk, Pij[1], Pij[0]]\n index = np.bitwise_or(corners == angle1, corners == angle2)\n return index.all(axis=1)\n","sub_path":"pylocus/basics_angles.py","file_name":"basics_angles.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389449924","text":"import pandas as pd\nimport numpy as np\nfrom transformers import BertTokenizer, BertForQuestionAnswering\nimport torch\nfrom nltk import tokenize\n\nclass DocReader():\n \"\"\"\n Uses Hungging Face BertForQuestionAnswering\n\n Parameters\n ----------\n model : path to folder containing pytorch.bin, bert_config.json and vocab.txt\n or pretrained model\n lowercase : boolean\n Convert all characters to lowercase before tokenizing. 
(default is True)\n \n tokenizer : default is BertTokenizer\n \n \"\"\"\n\n def __init__(self, model:str=None, lowercase=True, tokenizer=BertTokenizer):\n\n self.lowercase = lowercase\n self.tokenizer = tokenizer.from_pretrained(model)\n self.model = BertForQuestionAnswering.from_pretrained(model)\n \n\n def predict(self, \n df: pd.DataFrame = None,\n query: str = None,\n n_best: int =3):\n \n doc_text = df['paragraphs']\n self.n_best = n_best\n \n if(self.lowercase):\n query = query.lower()\n \n # num docs_index must be equal to top_n\n doc_index = list(doc_text.index)\n \n answers = []\n for df_index in doc_index:\n \n if(self.lowercase):\n doc_lines = doc_text[df_index].lower()\n else:\n doc_lines = doc_text[df_index]\n \n doc_lines = tokenize.sent_tokenize(doc_lines)\n \n doc_answers = []\n for lines in doc_lines:\n input_ids = self.tokenizer.encode(query, lines)\n #print(lines, input_ids)\n token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]\n start_scores, end_scores = self.model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))\n\n all_tokens = self.tokenizer.convert_ids_to_tokens(input_ids)\n answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])\n \n #it is better to decode the tokens at later stage\n #answer = all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]\n #answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer))\n entry = {}\n #print(answer)\n start_scores = start_scores.detach().numpy()\n end_scores = end_scores.detach().numpy()\n entry['answer'] = answer\n entry['score'] = (max(start_scores[0]), max(end_scores[0]))\n entry['index'] = df_index\n entry['sentence'] = lines\n doc_answers.append(entry)\n \n best_doc_ans = [entry['score'][0]+entry['score'][1] for entry in doc_answers]\n ans_index = np.argsort(best_doc_ans)\n \n # take n_best answers per document based on max(start_scores+end_scores)\n #it is possible to improve by taking different metric\n for ans in range(1,self.n_best+1):\n answers.append(doc_answers[ans_index[-ans]])\n \n \n best_ans = [entry['score'][0]+entry['score'][1] for entry in answers]\n ans_index = np.argsort(best_ans) \n \n n_best_answers = []\n for ans in range(1,self.n_best+1):\n n_best_answers.append(answers[ans_index[-ans]])\n \n return(n_best_answers)\n \n \n \n def best_answer(self, answers):\n ans_dict = {}\n final_answer = {}\n ANS_THRESH = 2.0\n max_score = answers[0]['score'][0]+answers[0]['score'][1]\n \n for ans in answers:\n score = ans['score'][0]+ans['score'][1]\n if score > max_score - ANS_THRESH:\n start_end = ans['answer'].split()\n if(len(start_end)>0):\n ans_key = (start_end[0], start_end[-1])\n ans_dict[ans_key] = ans_dict.get(ans_key,0)+1\n \n inverse = [value for key, value in ans_dict.items()]\n inverse.sort()\n ans_list = [key for key, value in ans_dict.items() if(value == inverse[-1])]\n \n max_score = float('-inf')\n for ans in answers:\n start_end = ans['answer'].split()\n ans_key = (start_end[0], start_end[-1])\n for item in ans_list:\n if(ans_key == item):\n score = ans['score'][0]+ans['score'][1]\n if(score > max_score):\n ans_ids = self.tokenizer.convert_tokens_to_ids(start_end)\n final_answer['answer'] = self.tokenizer.decode(ans_ids)\n final_answer['index'] = ans['index']\n final_answer['sentence'] = ans['sentence']\n \n \n 
return(final_answer)\n","sub_path":"helper/DocReader.py","file_name":"DocReader.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465864831","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Monday July 09 11:36:59 2018\n\n@author: Chong Xue\n\n Software License Agreement (BSD License)\n\n Copyright (c) 2018, CAIP Co., Ltd.\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n * Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n \n\"\"\"\n# author: Chong Xue\n\nfrom __future__ import division\nimport rospy\nimport math\nimport tf\nimport moveit_commander\nfrom std_msgs.msg import Bool, String, Int64\nfrom std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse\nfrom elfin_robot_msgs.srv import SetString, SetStringRequest, SetStringResponse, SetFloat64, SetFloat64s\nfrom elfin_robot_msgs.srv import SetInt16, SetInt16Request, SetFloat64Request, SetFloat64sRequest\nimport wx\nfrom sensor_msgs.msg import JointState\nfrom actionlib import SimpleActionClient\nfrom control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal\nfrom control_msgs.msg import JointTrajectoryControllerState, JointsFloat64\nfrom geometry_msgs.msg import PoseStamped, PoseArray, Pose\nfrom trajectory_msgs.msg import trajectory_msgs\nimport threading\nimport dynamic_reconfigure.client\nfrom dynamic_reconfigure.srv import Reconfigure, ReconfigureRequest\nfrom dynamic_reconfigure.msg import DoubleParameter, Config\nimport time\nimport sys\n\ndef testcaip(num, num2, num3, num4, num5, num6):\n #print(num)\n caip = CaipElfin()\n #print(caip.JointState)\n caip.listen()\n print('start')\n for i in range(0,10):\n #print('come in function \"call_ref_coordinate()\"')\n time.sleep(0.4)\n #caip.want_end_coordinate()\n #print(caip.EndCoordinate)\n caip.want_stop()\n \n name=['elfin_joint1', 'elfin_joint2', 'elfin_joint3',\n 'elfin_joint4', 'elfin_joint5', 'elfin_joint6']\n pos = [i*5 / 180 * math.pi] * 6\n #print(num)\n if num == 0:\n caip.do_joints_goal(name,pos)\n elif num == 1:\n caip.do_cart_goal(0.10 + i * 0.05, 0.10 + 
i * 0.06, 0.7 - 0.06 * i, 0,0,0,1 )\n elif num == 2:\n caip.set_cart_path()\n caip.do_cart_path()\n elif num == 3:\n caip.want_joint(num2)\n caip.want_joint(num3)\n elif num == 4:\n caip.want_home()\n elif num == 5:\n caip.want_cart(num2)\n elif num == 6:\n caip.do_joint_cmd(num2)\n print(caip.get_joints_state)\n\n caip.want_stop()\n print('end')\n pass\n\nclass Tcs_robot():\n\n def __init__(self):\n # The current status of the joints.\n self.JointState = JointTrajectoryControllerState()\n\n # The servo power's status of the robot.\n self.ServoPowerState = Bool()\n\n # The fault power's status of the robot.\n self.PowerFaultState = Bool()\n\n # The reference coordinate in the calculations of the elfin_basic_api node\n self.RefCoordinate = String()\n\n # The end coordinate in the calculations of the elfin_basic_api node\n self.EndCoordinate = String()\n\n #The value of the dynamic parameters of elfin_basic_api, e.g. velocity scaling.\n self.DynamicArgs = Config()\n\n # get the reference coordinate name of elfin_basic_api from the response of this service.\n self.call_ref_coordinate = rospy.ServiceProxy('elfin_basic_api/get_reference_link', SetBool)\n self.call_ref_coordinate_req = SetBoolRequest()\n\n # get the current position of elfin_ros_control from the response of this service.\n self.call_current_position = rospy.ServiceProxy('elfin_ros_control/elfin/get_current_position', SetBool)\n self.call_current_position_req = SetBoolRequest()\n\n # call service recognize_position of elfin_ros_control.\n self.call_recognize_position = rospy.ServiceProxy('elfin_ros_control/elfin/recognize_position', SetBool)\n self.call_recognize_position_req = SetBoolRequest()\n self.call_recognize_position_req.data = True\n\n # get the end coordinate name of elfin_basic_api from the response of this service.\n self.call_end_coordinate = rospy.ServiceProxy('elfin_basic_api/get_end_link', SetBool)\n self.call_end_coordinate_req = SetBoolRequest()\n\n # for publishing joint goals to elfin_basic_api\n self.JointsPub = rospy.Publisher('elfin_basic_api/joint_goal', JointState, queue_size=1)\n self.JointsGoal = JointState()\n\n # for publishing cart goals to elfin_basic_api\n self.CartGoalPub = rospy.Publisher('elfin_basic_api/cart_goal', PoseStamped, queue_size=1)\n self.CartPos = PoseStamped()\n\n # for pub cart path\n self.CartPathPub = rospy.Publisher('elfin_basic_api/cart_path_goal', PoseArray, queue_size=1)\n self.CartPath = PoseArray()\n self.CartPath.header.stamp=rospy.get_rostime()\n self.CartPath.header.frame_id='elfin_base_link'\n\n # for pub one specific joint action to elfin_teleop_joint_cmd_no_limit\n self.JointCmdPub = rospy.Publisher('elfin_teleop_joint_cmd_no_limit', Int64 , queue_size=1)\n self.JointCmd = Int64()\n\n # for pub multi specific joint action to elfin_teleop_joint_cmd_no_limit\n self.JointsCmdPub = rospy.Publisher('changyuan_joints_cmd', JointsFloat64, queue_size=1)\n self.JointsCmd = JointsFloat64()\n\n # action client, send goal to move_group\n self.action_client = SimpleActionClient('elfin_module_controller/follow_joint_trajectory',\n FollowJointTrajectoryAction)\n self.action_goal = FollowJointTrajectoryGoal()\n #self.goal_list = JointTrajectoryPoint()\n self.goal_list = []\n\n self.joints_ = []\n self.ps_ = []\n\n self.listener = tf.TransformListener()\n self.robot=moveit_commander.RobotCommander()\n self.scene=moveit_commander.PlanningSceneInterface()\n self.group=moveit_commander.MoveGroupCommander('elfin_arm')\n\n self.ref_link_name=self.group.get_planning_frame()\n 
self.end_link_name=self.group.get_end_effector_link()\n\n        self.ref_link_lock=threading.Lock()\n        self.end_link_lock=threading.Lock()\n\n        \n        self.call_teleop_stop=rospy.ServiceProxy('elfin_basic_api/stop_teleop', SetBool)\n        self.call_teleop_stop_req=SetBoolRequest()\n\n\n        self.call_teleop_joint=rospy.ServiceProxy('elfin_basic_api/joint_teleop',SetInt16)\n        self.call_teleop_joint_req=SetInt16Request()\n\n\n        self.call_teleop_joints=rospy.ServiceProxy('elfin_basic_api/joints_teleops',SetFloat64s)\n        self.call_teleop_joints_req=SetFloat64sRequest()\n\n\n        self.call_teleop_cart=rospy.ServiceProxy('elfin_basic_api/cart_teleop', SetInt16)\n        self.call_teleop_cart_req=SetInt16Request()\n\n        \n        self.call_move_homing=rospy.ServiceProxy('elfin_basic_api/home_teleop', SetBool)\n        self.call_move_homing_req=SetBoolRequest()\n\n        self.elfin_driver_ns = 'elfin_ros_control/elfin/'  # driver namespace; matches the elfin_ros_control service paths used elsewhere in this file\n        self.call_reset=rospy.ServiceProxy(self.elfin_driver_ns+'clear_fault', SetBool)\n        self.call_reset_req=SetBoolRequest()\n        self.call_reset_req.data=True\n\n        self.call_power_on = rospy.ServiceProxy(self.elfin_driver_ns+'enable_robot', SetBool)\n        self.call_power_on_req=SetBoolRequest()\n        self.call_power_on_req.data=True\n\n        self.call_power_off = rospy.ServiceProxy(self.elfin_driver_ns+'disable_robot', SetBool)\n        self.call_power_off_req = SetBoolRequest()\n        self.call_power_off_req.data=True\n\n        self.call_velocity_setting = rospy.ServiceProxy('elfin_basic_api/set_velocity_scale', SetFloat64)\n        self.call_velocity_req = SetFloat64Request()\n        self._velocity_scale = 0.78\n        self.set_velocity_scale(self._velocity_scale)\n\n    # call for service of one joint operation\n    def want_joint(self,data):\n        self.call_teleop_joint_req.data = data\n        resp=self.call_teleop_joint.call(self.call_teleop_joint_req)\n        return resp.success, resp.message\n\n    # modified for the new api which added joints_teleop (service for multi-joint teleoperation)\n    # call for service of multi joints operation\n    def want_joints(self,data):\n        self.call_teleop_joints_req.data = data\n        resp=self.call_teleop_joints.call(self.call_teleop_joints_req)\n        return resp.success, resp.message\n\n    # call for service of clear_fault\n    def want_clear_fault(self):\n        resp = self.call_reset.call(self.call_reset_req)\n        return resp.success, resp.message\n\n    # call for service of power off\n    def want_disable_robot(self):\n        resp = self.call_power_off.call(self.call_power_off_req)\n        return resp.success, resp.message\n\n    # call for service of power on\n    def want_enable_robot(self):\n        resp = self.call_power_on.call(self.call_power_on_req)\n        return resp.success, resp.message\n\n    # call for service of power on\n    def want_power_on(self):\n        resp = self.call_power_on.call(self.call_power_on_req)\n        return resp.success, resp.message\n\n    # call for service of home\n    def want_home(self):\n        self.call_move_homing_req.data = True\n        resp=self.call_move_homing.call(self.call_move_homing_req)\n        return resp.success, resp.message\n\n    # call for service of cart teleoperation\n    def want_cart(self, data):\n        self.call_teleop_cart_req.data = data\n        resp=self.call_teleop_cart.call(self.call_teleop_cart_req)\n        return resp.success, resp.message\n\n    # call for service of stopping teleoperation\n    def want_stop(self):\n        self.call_teleop_stop_req.data=True\n        resp=self.call_teleop_stop.call(self.call_teleop_stop_req)\n        return resp.success, resp.message\n\n    # call for service of setting velocity\n    def set_velocity_scale(self, scale):\n        self.call_velocity_req.data = scale\n        resp = self.call_velocity_setting.call(self.call_velocity_req)\n        if resp.success is True:\n            self._velocity_scale = scale\n        return resp.success, 
resp.message\n\n # get current velocity_scale\n def get_velocity_scale(self):\n return self._velocity_scale\n pass\n\n # pub one joint cmd once one time\n def do_joint_cmd(self, data):\n cmd = Int64()\n cmd.data = data\n self.JointCmdPub.publish(cmd)\n pass\n\n # modify for new api whitch added joints_cmds(subcribe of multi-joint teleopration)\n def do_joints_cmd(self, data):\n cmd = JointsFloat64()\n cmd.data = data\n self.JointsCmdPub.publish(cmd)\n pass\n\n # stop elfin by actionlib\n def do_action_stop():\n self.action_client.wait_for_server()\n self.action_goal.trajectory.header.stamp.secs=0\n self.action_goal.trajectory.header.stamp.nsecs=0\n self.action_goal.trajectory.points=[]\n self.action_client.send_goal(self.action_goal)\n\n\n # send path arg\n def set_cart_path(self):\n ps=Pose()\n ps.position.x=0.264\n ps.position.y=0.125\n ps.position.z=1.143\n ps.orientation.x=0\n ps.orientation.y=0\n ps.orientation.z=0\n ps.orientation.w=1\n\n ps1=Pose()\n ps1.position.x=0.324\n ps1.position.y=0.245\n ps1.position.z=1.143\n ps1.orientation.x=0\n ps1.orientation.y=0\n ps1.orientation.z=0\n ps1.orientation.w=1\n\n ps2=Pose()\n ps2.position.x=0.504\n ps2.position.y=0.330\n ps2.position.z=1.143\n ps2.orientation.x=0\n ps2.orientation.y=0\n ps2.orientation.z=0\n ps2.orientation.w=1\n\n ps3=Pose()\n ps3.position.x=0.505\n ps3.position.y=0.225\n ps3.position.z=1.143\n ps3.orientation.x=0\n ps3.orientation.y=0\n ps3.orientation.z=0\n ps3.orientation.w=1\n\n self.CartPath.poses.append(ps)\n self.CartPath.poses.append(ps1)\n self.CartPath.poses.append(ps2)\n #self.CartPath.poses.append(ps3)\n #self.CartPath.poses.append(ps1)\n pass\n\n # pub cart path to api\n def do_cart_path(self,path=None):\n if path is not None:\n self.CartPathPub.publish(path)\n else:\n self.CartPathPub.publish(self.CartPath)\n\n def set_cart_pos(self,x,y,z,ox,oy,oz,ow):\n self.CartPos.header.stamp=rospy.get_rostime()\n self.CartPos.header.frame_id='elfin_base_link'\n self.CartPos.pose.position.x=x\n self.CartPos.pose.position.y=y\n self.CartPos.pose.position.z=z\n self.CartPos.pose.orientation.x=ox\n self.CartPos.pose.orientation.y=oy\n self.CartPos.pose.orientation.z=oz\n self.CartPos.pose.orientation.w=ow\n pass\n\n def set_action_goal(self, name = None, goal_list = None):\n if name is not None and goal_list is not None:\n self.action_goal.trajectory.joint_names = name\n self.action_goal.trajectory.points = goal_list\n else:\n point_goal=JointTrajectoryPoint()\n point_goal.positions=[0.4, -0.5]\n point_goal.velocities=[0, 0]\n point_goal.accelerations=[0, 0]\n point_goal.time_from_start=rospy.Time(secs=2, nsecs=0)\n self.action_goal.trajectory.points.append(point_goal)\n\n self.action_goal.trajectory.header.stamp.secs=0\n self.action_goal.trajectory.header.stamp.nsecs=0\n pass\n\n # do action goal through actionlib\n def do_action_goal(self, action_goal = None):\n self.action_client.wait_for_server()\n if action_goal is None:\n self.action_client.send_goal(self.action_goal)\n self.action_goal.trajectory.points=[]\n else:\n self.action_client.send_goal(self.action_goal)\n\n pass\n\n # pub cart goal\n def do_cart_goal(self,x=None, y=None, z=None, ox=None, oy=None, oz=None, ow=None):\n if x is None or y is None or z is None or ox is None or oy is None or oz is None or ow is None:\n pass\n else:\n self.set_cart_pos(x,y,z,ox,oy,oz,ow)\n self.CartGoalPub.publish(self.CartPos)\n pass\n\n def set_joints_goal(self, name=None, pos=None):\n if name is None:\n name=['elfin_joint1', 'elfin_joint2', 'elfin_joint3',\n 'elfin_joint4', 
\n\n    def set_joints_goal(self, name=None, pos=None):\n        if name is None:\n            name=['elfin_joint1', 'elfin_joint2', 'elfin_joint3',\n                  'elfin_joint4', 'elfin_joint5', 'elfin_joint6']\n            if pos is not None:\n                name = name[:len(pos)]\n            else:\n                pos = [0.4, 0.4, 0.4, 0.4, 0.4, 0.4]\n        self.JointsGoal.name = name\n        self.JointsGoal.position = pos\n        self.JointsGoal.header.stamp=rospy.get_rostime()\n\n    # pub joint goal\n    def do_joints_goal(self, name=None, pos=None):\n        if name is not None and pos is not None:\n            self.set_joints_goal(name, pos)\n        elif pos is not None:\n            # set_joints_goal() fills in the default names and truncates them to len(pos)\n            self.set_joints_goal(pos=pos)\n        self.JointsPub.publish(self.JointsGoal)\n\n    # call service of end_link\n    def want_end_coordinate(self):\n        self.call_end_coordinate_req.data = True\n        resp = self.call_end_coordinate.call(self.call_end_coordinate_req)\n        if resp.success:\n            self._end_coordinate(resp)\n        #print(resp)\n        return resp.success, self.EndCoordinate\n\n    # call service of ref_link\n    def want_ref_coordinate(self):\n        self.call_ref_coordinate_req.data = True\n        resp = self.call_ref_coordinate.call(self.call_ref_coordinate_req)\n        if resp.success:\n            self._ref_coordinate(resp)\n        return resp.success, self.RefCoordinate\n        #print(resp)\n\n    # call service of current positions\n    def want_current_pos(self):\n        self.call_current_position_req.data = True\n        resp = self.call_current_position.call(self.call_current_position_req)\n        return resp.success, resp.message\n\n    # call service of recognize positions\n    def want_recognize_pos(self):\n        self.call_recognize_position_req.data = True\n        resp = self.call_recognize_position.call(self.call_recognize_position_req)\n        return resp.success, resp.message\n\n    # callback for the joints_state subscriber\n    def _joints_state(self, data):\n        self.JointState = data\n        #print(self.JointState)\n\n    # get joints_state\n    def get_joints_state(self):\n        return self.JointState\n\n    # callback for the power-enable status subscriber\n    def _servo_power_state(self, data):\n        self.ServoPowerState = data.data\n        #print(data.data)\n\n    def get_servo_power_state(self):\n        return self.ServoPowerState\n\n    def _power_fault_state(self,data):\n        self.PowerFaultState = data.data\n        #print(self.PowerFaultState)\n\n    def get_power_fault_state(self):\n        return self.PowerFaultState\n\n    def _ref_coordinate(self,data):\n        if hasattr(data, 'message'):\n            self.RefCoordinate = data.message\n        else:\n            self.RefCoordinate = data.data\n        #print(self.RefCoordinate)\n\n    def get_ref_coordinate(self):\n        return self.RefCoordinate\n\n    def _end_coordinate(self,data):\n        if hasattr(data, 'message'):\n            self.EndCoordinate = data.message\n        else:\n            self.EndCoordinate = data.data\n        #print(self.EndCoordinate)\n\n    def get_end_coordinate(self):\n        return self.EndCoordinate\n\n    def _dynamic_args(self,data):\n        self.DynamicArgs = data\n        #print(self.DynamicArgs)\n    \n    def get_dynamic_args(self):\n        return self.DynamicArgs\n\n    # get joint angles\n    def get_joints(self):\n        return self.joints_\n\n    # get current position\n    def get_ps_(self):\n        return self.ps_
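\n\n    # --- Hedged usage sketch (not in the original file): once listen() below has been\n    # called, the subscriber callbacks keep the cached state fresh and these getters\n    # can simply be polled, e.g.\n    # if robot.get_servo_power_state() and not robot.get_power_fault_state():\n    #     print(robot.get_joints(), robot.get_ps_())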
\n\n    # monitor status\n    def monitor_status(self, evt):\n        self.key=[]\n        self.joints_ = []\n        self.ps_ = []\n        \n        current_joint_values=self.group.get_current_joint_values()\n        for i in range(len(current_joint_values)):\n            self.key.append(str(round(current_joint_values[i]*180/math.pi, 2)))\n            self.joints_.append(current_joint_values[i]*180/math.pi)\n\n        if self.ref_link_lock.acquire():\n            ref_link=self.ref_link_name\n            self.ref_link_lock.release()\n\n        if self.end_link_lock.acquire():\n            end_link=self.end_link_name\n            self.end_link_lock.release()\n\n        while not rospy.is_shutdown():\n            try:\n                self.listener.waitForTransform(ref_link, end_link, rospy.Time(0), rospy.Duration(100))\n                (xyz,qua) = self.listener.lookupTransform(ref_link, end_link, rospy.Time(0))\n                break\n            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n                continue\n\n        rpy=tf.transformations.euler_from_quaternion(qua)\n\n        self.key.append(str(round(xyz[0]*1000, 2)))\n        self.ps_.append(xyz[0]*1000)\n        self.key.append(str(round(xyz[1]*1000, 2)))\n        self.ps_.append(xyz[1]*1000)\n        self.key.append(str(round(xyz[2]*1000, 2)))\n        self.ps_.append(xyz[2]*1000)\n\n        self.key.append(str(round(rpy[0]*180/math.pi, 2)))\n        self.ps_.append(rpy[0]*180/math.pi)\n        self.key.append(str(round(rpy[1]*180/math.pi, 2)))\n        self.ps_.append(rpy[1]*180/math.pi)\n        self.key.append(str(round(rpy[2]*180/math.pi, 2)))\n        self.ps_.append(rpy[2]*180/math.pi)\n    \n    def listen(self):\n        rospy.Subscriber('elfin_arm_controller/state', JointTrajectoryControllerState, self._joints_state)\n        rospy.Subscriber('elfin_ros_control/elfin/enable_state', Bool, self._servo_power_state)\n        rospy.Subscriber('elfin_ros_control/elfin/fault_state', Bool, self._power_fault_state)\n        rospy.Subscriber('elfin_basic_api/reference_link_name', String, self._ref_coordinate)\n        rospy.Subscriber('elfin_basic_api/end_link_name', String, self._end_coordinate)\n        rospy.Subscriber('elfin_basic_api/parameter_updates', Config, self._dynamic_args)\n        rospy.Timer(rospy.Duration(nsecs=50000000), self.monitor_status)\n\n    \nif __name__=='__main__': \n    rospy.init_node('elfin_gui')\n\n    #app=wx.App(False) \n    #myframe=MyFrame(parent=None,id=-1) \n    #myframe.Show(True)\n\n    #myframe.listen()\n\n    #app.MainLoop()\n    num = 0\n    num2 = 0\n    num3 = 0\n    num4 = 0\n    num5 = 0\n    num6 = 0\n    if len(sys.argv) >= 2:\n        num = int(sys.argv[1])\n    if len(sys.argv) >= 3:\n        num2 = int(sys.argv[2])\n    if len(sys.argv) >= 4:\n        num3 = int(sys.argv[3])\n    if len(sys.argv) >= 5:\n        num4 = int(sys.argv[4])\n    if len(sys.argv) >= 6:\n        num5 = int(sys.argv[5])\n    if len(sys.argv) >= 7:\n        num6 = int(sys.argv[6])\n    testcaip(num, num2, num3, num4, num5, num6)\n    rospy.spin()\n","sub_path":"src/elfin_robot-kinetic-devel/caip_app/scripts/tcs_robot.py","file_name":"tcs_robot.py","file_ext":"py","file_size_in_byte":22096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"263755475","text":"x = int(input())\n\nn = 100\nans = 0\nfor i in range(1, 10**4):\n    n *= 1.01\n    n *= 10\n    n = n // 10\n    if n >= x:\n        ans = i\n        break\nprint(ans)","sub_path":"atcoder/abc/165/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589005638","text":"#!/usr/bin/env python \n\nimport os\nimport pandas as pd\n\nifile = \"../data/fcc_477_jun_2020.csv.gz\"\nofile = \"../data/fcc_477_jun_2020_redux.csv.gz\"\n\ncolumns = [\"BlockCode\", \"Provider_Id\", \"DBAName\", \"TechCode\", \"Consumer\", \"MaxAdDown\", \"MaxAdUp\"]\n\ncol_dict = {\"BlockCode\" : \"geoid\", \"Provider_Id\" : \"provider\", \n            \"DBAName\" : \"dba\", \"TechCode\" : \"tech\", \"Consumer\" : \"consumer\", \n            \"MaxAdDown\" : \"ad_dn\", \"MaxAdUp\" : \"ad_up\"}\n\nchunkerator = pd.read_csv(ifile, chunksize = 1000000, usecols = columns)\n\nif os.path.exists(ofile): os.remove(ofile)\n\nproviders = []\n\nfor ci, chunk in enumerate(chunkerator):\n\n    chunk.rename(columns = col_dict, inplace = True)\n    
chunk.query(\"consumer == 1\", inplace = True)\n chunk.query(\"geoid < 570000000000000\")\n chunk.query(\"tech != 60\", inplace = True) # satellite\n chunk.drop(\"consumer\", axis = 1, inplace = True)\n chunk[\"tech\"] = chunk.tech // 10\n chunk[\"dba\"] = chunk.dba.str.replace(\",\", \"\")\n\n print(ci, end = \" \", flush = True)\n chunk.to_csv(ofile, mode = \"a\", compression = \"gzip\",\n index = False, header = (ci == 0))\n\n","sub_path":"fcc/fcc_redux.py","file_name":"fcc_redux.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419140014","text":"# Largest palindrome product\n# Problem 4\n#\n# A palindromic number reads the same both ways.\n# The largest palindrome made from the product of two 2-digit numbers is\n# 9009 = 91 × 99.\n#\n# Find the largest palindrome made from the product of two 3-digit numbers.\n# 100*100 = 10 000, 999 * 999 = 998 001\n\n# You are the 315953rd person to have solved this problem.\n# 993 * 913 = 906609\n\ndef is_palindrome(word):\n '''\n :param word - must be string, because of test\n '''\n # rev_iterator = reversed(word)\n # print(str(rev_iterator))\n if word[::-1] == word:\n return True\n else:\n return False\n\ndef largest_palindrome_product(digits=2):\n start = int(10 ** digits / 10)\n end = start * 10 - 1\n biggest_palindrome = 0\n # chceme jit od nejvyssiho\n for x in range(end, start-1,-1):\n for y in range(end, start-1,-1):\n product = x * y\n # print(product)\n if is_palindrome(str(product)):\n if product > biggest_palindrome:\n biggest_palindrome = product\n print(x, \"*\", y, \"=\", product)\n return biggest_palindrome\n\ndef palindromes_of_product(digits=2):\n start = int(10**digits/10)\n end = start * 10 -1\n print(start, end)\n\n for i in range(start, end):\n # print(i)\n pass\n\n\ndef find_palindromes(start=10000, end=998001):\n palindromes = []\n for i in range(start, end+1):\n if is_palindrome(str(i)) == True:\n palindromes.append(i)\n return palindromes\n\n\n# palindromes_of_product()\nlargest_palindrome_product(3)\n\n# palindromes = find_palindromes()\n# for i in palindromes:\n# print(i)\n# print(len(palindromes))","sub_path":"uceni/euler/problem004.py","file_name":"problem004.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464950993","text":"from s3mesh.mesh import MeshMessage\nfrom s3mesh.monitoring.event.forward import ForwardMessageEvent\n\n\nclass S3Uploader:\n def __init__(self, s3_client, bucket_name: str):\n self._s3_client = s3_client\n self._bucket_name = bucket_name\n\n def upload(self, message: MeshMessage, forward_message_event: ForwardMessageEvent):\n s3_file_name = message.file_name.replace(\" \", \"_\")\n key = f\"{message.date_delivered.strftime('%Y/%m/%d')}/{s3_file_name}\"\n self._s3_client.upload_fileobj(message, self._bucket_name, key)\n forward_message_event.record_s3_key(key)\n","sub_path":"src/s3mesh/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218535607","text":"import sys\nimport pymongo\nimport pprint\nimport ssl\nsys.path.append('../../')\nimport nba_net.core.networks as net\nimport torch\n\n\nDB_URL = \"mongodb://localhost:27017/\"\n\n\ndef filter_data(game_stats):\n if len(game_stats) > 30:\n raise ValueError(\"This should never happen\")\n home_players = []\n away_players = []\n for p 
in game_stats:\n        if game_stats[p][\"home\"] == 1:\n            home_players.append(game_stats[p])\n        else:\n            away_players.append(game_stats[p])\n    hp = sorted(home_players, key=lambda k: k[\"total_sec\"], reverse=True)\n    ap = sorted(away_players, key=lambda k: k[\"total_sec\"], reverse=True)\n\n    for pd in hp:\n        del pd[\"home\"]\n        del pd[\"total_sec\"]\n        del pd[\"player_id\"]\n\n    for pd in ap:\n        del pd[\"home\"]\n        del pd[\"total_sec\"]\n        del pd[\"player_id\"]\n\n    # We zero pad our data up to 15 data points\n    if len(hp) < 15:\n        to_add = 15 - len(hp)\n        for i in range(0, to_add):\n            hp.append({\n                \"off_rating\": 0,\n                \"def_rating\": 0,\n                \"usg_pct\": 0,\n                \"plus_minus\": 0\n            })\n\n    if len(ap) < 15:\n        to_add = 15 - len(ap)\n        for i in range(0, to_add):\n            ap.append({\n                \"off_rating\": 0,\n                \"def_rating\": 0,\n                \"usg_pct\": 0,\n                \"plus_minus\": 0\n            })\n\n    home_data = []\n    away_data = []\n\n    for pd in hp:\n        home_data.append(pd[\"def_rating\"])\n        home_data.append(pd[\"off_rating\"])\n        home_data.append(pd[\"plus_minus\"])\n        home_data.append(pd[\"usg_pct\"])\n    for pd in ap:\n        away_data.append(pd[\"def_rating\"])\n        away_data.append(pd[\"off_rating\"])\n        away_data.append(pd[\"plus_minus\"])\n        away_data.append(pd[\"usg_pct\"])\n\n    return home_data, away_data\n\n\ndef get_pred_acc(game_ids, model):\n    \"\"\"Gets the model's accuracy on the set of games represented by game_ids\"\"\"\n    client = pymongo.MongoClient(DB_URL)\n    db = client.attempt4\n    ml_stats = db.learningStats\n    query = {\"game_id\": {\"$in\": game_ids}}\n    ml_data = ml_stats.find(query)\n    parsed_stats = []\n    for game_stats in ml_data:\n        temp_list = []\n        del game_stats[\"_id\"]\n        del game_stats[\"game_id\"]\n        winner = game_stats[\"winner\"]\n        del game_stats[\"winner\"]\n        home_player_data, away_player_data = filter_data(game_stats)\n        temp_list = temp_list + home_player_data\n        temp_list = temp_list + away_player_data\n        temp_list.append(winner)\n        parsed_stats.append(temp_list)\n    X_data, y_actual = net.Util.split_xy(parsed_stats)\n    X_data = torch.tensor(X_data, dtype=torch.float)\n    y_actual = torch.tensor(y_actual, dtype=torch.float)\n    accuracy = net.Util.accuracy(X_data, y_actual, model)\n    return accuracy\n\n\ndef get_team_stats():\n    \"\"\"Returns a list of all the team-level ML stats for each game, as well as\n    the outcome. Each list has the form [0..., 0..., 0..., 0..., ..., 1/0]\"\"\"\n    client = pymongo.MongoClient(DB_URL)\n    db = client.attempt4\n    ml_stats = db.learningStats\n    parsed_stats = []\n    for game_stats in ml_stats.find():\n        temp_list = []\n        del game_stats[\"_id\"]\n        del game_stats[\"game_id\"]\n        winner = game_stats[\"winner\"]\n        del game_stats[\"winner\"]\n        home_player_data, away_player_data = filter_data(game_stats)\n        temp_list = temp_list + home_player_data\n        temp_list = temp_list + away_player_data\n        temp_list.append(winner)\n        parsed_stats.append(temp_list)\n    return parsed_stats\n\n\ndef exp_train():\n    \"\"\"Trains and returns a model.\"\"\"\n    team_stats = get_team_stats()\n    model = net.OneLayer(120, 60, 1)\n    net.train_model(10000, 0.00019, model, team_stats, no_graph=True)\n    return model
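\n\n\n# Hedged usage sketch (not part of the original file): train on the stored games,\n# then score the model; the game ids below are placeholders, not real NBA game ids.\nif __name__ == '__main__':\n    model = exp_train()\n    print(get_pred_acc([\"0021900001\", \"0021900002\"], model))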
\n","sub_path":"comparison/nn_5.py","file_name":"nn_5.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160113930","text":"#!/usr/bin/env python\n\"\"\"The WaveBlocks Project\n\nAdd convergence lines of the form c*h^p to a convergence data file.\n\n@author: O. Rietmann\n@copyright: Copyright (C) 2020 O. Rietmann\n@license: Modified BSD License\n\"\"\"\n\nimport argparse\nimport os\n\nfrom WaveBlocksND import IOManager\nfrom WaveBlocksND import GlobalDefaults as GD\nfrom WaveBlocksND.Interface import ComputeConvergence\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-d\", \"--datafile\",\n                    type = str,\n                    help = \"The data file to store the convergence results\",\n                    nargs = 1)\n\nparser.add_argument(\"-i\", \"--interval\",\n                    type = float,\n                    help = \"x-axis interval [a, b] for plot\",\n                    nargs = 2)\n\nparser.add_argument(\"-c\", \"--coefficient\",\n                    type = float,\n                    help = \"Prefactor c of the monomial c*h^p\",\n                    nargs = \"*\")\n \nparser.add_argument(\"-p\", \"--power\",\n                    type = int,\n                    help = \"Power p of the monomial c*h^p\",\n                    nargs = \"*\")\n\nparser.add_argument(\"-l\", \"--linestyle\",\n                    type = str,\n                    help = \"Linestyle for plotting\",\n                    nargs = \"*\",\n                    default = [\"--\"])\n\nparser.add_argument(\"-x\", \"--xlabel\",\n                    type = str,\n                    help = \"Running variable of the line (the x-axis label).\",\n                    nargs = 1)\n\n# TODO: Filter type of objects\n# parser.add_argument(\"-t\", \"--type\",\n#                     help = \"The type of objects to consider.\",\n#                     type = str,\n#                     default = \"all\")\n\nargs = parser.parse_args()\n\n\n# Read file with convergence data\ndatafile = os.path.abspath(args.datafile[0])\nif os.path.exists(datafile):\n    print(\"Convergence file already exists: '{}'\".format(datafile))\n    print(\"Adding new datasets to file: '{}'\".format(datafile))\nelse:\n    raise IOError(\"Convergence file does not exist: '{}'\".format(datafile))\n\nxvalues = args.interval\ncvalues = args.coefficient\npvalues = args.power\nlinestyles = args.linestyle\nxlabel = args.xlabel[0]\n\nif len(cvalues) != len(pvalues):\n    raise IOError(\"Length of coefficients and powers needs to be the same.\")\nif len(linestyles) == 1 and len(cvalues) > 1:\n    linestyles *= len(cvalues)\nelif len(linestyles) != len(cvalues):\n    raise IOError(\"Incompatible number of linestyles and coefficients.\")\n\nprint(\"**************************************************\")\nprint(\"***           Adding Lines                     ***\")\nprint(\"**************************************************\")\n\nblockid = 0\n\nprint(\"Adding convergence line in data block '{}'\".format(blockid))\n\niom = IOManager()\niom.open_file(datafile)\nfor c, p, l in zip(cvalues, pvalues, linestyles):\n    label = \"$O(\" + xlabel + \"^\" + str(p) + \")$\"\n    diff = [c * xvalues[0]**p, c * xvalues[1]**p]\n    if iom.has_convergence(label):\n        print(\"Warning: Overwriting dataset with label '{}'\".format(label))\n        iom.delete_convergence(label)\n    iom.add_convergence(label, xvalues, linestyle=l)\n    iom.save_convergence(label, diff, 'L2diff')\n    iom.save_convergence(label, diff, 'coeffdiff')\niom.finalize()\n\nprint(\"**************************************************\")\nprint(\"***           Adding Lines Finished            ***\")\nprint(\"**************************************************\")\n","sub_path":"scripts/scripts/AddConvergenceLine.py","file_name":"AddConvergenceLine.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"505303426","text":"# -*- coding: utf-8 -*-\nimport face_recognition\nimport tkinter as tk\nfrom tkinter import *\nimport cv2,os\nimport shutil\nimport csv\nimport numpy as np\nfrom PIL import Image, ImageTk\nimport pandas as pd\nimport datetime\nimport time\nimport tkinter.ttk as ttk\nimport tkinter.font as font\nimport dlib\nimport argparse\nfrom imutils import paths\nimport pickle\nfrom imutils.video 
import VideoStream\nimport imutils\n\n\nwindow = tk.Tk()\nwindow.title(\"Face_Recogniser\")\nwindow.geometry('1920x1080')\nwindow.configure(background='#D9FFDC')\nwindow.grid_rowconfigure(0, weight=1)\nwindow.grid_columnconfigure(0, weight=1)\n\nap = argparse.ArgumentParser()\n\nap.add_argument('-w', '--weights', default='./mmod_human_face_detector.dat',\n                help='path to weights file')\nargs = ap.parse_args()\n\n\n#Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.\n#img = ImageTk.PhotoImage(Image.open(path))\n\nmessage = tk.Label(window, text=\"Smart Security System\" ,bg=\"#176AE3\" ,fg=\"white\" ,width=60 ,height=3,font=('monserrat', 30)) \nmessage.place(x=200, y=20)\n\nlbl = tk.Label(window, text=\"Enter ID\",width=20 ,height=2 ,fg=\"white\" ,bg=\"#189CED\" ,font=('monserrat', 15) ) \nlbl.place(x=400, y=200)\n\ntxt = tk.Text(window,height=2, width=30,bg=\"#189CED\" ,fg=\"white\",font=('monserrat', 15))\ntxt.place(x=700, y=200)\n\nlbl2 = tk.Label(window, text=\"Enter Name\",width=20 ,fg=\"white\" ,bg=\"#189CED\" ,height=2 ,font=('monserrat', 15)) \nlbl2.place(x=400, y=300)\n\ntxt2 = tk.Text(window,height=2, width=30,bg=\"#189CED\" ,fg=\"white\",font=('monserrat', 15))\ntxt2.place(x=700, y=300)\n\nlbl3 = tk.Label(window, text=\"Notification : \",width=20 ,fg=\"white\" ,bg=\"#189CED\" ,height=2 ,font=('monserrat', 15)) \nlbl3.place(x=400, y=400)\n\nmessage = tk.Label(window, text=\"\" ,bg=\"#189CED\" ,fg=\"white\" ,width=30 ,height=2, activebackground = \"#189CED\" ,font=('monserrat', 15)) \nmessage.place(x=700, y=400)\n\nlbl3 = tk.Label(window, text=\"Presence : \",width=20 ,fg=\"white\" ,bg=\"#189CED\" ,height=2 ,font=('monserrat', 15)) \nlbl3.place(x=400, y=500)\n\n\nmessage2 = tk.Label(window, text=\"\" ,fg=\"white\" ,bg=\"#189CED\",activeforeground = \"white\",width=30 ,height=5 ,font=('monserrat', 15)) \nmessage2.place(x=700, y=500)\n\n\ndef clear():\n\ttxt.delete('1.0', 'end') \n\tres = \"\"\n\tmessage.configure(text= res)\n\ndef clear2():\n\ttxt2.delete('1.0', 'end') \n\tres = \"\"\n\tmessage.configure(text= res) \n\t\ndef is_number(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\tpass \n\ttry:\n\t\timport unicodedata\n\t\tunicodedata.numeric(s)\n\t\treturn True\n\texcept (TypeError, ValueError):\n\t\tpass \n\treturn False\n \ndef TakeImages(): \n\tId=(txt.get(\"1.0\",'end-1c'))\n\tname=(txt2.get(\"1.0\",'end-1c'))\n\tos.makedirs(os.path.join('dataset', name ))\n\tif(is_number(Id) and name.isalpha()):\n\t\tcam = cv2.VideoCapture(0)\n\t\t#harcascadePath = \"haarcascade_frontalface_default.xml\"\n\t\t#detector=cv2.CascadeClassifier(harcascadePath)\n\t\tsampleNum=0\n\t\twhile(True):\n\t\t\tret, img = cam.read()\n\t\t\t\n\t\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\t\thog_face_detector = dlib.get_frontal_face_detector()\n\t\t\t#faces = detector.detectMultiScale(gray, 1.3, 5)\n\t\t\tfaces_hog = hog_face_detector(gray, 1)\n\t\t\tfor face in faces_hog:\n\t\t\t\tx = face.left()\n\t\t\t\ty = face.top()\n\t\t\t\tw = face.right() - x\n\t\t\t\th = face.bottom() - y\n\n\t\t\t\t# draw box over face\n\t\t\t\tcv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2) \n\t\t\t\t#incrementing sample number \n\t\t\t\tsampleNum=sampleNum+1\n\t\t\t\t#saving the captured face into the dataset folder\n\t\t\t\tstr_nm=name+\"_\"+Id+'_'+str(sampleNum)+\".jpg\"\n\t\t\t\tcv2.imwrite(os.path.join('dataset',name,str_nm), img)\n\t\t\t\t#display the frame\n\t\t\t\tcv2.imshow('frame',img) \n \n \n \n\t\t\t#wait for 5 milliseconds 
\n\t\t\tif cv2.waitKey(5) & 0xFF == ord('q'):\n\t\t\t\tbreak\n\t\t\t# break if the sample number is more than 100\n\t\t\telif sampleNum>99:\n\t\t\t\tbreak\n\t\tcam.release()\n\t\tcv2.destroyAllWindows() \n\t\tres = \"Images Saved for ID : \" + Id +\" Name : \"+ name\n\t\trow = [Id , name]\n\t\tfilename=os.path.join('PersonDetails','PersonDetails.csv')\n\t\twith open(filename,'a+') as csvFile:\n\t\t\theaders = ['Id','Name']\n\t\t\twriter = csv.DictWriter(csvFile, delimiter=',', fieldnames=headers)\n\t\t\twriter = csv.writer(csvFile)\n\t\t\twriter.writerow(row)\n\t\tcsvFile.close()\n\t\tmessage.configure(text= res)\n\telse:\n\t\tif not is_number(Id):\n\t\t\tres = \"Enter Numeric Id\"\n\t\t\tmessage.configure(text= res)\n\t\tif not name.isalpha():\n\t\t\tres = \"Enter Alphabetical Name\"\n\t\t\tmessage.configure(text= res)\n\t\ndef TrainImages():\n    # construct the argument parser and parse the arguments\n    ap = argparse.ArgumentParser()\n    #ap.add_argument(\"-i\", \"--dataset\", required=True,\n    #\thelp=\"path to input directory of faces + images\")\n    #ap.add_argument(\"-e\", \"--encodings\", required=True,\n    #\thelp=\"path to serialized db of facial encodings\")\n    ap.add_argument(\"-d\", \"--detection-method\", type=str, default=\"hog\",\n        help=\"face detection model to use: either `hog` or `cnn`\")\n    args = vars(ap.parse_args())\n    dataset=\"dataset\"\n    #encodings=\"encodings.pickle\"\n    # grab the paths to the input images in our dataset\n    print(\"[INFO] quantifying faces...\")\n    imagePaths = list(paths.list_images(dataset))\n\n    # initialize the list of known encodings and known names\n    knownEncodings = []\n    knownNames = []\n\n    # loop over the image paths\n    for (i, imagePath) in enumerate(imagePaths):\n        # extract the person name from the image path\n        print(\"[INFO] processing image {}/{}\".format(i + 1,\n            len(imagePaths)))\n        res=\"[INFO] processing image {}/{}\".format(i + 1,\n            len(imagePaths))\n        \n        name = imagePath.split(os.path.sep)[-2]\n\n        # load the input image and convert it from BGR (OpenCV ordering)\n        # to dlib ordering (RGB)\n        image = cv2.imread(imagePath)\n        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n        # detect the (x, y)-coordinates of the bounding boxes\n        # corresponding to each face in the input image\n        boxes = face_recognition.face_locations(rgb,\n            model=args[\"detection_method\"])\n\n        # compute the facial embedding for the face\n        encodings = face_recognition.face_encodings(rgb, boxes)\n\n        # loop over the encodings\n        for encoding in encodings:\n            # add each encoding + name to our set of known names and\n            # encodings\n            knownEncodings.append(encoding)\n            knownNames.append(name)\n\n    # dump the facial encodings + names to disk\n    print(\"[INFO] serializing encodings...\")\n    data = {\"encodings\": knownEncodings, \"names\": knownNames}\n    f = open(\"encodings.pickle\", \"wb\")\n    f.write(pickle.dumps(data))\n    f.close()\n\n    res = \"Image Trained\"#+\",\".join(str(f) for f in Id)\n    message.configure(text= res)\n    print(\"Images Trained\")
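\n\n# --- Hedged note (not part of the original script): TrainImages() serializes a dict\n# of the form {\"encodings\": [128-d vectors], \"names\": [labels]}; a separate consumer\n# could reload it like this (sketch only):\n# with open(\"encodings.pickle\", \"rb\") as fh:\n#     data = pickle.loads(fh.read())\n# print(len(data[\"encodings\"]), \"encodings for\", len(set(data[\"names\"])), \"people\")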
or `cnn`\")\n args = vars(ap.parse_args())\n output=\"output/demo_webcam.avi\"\n # load the known faces and embeddings\n print(\"[INFO] loading encodings...\")\n data = pickle.loads(open(\"encodings.pickle\", \"rb\").read())\n\n # initialize the video stream and pointer to output video file, then\n # allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n vs = VideoStream(src=0).start()\n writer = None\n time.sleep(2.0)\n frame_number=0\n # loop over frames from the video file stream\n while True:\n print(frame_number)\n frame_number=frame_number+1\n # grab the frame from the threaded video stream\n frame = vs.read()\n \n \n # convert the input frame from BGR to RGB then resize it to have\n # a width of 750px (to speedup processing)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n rgb = imutils.resize(frame, width=750)\n r = frame.shape[1] / float(rgb.shape[1])\n\n # detect the (x, y)-coordinates of the bounding boxes\n # corresponding to each face in the input frame, then compute\n # the facial embeddings for each face\n boxes = face_recognition.face_locations(rgb,\n model=args[\"detection_method\"])\n \n encodings = face_recognition.face_encodings(rgb, boxes)\n \n \n \n \n names = []\n # loop over the facial embeddings\n for encoding in encodings:\n # attempt to match each face in the input image to our known\n # encodings\n matches = face_recognition.compare_faces(data[\"encodings\"],\n encoding)\n name = \"Unknown\"\n \n\n # check to see if we have found a match\n if True in matches:\n # find the indexes of all matched faces then initialize a\n # dictionary to count the total number of times each face\n # was matched\n matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n \n \n # loop over the matched indexes and maintain a count for\n # each recognized face face\n for i in matchedIdxs:\n name = data[\"names\"][i]\n counts[name] = counts.get(name, 0) + 1\n \n\n # determine the recognized face with the largest number\n # of votes (note: in the event of an unlikely tie Python\n # will select first entry in the dictionary)\n \n name = max(counts, key=counts.get)\n \n \n #print(counts)\n if counts[name]<95:\n name=\"unknown\"\n \n \n \n #loop\n else:\n name=\"unknown\"\n print(name)\n\n # update the list of names\n names.append(name)\n \n\n # loop over the recognized faces\n for ((top, right, bottom, left), name) in zip(boxes, names):\n # rescale the face coordinates\n top = int(top * r)\n right = int(right * r)\n bottom = int(bottom * r)\n left = int(left * r)\n\n # draw the predicted face name on the image\n cv2.rectangle(frame, (left, top), (right, bottom),\n (0, 255, 0), 2)\n y = top - 15 if top - 15 > 15 else top + 15\n cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,\n 0.75, (0, 255, 0), 2)\n\n # if the video writer is None *AND* we are supposed to write\n # the output video to disk initialize the writer\n if writer is None and output is not None:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(output, fourcc, 20,\n (frame.shape[1], frame.shape[0]), True)\n\n # if the writer is not None, write the frame with recognized\n # faces t odisk\n if writer is not None:\n writer.write(frame)\n\n # check to see if we are supposed to display the output frame to\n # the screen\n if args[\"display\"] > 0:\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n\n 
# check to see if the video writer pointer needs to be released\n    if writer is not None:\n        writer.release()\n\n    ts = time.time()      \n    date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n    Hour,Minute,Second=timeStamp.split(\":\")\n    fileName=\"Security_\"+date+\"_\"+Hour+\"-\"+Minute+\"-\"+Second+\".csv\"\n    #attendance.to_csv(os.path.join(\"Security\",fileName),index=False)\n\n    \n\n    # Final insert statement\n    #cam.release()\n    cv2.destroyAllWindows()\n\t#print(attendance)\n    # res=attendance\n    # message2.configure(text= res)\n\n    \nclearButton = tk.Button(window, text=\"X\", command=clear ,fg=\"white\" ,bg=\"#189CED\" ,width=1 ,height=1 ,activebackground = \"#189CED\" ,font=('monserrat', 15))\nclearButton.place(x=1100, y=200)\nclearButton2 = tk.Button(window, text=\"X\", command=clear2 ,fg=\"white\" ,bg=\"#189CED\" ,width=1 ,height=1, activebackground = \"#189CED\" ,font=('monserrat', 15))\nclearButton2.place(x=1100, y=300) \ntakeImg = tk.Button(window, text=\"Take Images\", command=TakeImages ,fg=\"white\" ,bg=\"#189CED\" ,width=20 ,height=3, activebackground = \"#189CED\" ,font=('monserrat', 15))\ntakeImg.place(x=200, y=700)\ntrainImg = tk.Button(window, text=\"Train Images\", command=TrainImages ,fg=\"white\" ,bg=\"#189CED\" ,width=20 ,height=3, activebackground = \"#189CED\" ,font=('monserrat', 15))\ntrainImg.place(x=500, y=700)\ntrackImg = tk.Button(window, text=\"Track Images\", command=TrackImages ,fg=\"white\" ,bg=\"#189CED\" ,width=20 ,height=3, activebackground = \"#189CED\" ,font=('monserrat', 15))\ntrackImg.place(x=800, y=700)\nquitWindow = tk.Button(window, text=\"Quit\", command=window.destroy ,fg=\"white\" ,bg=\"#189CED\" ,width=20 ,height=3, activebackground = \"#189CED\" ,font=('monserrat', 15))\nquitWindow.place(x=1100, y=700)\n \nwindow.mainloop()\n","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57948671","text":"'''\nWritten by Debojit Kaushik (Timestamp)\n'''\nimport os\nimport sys\nimport traceback\n\n\n#Add two numbers without using the arithmetic operators '+' and '-'\ndef addNums(a, b):\n    try:\n        a, b = \"{0:b}\".format(max(a, b)), \"{0:b}\".format(min(a, b))\n        i, answer, carry, x = len(a), '', 0, ''\n        for item in range(len(a)-len(b)):\n            x += str(0)\n        for item in b:\n            x += item\n        b = x\n\n        while i:\n            if int(a[i-1]) == 0 and int(b[i-1]) == 0:\n                if carry:\n                    answer += str(1)\n                    carry = 0\n                else:\n                    answer += str(0)\n                    carry = 0\n            elif int(a[i-1]) ^ int(b[i-1]) == 1:\n                if carry:\n                    answer += str(0)\n                    carry = 1\n                else:\n                    answer += str(1)\n                    carry = 0\n            else:\n                if carry:\n                    answer += str(1)\n                    carry = 1\n                else:\n                    answer += str(0)\n                    carry = 1\n            i -= 1\n        if carry:\n            answer += str(1)\n        ans = ''\n        answer = list(answer)\n        while answer:\n            ans += answer.pop()\n        return int(ans, 2) \n    except Exception:\n        print(traceback.format_exc())\n\n\nif __name__ == '__main__':\n    try:\n        print(addNums(1, 2))\n    except Exception:\n        print(traceback.format_exc())","sub_path":"code_quests/addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106675586","text":"#!/usr/bin/env python3.5\nimport argparse\nimport os\nimport nibabel as nib\nimport numpy as np\nfrom glob import glob\nfrom diffqc import *\nimport shutil\nimport pandas as pd\nimport 
collections\n\n__version__ = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n                                'version')).read()\n\nparser = argparse.ArgumentParser(description='diffqc BIDS App entrypoint script.')\nparser.add_argument('bids_dir', help='The directory with the input dataset '\n                    'formatted according to the BIDS standard.')\nparser.add_argument('output_dir', help='The directory where the output files '\n                    'should be stored. If you are running group level analysis '\n                    'this folder should be prepopulated with the results of the '\n                    'participant level analysis.')\nparser.add_argument('analysis_level', help='Level of the analysis that will be performed. '\n                    'Multiple participant level analyses can be run independently '\n                    '(in parallel) using the same output_dir.',\n                    choices=['participant', 'group'])\nparser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '\n                    'corresponds to sub-<participant_label> from the BIDS spec '\n                    '(so it does not include \"sub-\"). If this parameter is not '\n                    'provided all subjects should be analyzed. Multiple '\n                    'participants can be specified with a space separated list.',\n                    nargs=\"+\")\nparser.add_argument('--skip_bids_validator', help='Whether or not to perform BIDS dataset validation',\n                    action='store_true')\nparser.add_argument('--keep_data', help='Keep intermediate data (e.g. fa maps)',\n                    action='store_true')\nparser.add_argument('-v', '--version', action='version',\n                    version='BIDS-App example version {}'.format(__version__))\n\n\nargs = parser.parse_args()\n\nif not args.skip_bids_validator:\n    helper.run('bids-validator %s'%args.bids_dir)\n\nsubjects_to_analyze = []\n# only for a subset of subjects\nif args.participant_label:\n    subjects_to_analyze = args.participant_label\n# for all subjects\nelse:\n    subject_dirs = glob(os.path.join(args.bids_dir, \"sub-*\"))\n    subjects_to_analyze = [subject_dir.split(\"-\")[-1] for subject_dir in subject_dirs]\n\n# running participant level\nif args.analysis_level == \"participant\":\n    # find all DWI files and run denoising and tensor / residual calculation\n    for subject_label in subjects_to_analyze:\n\n        print(\"processing sub-\" + subject_label + \"\\n\")\n\n        # loop over DWI-Files\n        for dwi_file in glob(os.path.join(args.bids_dir, \"sub-%s\"%subject_label,\n                                          \"dwi\", \"*_dwi.nii*\")) + glob(os.path.join(args.bids_dir,\"sub-%s\"%subject_label,\"ses-*\",\"dwi\", \"*_dwi.nii*\")):\n\n            # create subj dir in qc_data & qc_figures folders\n            subject_dir = os.path.join(args.output_dir, 'qc_data', 'sub-' + subject_label)\n            fig_dir = os.path.join(args.output_dir, 'qc_figures', 'sub-' + subject_label)\n            stats_dir = os.path.join(args.output_dir, 'qc_stats', 'sub-' + subject_label)\n\n            # check session\n            if dwi_file.split(\"ses-\")[-1] != dwi_file:\n                ses = 'ses-' + dwi_file.split(\"ses-\")[-1].split(\"_\")[0]\n                subject_dir = subject_dir + '_' + ses\n                fig_dir = fig_dir + '_' + ses\n                stats_dir = stats_dir + '_' + ses\n\n            # check acquisition\n            if dwi_file.split(\"acq-\")[-1] != dwi_file:\n                acq = 'acq-' + dwi_file.split(\"acq-\")[-1].split(\"_\")[0]\n                subject_dir = subject_dir + '_' + acq\n                fig_dir = fig_dir + '_' + acq\n                stats_dir = stats_dir + '_' + acq\n\n            # create output folder\n            if not os.path.isdir(subject_dir):\n                os.makedirs(subject_dir)\n            if not os.path.isdir(fig_dir):\n                os.makedirs(fig_dir)\n            if not os.path.isdir(stats_dir):\n                os.makedirs(stats_dir)\n\n            # add acquisition directory\n            dwi = {}\n            dwi['subject_label'] = subject_label\n            dwi['fig_dir'] = fig_dir\n            dwi['data_dir'] = subject_dir\n            
dwi['stats_dir'] = stats_dir\n dwi['file'] = dwi_file\n dwi['bval'] = dwi['file'].replace(\"_dwi.nii.gz\", \"_dwi.bval\")\n dwi['bval'] = dwi['bval'].replace(\"_dwi.nii\", \"_dwi.bval\")\n dwi['bvec'] = dwi['file'].replace(\"_dwi.nii.gz\", \"_dwi.bvec\")\n dwi['bvec'] = dwi['bvec'].replace(\"_dwi.nii\", \"_dwi.bvec\")\n\n # Get Header and flip_sign\n img = nib.load(dwi['file'])\n\n # np.set_printoptions(precision=2, suppress=True)\n # print(np.array(img.get_affine()))\n\n (M, perm, flip_sign) = helper.fixImageHeader(img)\n\n voxSize = img.header['pixdim'][1:4]\n\n # print(M)\n # print(perm)\n # print(flip_sign)\n\n dwi['M'] = M\n dwi['perm'] = perm\n dwi['flip_sign'] = flip_sign\n dwi['voxSize'] = voxSize[perm]\n stats = collections.OrderedDict()\n stats['subject_label'] = subject_label\n stats['voxel_size'] = [np.round(voxSize, decimals=2)]\n\n dwi['stats'] = stats\n\n # Get DWI sampling scheme\n participant.samplingScheme(dwi)\n\n # get nr of shells and directions\n participant.getShells(dwi)\n\n\n\n # Denoising to obtain noise-map\n participant.denoise(dwi)\n\n # # Step 2: Gibbs ringing removal (if available)\n # if unring_cmd:\n # run.command(unring_cmd + ' dwi_denoised.nii dwi_unring' + fsl_suffix + ' -n 100')\n # file.delTemporary('dwi_denoised.nii')\n # unring_output_path = fsl.findImage('dwi_unring')\n # run.command('mrconvert ' + unring_output_path + ' dwi_unring.mif -json_import input.json')\n # file.delTemporary(unring_output_path)\n # file.delTemporary('input.json')\n\n # b=0 and brain extraction\n participant.brainMask(dwi)\n\n\n numShells = sum(dwi['shells']>50) # use b<50 as b=0 images\n bShells = dwi['shells'][dwi['shells'] > 50]\n # MultiShell Datasets: perform tensor fit, residuals and fa per shell\n if numShells < 10 and numShells > 1 and sum(dwi['shells']<=50) > 0:\n # backup MultiShell Files in Config\n origDWI = dwi.copy()\n\n for i in range(numShells):\n bShell = bShells[i]\n dwi['shellStr'] = \"_b\" + str(int(bShell))\n\n dwi['data_dir'] = origDWI['data_dir'] + dwi['shellStr']\n dwi['fig_dir'] = origDWI['fig_dir'] + dwi['shellStr']\n dwi['stats_dir'] = origDWI['stats_dir'] + dwi['shellStr']\n\n dwi['stats']['subject_label'] = origDWI['subject_label'] + dwi['shellStr']\n\n # create output folder\n if not os.path.isdir(dwi['data_dir']):\n os.makedirs(dwi['data_dir'])\n if not os.path.isdir(dwi['fig_dir']):\n os.makedirs(dwi['fig_dir'])\n if not os.path.isdir(dwi['stats_dir']):\n os.makedirs(dwi['stats_dir'])\n\n dwi['denoised'] = origDWI['denoised'].replace(origDWI['data_dir'], dwi['data_dir'])\n dwi['bval'] = dwi['denoised'].replace('.nii.gz', '.bval')\n dwi['bvec'] = dwi['denoised'].replace('.nii.gz', '.bvec')\n\n # extract shell from _denoise\n cmd = \"dwiextract -shells 0,%s -fslgrad %s %s -export_grad_fsl %s %s %s %s -force\"%(str(int(bShell)),\n origDWI['bvec'],\n origDWI['bval'],\n dwi['bvec'],\n dwi['bval'],\n origDWI['denoised'],\n dwi['denoised'])\n # print(cmd)\n helper.run(cmd)\n\n participant.getShells(dwi)\n\n # perform tensor fit, faMap and Residuals\n participant.dtiFit(dwi)\n participant.faMap(dwi)\n participant.mdsMap(dwi)\n participant.tensorResiduals(dwi)\n\n # Create stats-file\n df = pd.DataFrame([])\n df = df.append(pd.DataFrame(dwi['stats'], columns=dwi['stats'].keys()))\n\n stats_file = os.path.join(dwi['stats_dir'], \"stats.tsv\")\n df.to_csv(stats_file, sep=\"\\t\", index=False)\n\n # Cleanup dwi data at shell-level\n if not args.keep_data:\n shutil.rmtree(dwi['data_dir'])\n\n # restore MultiShell Files in Config\n dwi = origDWI.copy()\n 
dwi['stats']['subject_label'] = origDWI['subject_label']\n else:\n dwi['shellStr'] = ''\n # perform tensor fit\n participant.dtiFit(dwi)\n\n # Create FA maps\n participant.faMap(dwi)\n\n # Calculate MDS map\n participant.mdsMap(dwi)\n\n # Calc DTI residuals\n participant.tensorResiduals(dwi)\n\n # check DWI -> T1 overlay\n for t1_file in glob(os.path.join(args.bids_dir, \"sub-%s\"%subject_label,\n \"anat\", \"*_T1w.nii*\")) + glob(os.path.join(args.bids_dir,\"sub-%s\"%subject_label,\"ses-*\",\"anat\", \"*_T1w.nii*\")):\n\n # check if T1 is from the correct session\n if dwi_file.split(\"ses-\")[-1] != dwi_file:\n ses = 'ses-' + dwi_file.split(\"ses-\")[-1].split(\"_\")[0]\n ses_t1 = 'ses-' + t1_file.split(\"ses-\")[-1].split(\"_\")[0]\n if ses != ses_t1:\n # skip t1 file if sessions don't match!\n t1_file = ''\n\n if (t1_file):\n t1 = {}\n t1['file'] = t1_file\n participant.anatOverlay(dwi, t1)\n\n\n # Create stats-file\n df = pd.DataFrame([])\n df = df.append(pd.DataFrame(dwi['stats'], columns=dwi['stats'].keys()))\n\n stats_file = os.path.join(stats_dir, \"stats.tsv\")\n df.to_csv(stats_file, sep=\"\\t\", index=False)\n\n # Cleanup dwi-level\n if not args.keep_data:\n shutil.rmtree(subject_dir)\n\n # Cleanup top-level\n if not args.keep_data:\n shutil.rmtree(os.path.join(args.output_dir, 'qc_data'))\n\n# running group level\nelif args.analysis_level == \"group\":\n\n # get figure names and number of figures per subject\n myList = []\n\n for subject_label in subjects_to_analyze:\n for image_file in glob(os.path.join(args.output_dir, 'qc_figures', \"sub-%s*\"%subject_label, \"*.png\")):\n myList.extend([os.path.basename(image_file)[0:-4]])\n\n imgSet = set(myList)\n\n wp = {}\n wp['filePath'] = os.path.join(args.output_dir, \"_quality.html\")\n wp['subjects'] = subjects_to_analyze\n wp['subFolders'] = [os.path.split(subF)[-1][4:] for subF in glob(os.path.join(args.output_dir,'qc_figures',\"sub-*\")) ]\n wp['figFolder'] = os.path.join(args.output_dir, 'qc_figures')\n wp['maxImg'] = len(imgSet)\n wp['maxList'] = list(sorted(imgSet))\n\n group.createWebPage(wp)\n\n # create group stats table\n df = pd.DataFrame([])\n for subj_stats in glob(os.path.join(args.output_dir, 'qc_stats', 'sub-*', \"*.tsv\")):\n df = df.append(pd.read_csv(subj_stats, sep=\"\\t\"))\n\n out_file = os.path.join(args.output_dir, \"qc_stats_all.tsv\")\n df.to_csv(out_file, sep=\"\\t\", index=False)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":12272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55388061","text":"from typing import List\n\n'''\nclass Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n'''\n\n\nclass removeDuplicatesFromSortedArray:\n def start(self,nums:List[int])->int:\n insertIndex=1\n\n for k in range(1,len(nums)):\n if nums[k]!=nums[insertIndex-1]:\n nums[insertIndex]=nums[k]\n insertIndex+=1\n\n return insertIndex\n\nobj=removeDuplicatesFromSortedArray()\n#nums=[0,0,1,1,1,2,2,3,3,4]\nnums=[1,1]\nrs=obj.start(nums)\nprint(rs)","sub_path":"leetcode/easy/26_RemoveDuplicatesFromSortedArray.py","file_name":"26_RemoveDuplicatesFromSortedArray.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4752311","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2018 CNRS\n\n# Permission is hereby granted, free of charge, to any 
person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\nimport warnings\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom pyannote.metrics.binary_classification import det_curve\nfrom pyannote.database import get_unique_identifier\nfrom pyannote.database import get_label_identifier\nfrom pyannote.database import get_annotated\nfrom pyannote.audio.util import to_numpy\nfrom pyannote.core import Segment\nfrom pyannote.core import Timeline\nfrom pyannote.core import SlidingWindowFeature\n\nfrom pyannote.generators.batch import batchify\nfrom pyannote.generators.fragment import random_segment\nfrom pyannote.generators.fragment import random_subsegment\nfrom pyannote.generators.fragment import SlidingSegments\n\nfrom collections import deque\n\nfrom pyannote.audio.train import Trainer\n\n\nclass LabelingTaskGenerator(object):\n    \"\"\"Base batch generator for various labeling tasks\n\n    This class should be inherited from: it should not be used directly\n\n    Parameters\n    ----------\n    precomputed : `pyannote.audio.features.Precomputed`\n        Precomputed features\n    duration : float, optional\n        Duration of sub-sequences. Defaults to 3.2s.\n    batch_size : int, optional\n        Batch size. Defaults to 32.\n    per_epoch : float, optional\n        Total audio duration per epoch, in seconds.\n        Defaults to one hour (3600).\n    parallel : int, optional\n        Number of prefetching background generators. Defaults to 1.\n        Each generator will prefetch enough batches to cover a whole epoch.\n        Set `parallel` to 0 to not use background generators.\n    exhaustive : bool, optional\n        Ensure training files are covered exhaustively (useful in case of\n        non-uniform label distribution).\n    shuffle : bool, optional\n        Shuffle exhaustive samples. 
Defaults to False.\n    \"\"\"\n\n    def __init__(self, precomputed, duration=3.2, batch_size=32,\n                 per_epoch=3600, parallel=1, exhaustive=False, shuffle=False):\n\n        super(LabelingTaskGenerator, self).__init__()\n\n        self.precomputed = precomputed\n        self.duration = duration\n        self.batch_size = batch_size\n        self.per_epoch = per_epoch\n        self.parallel = parallel\n        self.exhaustive = exhaustive\n        self.shuffle = shuffle\n\n    def initialize(self, protocol, subset='train'):\n        \"\"\"Gather the following information about the training subset:\n\n        data_ : dict\n\n            {'segments': <list of annotated segments>,\n             'duration': <total duration of annotated segments>,\n             'current_file': <protocol dictionary>,\n             'y': <labels as SlidingWindowFeature>}\n\n        databases_ : list\n            Sorted list of (unique) databases in protocol.\n\n        labels_ : list\n            Sorted list of (unique) labels in protocol.\n        \"\"\"\n\n        self.data_ = {}\n        labels, databases = set(), set()\n\n        # loop once on all files\n        for current_file in getattr(protocol, subset)():\n\n            # keep track of database\n            database = current_file['database']\n            databases.add(database)\n\n            # keep track of unique labels\n            for label in current_file['annotation'].labels():\n                label = get_label_identifier(label, current_file)\n                labels.add(label)\n\n            annotated = get_annotated(current_file)\n\n            if not self.precomputed.use_memmap:\n                msg = ('Loading all precomputed features in memory. '\n                       'Set \"use_memmap\" to True if you run out of memory.')\n                warnings.warn(msg)\n\n            segments = [s for s in annotated if s.duration > self.duration]\n\n            # corner case where no segment is long enough\n            # and we removed them all...\n            if not segments:\n                continue\n\n            # total duration of label in current_file (after removal of\n            # short segments).\n            duration = sum(s.duration for s in segments)\n\n            # store all these in data_ dictionary\n            datum = {'segments': segments,\n                     'duration': duration,\n                     'current_file': current_file}\n            uri = get_unique_identifier(current_file)\n            self.data_[uri] = datum\n\n        self.databases_ = sorted(databases)\n        self.labels_ = sorted(labels)\n\n        sliding_window = self.precomputed.sliding_window()\n        for current_file in getattr(protocol, subset)():\n            y, _ = to_numpy(current_file, self.precomputed,\n                            labels=self.labels_)\n            uri = get_unique_identifier(current_file)\n            self.data_[uri]['y'] = SlidingWindowFeature(\n                self.postprocess_y(y), sliding_window)\n\n    def postprocess_y(self, Y):\n        \"\"\"This function does nothing but return its input.\n        It should be overridden by subclasses.\"\"\"\n        return Y\n\n    def samples(self):\n        if self.exhaustive:\n            return self.random_samples()\n        else:\n            return self.sliding_samples()\n\n    def random_samples(self):\n        \"\"\"Random samples\n\n        Returns\n        -------\n        samples : generator\n            Generator that yields {'X': ..., 'y': ...} samples indefinitely.\n        \"\"\"\n\n        uris = list(self.data_)\n        durations = np.array([self.data_[uri]['duration'] for uri in uris])\n        probabilities = durations / np.sum(durations)\n\n        while True:\n\n            # choose file at random with probability\n            # proportional to its (annotated) duration\n            uri = uris[np.random.choice(len(uris), p=probabilities)]\n\n            datum = self.data_[uri]\n            current_file = datum['current_file']\n\n            # choose one segment at random with probability\n            # proportional to its duration\n            segment = next(random_segment(datum['segments'], weighted=True))\n\n            # choose fixed-duration subsegment at random\n            sequence = next(random_subsegment(segment, self.duration))\n\n            X = self.precomputed.crop(current_file,\n                                      sequence, mode='center',\n                                      fixed=self.duration)\n\n            y = datum['y'].crop(sequence, mode='center', fixed=self.duration)\n\n            yield {'X': X, 'y': np.squeeze(y)}
\n\n    def sliding_samples(self):\n\n        uris = list(self.data_)\n        durations = np.array([self.data_[uri]['duration'] for uri in uris])\n        probabilities = durations / np.sum(durations)\n\n        sliding_segments = SlidingSegments(duration=self.duration,\n                                           step=self.duration,\n                                           source='annotated')\n\n        while True:\n\n            np.random.shuffle(uris)\n\n            # loop on all files\n            for uri in uris:\n                datum = self.data_[uri]\n\n                # make a copy of current file\n                current_file = dict(datum['current_file'])\n\n                # randomly shift 'annotated' segments' start times so that\n                # we avoid generating exactly the same subsequence twice\n                annotated = Timeline(\n                    [Segment(s.start + np.random.random() * self.duration,\n                             s.end) for s in get_annotated(current_file)])\n                current_file['annotated'] = annotated\n\n                if self.shuffle:\n                    samples = []\n\n                for sequence in sliding_segments.from_file(current_file):\n\n                    X = self.precomputed.crop(current_file,\n                                              sequence, mode='center',\n                                              fixed=self.duration)\n\n                    y = datum['y'].crop(sequence, mode='center',\n                                        fixed=self.duration)\n\n                    sample = {'X': X, 'y': np.squeeze(y)}\n\n                    if self.shuffle:\n                        samples.append(sample)\n                    else:\n                        yield sample\n\n                if self.shuffle:\n                    np.random.shuffle(samples)\n                    for sample in samples:\n                        yield sample\n\n    @property\n    def signature(self):\n        return {'X': {'@': (None, np.stack)},\n                'y': {'@': (None, np.stack)}}\n\n    @property\n    def batches_per_epoch(self):\n        \"\"\"Number of batches needed to complete an epoch\"\"\"\n        duration_per_batch = self.duration * self.batch_size\n        return int(np.ceil(self.per_epoch / duration_per_batch))\n\n    @property\n    def labels(self):\n        return list(self.labels_)\n\n    def __call__(self, protocol, subset='train'):\n        \"\"\"(Parallelized) batch generator\"\"\"\n\n        # pre-load useful information about protocol once and for all\n        self.initialize(protocol, subset=subset)\n\n        # number of batches needed to complete an epoch\n        batches_per_epoch = self.batches_per_epoch\n\n        generators = []\n\n        if self.parallel:\n            for _ in range(self.parallel):\n\n                # initialize one sample generator\n                samples = self.samples()\n\n                # batchify it and make sure at least\n                # `batches_per_epoch` batches are prefetched.\n                batches = batchify(samples, self.signature,\n                                   batch_size=self.batch_size,\n                                   prefetch=batches_per_epoch)\n\n                # add batch generator to the list of (background) generators\n                generators.append(batches)\n        else:\n\n            # initialize one sample generator\n            samples = self.samples()\n\n            # batchify it without prefetching\n            batches = batchify(samples, self.signature,\n                               batch_size=self.batch_size, prefetch=0)\n\n            # add it to the list of generators\n            # NOTE: this list will only contain one generator\n            generators.append(batches)\n\n        # loop on (background) generators indefinitely\n        while True:\n            for batches in generators:\n                # yield `batches_per_epoch` batches from current generator\n                # so that each epoch is covered by exactly one generator\n                for _ in range(batches_per_epoch):\n                    yield next(batches)
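\n\n\n# --- Hedged illustration (not part of the original module): the docstrings above say\n# these classes are meant to be subclassed; a minimal sketch of a binary task could\n# look like this (the class name and label mapping are hypothetical):\n# class MyBinaryGenerator(LabelingTaskGenerator):\n#     def postprocess_y(self, Y):\n#         # collapse the multi-label matrix to {0, 1}: any active label -> 1\n#         return 1 * (np.sum(Y, axis=1, keepdims=True) > 0)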
\n\n\nclass LabelingTask(Trainer):\n    \"\"\"Base class for various labeling tasks\n\n    This class should be inherited from: it should not be used directly\n\n    Parameters\n    ----------\n    duration : float, optional\n        Duration of sub-sequences. Defaults to 3.2s.\n    batch_size : int, optional\n        Batch size. Defaults to 32.\n    per_epoch : float, optional\n        Total audio duration per epoch, in seconds.\n        Defaults to one hour (3600).\n    parallel : int, optional\n        Number of prefetching background generators. Defaults to 1.\n        Each generator will prefetch enough batches to cover a whole epoch.\n        Set `parallel` to 0 to not use background generators.\n    \"\"\"\n\n    def __init__(self, duration=3.2, batch_size=32, per_epoch=3600,\n                 parallel=1):\n        super(LabelingTask, self).__init__()\n        self.duration = duration\n        self.batch_size = batch_size\n        self.per_epoch = per_epoch\n        self.parallel = parallel\n\n    def get_batch_generator(self, precomputed):\n        \"\"\"This method should be overridden by subclasses\n\n        Parameters\n        ----------\n        precomputed : `pyannote.audio.features.Precomputed`\n\n        Returns\n        -------\n        batch_generator : `LabelingTaskGenerator`\n        \"\"\"\n        return LabelingTaskGenerator(\n            precomputed, duration=self.duration, per_epoch=self.per_epoch,\n            batch_size=self.batch_size, parallel=self.parallel)\n\n    @property\n    def n_classes(self):\n        \"\"\"Number of classes\"\"\"\n        msg = 'LabelingTask subclass must define `n_classes` property.'\n        raise NotImplementedError(msg)\n\n    def on_train_start(self, model, batches_per_epoch=None, **kwargs):\n\n        if model.n_classes != self.n_classes:\n            raise ValueError('n_classes mismatch')\n\n        self.loss_func_ = model.get_loss()\n\n        self.log_y_pred_ = deque([], maxlen=batches_per_epoch)\n        self.log_y_true_ = deque([], maxlen=batches_per_epoch)\n\n    def batch_loss(self, batch, model, device, writer=None, **kwargs):\n\n        X = torch.tensor(batch['X'], dtype=torch.float32, device=device)\n        y = torch.tensor(batch['y'], dtype=torch.int64, device=device)\n\n        fX = model(X.requires_grad_())\n\n        losses = self.loss_func_(fX.view((-1, self.n_classes)),\n                                 y.contiguous().view((-1, )))\n\n        if writer is not None:\n            self.log_y_pred_.append(self.to_numpy(fX))\n            self.log_y_true_.append(self.to_numpy(y))\n\n        return torch.mean(losses)\n\n    def on_epoch_end(self, iteration, writer=None, **kwargs):\n\n        if writer is None:\n            return\n\n        log_y_pred = np.hstack(self.log_y_pred_)\n        log_y_true = np.hstack(self.log_y_true_)\n        log_y_pred = log_y_pred.reshape((-1, self.n_classes))\n        log_y_true = log_y_true.reshape((-1, ))\n        if self.n_classes < 3:\n            _, _, _, eer = det_curve(log_y_true == 0,\n                                     log_y_pred[:, 0])\n            writer.add_scalar(f'train/eer',\n                              eer, global_step=iteration)\n        else:\n            for k in range(self.n_classes):\n                _, _, _, eer = det_curve(log_y_true == k,\n                                         log_y_pred[:, k])\n                writer.add_scalar(f'train/eer/{k}',\n                                  eer, global_step=iteration)\n","sub_path":"pyannote/audio/labeling/tasks/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86308905","text":"# DAVIS BUSTEED -- LING 360 -- HW 3\n\n# Instructions:\n# Find two texts that are each at least 500 words in length. Try to find one text that is more formal \n# and one that is less formal. Create a Python program that produces normalized counts per 100 words \n# for the number of (a) subject pronouns, (b) contractions, and (c) modal verbs in each of the texts. \n# You should have six counts: (1) subject pronouns in formal text, (2) subject pronouns in informal \n# text, (3) contractions in formal text, (4) contractions in informal text, (5) modal verbs in formal \n# text, (6) modal verbs in informal text. Check both the precision and the recall of your program's \n# ability to correctly find these three linguistic features. Simply paste the texts into your .py file. \n# Interpret the results in 100-200 words. 
Turn into the CMS a zipped file (with the extension .zip) with your \n# .py file and a .docx file with your interpretation of the results in which you describe any differences \n# between the texts, as well as a report on the precision and recall of your program.\n\n# import required functions from libraries\nfrom re import split, search\n\n# constant values\nSUBJ_PRONOUNS = ['i', 'you', 'he', 'she', 'it', 'we', 'ye', 'they', 'what', 'who']\nMODAL_VERBS = ['can', 'could', 'may', 'might', 'must', 'shall', 'should', 'will', 'would']\nNORMALIZE_COUNT = 100\n\n# this function uses a regex to check if a \n# word is a contraction or not\ndef is_contraction(w):\n    if search(r'\\b\\w+[\\'\\u2019][^s ]\\w*|\\bit[\\'\\u2019]s', w):\n        return True\n    else: \n        return False\n\n# read in the two files (make them lowercase now for simplicity)\nformal_txt = open('formal.txt', 'r', encoding='utf8').read().lower()\ninformal_txt = open('informal.txt', 'r', encoding='utf8').read().lower()\n\n# dictionary to be used to keep track of feature counts\nresults = {\n    'formal_subj_pronoun': 0,\n    'formal_contractions': 0,\n    'formal_modal_verbs': 0,\n    'informal_subj_pronoun': 0,\n    'informal_contractions': 0,\n    'informal_modal_verbs': 0,\n}\n\n# split up the text into a list of words (split on whitespace and common punctuation)\nformal_tokens = split(r'[\\s+\\.\\?!,)(]', formal_txt)\ninformal_tokens = split(r'[\\s+\\.\\?!,)(]', informal_txt)\n\n# remove empty strings from the list of tokens\nformal_tokens = [tok for tok in formal_tokens if tok != '']\ninformal_tokens = [tok for tok in informal_tokens if tok != '']\n\n# loop thru the words in the formal txt\nfor word in formal_tokens:\n    \n    # check if the word is a subject pronoun from the list\n    # if so, add one to the feature count\n    if word in SUBJ_PRONOUNS:\n        results['formal_subj_pronoun'] += 1\n    \n    # check if the word is a contraction\n    if is_contraction(word):\n        results['formal_contractions'] += 1\n\n    # check if the word is a modal verb\n    if word in MODAL_VERBS:\n        results['formal_modal_verbs'] += 1\n\n# do the same for the informal txt\nfor word in informal_tokens:\n\n    if word in SUBJ_PRONOUNS:\n        results['informal_subj_pronoun'] += 1\n    \n    if is_contraction(word): \n        results['informal_contractions'] += 1\n\n    if word in MODAL_VERBS:\n        results['informal_modal_verbs'] += 1\n\n# loop the dictionary of results, calculate the normalized count\n# and display the results\nprint(f'\\nNormalized Counts per {NORMALIZE_COUNT} Words\\n')\n\nfor key,value in results.items():\n    if key[0] == 'f': \n        norm_val = value / len(formal_tokens) * NORMALIZE_COUNT\n    else:\n        norm_val = value / len(informal_tokens) * NORMALIZE_COUNT\n\n    # add a space between the formal and informal results\n    if(key == 'informal_subj_pronoun'):\n        print('')\n\n    # print the feature name and the normalized count (formatted nicely)\n    print(f'{key.upper()}\\t{norm_val:05.2f}')\n\nprint('\\n')","sub_path":"Busteed_03/Busteed_03.py","file_name":"Busteed_03.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"56501543","text":"import numpy as np\n\n# # Rosenbrock function (d = n)\n# # [-5, 5]\n# # fmin = 0 | (1, 1, .... , 1)\nclass Rosenbrock():\n    @staticmethod\n    def F(x):\n        val = 0\n        for i in range(0, len(x) - 1):\n            t1 = 100 * (x[i + 1] - x[i] ** 2) ** 2\n            t2 = (x[i] - 1) ** 2\n            val += t1 + t2\n        return val
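\n\n# Hedged sanity check (not part of the original file): the stated minimum\n# f(1, ..., 1) = 0 can be verified with, e.g.:\n# assert Rosenbrock.F([1.0] * 5) == 0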
, 0)\nclass WeightedSphere():\n @staticmethod\n def F(x):\n val = np.array([ (i + 1) * xi ** 2 for i, xi in enumerate(x)])\n return np.sum(val)\n\ndef Evaluate(obj, x) :\n if obj == \"Rosenbrock function\":\n return Rosenbrock.F(x)\n if obj == \"Weighted Sphere function\":\n return WeightedSphere.F(x)","sub_path":"Target.py","file_name":"Target.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"229227455","text":"#!/usr/bin/python\n\n'This example runs stations in AP mode'\n\nimport sys\n\nfrom mininet.log import setLogLevel, info\nfrom mn_wifi.link import wmediumd\nfrom mn_wifi.cli import CLI_wifi\nfrom mn_wifi.net import Mininet_wifi\nfrom mininet.node import Controller, RemoteController\nfrom mn_wifi.wmediumdConnector import interference\n\nclass InbandController( RemoteController ):\n\n def checkListening( self ):\n \"Overridden to do nothing.\"\n return\n\ndef topology():\n 'Create a network.'\n net = Mininet_wifi(controller=RemoteController, link=wmediumd, wmediumd_mode=interference)\n\n info(\"*** Creating nodes\\n\")\n sta1 = net.addStation('sta1', mac='00:00:00:00:00:01', ip='192.168.0.1/24')\n sta2 = net.addStation('sta2', mac='00:00:00:00:00:02', ip='192.168.1.1/24')\n\n ap1 = net.addStation('ap1', mac='02:00:00:00:01:00', ip='192.168.0.10/24')\n ap2 = net.addStation('ap2', mac='02:00:00:00:02:00', ip='192.168.1.10/24')\n\n c1 = net.addController(name='c1',\n controller=InbandController,\n ip='127.0.0.1',\n protocol='tcp',\n port=6633)\n\n net.setPropagationModel(model=\"logDistance\", exp=4.5)\n\n info(\"*** Configuring wifi nodes\\n\")\n net.configureWifiNodes()\n\n ap1.setMasterMode(intf='ap1-wlan0', ssid='ap1-ssid', channel='1', mode='n')\n ap2.setMasterMode(intf='ap2-wlan0', ssid='ap2-ssid', channel='6', mode='n')\n\n info(\"*** Adding Link\\n\")\n net.addLink(ap1, ap2) # wired connection\n\n info(\"*** Plotting Graph\\n\")\n net.plotGraph(max_x=120, max_y=120)\n\n net.setMobilityModel(time=0, model='RandomWayPoint', max_x=100, max_y=100,min_v=0.5, max_v=2, seed=10)\n\n info(\"*** Starting network\\n\")\n net.build()\n c1.start()\n\n ap1.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')\n ap2.cmd('echo 1 > /proc/sys/net/ipv4/ip_forward')\n\n ap1.start([c1])\n ap2.start([c1])\n\n ap1.setIP('192.168.0.10/24', intf='ap1-wlan0')\n ap1.setIP('192.168.2.1/24', intf='ap1-eth2')\n ap2.setIP('192.168.1.10/24', intf='ap2-wlan0')\n ap2.setIP('192.168.2.2/24', intf='ap2-eth2')\n ap1.cmd('route add -net 192.168.1.0/24 gw 192.168.2.2')\n ap2.cmd('route add -net 192.168.0.0/24 gw 192.168.2.1')\n sta1.cmd('route add -net 192.168.1.0/24 gw 192.168.0.10')\n sta1.cmd('route add -net 192.168.2.0/24 gw 192.168.0.10')\n sta2.cmd('route add -net 192.168.0.0/24 gw 192.168.1.10')\n sta2.cmd('route add -net 192.168.2.0/24 gw 192.168.1.10')\n\n info(\"*** Running CLI\\n\")\n CLI_wifi(net)\n\n info(\"*** Stopping network\\n\")\n net.stop()\n\nif __name__ == '__main__':\n setLogLevel('info')\n topology()\n","sub_path":"Old files/sta_ap_mode.py","file_name":"sta_ap_mode.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237310266","text":"import sys\nimport shutil\nimport time\nimport unicodedata\n\n\ndef getlen(s):\n \"\"\"\n 全角文字を2文字、半角文字を1文字として文字列の長さを得る\n \"\"\"\n cnt = 0\n for c in s:\n if unicodedata.east_asian_width(c) in 'FWA':\n cnt += 2\n else:\n cnt += 1\n return cnt\n\n\ndef main():\n \"\"\"\n メインルーチン\n 
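# [Editor's note] getlen() above counts characters whose East Asian width class
# is 'F' (fullwidth), 'W' (wide), or 'A' (ambiguous) as two terminal cells.
# A small, hedged illustration of the underlying stdlib call:
import unicodedata
assert unicodedata.east_asian_width('あ') == 'W'   # fullwidth kana -> 2 cells
assert unicodedata.east_asian_width('a') == 'Na'   # narrow ASCII   -> 1 cell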
\"\"\"\n termsize = shutil.get_terminal_size()\n tcols = termsize.columns # 端末(ターミナル)の横幅の文字列数\n x = 0 # ショボーンのX座標\n xamnt = 1 # X座標の増加量\n face = '(´・ω・`)'\n facelen = getlen(face)\n\n while True:\n # 33ミリ秒プログラムを休ませる\n try:\n time.sleep(0.033)\n except KeyboardInterrupt:\n break\n\n # 現在のX座標が端末の横幅を超えるようなら増加量をマイナスに\n # 0以下ならプラスにする\n if x + facelen >= tcols-1:\n xamnt = -1\n elif x <= 0:\n xamnt = 1\n\n # 描画するバッファを生成する\n buf = ''\n for _ in range(x):\n buf += ' '\n\n buf += face\n\n # 座標の更新\n x += xamnt\n \n # 顔文字を描画\n print(buf)\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"side_walker.py","file_name":"side_walker.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269271825","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Guestbook',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('user', models.CharField(verbose_name='Пользователь', max_length=20)),\n ('posted', models.DateTimeField(verbose_name='Опубликовано', auto_now_add=True, db_index=True)),\n ('content', models.TextField(verbose_name='Содержание')),\n ],\n options={\n 'verbose_name': 'Запись гостевой книги',\n 'verbose_name_plural': 'Записи гостевой книги',\n 'ordering': ['-posted'],\n },\n ),\n ]\n","sub_path":"guestbook/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"611963500","text":"import logging\nimport os\n\nimport boto3\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n # Log the received event\n logger.info(f\"Received event: {event}\")\n # get the Batch client ready\n batch_client = boto3.client('batch')\n\n # Retrieve parameters\n container_overrides = event['containerOverrides'] if event.get('containerOverrides') else {}\n parameters = event['parameters'] if event.get('parameters') else {}\n depends_on = event['dependsOn'] if event.get('dependsOn') else []\n job_queue = event['jobQueue'] if event.get('jobQueue') else os.environ.get('JOBQUEUE')\n job_definition = event['jobDefinition'] if event.get('jobDefinition') else os.environ.get('JOBDEF')\n container_mem = event['memory'] if event.get('memory') else os.environ.get('JOB_MEM')\n container_vcpus = event['vcpus'] if event.get('vcpus') else os.environ.get('JOB_VCPUS')\n data_bucket = event['dataBucket'] if event.get('dataBucket') else os.environ.get('DATA_BUCKET')\n result_bucket = event['resultBucket'] if event.get('resultBucket') else data_bucket\n refdata_bucket = event['refDataBucket'] if event.get('refDataBucket') else os.environ.get('REFDATA_BUCKET')\n ref_dataset = event['refDataset'] if event.get('refDataset') else os.environ.get('REF_DATASET')\n genome_build = event['genomeBuild'] if event.get('genomeBUild') else os.environ.get('GENOME_BUILD')\n\n data_wts_dir = event['dataDirWTS']\n data_wgs_dir = event['dataDirWGS'] if event.get('dataDirWGS') else \"\"\n job_name = data_bucket + \"---\" + data_wts_dir.replace('/', '_').replace('.', '_')\n job_name = os.environ.get('JOBNAME_PREFIX') + '_' + job_name\n\n try:\n s3 = boto3.client('s3')\n logger.info(f\"Checking if data in 
WTS input S3 path ({data_wts_dir}) exists in bucket {data_bucket}\")\n response_wts = s3.list_objects(Bucket=data_bucket, MaxKeys=3, Prefix=data_wts_dir)\n logger.info(f\"S3 list response: {response_wts}\")\n if not response_wts.get('Contents') or len(response_wts['Contents']) < 1:\n return {\n 'statusCode': 400,\n 'error': 'Bad parameter',\n 'message': f\"Provided S3 path ({data_wts_dir}) does not exist in bucket {data_bucket}!\"\n }\n\n if event.get('dataDirWGS'):\n logger.info(f\"Checking if data in WGS input S3 path ({data_wgs_dir}) exists in bucket {data_bucket}\")\n response_wgs = s3.list_objects(Bucket=data_bucket, MaxKeys=3, Prefix=data_wgs_dir)\n logger.info(f\"S3 list response: {response_wgs}\")\n if not response_wgs.get('Contents') or len(response_wgs['Contents']) < 1:\n return {\n 'statusCode': 400,\n 'error': 'Bad parameter',\n 'message': f\"Provided S3 path ({data_wgs_dir}) does not exist in bucket {data_bucket}!\"\n }\n\n # create and submit a Batch job request\n container_overrides['environment'] = [\n {'name': 'S3_WTS_INPUT_DIR', 'value': data_wts_dir},\n {'name': 'S3_WGS_INPUT_DIR', 'value': data_wgs_dir},\n {'name': 'S3_DATA_BUCKET', 'value': data_bucket},\n {'name': 'S3_RESULT_BUCKET', 'value': result_bucket},\n {'name': 'S3_REFDATA_BUCKET', 'value': refdata_bucket},\n {'name': 'REF_DATASET', 'value': ref_dataset},\n {'name': 'GENOME_BUILD', 'value': genome_build}\n ]\n\n if container_mem:\n container_overrides['memory'] = int(container_mem)\n if container_vcpus:\n container_overrides['vcpus'] = int(container_vcpus)\n parameters['vcpus'] = container_vcpus\n\n logger.info(f\"jobName: {job_name}\")\n logger.info(\"containerOverrides: \")\n logger.info(container_overrides)\n logger.info(f\"jobDefinition: {job_definition}\")\n response = batch_client.submit_job(\n dependsOn=depends_on,\n containerOverrides=container_overrides,\n jobDefinition=job_definition,\n jobName=job_name,\n jobQueue=job_queue,\n parameters=parameters\n )\n\n # Log response from AWS Batch\n logger.info(f\"Response: {response}\")\n # Return the jobId\n event['jobId'] = response['jobId']\n return event\n\n except Exception as e:\n logger.error(e)\n","sub_path":"cdk/apps/rnasum/lambdas/trigger_wts_report.py","file_name":"trigger_wts_report.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"567783617","text":"import typer\nfrom icon_cli.models.Callbacks import Callbacks\nfrom icon_cli.models.Config import Config\nfrom icon_cli.utils import print_json, print_object\nfrom pathlib import Path\n\napp = typer.Typer()\n\n\n@app.command()\ndef debug():\n print_object(__name__)\n\n\n@app.command()\ndef add(\n keystore_path: Path = typer.Argument(\n ...,\n callback=Callbacks.validate_keystore_integrity,\n exists=True,\n file_okay=True,\n dir_okay=False,\n readable=True,\n writable=False,\n resolve_path=True,\n )\n):\n \"\"\"\n Add a keystore to ~/.icon-cli/keystore.\n \"\"\"\n Config.import_keystore(keystore_path)\n\n\n@app.command()\ndef inspect(\n keystore_name=typer.Argument(\n Config.get_default_keystore(), callback=Callbacks.validate_keystore_name\n ),\n all: bool = typer.Option(False, \"--all\", \"-a\"),\n format: str = typer.Option(None, \"--format\", \"-f\", callback=Callbacks.validate_output_format),\n):\n \"\"\"\n Returns information about imported keystores.\n \"\"\"\n\n if all is True:\n imported_keystores = Config.get_imported_keystores()\n if format == \"json\":\n print_json(imported_keystores)\n else:\n 
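# [Editor's note] The existence checks above only need to know whether at least
# one object matches the prefix, so MaxKeys=1 is enough. A hedged sketch using
# boto3's list_objects_v2 API (the helper name is mine):
import boto3

def s3_prefix_exists(bucket, prefix):
    s3 = boto3.client('s3')
    resp = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=1)
    return resp.get('KeyCount', 0) > 0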
for keystore in imported_keystores:\n print(\n f\"Keystore Name: {keystore['keystore_name']}\\n\"\n f\"Keystore Address: {keystore['keystore_address']}\\n\"\n f\"Keystore Filename: {keystore['keystore_filename']}\"\n )\n else:\n keystore_metadata = Config.get_keystore_metadata(keystore_name)\n if format == \"json\":\n print_json(keystore_metadata)\n else:\n print(\n f\"Keystore Name: {keystore_metadata['keystore_name']}\\n\"\n f\"Keystore Address: {keystore_metadata['keystore_address']}\\n\"\n f\"Keystore Filename: {keystore_metadata['keystore_filename']}\"\n )\n","sub_path":"icon_cli/commands/subcommands/config/keystore.py","file_name":"keystore.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11109947","text":"from bs4 import BeautifulSoup\nimport urllib.request\nfrom datetime import datetime, timedelta\nfrom dotenv import load_dotenv\nimport json\nimport requests\nimport pyperclip\nimport time\n\nimport os\nimport sys\nimport io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')\n\nfrom selenium import webdriver\n\n# Explicitly wait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\n# chromedriver_dir=r'C:\\Users\\multicampus\\Downloads\\chromedriver\\chromedriver.exe'\n# driver = webdriver.Chrome(chromedriver_dir)\n\nwith open('02_movie_list_save.json', 'r', encoding='UTF-8') as fr:\n movie_list = json.load(fr)\n\nwith open('03_genre.json', 'r', encoding='UTF-8') as fr:\n genre_dict = json.load(fr)\n\ndef getCodeFromURL(movie_url):\n equal_idx = movie_url.index('=')\n movie_code = movie_url[equal_idx+1:]\n return movie_code\n\ndef deleteBTag(movie_name):\n ck = False\n res = movie_name\n for idx in range(len(movie_name)-3):\n if movie_name[idx:idx+3] == '':\n btag_start = movie_name.index('')\n btag_end = movie_name.index('')\n res = movie_name[btag_start+3:btag_end]\n break\n return res\n\ndef findGenreCode(movie_genre):\n pk_list = []\n for genre_name in movie_genre:\n for genre in genre_dict:\n if genre['fields']['name'] == genre_name:\n pk_list.append(genre['pk'])\n return pk_list\n\ndef getInfoDict(atag_list, movie_info):\n atag_dict = {}\n atag_dict['watch_grade'] = ''\n atag_dict['open_date'] = movie_info['pubDate']\n atag_dict['genres'] = []\n for idx in range(len(atag_list)):\n info_atag = atag_list[idx]\n info_href = info_atag.get('href')\n if info_href != '#':\n question_idx = info_href.index('?')\n equal_idx = info_href.index('=')\n if question_idx and equal_idx:\n query = info_href[question_idx+1: equal_idx]\n if query == 'grade':\n if not atag_dict['watch_grade']:\n atag_dict['watch_grade'] = info_atag.text\n elif query == 'open':\n open_date = info_href[equal_idx+1:]\n if len(open_date) > 4:\n open_date = open_date[:4] + '-' + open_date[4:6] + '-' + open_date[6:]\n atag_dict['open_date'] = open_date\n elif query == 'genre':\n genre_pk = info_href[equal_idx+1:]\n atag_dict['genres'].append(int(genre_pk))\n if not atag_dict['watch_grade']:\n atag_dict['watch_grade'] = '정보없음'\n return atag_dict\n\ndef getPeopleInfo(people_area):\n directors = []\n actors = []\n people_json = {}\n if people_area:\n people_list = people_area.find_all('li')\n for people in 
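# [Editor's note] getCodeFromURL() above slices everything after the first '='.
# That works for single-parameter URLs like '?code=12345' but breaks as soon as
# the query string carries more than one parameter. A hedged stdlib alternative
# (the parameter name 'code' is only an example):
from urllib.parse import urlparse, parse_qs

def get_query_param(url, name):
    return parse_qs(urlparse(url).query).get(name, [None])[0]

assert get_query_param('/movie/bi/mi/basic.nhn?code=12345', 'code') == '12345'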
people_list:\n dt_tag = people.find('dt')\n dt_class = dt_tag.get('class')\n a_tag = people.find('a', {'class': 'tx_people'})\n if a_tag:\n a_href = a_tag.get('href')\n people_code = int(getCodeFromURL(a_href))\n if dt_class[0] == 'staff_dir':\n directors.append(people_code)\n elif dt_class[0] == 'staff_main':\n actors.append(people_code)\n \n people_json[people_code] = a_tag.get('title')\n\n people_dict = {}\n people_dict['directors'] = directors\n people_dict['actors'] = actors\n\n return people_dict, people_json\n\ndef getRunTime(tg_area):\n span_tags = tg_area.find_all('span')\n for span_tag in span_tags:\n span_text = span_tag.text\n if '분' in span_text:\n return span_text\n\ndef copyInput(self, xpath, input_text):\n pyperclip.copy(input_text)\n driver.find_element_by_xpath(xpath).click()\n ActionChains(driver).key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()\n time.sleep(1)\n\ndef getSummary(desc):\n cutpoint = 0\n new_desc = desc\n for idx in range(len(desc)-3):\n if desc[idx:idx+3] == '줄거리':\n cutpoint = idx\n new_desc = new_desc[cutpoint+5:]\n break\n for idx in range(len(new_desc)):\n if new_desc[idx] == '\\r':\n new_desc = new_desc[:idx] + '\\n' + new_desc[idx+1:]\n \n double_point = []\n for i in range(len(new_desc)-1):\n if new_desc[i] == '\\n' and new_desc[i+1] == '\\n':\n double_point.append(i+1)\n \n res = ''\n s_point = 0\n for idx in double_point:\n res += new_desc[s_point:idx]\n s_point = idx+1\n if s_point < len(new_desc):\n res += new_desc[s_point:]\n \n for idx in range(len(res)-4):\n if res[idx:idx+4] == '제작노트':\n res = res[:idx-1]\n break\n return res\n\n\ndef getTrailer(title, s_opt):\n res = ''\n YOUTUBE_KEY = os.getenv('YOUTUBE_KEY6')\n REQUEST_URL = 'https://www.googleapis.com/youtube/v3/search?'\n YOUTUBE_SEARCH = 'https://www.youtube.com/results?'\n options = {\n 'key': YOUTUBE_KEY,\n 'part': 'id',\n 'q': title + ' ' + s_opt,\n 'maxResults': 1,\n 'type': 'video',\n 'videoDuration': 'short'\n }\n search_option = {\n 'search_query': title + ' ' + s_opt,\n }\n # TITLE_TO_URL = urllib.parse.urlencode(search_option)\n # SEARCH_URL = YOUTUBE_SEARCH + TITLE_TO_URL\n # y_html = urllib.request.urlopen(SEARCH_URL)\n # y_soup = BeautifulSoup(y_html, 'lxml')\n # atags = y_soup.find_all('a')\n # if atags:\n # for atag in atags:\n # href_url = atag.get('href')\n # break\n url_option = urllib.parse.urlencode(options)\n SEARCH_URL = REQUEST_URL+url_option\n SEARCH_RESULT = json.loads(urllib.request.urlopen(SEARCH_URL).read())\n ITEM_LIST = SEARCH_RESULT['items']\n if ITEM_LIST:\n YOUTUBE_VIDEO_URL = 'https://www.youtube.com/embed/'\n for ITEM in ITEM_LIST:\n if ITEM['id'].get('videoId'):\n youtube_code = ITEM['id']['videoId']\n break\n res = YOUTUBE_VIDEO_URL + youtube_code\n return res\n\nchromedriver_dir=r'C:\\Users\\multicampus\\Downloads\\chromedriver\\chromedriver.exe'\ndriver = webdriver.Chrome(chromedriver_dir)\n\nload_dotenv(verbose=True)\nNAVER_CLIENT_ID = os.getenv('NAVER_CLIENT_ID')\nNAVER_CLIENT_SECRET = os.getenv('NAVER_CLIENT_SECRET')\nNAVER_REQUEST_URL = 'https://openapi.naver.com/v1/search/movie.json?'\nheader={\n \"X-Naver-Client-Id\":NAVER_CLIENT_ID,\n \"X-Naver-Client-secret\":NAVER_CLIENT_SECRET,\n}\n\nNAVER_MOVIE = 'https://movie.naver.com'\nNAVER_IMAGE_URL = 'https://movie.naver.com/movie/bi/mi/photoViewPopup.nhn?movieCode='\nNAVER_ID = os.getenv('NAVER_ID')\nNAVER_PW = os.getenv('NAVER_PW')\nmovie_cnt = 0\n\nfor detail_url, title in movie_list.items():\n movie_pk = getCodeFromURL(detail_url)\n check = True\n\n with 
open('04_complete_save.json', 'r', encoding='UTF-8') as fr:\n complete_movie = json.load(fr)\n if complete_movie.get(movie_pk):\n continue\n\n with open('04_notfound_save.json', 'r', encoding='UTF-8') as fr:\n not_found = json.load(fr)\n if not_found.get(movie_pk):\n continue\n\n with open('04_movies_save.json', 'r', encoding='UTF-8') as fr:\n movies = json.load(fr)\n with open('04_peoples_save.json', 'r', encoding='UTF-8') as fr:\n peoples = json.load(fr)\n\n url = NAVER_MOVIE + detail_url\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, 'lxml')\n\n driver.get(url)\n sel_source = driver.page_source \n sel_soup = BeautifulSoup(sel_source, 'html.parser')\n needlogin = sel_soup.find('form', {'id': 'frmNIDLogin'})\n\n if needlogin:\n IDxPath='//*[@id=\"id\"]'\n PasswordxPath='//*[@id=\"pw\"]'\n\n ID=NAVER_ID\n Password=NAVER_PW\n\n copyInput(driver, IDxPath, ID)\n copyInput(driver,PasswordxPath,Password)\n driver.find_element_by_xpath('//*[@value=\"로그인\"]').click()\n time.sleep(1)\n source = driver.page_source \n soup = BeautifulSoup(source, 'html.parser')\n \n req = requests.get(NAVER_REQUEST_URL+\"query=\"+title+\"&display=100\", headers = header).json()\n req_items = req['items']\n for item in req_items:\n if item['link'] == url:\n check = False\n tg_movie = item\n info_area = soup.find('dl', {'class': 'info_spec'})\n info_atag = info_area.find_all('a')\n genres = []\n watch_grade = ''\n info_dict = getInfoDict(info_atag, tg_movie)\n running_time = getRunTime(info_area)\n people_area = soup.find('div', {'class': 'people'})\n people_dict, new_people = getPeopleInfo(people_area)\n\n description = soup.find('div', {'class': 'story_area'})\n if description:\n summary = getSummary(description.text)\n else:\n summary = ''\n\n new_movie_info = {}\n new_movie_info['model'] = \"movies.movie\"\n new_movie_info['pk'] = int(movie_pk)\n\n new_fields = {\n 'name': '',\n 'name_eng': '',\n 'watch_grade': '',\n 'running_time': '',\n 'summary': '',\n 'open_date': '',\n 'trailer': '',\n 'poster': '',\n 'directors': [],\n 'genres': [],\n 'actors': []\n }\n\n\n new_fields['name'] = title\n new_fields['name_eng'] = tg_movie['subtitle']\n new_fields['summary'] = summary\n # if tg_movie['subtitle']:\n # new_fields['trailer'] = getTrailer(tg_movie['subtitle'], 'trailer')\n # else:\n # new_fields['trailer'] = getTrailer(title, '예고편')\n new_fields['trailer'] = ''\n image_url = NAVER_IMAGE_URL+movie_pk\n image_html = urllib.request.urlopen(image_url)\n image_soup = BeautifulSoup(image_html, 'lxml')\n image_tag = image_soup.find('img', {'id': 'targetImage'})\n \n if image_tag:\n image_src = image_tag.get('src')\n new_fields['poster'] = image_src\n elif tg_movie['image']:\n new_fields['poster'] = tg_movie['image']\n else:\n new_fields['poster'] = ''\n\n if running_time:\n new_fields['running_time'] = running_time[:-1]\n else:\n new_fields['running_time'] = '정보없음'\n\n for k, v in info_dict.items():\n new_fields[k] = v\n for k, v in people_dict.items():\n new_fields[k] = v\n for k, v in new_people.items():\n peoples[k] = v\n new_movie_info['fields'] = new_fields\n movies.append(new_movie_info)\n complete_movie[movie_pk] = title\n movie_cnt += 1\n break\n\n if check:\n not_found[movie_pk] = title\n with open('04_notfound_save.json', 'w', encoding='UTF-8') as fp:\n json.dump(not_found, fp, ensure_ascii=False, indent=4)\n\n with open('04_movies_save.json', 'w', encoding='UTF-8') as fp:\n json.dump(movies, fp, ensure_ascii=False, indent=4)\n\n with open('04_peoples_save.json', 'w', encoding='UTF-8') as fp:\n 
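# [Editor's note] The loop above re-reads and re-writes every *_save.json
# checkpoint on each movie, which makes the crawl resumable but leaves a window
# where a crash mid-write corrupts a file. A hedged sketch of an atomic variant
# (write to a temp file, then os.replace, which swaps atomically):
import json, os

def dump_json_atomic(obj, path):
    tmp = path + '.tmp'
    with open(tmp, 'w', encoding='UTF-8') as fp:
        json.dump(obj, fp, ensure_ascii=False, indent=4)
    os.replace(tmp, path)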
json.dump(peoples, fp, ensure_ascii=False, indent=4)\n \n with open('04_complete_save.json', 'w', encoding='UTF-8') as fp:\n json.dump(complete_movie, fp, ensure_ascii=False, indent=4)\n\n \ndriver.quit()\nwith open('04_movies.json', 'w', encoding='UTF-8') as fp:\n json.dump(movies, fp, ensure_ascii=False, indent=4)\n\nwith open('04_peoples.json', 'w', encoding='UTF-8') as fp:\n json.dump(peoples, fp, ensure_ascii=False, indent=4)\n\nprint('******************끝***************************')","sub_path":"wouldyouci_database/crawling/04_movies.py","file_name":"04_movies.py","file_ext":"py","file_size_in_byte":12130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"456558909","text":"from setup_files.mission_chutes import parachutes\nimport numpy as np\n\n\nclass MissionSetup(object):\n def __init__(self, name, n_phases, chutes, masses, break_alt, time_step, state, ke_limit=75, time_limit=90):\n assert isinstance(name, str), \"\\\"name\\\" must be a string\"\n assert isinstance(chutes, list), \"\\\"chutes\\\" must be a list of parachute names\"\n assert isinstance(masses, list), \"\\\"masses\\\" must be a list of masses per phase\"\n assert len(chutes) == n_phases, \"\\\"chutes\\\" must have the same number of parachutes as the number of phases\"\n assert len(masses) == n_phases, \"\\\"masses\\\" must have the same number of masses as the number of phases\"\n assert len(time_step) == n_phases, \"\\\"time_step\\\" must have the same number of masses as the number of phases\"\n assert isinstance(state, np.ndarray), \"\\\"state\\\" must be instantiate as an ndarray\"\n assert len(state.flatten()) == 2, \"\\\"state\\\" must be a (2, ) ndarray\"\n assert isinstance(ke_limit, (float, int)), \"\\\"ke_limit\\\" must be a float or int\"\n assert isinstance(time_limit, (float, int)), \"\\\"time_limit\\\" must be a float or int\"\n self.title = name\n self.max_ke = ke_limit\n self.max_time = time_limit\n self.chutes = []\n self.n = n_phases\n for chute in chutes:\n try:\n self.chutes.append(parachutes[chute])\n except IndexError:\n pass\n self.initial_state = state.flatten()\n self.masses = masses\n self.bc = break_alt\n self.dt = time_step\n","sub_path":"simulator/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357736290","text":"# coding: utf-8\n\n# some imports\nimport os\nimport numpy as np\nnp.random.seed(seed=1)\nfrom glob import glob\nfrom matplotlib import pyplot as plt\nfrom natsort import natsorted\nimport keras\nfrom keras.optimizers import Adam\n\n# import custom functions and viewing tools\nfrom VisTools import multi_slice_viewer0, mask_viewer0\nfrom KerasModel import BlockModel, dice_coef_loss, CalcVolumes\n\n#~# some parameters to set for inference #~#\n# model version to use\nmodel_version = 3\nmodel_weights_path = os.path.join(os.getcwd(),'BestModelWeights_v{:02d}.h5'.format(model_version))\n# set data directory\ndata_dir = os.path.join('/home','bashirmllab','output')\n\n# Find data in selected directory\nall_inputs = natsorted(glob(os.path.join(data_dir,\"input*.npy\")))\nall_targets = natsorted(glob(os.path.join(data_dir,\"target*.npy\")))\nprint('{} subjects found'.format(len(all_inputs)))\nprint('Loading data...')\ninputs = np.concatenate([np.load(f) for f in all_inputs])[...,np.newaxis]\ntargets = np.concatenate([np.load(f) for f in all_targets])[...,np.newaxis]\nprint(\"Data loaded\")\n\n# prepare 
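# [Editor's note] dice_coef_loss is imported from the project's KerasModel
# module, which is not shown here. For reference, the usual definition is
# 1 - Dice with Dice = 2|A∩B| / (|A| + |B|); this numpy sketch is my own
# illustration, not necessarily the project's exact implementation:
import numpy as np

def dice_coefficient(y_true, y_pred, eps=1e-7):
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + eps) / (np.sum(y_true) + np.sum(y_pred) + eps)

y = np.array([1, 1, 0, 0]); p = np.array([1, 0, 0, 0])
assert abs(dice_coefficient(y, p) - 2 / 3) < 1e-3  # 2*1 / (2 + 1)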
model\nmodel = BlockModel(inputs.shape,filt_num=16,numBlocks=4)\nmodel.compile(optimizer=Adam(), loss=dice_coef_loss)\n\n# load selected model weights\nmodel.load_weights(model_weights_path)\n# evaluate on test data\nprint('Evaluating data...')\nscore = model.evaluate(inputs,targets,verbose=1)\nprint(\"Test Dice score is {:.03f}\".format(1-score))\n\n# display some results\nprint('Generating masks...')\noutput = model.predict(inputs,batch_size=16)\nmask_viewer0(inputs[...,0],targets[...,0],output[...,0],name='Test Results')\nplt.show()\n\n# Set voxel dimensions (in cm)\nsx = .7\nsy = .1\nsz = .2\nvox_vol = sx*sy*sz\n\n# Calculate actual and predicted volume of all subjects\nresults = []\nfor inp_file,targ_file in zip(all_inputs,all_targets):\n # run function to perform calculations\n calc,truth = CalcVolumes(inp_file,targ_file,vox_vol,model)\n results.append((calc,truth))\n # display result\n print('---------------------------------------')\n print(inp_file[-30:])\n print(\"The calculated volume is {:.02f} cm^3\".format(calc))\n print(\"The actual volume is {:.02f} cm^3\".format(truth))\n\n\n# Plot correlation of predicted/actual volumes\nx = [r[1] for r in results]\ny = [r[0] for r in results]\nplt.figure()\nplt.plot(x,y,'ro')\nplt.plot(np.arange(0,np.max(x)),np.arange(0,np.max(x)),'k--')\nplt.xlim([0,np.max(x)])\nplt.ylim([0,np.max(y)])\nplt.ylabel('Calculated Volume (cc)')\nplt.xlabel('Actual Volume (cc)')\nplt.title(\"Correlation of Actual and Predicted Liver Volumes of Test Subjects\")\nplt.show()\ncc = np.corrcoef(x,y)[0,1]\nprint('Correlation coefficient is {:.03f}'.format(cc))\n\n","sub_path":"InferanceAndAnalysis.py","file_name":"InferanceAndAnalysis.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565944040","text":"from datetime import timedelta\nimport json\nimport logging\nimport random\nfrom django.conf import settings\nfrom whoare.whoare import WhoAre\nfrom django.utils import timezone\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import DjangoModelPermissions\nfrom rest_framework.authentication import TokenAuthentication, SessionAuthentication\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters\nfrom django.http import JsonResponse\nfrom django.db.models import F, Q\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import never_cache, cache_page, cache_control\nfrom rest_framework.decorators import action\nfrom dominios.models import Dominio, STATUS_DISPONIBLE, STATUS_NO_DISPONIBLE, PreDominio\nfrom zonas.models import Zona\nfrom cambios.models import CampoCambio\nfrom .serializer import (DominioSerializer, CambiosDominioSerializer, \n FlatDominioSerializer, FlatPreDominioSerializer,\n PreDominioSerializer)\n\nlogger = logging.getLogger(__name__)\n\nclass DominioViewSet(viewsets.ModelViewSet):\n queryset = Dominio.objects.all()\n serializer_class = DominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n filterset_fields = ['estado', 'nombre', 'expire', 'registrante__legal_uid']\n search_fields = ['nombre', 'registrante__legal_uid']\n ordering_fields = '__all__'\n ordering = ['nombre']\n\n @action(methods=['post'], detail=False)\n def update_from_whoare(self, request):\n data = request.data # require to be parsed\n 
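# [Editor's note] CalcVolumes (defined in the project's KerasModel module, not
# shown) receives a per-voxel volume of sx*sy*sz cm^3, so the core arithmetic
# is simply: volume = (number of positive voxels) * voxel volume. A hedged
# sketch of that step:
import numpy as np

def mask_volume(mask, voxel_volume_cc):
    """Volume in cc of a (thresholded) segmentation mask."""
    return float(np.sum(mask > 0.5)) * voxel_volume_cc

assert abs(mask_volume(np.ones((10, 10, 10)), 0.7 * 0.1 * 0.2) - 14.0) < 1e-6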
logger.info(f'update_from_whoare: {data}')\n \n real_data_str = data['domain']\n logger.info(f'real data: {real_data_str}')\n \n # final_data = ast.literal_eval(real_data_str)\n final_data = json.loads(real_data_str)\n \n if final_data.get('whoare_version', None) is None:\n return JsonResponse({'ok': False, 'error': 'Missing WhoAre version'}, status=400)\n \n if final_data['whoare_version'] < '0.1.40':\n return JsonResponse({'ok': False, 'error': 'Unexpected WhoAre version'}, status=400)\n \n wa = WhoAre()\n wa.from_dict(final_data)\n \n # if exists at pre-domains, remove it: we are done with this domain\n pres = PreDominio.objects.filter(dominio=wa.domain.full_name())\n if pres.count() > 0:\n pre = pres[0]\n pre.delete()\n\n # skipp not-real domains when it come from pre-domains\n if wa.domain.is_free:\n \n # Si ya existe entonces es un update, si no entonces no lo necesitamos\n dominio = Dominio.get_from_full_domain(wa.domain.full_name())\n if dominio is None:\n return JsonResponse({'ok': False, 'error': 'We expect a REGISTERED domain'}, status=400)\n\n \n zona, _ = Zona.objects.get_or_create(nombre=wa.domain.zone)\n dominio, dominio_created = Dominio.objects.get_or_create(\n nombre=wa.domain.base_name,\n zona=zona\n )\n \n cambios = dominio.update_from_wa_object(wa, just_created=dominio_created)\n res = {\n 'ok': True,\n 'created': dominio_created,\n 'cambios': cambios\n }\n return JsonResponse(res)\n\n\n@method_decorator(cache_page(settings.GENERAL_CACHE_SECONDS), name='dispatch')\nclass PreDominioViewSet(viewsets.ModelViewSet):\n queryset = PreDominio.objects.all()\n serializer_class = PreDominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n ordering = ['dominio']\n\n\n@method_decorator(never_cache, name='dispatch')\nclass NextPriorityDomainViewSet(viewsets.ModelViewSet):\n \n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n\n def get_queryset(self):\n # definir si mando uno de los posibles nuevos o de la base comun\n nuevos = PreDominio.objects.filter(priority__gt=0)\n pick = random.randint(1, 100)\n if pick > 70 or nuevos.count() == 0:\n res = self.get_from_domain()\n return res\n else:\n res = self.get_from_predomain()\n return res\n \n def get_from_domain(self):\n prioritarios = Dominio.objects.all().order_by('-priority_to_update')[:100]\n random_item = random.choice(prioritarios)\n \n # remove priority\n random_item.priority_to_update = 0\n random_item.next_update_priority = timezone.now() + timedelta(days=5)\n random_item.save()\n self.serializer_class = FlatDominioSerializer\n res = Dominio.objects.filter(pk=random_item.id)\n return res\n \n def get_from_predomain(self):\n \n nuevos = PreDominio.objects.filter(priority__gt=0)\n nuevos = nuevos.order_by('-priority', 'dominio')[:100]\n random_item = random.choice(nuevos)\n if random_item.priority == 0:\n # se acabaron\n return self.get_from_domain()\n \n random_item.priority = 0\n random_item.save()\n \n # si ya existe en dominios, omitir\n wa = WhoAre()\n domain_name, zone = wa.detect_zone(random_item.dominio)\n zona = Zona.objects.get(nombre=zone)\n\n dominios = Dominio.objects.filter(nombre=domain_name, zona=zona)\n if dominios.count() > 0:\n return self.get_from_domain()\n\n self.serializer_class = FlatPreDominioSerializer\n res = PreDominio.objects.filter(pk=random_item.id)\n return 
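# [Editor's note] The version guard above compares version strings
# lexicographically ('0.1.40' as plain str), which mis-orders multi-digit
# components: '0.1.100' < '0.1.40' as strings even though 0.1.100 is newer.
# A hedged fix sketch using tuple comparison (the helper name is mine):
def version_tuple(v):
    return tuple(int(part) for part in v.split('.'))

assert '0.1.100' < '0.1.40'                                # the string pitfall
assert version_tuple('0.1.100') > version_tuple('0.1.40')  # the intended order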
res\n\n\n@method_decorator(cache_page(settings.GENERAL_CACHE_SECONDS), name='dispatch')\nclass UltimosCaidosViewSet(viewsets.ModelViewSet):\n \"\"\" ultimo dominios que pasaron a estar disponibles \"\"\"\n\n def get_queryset(self):\n campo_caidos = CampoCambio.objects.filter(\n campo='estado',\n anterior=STATUS_NO_DISPONIBLE,\n nuevo=STATUS_DISPONIBLE)\\\n .order_by('-cambio__momento')[:100]\n ids = [cc.cambio.dominio.id for cc in campo_caidos]\n queryset = Dominio.objects.filter(id__in=ids)\n return queryset\n\n serializer_class = CambiosDominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n filterset_fields = ['estado', 'nombre', 'expire']\n search_fields = ['nombre']\n ordering_fields = '__all__'\n ordering = ['nombre']\n\n\n@method_decorator(cache_page(settings.GENERAL_CACHE_SECONDS), name='dispatch')\nclass UltimosRenovadosViewSet(viewsets.ModelViewSet):\n \"\"\" ultimo dominios que se renovaron \"\"\"\n\n def get_queryset(self):\n campos = CampoCambio.objects.filter(\n campo='dominio_expire',\n nuevo__gt=F('anterior'))\\\n .order_by('-cambio__momento')[:100]\n ids = [cc.cambio.dominio.id for cc in campos]\n queryset = Dominio.objects.filter(id__in=ids)\n return queryset\n\n serializer_class = CambiosDominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n filterset_fields = ['estado', 'nombre', 'expire']\n search_fields = ['nombre']\n ordering_fields = '__all__'\n ordering = ['nombre']\n\n\n@method_decorator(cache_page(settings.GENERAL_CACHE_SECONDS), name='dispatch')\nclass UltimosTranspasadosViewSet(viewsets.ModelViewSet):\n \"\"\" ultimo dominios que pasaron a nuevos dueños \"\"\"\n\n def get_queryset(self):\n campos = CampoCambio.objects.filter(\n campo='registrant_legal_uid',\n nuevo__isnull=False,\n anterior__isnull=False)\\\n .exclude(\n Q(nuevo__exact='') | Q(anterior__exact=''))\\\n .order_by('-cambio__momento')[:100]\n\n ids = [cc.cambio.dominio.id for cc in campos]\n queryset = Dominio.objects.filter(id__in=ids)\n return queryset\n\n serializer_class = CambiosDominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n filterset_fields = ['estado', 'nombre', 'expire']\n search_fields = ['nombre']\n ordering_fields = ['nombre', 'expire']\n ordering = ['nombre']\n\n\n@method_decorator(cache_page(settings.GENERAL_CACHE_SECONDS), name='dispatch')\nclass UltimosCambioDNSViewSet(viewsets.ModelViewSet):\n \"\"\" ultimo dominios que pasaron a nuevos dueños \"\"\"\n\n def get_queryset(self):\n campos = CampoCambio.objects.filter(\n campo='DNS1',\n nuevo__isnull=False,\n anterior__isnull=False)\\\n .exclude(\n Q(nuevo__exact='') | Q(anterior__exact=''))\\\n .order_by('-cambio__momento')[:100]\n\n ids = [cc.cambio.dominio.id for cc in campos]\n queryset = Dominio.objects.filter(id__in=ids)\n return queryset\n\n serializer_class = CambiosDominioSerializer\n permission_classes = [DjangoModelPermissions]\n authentication_classes = [TokenAuthentication, SessionAuthentication]\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter]\n filterset_fields = ['estado', 'nombre', 'expire']\n search_fields = ['nombre']\n ordering_fields = '__all__'\n 
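# [Editor's note] Each get_queryset above materializes `cc.cambio.dominio.id`
# in a list comprehension; without select_related that issues one extra query
# per CampoCambio row (the classic N+1 pattern). A hedged equivalent that keeps
# everything in SQL, shown for the "caidos" case:
ids = (CampoCambio.objects
       .filter(campo='estado', anterior=STATUS_NO_DISPONIBLE, nuevo=STATUS_DISPONIBLE)
       .order_by('-cambio__momento')
       .values_list('cambio__dominio_id', flat=True)[:100])
queryset = Dominio.objects.filter(id__in=list(ids))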
ordering = ['nombre']","sub_path":"djnic/dominios/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"151433744","text":"from datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext as _\nfrom django.db import models\n\nfrom store.inventory.models import Item\n\n\n# Create your models here.\n\n\nclass Transaction( models.Model ):\n\tuser = models.ForeignKey(User, verbose_name=\"Customer\")\n\ttotal_price = models.DecimalField( verbose_name=\"Total Price\", max_digits=8, decimal_places=2)\n\tdatetime = models.DateTimeField( verbose_name=\"Date-Time\", default=datetime.now, blank = True)\n\t\n\tclass Meta:\n\t\tverbose_name = \"Transaction\"\n\t\tverbose_name_plural = \"Transactions\"\n\t\t\n\tdef __unicode__(self):\n\t\treturn _(u'%s') % (self.datetime)\n\n\nclass TransactionItem( models.Model ):\n\titem = models.ForeignKey(Item, verbose_name=\"Item\")\n\ttransaction = models.ForeignKey(Transaction, verbose_name=\"Transaction\")\n\tquantity = models.PositiveIntegerField( verbose_name=\"Item Quantity\", max_length = 10, default=1)\n\t\n\tclass Meta:\n\t\tverbose_name = \"Transaction Item\"\n\t\tverbose_name_plural = \"Transaction Items\"\n\t\t\n\tdef __unicode__(self):\n\t\treturn _(u'%s') % (self.transaction)","sub_path":"store/transactions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506065134","text":"#! python2.7\nimport pyperclip\n\n'''\nProgram to add bullet point at the start of words\nto create a bulleted list\n'''\n\n\nclass bulletpoint:\n def __init__(self):\n self.text = str(pyperclip.paste())\n\n def addBullets(self):\n self.text = self.text.splitlines()\n for i in range(len(self.text)):\n self.text[i] = '*' + self.text[i]\n self.text = '\\n'.join(self.text)\n return self.text\n\n def copy(self, valuein):\n return pyperclip.copy(valuein)\n\nif __name__ == '__main__':\n bulletPoint = bulletpoint()\n a = bulletPoint.addBullets()\n bulletPoint.copy(a)\n","sub_path":"BulletPointAdder.py","file_name":"BulletPointAdder.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199437752","text":"from django import forms\nfrom django.core import validators\nfrom django.utils.datastructures import SortedDict\nfrom common import *\nfrom tiote.utils import *\n\n\nclass pgDbForm(forms.BaseForm):\n \n def __init__(self, templates=None, users=None, charsets=None, **kwargs):\n f = SortedDict()\n \n f['name'] = forms.CharField(widget=forms.TextInput(attrs={'class':'required'}))\n f['encoding'] = forms.ChoiceField(\n choices = fns.make_choices(pgsql_encoding),\n initial = 'UTF8',\n )\n f['template'] = forms.ChoiceField(\n choices = fns.make_choices(templates),\n required = False,\n )\n f['owner'] = forms.ChoiceField( choices = fns.make_choices(users) ,\n required = False, )\n \n self.base_fields = f\n forms.BaseForm.__init__(self, **kwargs)\n\n\nclass pgUserForm(forms.BaseForm):\n \n def __init__(self, groups=None, dbs=None, **kwargs):\n f = SortedDict()\n f['role_name'] = forms.CharField(\n widget = forms.TextInput(attrs={'class':'required'})\n )\n f['can_login'] = forms.CharField(\n widget = forms.CheckboxInput\n )\n f['password'] = forms.CharField(\n widget = 
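# [Editor's note] Transaction.datetime above defaults to datetime.now, a
# naive-datetime callable. If the project enables Django's USE_TZ, the usual
# choice is django.utils.timezone.now, which returns an aware datetime; a
# hedged, illustrative sketch of just that field (class name is mine):
from django.db import models
from django.utils import timezone

class TransactionSketch(models.Model):
    datetime = models.DateTimeField(verbose_name="Date-Time", default=timezone.now, blank=True)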
forms.PasswordInput,\n required = False\n )\n f['valid_until'] = forms.DateTimeField(\n widget = forms.TextInput(attrs={}),\n required = False)\n f['connection_limit'] = forms.IntegerField(\n widget=forms.TextInput(attrs={'class':'validate-integer'}),\n required = False)\n# f['comment'] = forms.CharField(\n# widget = forms.Textarea(attrs={'cols':'', 'rows':''}),\n# required = False)\n f['role_privileges'] = forms.MultipleChoiceField(\n required = False, widget = forms.CheckboxSelectMultiple,\n choices = fns.make_choices(pgsql_privileges_choices, True) \n )\n if groups:\n f['group_membership'] = forms.MultipleChoiceField(\n choices = fns.make_choices(groups, True), required = False,\n widget = forms.CheckboxSelectMultiple,)\n \n self.base_fields = f\n forms.BaseForm.__init__(self, **kwargs)\n\n\n\nclass pgSequenceForm(forms.Form):\n \n name = forms.CharField(\n widget=forms.TextInput(attrs={'class':'required'})\n )\n incremented_by = forms.IntegerField(\n required=False, \n widget = forms.TextInput(attrs={'class':'validate-integer'})\n )\n min_value = forms.IntegerField(\n required=False, \n widget = forms.TextInput(attrs={'class':'validate-integer'})\n )\n max_value = forms.IntegerField(\n required=False, \n widget = forms.TextInput(attrs={'class':'validate-integer'})\n )\n start_value = forms.IntegerField(\n required = False, \n widget = forms.TextInput(attrs={'class':'validate-integer'})\n )\n cache_value = forms.IntegerField(\n required =False, \n widget = forms.TextInput(attrs={'class':'validate-integer'})\n )\n can_cycle = forms.ChoiceField(\n label = 'Can cycle?', required = False,\n widget = forms.CheckboxInput()\n )\n\n\n\nclass pgTableEditForm(forms.BaseForm):\n\n def __init__(self, tbl_name=None, tbl_schema=None, \n # tbl_owner = None,\n users=[], schemas=[], tbl_comment='', tablespace=[], **kwargs):\n\n if tbl_name == None or tbl_schema == None:\n raise TypeError('tbl_name or tbl_schema is required')\n\n f = SortedDict()\n f['name'] = forms.CharField(\n max_length= 64,\n widget= forms.TextInput(attrs={'class':'required'}),\n initial = tbl_name,\n )\n\n # roles and users is not yet implemented in tiote\n # f['owner'] = forms.ChoiceField(\n # choices = fns.make_choices(users, begin_empty=True),\n # initial = tbl_owner\n # )\n\n f['schema'] = forms.ChoiceField(\n choices = fns.make_choices(schemas, begin_empty=True),\n initial = tbl_schema\n )\n\n # comment is not yet in use in tiote\n # f['comment'] = forms.CharField(required=False, \n # widget = forms.Textarea(attrs={'cols':0, 'rows':0})\n # )\n\n self.base_fields = f\n super(pgTableEditForm, self).__init__(**kwargs)\n\n\nclass TableVacuumForm(forms.Form):\n\n full = forms.BooleanField(required=False)\n analyze = forms.BooleanField(required=False)\n freeze = forms.BooleanField(required=False)\n\n\n","sub_path":"tiote/forms/pgforms.py","file_name":"pgforms.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37862989","text":"# Excepções\n\ntry:\n with open('ficheiro.txt', 'r') as file:\n print(file)\nexcept Exception as err:\n print('Ficheiro não encontrado')\n\n\nprint('A execução do programa continua')\n\n\nfim = False\nwhile not fim:\n try:\n a = input (\"Digite um numero: \")\n b = input (\"Digite outro numero: \")\n print(a/b)\n fim = True\n except ZeroDivisionError:\n print(\"Você tentou dividir por zero. Tente novamente\")\n except TypeError:\n print('Você usou um valor não numérico. 
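# [Editor's note] In the division loop above, input() returns str in Python 3,
# so `a/b` always raises TypeError and the ZeroDivisionError branch can never
# fire. A hedged corrected sketch: convert first, and also catch ValueError
# for non-numeric input (messages translated to English for clarity):
try:
    a = float(input("Enter a number: "))
    b = float(input("Enter another number: "))
    print(a / b)
except ZeroDivisionError:
    print("You tried to divide by zero. Try again.")
except ValueError:
    print("You entered a non-numeric value. Try again.")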
Tente novamente')","sub_path":"python_do_zero/excepcoes_erros.py","file_name":"excepcoes_erros.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529402472","text":"# Import required API: ________________________________________________________________________________\nimport numpy as np\nimport time\nimport os\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport datetime\n\nfrom torch import nn\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom model.bigtiff import BigtiffDataset\nfrom model.transforms import *\nfrom model.utils import *\nfrom model.dataset import partition\nfrom model.frcnn import FRCNN\nfrom torch.utils import tensorboard\nfrom config import Config\nfrom collections import namedtuple\nfrom utils import batchplot\n\n#-------------------------------------------------------------------------------\n# Class definition macro for modules with no back-propagation gradient \n# calculation.\n#-------------------------------------------------------------------------------\ndef nograd(fcn):\n def no_grad_fcn(*args, **kwargs):\n with torch.no_grad():\n return fcn(*args, **kwargs)\n return no_grad_fcn\n\nLoss = namedtuple('Loss', [\n 'rpn_box_loss' ,\n 'rpn_cls_loss' ,\n 'rcnn_box_loss',\n 'rcnn_cls_loss',\n 'total_loss' \n])\n\n#-------------------------------------------------------------------------------\n# AnchorTargetGenerator - Generates anchor targets wrt ground truth boxes\n#-------------------------------------------------------------------------------\nclass AnchorTargetGenerator(object):\n \n num_samples = 256\n pos_iou_thr = 0.7\n neg_iou_thr = 0.3\n ratio = 0.5\n \n @staticmethod\n def __call__(anchors, gt_bbox, inside):\n \"\"\" Assign ground truth bounding boxes to anchors for training the \n Region Proposal Network in Faster R-CNN.\n \"\"\"\n \n B = gt_bbox.size(0)\n if anchors.dim() == 2:\n anchors = anchors.expand(B,anchors.size(0),4).contiguous()\n \n # Compute ground truth boxes overlap with anchors and label them:\n iou = IoU(gt_bbox, anchors)\n gt_anchor_label, argmax_bbox = classify(\n iou, \n AnchorTargetGenerator.pos_iou_thr,\n AnchorTargetGenerator.neg_iou_thr\n )\n \n # Ignore out of bound anchors:\n gt_anchor_label[:, inside] = -1;\n \n # Randomly sample a fixed subset of the anchors:\n gt_anchor_label = randsample(\n gt_anchor_label,\n AnchorTargetGenerator.num_samples,\n AnchorTargetGenerator.ratio\n )\n \n # Calculate anchor targets:\n # NOTE: Select only the ground truth objects which have the maximum\n # overlap with each anchor to compute the target coefficients.\n boxes = gt_bbox.new(anchors.size()).zero_()\n for b in range(B):\n boxes[b] = gt_bbox[b, argmax_bbox[b], :]\n \n # Map computed targets to only contain inside boxes:\n targets = box2targets(anchors, boxes)\n gt_anchor_targets = gt_bbox.new(anchors.size()).zero_()\n gt_anchor_targets[:, inside, :] = targets[:, inside, :]\n \n return gt_anchor_targets, gt_anchor_label\n#_______________________________________________________________________________\n\n#-------------------------------------------------------------------------------\n# ProposalTargetGenerator - Generates proposal targets wrt ground truth boxes\n#-------------------------------------------------------------------------------\nclass ProposalTargetGenerator(object):\n \n num_samples = 128\n pos_iou_thr = 0.5\n neg_iou_thr = 0.5\n ratio = 0.25\n \n @staticmethod\n def 
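# [Editor's note] The nograd() decorator defined at the top of this file wraps
# a function body in torch.no_grad(), so decorated helpers never build autograd
# graphs. A hedged usage sketch (the function name is illustrative):
import torch

@nograd
def double(x):
    return x * 2

y = double(torch.ones(3, requires_grad=True))
assert y.requires_grad is False  # graph construction was disabled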
__call__(proposals, gt_bbox, gt_label):\n \"\"\" Assign ground truth targets to sampled proposals.\n \"\"\"\n \n B, N, _ = gt_bbox.size()\n S = ProposalTargetGenerator.num_samples\n \n # Concatenate proposals and ground truth boxes:\n proposals = torch.cat((proposals, gt_bbox), dim=1)\n iou = IoU(gt_bbox, proposals)\n proposal_label, argmax_bbox = classify(\n iou, \n ProposalTargetGenerator.pos_iou_thr,\n ProposalTargetGenerator.neg_iou_thr\n )\n \n # Randomly sample a fixed subset of the anchors:\n proposal_label = randsample(\n proposal_label,\n ProposalTargetGenerator.num_samples,\n ProposalTargetGenerator.ratio\n )\n \n # Select sampled proposals:\n keep = proposal_label >= 0\n sample_rois = proposals.new(torch.Size((B, S, 4)))\n gt_roi_label = gt_label.new(torch.Size((B, S)))\n gt_assigned = gt_bbox.new(torch.Size((B, S, 4)))\n \n # Get sampled rois and labels:\n for b in range(B):\n # NOTE: Offset the labels to account for 0 labeled background.\n gt_ind = argmax_bbox[b]\n labels = gt_label[b, gt_ind[keep[b]]]\n sample_rois[b] = proposals[b, keep[b], :]\n gt_roi_label[b] = labels\n gt_assigned[b] = gt_bbox[b, gt_ind[keep[b]]]\n\n # Compute sampled ROI targets wrt ground truth:\n gt_roi_targets = box2targets(sample_rois, gt_assigned)\n \n # Offset ROI lagels from [0 L-1] to [1 L] to allow for 0 to be \n # background class label.\n gt_roi_label = gt_roi_label + 1\n \n # NOTE: TO REMOVE\n sample_label = proposal_label[keep].view([B,S])\n \n return sample_rois, gt_roi_targets, gt_roi_label, sample_label\n#_______________________________________________________________________________\n\n#-------------------------------------------------------------------------------\n# Faster R-CNN Object Detetction Network Trainer for BigTIFF #-------------------------------------------------------------------------------\nclass FRCNNTrainer(nn.Module):\n \"\"\" Faster R-CNN Trainer for training an object detection network with a \n BigTIFF dataset.\n \"\"\"\n \n #---------------------------------------------------------------------------\n def __init__(self, dataset, logdir):\n \"\"\" Initialize the trainer with a training dataset.\n \"\"\"\n assert isinstance(dataset, BigtiffDataset)\n \n super(FRCNNTrainer, self).__init__()\n \n self.dataset = dataset\n self.logdir = logdir\n self.num_classes = len(dataset.get_classes())\n self.network = FRCNN(dataset.get_classes(), Config.IMAGE_SIZE)\n self.optimizer = self.__getoptimizer__()\n self.network.__initialize__(0, 0.01)\n \n # Define number of proposals generated by the RPN network:\n self.network.__set_numproposals__(12000, 2000)\n \n # Create tensorboard writer to log trainings:\n self.logger = tensorboard.SummaryWriter(logdir)\n \n # Setup a learning rate scheduler:\n self.lr_scheduler = optim.lr_scheduler.StepLR(\n self.optimizer, \n step_size = Config.LR_STEPSIZE,\n gamma = Config.GAMMA\n )\n \n # Create a dataloader:\n self.loader = DataLoader(\n self.dataset,\n batch_size = Config.BATCH_SIZE,\n shuffle = True ,\n num_workers = Config.NUM_WORKERS,\n collate_fn = self.dataset.collate,\n worker_init_fn = partition\n )\n self.iterator = None\n \n # Saved network path:\n self.saved_network = None\n \n # Initialize network inputs:\n image_size = ([Config.BATCH_SIZE] + Config.IMAGE_SIZE)\n roi_size = (Config.BATCH_SIZE,Config.NUM_ROI,4)\n label_size = (Config.BATCH_SIZE,Config.NUM_ROI)\n device = torch.device(Config.DEVICE)\n \n self.batch = torch.Tensor((image_size)).to(torch.float64)\n self.boxes = torch.Tensor((roi_size)).to(torch.float64)\n self.labels 
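# [Editor's note] ProposalTargetGenerator above matches proposals to ground
# truth by IoU (the IoU/classify helpers live in model.utils, not shown) and
# then offsets labels by +1 so that index 0 can mean background. For reference,
# a minimal single-pair IoU in (x1, y1, x2, y2) convention -- my sketch of the
# standard formula, not necessarily the project's version:
def iou_xyxy(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union

assert iou_xyxy((0, 0, 2, 2), (1, 1, 3, 3)) == 1 / 7  # inter=1, union=4+4-1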
= torch.Tensor((label_size)).to(torch.float64)\n \n self.batch = self.batch.to(Config.DEVICE).requires_grad_()\n self.boxes = self.boxes.to(Config.DEVICE).requires_grad_()\n self.labels = self.labels.to(Config.DEVICE).requires_grad_()\n \n #---------------------------------------------------------------------------\n @staticmethod\n def __targets_loss__(targets, gt_targets, gt_labels, sigma=3):\n \n # Compute weights:\n B, N = gt_labels.size()\n weight = torch.zeros(gt_labels.shape, dtype=torch.float32)\n weight.masked_fill_(gt_labels > 0, 1)\n weight = weight.view(B, N, 1).expand_as(gt_targets)\n \n loss = smoothL1Loss(targets, gt_targets, weight.detach(), sigma)\n \n # IMPORTANT: Normalize by total number of sampled ROIs.\n loss /= ((gt_labels >= 0).sum().float()) \n \n return loss\n \n #---------------------------------------------------------------------------\n @staticmethod\n def __class_loss__(cls_score, gt_label):\n \n # Compute batch loss:\n B, N, _ = cls_score.size()\n loss = 0\n for b in torch.arange(B):\n batch_score = cls_score[b]\n batch_label = gt_label[b].type(torch.LongTensor)\n loss += F.cross_entropy(batch_score, batch_label, ignore_index = -1)\n \n # Compute mean batch loss:\n loss = loss / B\n \n return loss\n \n #---------------------------------------------------------------------------\n def __getoptimizer__(self):\n \"\"\" Get optimizer for training\"\"\"\n lr = Config.LEARNING_RATE\n wd = Config.WEIGHT_DECAY\n params = []\n \n for key, value in dict(self.network.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]\n else:\n params += [{'params': [value], 'lr':lr, 'weight_decay': wd}]\n \n optimizer = torch.optim.Adam(params)\n return optimizer\n \n #---------------------------------------------------------------------------\n def forward(self, batch, gt_bbox, gt_label):\n \"\"\"Forward propagate the trainer and compute prediction losses\"\"\"\n \n B, C, H, W = batch.size()\n \n # Forward pass through the network, manually:\n #_______________________________________________________________________\n # Get low-level features from the convolution layers:\n features = self.network.VGG16FeatureExtractor(batch)\n \n # Get anchor proposals and targets from RPN:\n anchors, inside = self.network.RPN.__get_anchors__()\n proposals, rpn_score, rpn_targets = self.network.RPN(features)\n\n # Sample proposals and find ground truth targets:\n sample_proposals, gt_bbox_targets, gt_bbox_label, sample_label= ProposalTargetGenerator.__call__(\n proposals,\n gt_bbox,\n gt_label\n )\n \n # Get roi bounding boxes and classify using the RCNN head:\n roi_targets, roi_scores = self.network.Head(features, sample_proposals) \n \n gt_anchor_targets, gt_anchor_label = \\\n AnchorTargetGenerator.__call__(\n anchors,\n gt_bbox,\n inside\n )\n \n # Compute losses: \n #_______________________________________________________________________\n rpn_box_loss = self.__targets_loss__(\n rpn_targets,\n gt_anchor_targets,\n gt_anchor_label,\n sigma = 3\n )\n \n rpn_cls_loss = self.__class_loss__(rpn_score, gt_anchor_label)\n \n rcnn_box_loss = self.__targets_loss__(\n roi_targets,\n gt_bbox_targets,\n gt_bbox_label,\n sigma = 1\n )\n \n rcnn_cls_loss = self.__class_loss__(roi_scores, gt_bbox_label)\n \n losses = [rpn_box_loss, rpn_cls_loss, rcnn_box_loss, rcnn_cls_loss]\n losses = losses + [sum(losses)]\n \n return Loss(*losses)\n \n #---------------------------------------------------------------------------\n def 
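# [Editor's note] smoothL1Loss is imported from model.utils (not shown). The
# sigma argument above suggests the standard Faster R-CNN form:
#   0.5 * (sigma * x)**2      if |x| <  1 / sigma**2
#   |x| - 0.5 / sigma**2      otherwise
# A hedged elementwise sketch of that formula (continuous at the breakpoint):
import torch

def smooth_l1(x, sigma=3.0):
    s2 = sigma ** 2
    absx = x.abs()
    return torch.where(absx < 1.0 / s2, 0.5 * s2 * x ** 2, absx - 0.5 / s2)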
dostep(self, epoch, step):\n if step == 0:\n self.iterator = iter(self.loader)\n if epoch > 0:\n # Save the network and optimizer:\n self.save()\n \n # Load batch and copy to variables:\n batch_data = next(self.iterator)\n img_data = batch_data[0]\n roi_data = batch_data[1]\n roi_locs = roi_data[:,:,:4]\n roi_label = torch.squeeze(roi_data[:,:,4])\n \n # Copy data to the variables:\n self.batch.data = img_data.data\n self.boxes.data = roi_locs.data\n self.labels.data = roi_label.data\n \n # Clear previous step gradients:\n self.optimizer.zero_grad()\n \n loss = self.forward(self.batch, self.boxes, self.labels)\n loss.total_loss.backward()\n \n self.optimizer.step()\n if epoch > 0 & step == 0 & epoch % Config.LR_STEPSIZE == 0:\n self.lr_scheduler.step()\n \n if step % Config.DISPLAY_STEP == 0:\n self.log_results(epoch, step, loss)\n \n #---------------------------------------------------------------------------\n def log_results(self, epoch, step, loss):\n # Time to print some results...\n trainsize = int(self.dataset.__len__() / Config.BATCH_SIZE)\n N = epoch * trainsize + step\n \n # Covert losses from tensors to float:\n rpn_box_loss = loss.rpn_box_loss.item()\n rpn_cls_loss = loss.rpn_cls_loss.item()\n rcnn_box_loss = loss.rcnn_box_loss.item()\n rcnn_cls_loss = loss.rcnn_cls_loss.item()\n total_loss = loss.total_loss.item()\n \n # Print progress:\n stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n info =\"[%s][TRAIN][Epoch %2d][Step %4d:%4d] Loss: %.4f, LR: %.2e\"\n lr = self.optimizer.param_groups[0][\"lr\"]\n print(info % (stamp, epoch, step, trainsize, total_loss, lr))\n \n # Log loss to tensorboard:\n self.logger.add_scalar(\"LOSS/TRAIN/Total_Loss\" , total_loss , N)\n self.logger.add_scalar(\"LOSS/TRAIN/RPN_Class_Loss\" , rpn_cls_loss , N)\n self.logger.add_scalar(\"LOSS/TRAIN/RPN_BBox_Loss\" , rpn_box_loss , N)\n self.logger.add_scalar(\"LOSS/TRAIN/RCNN_Class_Loss\", rcnn_cls_loss, N)\n self.logger.add_scalar(\"LOSS/TRAIN/RCNN_BBox_Loss\" , rcnn_box_loss, N)\n \n #---------------------------------------------------------------------------\n def save(self, path=None):\n save_dict = dict()\n save_dict['model'] = self.network.state_dict()\n save_dict['optimizer'] = self.optimizer.state_dict()\n\n if path is None:\n path = 'checkpoint/frcnn_{0}'.format(time.strftime('%m%d%H%M'))\n \n save_dir = os.path.dirname(path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n torch.save(save_dict, path)\n self.saved_network = path\n \n #---------------------------------------------------------------------------\n def load(self, path):\n state_dict = torch.load(path)\n if 'model' in state_dict:\n self.network.load_state_dict(state_dict['model'])\n \n if 'optimizer' in state_dict:\n self.optimizer.load_state_dict(state_dict['optimizer'])\n \n return self \n#_______________________________________________________________________________","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":15655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92767480","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\n\nimport numpy as np\nfrom torch.autograd import Variable\n\n# from network.model.graph_front.graphFront import _graphFront\n# from network.model.tgcn.gcn import ConvTemporalGraphical\n# from network.model.roi_align.modules.roi_align import RoIAlignAvg\nfrom mypath import Path\n\nclass SpatioConv(nn.Module):\n\n def 
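# [Editor's note] In dostep() above, the scheduler guard
#     if epoch > 0 & step == 0 & epoch % Config.LR_STEPSIZE == 0:
# uses bitwise & where boolean `and` was almost certainly intended. Because &
# binds tighter than the comparisons, the expression reduces to `epoch > 0`,
# stepping the scheduler on every call after epoch 0. A hedged corrected form:
if epoch > 0 and step == 0 and epoch % Config.LR_STEPSIZE == 0:
    self.lr_scheduler.step()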
__init__(self, in_channels, out_channels, kernel_size=(1,1), stride=(1,1), \\\n padding=(0,0), bias=False):\n super(SpatioConv, self).__init__()\n\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,\n stride=stride, padding=padding, bias=bias)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.bn(self.conv(x))\n x = self.relu(x)\n return x\n\nclass BaseModel(nn.Module):\n\n def __init__(self):\n super(BaseModel, self).__init__()\n self.blocks = nn.ModuleList([])\n\n self.blocks += [SpatioConv(3, 96, (3,3), (2,2))]\n self.blocks += [nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))]\n self.blocks += [SpatioConv(96, 256, (3,3), (2,2))]\n self.blocks += [nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))]\n self.blocks += [SpatioConv(256, 384, (3,3), (1,1))]\n self.blocks += [SpatioConv(384, 384, (3,3), (1,1))]\n self.blocks += [SpatioConv(384, 256, (3,3), (1,1))]\n self.blocks += [nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))]\n # self.linear1 = nn.Linear(4096,4096)\n # self.linear2 = nn.Linear(4096,4096)\n self.linear1 = nn.Linear(768, 256)\n # self.linear2 = nn.Linear(256,256)\n\n def forward(self, x):\n for block in self.blocks:\n x = block(x)\n x = x.view(x.size(0), -1)\n x = self.linear1(x)\n # x = self.linear2(x)\n return x\n\nclass VGG(nn.Module):\n\n def __init__(self, num_classes=1000):\n super(VGG, self).__init__()\n self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, \\\n 'M', 512, 512, 512, 512, 'M']\n self.features = self.make_layers()\n self.fc6 = nn.Linear(12288, 4096)\n self.fc7 = nn.Linear(4096, 4096)\n # self.classifier = nn.Linear(2048, num_classes)\n # self._initialize_weights()\n\n def forward(self, x):\n # print(\"input\", x)\n x = self.features(x)\n x = x.view(x.size(0), -1)\n # print(\"VGG\", np.where(x>100))\n # x = self.fc6(x)\n # x = self.fc7(x)\n # x = self.classifier(x)\n return x\n\n # def _initialize_weights(self):\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n # if m.bias is not None:\n # m.bias.data.zero_()\n # elif isinstance(m, nn.BatchNorm2d):\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n # elif isinstance(m, nn.Linear):\n # n = m.weight.size(1)\n # m.weight.data.normal_(0, 0.01)\n # m.bias.data.zero_()\n\n def make_layers(self, batch_norm=False):\n layers = []\n in_channels = 3\n for v in self.cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\nclass R2DNet(nn.Module):\n r\"\"\"Forms the overall ResNet feature extractor by initializng 5 layers, with the number of blocks in\n each layer set by layer_sizes, and by performing a global average pool at the end producing a\n 512-dimensional vector for each element in the batch.\n\n Args:\n layer_sizes (tuple): An iterable containing the number of blocks in each layer\n block_type (Module, optional): Type of block that is to be used to form the layers. 
Default: SpatioTemporalResBlock.\n \"\"\"\n\n def __init__(self, group_num_classes, hidden_dim, embedding_dim):\n super(R2DNet, self).__init__()\n # self.base_model = BaseModel()\n self.base_model = VGG(num_classes=group_num_classes)\n # first conv, with stride 1x2x2 and kernel size 3x7x7\n self.conv1da = nn.Conv1d(in_channels=2048, out_channels=512, kernel_size=1, \\\n padding=0, stride=1, dilation=1, bias=False)\n self.conv1db = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=1, \\\n padding=0, stride=1, dilation=1, bias=False)\n self.pool = nn.AdaptiveAvgPool1d(1)\n\n self.hidden_dim = hidden_dim\n self.embedding_dim = embedding_dim\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, 1)\n self.avg_pool = nn.AdaptiveAvgPool1d(1)\n self.convLinear = nn.Conv1d(in_channels=512, out_channels=group_num_classes, kernel_size=1, \\\n padding=0, stride=1, dilation=1, bias=False)\n\n # self.hidden = (torch.autograd.Variable(torch.zeros(1,21,self.hidden_dim)).cuda(), \\\n # torch.autograd.Variable(torch.zeros(1,21,self.hidden_dim)).cuda())\n self.linear = nn.Linear(256, group_num_classes)\n\n def forward(self, x, dist):\n [N, T, M, C, H, W] = x.shape\n base_out = self.base_model(x.view(-1, C, H, W)).view(N*T, M, -1)\n\n # print(np.where(base_out>100))\n # base_out_c.detach()\n # base_out_c.no_grad()\n # print(\"base_out\", base_out[0][0])\n # print(\"base_out\", base_out[0][1])\n # x = x.view(-1, C, H, W)\n\n # dist = torch.zeros(N*T, M, M)\n # for i in np.arange(N*T):\n # for j in np.arange(M):\n # for k in np.arange(j+1,M):\n # # print(\"base_out\", base_out[i][j])\n # # print(\"base_out\", base_out[i][k])\n # dist[i,j,k] = F.pairwise_distance(base_out[i][j][:].unsqueeze(0), \\\n # base_out[i][k][:].unsqueeze(0))\n # dist[i,k,j] = dist[i,j,k]\n #\n # dist_des, dist_index = dist[i].sort(1,descending=True)\n # for l in np.arange(M):\n # dist[i][l][dist_index[:, 3:][l]] = 0\n # dist[i][l][dist_index[:, :3][l]] = 0.25\n #\n # # print(\"dist\",dist[i])\n # dist[i] += torch.eye(M)*0.25\n # dist[i] = self.normalize_digraph(dist[i].unsqueeze(0).cuda())\n # # print(\"dist\",dist[0])\n\n with torch.no_grad():\n base_out = Variable(base_out)\n\n node1 = self.conv1da(base_out.permute(0,2,1)).cuda()\n node1 = torch.bmm(dist.view(-1,12,12).float(), node1.permute(0,2,1))\n node1 = F.relu(node1)\n\n nodelinear = self.convLinear(node1.permute(0,2,1))\n pooled_feat = self.pool(nodelinear).squeeze(2)\n\n group_out = self.avg_pool(pooled_feat.view(N, -1, T))\n\n # gcn_out = torch.zeros(N*T, 8).cuda()\n # dist_num = dist_num.view(-1)\n # dist = dist.view(-1, 12, 12)\n # x = x.view(-1, M, C, H, W)\n # for i in range(N*T):\n # base_out = self.base_model(x[i, :dist_num[i]])\n # with torch.no_grad():\n # base_out = Variable(base_out)\n # node1 = self.conv1da(base_out[:dist_num.view(-1)[i]].unsqueeze(0).permute(0,2,1).contiguous())\n # node1 = torch.bmm(dist[i, :dist_num[i], :dist_num[i]].unsqueeze(0).float(), \\\n # node1.permute(0,2,1).contiguous())\n # node1 = F.relu(node1)\n # nodeLinear = self.convLinear(node1.permute(0,2,1).contiguous())\n # pooled_feat = self.pool(nodeLinear).squeeze(2)\n # gcn_out[i] = pooled_feat\n #\n # group_out = self.avg_pool(gcn_out.view(N, -1, T))\n\n #normalize\n # node1 = self.conv1da(base_out.permute(0,2,1).contiguous()).permute(0,2,1).contiguous()\n # node1 = torch.bmm(dist.squeeze(0)[::2,:,:].float(), node1).permute(0,2,1).contiguous()\n # node1 = F.relu(node1)\n # node2 = self.conv1db(node1).permute(0,2,1).contiguous()\n # node2 = torch.bmm(dist.squeeze(0)[::2,:,:].float(), 
node2).permute(0,2,1).contiguous()\n        # node2 = F.relu(node2)\n        #\n        # nodeLinear = self.convLinear(node2)\n        #\n        # pooled_feat = self.pool(nodeLinear).squeeze(2).view(N, T, -1)\n        # group_out = self.avg_pool(pooled_feat.view(N,-1,T))\n\n        # print(\"gcn\", gcn_out.shape)\n        # group_out, _ = self.lstm(gcn_out.view(N, T, -1))\n        # # print(\"group_out\", group_out.squeeze(2).squeeze(0))\n        # # group_cls_out = self.linear(group_out[:,-1,:])\n        # # group_cls_out = self.linear(group_out.squeeze(2))\n        # print(group_out.shape)\n        # return group_out[:,-1,:]\n        return group_out.squeeze(2)\n\n    def normalize_digraph(self, A):\n        Dl = torch.sum(A, 2)\n        num_node = A.shape[2]\n        Dn = torch.zeros(A.shape[0], num_node, num_node).cuda()\n        for i in range(A.shape[0]):\n            for j in range(num_node):\n                if Dl[i][j] > 0:\n                    Dn[i][j][j] = Dl[i][j]**(-1)\n        AD = torch.bmm(Dn,A)\n\n        return AD\n\nclass R2DClassifier(nn.Module):\n\n    def __init__(self, group_num_classes, hidden_dim=8, embedding_dim=8, pretrained=False):\n        super(R2DClassifier, self).__init__()\n\n        self.res2d = R2DNet(group_num_classes, hidden_dim, embedding_dim)\n\n        self.__init_weight()\n\n        if pretrained:\n            self.__load_pretrained_weights()\n\n    def forward(self, x, dist):\n        x = self.res2d(x, dist)\n        return x\n\n    def __load_pretrained_weights(self):\n\n        \"\"\"Initialize network.\"\"\"\n        corresp_name = {\n            # Conv1\n            \"module.features.0.weight\": \"res2d.base_model.features.0.weight\",\n            \"module.features.0.bias\": \"res2d.base_model.features.0.bias\",\n            # Conv2\n            \"module.features.2.weight\": \"res2d.base_model.features.2.weight\",\n            \"module.features.2.bias\": \"res2d.base_model.features.2.bias\",\n            # Conv3a\n            \"module.features.5.weight\": \"res2d.base_model.features.5.weight\",\n            \"module.features.5.bias\": \"res2d.base_model.features.5.bias\",\n            # Conv3b\n            \"module.features.7.weight\": \"res2d.base_model.features.7.weight\",\n            \"module.features.7.bias\": \"res2d.base_model.features.7.bias\",\n            # Conv4a\n            \"module.features.10.weight\": \"res2d.base_model.features.10.weight\",\n            \"module.features.10.bias\": \"res2d.base_model.features.10.bias\",\n            # Conv4b\n            \"module.features.12.weight\": \"res2d.base_model.features.12.weight\",\n            \"module.features.12.bias\": \"res2d.base_model.features.12.bias\",\n            # Conv5a\n            \"module.features.14.weight\": \"res2d.base_model.features.14.weight\",\n            \"module.features.14.bias\": \"res2d.base_model.features.14.bias\",\n            # Conv5b\n            \"module.features.16.weight\": \"res2d.base_model.features.16.weight\",\n            \"module.features.16.bias\": \"res2d.base_model.features.16.bias\",\n\n            \"module.features.19.weight\": \"res2d.base_model.features.19.weight\",\n            \"module.features.19.bias\": \"res2d.base_model.features.19.bias\",\n\n            \"module.features.21.weight\": \"res2d.base_model.features.21.weight\",\n            \"module.features.21.bias\": \"res2d.base_model.features.21.bias\",\n\n            \"module.features.23.weight\": \"res2d.base_model.features.23.weight\",\n            \"module.features.23.bias\": \"res2d.base_model.features.23.bias\",\n\n            \"module.features.25.weight\": \"res2d.base_model.features.25.weight\",\n            \"module.features.25.bias\": \"res2d.base_model.features.25.bias\",\n\n            \"module.features.28.weight\": \"res2d.base_model.features.28.weight\",\n            \"module.features.28.bias\": \"res2d.base_model.features.28.bias\",\n\n            \"module.features.30.weight\": \"res2d.base_model.features.30.weight\",\n            \"module.features.30.bias\": \"res2d.base_model.features.30.bias\",\n\n            \"module.features.32.weight\": \"res2d.base_model.features.32.weight\",\n            \"module.features.32.bias\": 
\"res2d.base_model.features.32.bias\",\n\n \"module.features.34.weight\": \"res2d.base_model.features.34.weight\",\n \"module.features.34.bias\": \"res2d.base_model.features.34.bias\",\n # fc6\n \"module.fc6.weight\": \"res2d.base_model.fc6.weight\",\n \"module.fc6.bias\": \"res2d.base_model.fc6.bias\",\n # fc7\n \"module.fc7.weight\": \"res2d.base_model.fc7.weight\",\n \"module.fc7.bias\": \"res2d.base_model.fc7.bias\",\n }\n\n p_dict = torch.load(Path.model_dir())\n print(\"p_dict\", [item for item in p_dict[\"state_dict\"]])\n # print(\"p_dict\", p_dict[\"state_dict\"])\n s_dict = self.state_dict()\n # for item in s_dict:\n # print(\"sdict\", item)\n for name in p_dict['state_dict']:\n if name not in corresp_name:\n print(\"not\", name)\n continue\n s_dict[corresp_name[name]] = p_dict[\"state_dict\"][name]\n self.load_state_dict(s_dict)\n\n def __init_weight(self):\n # print(\"self.modules\", self.modules)\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n # print(\"m\",m.weight)\n # nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n # nn.init.kaiming_normal_(m.weight)\n # print(\"m\",m.weight)\n elif isinstance(m, nn.Conv1d):\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n # nn.init.kaiming_normal_(m.weight)\n # print(\"m\",m.weight)\n # print(\"m\",m)\n # elif isinstance(m, nn.BatchNorm3d):\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight)\n # elif isinstance(m, nn.Conv1d):\n # nn.init.kaiming_normal_(m.weight)\n # # print(\"m\",m.weight)\n # elif isinstance(m, nn.BatchNorm3d):\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n # print(\"m\",m.weight)\n\nif __name__ == \"__main__\":\n import torch\n inputs = torch.rand(1, 32, 12, 3, 224, 224)\n net = R2DNet(8, 512, 512)\n # bbox = torch.rand(1, 16, 100, 4)\n outputs = net.forward(inputs)\n print(outputs.size())\n\n #\n # def _prepare_base_model(self, base_model):\n #\n # if 'resnet' in base_model or 'vgg' in base_model:\n # self.base_model = getattr(torchvision.models, base_model)(True)\n # self.base_model.last_layer_name = 'fc'\n # self.input_size = 224\n # self.input_mean = [0.485, 0.456, 0.406]\n # self.input_std = [0.229, 0.224, 0.225]\n # else:\n # raise ValueError('Unknown base model: {}'.format(base_model))\n","sub_path":"network/R2Dnet.py","file_name":"R2Dnet.py","file_ext":"py","file_size_in_byte":16229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96805330","text":"from Instrucciones.TablaSimbolos.Instruccion import *\n\nclass CreateIndex(Instruccion):\n def __init__(self, nombre, tipo, tabla, columnas, strGram, linea, columna, strSent):\n Instruccion.__init__(self,tipo,linea,columna, strGram, strSent)\n self.nombre= nombre\n self.tipo = tipo\n self.tabla = tabla \n self.columnas = columnas \n\n def ejecutar(self, tabla, arbol):\n #super().ejecutar(tabla,arbol)\n tabla.setIndice(self, arbol)\n \n def traducir(self,tabla,arbol,cadenaTraducida):\n temporal = arbol.generaTemporal()\n codigo = \"\\t\" + temporal + \" = \" + \"\\\"\" + self.strSent + \"\\\"\\n\"\n codigo += \"\\tFuncionesPara3D.ejecutarsentecia(\" + temporal + \")\\n\\n\"\n return 
codigo","sub_path":"parser/fase2/team07/Tytus_SQLPARSER_G8/Instrucciones/Sql_create/CreateIndex.py","file_name":"CreateIndex.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"456079819","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport time\nimport paho.mqtt.client as mqtt\nimport traceback\nimport Adafruit_BMP.BMP085 as BMP085\nimport sys\n\n\n######## r3 MQTT ############\n\nmyclientid_ = \"printerbone\"\nsensor = BMP085.BMP085(busnum=2)\nquery_sensor_intervall_ = 60\n\ndef sendR3Message(client, topic, datadict, qos=0, retain=False):\n client.publish(topic, json.dumps(datadict), qos, retain)\n\n\ndef decodeR3Payload(payload):\n try:\n return json.loads(payload.decode(\"utf-8\"))\n except Exception as e:\n print(\"Error decodeR3Payload:\" + str(e))\n return {}\n\n\ndef getAndPublishBMP085SensorValues(client):\n ts=int(time.time())\n sendR3Message(client, \"realraum/\" + myclientid_ + \"/temperature\",\n {\"Location\": \"PrinterBone\", \"Value\": sensor.read_temperature(), \"Ts\": ts}, retain=True)\n sendR3Message(client, \"realraum/\" + myclientid_ + \"/barometer\",\n {\"Location\": \"PrinterBone\", \"HPa\": sensor.read_pressure()/100.0, \"Ts\": ts}, retain=True)\n\n\ndef onMQTTDisconnect(mqttc, userdata, rc):\n if rc != 0:\n print(\"Unexpected disconnection.\")\n while True:\n time.sleep(5)\n print(\"Attempting reconnect\")\n try:\n mqttc.reconnect()\n break\n except ConnectionRefusedError:\n continue\n else:\n print(\"Clean disconnect.\")\n sys.exit()\n\n# Start zmq connection to publish / forward sensor data\ndef initMQTT():\n client = mqtt.Client(client_id=myclientid_)\n client.connect(\"mqtt.realraum.at\", 1883, keepalive=31)\n client.on_disconnect = onMQTTDisconnect\n return client\n\n\nif __name__ == '__main__':\n client = None\n last_get_sensor_data_ts = 0\n try:\n client = initMQTT()\n while True:\n if time.time() - last_get_sensor_data_ts > query_sensor_intervall_:\n getAndPublishBMP085SensorValues(client)\n last_get_sensor_data_ts = time.time()\n client.loop()\n\n except Exception as e:\n traceback.print_exc()\n finally:\n if isinstance(client, mqtt.Client):\n # client_stop_loop()\n client.disconnect()\n","sub_path":"scripts/bmp180sensor.py","file_name":"bmp180sensor.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35932331","text":"# MIT License\n#\n# Copyright (c) 2020 SCL team at Red Hat\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport json\nimport urllib3\nimport pytest\n\n\nfrom pathlib import Path\n\n\nfrom betka import config\n\nurllib3.disable_warnings()\n\n\nclass TestConfig:\n def test_dict_merge(self):\n dct = {\"a\": 1, \"b\": {\"b1\": 2, \"b2\": 3}}\n merge_dct = {\"a\": 1, \"b\": {\"b1\": 4}}\n\n config.dict_merge(dct, merge_dct)\n assert dct[\"a\"] == 1\n assert dct[\"b\"][\"b1\"] == 4\n assert dct[\"b\"][\"b2\"] == 3\n\n merge_dct = {\"a\": 1, \"b\": {\"b1\": 4, \"b3\": 5}, \"c\": 6}\n\n config.dict_merge(dct, merge_dct)\n assert dct[\"a\"] == 1\n assert dct[\"b\"][\"b1\"] == 4\n assert dct[\"b\"][\"b2\"] == 3\n assert dct[\"b\"][\"b3\"] == 5\n assert dct[\"c\"] == 6\n\n @pytest.mark.parametrize(\n \"bot_cfg_path\",\n [\n Path(__file__).parent.parent / \"data/bot-configs/bot-cfg.yml\",\n Path(__file__).parent.parent / \"data/bot-configs/bot-cfg-old-keys.yml\",\n ],\n )\n def test_load_configuration(self, bot_cfg_path):\n from_file = config.load_configuration(conf_path=bot_cfg_path)\n from_string = config.load_configuration(conf_str=bot_cfg_path.read_text())\n assert from_file == from_string\n\n # no arguments -> default config\n assert config.load_configuration()\n\n with pytest.raises(AttributeError):\n config.load_configuration(\"both args\", \"specified\")\n\n with pytest.raises(AttributeError):\n config.load_configuration(conf_path=\"/does/not/exist\")\n\n def test_load_configuration_with_aliases(self):\n my = {\"version\": \"2\", \"betka\": {\"enabled\": False}}\n conf = config.load_configuration(conf_str=json.dumps(my))\n # our 'betka' key has been merged into default's 'upstream-to-downstream' key\n assert conf[\"upstream-to-downstream\"][\"enabled\"] is False\n\n @pytest.mark.parametrize(\n \"cfg_url\",\n [\"https://github.com/sclorg/betka/raw/main/examples/cfg/bot-cfg.yml\"],\n )\n def test_fetch_config(self, cfg_url):\n urllib3.disable_warnings()\n c1 = config.fetch_config(\"betka\", cfg_url)\n c2 = config.fetch_config(\"upstream-to-downstream\", cfg_url)\n assert c1 == c2\n # make sure the 'global' key has been merged into all bots` keys\n assert \"notifications\" in c1\n\n def test_get_from_bot_config(self):\n assert config.get_from_bot_config(\"emails\", \"sender\")\n\n def test_betka_config_ok(self):\n path = Path(__file__).parent.parent / \"data/configs/ok-config\"\n conf = config.bot_config(path)\n assert conf[\"emails\"][\"smtp_server\"] == \"elm.street\"\n\n @pytest.mark.parametrize(\n \"data_path\", [\"no-config/\", \"empty-config/\", \"list-but-no-deployment/\"]\n )\n def test_betka_config_not_ok(self, data_path):\n path = Path(__file__).parent.parent / \"data/configs/\" / data_path\n with pytest.raises(Exception):\n config.bot_config(path)\n","sub_path":"tests/unit/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330911327","text":"import numpy as np\nimport tensorflow as tf\nimport pickle\nimport os\n\n\n\nvocab = None\n# vocab_size = 598633\nvocab_size = 125080+1\nembedding_size = 768\nembedding_matrix = np.random.uniform(-1, 1, size=(vocab_size, embedding_size))\n\n\n# for w, i in word_index.items():\n# v = embeddings.get(w)\n# if v is not None and i < vocab_size:\n# 
embedding_matrix[i] = v\n\n# for i in range(vocab_size):\n#     embedding_matrix[i] = np.ones(embedding_size) * i * 2\n\ndef my_initializer1(shape=None, dtype=tf.float32, partition_info=None):\n    assert dtype is tf.float32\n    return embedding_matrix\n    # W = tf.constant(embedding_matrix, name=\"W\")\n    # W = tf.Variable(embedding_matrix,trainable=False, name=\"W\",dtype=tf.float64)\n    # return W\n\ndef tagset2vocab(tagset):\n    global custom_tags\n    global embedding_matrix\n    # if os.path.exists(\"embedding_matrix.pkl\"):\n    #     embedding_matrix = pickle.load(open(\"embedding_matrix.pkl\",\"rb\"))\n    #     return\n\n    global vocab,vocab_size,embedding_size\n    vocab = list(tagset)\n    vocab_size = len(vocab) + 1\n    print(\"in tagset2vocab\")\n    print(\"vocab_size = \",vocab_size)\n    # import pdb\n    # pdb.set_trace()\n    custom_tags[0]['vocab_size']=vocab_size\n    pretrained_embedding_dict = pickle.load(open(\"videoID_vector.pkl.filterComment\",\"rb\"))\n    for key in pretrained_embedding_dict:\n        embedding_size=len(pretrained_embedding_dict[key]['vector'])\n        break\n    embedding_matrix = np.random.uniform(-1, 1, size=(vocab_size, embedding_size))\n    for idx, word in enumerate( vocab ):\n        if word in pretrained_embedding_dict:\n            vector = pretrained_embedding_dict[word][\"vector\"]\n            embedding_matrix[idx] = vector\n    pickle.dump(embedding_matrix,open(\"embedding_matrix.pkl\",\"wb\"))\n    return vocab\n\ndef fun1(message):\n    print(message)\n    return np.random.uniform(-1, 1, size=(vocab_size, embedding_size))\n\n\ncustom_tags = [\n{'tag_name':'7','vocab_size':598633,'embedding_size':768,'initializer_function':my_initializer1,'vocab_fun':tagset2vocab}\n]\n","sub_path":"custom_tag_config.py","file_name":"custom_tag_config.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"234837706","text":"import csv\nimport datetime\n\nclass NotionCsvImporter:\n    csv_file_name = None \n    def __init__(self, csv_file_name):\n        self.csv_file_name = csv_file_name\n\n    def convert_csv_to_list(self):\n        readed = self.__csv_reader()\n        if readed == False:\n            return False\n        return self.__date_sort(readed)\n\n    def __date_sort(self, results):\n        # return [[title => str, d => datetime, motion => str], [], ... 
]\n        results_sorted = sorted(results, key=lambda r: r[1])\n        return results_sorted\n\n    def __csv_reader(self):\n        results = []\n        if self.csv_file_name == None:\n            return False\n        with open('./notionData/' + self.csv_file_name, encoding=\"utf-8\") as f:\n            # CSV format:\n            # title, day(date and month), year, ???, motion\n            lines = f.readlines()\n            for i, line in enumerate(lines):\n                if i == 1: # looks like the header row\n                    continue\n                splitted_line = line.split(',')\n                if len(splitted_line) <= 5:\n                    continue\n                title = splitted_line[0]\n                day = splitted_line[1].replace(\"\\\"\", '')\n                year = splitted_line[2].replace(\"\\\"\", '').replace(\"\\t\", '').replace(\" \", \"\")\n                motion = splitted_line[5].replace(\"\\n\", '')\n                date_and_month = self.__day_to_date_and_month(day)\n                if date_and_month == False:\n                    continue\n                d = year + '/' + str(date_and_month['month']) + '/' + str(date_and_month['date'])\n                results.append([title, datetime.datetime.strptime(d, '%Y/%m/%d'), self.__convert_motion_to_value(motion)])\n        return results\n\n    def __day_to_date_and_month(self, raw_day):\n        raw_day_list = raw_day.split(\" \")\n        dic_str_month = {\n            \"Apr\": 4,\n            \"May\": 5,\n            \"Mar\": 3,\n            \"Feb\": 2,\n            \"Jan\": 1,\n            \"Dec\": 12,\n            \"Nov\": 11,\n            \"Oct\": 10,\n            \"Sep\": 9,\n            \"Aug\": 8,\n            \"Jul\": 7\n        }\n        if len(raw_day_list) <= 1:\n            return False\n        if raw_day_list[0] in dic_str_month:\n            month = dic_str_month[raw_day_list[0]]\n            return {'month': month, 'date': int(raw_day_list[1])}\n        else:\n            return False\n\n    def __convert_motion_to_value(self, motion):\n        dic_str_motion = {\n            '良い': 5, 'そこそこ良い': 4, '普通': 3,\n            'そこそこ悪い': 2, '悪い': 1\n        }\n        if motion in dic_str_motion:\n            return dic_str_motion[motion]\n        else:\n            return -1","sub_path":"notionImporter.py","file_name":"notionImporter.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"178510815","text":"from __future__ import annotations\nfrom game_objects.items import Item\nfrom game_objects.attributes import DynamicParameter\nfrom mechanics.events.items.ItemDestroyedEvent import ItemDestroyedEvent\n\nfrom my_utils.utils import tractable_value\n\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n    from game_objects.items import Blueprint, Material, QualityLevel\n\n#Design: ensure item has game variable as soon as it enters any interactable containers.\nclass WearableItem(Item):\n\n    durability = DynamicParameter(\"max_durability\", on_zero_callbacks=[ItemDestroyedEvent])\n    energy = DynamicParameter(\"max_energy\")\n\n    def __init__(self, name, item_type, *, blueprint:Blueprint=None, quality:QualityLevel=None, material:Material=None,\n                 max_durability=None, actives = None,\n                 game=None):\n        super().__init__(name, item_type, game=game)\n        assert isinstance(name, str)\n        self.blueprint = blueprint\n        self.quality = quality\n        self.material = material\n        self.max_durability = max_durability\n        self.rarity = self.calc_rarity()\n\n        self.max_complexity = self.material.magic_complexity * self.rarity if self.material else 0\n        self.max_energy = self.material.magic_complexity * self.rarity ** 2 if self.material else 50\n        self.active_enchantment = None\n        self.bonuses = []\n        self.actives = actives or []\n\n    @property\n    def price(self):\n        components_price = self.blueprint.price + ( self.material.price * self.blueprint.material_count)\n        return tractable_value( components_price * (0.8 + self.quality.rarity ** 4 / 5 ) )\n\n    @property\n    def durability_factor(self):\n        durability_factor = 0.5 + 0.5 * self.durability / self.max_durability\n        return 
durability_factor\n\n    def calc_rarity(self):\n        mr = self.material.rarity if self.material else 1\n        qr = self.quality.rarity if self.quality else 1\n        br = self.blueprint.rarity if self.blueprint else 1\n\n        total_rarity = mr * qr * br\n        return total_rarity\n\n\n    def on_equip(self, slot):\n        if slot.item_type == self.item_type:\n            for active in self.actives:\n                self.owner.give_active(active)\n\n    def on_unequip(self, slot):\n        if slot.item_type == self.item_type:\n            for active in self.actives:\n                self.owner.remove_active(active)\n\n\n\n","sub_path":"game_objects/items/WearableItem.py","file_name":"WearableItem.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"639052836","text":"# -*- coding: utf-8 -*-\n\nfrom dotenv import read_dotenv\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n    parser.addini(\"env_files\",\n                  type=\"linelist\",\n                  help=\"a line separated list of env files to parse\",\n                  default=['.env'])\n    parser.addini(\"env_override_existing_values\",\n                  type=\"bool\",\n                  help=\"override the existing environment variables\",\n                  default=False)\n\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_load_initial_conftests(args, early_config, parser):\n    _override = early_config.getini(\"env_override_existing_values\")\n    for filename in early_config.getini(\"env_files\"):\n        read_dotenv(filename, override=_override)\n","sub_path":"pytest_dotenv/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"375860225","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n#import redis\nimport scrapy\nimport redis\nfrom elasticsearch_dsl.connections import connections\n\nfrom game.models.es_types import GameType\n\n\nes = connections.create_connection(hosts=[\"127.0.0.1\"])\nredis_cli = redis.StrictRedis(host=\"localhost\")\n# redis_cli = redis.StrictRedis()\nclass GameItem(scrapy.Item):\n    # define the fields for your item here like:\n    # name = scrapy.Field()\n    pass\n\nclass Game(scrapy.Item):\n    url = scrapy.Field()\n    gameName = scrapy.Field()\n    # genre (action, adventure, etc.)\n    gameType = scrapy.Field()\n    # developer\n    developer = scrapy.Field()\n    # publisher\n    publisher = scrapy.Field()\n    # release date\n    publishDate = scrapy.Field()\n    gameLanguage = scrapy.Field()\n    gameTitle = scrapy.Field()\n    gamePlatform = scrapy.Field()\n    # game description\n    gameContext = scrapy.Field()\n\n    tag = scrapy.Field()\n\n\n\n    def save_to_es(self):\n        game = GameType()\n        game.url = self[\"url\"]\n        game.developer = self[\"developer\"]\n        game.gameContext = self[\"gameContext\"]\n        game.gameLanguage = self[\"gameLanguage\"]\n        game.gameName = self[\"gameName\"]\n        game.gamePlatform = self[\"gamePlatform\"]\n        game.gameTitle = self[\"gameTitle\"]\n        game.gameType = self[\"gameType\"]\n        game.publishDate = self[\"publishDate\"]\n        game.publisher = self[\"publisher\"]\n        game.suggest = gen_suggests(GameType._doc_type.index, ((game.gameName, 10),\n                                                               (game.publisher, 7),\n                                                               (game.gameTitle, 7),\n                                                               (game.gameContext,7),\n                                                               (game.developer, 7)))\n\n        # tag 1 means the data came from 3dm, tag 2 from douyou, tag 3 from youmin\n        if self[\"tag\"] == 1:\n            redis_cli.incr(\"threedm_count\")\n            #print(redis_cli.get(\"threedm_count\"),\"3dm_count\")\n        elif self[\"tag\"] == 2:\n            redis_cli.incr(\"douyou_count\")\n            #print(redis_cli.get(\"douyou_count\"), \"douyou_count\")\n        elif self[\"tag\"] == 3:\n            
redis_cli.incr(\"youmin_count\")\n #print(redis_cli.get(\"youmin_count\"), \"youmin_count\")\n\n\n game.save()\n\n\n # redis_cli.incr(\"game_count\")\n\n return\n\n\ndef gen_suggests(index, info_tuple):\n # 根据字符串生成搜索建议数组\n used_words = set()\n suggests = []\n for (text, weight) in info_tuple:\n if text:\n # 调用es的analyze接口分析字符串\n words = es.indices.analyze(index=index, body={\n \"analyzer\": \"ik_max_word\",\n \"filter\": [\"lowercase\"],\n \"text\": text\n })\n anylyzed_words = set([r[\"token\"] for r in words[\"tokens\"] if len(r[\"token\"]) > 1])\n new_words = anylyzed_words - used_words\n else:\n new_words = set()\n\n if new_words:\n suggests.append({\"input\": list(new_words), \"weight\": weight})\n\n return suggests","sub_path":"game/game/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546089296","text":"\"\"\" \r\n@author: lileilei\r\n@file: shujujiegou.py \r\n@time: 2018/4/23 11:22 \r\n\"\"\"\r\n# 对有序列表进行二分查找\r\ndef bin_search(data_set,val):\r\n low = 0\r\n high = len(data_set)-1\r\n while low <= high:\r\n mid = (low+high) // 2\r\n if data_set[mid] == val:\r\n return mid\r\n elif data_set[mid] < val:\r\n low = mid + 1\r\n else:\r\n high = mid - 1\r\n return None\r\n","sub_path":"study/shujujiegou.py","file_name":"shujujiegou.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368874385","text":"def which(n):\n # 接受输入\n numbers = [int(i) for i in input().strip().split()]\n m = int(input().strip())\n q = [int(i) for i in input().strip().split()]\n\n for i in range(1, n):\n numbers[i] += numbers[i - 1]\n\n for i in q:\n l, r = 0, n - 1\n ans = -1\n while l <= r:\n mid = (l + r) >> 1\n if numbers[mid] >= i:\n ans = mid\n r = mid - 1\n else:\n l = mid + 1\n print(ans + 1)\n\nn = int(input().strip())\n\nwhich(n)","sub_path":"code/offer/71.007-第几堆苹果.py","file_name":"71.007-第几堆苹果.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131747505","text":"from .model import db\nfrom pathlib import Path\nimport logging.config\n\nfrom flask import Flask\n\n\ndef configure_logging():\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"formatters\": {\n \"default\": {\n \"format\": \"[%(asctime)s] %(levelname)s in %(module)s: %(message)s\", # noqa\n }\n },\n \"handlers\": {\n \"wsgi\": {\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\",\n \"formatter\": \"default\",\n }\n },\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"wsgi\"]},\n # \"loggers\": {\n # \"sqlalchemy.engine\": {\"level\": \"INFO\"},\n # },\n }\n )\n\n\ndef create_app(config_overrides=None):\n configure_logging() # should be configured before any access to app.logger\n app = Flask(__name__)\n app.config.from_object(\"meal_options.default_settings\")\n app.config.from_prefixed_env()\n\n if config_overrides is not None:\n app.config.from_mapping(config_overrides)\n\n db.init_app(app)\n db_file = Path(app.config['SQLALCHEMY_DATABASE_URI'].strip('sqlite:///'))\n if not db_file.exists():\n print(f'creating database ${db_file}')\n with app.app_context():\n db.create_all()\n\n return 
app\n","sub_path":"s05-add-database/meal_options/meal_options/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607778975","text":"import logging\nimport threading\n\nfrom objectify_json import ObjectifyJSON\nfrom time import sleep\nfrom typing import Any, Dict, Tuple, Optional, List\n\nfrom constances import (\n APP_NAME,\n CHECK_RUN_STATUS_COMPLETED,\n CHECK_RUN_STATUS_IN_PROGRESS,\n CHECK_STATUS_FAILURE,\n CHECK_STATUS_RUNNING,\n CHECK_STATUS_NEUTRAL,\n CHECK_STATUS_SUCCESS,\n CHECK_RUN_TITLE,\n ESP_OVERRIDE_STRING,\n check_status_lookup,\n validations,\n)\nfrom gh_utils import get_check_runs, post_check_run_result, get_latest_sha, post_pull_request_review\n\nlog = logging.getLogger(__name__)\n\n\nclass ProcessCheckRun:\n def __init__(self, webhook: ObjectifyJSON):\n self.webhook = webhook\n\n # Test variables.\n self._result = False # failed\n # self._result = True # success\n\n self.base_url: str = str(webhook.repository.url)\n self.head_sha: str = str(webhook.pull_request.head.sha) if webhook.pull_request else str(webhook.check_suite.head_sha)\n\n self.checks: List[Check] = []\n\n self.link = \"https://crt.prod.linkedin.com/#/testing/executions/e49a13da-126a-4726-a045-09dbdbb68a2f/execution\"\n\n def generate_output_summary(self) -> str:\n \"\"\"Aggregate all the test results from checks.\"\"\"\n summary = \"\"\n\n for check in self.checks:\n summary += check.get_check_result() + \"\\n\"\n\n summary += f\"\\n***\\n#### [Check execution URL]({self.link})\"\n\n return summary\n\n def start(self) -> None:\n # Note: in the real implementation these threads will be done through spawning jobs through task API.\n start_check_runs_thread = threading.Thread(target=self.create_checks())\n start_check_runs_thread.start()\n\n def create_checks(self) -> None:\n \"\"\"Create all check objects, then start processing them.\"\"\"\n for test in validations:\n check = Check(test[\"name\"], self.webhook, self._result)\n self.checks.append(check)\n\n # Simulate some tests success, some failed.\n # self._result ^= True\n\n self.process_checks()\n\n def process_checks(self) -> None:\n \"\"\"Kick start every check and update the result as they're available.\n Then update the check run conclusion when every test is done.\n \"\"\"\n threads = set()\n\n for check in self.checks:\n thread = threading.Thread(target=check.process_check)\n threads.add(thread)\n\n # Start every check.\n for thread in threads:\n thread.start()\n\n # Update the results whenever any test is done.\n while threads:\n sleep(1)\n removing = set()\n for thread in threads:\n if not thread.is_alive():\n self.update_check_results()\n removing.add(thread)\n threads -= removing\n\n def determine_check_run_progress(self) -> Tuple[str, str]:\n \"\"\"Determine the progress of the check run.\n When all of the checks are done, the check run status will be completed with conclusion based on the check results.\n Otherwise, there's no conclusion yet and the status will be CHECK_RUN_STATUS_IN_PROGRESS.\n \"\"\"\n check_size = len(self.checks)\n conclusion = CHECK_STATUS_SUCCESS\n status = CHECK_RUN_STATUS_COMPLETED\n\n for check in self.checks:\n if check.status != CHECK_STATUS_RUNNING:\n check_size -= 1\n\n # Any of the check is failed, the entire check run will be considered as failed.\n if check.status == CHECK_STATUS_FAILURE:\n conclusion = CHECK_STATUS_FAILURE\n\n # if there's one check is not done yet, the entire 
check run is still in progress.\n        if check_size:\n            status = CHECK_RUN_STATUS_IN_PROGRESS\n            conclusion = \"\"\n\n        return conclusion, status\n\n    def update_check_results(self) -> None:\n        \"\"\"Update the entire check run result page.\"\"\"\n        conclusion, status = self.determine_check_run_progress()\n        post_check_run_result(name=APP_NAME,\n                              head_sha=self.head_sha,\n                              base_url=self.base_url,\n                              check_status=status,\n                              check_conclusion=conclusion,\n                              output_title=CHECK_RUN_TITLE,\n                              output_summary=self.generate_output_summary(),\n                              )\n\n\nclass Check:\n    def __init__(self, name: str, webhook: ObjectifyJSON, _result: bool):\n        self.name = name\n\n        self.base_url: str = str(webhook.repository.url)\n\n        self.check_suite_re_request = False\n        self.pull_number: int = int(str(webhook.pull_request.number))\n\n        # check run can be triggered by either pull request [opened, updated] or check suite [rerequested]\n        if not webhook.pull_request:\n            self.check_suite_re_request = True\n            self.pull_number = int(str(webhook.check_suite.pull_requests[0].number)) # same PR has the same number\n\n        self.status = CHECK_STATUS_RUNNING\n        self.link = \"\"\n        self.head_sha = webhook\n\n        # test variable\n        self._result = _result\n\n    def get_process_time(self) -> int:\n        for test in validations:\n            if self.name == test[\"name\"]:\n                return test[\"estimate_time\"]\n\n        log.warning(f\"Can't find the test {self.name}'s estimate time.\")\n        return 5\n\n    def get_link(self) -> str:\n        for test in validations:\n            if self.name == test[\"name\"]:\n                return test[\"good_link\"] if self._result else test[\"bad_link\"]\n\n        log.warning(f\"Can't find the test {self.name}'s link.\")\n        return \"\"\n\n    def process_check(self) -> None:\n        log.info(f\"Starting {self.name}\")\n        sleep(self.get_process_time()) # imitate the delay each test would take before the result comes back.\n        self.status = CHECK_STATUS_SUCCESS\n        self.link = self.get_link()\n\n        if not self._result:\n            self.status = CHECK_STATUS_FAILURE\n            # post a review requesting changes when the check fails\n            post_pull_request_review(self.base_url,\n                                     self.pull_number,\n                                     body=f\"{self.name} detects some error(s).\",\n                                     path=\"README.md\", position=1,\n                                     comment_body=\"This needs to be fixed.\",\n                                     )\n\n        log.info(f\"Finish {self.name}\")\n\n    def get_check_result(self) -> str:\n        link = f\"[See more details]({self.link})\\n\" if self.link else \"\"\n        return f\"### {self.name}\\n\" \\\n               f\"{check_status_lookup[self.status]['icon']} The test is {check_status_lookup[self.status]['text']}.\\n\" \\\n               f\"{link}\"\n\n\ndef neutralize_failed_check_runs(base_url: str, head_sha: str) -> None:\n    \"\"\"Go through the check runs of the given head SHA and replace the conclusion 'failure' with 'neutral'.\"\"\"\n    for run in get_check_runs(base_url, head_sha):\n        if run[\"app\"][\"name\"] == APP_NAME:\n            log.info(f\"Neutralizing the check run.\")\n            post_check_run_result(name=run[\"name\"],\n                                  head_sha=head_sha,\n                                  base_url=base_url,\n                                  check_status=CHECK_RUN_STATUS_COMPLETED,\n                                  check_conclusion=CHECK_STATUS_NEUTRAL,\n                                  output_title=f\"{CHECK_RUN_TITLE} - Overridden\",\n                                  output_summary=run[\"output\"][\"summary\"],\n                                  )\n\n\ndef comment_contains_override_string(comment: str) -> bool:\n    \"\"\"Determine whether the comment contains the override string.\"\"\"\n    for line in comment.splitlines():\n        if line == ESP_OVERRIDE_STRING:\n            return True\n\n    return False\n\n\ndef neutralize_latest_check_suite(webhook):\n    \"\"\"Neutralize all of the failed check runs of the last commit in the PR that the comment is from.\"\"\"\n    base_url = 
webhook.issue.repository_url\n head_sha = get_latest_sha(base_url, webhook.issue.number)\n\n if not comment_contains_override_string(str(webhook.comment.body)):\n log.debug(f\"Ignore the comment.\")\n return\n\n log.info(f\"ESP override string detected.\")\n\n if not head_sha:\n log.error(\"Abort neutralizing the latest check suite.\")\n return\n\n log.info(f\"Neutralizing {webhook.repository.full_name} PR {webhook.issue.number} head sha {head_sha}\")\n neutralize_failed_check_runs(base_url, head_sha)\n","sub_path":"checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82581625","text":"from __future__ import absolute_import\n\nimport os\nimport random\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom ..utils import media, console\n\n\ndef find_files(path, target_ext):\n if target_ext[0] != '.':\n target_ext = '.' + target_ext\n result_list = []\n for parent, dirs, files in os.walk(path):\n for file in files:\n name, ext = os.path.splitext(os.path.join(parent, file))\n if ext == target_ext:\n result_list.append(name + ext)\n return result_list\n\n\ndef generate_train_test(path, ext, rate=0.2, config=None):\n file_list = find_files(path, ext)\n train_list = []\n test_list = []\n for file_path in file_list:\n if random.random() < rate:\n test_list.append(file_path)\n else:\n train_list.append(file_path)\n print('Train data: ' + str(len(train_list)))\n print('Test data: ' + str(len(test_list)))\n\n train_set = accumulate_data(train_list, config)\n test_set = accumulate_data(test_list, config)\n return {\n 'train_set': train_set,\n 'test_set': test_set\n }\n\n\ndef pad_sequences(sequences, maxlen=None, dtype=np.float32,\n padding='post', truncating='post', value=0.):\n lengths = np.asarray([len(s) for s in sequences], dtype=np.int64)\n\n nb_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if len(s) == 0:\n continue # empty list was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % \n truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s is\\\n different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x, lengths\n\n\ndef accumulate_data(file_list, config):\n inputs = []\n outputs = []\n seq_len = []\n silence = []\n path_prefix = []\n if len(file_list) == 0:\n return None\n failed = 0\n for idx, file in enumerate(file_list):\n console.log(\n 'info', 'Process ' + str(idx + 1) + '/' + str(len(file_list)))\n res = None\n try:\n res = media.process_media(file, config)\n except:\n res = None\n if res is not None:\n assert(len(res[0]) == 75)\n inputs.append(res[0])\n outputs.append(res[1])\n seq_len.append(len(res[0]))\n 
silence.append(res[2])\n path_prefix.append(res[3])\n else:\n failed += 1\n # if idx > 10: break\n if failed > 0:\n console.log('error', 'Failed', str(failed) + '\\n')\n if len(inputs) == 0:\n return None\n # padding the data at end\n # inputs, _ = pad_sequences(inputs, dtype=np.float64)\n # outputs, seq_len = pad_sequences(outputs, dtype=np.float32)\n inputs = np.asarray(inputs, dtype=np.float32)\n outputs = np.asarray(outputs, dtype=np.float32)\n seq_len = np.asarray(seq_len, dtype=np.int32)\n silence = np.asarray(silence, dtype=np.float32)\n assert(inputs.dtype == np.float32)\n assert(outputs.dtype == np.float32)\n\n return {\n 'inputs': inputs,\n 'outputs': outputs,\n 'seq_len': seq_len,\n 'silence': silence,\n 'path_prefix': path_prefix\n }\n\n\ndef save(path, data):\n with open(path, 'wb') as file:\n pickle.dump(data, file)\n\n\ndef load(path):\n with open(path, 'rb') as file:\n return pickle.load(file)\n\n\ndef process(\n config,\n root_path='../../dataset/GRID/video/s1/', ext='mpg', test_rate=0.2,\n train_path='data/train.pkl', test_path='data/test.pkl'):\n\n if os.path.exists(train_path) and os.path.exists(test_path):\n res = ''\n while res.lower() != 'y' and res.lower() != 'n':\n res = input('Find train and test sets, replace? [y/[N]]').lower()\n if res == '':\n res = 'n'\n if res == 'n':\n return\n\n media.init_dde_fbx()\n sets = generate_train_test(root_path, ext, test_rate, config)\n save(train_path, sets['train_set'])\n save(test_path, sets['test_set'])\n\n\ndef merge(path_list, path):\n all_set = {}\n for p in path_list:\n with open(p, 'rb') as file:\n the_set = pickle.load(file)\n for k in the_set:\n if k in all_set:\n all_set[k] = np.append(all_set[k], the_set[k], axis=0)\n else:\n all_set[k] = np.asarray(the_set[k])\n with open(path, 'wb') as file:\n pickle.dump(all_set, file)\n\n\nif __name__ == '__main__':\n # process()\n train_data = load('data/train.pkl')\n data = train_data['outputs']\n train_data['label'] = []\n count = 0\n all = 0\n for i in range(data.shape[0]):\n # d = data[i] ** 0.5\n d = data[i]\n label = []\n for j in range(d.shape[0]):\n max_v = np.amax(d[j])\n if max_v < 0.04:\n print('!')\n count += 1\n all += 1\n print(count, '/', all)\n # data = data ** 0.25\n print(data.shape)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(data, bins=20)\n plt.show()\n","sub_path":"GAN/pack/backend/data/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"229074021","text":"def fasta_reader(filepath):\n \"\"\"\n Reads in a fasta file located at the filepath and returns a list of the sequences in the file\n\n Arguments:\n filepath::str\n The path to the fasta file that is being read\n \n Returns:\n sequences::[string]\n List of strings representing the DNA sequences encoded in the fasta file\n \"\"\"\n sequences = []\n with open(filepath, \"r\") as infile:\n seq = ''\n for i, line in enumerate(infile):\n if line.startswith(\">\"):\n if i != 0:\n sequences.append(seq)\n seq = ''\n else:\n seq += line.strip().upper()\n sequences.append(seq)\n \n return sequences\n\n\ndef line_reader(filepath):\n \"\"\"\n Reads in a sequence file holding one sequence per line. 
Returns a list of these sequences\n \n Arguments:\n filepath::str\n The path to the sequence file that is being read\n \n Returns:\n sequences::[string]\n List of strings representing the DNA sequences encoded in the sequence file\n \"\"\"\n sequences = []\n with open(filepath, \"r\") as infile:\n for line in infile:\n sequences.append(line.strip().upper())\n \n return sequences\n\ndef write_output(outfile_path, sequences, predictions):\n \"\"\"\n Specific function for writing out the data for part 5 in the requested format (2 columns, first column is the sequence, second is the predicted value)\n \n Arguments:\n outfile_path::str\n The desired path of the output file\n sequences::[str]\n List of the sequences (in DNA string format) that have had predictions performed for them.\n predictions::[float]\n List of predicted values for the sequences in \"sequences\". Must be in the same order and the same length of the sequences list\n\n Returns:\n None\n \"\"\"\n with open(outfile_path, 'w') as f:\n for seq, pred in zip(sequences,predictions):\n f.write(seq+'\\t'+str(pred)+'\\n')\n ","sub_path":"scripts/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"279720229","text":"# class grannies:\n\n# def __init__(self, arr, mmin, mmax):\n# self.ind = arr\n# self.min = mmin\n# self.max = mmax\n\n# def solve2(some_gr):\n# ans = 0\n# cum_sum = 0 # nothing for the initial granny\n# candidate_ans = 0\n# for counter in range(some_gr.min, some_gr.max + 1): #count cumulative sum\n# new = some_gr.ind[counter]\n# if new > 0: \n# cum_sum += new\n# if counter <= cum_sum:\n# candidate_ans = cum_sum\n# candidate_ans += 1\n# return candidate_ans \n\ndef solve(n, ss):\n cum_sum = 0 # nothing for the initial granny\n candidate_ans = 0\n index = indices = [0]*(2*10**5 + 5)\n lowest = 10**6\n highest = 0\n inp_arr = ss.split(' ')\n nonzero_indices= []\n sum_available = 0\n for under_s in inp_arr:\n a = int(under_s)\n if a <= n+1:\n sum_available += 1\n index[a] += 1\n if lowest > a:\n lowest = a\n if highest < a:\n highest = a\n nonzero_indices.append(a)\n else:\n n -= 1\n if n == 1:\n if nonzero_indices[0] == 1:\n candidate_ans = 1\n else:\n candidate_ans = 0\n else:\n if highest <= sum_available:\n candidate_ans = sum_available\n else:\n nonzero_set = sorted(set(nonzero_indices))[::-1]\n i = 0\n cum_sum = sum_available\n while i < len(nonzero_set):\n counter = nonzero_set[i]\n if counter <= cum_sum:\n candidate_ans = cum_sum\n i = len(nonzero_set)\n else:\n new = index[counter]\n cum_sum -= new\n i += 1\n # for counter in nonzero_set: #count cumulative sum \n candidate_ans += 1\n return candidate_ans\n\n\nt = int(input())\nfor ___ in range(t):\n n = int(input())\n if n == 1:\n if int(input()) == 1:\n answer = 2\n else:\n answer = 1\n else:\n indices = [0]*(2*10**5 + 5)\n lowest = 10**6\n highest = 0\n inp_str = input()\n answer = solve(n, inp_str)\n print(answer)","sub_path":"Round_645_Div_2/B/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529105205","text":"from twisted.web.server import Site\nfrom twisted.web.resource import Resource\nfrom twisted.internet import reactor\n\nimport cgi\n\nclass FormPage(Resource):\n def render_GET(self, request):\n return '''\n \n
<form method=\"POST\">\n        Enter your mobile number: <input type=\"text\" name=\"mobile\">\n        <input type=\"submit\" value=\"Submit\">\n        </form>\n
\n        '''\n\n    def render_POST(self, request):\n        self.ext = (cgi.escape(request.args[\"mobile\"][0]),)\n        return 'You submitted: %s' % (cgi.escape(request.args[\"mobile\"][0]),)\n\n\nroot = Resource()\nroot.putChild(\"form\", FormPage())\nfactory = Site(root)\nreactor.listenTCP(8880, factory)\nreactor.run()\n","sub_path":"freeswitch/otp_verify/test_otp.py","file_name":"test_otp.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"651516666","text":"from manim_imports_ext import *\r\n\r\nclass TrieScene(AlgoScene):\r\n    def __init__(self, **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.data = [\"to\", \"tea\", \"ted\", \"ten\", \"inn\", \"in\"]\r\n        self.data_find = [\"hello\", \"te\", \"ted\", \"ins\"]\r\n\r\n    def construct(self):\r\n        self.go_speed_up()\r\n        self.start_logo(subtitle=\"Trie (prefix tree)\", animate=True)\r\n        self.init_message(\"A trie is also called a prefix tree or dictionary tree\")\r\n\r\n        vector = AlgoVector(self, datas=self.data)\r\n        self.add(vector)\r\n        self.play(vector.shift, UP*2)\r\n\r\n        self.show_message(\"As shown above, there are 6 words\")\r\n        self.show_message(\"How does a prefix tree store these words?\")\r\n        self.show_message(\"Let's first look at the trie's insert operation\")\r\n\r\n        self.reset_speed_up()\r\n        tree = AlgoTrieTree(self)\r\n        for w in self.data:\r\n            tree.add_word(w)\r\n        \r\n        self.add(tree)\r\n\r\n        for w in self.data_find:\r\n            v = tree.query(w)\r\n            print(\"word %s %s\"%(w, v))\r\n\r\n        self.wait(2)\r\n","sub_path":"animations/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"205902095","text":"import datetime\nimport csv\nimport dateutil.relativedelta\nimport json\nfrom django.utils.translation import ugettext_lazy as _\nfrom random import randint\nfrom mws import mws\nfrom abc import abstractmethod\n\nfrom .abstract import Abstract\n\nclass Amazon(Abstract):\n    # CANADA = 'CA'\n    # US = 'US'\n    # MEXICO = 'MX'\n\n    # marketplaceids = (\n    #     (CANADA, _('A2EUQ1WTGCTBG2')),\n    #     (US, _('ATVPDKIKX0DER')),\n    #     (MEXICO, _('A1AM78C64UM0Y8'))\n    # )\n    \n    def __init__(self, credentials):\n        \n        super().__init__(credentials)\n        self.credentials = credentials\n        \n    @abstractmethod\n    def get_orders(self):\n        pass\n\nclass AmazonCANADA(Amazon):\n\n    def __init__(self, credentials):\n        super().__init__(credentials)\n        self.credentials = credentials\n    \n    marketplaceid = 'A2EUQ1WTGCTBG2'\n    \n    def get_orders(self):\n        conn = mws.Orders(self.credentials)\n        #conn.list_orders(marketplaceids)\n        #print(conn.list_orders(marketplaceids))\n        \n        #, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None,\n        #lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(),\n        # payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'):\n\n\n    \nclass AmazonUS(Amazon):\n\n    def __init__(self, credentials):\n        credentials['access_key'] = 'AKIAJYMIF6L5JOCWLIBA'\n        credentials['secret_key'] = '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'\n        credentials['account_id'] = 'A2GGQ2N88V80Z0'\n        \n        super().__init__(credentials)\n        \n        self.credentials = credentials\n    \n    marketplaceid = 'ATVPDKIKX0DER'\n    \n    def get_amazon_order_ids(self, supplier_id, ship_from_contact_id, records, created_after):\n        conn = mws.Orders(**self.credentials)\n        # print(records, '////////////////')\n        items = conn.list_order_items(list(records.keys())[0])\n        print(items.parsed.OrderItems)\n        # orders = conn.list_orders([self.marketplaceid,], created_after).parsed.Orders.Order\n        # data = {'client_order_detail': [], 
'order': [], 'shipping_address': [], 'lineitem': []}\n # for order in orders:\n # if order.OrderStatus == 'Shipped':\n # print(order.AmazonOrderId in list(records.keys()))\n # if order.AmazonOrderId in list(records.keys()):\n # print(order, '*********************************1**************************', records[order.AmazonOrderId])\n\n # data['client_order_detail'].append({\n # #'source_order_id': order.AmazonOrderId,\n # #'customer_id': '',\n # #'order_created_date': order.PurchaseDate,\n # 'order_paid_date': order.LastUpdateDate,\n # 'order_total_cost': order.OrderTotal.Amount,\n # 'order_subtotal': '',\n # 'seller_email': '',\n # 'shipping_address_id': '',\n # 'shipping_service_name' : '',\n # 'shipping_service_cost': records[order.AmazonOrderId]['shipping-price'],\n\n # 'buyer_email': order.get('BuyerEmail', None),\n # 'buyer_name': order.BuyerName,\n \n # })# for order in orders if orders]\n # data['order'].append({\n # 'order_number':'{}-{}-{}-{}'.format('ebay', datetime.datetime.today().strftime('%Y%m%d'), order.BuyerName, randint(100,999)),\n # 'terms': '',\n # 'status': order.OrderStatus,\n # 'ship_to_contact_id': '',\n # 'shipped_date': order.LatestShipDate,\n # 'estimated_delivery': '',\n # 'delivery_date': '',\n # 'buyer_id': '',\n # 'supplier_id': supplier_id,\n # 'order_date': order.PurchaseDate,\n # 'payment_reference': '',\n # 'release_order' : True,\n # 'shipment_number': '',\n # 'carrier': '',\n # 'ship_from_contact_id': ship_from_contact_id,\n # 'source': 'Amazon'\n # })# for order in orders if orders]\n\n \n # data['shipping_address'].append({\n # 'name': order.ShippingAddress.Name,\n # 'city_name': order.ShippingAddress.City,\n # 'address_id': '',\n # 'phone_number': '',\n # 'street1': order.ShippingAddress.AddressLine1,\n # 'street2': '',\n # 'postal_code': order.ShippingAddress.PostalCode,\n # 'state': order.ShippingAddress.StateOrRegion,\n # 'country': order.ShippingAddress.CountryCode,\n # 'address_owner': 'Amazon'\n # })# for order in orders if orders]\n\n # data['lineitem'].append({\n # 'quantity': records[order.AmazonOrderId]['quantity'],\n # 'name': records[order.AmazonOrderId]['product-name'],\n # 'order_id': '',\n # 'product_variant_id': '',\n # 'grade_id': '',\n # 'price': records[order.AmazonOrderId]['item-price'],\n # 'source_item_id': records[order.AmazonOrderId].OrderItemId\n # })\n \n \n return {} #data\n\n def get_line_item_details(self, conn, data):\n for detail in data['client_order_detail']:\n lineitems = conn.list_order_items(detail['source_order_id']).parsed.OrderItems.OrderItem\n lineitems = lineitems if type(lineitems)==list else [lineitems]\n for lineitem in lineitems:\n print('in')\n \n return data\n \n def get_date(self):\n now = datetime.datetime.now()\n previous_month = now + dateutil.relativedelta.relativedelta(days=-1)\n return previous_month.strftime('%Y-%m-%d')\n \n def check_report_scheduled_or_not(self, conn, report_type):\n data = conn.get_report_list().parsed\n report = {}\n for var in data.ReportInfo:\n if var.Acknowledged=='false' and var.ReportType==report_type:\n report = var\n \n return False if not len(report.keys()) else report\n \n def get_reports(self):\n conn = mws.Reports(**self.credentials) \n report = self.check_report_scheduled_or_not(conn, '_GET_FLAT_FILE_ORDERS_DATA_')#'_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_')\n if not report:\n request = conn.request_report('_GET_FLAT_FILE_ORDERS_DATA_',\n (datetime.datetime.now()+dateutil.relativedelta.relativedelta(months=-1)).isoformat(),\n 
datetime.datetime.now().isoformat(),\n [self.marketplaceid,])\n \n\n report_data = conn.get_report(report.ReportId)\n \n filep = open('geek.csv','wb') \n filep.write(report_data.original) \n filep.close() \n records = {}\n \n with open('geek.csv', encoding='ISO-8859-1') as myFile:\n keys = myFile.readline().lstrip().lstrip('\\n').rstrip('\\n').rstrip().split('\\t')\n for num, line in enumerate(myFile):\n if(num):\n record = {}\n formatted_line = line.replace('\\n', '\\t').split('\\t')\n if 'Shipped' in formatted_line:\n for index, key in enumerate(keys):\n formatted_line = line.replace('\\n', '\\t').split('\\t')\n record[key] = formatted_line[index]\n records[record['amazon-order-id']] = record\n return records\n\n\n\n\n def get_orders(self, supplier_id, ship_from_contact_id, created_after=None): #orders of one month till date\n records = self.get_reports()\n # data = self.get_amazon_order_ids(supplier_id, ship_from_contact_id, records, self.get_date() if not created_after else created_after)\n # print(line,'////***************************////////' ,num )\n # for line in open('geek.txt', encoding='ISO-8859-1'): \n # print(line,'**********')\n # #print(dir(report), report)#, data.ReportInfo)\n # data = self.get_amazon_order_ids(conn, supplier_id, ship_from_contact_id, self.get_date() if not created_after else created_after)\n # data = self.get_line_item_details(conn, data)\n\n #def filter_orders(self, data):\n #orders = conn.list_orders([self.marketplaceid,], created_after)\n\n # print(orders.parsed.Orders.Order[0].AmazonOrderId,'----------\\n')\n\n # order_items = conn.list_order_items(orders.parsed.Orders.Order[0].AmazonOrderId)\n # print(order_items.parsed)#, (orders.parsed.Orders.Order[0]).keys())\n #, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None,\n #lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(),\n # payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'):\n\n \nclass AmazonMEXICO(Amazon):\n\n def __init__(self, credentials):\n super().__init__(credentials)\n self.credentials = credentials\n \n marketplaceid = 'A1AM78C64UM0Y8'\n \n def get_orders(self):\n conn = mws.Orders(self.credentials)\n #conn.list_orders(marketplaceids)\n #print(conn.list_orders(marketplaceids))\n \n #, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None,\n #lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(),\n # payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'):\n","sub_path":"src/client/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":9950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85300122","text":"import turtle\nimport math\n\ndef drawSquare(bot, unit):\n '''Draws a square'''\n\n for i in range(4):\n bot.forward(unit)\n bot.left(90)\n \ndef drawInception(bot, number, unit):\n '''Draws squares in inception pattern'''\n\n for i in range(number):\n drawSquare(bot, unit)\n \n bot.up()\n bot.forward(unit/2)\n\n bot.right(90)\n\n unit = unit + 20\n \n bot.forward(10)\n bot.left(90)\n \n bot.backward(unit/2)\n bot.down()\n \nbot = turtle.Turtle()\nbot.color(\"hot pink\")\nbot.pensize(3)\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"light green\")\n\nnumber = 5\nunit = 20\n\ndrawInception(bot, number, 
unit)\n\nwindow.exitonclick()\n","sub_path":"python/basics/square_ception2.py","file_name":"square_ception2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535048402","text":"# coding=utf-8\n\n# ===============================================\n# Author: RyutaShitomi\n# date: 2019-04-18T13:07:20.248Z\n# Description:\n#\n# ===============================================\n\n# lib\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport time\n\n# user packages\nfrom lib.models.reshourglass import ResHourglass\nfrom lib.core.config import BACKBONE_NAME_LIST\nimport lib.utils.helper as helper\nfrom lib.models.hourglass import Hourglass\nfrom lib.models.stacked_hourglass import StackedHourglass\n\ntf.logging.set_verbosity(tf.logging.FATAL)\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('checkpoint_dir',\n './checkpoints',\n 'checkpoints directory')\n\ntf.app.flags.DEFINE_string('image_path',\n './data/demo/ski.png',\n 'image you want to predict keypoints')\n\ntf.app.flags.DEFINE_boolean('is_separate',\n False,\n 'whether you visualize heatmaps separately or not.')\n\ntf.app.flags.DEFINE_boolean('is_save',\n False,\n 'Do you save the keypoints estimation image?')\ntf.app.flags.DEFINE_boolean('is_measure',\n False,\n 'Do you measure the fps about model.')\n\ntf.app.flags.DEFINE_enum('model_type',\n 'hourglass',\n ['reshourglass', 'hourglass', 'stacked'],\n 'model type which should be defined ./lib/models/')\n\n\ndef main(argv):\n\n sess = tf.Session()\n\n input_size = (256, 192)\n image = tf.placeholder(tf.float32, shape=[None, input_size[0], input_size[1], 3], name='input')\n\n if FLAGS.model_type == 'hourglass':\n model = Hourglass(is_use_bn=True, num_keypoints=17)\n\n resize = (128, 96)\n\n elif FLAGS.model_type == 'reshourglass':\n model = ResHourglass(is_use_bn=True, num_keypoints=17)\n resize = (64, 48)\n\n elif FLAGS.model_type == 'stacked':\n model = StackedHourglass(is_use_bn=True, num_keypoints=17)\n\n resize = (64, 48)\n\n\n logits = model.build(image, 'Hourglass', is_training=False, visualize=True)\n\n logits = tf.nn.sigmoid(logits)\n\n\n load_image = plt.imread(FLAGS.image_path)\n load_image = cv2.resize(load_image, (192, 256))\n\n if np.max(load_image) <= 1:\n load_image = load_image * 255\n\n saver, checkpoint_path = helper.create_saver_and_restore(sess, FLAGS.checkpoint_dir)\n # checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n\n time_list = []\n for _ in range(100):\n start = time.perf_counter()\n res = sess.run(logits, feed_dict={image: np.expand_dims(load_image, axis=0)})\n end = time.perf_counter()\n time_list.append(end - start)\n\n if FLAGS.is_measure:\n plt.boxplot(time_list)\n plt.title('time')\n plt.savefig('time_result.png')\n plt.show()\n res = res[0]\n helper.visualize_heatmaps(load_image, predict=res, is_separate=FLAGS.is_separate, is_save=FLAGS.is_save)\n helper.visualize_keypoints(load_image, predict_heatmap=res, is_save=FLAGS.is_save)\n\n\n\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277301065","text":"'''\nThis program is free software: you can redistribute it 
and/or modify it under the\nterms of the GNU Affero General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\nA PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License along\nwith this program. If not, see <https://www.gnu.org/licenses/>\n\nDate : April 2019\nAuthor: Mr. Jayanath Liyanage jayanathl@icta.lk/jayanath1987@gmail.com\nURL: https://github.com/jayanath1987/AIE\n'''\n\n\nuser_input = input('Enter the marks of a student: ')\n\ntry:\n    marks = int(user_input)\n    if marks not in range(0, 101):\n        print(\"ERROR! Please enter a numerical value between 0 to 100\")\n    else:\n        if marks < 45:\n            if marks < 30:\n                grade = \"D-\"\n            elif marks < 35:\n                grade = \"D\"\n            elif marks < 40:\n                grade = \"D+\"\n            else:\n                grade = \"C-\"\n            print(\"Student is FAIL, Grade\", grade)\n        else:\n            if marks < 50:\n                grade = \"C\"\n            elif marks < 55:\n                grade = \"C+\"\n            elif marks < 60:\n                grade = \"B-\"\n            elif marks < 65:\n                grade = \"B\"\n            elif marks < 75:\n                grade = \"B+\"\n            elif marks < 80:\n                grade = \"A-\"\n            elif marks < 90:\n                grade = \"A\"\n            else:\n                grade = \"A+\"\n            print(\"Student is PASS, Grade\", grade)\n\n\n\nexcept ValueError:\n    print(\"ERROR! Please enter a numerical value between 0 to 100\")\n","sub_path":"Assignment1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"366331589","text":"#! python2\n#coding: utf-8\nimport sys\n\nfrom utils.config_file_path import Config_File_Path\nfrom utils.data_read import Data_Read\nfrom utils.own_config_file_path import Own_Config_File_Path\n\n\nclass All_Kinds_Of_Data_Read:\n    '''\n    Program execution order.\n    Data that every interface call needs:\n    1. Global data -- the current environment address, proxy info and performance parameters,\n       the names/ids of the shared dependency interfaces, and the index of the returned result\n    2. Interface configuration data -- the request method, interface address, request parameters,\n       and whether the interface has dependencies\n    3. Default parameters, used to call the interface when no input parameters are supplied\n    Optional data:\n    1. Test data\n    2. Performance data\n    3. Log files\n\n    Three kinds of call relationships:\n    A test interface calls the interface under test:\n        they share the same name, so the exec data and the own data that are read are identical\n    The interface under test calls a dependency interface:\n        the names differ; the test interface's data is already stored in its own process, so the\n        dependency interface only needs the global data, its own config data and its own default data\n    A dependency interface calls another dependency interface:\n        the first one is called earlier and its data is already stored in its own process; the second\n        one only needs the global data, its own config data and its own default data\n\n    Both the execution-path dict and this interface's own path dict are read here. Some of that data\n    is currently unused, but everything is kept for now to make future expansion easier.\n    '''\n    def __init__(self):\n        self.default_data_list = [\"test_data_test.yml\",\"test_data_dev.yml\",\"test_data_online.yml\"]\n\n    def all_Kinds_Of_Data_Read(self,center_name=None, own_interface_name=None):\n\n        default_data_list = self.default_data_list\n        # try:\n        #     exec_path_dict_dict = Config_File_Path().config_File_Path(exec_from=exec_from, interface_name=interface_name)\n        #     if exec_path_dict_dict[\"msg\"] == \"success\":\n        #         exec_path_dict = exec_path_dict_dict[\"result\"]\n        #     else:\n        #         print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n        #         print(\"--%s--%s--problem data is exec_path_dict_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n        #         return exec_path_dict_dict\n        #==========================================================================\n        # Resolve the data paths that belong to the current file\n        own_path_dict_dict = Own_Config_File_Path().own_Config_File_Path(center_name=center_name, own_interface_name=own_interface_name)\n        if own_path_dict_dict[\"msg\"] == \"success\":\n            own_path_dict = own_path_dict_dict[\"result\"]\n        else:\n            print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            print(\"--%s--%s--problem data is own_path_dict_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return own_path_dict_dict\n        #==========================================================================\n        # Read the global config file\n        data_config_globle_dict = Data_Read().yaml_Read(own_path_dict['global_config_path'])\n        if data_config_globle_dict[\"msg\"] == \"success\":\n            data_config_globle = data_config_globle_dict[\"result\"][0]\n        else:\n            print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            print(\"--%s--%s--problem data is data_config_globle_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return data_config_globle_dict\n        #==========================================================================\n        interface_config_dict = Data_Read().yaml_Read(own_path_dict[\"own_interface_config_path\"])\n        if interface_config_dict[\"msg\"] == \"success\":\n            interface_config = interface_config_dict[\"result\"][0]\n            request_method = interface_config[\"request_method\"]\n        else:\n            print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            print(\"--%s--%s--problem data is interface_config_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return interface_config_dict\n        #==========================================================================\n        # Read the environment flag\n        environment_flag = data_config_globle['environment_flag']\n        if environment_flag not in [\"0\",\"1\",\"2\"]:\n            environment_flag = \"0\"\n        #==========================================================================\n        # Read the functional test data\n        func_test_data_dict = Data_Read().yaml_Read(own_path_dict['func_data_path']+default_data_list[int(environment_flag)])\n        if func_test_data_dict[\"msg\"] == \"success\":\n            func_test_data = func_test_data_dict[\"result\"][0]\n        else:\n            print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            print(\"--%s--%s--problem data is func_test_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return func_test_data_dict\n        #==========================================================================\n        # Read this interface's default data for the current environment\n        default_data_dict = Data_Read().yaml_Read(\"%s%s\" % (own_path_dict[\"own_default_data_path\"], \\\n                                                            default_data_list[int(environment_flag)]))\n        if default_data_dict[\"msg\"] == \"success\":\n            default_data = default_data_dict[\"result\"][0]\n        else:\n            print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            print(\"--%s--%s--problem data is default_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return default_data_dict\n        #==========================================================================\n        # Declare the return variables up front so the return statement cannot raise NameError\n        pref_python_get_test_data = None\n        pref_python_post_test_data = None\n        pref_siege_get_test_data = None\n        pref_siege_post_test_data = None\n        # If the request method is get, read the get data\n        if request_method == \"get\":\n            pref_python_get_test_data_dict = Data_Read().yaml_Read(own_path_dict['pref_python_data_get_path']+default_data_list[int(environment_flag)])\n            if pref_python_get_test_data_dict[\"msg\"] == \"success\":\n                pref_python_get_test_data = pref_python_get_test_data_dict[\"result\"][0]\n            else:\n                print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                print(\"--%s--%s--problem data is pref_python_get_test_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                return pref_python_get_test_data_dict\n            #==================================================================\n            pref_siege_get_test_data_dict = Data_Read().yaml_Read(own_path_dict['pref_siege_data_get_path']+default_data_list[int(environment_flag)])\n            if pref_siege_get_test_data_dict[\"msg\"] == \"success\":\n                pref_siege_get_test_data = pref_siege_get_test_data_dict[\"result\"][0]\n            else:\n                print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                print(\"--%s--%s--problem data is pref_siege_get_test_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                return pref_siege_get_test_data_dict\n        #==========================================================================\n        # If the request method is post, read the post data\n        elif \"post\" in request_method:\n            pref_python_post_test_data_dict = Data_Read().yaml_Read(own_path_dict['pref_python_data_post_path']+default_data_list[int(environment_flag)])\n            if pref_python_post_test_data_dict[\"msg\"] == \"success\":\n                pref_python_post_test_data = pref_python_post_test_data_dict[\"result\"][0]\n            else:\n                print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                print(\"--%s--%s--problem data is pref_python_post_test_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                return pref_python_post_test_data_dict\n            #==================================================================\n            pref_siege_post_test_data_dict = Data_Read().yaml_Read(own_path_dict['pref_siege_data_post_path']+default_data_list[int(environment_flag)])\n            if pref_siege_post_test_data_dict[\"msg\"] == \"success\":\n                pref_siege_post_test_data = pref_siege_post_test_data_dict[\"result\"][0]\n            else:\n                print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                print(\"--%s--%s--problem data is pref_siege_post_test_data_dict\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n                return pref_siege_post_test_data_dict\n\n        else:\n            print(\"--%s--%s--request_method error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n            return {\"msg\":\"error\"}\n\n        interface_pref_log_path = own_path_dict[\"interface_pref_log_path\"]\n        interface_pref_siege_log_path = own_path_dict[\"data_for_siege_content\"]\n        #recover_path_deep = own_path_dict[\"recover_path_deep\"]\n        #==========================================================================\n        # Path of this interface's test-result log\n        interface_log_path = own_path_dict[\"interface_log_path\"]\n        return {\n            \"msg\":\"success\",\n            \"result\":\n                {\n                    # global config data\n                    \"global_config\":data_config_globle,\n                    # functional test data for the interface being executed\n                    \"func_test_data\":func_test_data,\n                    # this interface's default data\n                    \"default_data\":default_data,\n                    # python performance test data (get)\n                    \"pref_python_get_test_data\": pref_python_get_test_data,\n                    # python performance test data (post)\n                    \"pref_python_post_test_data\": pref_python_post_test_data,\n                    # siege performance test data (get)\n                    \"pref_siege_get_test_data\": pref_siege_get_test_data,\n                    # siege performance test data (post)\n                    \"pref_siege_post_test_data\": pref_siege_post_test_data,\n                    # this interface's config\n                    \"interface_config\":interface_config,\n                    # this interface's test-result log path\n                    \"interface_log_path\":interface_log_path,\n                    # this interface's performance log path\n                    \"interface_pref_log_path\":interface_pref_log_path,\n                    \"data_for_siege_content\": interface_pref_siege_log_path,\n\n\n                }\n\n            }\n        # except Exception as f:\n        #     print(\"--%s--%s--tool error\" % (self.__class__.__name__, sys._getframe().f_code.co_name))\n        #     print(\"python error is: %s\" % f)\n        #     return {\n        #         \"msg\":\"error\"\n        #     }\n\n\n    def main(self):\n        pass\n\nif __name__ == \"__main__\":\n    ss = All_Kinds_Of_Data_Read()\n    ss.main()","sub_path":"interface_test_tool/utils/all_kinds_of_data_read.py","file_name":"all_kinds_of_data_read.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"202075079","text":"from tqdm import tqdm\r\nimport torch.nn.functional as F\r\nfrom cuda_avail import cuda_avail_device\r\ndevice=cuda_avail_device\r\ndef train(model, device, train_loader, optimizer, epoch,regularizer,losses_array, acc_array):\r\n    model.train()\r\n    pbar = tqdm(train_loader)\r\n    correct = 0\r\n    processed = 0\r\n    for batch_idx, (data, target) in enumerate(pbar):\r\n        # get samples\r\n        data, target = data.to(device), target.to(device)\r\n        optimizer.zero_grad()\r\n        # Predict\r\n        y_pred = model(data)\r\n        # Calculate loss\r\n        loss = F.nll_loss(y_pred,target)\r\n        l1=0\r\n        for p in model.parameters(): \r\n            l1=l1+p.abs().sum()\r\n        lambda_l1=1e-5\r\n        # Backpropagation\r\n        # For l1\r\n        #------------------\r\n        if(regularizer=='l1'):\r\n            loss=loss+lambda_l1*l1\r\n        # For l2 loss\r\n        #------------------\r\n        # Nothing needed as this is accommodated in the optimizer\r\n        # For l1 and l2 loss\r\n        #------------------\r\n        if(regularizer=='l1l2'):\r\n            loss=loss+lambda_l1*l1\r\n        # For GBN\r\n        #------------------\r\n        # No need to do anything as GBN is addressed in model definition\r\n        # For GBN + l1l2\r\n        #------------------\r\n        # The L1 term for 'l1l2' was already added above; repeating the same check here\r\n        # (as the original code did) would double the penalty, so nothing more is added.\r\n        losses_array.append(loss)\r\n        # In PyTorch, we need to set the gradients to zero before starting to do backpropagation because PyTorch accumulates the gradients on subsequent backward passes. \r\n        # Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly. 
\r\n loss.backward()\r\n optimizer.step()\r\n # Update pbar-tqdm\r\n pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability\r\n correct += pred.eq(target.view_as(pred)).sum().item()\r\n processed += len(data)\r\n pbar.set_description(desc= f'Model={regularizer} Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')\r\n acc_array.append(100*correct/processed)\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106335693","text":"\"\"\"\nCopyright 2020, All rights reserved.\nAuthor : SangJae Kang\nMail : craftsangjae@gmail.com\n\"\"\"\nimport asyncio\nimport unittest\nfrom service.github import GithubKeyGen\nfrom service.consumer import RedisQueue\nfrom service.database import MongoDatabase\nfrom service.document import parse_repository\nfrom service.worker import RepositoryCrawler\n\n\nclass TestConsumerMethods(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n github = GithubKeyGen(\"../credentials/github.txt\")\n database = MongoDatabase(\"repository\")\n broker = RedisQueue(\"repository\", host=\"localhost\", port=\"6379\", db=\"0\")\n cls.worker = RepositoryCrawler(broker, database, github)\n\n cls.api_key = list(github.key_cache.keys())[0]\n\n def test_get_name_and_owner_by_repository_id(self):\n loop = asyncio.get_event_loop()\n repo_name, repo_owner = (\n loop.run_until_complete(self.worker.get_name_and_owner_by_repository_id(56417681, self.api_key)))\n self.assertEqual(repo_name, \"implicit\")\n self.assertEqual(repo_owner, \"benfred\")\n\n def test_get_name_and_owner_by_repository_id_not_exist(self):\n loop = asyncio.get_event_loop()\n with self.assertRaises(ValueError):\n loop.run_until_complete(self.worker.get_name_and_owner_by_repository_id(56417222222222212312681, self.api_key))\n\n def test_get_repository_info_by_name_and_worker(self):\n loop = asyncio.get_event_loop()\n document = loop.run_until_complete(self.worker.get_repository_info_by_name_and_owner(\"implicit\", \"benfred\", self.api_key))\n print(parse_repository(document))\n\n def test_get_repository_info_by_name_and_worker_not_exists(self):\n loop = asyncio.get_event_loop()\n\n with self.assertRaises(ValueError):\n document = loop.run_until_complete(self.worker.get_repository_info_by_name_and_owner(\"implasdsicit\", \"benfred\", self.api_key))\n print(parse_repository(document))","sub_path":"tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42672572","text":"from typing import List\n\ndef update(lst: List[int], m: int) -> List[int]:\n \"\"\"Given an ordered list, positive int m, update the list so\n if x appears m times in original list, x will show up min(2,m) in returned list\n \n ex. 
update([10,10,10], 2) -> [10,10]\n update([10,10,10], 1) -> [10]\n update([12,12,12], 3) -> [12, 12] \n \"\"\"\n exact = min(2, m)\n j = exact\n for i in range(exact, len(lst)):\n if (\n (exact == 1 and lst[i] != lst[j-1]) or \n (\n exact == 2 and \n (lst[j-1] != lst[j-2] or lst[j-1] != lst[i])\n )\n ):\n lst[j] = lst[i]\n j += 1 \n\n return lst[:j]\n\nif __name__ == '__main__':\n for lst in ([1,2,3], [1,2,2,2,3], [1,2,2,3,3,3,4]):\n print(update(lst, 2))","sub_path":"list/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205240595","text":"'''\nSource : geeksforgeeks\nmy solution\n1) find min element and its position in arr\n2) find no of elements from pos to len(arr) --> pivot array\n3) rotate anticlockwise and pass it to binary search\n\nbetter solution:\nInput arr[] = {3, 4, 5, 1, 2}\nElement to Search = 1\n 1) Find out pivot point and divide the array in two\n sub-arrays. (pivot = 2) /*Index of 5*/\n 2) Now call binary search for one of the two sub-arrays.\n (a) If element is greater than 0th element then\n search in left array\n (b) Else Search in right array\n (1 will go in else as 1 < 0th element(3))\n 3) If element is found in selected sub-array then return index\n Else return -1.\n'''\n\n#my solution\ndef mysolution():\n min_ele = min(arr)\n minpos = arr.index(min_ele)\n pivot_arr = arr[minpos:len(arr)]\n reg_arr = arr[:minpos]\n result = pivot_arr + reg_arr\n print (result)\n\n#better solution\n# def binarysearch(t_arr, x):\n# print(t_arr)\n# mid_idx = int(len(t_arr)/2)\n# middle = t_arr[mid_idx]\n# if middle == x:\n# return mid_idx\n# elif middle > x:\n# binarysearch(t_arr[:mid_idx], x)\n# elif middle < x:\n# binarysearch(t_arr[mid_idx:], x)\n# return -1\ndef binary_search(arr, low, high, x):\n # Check base case\n if high >= low:\n mid = (high + low) // 2\n # If element is present at the middle itself\n if arr[mid] == x:\n return mid\n # If element is smaller than mid, then it can only\n # be present in left subarray\n elif arr[mid] > x:\n return binary_search(arr, low, mid - 1, x)\n # Else the element can only be present in right subarray\n else:\n return binary_search(arr, mid + 1, high, x)\n else:\n # Element is not present in the array\n return -1\n\ndef bettersolution(arr, x):\n min_ele = min(arr)\n minpos = arr.index(min_ele)\n pivot_arr = arr[minpos:len(arr)]\n reg_arr = arr[:minpos]\n print(\"pivor arr\", pivot_arr)\n print(\"regular arr\", reg_arr)\n if reg_arr[0] == x:\n return 0\n elif reg_arr[0] > x:\n s_idx = binary_search(pivot_arr, 0, len(pivot_arr), x)\n s_idx = s_idx + len(reg_arr)\n elif reg_arr[0] < x:\n s_idx = binary_search(reg_arr, 0, len(reg_arr), x)\n else:\n s_idx = -1\n\n return (s_idx)\n\narr = [5, 6, 7, 8, 9, 10, 1, 2, 3]\nx = 5\nprint (\"given arr \", arr)\nprint (bettersolution(arr, x))\n","sub_path":"DataStructures/arrays/Search_an_element_in_a_sorted_and_rotated_array.py","file_name":"Search_an_element_in_a_sorted_and_rotated_array.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368832104","text":"# hexqt.py -- HexQT a pretty QT hext editor.\nimport enum\nimport sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIcon, QPalette, QColor, QFont, QTextCharFormat, QTextCursor\nfrom PyQt5.QtWidgets import QAction, QMainWindow, QFileDialog, QTextEdit, QDesktopWidget\n# QT5 Python Binding\nfrom PyQt5.QtWidgets import 
QApplication, QWidget, QHBoxLayout\nfrom PyQt5.QtWidgets import QInputDialog, QLineEdit\n\n\nclass Mode(enum.Enum):\n READ = 0 # Purely read the hex.\n ADDITION = 1 # Add to the hex.\n OVERRIDE = 2 # Override the current text.\n\n\nclass FileSelector(QFileDialog):\n def __init__(self):\n super(FileSelector, self).__init__()\n self.file_name = None\n self.selectFile()\n self.show()\n\n def selectFile(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self, \"Directory View\", \"\", \"All Files (*)\", options=options)\n\n self.file_name = file_name\n\n\nclass InputDialogue(QInputDialog):\n def __init__(self, title, text):\n super(InputDialogue, self).__init__()\n\n # Dialogue options.\n self.dialog_title: str = title\n self.dialog_text: str = text\n self.dialog_response = None\n\n self.init_ui()\n\n # initUI ... Initialize the main view of the dialogue.\n def init_ui(self):\n dialogue_response, dialogue_complete = QInputDialog.getText(self, self.dialog_title, self.dialog_text,\n QLineEdit.Normal, '')\n if dialogue_complete and dialogue_response:\n self.dialog_response = dialogue_response\n else:\n self.dialog_response = ''\n\n\nclass App(QMainWindow):\n def __init__(self):\n super(App, self).__init__()\n\n # Window options!\n self.title: str = 'HexQT'\n self.left: int = 0\n self.top: int = 0\n self.width: int = 1280\n self.height: int = 840\n\n self.row_spacing: int = 4 # How many bytes before a double space.\n self.row_length: int = 16 # How many bytes in a row.\n self.byte_width: int = 2 # How many bits to include in a byte.\n self.mode = Mode.READ\n\n self.initUI()\n\n def read_file(self, file_name):\n file_data = ''\n\n if file_name:\n with open(file_name, 'rb') as fileObj:\n file_data = fileObj.read()\n\n self.generate_view(file_data)\n\n def generate_view(self, text):\n \"\"\"generates hex text\"\"\"\n space = ' '\n\n row_spacing = self.row_spacing\n row_length = self.row_length\n\n offset = 1\n\n offset_text = ''\n main_text = ''\n ascii_text = ''\n\n for index, b in enumerate(text):\n char = chr(b)\n\n if char in (' ', '', '\\n', '\\t', '\\r', '\\b'):\n ascii_text += '.'\n else:\n ascii_text += char\n\n main_text += format(b, '0' + str(self.byte_width) + 'x')\n\n if (index + 1) % row_length == 0:\n offset_text += format(offset, '04x') + '\\n'\n main_text += '\\n'\n ascii_text += '\\n'\n elif (index + 1) % row_spacing == 0:\n main_text += space * 2\n else:\n main_text += space\n\n offset += len(char)\n\n self.offset_text_area.setText(offset_text)\n self.main_text_area.setText(main_text)\n self.ascii_text_area.setText(ascii_text)\n\n def open_file(self):\n file_select = FileSelector()\n file_name = file_select.file_name\n\n self.read_file(file_name)\n\n def save_file(self):\n print('Saved!')\n\n def highlight_main(self) -> None:\n \"\"\"Bidirectional highlighting from main\"\"\"\n\n # Create and get cursors for getting and setting selections.\n highlight_cursor = QTextCursor(self.ascii_text_area.document())\n cursor = self.main_text_area.textCursor()\n\n # Clear any current selections and reset text color.\n highlight_cursor.select(QTextCursor.Document)\n highlight_cursor.setCharFormat(QTextCharFormat())\n highlight_cursor.clearSelection()\n\n # Information about where selections and rows start.\n selected_text = cursor.selectedText() # The actual text selected.\n selection_start = cursor.selectionStart()\n selection_end = cursor.selectionEnd()\n\n total_bytes = 
self.__get_valuable_positions_length(selected_text) # get all valuable positions\n # \\n and word length compensation\n total_bytes = self.__negative_compensation(total_bytes)\n\n main_text = self.main_text_area.toPlainText().replace('\\n', ' ')\n ascii_start = self.__get_valuable_positions_length(main_text[:selection_start]) # get all valuable positions\n # \\n and word length compensation\n ascii_start = self.__negative_compensation(ascii_start)\n ascii_end = ascii_start + total_bytes\n\n # Select text and highlight it.\n highlight_cursor.setPosition(ascii_start, QTextCursor.MoveAnchor)\n highlight_cursor.setPosition(ascii_end, QTextCursor.KeepAnchor)\n\n highlight = QTextCharFormat()\n highlight.setBackground(Qt.red)\n highlight_cursor.setCharFormat(highlight)\n highlight_cursor.clearSelection()\n\n def highlight_ascii(self) -> None:\n \"\"\"Bidirectional highlighting from ascii\"\"\"\n\n # Create and get cursors for getting and setting selections.\n highlight_cursor = QTextCursor(self.main_text_area.document())\n cursor = self.ascii_text_area.textCursor()\n\n # Clear any current selections and reset text color.\n highlight_cursor.select(QTextCursor.Document)\n highlight_cursor.setCharFormat(QTextCharFormat())\n highlight_cursor.clearSelection()\n\n # Information about where selections and rows start.\n selected_text = cursor.selectedText() # The actual text selected.\n selection_start = cursor.selectionStart()\n\n ascii_text = self.ascii_text_area.toPlainText().replace('\\n', '')\n main_start = self.__get_valuable_positions_length(ascii_text[:selection_start])\n main_start = self.__positive_compensation(main_start)\n\n total_bytes = self.__get_valuable_positions_length(selected_text) # get all valuable positions\n # \\n and word length compensation\n total_bytes = self.__positive_compensation(total_bytes)\n selection_end = main_start + total_bytes\n\n # Select text and highlight it.\n highlight_cursor.setPosition(main_start, QTextCursor.MoveAnchor)\n highlight_cursor.setPosition(selection_end, QTextCursor.KeepAnchor)\n\n highlight = QTextCharFormat()\n highlight.setBackground(Qt.red)\n highlight_cursor.setCharFormat(highlight)\n highlight_cursor.clearSelection()\n\n @staticmethod\n def __get_valuable_positions_length(array):\n return len(list(filter(lambda x: x not in ('', ' '), array)))\n\n def __positive_compensation(self, value):\n return self.byte_width * value + value\n\n def __negative_compensation(self, value):\n return (value + value // self.row_length) // self.byte_width\n\n # Creates a dialogue and gets the offset to jump to and then jumps to that offset.\n def offset_jump(self):\n jump_text = InputDialogue('Jump to Offset', 'Offset').dialog_response\n jump_offset = 0xF\n\n main_text = self.main_text_area.toPlainText()\n main_text = main_text.strip().replace(' ', ' ')\n\n text_cursor = self.main_text_area.textCursor()\n\n # createMainView ... 
Creates the primary view and look of the application (3-text areas.)\n def create_main_view(self) -> QHBoxLayout:\n qh_box = QHBoxLayout()\n\n self.main_text_area = QTextEdit()\n self.offset_text_area = QTextEdit()\n self.ascii_text_area = QTextEdit()\n\n # Initialize them all to read only.\n self.main_text_area.setReadOnly(True)\n self.ascii_text_area.setReadOnly(True)\n self.offset_text_area.setReadOnly(True)\n\n # Create the fonts and styles to be used and then apply them.\n font = QFont(\"Courier New\", 12, QFont.Normal, False)\n\n self.main_text_area.setFont(font)\n self.ascii_text_area.setFont(font)\n self.offset_text_area.setFont(font)\n\n self.offset_text_area.setTextColor(Qt.red)\n\n # Syncing scrolls.\n self.sync_scrolls(self.main_text_area, self.ascii_text_area, self.offset_text_area)\n\n # Highlight linking. BUG-GY\n self.main_text_area.selectionChanged.connect(self.highlight_main)\n self.ascii_text_area.selectionChanged.connect(self.highlight_ascii)\n\n qh_box.addWidget(self.offset_text_area, 1)\n qh_box.addWidget(self.main_text_area, 6)\n qh_box.addWidget(self.ascii_text_area, 2)\n\n return qh_box\n\n def initUI(self):\n # Initialize basic window options.\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n # Center the window.\n qt_rectangle = self.frameGeometry()\n center_point = QDesktopWidget().availableGeometry().center()\n qt_rectangle.moveCenter(center_point)\n self.move(qt_rectangle.topLeft())\n\n # Creates a menu bar, (file, edit, options, etc...)\n mainMenu = self.menuBar()\n\n # Menus for window.\n file_menu = mainMenu.addMenu('File')\n edit_menu = mainMenu.addMenu('Edit')\n view_menu = mainMenu.addMenu('View')\n help_menu = mainMenu.addMenu('Help')\n\n # FILE MENU ---------------------------------------\n\n # Open button.\n open_button = QAction(QIcon(), 'Open', self)\n open_button.setShortcut('Ctrl+O')\n open_button.setStatusTip('Open file')\n open_button.triggered.connect(self.open_file)\n\n # Save button.\n save_button = QAction(QIcon(), 'Save', self)\n save_button.setShortcut('Ctrl+S')\n save_button.setStatusTip('Open file')\n save_button.triggered.connect(self.save_file)\n\n # Optional exit stuff.\n exit_button = QAction(QIcon(), 'Exit', self)\n exit_button.setShortcut('Ctrl+Q')\n exit_button.setStatusTip('Exit application')\n exit_button.triggered.connect(self.close)\n\n file_menu.addAction(open_button)\n file_menu.addAction(save_button)\n file_menu.addAction(exit_button)\n\n # EDIT MENU ---------------------------------------\n\n # Jump to Offset\n offset_button = QAction(QIcon(), 'Jump to Offset', self)\n offset_button.setShortcut('Ctrl+J')\n offset_button.setStatusTip('Jump to Offset')\n offset_button.triggered.connect(self.offset_jump)\n\n edit_menu.addAction(offset_button)\n\n # Creating a widget for the central widget thingy.\n central_widget = QWidget()\n central_widget.setLayout(self.create_main_view())\n\n self.setCentralWidget(central_widget)\n\n # Show our masterpiece.\n self.show()\n\n # Syncs the horizontal scrollbars of multiple qTextEdit objects. 
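Each scrollbar's valueChanged signal is connected straight to the other two bars' setValue slots. 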
Rather clunky but it works.\n    @staticmethod\n    def sync_scrolls(q_text_obj0, q_text_obj1, q_text_obj2):\n        scroll0 = q_text_obj0.verticalScrollBar()\n        scroll1 = q_text_obj1.verticalScrollBar()\n        scroll2 = q_text_obj2.verticalScrollBar()\n\n        # There seems to be no better way of doing this at present so...\n\n        scroll0.valueChanged.connect(\n            scroll1.setValue\n        )\n\n        scroll0.valueChanged.connect(\n            scroll2.setValue\n        )\n\n        scroll1.valueChanged.connect(\n            scroll0.setValue\n        )\n\n        scroll1.valueChanged.connect(\n            scroll2.setValue\n        )\n\n        scroll2.valueChanged.connect(\n            scroll1.setValue\n        )\n\n        scroll2.valueChanged.connect(\n            scroll0.setValue\n        )\n\n\n# setStyle ... Sets the style of the QT Application. Right now using edgy black.\ndef set_style(q_app):\n    q_app.setStyle(\"Fusion\")\n\n    dark_palette = QPalette()\n\n    dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))\n    dark_palette.setColor(QPalette.WindowText, Qt.white)\n    dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))\n    dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n    dark_palette.setColor(QPalette.ToolTipBase, Qt.white)\n    dark_palette.setColor(QPalette.ToolTipText, Qt.white)\n    dark_palette.setColor(QPalette.Text, Qt.white)\n    dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))\n    dark_palette.setColor(QPalette.ButtonText, Qt.white)\n    dark_palette.setColor(QPalette.BrightText, Qt.white)\n    dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))\n    dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n    dark_palette.setColor(QPalette.HighlightedText, Qt.black)\n\n    q_app.setPalette(dark_palette)\n\n    q_app.setStyleSheet(\"QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }\")\n\n\ndef main():\n    app = QApplication(sys.argv)\n    set_style(app)\n\n    hexqt = App()  # keep a reference so the window is not garbage-collected\n    sys.exit(app.exec_())\n\n\n# Initialize the program.\nif __name__ == '__main__':\n    main()\n","sub_path":"hexqt.py","file_name":"hexqt.py","file_ext":"py","file_size_in_byte":13206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"472496418","text":"import socket\n\nclass MyFirstTCPClient(object):\n    def __init__(self):\n        self.host = \"127.0.0.1\"\n        self.port = 3006\n    \n    def sendMessage(self, message):\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            s.connect((self.host, self.port)) \n        except OSError:\n            print(\"Connect failed.\")\n            return  # no connection, so don't try to send\n\n        s.send(message)\n        back = s.recv(1024)\n        print(back.decode(\"utf-8\"))\n        s.close() \n\ndef main():\n    f = MyFirstTCPClient()\n    f.sendMessage(message=b\"Hello World!\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"502381763","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom pylab import *\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nimport operator\nimport matplotlib.pyplot as plt\nfrom math import log\ndef calculateEntropy(dataset):\n    numbers = len(dataset)\n    labelCounts = {} # how many times each label occurs\n    # tally the label of every sample\n    for featVec in dataset:\n        currentLabel = featVec[-1] # the label is the last column\n        # count the positive / negative samples\n        if currentLabel not in labelCounts.keys():\n            labelCounts[currentLabel] = 0\n        labelCounts[currentLabel] += 1\n    entropy = 0\n    for key in labelCounts:\n        prob = float(labelCounts[key]) / numbers\n        entropy -= prob * log(prob, 2)\n    return entropy\n\n\n# In[2]:\n\n\ndef createDataset():\n    dataset = [[0, 0, 0, 0, 'no'], # loan-application dataset\n            [0, 0, 0, 1, 'no'],\n            [0, 1, 0, 1, 'yes'],\n            [0, 1, 1, 0, 'yes'],\n            [0, 0, 0, 0, 'no'],\n            [1, 0, 0, 0, 'no'],\n            [1, 0, 0, 1, 'no'],\n            [1, 1, 1, 1, 'yes'],\n            [1, 0, 1, 2, 'yes'],\n            [1, 0, 1, 2, 'yes'],\n            [2, 0, 1, 2, 'yes'],\n            [2, 0, 1, 1, 'yes'],\n            [2, 1, 0, 1, 'yes'],\n            [2, 1, 0, 2, 'yes'],\n            [2, 0, 0, 0, 'no']]\n    labels = ['年龄', '有工作', '有自己的房子', '信贷情况']  # age, has a job, owns a house, credit rating\n    return dataset, labels\n\n\n# In[3]:\n\n\ndef splitDataset(dataset, axis, value):\n    afterSplitDataset = []\n    for featVec in dataset:\n        if featVec[axis] == value:\n            reducedFeatVec = featVec[:axis] # drop this feature\n            reducedFeatVec.extend(featVec[axis+1:])\n            afterSplitDataset.append(reducedFeatVec)\n    return afterSplitDataset\n\n\n# In[4]:\n\n\ndef chooseBestFeatureToSplit(dataset):\n    numberOfFeatures = len(dataset[0]) - 1\n    entropy = calculateEntropy(dataset)\n    bestInformationGain = 0\n    bestFeature = -1\n    for i in range(numberOfFeatures):\n        featList = [example[i] for example in dataset]\n        uniqueVals = set(featList) # distinct values this feature takes\n        newEntropy = 0\n        # compute each feature's information gain\n        for value in uniqueVals:\n            subDataset = splitDataset(dataset, i, value)\n            prob = len(subDataset) / float(len(dataset))\n            newEntropy += prob * calculateEntropy(subDataset)\n        informationGain = entropy - newEntropy\n        if informationGain > bestInformationGain:\n            bestInformationGain = informationGain\n            bestFeature = i\n\n    return bestFeature\n\n\n# In[5]:\n\n\ndef countMajority(classlist):\n    classCount = {}\n    for vote in classlist:\n        if vote not in classCount.keys():\n            classCount[vote] = 0\n        classCount[vote] += 1\n    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n    return sortedClassCount[0][0]\n\n\n# In[6]:\n\n\ndef createTree(dataset, labels, featLabels):\n    classList = [example[-1] for example in dataset]\n    if classList.count(classList[0]) == len(classList):\n        return classList[0]\n    if len(dataset[0]) == 1:  # only the label column is left, so return the majority class\n        return countMajority(classList)\n    bestFeat = chooseBestFeatureToSplit(dataset)\n    bestFeatLabel = labels[bestFeat]\n    featLabels.append(bestFeatLabel)\n    myTree = {bestFeatLabel: {}}\n    del(labels[bestFeat])\n    featValues = [example[bestFeat] for example in dataset]\n    uniqueVals = set(featValues)\n    for value in uniqueVals:\n        myTree[bestFeatLabel][value] = createTree(splitDataset(dataset, bestFeat, value), labels, featLabels)\n    return myTree\n\n\n# In[7]:\n\n\nif __name__ == '__main__':\n    dataset, labels = createDataset()\n    featLabels = []\n    myTree = createTree(dataset, labels, featLabels)\n    print(myTree)\n\n\n# In[8]:\n\n\n# Decision tree visualization\ndef getNumLeafs(myTree):\n    numberOfLeafs = 0\n    firstStr = next(iter(myTree))\n    secondDict = myTree[firstStr]\n    for key in secondDict.keys():\n        # a dict value means an internal node, otherwise a leaf\n        if type(secondDict[key]).__name__ == 'dict':\n            numberOfLeafs += getNumLeafs(secondDict[key])\n        else:\n            numberOfLeafs += 1\n    return numberOfLeafs\n\n\n# In[9]:\n\n\ndef getTreeDepth(myTree):\n    depth = 0\n    firstStr = next(iter(myTree))\n    secondDict = myTree[firstStr]\n    for key in secondDict.keys():\n        if type(secondDict[key]).__name__ == 'dict':\n            presentDepth = 1 + getTreeDepth(secondDict[key])\n        else:\n            presentDepth = 1\n        if presentDepth > depth:\n            depth = presentDepth\n    return depth\n\n\n# In[10]:\n\n\ndef plotNode(nodeTxt, textPos, parentPos, nodeType):\n    arrow_args = dict(arrowstyle='<-')\n    createPlot.ax1.annotate(nodeTxt, xy=parentPos, xycoords='axes fraction', xytext=textPos, textcoords='axes fraction', va='center', ha='center', bbox=nodeType, arrowprops=arrow_args)\n\n\n# In[11]:\n\n\ndef plotEdgeText(currentPos, parentPos, text):\n    xMid = (parentPos[0] - currentPos[0]) / 2.0 + currentPos[0]\n    yMid = (parentPos[1] - currentPos[1]) / 2.0 + currentPos[1]\n    createPlot.ax1.text(xMid, yMid, text, va='center', ha='center', rotation=30)\n\n\n# In[12]:\n\n\ndef plotTree(myTree, parentPos, nodeTxt):\n    decisionNode = dict(boxstyle='sawtooth', fc='0.8')\n    leafNode = dict(boxstyle='round4', fc='0.8')\n    numberOfLeafs = getNumLeafs(myTree)\n    depth = getTreeDepth(myTree)\n    firstStr = next(iter(myTree))\n    currentPos = (plotTree.xOff + (1.0 + float(numberOfLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)\n    plotEdgeText(currentPos, parentPos, nodeTxt)\n    plotNode(firstStr, currentPos, parentPos, decisionNode)\n    secondDict = myTree[firstStr]\n    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD\n    for key in secondDict.keys():\n        if type(secondDict[key]).__name__ == 'dict':\n            plotTree(secondDict[key], currentPos, str(key))\n        else:\n            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW\n            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), currentPos, leafNode)\n            plotEdgeText((plotTree.xOff, plotTree.yOff), currentPos, str(key))\n    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD\n\n\n# In[13]:\n\n\ndef createPlot(inTree):\n    fig = plt.figure(1, facecolor='white')\n    fig.clf()\n    axprops = dict(xticks=[], yticks=[])\n    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)\n    plotTree.totalW = float(getNumLeafs(inTree))\n    plotTree.totalD = float(getTreeDepth(inTree))\n    plotTree.xOff = -0.5 / plotTree.totalW\n    plotTree.yOff = 1.0\n    plotTree(inTree, (0.5, 1.0), '')\n    plt.show()\n\n\n# In[14]:\n\n\nif __name__ == '__main__':\n    dataset, labels = createDataset()\n    featLabels = []\n    myTree = createTree(dataset, labels, featLabels)\n    print(myTree)\n    createPlot(myTree)\n\n\n# In[31]:\n\n\ndef classify(inTree, featLabels, testVec):\n    firstStr = next(iter(inTree))\n    secondDict = inTree[firstStr]\n    featIndex = featLabels.index(firstStr)\n    finalLabel = None  # returned unchanged if the test vector matches no branch\n    for key in secondDict.keys():\n        if testVec[featIndex] == key:\n            if type(secondDict[key]).__name__ == 'dict':\n                finalLabel = classify(secondDict[key], featLabels, testVec)\n            else:\n                finalLabel = secondDict[key]\n    return finalLabel\n\n\n# In[32]:\n\n\n# Test\nif __name__ == '__main__':\n    dataset, labels = createDataset()\n    featLabels = []\n    myTree = createTree(dataset, labels, featLabels)\n    testVec = [0, 1]\n    result = classify(myTree, featLabels, testVec)\n    if result == 'yes':\n        print('放贷')  # grant the loan\n    else:\n        print('不放贷')  # deny the loan\n\n\n# In[35]:\n\n\n# Persist the decision tree\nimport pickle\ndef storeTree(inTree, filename):\n    with open(filename, 'wb') as fw:\n        pickle.dump(inTree, fw)\n\n\n# In[36]:\n\n\nif __name__ == '__main__':\n    myTree = {'有自己的房子': {0: {'有工作': {0: 'no', 1: 'yes'}}, 1: 'yes'}}\n    storeTree(myTree, 'classifierStorage.txt')\n\n\n# In[37]:\n\n\n# Load the decision tree\ndef loadTree(filename):\n    with open(filename, 'rb') as fr:\n        return pickle.load(fr)\n\n\n# In[38]:\n\n\nif __name__ == '__main__':\n    myTree = loadTree('classifierStorage.txt')\n    print(myTree)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"DecisionTree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"139508956","text":"#!/usr/bin/env python3\nfrom pprint import pprint\nimport json\nimport http.client, urllib.parse\n\ndef http_get(connection, path, dict={}):\n    dict['api_key'] = '7813f08896ec72acf0a4a5a749b8e095'\n    dict['file_type'] = 'json'\n    connection.request('GET', path + '?' 
+ urllib.parse.urlencode(dict))\n response = connection.getresponse()\n if response.status == 200:\n data = response.read()\n return json.loads(data.decode('utf-8')) \n else:\n raise Exception(\"HTTP call failed: \" + response.reason)\n\nurl = 'api.stlouisfed.org'\nconnection = http.client.HTTPSConnection(url)\n\n# get the children of the root category\ncat = http_get(connection, '/fred/category/children')\nfor c in cat['categories']:\n series = http_get(connection, '/fred/category/series', {\"category_id\": c['id']})\n if series['count'] > 0:\n metadata = http_get(connection, '/fred/series', {\"series_id\": series['seriess'][0]['id']})\n pprint(metadata)\n break\n","sub_path":"exercise/fred.py","file_name":"fred.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128128497","text":"from config import DataConfig\nimport matplotlib.pyplot as plt\ntry:\n import _pickle as pickle\nexcept:\n import pickle\nimport glob\n\ndef start():\n config = DataConfig()\n histories = sorted(glob.glob(config.history_location+\"*.pickle\"))\n data = {}\n for hist in histories:\n file = open(hist, 'rb') \n h = pickle.loads(pickle.load(file))\n for k, v in h.items():\n if k not in data.keys():\n data[k] = []\n for item in v:\n data[k].append(item)\n for i, kv in enumerate(data.items()):\n plt.subplot(1, len(data), i+1)\n plt.title(kv[0])\n plt.plot(kv[1])\n plt.show()\n ","sub_path":"worker/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599736868","text":"#!/usr/bin/python3\n# python 3.4.3\n\nimport pandas\n\n# Adam Wespiser, adamwespiser@gmail.com\n# Smarking\n# Technical Interview, September 2015\n\n\n\n\n#######################################################################\n#\n# PROBLEM 1: Get duraction distribution over all days/hours\n#\n#######################################################################\ndef date_hour_duration_distro_transactions(file_csv = \"transactions.csv\"):\n\t# load in the data and get the cols that we need\n try:\n df_hd = pandas.read_csv(filepath_or_buffer = file_csv)[[\"entry_date_hour\",\"duration_hour\"]]\n except Exception:\n print(\"Pandas error on\", file_csv, \"make sure it exists, and that entry_time and duration_hour are valid column title\")\n return None\n df_hd.loc[:,\"hour_group\"] = df_hd[\"duration_hour\"].astype(int) + 1\n df_group = df_hd.groupby(['entry_date_hour','hour_group']).size()\n return df_group.unstack('hour_group').fillna(0)\n\n\n#######################################################################\n#\n# PROBLEM 2: Get duraction distribution over all days/hours\n# Select on start/end datetime, dayofweek and entry_hour\n# Allow for 'all' option for all daysofweek or \n# entry hours\n#\n#######################################################################\ndef query_date_hour_duration_transactions(file_csv=\"transactions.csv\", \n start_datetime=\"2013-01-01 13:00:00\", \n end_datetime=\"2013-02-01 13:00:00\",\n entry_dayofweek=\"Monday\", \n entry_hour=15):\n\n \n ## PARAMETER CHECK\n\n #entry_dayofweek = str(entry_dayofweek).lower()\n entry_dayofweek = fuzzy_match_day(entry_dayofweek)\n if entry_dayofweek is None:\n print(\"Provide a day of the week, or specify \\\"all\\\", to indicate all days\")\n return None\n\n if ((not entry_hour in range(24)) and (not str(entry_hour).lower() == 'all')):\n print(\"Provide an 
entry hour(integer in range(24)), or specify \\\"all\\\", to indicate all days\")\n return None\n\n # note, this will push both string, and type=datetime into datetime objects\n try:\n s_dt = pandas.to_datetime(start_datetime)\n e_dt = pandas.to_datetime(end_datetime)\n except Exception as e:\n print(\"Problem with (start|end)_datetime arguments, can't convert to datetime obj\")\n return None\n\n if not s_dt < e_dt:\n print(\"please make sure start_datetime: \", start_datetime, \n \"is less than end_datetime: \", end_datetime,\n \"[start_date, end_date)\")\n return None\n\n\n ## LOAD IN DATA FRAME\n\n try:\n df_hd = pandas.read_csv(filepath_or_buffer = file_csv)[[\"entry_time\",\"duration_hour\"]]\n except Exception:\n print(\"Pandas error on\", file_csv, \"make sure it exists, and that entry_time and duration_hour are valid column title\")\n return None\n\n\n ## TIME selection\n\n # convert text timestamp to datetime \n df_hd.loc[:,'entry_datetime'] = df_hd[\"entry_time\"].apply(lambda x: pandas.to_datetime(x))\n # get mask of dates within range [start, end)\n mask_time = (df_hd['entry_datetime'] >= s_dt) & (df_hd['entry_datetime'] < e_dt)\n df_hd = df_hd.loc[mask_time]\n\n\n ## DAY OF WEEK\n \n if (not entry_dayofweek.lower() == 'all'):\n # get the index of the first matching day\n days_of_week = get_daysofweek()\n dayofweek_index = next(filter(lambda i: days_of_week[i] == entry_dayofweek.lower(),range(7)),None)\n\n # with the dayofweek index, create the mask by comparing each entries\n # dayofweek value with dayofweek_index\n mask_dayofweek = df_hd['entry_datetime'].apply(lambda x: x.dayofweek == dayofweek_index)\n # apply the mask\n df_hd = df_hd.loc[mask_dayofweek]\n\n\n ## HOUR\n\n #print(not str(entry_hour).lower() == 'all',\"\\n\\n\\n\")\n if (not str(entry_hour).lower() == 'all'):\n # x.hour is [0,24), so make a mask by comparing that with entry_hour(same range)\n \n mask_dayofweek = df_hd['entry_datetime'].apply(lambda x: x.hour == int(entry_hour)) \n df_hd = df_hd.loc[mask_dayofweek]\n\n\n ## AGGREGATION: over (month,day,hour) & (hour_group)\n\n df_hd.loc[:,\"hour_group\"] = df_hd['duration_hour'].apply(lambda x: int(x) + 1)\n # get the entry_date_hour be setting entry_datetime minutes and seconds to zero\n df_hd.loc[:,\"entry_date_hour\"] = df_hd['entry_datetime'].apply(lambda x: x.replace(minute=0,second=0))\n # finally, group by (entry_date_hour,hour_group), and get the size(or length of\n # each group. (.size() gets us the number of occurances someone stayed x long\n # on day Y at hour Z. 
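For example, a 1.5-hour stay entered at 09:15 gets hour_group int(1.5) + 1 = 2 under entry_date_hour 09:00:00. 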
Unstack moves hour_group from a column to a row, so now\n # the entries are day Y at hour Z, and the duration someonestayed is a column.\n # Finally, we fillNA to 0, since not all hour groups are observed for each date_hour.\n # Resetting the index to 0 gives the dataframe 'as is' at the end\n return df_hd.groupby(['entry_date_hour','hour_group']).size().unstack('hour_group').fillna(0).reset_index('entry_date_hour')\n\n\n#######################################################################\n#\n# PROBLEM 3: Write a test function that returns the aggreate over all\n# returned rows.\n# Returns a dictionary with key=hour_group, val= counts\n#\n#######################################################################\n\ndef aggreate_duration_all_entries(file_csv=\"transactions.csv\",\n start_datetime=\"2013-01-01 13:00:00\", \n end_datetime=\"2013-02-01 13:00:00\",\n entry_dayofweek=\"Monday\",\n entry_hour = 9):\n\n # let the query function to the heavey lifting:\n dis_df = query_date_hour_duration_transactions(file_csv=file_csv,\n start_datetime=start_datetime, end_datetime = end_datetime,\n entry_dayofweek = entry_dayofweek,\n entry_hour = entry_hour)\n\n if dis_df is None:\n print('Could not perform query on transactions, please ensure all parameters are specified correctly and files exist')\n return None\n ddf = dis_df.sum(numeric_only=True).reset_index()\n # counts are ddf[0][0 -> len(groups)]\n # first group label ddf['hour_group'][0 -> len(groups)],\n return dict(zip(ddf['hour_group'],ddf[0]))\n\ndef get_daysofweek(lowercase = True):\n d = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n if lowercase:\n return [x.lower() for x in d]\n return d\n\n\ndef fuzzy_match_day(day_arg, accept_all=True,all_keyword='all',cap_insensitive=True,\n dow = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']):\n # move to all lowercase\n if cap_insensitive:\n dow = [x.lower() for x in dow]\n day_arg = day_arg.lower()\n all_keyword = all_keyword.lower()\n # add all keyword\n if accept_all:\n dow.append(all_keyword)\n # search for a match in the first letters\n #print(dow)\n #print(day_arg)\n pm = [x for x in dow if day_arg in x[:len(day_arg)]]\n # if a unique match is found, return it\n if len(pm) == 1:\n #print(\"return result:\", pm[0])\n return pm[0]\n # if 0, or more than one match, we can't get the day \n #print(\"return result:\", None)\n return None\n\n\n\n\n","sub_path":"fall2015/practice/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487846436","text":"from __future__ import absolute_import\n\nimport regex\nimport logging\nfrom polyglot.text import Text\nfrom normality import collapse_spaces\n\nfrom aleph.analyze.analyzer import Analyzer\nfrom aleph.model import Document, DocumentTag, DocumentTagCollector\n\nlog = logging.getLogger(__name__)\n\n\nclass PolyglotEntityAnalyzer(Analyzer):\n ORIGIN = 'polyglot'\n MIN_LENGTH = 100\n CLEAN = regex.compile('(^[^\\w]*|[^\\w]*$)')\n TYPES = {\n 'I-PER': DocumentTag.TYPE_PERSON,\n 'I-ORG': DocumentTag.TYPE_ORGANIZATION,\n }\n IGNORED = [\n Document.SCHEMA_PACKAGE,\n Document.SCHEMA_FOLDER,\n Document.SCHEMA_IMAGE,\n Document.SCHEMA_TABLE\n ]\n\n def analyze(self, document):\n if document.schema in self.IGNORED:\n return\n\n collector = DocumentTagCollector(document, self.ORIGIN)\n text = document.text\n if text is None or len(text) <= self.MIN_LENGTH:\n return\n 
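# A single tagged language is assumed to be a trustworthy hint for polyglot; otherwise fall back to auto-detection.
        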
try:\n hint_language_code = None\n if len(document.languages) == 1:\n hint_language_code = document.languages[0]\n text = Text(text, hint_language_code=hint_language_code)\n for entity in text.entities:\n if entity.tag == 'I-LOC':\n continue\n\n label = ' '.join(entity)\n label = self.CLEAN.sub(' ', label)\n label = collapse_spaces(label)\n if ' ' not in label or len(label) < 4 or len(label) > 200:\n continue\n # log.info(\"Entity [Doc %s]: %s [%s]\",\n # document.id, label, entity.tag)\n collector.emit(label, self.TYPES[entity.tag])\n\n except ValueError as ve:\n log.warning('NER value error: %r', ve)\n except Exception as ex:\n log.warning('NER failed: %r', ex)\n finally:\n collector.save()\n log.info('Polyglot extracted %s entities.', len(collector))\n","sub_path":"aleph/analyze/polyglot_entity.py","file_name":"polyglot_entity.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"233146205","text":"import pandas as pd\nimport math\nimport timeit\nimport mysql.connector\nimport pandas as pd\nfrom nltk.corpus import stopwords\nimport string\nimport re\nfrom whoosh import qparser\nfrom whoosh.lang.porter import stem\nfrom whoosh.lang.morph_en import variations\nfrom whoosh.index import create_in\nfrom whoosh.analysis import StemmingAnalyzer\nfrom whoosh.fields import *\nfrom whoosh import index\nimport os, os.path\nfrom whoosh.qparser import MultifieldParser, OrGroup\nfrom bs4 import BeautifulSoup\n\n# Database connectivity :\ndb_connection = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"olcademy\"\n)\nmy_database = db_connection.cursor()\nsql_statement = \"SELECT * FROM `courses`\"\ndf = pd.read_sql(sql=sql_statement, con=db_connection)\n\n# Removing unneccesary text\ndf['course_description'].astype(str)\ndf['trainer_description'].astype(str)\ndf['clean_course_description'] = df['course_description'].apply(lambda x : BeautifulSoup(str(x), 'lxml').get_text())\ndf['clean_trainer_description'] = df['trainer_description'].apply(lambda x : BeautifulSoup(str(x), 'lxml').get_text())\n\n# Selecting required columns :\ndf=df.loc[:,['course_id','trainer_name','course_title','course_subtitle','course_price','clean_course_description',\n 'course_language','level_of_course','course_category','clean_trainer_description',]]\n\n\n#Schema of each document :\nschema = Schema(trainer_name=TEXT(analyzer=StemmingAnalyzer(minsize=3), stored=True),\n course_title=TEXT(spelling=True,field_boost=2.0, stored=True),\n clean_course_description=TEXT(analyzer=StemmingAnalyzer(minsize=0),spelling=True, stored=True),\n clean_trainer_description=TEXT(analyzer=StemmingAnalyzer(minsize=3), stored=True),\n course_language=TEXT(analyzer=StemmingAnalyzer(minsize=3), stored=True),\n level_of_course=TEXT(analyzer=StemmingAnalyzer(minsize=3), stored=True),\n course_category=TEXT(analyzer=StemmingAnalyzer(minsize=3), stored=True),\n course_subtitle=TEXT(field_boost=1.0),\n course_id=ID(stored=True))\n\n# Creating index :\nif not os.path.exists(\"indexdir\"):\n os.mkdir(\"indexdir\")\nix = index.create_in(\"indexdir\", schema)\n#open an existing index object\nix = index.open_dir(\"indexdir\")\n#create a writer object to add documents to the index\nwriter = ix.writer()\n\n#Writing the document locally\nfor i in range(len(df)):\n x2=df.trainer_name[i]\n x3=df.course_title[i]\n x4=df.clean_course_description[i]\n x7=df.course_subtitle[i]\n x8=df.course_category[i]\n x1=df.course_id[i]\n \n 
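# Only the fields populated above are indexed; the remaining schema fields (language, level, descriptions) are left unset in this demo.
    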
writer.add_document(\n trainer_name=x2,\n course_title=x3,\n clean_course_description=x4,\n course_subtitle=x7,\n course_category=x8,\n course_id=x1\n )\n \nwriter.commit()\n\n#Parser to parse the results :\nqp = MultifieldParser([\"course_title\",\n \"trainer_name\",\n \"clean_course_description\",\n \"course_subtitle\"\n ], # all selected fields\n schema=ix.schema, # with my schema\n group=OrGroup) # OR instead AND\n\n\n#Function to ask query and provide search results\nstop_words_eng = stopwords.words(\"english\")\ndef ask(user_query):\n #user_query = str(input(\"Enter your query:\"))\n #user_query = \"java\"\n #print('\\n')\n #start = timeit.default_timer()\n user_query = user_query.lower()\n user_query = ' '.join([word for word in user_query.split() if word not in stop_words_eng])\n print(\"this is your query: \" + user_query+'\\n\\n')\n\n q = qp.parse(user_query) \n\n res_list=[]\n with ix.searcher() as searcher:\n results = searcher.search(q)\n for hit in results:\n res_list.append((hit['course_id']))\n return (res_list) \n ","sub_path":"olcademy_sql.py","file_name":"olcademy_sql.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132634008","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom typing import TYPE_CHECKING\n\nfrom azure.mgmt.core import ARMPipelineClient\nfrom msrest import Deserializer, Serializer\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from typing import Any, Optional\n\n from azure.core.credentials import TokenCredential\n\nfrom ._configuration import DevicesCloudPrintConfiguration\nfrom .operations import PrintPrintOperations\nfrom .operations import PrintOperations\nfrom .operations import PrintPrinterOperations\nfrom .operations import PrintPrinterTaskTriggerOperations\nfrom .operations import PrintPrinterShareOperations\nfrom .operations import PrintPrinterSharePrinterOperations\nfrom .operations import PrintReportOperations\nfrom .operations import PrintServiceOperations\nfrom .operations import PrintShareOperations\nfrom .operations import PrintSharePrinterOperations\nfrom .operations import PrintTaskDefinitionOperations\nfrom .operations import PrintTaskDefinitionTaskOperations\nfrom . 
import models\n\n\nclass DevicesCloudPrint(object):\n \"\"\"DevicesCloudPrint.\n\n :ivar print_print: PrintPrintOperations operations\n :vartype print_print: devices_cloud_print.operations.PrintPrintOperations\n :ivar print: PrintOperations operations\n :vartype print: devices_cloud_print.operations.PrintOperations\n :ivar print_printer: PrintPrinterOperations operations\n :vartype print_printer: devices_cloud_print.operations.PrintPrinterOperations\n :ivar print_printer_task_trigger: PrintPrinterTaskTriggerOperations operations\n :vartype print_printer_task_trigger: devices_cloud_print.operations.PrintPrinterTaskTriggerOperations\n :ivar print_printer_share: PrintPrinterShareOperations operations\n :vartype print_printer_share: devices_cloud_print.operations.PrintPrinterShareOperations\n :ivar print_printer_share_printer: PrintPrinterSharePrinterOperations operations\n :vartype print_printer_share_printer: devices_cloud_print.operations.PrintPrinterSharePrinterOperations\n :ivar print_report: PrintReportOperations operations\n :vartype print_report: devices_cloud_print.operations.PrintReportOperations\n :ivar print_service: PrintServiceOperations operations\n :vartype print_service: devices_cloud_print.operations.PrintServiceOperations\n :ivar print_share: PrintShareOperations operations\n :vartype print_share: devices_cloud_print.operations.PrintShareOperations\n :ivar print_share_printer: PrintSharePrinterOperations operations\n :vartype print_share_printer: devices_cloud_print.operations.PrintSharePrinterOperations\n :ivar print_task_definition: PrintTaskDefinitionOperations operations\n :vartype print_task_definition: devices_cloud_print.operations.PrintTaskDefinitionOperations\n :ivar print_task_definition_task: PrintTaskDefinitionTaskOperations operations\n :vartype print_task_definition_task: devices_cloud_print.operations.PrintTaskDefinitionTaskOperations\n :param credential: Credential needed for the client to connect to Azure.\n :type credential: ~azure.core.credentials.TokenCredential\n :param top: Show only the first n items.\n :type top: int\n :param skip: Skip the first n items.\n :type skip: int\n :param search: Search items by search phrases.\n :type search: str\n :param filter: Filter items by property values.\n :type filter: str\n :param count: Include count of items.\n :type count: bool\n :param str base_url: Service URL\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n \"\"\"\n\n def __init__(\n self,\n credential, # type: \"TokenCredential\"\n top=None, # type: Optional[int]\n skip=None, # type: Optional[int]\n search=None, # type: Optional[str]\n filter=None, # type: Optional[str]\n count=None, # type: Optional[bool]\n base_url=None, # type: Optional[str]\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n if not base_url:\n base_url = 'https://graph.microsoft.com/beta'\n self._config = DevicesCloudPrintConfiguration(credential, top, skip, search, filter, count, **kwargs)\n self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n\n self.print_print = PrintPrintOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print = PrintOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer = PrintPrinterOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer_task_trigger = PrintPrinterTaskTriggerOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer_share = PrintPrinterShareOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer_share_printer = PrintPrinterSharePrinterOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_report = PrintReportOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_service = PrintServiceOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_share = PrintShareOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_share_printer = PrintSharePrinterOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_task_definition = PrintTaskDefinitionOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_task_definition_task = PrintTaskDefinitionTaskOperations(\n self._client, self._config, self._serialize, self._deserialize)\n\n def close(self):\n # type: () -> None\n self._client.close()\n\n def __enter__(self):\n # type: () -> DevicesCloudPrint\n self._client.__enter__()\n return self\n\n def __exit__(self, *exc_details):\n # type: (Any) -> None\n self._client.__exit__(*exc_details)\n","sub_path":"msgraph-cli-extensions/beta/devicescloudprint_beta/azext_devicescloudprint_beta/vendored_sdks/devicescloudprint/_devices_cloud_print.py","file_name":"_devices_cloud_print.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244783173","text":"#!/usr/bin/env python\n\nimport sys\nimport copy\nimport rospy\nimport time\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nfrom sawyer_move.srv import grippercmd, grippercmdRequest, grippercmdResponse\nfrom math import pi\nfrom std_msgs.msg import String\nfrom moveit_commander.conversions import pose_to_list\n\n\nclass moveRobot:\n def __init__(self):\n moveit_commander.roscpp_initialize(sys.argv)\n rospy.init_node('move_group_python_interface')\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(\"right_arm\")\n rospy.wait_for_service(\"/gripper_control\")\n self.gripper_service_control = rospy.ServiceProxy(\"/gripper_control\", grippercmd)\n rospy.loginfo(\"we are good to go\")\n\n def getInfo(self):\n rospy.loginfo(\"planning frame :\" +\n str(self.group.get_planning_frame()))\n rospy.loginfo(\"eff link :\" + str(self.group.get_end_effector_link()))\n rospy.loginfo(\"robot planning groups\" +\n 
str(self.robot.get_group_names()))\n        rospy.loginfo(self.robot.get_current_state())\n\n    def getCurrentJointAngle(self):\n        rospy.loginfo(self.group.get_current_joint_values())\n\n    def getCurrentPoseAngle(self):\n        rospy.loginfo(self.group.get_current_pose())\n\n    def getJointNames(self):\n        rospy.loginfo(self.group.get_joints())\n\n    def sendJointGoal(self, data):\n        self.group.go(data, wait=True)\n        self.group.stop()\n        rospy.loginfo(\"joint motion completed\")\n\n    def sendPoseGoal(self, data):\n        pose_goal = geometry_msgs.msg.Pose()\n        pose_goal.orientation.w = 1.0\n        pose_goal.position.x = 0.4\n        pose_goal.position.y = 0.1\n        pose_goal.position.z = 0.4\n        self.group.set_pose_target(pose_goal)\n        plan = self.group.go(wait=True)\n        self.group.stop()\n        rospy.loginfo(\"pose motion completed\")\n    \n    def gripperControl(self, data):\n        grippersrv = grippercmdRequest()\n        grippersrv.grip.data = data\n        if (self.gripper_service_control.call(grippersrv)):\n            rospy.loginfo(\"service call succeeded!\")\n        else:\n            rospy.loginfo(\"service call failed\")\n\n\n\nif __name__ == \"__main__\":\n    mr = moveRobot()\n    pose_0 = [0.114, -0.69, -0.075, 2.06, 0.0131, 0.310, 0.0896]\n    pose_1 = [0.658,-0.736,1.814,-2.243,-2.326,0.98,2.067]\n    mr.getCurrentJointAngle()\n    # mr.gripperControl(True)\n    # time.sleep(1)\n    # mr.gripperControl(False)\n    # mr.sendJointGoal(pose_0)\n    # mr.sendPoseGoal(\"a\")\n\n\n\n\n\n\n\n\n# 'right_j0'\n# 'right_j1'\n# 'right_j2'\n# 'right_j3'\n# 'right_j4'\n# 'right_j5'\n# 'right_j6'\n\n","sub_path":"src/sawyer_move/src/door.py","file_name":"door.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"120234081","text":"from tkinter import *\n\n\ndef left_click(event):\n    frame1.configure(bg='red')\n    frame2.configure(bg='white')\n    frame3.configure(bg='white')\n\n\ndef middle_click(event):\n    frame1.configure(bg='white')\n    frame2.configure(bg='red')\n    frame3.configure(bg='white')\n\n\ndef right_click(event):\n    frame1.configure(bg='white')\n    frame2.configure(bg='white')\n    frame3.configure(bg='red')\n\n\nroot = Tk()\n\nroot.configure(bg='black')\n\nframe1 = Frame(root, width=250, height=250, bg='white')\nframe2 = Frame(root, width=250, height=250, bg='white')\nframe3 = Frame(root, width=250, height=250, bg='white')\nframe1.grid(row=0, column=0)\nframe2.grid(row=0, column=1, padx=1)\nframe3.grid(row=0, column=2)\n\nroot.bind('<Button-1>', left_click)\nroot.bind('<Button-2>', middle_click)\nroot.bind('<Button-3>', right_click)\n\nroot.mainloop()\n","sub_path":"tkinter_ed/les6/mouse_event.py","file_name":"mouse_event.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"210615939","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\n\nclass schoolrestaurant(dml.Algorithm):\n    contributor = 'cici_fyl'\n    reads = []\n    writes = ['school', 'restaurant']\n\n    @staticmethod\n    def execute(trial = False):\n        '''Retrieve some data sets (not using the API here for the sake of simplicity).'''\n        startTime = datetime.datetime.now()\n\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate('cici_fyl', 'cici_fyl')\n\n        url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/1d9509a8b2fd485d9ad471ba2fdb1f90_0.geojson'\n        response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n        r = json.loads(response)\n        repo.dropCollection(\"school\")\n        
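        # Sketch (not part of the original algorithm): honor the otherwise
        # unused `trial` flag by sampling the GeoJSON FeatureCollection; the
        # cap of 100 features is an assumed value, not taken from the source.
        if trial:
            r["features"] = r["features"][:100]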
repo.createCollection(\"school\")\n \n r=r[\"features\"]\n\n repo['cici_fyl.school'].insert_many(r)\n repo['cici_fyl.school'].metadata({'complete':True})\n print(repo['cici_fyl.school'].metadata())\n\n url = 'https://data.cityofboston.gov/resource/fdxy-gydq.json'\n response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n r = json.loads(response)\n repo.dropCollection(\"restaurant\")\n repo.createCollection(\"restaurant\")\n repo['cici_fyl.restaurant'].insert_many(r)\n repo['cici_fyl.restaurant'].metadata({'complete':True})\n print(repo['cici_fyl.restaurant'].metadata())\n\n\n repo.logout()\n\n endTime = datetime.datetime.now()\n\n return {\"start\":startTime, \"end\":endTime}\n \n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n '''\n Create the provenance document describing everything happening\n in this script. Each run of the script will generate a new\n document describing that invocation event.\n '''\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('cici_fyl', 'cici_fyl')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n doc.add_namespace('bod','http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n\n this_script = doc.agent('alg:cici_fyl#schoolrestaurant', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n dataset1 = doc.entity('bdp:fdxy-gydq', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'json'})\n dataset2 = doc.entity('bod:1d9509a8b2fd485d9ad471ba2fdb1f90_0', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'geojson'})\n\n school_data = doc.entity('dat:cici_fyl#school', {'prov:label':'Boston Public Schools', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\n restaurant_data = doc.entity('dat:cici_fyl#restaurant', {prov.model.PROV_LABEL:'Boston Restaurants', prov.model.PROV_TYPE:'ont:DataSet'})\n get_school = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n get_restaurant = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_school, this_script)\n doc.wasAssociatedWith(get_restaurant, this_script)\n #Query might need to be changed\n doc.usage(get_school,dataset2, startTime, None,\n {prov.model.PROV_TYPE:'ont:Retrieval',\n 'ont:Query':'?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'\n }\n )\n doc.usage(get_restaurant, dataset1, startTime, None,\n {prov.model.PROV_TYPE:'ont:Retrieval',\n 'ont:Query':'?type=Animal+Lost&$select=type,latitude,longitude,OPEN_DT'\n }\n )\n\n doc.wasAttributedTo(school_data, this_script)\n doc.wasGeneratedBy(school_data, get_school, endTime)\n doc.wasDerivedFrom(school_data, dataset2, get_school, get_school)\n\n\n\n doc.wasAttributedTo(restaurant_data, this_script)\n doc.wasGeneratedBy(restaurant_data, get_restaurant, endTime)\n doc.wasDerivedFrom(restaurant_data, dataset1, get_restaurant)\n \n\n repo.logout()\n \n return 
doc\n\n\n","sub_path":"cici_fyl/project/cici_fyl/schoolrestaurant.py","file_name":"schoolrestaurant.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114620514","text":"import yaml\r\nimport numpy as np\r\nimport cv2\r\n\r\nfn = \"phong_testvideo_03.mp4\" # 3\r\nfn_yaml = \"phong_yml_02.yml\"\r\nfn_out = \"phong_outputvideo_02.avi\"\r\ncascade_src = 'phong_classifier.xml'\r\ncar_cascade = cv2.CascadeClassifier(cascade_src)\r\nglobal_str = \"Last change at: \"\r\nchange_pos = 0.00\r\ndict = {\r\n 'print_out':True,\r\n 'text_overlay': True,\r\n 'parking_overlay': True,\r\n 'parking_id_overlay': True,\r\n 'parking_detection': True,\r\n 'motion_detection': False,\r\n 'pedestrian_detection': False, # mất nhiều power\r\n 'min_area_motion_contour': 500,\r\n 'park_laplacian_th': 2.7,\r\n 'park_sec_to_wait': 1, # thời gian đợi để thay đổi trạng thái của region\r\n 'start_frame': 0, # Bắt đầu từ frame nào\r\n 'show_ids': True, # Hiển thị id cho từng lot\r\n 'classifier_used': True,\r\n 'save_video': False\r\n}\r\n\r\n# Set từ video\r\ncap = cv2.VideoCapture(fn)\r\nvideo_info = {'fps': cap.get(cv2.CAP_PROP_FPS),\r\n 'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) * 0.6),\r\n 'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.6),\r\n 'fourcc': cap.get(cv2.CAP_PROP_FOURCC),\r\n 'num_of_frames': int(cap.get(cv2.CAP_PROP_FRAME_COUNT))}\r\n\r\ncap.set(cv2.CAP_PROP_POS_FRAMES, dict['start_frame']) # Nhảy đến frame được xác định trước\r\n\r\n\r\ndef run_classifier(img, id):\r\n cars = car_cascade.detectMultiScale(img, 1.1, 1)\r\n if cars == ():\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n# Định nghĩa codec và tạo VideoWriter object\r\nif dict['save_video']:\r\n fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I',\r\n 'D') # các lựa chọn: ('P','I','M','1'), ('D','I','V','X'), ('M','J','P','G'), ('X','V','I','D')\r\n out = cv2.VideoWriter(fn_out, -1, 25.0, (video_info['width'], video_info['height']))\r\n\r\n# Khởi tạo HOG descriptor/person detector. 
Mất rất nhiều power cho quá trình này.\r\nif dict['pedestrian_detection']:\r\n hog = cv2.HOGDescriptor()\r\n hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\r\n\r\n # Sử dụng Background subtraction tách cảnh nền.\r\nif dict['motion_detection']:\r\n fgbg = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)\r\n\r\n# Đọc file yaml (parking space polygons)\r\nwith open(fn_yaml, 'r') as stream:\r\n parking_data = yaml.load(stream)\r\nparking_contours = []\r\nparking_bounding_rects = []\r\nparking_mask = []\r\nparking_data_motion = []\r\nif parking_data != None:\r\n for park in parking_data:\r\n points = np.array(park['points'])\r\n rect = cv2.boundingRect(points)\r\n points_shifted = points.copy()\r\n points_shifted[:, 0] = points[:, 0] - rect[0] # shift contour to region of interest\r\n points_shifted[:, 1] = points[:, 1] - rect[1]\r\n parking_contours.append(points)\r\n parking_bounding_rects.append(rect)\r\n mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted], contourIdx=-1,\r\n color=255, thickness=-1, lineType=cv2.LINE_8)\r\n mask = mask == 255\r\n parking_mask.append(mask)\r\n\r\nkernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) # morphological kernel\r\nkernel_dilate = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 19))\r\nif parking_data != None:\r\n parking_status = [False] * len(parking_data)\r\n parking_buffer = [None] * len(parking_data)\r\n\r\n\r\ndef print_parkIDs(park, coor_points, frame_rev):\r\n moments = cv2.moments(coor_points)\r\n centroid = (int(moments['m10'] / moments['m00']) - 3, int(moments['m01'] / moments['m00']) + 3)\r\n # Gắn số vào các region được marked bằng tay\r\n cv2.putText(frame_rev, str(park['id']), (centroid[0] + 1, centroid[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (255, 255, 255), 1, cv2.LINE_AA)\r\n cv2.putText(frame_rev, str(park['id']), (centroid[0] - 1, centroid[1] - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (255, 255, 255), 1, cv2.LINE_AA)\r\n cv2.putText(frame_rev, str(park['id']), (centroid[0] + 1, centroid[1] - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (255, 255, 255), 1, cv2.LINE_AA)\r\n cv2.putText(frame_rev, str(park['id']), (centroid[0] - 1, centroid[1] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (255, 255, 255), 1, cv2.LINE_AA)\r\n cv2.putText(frame_rev, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\r\n\r\n\r\nwhile (cap.isOpened()):\r\n current_count = 0\r\n video_cur_pos = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0 # Vị trí hiện tại của video file tính theo giây\r\n video_cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES) # Vị trí tính theo frame\r\n ret, frame_initial = cap.read()\r\n if ret == True:\r\n frame = cv2.resize(frame_initial, None, fx=0.6, fy=0.6)\r\n if ret == False:\r\n print(\"Video ended\")\r\n break\r\n\r\n # Background Subtraction\r\n frame_blur = cv2.GaussianBlur(frame.copy(), (5, 5), 3)\r\n frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)\r\n frame_out = frame.copy()\r\n\r\n # Hiển thị số frame trên góc trái video\r\n if dict['text_overlay']:\r\n str_on_frame = \"%d/%d\" % (video_cur_frame, video_info['num_of_frames'])\r\n cv2.putText(frame_out, str_on_frame, (5, 30), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.8, (0, 255, 255), 2, cv2.LINE_AA)\r\n cv2.putText(frame_out, global_str + str(round(change_pos, 2)) + 'sec', (5, 60), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.8, (255, 0, 0), 2, cv2.LINE_AA)\r\n\r\n # motion detection cho mọi objects\r\n if dict['motion_detection']:\r\n fgmask = 
fgbg.apply(frame_blur)\r\n bw = np.uint8(fgmask == 255) * 255\r\n bw = cv2.erode(bw, kernel_erode, iterations=1)\r\n bw = cv2.dilate(bw, kernel_dilate, iterations=1)\r\n (_, cnts, _) = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n # Áp loop cho contours\r\n for c in cnts:\r\n # Nếu contours quá nhỏ thì bỏ qua\r\n if cv2.contourArea(c) < dict['min_area_motion_contour']:\r\n continue\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 1)\r\n\r\n # Detect xe và chỗ trống\r\n if dict['parking_detection']:\r\n for ind, park in enumerate(parking_data):\r\n points = np.array(park['points'])\r\n rect = parking_bounding_rects[ind]\r\n roi_gray = frame_gray[rect[1]:(rect[1] + rect[3]),\r\n rect[0]:(rect[0] + rect[2])] # crop ROI để tính toán nhanh hơn\r\n\r\n laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)\r\n points[:, 0] = points[:, 0] - rect[0] # Chuyển contour sang ROI\r\n points[:, 1] = points[:, 1] - rect[1]\r\n delta = np.mean(np.abs(laplacian * parking_mask[ind]))\r\n status = delta < dict['park_laplacian_th']\r\n # Nếu phát hiện có sự thay đổi thì lưu thời gian lại\r\n if status != parking_status[ind] and parking_buffer[ind] == None:\r\n parking_buffer[ind] = video_cur_pos\r\n change_pos = video_cur_pos\r\n\r\n # Nếu trạng thái vẫn khác với cái đã được lưu và counter đang open\r\n elif status != parking_status[ind] and parking_buffer[ind] != None:\r\n if video_cur_pos - parking_buffer[ind] > dict['park_sec_to_wait']:\r\n parking_status[ind] = status\r\n parking_buffer[ind] = None\r\n # Nếu trạng thái vẫn như vậy và counter đang open\r\n elif status == parking_status[ind] and parking_buffer[ind] != None:\r\n parking_buffer[ind] = None\r\n\r\n # Thay đổi màu và trạng thái hiển thị trên section phía trên\r\n if dict['parking_overlay']:\r\n for ind, park in enumerate(parking_data):\r\n points = np.array(park['points'])\r\n if parking_status[ind]:\r\n color = (0, 255, 0) #BGR\r\n rect = parking_bounding_rects[ind]\r\n roi_gray_ov = frame_gray[rect[1]:(rect[1] + rect[3]),\r\n rect[0]:(rect[0] + rect[2])] # crop ROI để tính toán nhanh hơn\r\n res = run_classifier(roi_gray_ov, ind)\r\n current_count += 1\r\n if res:\r\n parking_data_motion.append(parking_data[ind])\r\n color = (0, 0, 255) #BGR\r\n else:\r\n color = (0, 0, 255) #BGR\r\n\r\n cv2.drawContours(frame_out, [points], contourIdx=-1,\r\n color=color, thickness=2, lineType=cv2.LINE_8)\r\n\r\n if dict['show_ids']:\r\n print_parkIDs(park, points, frame_out)\r\n # Hiển thị số lot trống trong frame\r\n cv2.putText(frame_out, 'Vacant spots in frame: ' + str(current_count), (7, 85), cv2.FONT_HERSHEY_SIMPLEX, 0.728,\r\n (98, 189, 184), 2, cv2.LINE_AA)\r\n\r\n if dict['print_out']:\r\n count = current_count\r\n file = open(\"test.txt\", \"w\")\r\n file.write(str(count))\r\n file.close()\r\n\r\n if parking_data_motion != []:\r\n for index, park_coord in enumerate(parking_data_motion):\r\n points = np.array(park_coord['points'])\r\n color = (0, 0, 255)\r\n recta = parking_bounding_rects[ind]\r\n roi_gray1 = frame_gray[recta[1]:(recta[1] + recta[3]),\r\n recta[0]:(recta[0] + recta[2])] # crop ROI để tính toán nhanh hơn\r\n fgbg1 = cv2.createBackgroundSubtractorMOG2(history=300, varThreshold=16, detectShadows=True)\r\n roi_gray1_blur = cv2.GaussianBlur(roi_gray1.copy(), (5, 5), 3)\r\n fgmask1 = fgbg1.apply(roi_gray1_blur)\r\n bw1 = np.uint8(fgmask1 == 255) * 255\r\n bw1 = cv2.erode(bw1, kernel_erode, iterations=1)\r\n bw1 = cv2.dilate(bw1, kernel_dilate, iterations=1)\r\n 
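            # Reference note on the two detectors in play: the Laplacian test
            # above reduces each masked spot to one scalar, delta =
            # np.mean(np.abs(cv2.Laplacian(roi_gray, cv2.CV_64F) * parking_mask[ind])),
            # and reads the spot as vacant when delta < dict['park_laplacian_th']
            # (2.7 here), since a parked car adds edges that raise the response.
            # This block, by contrast, re-creates fgbg1 for every spot on every
            # frame, so the MOG2 background model never accumulates history;
            # hoisting createBackgroundSubtractorMOG2 out of the loop would let
            # it actually learn the background.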
(_, cnts1, _) = cv2.findContours(bw1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n # Áp loop cho contours\r\n for c in cnts1:\r\n print(cv2.contourArea(c))\r\n # Nếu contours quá nhỏ thì bỏ qua\r\n if cv2.contourArea(c) < 4:\r\n continue\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n classifier_result1 = run_classifier(roi_gray1, index)\r\n if classifier_result1:\r\n color = (0, 0, 255) # Đỏ nếu có xe BGR\r\n else:\r\n color = (0, 255, 0)\r\n classifier_result1 = run_classifier(roi_gray1, index)\r\n if classifier_result1:\r\n color = (0, 0, 255) # Đỏ nếu có xe\r\n else:\r\n color = (0, 255, 0)\r\n cv2.drawContours(frame_out, [points], contourIdx=-1, color=color, thickness=2, lineType=cv2.LINE_8)\r\n\r\n if dict['pedestrian_detection']:\r\n # Detecr người trong video, sẽ làm giản tốc độ chương trình vì cần một GPU có tốc độ xử lý cao\r\n (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4), padding=(8, 8), scale=1.05)\r\n # Vẽ bounding box\r\n for (x, y, w, h) in rects:\r\n cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n\r\n # write output frames\r\n if dict['save_video']:\r\n out.write(frame_out)\r\n\r\n # Hiển thị video\r\n cv2.imshow('frame', frame_out)\r\n k = cv2.waitKey(1)\r\n if k == ord('q'):\r\n break\r\n elif k == ord('c'):\r\n cv2.imwrite('frame%d.jpg' % video_cur_frame, frame_out)\r\n elif k == ord('j'):\r\n cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame + 1000) # Nhảy 1000 frames\r\n elif k == ord('u'):\r\n cap.set(cv2.CAP_PROP_POS_FRAMES, video_cur_frame + 500) # Nhảy 500 frames\r\n if cv2.waitKey(33) == 27:\r\n break\r\n\r\ncv2.waitKey(0)\r\ncap.release()\r\nif dict['save_video']: out.release()\r\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497502194","text":"#/bin/env python3\n\nimport radix\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(description='foobar')\nparser.add_argument('-n', '--num', type=int, default=3)\nparser.add_argument('-s', '--sep', default='')\n\nif __name__ == '__main__':\n args = parser.parse_args(sys.argv[1:])\n print(args)\n print(radix.n_words(args.num, args.sep))\n","sub_path":"sjautils/word_pass.py","file_name":"word_pass.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"438492366","text":"from lib.cells import DenseCell\nfrom lib.activations import *\n\nclass FullyConnectedFeedForward:\n\n fc1 = DenseCell(20, 20, name=\"dense1\")\n tanh1 = Tanh()\n\n fc2 = DenseCell(20, 20, name=\"dense2\")\n tanh2 = Tanh()\n\n fc3 = DenseCell(20, 20, name=\"dense3\")\n tanh3 = Tanh()\n\n smce = SoftmaxCrossEntropy()\n\n train_objects = [fc1, fc2, fc3]\n\n def forward(self, batch, target):\n \"\"\"\n Performs a forward pass through a fully connected network and prepares it for the backward pass.\n :param batch_sequence: numpy ndarray with rows as samples and cols input dimensions.\n :return:\n \"\"\"\n assert type(batch) == np.ndarray, \"batch input must be an numpy ndarray\"\n assert batch.shape[1] != 0, \"batch must have at least 2 dimensions with sample rows \" \\\n \"and data dimensions as columns.\"\n assert target.shape[1] != 0, \"target must have at least 2 dimensions with sample rows \" \\\n \"and one-hot class predicitons as columns.\"\n assert batch.shape[0] == target.shape[0], \"batch and targets don't have the same number \" \\\n 
\"of samples\"\n\n z1 = self.fc1(batch)\n a1 = self.tanh1(z1)\n\n z2 = self.fc2(a1)\n a2 = self.tanh2(z2)\n\n z3 = self.fc3(a2)\n a3 = self.tanh3(z3)\n\n loss = self.smce.forward(a3, target)\n prediction = self.smce.softmax(a3)\n\n return loss, prediction\n\n def backward(self):\n \"\"\"\n Performs a backward pass. No inputs are needed since every object saves the\n necessary variables during the forward pass.\n :return:\n \"\"\"\n curr_delta = self.smce.backward()\n\n curr_delta = self.tanh3.backward(curr_delta)\n curr_delta = self.fc3.backward(curr_delta)\n\n curr_delta = self.tanh2.backward(curr_delta)\n curr_delta = self.fc2.backward(curr_delta)\n\n curr_delta = self.tanh1.backward(curr_delta)\n curr_delta = self.fc1.backward(curr_delta)\n\n","sub_path":"networks/fcMnistGradientCheck.py","file_name":"fcMnistGradientCheck.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155936127","text":"#!/usr/bin/env python\n\"\"\" \nConnection class for S3 (Simple Storage Service)\nReturns raw results from S3\n\"\"\"\n\n# From s3/connection.py:\nimport xml.sax\nimport urllib, base64\nimport time\nimport boto.utils\nimport types\n#from boto.connection import AWSAuthConnection\n#from aws.awsconnection import AWSAuthConnection\nfrom aws.aws_connection import AWSAuthConnection\n\nfrom boto import handler\nfrom boto.s3.bucket import Bucket\nfrom boto.s3.key import Key\nfrom boto.resultset import ResultSet\nfrom boto.exception import S3ResponseError, S3CreateError, BotoClientError\n\ndef assert_case_insensitive(f):\n def wrapper(*args, **kwargs):\n if len(args) == 3 and not (args[2].islower() or args[2].isalnum()):\n raise BotoClientError(\"Bucket names cannot contain upper-case \" \\\n \"characters when using either the sub-domain or virtual \" \\\n \"hosting calling format.\")\n return f(*args, **kwargs)\n return wrapper\n\n\nclass _CallingFormat:\n def build_url_base(self, protocol, server, bucket, key=''):\n url_base = '%s://' % protocol\n url_base += self.build_host(server, bucket)\n url_base += self.build_path_base(bucket, key)\n return url_base\n\n def build_host(self, server, bucket):\n if bucket == '':\n return server\n else:\n return self.get_bucket_server(server, bucket)\n\n def build_auth_path(self, bucket, key=''):\n path = ''\n if bucket != '':\n path = '/' + bucket\n return path + '/%s' % urllib.quote(key)\n\n def build_path_base(self, bucket, key=''):\n return '/%s' % urllib.quote(key)\n\nclass SubdomainCallingFormat(_CallingFormat):\n @assert_case_insensitive\n def get_bucket_server(self, server, bucket):\n return '%s.%s' % (bucket, server)\n\nclass VHostCallingFormat(_CallingFormat):\n @assert_case_insensitive\n def get_bucket_server(self, server, bucket):\n return bucket\n\nclass OrdinaryCallingFormat(_CallingFormat):\n def get_bucket_server(self, server, bucket):\n return server\n\n def build_path_base(self, bucket, key=''):\n path_base = '/'\n if bucket:\n path_base += \"%s/\" % bucket\n return path_base + urllib.quote(key)\n\nclass Location:\n DEFAULT = ''\n EU = 'EU'\n\n\n# Modelled after S3Connection\nclass S3Connection(AWSAuthConnection):\n \"\"\" S3 connection class \"\"\"\n\n #--------------------------------\n # Variables\n #--------------------------------\n\n DefaultHost = 's3.amazonaws.com'\n QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'\n\n #--------------------------------\n # init\n #--------------------------------\n\n def __init__(self, 
aws_access_key_id=None, aws_signature=None, aws_secret_access_key=None,\n is_secure=True, port=None,\n host=DefaultHost, debug=0, https_connection_factory=None,\n calling_format=SubdomainCallingFormat(), path='/'):\n self.calling_format = calling_format\n AWSAuthConnection.__init__(self, host,\n aws_access_key_id, aws_signature, aws_secret_access_key,\n is_secure=is_secure, port=port,\n debug=debug, https_connection_factory=https_connection_factory,\n path=path)\n\n #--------------------------------\n # Overridden methods\n #--------------------------------\n\n def make_request(self, method, bucket='', key='', headers=None, data='',\n query_args=None, sender=None):\n if isinstance(bucket, Bucket):\n bucket = bucket.name\n if isinstance(key, Key):\n key = key.name\n path = self.calling_format.build_path_base(bucket, key)\n auth_path = self.calling_format.build_auth_path(bucket, key)\n host = self.calling_format.build_host(self.server_name(), bucket)\n if query_args:\n path += '?' + query_args\n auth_path += '?' + query_args\n return AWSAuthConnection.make_request(self, method, path, headers,\n data, host, auth_path, sender)\n\n #--------------------------------\n # Service methods\n #--------------------------------\n\n def get_buckets(self):\n \"\"\" Returns all buckets in service.\"\"\"\n response = self.make_request('GET')\n return response\n\n def put_bucket(self, bucket_name, short_acl=None, location=''):\n \"\"\" Creates a new bucket \"\"\"\n if short_acl:\n headers = {'x-amz-acl' :short_acl}\n else:\n headers = None\n\n if location == Location.DEFAULT:\n data = ''\n else:\n data = '' + \\\n location + ''\n response = self.make_request('PUT', bucket_name, headers=headers, data=data)\n return response\n\n def delete_bucket(self, bucket):\n response = self.make_request('DELETE', bucket)\n return response\n\n def get_canonical_user_id(self, headers=None):\n \"\"\"\n Convenience method that returns the \"CanonicalUserID\" of the user who's credentials\n are associated with the connection. The only way to get this value is to do a GET\n request on the service which returns all buckets associated with the account. As part\n of that response, the canonical userid is returned. This method simply does all of\n that and then returns just the user id.\n\n :rtype: string\n :return: A string containing the canonical user id.\n \"\"\"\n rs = self.get_all_buckets(headers=headers)\n return rs.ID\n\n\n def get_keys(self, bucket_name, headers=None, **params):\n \"\"\"\n A lower-level method for listing contents of a bucket. This closely models the actual S3\n API and requires you to manually handle the paging of results. For a higher-level method\n that handles the details of paging for you, you can use the list method.\n\n :type maxkeys: int\n :param maxkeys: The maximum number of keys to retrieve\n\n :type prefix: string\n :param prefix: The prefix of the keys you want to retrieve\n\n :type marker: string\n :param marker: The \"marker\" of where you are in the result set\n\n :type delimiter: string\n :param delimiter: \"If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. 
These rolled-up keys are not returned elsewhere in the response.\"\n\n :rtype: ResultSet\n :return: The result from S3 listing the keys requested\n \"\"\"\n l = []\n for k,v in params.items():\n if k == 'maxkeys':\n k = 'max-keys'\n if isinstance(v, unicode):\n v = v.encode('utf-8')\n if v is not None:\n l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))\n if len(l):\n s = '&'.join(l)\n else:\n s = None\n response = self.make_request('GET', bucket_name,\n headers=headers, query_args=s)\n return response\n\n\n def put_object(self, bucket_name, key_name, data='', content_type='application/xml', x_amz_acl='public-read'):\n headers = {}\n headers['x-amz-acl'] = x_amz_acl\n headers['Content-Length'] = len(data)\n headers['Content-Type'] = content_type\n response = self.make_request('PUT', bucket_name, key_name, headers=headers, data=data)\n return response\n\n\n def delete_object(self, bucket_name, key_name, headers=None):\n \"\"\"\n Deletes a key from the bucket.\n\n :type key_name: string\n :param key_name: The key name to delete\n \"\"\"\n response = self.make_request('DELETE', bucket_name, key_name, headers=headers)\n return response\n\n\n\n\n# PORTED STUFF\n#.........\n\n def copy_key(self, new_bucket_name, new_key_name, src_bucket_name, src_key_name, metadata=None):\n \"\"\"\n Create a new key in the bucket by copying another existing key.\n\n :type new_key_name: string\n :param new_key_name: The name of the new key\n\n :type src_bucket_name: string\n :param src_bucket_name: The name of the source bucket\n\n :type src_key_name: string\n :param src_key_name: The name of the source key\n\n :type metadata: dict\n :param metadata: Metadata to be associated with new key.\n If metadata is supplied, it will replace the\n metadata of the source key being copied.\n If no metadata is supplied, the source key's\n metadata will be copied to the new key.\n\n :rtype: :class:`boto.s3.key.Key` or subclass\n :returns: An instance of the newly created key object\n \"\"\"\n src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))\n if metadata:\n headers = {'x-amz-copy-source' : src,\n 'x-amz-metadata-directive' : 'REPLACE'}\n headers = boto.utils.merge_meta(headers, metadata)\n else:\n headers = {'x-amz-copy-source' : src,\n 'x-amz-metadata-directive' : 'COPY'}\n response = self.connection.make_request('PUT', new_bucket_name, new_key_name,\n headers=headers)\n body = response.read()\n return body\n\n def get_acl(self, bucket_name, key_name='', headers=None):\n response = self.make_request('GET', bucket_name, key_name,\n query_args='acl', headers=headers)\n return response\n\n def set_xml_acl(self, acl_str, bucket_name, key_name='', headers=None):\n\n headers = {}\n headers['Content-Length'] = len(acl_str)\n headers['Content-Type'] = 'application/xml'\n\n response = self.make_request('PUT', bucket_name, key_name,\n data=acl_str, query_args='acl', headers=headers)\n return response\n\n\n def set_canned_acl(self, acl_str, bucket_name, key_name='', headers=None):\n assert acl_str in CannedACLStrings\n\n if headers:\n headers['x-amz-acl'] = acl_str\n else:\n headers={'x-amz-acl': acl_str}\n\n response = self.make_request('PUT', bucket_name, key_name,\n headers=headers, query_args='acl')\n body = response.read()\n return body\n\n\n\n","sub_path":"lib/aws/s3_connection.py","file_name":"s3_connection.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500736861","text":"import unittest\nfrom 
city_functions import add\n\nclass TestCityFunctions(unittest.TestCase):\n def test_city_country1(self):\n a = add('Toronto', \"Canada\")\n self.assertEqual(a, 'Toronto, Canada')\n def test_city_country2(self):\n a = add('Santiago', \"Chile\", 1000000)\n self.assertEqual(a, 'Santiago, Chile - 1000000')\n\nunittest.main()","sub_path":"ch11/test_cicties.py","file_name":"test_cicties.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225840048","text":"from collections import namedtuple\nfrom contextlib import contextmanager\n\nfrom .local import localhost\nfrom .models import Host\nfrom .ssh import SshHost\n\ndef _ensure_host(host):\n if isinstance(host, Host):\n return host\n return SshHost(host)\n\n\nclass ShellbyEnv(object):\n\n def __init__(self):\n # Write directly to __dict__ to skip __setattr__\n self.__dict__.update({\n 'hosts': [localhost],\n 'tty': True,\n 'known_hosts': (),\n 'agent_forwarding': False,\n 'directory': None,\n 'user': None,\n })\n\n def __setattr__(self, prop, value):\n if not prop in self.__dict__:\n raise KeyError('Shellby env property ' + prop + ' not valid.')\n\n if prop == 'hosts':\n value = [_ensure_host(v) for v in value]\n\n self.__dict__[prop] = value\n\n def add_hosts(self, hosts):\n self.hosts += hosts\n\n @contextmanager\n def settings(self, **kv):\n original = { key: self.__dict__[key] for key in kv.keys() }\n for prop, value in kv.items():\n self.__setattr__(prop, value)\n yield\n for prop, value in original.items():\n self.__setattr__(prop, value)\n","sub_path":"shellby/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349974067","text":"'''\napplication.py\nby Tessa Rhinehart\n\nA Python3 GUI for inspecting spectrograms\n'''\nimport warnings\nfrom collections import OrderedDict\n\nimport matplotlib\nmatplotlib.use('TkAgg')\n\n### Imports ###\n# GUI: tkInterface\nimport tkinter as tk\nimport tkinter.filedialog as fd\nimport tkinter.ttk as ttk\n\n# Plotting MPL figures with tkinter\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\n\n# Default mpl key bindings\nfrom matplotlib.backend_bases import key_press_handler\n\n# Own utils for creating and saving spectrograms and saving wave files\n#from squiggle_detector import load_file, make_spect\nfrom utils import plotter, load_file, make_spect\n\n# File inspection\n#from os import listdir, walk\n#from os.path import splitext, join, exists\nfrom pathlib import Path\nimport csv\n\n# Playing audio\nimport simpleaudio as sa\nimport numpy as np\n\n### Classes ###\nclass Application:\n\n def __init__(self, master=None):\n self.master = master\n self.position = 0\n self.dirname = None\n self.files = []\n self.samples = None\n self.spec = None\n\n # TODO: Add a button that lets you toggle whether sounds should automatically play\n self.auto_play = True\n\n self.labels_dict = OrderedDict() #for assessments\n self.play_obj = None #for controlling playback\n\n # Helpful spectrogram settings to have\n self.sample_rate = 22050.0\n self.samples_per_seg = 512\n self.overlap_percent = 0.75\n\n # Set styles\n self.radiostyle = 'IndicatorOff.TRadiobutton'\n self.set_styles()\n\n # Create two rows of buttons with buttons\n self.io_frame = tk.Frame() # For controlling\n self.playback_frame = tk.Frame()\n 
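        # For reference, the spectrogram settings above give ~23 ms analysis
        # windows (512 samples at 22050 Hz), and a 75% overlap means
        # consecutive windows share 384 samples -- common values for reviewing
        # short animal vocalizations.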
self.create_header_buttons()\n\n # Create assessment frame below (empty)\n self.assessment_button_frame = tk.Frame()\n self.assessment_button_frame.pack()\n self.assessment_navigation_frame = tk.Frame()\n self.assessment_navigation_frame.pack()\n self.assessment_variables = []\n\n # Keep track of what info is being displayed during assessments\n self.info_incomplete = False\n\n # Other parameter needed for assessment\n self.assess_file = None\n self.zoom = False\n self.assessment = OrderedDict()\n\n # Create self.canvas for plotting\n self.create_canvas()\n self.canvas.mpl_connect('key_press_event',\n lambda event: self.on_key_event(event, self.canvas))\n\n # Add navigation toolbar to plot\n #NavigationToolbar2Tk(self.fig.canvas, self.master)\n\n\n\n\n\n #################### BASIC SETUP/BREAKDOWN FUNCTIONS ####################\n\n def set_styles(self):\n\n #Tkinter styling\n ttk_style = ttk.Style()\n\n # Styling for label radiobuttons\n ttk_style.configure(self.radiostyle,\n theme='default',\n indicatorrelief=tk.FLAT,\n indicatormargin=-10,\n indicatordiameter=-1,\n relief=tk.RAISED,\n focusthickness=0, highlightthickness=0, padding=5)\n ttk_style.map(self.radiostyle,\n background=[('selected', '#BABABA'), ('active', '#E8E8E8')])\n\n\n\n def create_header_buttons(self):\n \"\"\"\n Use self._add_buttons() to create two frames of buttons\n\n Create two rows of buttons at the top of the tkinter window.\n On top, a row of buttons for file i/o, and underneath, a row of buttons\n for controlling file playback.\n \"\"\"\n ### File I/O frame\n io_commands = [\n (\"Quit\", self.clean_up),\n (\"Open File\", self.open_file),\n (\"Open Folder\", self.open_folder),\n (\"Settings\", self.set_settings),\n (\"Assess Folder\", self.assess_folder),\n ]\n self._add_buttons(\n button_commands = io_commands,\n master_frame = self.io_frame)\n\n ### Audio playback controls frame\n playback_commands = [\n (\"Play audio\", self.play),\n (\"Stop audio\", self.stop),\n (\"Toggle zoom\", self.toggle_zoom)\n ]\n self._add_buttons(\n button_commands = playback_commands,\n master_frame = self.playback_frame\n )\n\n def _add_buttons(self, button_commands, master_frame, header_text = None):\n \"\"\"\n Given pairs of button text and commands, make/pack buttons\n\n Create a row of buttons. 
Text can be added as as a \"header\"\n that will appear to the left of buttons.\n Buttons will be packed inside of master_frame from left to right.\n\n Inputs:\n button_commands : list of tuples\n list of tuples where each tuple is in this format:\n (\"Button Text\", self.function_to_call_when_button_pressed)\n master_frame : tk.Frame\n the frame these should be packed to\n header_text : str\n text to appear to the left of buttons\n \"\"\"\n button_dict = {}\n if header_text:\n ttk.Label(\n master = master_frame,\n text=header_text+\": \",\n font=\"Helvetica 18 bold\").pack(side = 'left')\n for button_command in button_commands:\n button = tk.Button(\n master = master_frame,\n text = button_command[0],\n command = button_command[1])\n button.pack(side = 'left')\n button_dict[button_command[0]] = button\n master_frame.pack()\n return button_dict\n\n\n def create_canvas(self):\n self.fig = Figure(dpi=100)\n self.ax = self.fig.add_subplot(111)\n\n # Create a tk.DrawingArea\n self.canvas = FigureCanvasTkAgg(self.fig, master=self.master)\n self.canvas.draw()\n self.canvas.get_tk_widget().configure(background=self.master.cget('bg')) # Set background color as same as window\n self.canvas.get_tk_widget().pack(side = tk.TOP, fill = tk.BOTH, expand = 1)\n #self.canvas._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = 1)\n\n\n def set_settings(self):\n # TODO: enable setting sample rate, etc.\n return\n\n\n def on_key_event(self, event, canvas):\n '''Handles keypresses:\n n - display next spectrogram in folder\n 1, 2, ... - move to correct folder and display next spectrogram'''\n self.io_frame.focus_set()\n self.io_frame.bind(\"\", lambda event: self.load_next_file())\n self.io_frame.bind(\"\", lambda event: self.load_next_file())\n self.io_frame.bind(\"\", lambda event: self.load_next_file())\n self.io_frame.bind(\"\", lambda event: self.clean_up())\n return\n\n\n def clean_up(self):\n # Finish assessment if it hasn't finished yet\n if self.assess_file:\n self.finish_assessment()\n self.master.quit()\n\n\n\n\n\n #################### KICK OFF THREE IMPORTANT FUNCTIONALITIES ####################\n\n def open_file(self):\n '''Open dialog to load & view file\n\n Opens a file dialog to allow user to open a single file.\n Returns nothing if no filename is selected.\n '''\n filename = Path(fd.askopenfilename(filetypes=(\n (\"WAV files\",\"*.wav\"),\n (\"MP3 files\", \"*.mp3\"),\n (\"all files\",\"*.*\"))))\n\n # If no filename is selected, return\n if not filename:\n return\n\n self.files = [filename]\n self.position = 0\n\n if filename:\n self.load_samples()\n self.draw_spec()\n\n\n def open_folder(self, dirname=None, draw_first_spec=True):\n '''Open dialog to load & view folder & draw initial spectrogram\n\n Allows user to select dirname if one is not passes to the function\n Recursively identifies all files with the following endings in this dir:\n .mp3, .wav, .WAV. Draws the initial spectrogram of the first file in\n self.files.\n\n Args:\n dirname: folder to open. 
If None, user chooses folder using file dialog\n draw_first_spec: bool\n whether or not to draw first spect immediately\n\n Returns:\n 1 if successful\n 0 if no files\n '''\n\n # If no dirname is passed to function, ask user to select\n if not dirname: self.dirname = Path(fd.askdirectory())\n else: self.dirname = Path(dirname)\n\n # If no dirname is selected, return\n if not self.dirname: return\n\n # Fill self.files with list of wav files\n self.files = []\n self.files.extend(self.dirname.glob(\"**/*.mp3\"))\n self.files.extend(self.dirname.glob(\"**/*.wav\"))\n self.files.extend(self.dirname.glob(\"**/*.WAV\"))\n\n # Draw initial spectrogram if files were returned\n if self.files:\n if draw_first_spec:\n self.position = 0\n self.load_samples()\n self.draw_spec()\n return 1\n # No files returned\n else:\n print(\"No mp3 or wav files found in desired directory\")\n return 0\n\n\n def assess_folder(self):\n '''\n Create GUI to sort through a folder of recordings\n '''\n #\n # # For testing purposes\n # self.open_folder(\n # dirname = '/Volumes/seagate-storage/audio/og_files_from_10spp/cardinalis-cardinalis')\n # self.set_labels_to_use(use_default_dict=True)\n # self.set_assess_file(assess_file = 'default')\n # valid = self.validate_assess_file()\n # if not valid:\n # print(\"Valid assessment file not chosen. Please try again.\")\n # self.finish_assessment()\n # else:\n # # Make buttons if they aren't already made yet\n # if not self.assessment_button_frame.winfo_children():\n # self.make_assessment_buttons()\n # return\n\n tk.messagebox.showinfo(title = \"Info\", message=\"Select a folder from which to assess .WAVs and .MP3s\")\n # Get folder to assess\n folder_chosen_successfully = self.open_folder(draw_first_spec=False)\n if not folder_chosen_successfully:\n return\n\n # Get the labels to use as buttons in the assessment\n tk.messagebox.showinfo(title = \"Info\", message='Select a labels file, or press \"cancel\" to use default labels')\n self.set_labels_to_use()\n\n # Get a .csv filename\n tk.messagebox.showinfo(title = \"Info\", message='Select a filename to save assessments under, or press \"cancel\" to save \"assessments.csv\" within the folder to be assessed')\n self.set_assess_file()\n valid = self.validate_assess_file()\n if not valid:\n tk.messagebox.showinfo(title=\"Info\", message=\"Labels in the pre-existing annotation file do not match the chosen labels. 
Please try again.\")\n self.finish_assessment()\n else:\n # Draw first spectrogram\n self.position = 0\n self.load_samples()\n self.draw_spec()\n\n # Make buttons if they aren't already made yet\n if not self.assessment_button_frame.winfo_children():\n self.make_assessment_buttons()\n\n self.play()\n\n\n\n\n #################### READING AUDIO FILES AND DRAWING SPECTROGRAMS ####################\n\n def load_samples(self):\n '''\n Load samples from a file at self.files[self.position]\n '''\n\n print(f\"Opening {self.files[self.position]}\")\n\n # Clear figure\n self.clear_fig()\n self.zoom = False\n\n self.samples, sr = load_file(\n self.files[self.position],\n sample_rate=self.sample_rate)\n\n # Convert to needed format\n self.samples *= 32767 / max(abs(self.samples))\n self.samples = self.samples.astype(np.int16)\n\n\n def draw_spec(self, cutoff=None, already_flipped=False):\n '''\n Draw the spectrogram of self.samples as loaded by load_samples(),\n cutting off at sample `cutoff` if provided\n '''\n\n if cutoff:\n freqs, times, spect = make_spect(\n self.samples[:cutoff], self.samples_per_seg, self.overlap_percent, self.sample_rate)\n else:\n freqs, times, spect = make_spect(\n self.samples, self.samples_per_seg, self.overlap_percent, self.sample_rate)\n\n flip_axis = True\n if already_flipped:\n flip_axis = False\n\n plotter(spect, freqs, times, self.ax, title=self.files[self.position])\n\n self.fig.canvas.draw()\n\n\n def toggle_zoom(self):\n '''\n Either zoom in or out of spectrogram,\n depending on self.zoom being False (i.e., zoomed out)\n or True (i.e., zoomed in.\n '''\n\n # Can't zoom in if the spec isn't showing :)\n if type(self.samples) is not np.ndarray:\n print(\"Can't toggle; no spectrogram showing\")\n return\n\n if self.zoom:\n self.draw_spec(already_flipped = True)\n self.zoom=False\n\n else:\n self.draw_spec(cutoff=500000, already_flipped = True)\n self.zoom=True\n\n\n def clear_fig(self):\n self.fig.clear()\n self.samples = None\n self.ax = self.fig.add_subplot(111)\n self.canvas.draw()\n\n\n def load_next_file(self, increment=1, autoplay = True):\n '''\n Increments position and moves to next file in self.files\n\n Can also be used to stay at current file by setting increment=0\n '''\n\n # Some special things to take care of during an assessment\n if self.assess_file:\n\n # If assessment is incomplete, don't allow to move to next file\n for assessment_value in self.assessment.values():\n if not assessment_value:\n tk.messagebox.showinfo(title=\"Error\", message=\"Please complete assessment before moving to next file\")\n return\n # Otherwise write out the assessment, reset the labels, and clear the info box\n self.write_assessment()\n self.reset_assessment_dict()\n\n # Remove loaded audio\n self.stop()\n self.play_obj = None\n\n # Load the next file if there are more files to load\n self.position += increment\n if self.position < len(self.files):\n self.load_samples()\n self.draw_spec()\n if autoplay: self.play() # Automatically play the audio\n\n else:\n tk.messagebox.showinfo(title=\"Message\", message=\"No more files to load\")\n print(\"No more files to load\")\n\n # Finish assessment if there was one going on\n self.finish_assessment()\n\n\n\n\n\n\n #################### ASSESSMENT HELPER FUNCTIONS ####################\n\n def set_labels_to_use(self, use_default_dict = False):\n\n # Use default dict if desired\n default_dict = OrderedDict({'species_present':['present', 'absent', 'unsure'], 'sound_type':['song', 'call', 'unsure', 'na']})\n self.labels_file = None\n 
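        # Expected labels-file layout, matching the defaults assigned just
        # below (one label per row, its button options after it):
        #     species_present,present,absent,unsure
        #     sound_type,song,call,unsure,na
        # Note: the line.split(",") parse further down keeps the trailing
        # newline on each row's last option, so stripping the fields, e.g.
        #     self.labels_dict[splitline[0]] = [s.strip() for s in splitline[1:]]
        # keeps values like "na\n" from leaking into the assessment buttons
        # and the saved CSV.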
self.labels_dict = default_dict\n if not use_default_dict:\n # Prompt to select a labels options file from csv\n labels_file = fd.askopenfilename(filetypes=(\n (\"CSV files\",\"*.csv\"),\n (\"all files\",\"*.*\")))\n\n # If file not selected, use the default dict\n # Otherwise, open and parse the labels file\n if not labels_file: pass\n else:\n self.labels_file = Path(labels_file)\n with open(self.labels_file, 'r') as f:\n for line in f:\n splitline = line.split(\",\")\n self.labels_dict[splitline[0]] = splitline[1:]\n # Create the assessment_dict\n self.reset_assessment_dict()\n\n print(f\" Using labels {self.labels_dict}\")\n\n #def raise_all_buttons(self):\n #for button_row, buttons in self.assessment_button_dod.items():\n # for button_name, button in self.assessment_button_dod[button_row].items():\n # button.config(bg = 'blue')\n\n def reset_assessment_dict(self):\n for label in self.labels_dict.keys():\n self.assessment[label] = None\n #state([\"!focus\", \"!selected\"]\n #self.raise_all_buttons()\n\n def set_assess_file(self, assess_file = None):\n # Can specify assess file by calling this function\n # Otherwise, ask user to \"save as\" an assessment file\n if not self.dirname:\n warnings.warn('Must select assessment folder before choosing assessment filename')\n return\n if not self.labels_dict:\n warnings.warn('Must select labels dict before choosing assessment filename')\n return\n default_assess_file = self.dirname.joinpath(\"assessment.csv\")\n\n # Use default assess file\n if assess_file == 'default':\n self.assess_file = default_assess_file\n\n # Use a different assess file specified in the function call\n elif assess_file:\n self.assess_file = Path(assess_file)\n\n # No assess file specified in script; open dialog box\n else:\n assess_file = fd.asksaveasfilename(\n title = \"Select filename\",\n defaultextension = \".csv\",\n filetypes = (\n (\"CSV File\", \"*.csv\"),\n (\"All Files\", \"*.*\") )\n )\n # Use the selected file if a file was selected\n # Otherwise, the default file value will be used\n # There are less verbose ways to write this logic, but this is clear\n if assess_file:\n self.assess_file = Path(assess_file)\n else:\n self.assess_file = default_assess_file\n\n print(f\" Using assessment file {self.assess_file}\")\n\n\n def validate_assess_file(self):\n \"\"\"\n Make sure the header row is correct\n\n Follows this logic:\n - If the chosen assessment file doesn't exist, makes a new file.\n - Otherwise, allows user to decide what to do with existing file:\n - Select a different filename\n - Restart (overwrites previous file)\n - Continue from file (pick up where left off)\n - If continuing from file, do the following:\n - Compare the chosen labels with the chosen file\n -\n \"\"\"\n header_row = ['filename']\n header_row.extend(list(self.labels_dict.keys()))\n\n while True: # The only way to exit from the loop is to return\n # Create new file if it doesn't exist yet\n if not self.assess_file.exists():\n with open(self.assess_file, 'w', newline='') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(header_row)\n return True\n\n continue_from_file = tk.messagebox.askyesnocancel(\"Warning\",'There is already a file at the chosen location. 
Attempt to continue assessment from this file?\\n\\n Selecting \"no\" will overwrite assessment.\\n\\nSelecting \"cancel\" will allow you to pick a new file')\n\n # When user clicks \"Cancel\": Select a different assessment file\n if continue_from_file is None:\n self.set_assess_file()\n\n # When user clicks \"No\": Overwrite assessment\n elif continue_from_file is False:\n print(\"Overwriting pre-existing assessment\")\n with open(self.assess_file, 'w', newline='') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(header_row)\n return True\n\n # When user clicks \"Yes\": Attempt to continue previous assessment\n else:\n # Get the label files to compare labels\n with open(self.assess_file, 'r') as f:\n first_line = f.readline()\n chosen_labels = ','.join(header_row)\n this_file_labels = first_line.strip()\n\n # Can't continue previous assessment: select a new file\n if chosen_labels != this_file_labels:\n tk.messagebox.showerror(title = \"Error\", message = f\"Chosen labels are incompatible with this assessment file.\\n\\nChosen labels: {header_row}. \\n\\nThis file's column headers: {this_file_labels}. Please try again.\")\n self.assess_file = None\n self.set_assess_file()\n\n # Can continue previous assessment: un-queue old assessed files\n else:\n print(\"Continuing pre-existing assessment\")\n print(\"Currently queued files:\")\n print(self.files)\n # Un-queue any files that have previously been assessed in self.assess_file\n with open(self.assess_file, 'r') as f:\n reader = csv.reader(f)\n for line in reader:\n filename = Path(line[0])\n try:\n self.files.remove(filename)\n except: pass\n return True\n\n def clear_assessment_buttons(self):\n self.assessment_navigation_frame.destroy()\n self.assessment_button_frame.destroy()\n\n def make_assessment_buttons(self):\n \"\"\" Create rows of buttons, one for each assessment attribute\n\n Create one row of buttons for each of the assessment attributes\n assigned in self.assess_folder(). These attributes are the\n columns of the assessment csvs. The buttons give the\n possible values of each attribute. 
Attributes and their possible\n values are stored in self.labels_dict\n\n When these buttons are clicked, the assessment will be logged\n using self.assign_assessment(), which stores the assessment result in\n self.assessment.\n\n A \"next\" button is produced as well.\n All attributes must be assessed before continuing.\n \"\"\"\n\n\n self.assessment_button_frame = tk.Frame()\n self.assessment_button_frame.pack()\n self.assessment_navigation_frame = tk.Frame()\n self.assessment_navigation_frame.pack()\n\n self.assessment_variables = OrderedDict()\n # Create a set of radio buttons for each assessment variable\n for variable, options in self.labels_dict.items():\n\n # Keep track of each variable in an ordered dict\n self.assessment_variables[variable] = tk.StringVar()\n\n # Create radio button for each option\n variable_frame = tk.Frame(\n master = self.assessment_button_frame,\n )\n tk.Label(\n master=variable_frame,\n text=variable+\": \",\n font=\"Helvetica 18 bold\",\n ).pack(\n side='top', fill=tk.X, anchor=tk.NW)\n for option in options:\n\n ttk.Radiobutton(\n master = variable_frame,\n text = option,\n variable = self.assessment_variables[variable],\n value = option,\n #idth=-15,\n style=self.radiostyle,\n command = self.create_assessment_function(\n column_name = variable,\n column_val = option)\n ).pack(\n side='top', fill=tk.X, anchor=tk.NW)\n\n variable_frame.pack(side=\"left\", fill=tk.X, anchor=tk.NW)\n\n # Create a little navigation button\n self.assessment_navigation_frame = tk.Frame()\n tk.Button(\n master=self.assessment_navigation_frame,\n text=\"Save assessment and view next file\",\n command = self.load_next_file).pack(side='left')\n\n self.assessment_navigation_frame.pack(side='bottom')\n self.assessment_button_frame.pack(side='bottom')\n\n\n\n def create_assessment_function(self, column_name, column_val):\n # Assign assessment to the ordered dict\n # This function is a bit tricky in that it returns a lambda function\n # This is necessary because we need to return a function that each\n # button can use to assign a different value to a certain column\n\n #print(f\"Creating button to set {column_name} as {column_val}\")\n return lambda : self.assign_assessment(column_name = column_name, column_val = column_val)\n\n\n def assign_assessment(self, column_name, column_val):\n print(f\"Setting {column_name} as {column_val}\")\n self.assessment[column_name] = column_val\n\n def write_assessment(self):\n '''\n Write the file at self.position with its designated status\n to the assessment file at self.assess_file, then move\n to next file\n '''\n\n row_to_write = [self.files[self.position], *self.assessment.values()]\n with open(self.assess_file, 'a', newline='') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(row_to_write)\n\n self.clear_assessment_buttons()\n self.make_assessment_buttons()\n\n\n def finish_assessment(self):\n '''\n Delete contents of self.assessment_button_frame\n\n Called either when out of files, or when window is closed\n '''\n print(\"Ending assessment\")\n\n # Destroy accept/reject/hold buttons\n for child in self.assessment_button_frame.winfo_children():\n child.destroy()\n\n # Clear spectrogram\n self.clear_fig()\n\n # Reset relevant variables\n self.assess_file = None\n\n\n\n #################### AUDIO HELPER FUNCTIONS ####################\n\n def play(self):\n if type(self.samples) is not np.ndarray:\n return\n else:\n if self.play_obj:\n self.stop()\n self.play_obj = sa.play_buffer(\n audio_data = self.samples,\n num_channels = 1,\n 
bytes_per_sample = 2,\n sample_rate = int(self.sample_rate))\n\n def stop(self):\n if self.play_obj:\n self.play_obj.stop()\n sa.stop_all()\n\n\n\n\n### Scripts ###\ndef main():\n root = tk.Tk() # root window\n root.wm_title(\"Specky\")\n root.geometry(\"600x600+200+200\") # dimensions & position\n\n appy = Application(root)\n\n root.protocol(\"WM_DELETE_WINDOW\", appy.clean_up)\n root.mainloop()\n root.destroy()\n\n\nif __name__ == \"__main__\": main()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":26643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407524391","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport asyncio\nimport discord\nfrom datetime import date\nimport urllib.request\nimport json\n\nclient = discord.Client()\n\nadmin = '345254729401499648'\nadmin_sub = '494053984516964353'\nv_user = '493781391473901569'\n\n# 토큰을 입력\ntry:\n token_file = open('token.txt','r')\n token = token_file.read()\n token_file.close()\n print('Token read SUCCESS!')\nexcept:\n print('Token read fail')\n token = input('Enter token : ')\n\n\n\n# 봇이 구동되었을 때 동작되는 코드\n@client.event\nasync def on_ready():\n print('GAMJA_BOT ONLINE\\n')\n print(\"Logged in as \")\n print(client.user.name)\n print(client.user.id)\n print(\"===========\\n\")\n \n\n #봇의 상태를 간단하게 출력\n await client.change_presence(game=discord.Game(name=\"!도움,!help\", type=1))\n\n\n\n\n# 봇이 새로운 메시지를 수신했을때 동작되는 코드\n\n@client.event\nasync def on_message(message):\n if message.author.bot: #만약 메시지를 보낸사람이 봇일 경우에는\n return None #동작하지 않고 무시\n\n id = message.author.id #id라는 변수에는 메시지를 보낸사람의 ID를 담습니다.\n channel = message.channel #channel이라는 변수에는 메시지를 받은 채널의 ID를 담습니다.\n\n print(\"|\"+str(message.timestamp)+\" 서버 : \"+str(message.author.server)+\" 채널 : \"+str(channel)+\" 유저코드 : \"+id+\" 사용자명 : \"+message.author.name+\" 내용 : \"+message.content+\"|\")\n\n\n\n if message.content.startswith('!도움') | message.content.startswith('!명령') | message.content.startswith('!도우미') | message.content.startswith('!help'):\n embed = discord.Embed(\n title = '안녕하십니까 Users.',\n description = '나는 dae pe bang GAMJA bot이다.\\n명령어가 알고 싶으신가요? 
그것은 아래 글로 대체되었다'\n )\n embed.set_thumbnail(url = 'https://cdn.discordapp.com/attachments/482375689035710485/483927615279988738/Potato_s.png')\n embed.add_field(name = '!감자', value = '`♣THE KING♧`',inline = True)\n embed.add_field(name = '!왕감자', value = '`DaeHongDan BIG GAMJA`',inline = True)\n embed.add_field(name = '!고인물',value = '`사용방법 : !고인물 (닉네임)`')\n embed.add_field(name = '!소피아아줌마', value = '`얼굴보니좋네요`',inline = True)\n embed.add_field(name = '!조용히하세요', value = '`조용히하세요`',inline = True)\n embed.add_field(name = '!서울날씨', value = '`현재 서울 날씨를 알려드립니다.`',inline = True)\n embed.add_field(name = '!수능디데이',value = '`수능까지 얼마 남았는지 알려드립니다.`',inline = True)\n embed.add_field(name = '!군대디데이',value = '`단ㅡ결!`',inline = True)\n embed.add_field(name = '!생존신고',value = '`생존신고 방에서만 사용가능한 명령어입니다.`',inline = True)\n embed.add_field(name = '!생존신고확인',value = '`생존신고 방에서만 사용가능한 명령어입니다.`',inline = True)\n embed.add_field(name = '!패치노트', value = '`감자봇의 최근 패치노트를 확인하실수 있습니다.`', inline = True)\n \n await client.send_message(channel,embed=embed)\n\n\n\n\n\n #----------------------------------------------------------------------------!감자----------------------------------------------------------------------------\n if message.content.startswith('!감자'): #만약 해당 메시지가 '!커맨드' 로 시작하는 경우에는\n embed = discord.Embed(\n title = 'POTATO',\n description = 'GoIngMul GAMJA'\n )\n embed.set_image(url = 'https://cdn.discordapp.com/attachments/482375689035710485/483927615279988738/Potato_s.png')\n await client.send_message(channel,embed=embed)\n\n\n\n #----------------------------------------------------------------------------!고인물----------------------------------------------------------------------------\n if message.content.startswith('!고인물'):\n goingmul = message.content.split('!고인물 ')[1]\n embed = discord.Embed(\n title = '고인물' + goingmul,\n\t\t\tdescription='킹갓엠페러제너럴충무공마제스티하이퍼울트라판타스틱익스트림탑클래스고인모범노블레스석유를재껴버리는서브미션챔피언게임고인물남바원 『'+message.content.split('!고인물 ')[1]+'』'\n )\n embed.set_footer(text='호온아 이렇게 하면 되는거지?')\n await client.send_message(channel,embed=embed)\n\n\n\n\n #----------------------------------------------------------------------------!왕감자----------------------------------------------------------------------------\n if (message.content == '!왕감자'):\n embed = discord.Embed(\n title = 'Big POTATO',\n description = 'DaeHongDan GAMJA',\n )\n embed.set_image(url = 'https://cdn.discordapp.com/attachments/482375689035710485/482376272631300105/hugePotato.jpg')\n await client.send_message(channel,embed=embed)\n\n\n #----------------------------------------------------------------------------!소피아아줌마----------------------------------------------------------------------------\n if (message.content == '!소피아아줌마') | (message.content == '!소피아 아줌마'):\n await client.send_message(channel,'https://youtu.be/s3hyx8Opxh0')\n\n\n\n #----------------------------------------------------------------------------!조용히하세요----------------------------------------------------------------------------\n if message.content.startswith('!조용히하세요') | message.content.startswith('!조용히 하세요'):\n embed = discord.Embed(\n title = '조용히 하세요!'\n )\n embed.set_image(url = 'https://cdn.discordapp.com/attachments/482375689035710485/482494421330558977/joyuong.png')\n await client.send_message(channel, embed=embed)\n\n\n\n #----------------------------------------------------------------------------!수능디데이----------------------------------------------------------------------------\n if message.content.startswith('!수능디데이') | message.content.startswith('!수능 디데이') | 
message.content.startswith( '!수능일계산기') | message.content.startswith('!수능일 계산기'):\n        today = date.today()\n        suneung = date(2018,11,15)\n        suneung_dday = (suneung-today).days\n        embed = discord.Embed(\n            title = '수능 D-day 계산기',\n            description='수능을 준비하고 계시는 분들을 위해 만들었습니다.\\n'\n        )\n        embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/482375689035710485/483237446167822356/agumonfuck.png')\n        if suneung_dday>49:\n            embed.add_field(name ='수능 D-day',value='`수능까지 '+str(suneung_dday)+'일 남음`')\n        elif suneung_dday>24:\n            embed.add_field(name ='수능 D-day',value='`수능까지 '+str(suneung_dday)+'일 남음`')\n            embed.set_footer(text='빨리 공부하러가셈')\n        elif suneung_dday>9:\n            embed.add_field(name ='수능 D-day',value='`수능까지 '+str(suneung_dday)+'일 남음`')\n            embed.set_footer(text='디코꺼라')\n        elif suneung_dday>4:\n            embed.add_field(name ='수능 D-day',value='`수능까지 '+str(suneung_dday)+'일 남음`')\n            embed.set_footer(text='미침? 디코끄고 빨리 공부하러가셈')\n        else:\n            embed.add_field(name ='수능 D-day',value='`수능까지 '+str(suneung_dday)+'일 남음`')\n            embed.set_footer(text='컨디션 조절 필수! 빨리 이불덮고 디비자셈')\n        await client.send_message(channel,embed=embed)\n\n\n\n    #----------------------------------------------------------------------------!군대디데이----------------------------------------------------------------------------\n    if message.content.startswith('!군대디데이') | message.content.startswith('!군대 디데이'):\n        today = date.today()\n        gundae = date(2019,2,25)\n        gundae_dday = (gundae-today).days\n        embed = discord.Embed(\n            title = '군대 D-day 계산기',\n            description = '나라의 노예가 될 우리 킹갓송파공고 군특학생들을 위해 만들었습니다.'\n        )\n        embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/482375689035710485/483913896889942016/godgunDdabong.png')\n        embed.add_field(name='군대 D-day', value='`입대까지 '+str(gundae_dday)+'일 남음`')\n        embed.set_footer(text='단결!')\n        await client.send_message(channel,embed=embed)\n\n\n\n    #----------------------------------------------------------------------------!서울날씨----------------------------------------------------------------------------\n    if (message.content == '!서울날씨'):\n        en_wthr = ['Clear','Drizzle','Rain','Haze','Mist','Clouds','Thunderstorm']\n        kr_wthr = ['맑음','가랑비','비내림','실안개','안개 낌','흐림','천둥 번개를 동반한 비']\n        API_key = '372c8656b54d6cd1c8a952b04bf3dcc7'\n        URL =\"http://api.openweathermap.org/data/2.5/weather?q=Seoul&mode=json&APPID=\"+API_key\n        res = urllib.request.urlopen(URL).read()\n        js = json.loads(res.decode('utf-8'))\n        seoul_weather = js['weather'][0]['main']\n        seoul_temp = round(js['main']['temp']-273.15,2)\n        if seoul_weather in en_wthr:\n            seoul_weather = kr_wthr[en_wthr.index(seoul_weather)]\n        embed = discord.Embed(\n            title = '서울 날씨',\n            description='창밖을 내다보지도 않고 게임만 처하는 우리 앰창인생들을 위한 현재 서울 날씨 정보입니다.'\n        )\n        embed.add_field(name = '날씨', value ='`'+seoul_weather+'`')\n        embed.add_field(name = '기온', value ='`'+str(seoul_temp)+'˚C`')\n        embed.set_thumbnail(url = 'https://cdn.discordapp.com/attachments/482375689035710485/482483642468401163/i_seoul_u.jpg')\n        await client.send_message(channel,embed=embed)\n\n\n\n\n#    if(message.content.startswith('!수동생존신고')):\n#        if (admin_sub in [role.id for role in message.author.roles]) or (admin in [role.id for role in message.author.roles]):\n#            nick_name = message.content.split('!수동생존신고 ')[1]\n#            #try:\n#            member = str(discord.utils.get(message.server.members, name=str(nick_name)))\n#            print(discord.User())\n#            imalive_list_file = open(\"imalive_list.txt\",'r')\n#            imalive_list = imalive_list_file.read()\n#            imalive_list_file.close()\n#            if member in imalive_list.split('\\n'):\n#                await client.send_message(channel,message.author.name+\"님은 이미 등록되어있습니다.\")\n#            else:\n#            #except:\n#                #await 
client.send_message(channel,\"이런 닉네임이 없습니다.\")\n# imalive_list_file = open(\"imalive_list.txt\",'a')\n# imalive_list_file.write(member+\"\\n\")\n# imalive_list_file.close()\n# await client.send_message(channel,\"등록완료\")\n\n\n\n\n if (message.content == '!생존신고'): \n if(str(channel) == '생존신고'):\n try:\n imalive_list_file = open(\"imalive_list.txt\",'r')\n imalive_list = imalive_list_file.read()\n imalive_list_file.close()\n if id in imalive_list.split('\\n'):\n await client.send_message(channel,message.author.name+\"님은 이미 등록되어있습니다.\")\n else:\n imalive_list_file = open(\"imalive_list.txt\",'a')\n imalive_list_file.write(id+\"\\n\")\n imalive_list_file.close()\n await client.send_message(channel,message.author.name+\"님 생존 신고 리스��에 정상적으로 등록 되었습니다.\")\n except:\n await client.send_message(channel,\"에러가 발생하였습니다. 다시 시도 해주시거나 관리자를 호출하여 주십시오\")\n elif not (str(channel) =='생존신고'):\n await client.send_message(channel,\"이 방이 아닌 왼쪽의 채팅방 카테고리의 `생존신고`채팅방을 이용해주세요.\")\n\n \n\n\n# if ((message.content == '!생존신고확인') or(message.content == '!생존신고 확인')):\n# if (str(channel) == '생존신고'):\n# try:\n# imalive_list_file = open(\"imalive_list.txt\",'r')\n# imalive_list = imalive_list_file.read()\n# imalive_list_file.close()\n# imalive_list_s = imalive_list.split('\\n')\n# if (id in imalive_list_s):\n# await client.send_message(channel,message.author.name+\"님은 생존 신고 리스트에 등록 되어있습니다!\")\n# elif not(id in imalive_list_s):\n# await client.send_message(channel,message.author.name+\"님은 생존 신고 리스트에 등록 되어있지 않습니다!\")\n# except:\n# await client.send_message(channel,\"에러가 발생하였습니다. 다시 시도 해주시거나 관리자를 호출하여 주십시오\")\n# elif not(str(channel) == '생존신고'):\n# await client.send_message(channel,\"이 방이 아닌 왼쪽의 채팅방 카테고리의 `생존신고`채팅방을 이용해주세요.\")\n\n\n\n\n\n\n if (message.content == '!물갈이시작'):\n if(admin_sub in [role.id for role in message.author.roles]) or (admin in [role.id for role in message.author.roles]):\n try:\n imalive_list_file = open(\"imalive_list.txt\",'r')\n imalive_list = imalive_list_file.read()\n imalive_list_file.close()\n await asyncio.sleep(3)\n await client.send_message(channel,\"생존신고 리스트 불러오기 성공!\")\n except:\n await client.send_message(channel,\"입출력 오류 발생\")\n await client.send_message(channel,\"물갈이를 시작합니다\")\n alive_list = []\n i=0\n for alive in imalive_list.split(\"\\n\"):\n user=discord.utils.get(message.server.members, id = alive)\n alive_list.append(user)\n kick_list=[]\n for member in message.server.members:\n if not(member in alive_list):\n kick_list.append(member)\n await client.send_message(channel,\"누군가가 강퇴리스트에 추가되었습니다.\")\n await client.send_message(channel,\"강퇴리스트 추가가 완료되었습니다.\")\n await client.send_message(channel,\"이제 곧 강퇴가 시작됩니다.\")\n await asyncio.sleep(3)\n for kick_user in kick_list:\n await client.kick(kick_user)\n await client.send_message(channel,\"물갈이로 인해`\"+str(kick_user)+\"님`이 추방당하였습니다.\")\n i=i+1\n await client.send_message(channel,\"물갈이 완료!\")\n await client.send_message(channel,\"총`\"+str(i)+\"명`을 추방하였습니다!\")\n else: await client.send_message(channel,\"권한이 없습니다.\")\n\n\n\n\n\n\n if message.content.startswith('!유저리스트만들기'):\n if (admin_sub in [role.id for role in message.author.roles]) or (admin in [role.id for role in message.author.roles]):\n member_list_file = open(\"member_list_file.txt\",'w')\n for member in message.server.members:\n member_list_file.write(member.name+\",\")\n member_list_file.close()\n await client.send_message(channel,'유저 리스트 생성 완료!')\n else: await client.send_message(channel,'권한이 없습니다!')\n\n\n\n\n\n if message.content.startswith('!모두인증') :\n if (admin_sub in [role.id for role 
in message.author.roles]) or (admin in [role.id for role in message.author.roles]):\n            member_list_file = open(\"member_list_file.txt\",'r')\n            member_list = member_list_file.read()\n            member_list_file.close()\n            role = discord.utils.get(message.server.roles, name='인증된 사용자')\n\n            for member_name in member_list.split(','):\n                if not member_name:\n                    continue\n                member = discord.utils.get(message.server.members, name=member_name)\n                await client.add_roles(member,role)\n            await client.send_message(channel,'사용자 모두 인증된 사용자로 변경되었습니다.')\n        else: await client.send_message(channel,'관리자만 사용할 수 있습니다.')\n\n\n\n\n    if message.content.startswith('!사용자인증') and v_user in [role.id for role in message.author.roles]:\n        not_v_user = message.content.split('!사용자인증 ')[1]\n        not_v_user_id = discord.utils.get(message.server.members,name = not_v_user)\n        role = discord.utils.get(message.server.roles, name='인증된 사용자')\n        await client.add_roles(not_v_user_id,role)\n        await client.send_message(channel,\"`\"+message.author.name+\"` 님으로 인해 `\"+not_v_user+'` 님이 인증되었습니다. ')\n    \n\n\n\n    if (message.content == '!패치노트'):\n        embed=discord.Embed(\n            title = '2018 10 08 감자봇 패치노트',\n            description = '\\n- 10월달 물갈이를 위한 명령어 2가지를 추가하였습니다.\\n'\n            '- 명령어 도움말에 물갈이 명령어도 추가 하였습니다.'\n        )\n        #embed.set_footer(text='(2018.08.30 오후 8시 30분 적용예정)')\n        embed.set_thumbnail(url = 'https://cdn.discordapp.com/attachments/482375689035710485/483927615279988738/Potato_s.png')\n        await client.send_message(channel,embed=embed)\n\n\n\nclient.run(token)","sub_path":"GAMJA_BOT/GAMJA_BOT.py","file_name":"GAMJA_BOT.py","file_ext":"py","file_size_in_byte":18160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133775614","text":"import os\nimport time\nimport datetime\nimport re\nimport logging\nimport signal\nimport sys\nimport argparse\nimport json\nimport elastalerthelp\nimport elastichelp\nimport triage\nfrom slackclient import SlackClient\n\nclass Elastabot():\n    def __init__(self):\n        self.running = False\n        self.conf = {}\n        self.log = logging.getLogger('elastabot')\n    \n    # Initializes the bot via a config file and env variables, invoked once\n    def init(self):\n        configured = True\n        parser = argparse.ArgumentParser()\n        parser.add_argument('--configFile', help='JSON configuration file', default=\"elastabot.json\")\n        args = parser.parse_args()\n        \n        with open(args.configFile, \"r\") as fp:\n            self.conf = json.load(fp)\n\n        self.slackBotToken = os.environ.get('SLACK_BOT_TOKEN')\n        if not self.slackBotToken:\n            self.log.error(\"SLACK_BOT_TOKEN is a required environment variable\")\n            configured = False\n        else:\n            self.slackBotToken = self.slackBotToken.strip()\n\n        logging.basicConfig(level=logging.INFO)\n\n        return configured\n\n    # Main loop for the bot, connects to slack and watches for incoming commands\n    def run(self):\n        self.running = self.init()\n        self.log.info(\"Slack bot connecting to server\")\n        self.slack_client = SlackClient(self.slackBotToken)\n        if self.slack_client.rtm_connect(with_team_state=False):\n            starterbot_id = self.slack_client.api_call(\"auth.test\")[\"user_id\"]\n            self.log.info(\"Slack bot connected; botId=\" + starterbot_id)\n            while self.running:\n                command, args, channel, user = self.parse_bot_commands(self.slack_client.rtm_read())\n                if command:\n                    self.handle_command(self.slack_client, command, args, channel, user)\n                time.sleep(1)\n        else:\n            self.log.error(\"Slack bot connection failed.\")\n\n    # Parses incoming message for a valid command\n    def parse_bot_commands(self, 
slack_events):\n        for event in slack_events:\n            if event[\"type\"] == \"message\" and \"subtype\" not in event:\n                command, args = self.parse_command(event[\"text\"])\n                user = event[\"user\"]\n                return command, args, event[\"channel\"], user\n        return None, None, None, None\n\n    # Splits the command and arguments apart\n    def parse_command(self, message_text):\n        pattern = \"^%s([a-z_]+)(.*)\" % (self.conf['commandPrefix'])\n        matches = re.search(pattern, message_text)\n        return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\n\n    # Command distributor\n    def handle_command(self, client, command, args, channel, user):\n        default_response = \"Unrecognized command; Try ${prefix}help\"\n        response = None\n        if command == \"search\":\n            response = elastichelp.search(self.conf, args)\n        elif command == \"health\":\n            response = elastichelp.health(self.conf, args)\n        elif command == \"ack\":\n            response = elastalerthelp.ack(self.conf, args)\n        elif command == \"triage\":\n            response = triage.triage(self.conf, args)\n        elif command == \"help\":\n            response = self.help()\n\n        msg = (response or default_response).strip().replace(\"${prefix}\", self.conf['commandPrefix'])\n        client.api_call(\n            \"chat.postMessage\",\n            as_user=True,\n            channel=channel,\n            text=msg\n        )\n\n    def help(self):\n        return \"\"\"Supported commands:```\n${prefix}search - Perform Elasticsearch search\n${prefix}health - Show Elasticsearch health\n${prefix}ack - Silence an alert with optional triage\n${prefix}triage - Triage an arbitrary issue\n${prefix}help - This help message\n\nSpecify `${prefix}command help` for more information about a specific command. Ex: `${prefix}ack help`\n```\"\"\"\n\ndef handle_signal(signal, frame):\n    os._exit(0)\n\ndef main():\n    signal.signal(signal.SIGINT, handle_signal)\n    bot = Elastabot()\n    bot.run()\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"src/elastabot.py","file_name":"elastabot.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604963561","text":"\"\"\"\nLaboratory for Computational Motor Control, Johns Hopkins School of Medicine\n\nAuthor: Kaveh Karbasi \n\"\"\"\n\nimport numpy as np\nimport scipy.signal\nfrom sklearn.cluster import KMeans\n\nclass target:\n    \"\"\" Class for handling target data\"\"\"\n    def __init__(self, vt, ht, dt, mode):\n        \"\"\"\n        Object constructor\n        vt: vertical target position signal\n        ht: horizontal target position signal\n        dt: sampling period\n        \"\"\"\n        valid_modes = {'horizontal','vertical','2d'}\n        if mode not in valid_modes:\n            raise ValueError(\"Mode must be one of {}\".format(valid_modes))\n        if mode == 'horizontal':\n            self.ht = ht\n        if mode == 'vertical':\n            self.vt = vt\n        if mode == '2d':\n            self.ht = ht\n            self.vt = vt\n        self.dt = dt\n        self.mode = mode\n    \n    def _find_target_jumps(self):\n        if self.mode == 'horizontal':\n            return self._find_target_jumps_horizontal()\n        if self.mode == 'vertical':\n            return self._find_target_jumps_vertical()\n        if self.mode == '2d':\n            return self._find_target_jumps_2d()\n\n    \n    def _find_target_jumps_horizontal(self):\n        \"\"\"\n        Finds the target jump indices in the input target horizontal position signal\n        \"\"\"\n        # find target jumps\n        ht_diff = np.abs(np.diff(self.ht))\n        target_jump_indices = scipy.signal.find_peaks(ht_diff, prominence=200)[0]\n\n        # remove detected target jumps that are sequential (less than 5 samples apart)\n        to_delete = []\n        for i, tji in enumerate(target_jump_indices[1:]):\n            if 
tji - target_jump_indices[i] < 5:\n to_delete = to_delete + [i+1]\n mask = np.ones(target_jump_indices.shape, dtype=bool)\n mask[to_delete] = False\n target_jump_indices = target_jump_indices[mask]\n return target_jump_indices\n \n \n def _find_target_jumps_vertical(self):\n \"\"\"\n Finds the target jump indices in the input target vertical position signal\n \"\"\"\n # find target jumps\n vt_diff = np.abs(np.diff(self.vt))\n target_jump_indices = scipy.signal.find_peaks(vt_diff, prominence=200)[0]\n\n # remove detected target jumps that are likely noise related(less than 5 samples apart)\n to_delete = []\n for i, tji in enumerate(target_jump_indices[1:]):\n if tji - target_jump_indices[i] < 5:\n to_delete = to_delete + [i+1]\n mask = np.ones(target_jump_indices.shape, dtype=bool)\n mask[to_delete] = False\n target_jump_indices = target_jump_indices[mask]\n return target_jump_indices\n \n def _find_target_jumps_2d(self):\n pos_2d = np.column_stack((self.vt, self.ht))\n pos_norm = np.linalg.norm(pos_2d, axis = -1)\n pos_diff = np.abs(np.diff(pos_norm))\n target_jump_indices = scipy.signal.find_peaks(pos_diff, prominence=200)[0]\n\n to_delete = []\n for i, tji in enumerate(target_jump_indices[1:]):\n if tji - target_jump_indices[i] < 5:\n to_delete = to_delete + [i+1]\n mask = np.ones(target_jump_indices.shape, dtype=bool)\n mask[to_delete] = False\n target_jump_indices = target_jump_indices[mask]\n\n return target_jump_indices\n\n def _find_jump_vector_amplitudes(self, num_clusters):\n if self.mode == 'horizontal':\n return self._find_jump_vector_amplitudes_h(num_clusters)\n if self.mode == 'vertical':\n return self._find_jump_vector_amplitudes_v(num_clusters)\n if self.mode == '2d':\n return self._find_jump_vector_amplitudes_2d(num_clusters)\n\n def _find_jump_vector_amplitudes_h(self, num_clusters):\n target_jump_indices = self._find_target_jumps()\n self.jump_vecs = []\n for tji in target_jump_indices:\n self.jump_vecs = self.jump_vecs + [self.ht[tji + 5] - self.ht[tji - 5]]\n #[hist, bin_edges] = np.histogram(jump_vecs, bins=np.arange(np.min(self.ht), np.max(self.ht), bin_size))\n #hist[hist < 10] = 0 # remove rare target jump vectors\n #return bin_edges[np.nonzero(hist)]\n self.jump_vecs = np.array(self.jump_vecs).reshape(-1,1)\n kmeans = KMeans(n_clusters=num_clusters, n_init = 20, n_jobs=5).fit(self.jump_vecs)\n jump_amps = kmeans.cluster_centers_\n jump_amps = np.array([int(ja) for ja in jump_amps])\n return jump_amps\n\n def _find_jump_vector_amplitudes_v(self, num_clusters):\n target_jump_indices = self._find_target_jumps()\n self.jump_vecs = []\n for tji in target_jump_indices:\n self.jump_vecs = self.jump_vecs + [self.vt[tji + 5] - self.vt[tji - 5]]\n #[hist, bin_edges] = np.histogram(jump_vecs, bins=np.arange(np.min(self.ht), np.max(self.ht), bin_size))\n #hist[hist < 10] = 0 # remove rare target jump vectors\n #return bin_edges[np.nonzero(hist)]\n self.jump_vecs = np.array(self.jump_vecs).reshape(-1,1)\n kmeans = KMeans(n_clusters=num_clusters, n_init = 20, n_jobs=5).fit(self.jump_vecs)\n jump_amps = kmeans.cluster_centers_\n jump_amps = np.array([int(ja) for ja in jump_amps])\n return jump_amps\n\n def _find_jump_vector_amplitudes_2d(self, num_clusters):\n \"\"\"\n 2d clustering of the jump vectors.\n \"\"\"\n target_jump_indices = self._find_target_jumps();\n \n jump_vecs_h = []\n for tji in target_jump_indices:\n jump_vecs_h = jump_vecs_h + [self.ht[tji + 5] - self.ht[tji - 5]]\n jump_vecs_h = np.array(jump_vecs_h) \n \n jump_vecs_v = []\n for tji in target_jump_indices:\n 
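# vertical jump amplitude: target position 5 samples after minus 5 samples before the detected jump\n            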
jump_vecs_v = jump_vecs_v + [self.vt[tji + 5] - self.vt[tji - 5]]\n jump_vecs_v = np.array(jump_vecs_v)\n\n self.jump_vecs = np.column_stack((jump_vecs_h, jump_vecs_v))\n kmeans = KMeans(n_clusters=num_clusters, n_init = 20, n_jobs=5).fit(self.jump_vecs)\n jump_amps = kmeans.cluster_centers_\n\n return np.int64(jump_amps)\n\n def _is_in_cluster(self, jump_vec, jump_amp, jump_tol):\n if self.mode == 'horizontal':\n if jump_vec < jump_amp + jump_tol and jump_vec >= jump_amp - jump_tol:\n return True\n else:\n return False\n if self.mode == 'vertical':\n if jump_vec < jump_amp + jump_tol and jump_vec >= jump_amp - jump_tol:\n return True\n else:\n return False\n if self.mode == '2d':\n if (jump_vec[0] < jump_amp[0] + jump_tol and\n jump_vec[0] >= jump_amp[0] - jump_tol and\n jump_vec[1] < jump_amp[1] + jump_tol and\n jump_vec[1] >= jump_amp[1] - jump_tol):\n return True\n else:\n return False\n\n def get_target_jumps(self, num_clusters = 3, jump_tol = 100):\n \"\"\"\n Returns a dictionary containing the indices of the jumps to the found jump vectors.\n The jump vectors are found by detecting all jumps, then using kmeans with k=num_clusters\n (should be determined by the experimental setup, auto detection later), then assigning each\n jump to one cluster if the euclidean distance is less than jump_tol.\n \"\"\"\n jump_amps = self._find_jump_vector_amplitudes(num_clusters)\n\n target_jumps_to = {}\n for ja in jump_amps:\n target_jumps_to[str(ja)] = np.array([], dtype='int64')\n target_jump_indices = self._find_target_jumps()\n for i, tji in enumerate(target_jump_indices):\n # jump_vec = ht.data[prange][tji + 5] - ht.data[prange][tji - 5]\n for ja in jump_amps:\n if self._is_in_cluster(self.jump_vecs[i], ja, jump_tol):\n target_jumps_to[str(ja)] = np.concatenate((target_jumps_to[str(ja)], [tji]))\n return [target_jumps_to, jump_amps]\n\n\n","sub_path":"kaveh/behavioral/oculomotor.py","file_name":"oculomotor.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172853465","text":"# -*- coding: utf-8 -*-\r\n__author__ = \"Mengxuan Chen\"\r\n__email__ = \"chenmx19@mails.tsinghua.edu.cn\"\r\n__date__ = \"20200714\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom getTradingDate import getTradingDateFromJY\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\nclass Para():\r\n data_path = '.\\\\data\\\\'\r\n result_path = '.\\\\result\\\\'\r\n pass\r\npara = Para()\r\n\r\n# In[]\r\n# 净金融负债 = 金融负债 − 金融资产\r\nfinancial_liability = pd.read_csv(para.result_path + 'financial_liability.csv',index_col=0)\r\nfinancial_asset = pd.read_csv(para.result_path + 'financial_asset.csv',index_col=0)\r\nnet_financial_liability = np.array(financial_liability) - np.array(financial_asset)\r\nnet_financial_liability = pd.DataFrame(net_financial_liability,\r\n index = financial_liability.index,\r\n columns = financial_liability.columns)\r\n# In[]\r\n# 税后利息率 = 税后利息费用 ÷ 净金融负债\r\nfinancial_loss = pd.read_csv(para.result_path + 'financial_loss.csv',index_col=0)\r\ninterest_rate_after_tax = np.array(financial_loss) / (np.array(net_financial_liability) + 0.000000001)\r\ninterest_rate_after_tax = pd.DataFrame(interest_rate_after_tax,\r\n index = financial_loss.index,\r\n columns = financial_loss.columns)\r\ninterest_rate_after_tax.to_csv(para.result_path + 'interest_rate_after_tax.csv')\r\n# In[]\r\n# 净经营资产 = 经营资产 − 经营负债 = 净金融负债 + 股东权益\r\noperating_asset = pd.read_csv(para.result_path + 
'operating_asset.csv',index_col=0)\r\noperating_liability = pd.read_csv(para.result_path + 'operating_liability.csv',index_col=0)\r\nnet_operating_asset = np.array(operating_asset) - np.array(operating_liability)\r\nnet_operating_asset = pd.DataFrame(net_operating_asset,\r\n                                   index = operating_asset.index,\r\n                                   columns = operating_asset.columns)\r\n\r\n# In[]\r\n# 所得税率 = 所得税费用/利润总额\r\ntax_expenditure = pd.read_csv(para.result_path + '所得税额.csv',index_col=0)\r\ntax_expenditure.fillna(method = 'ffill',axis = 0,inplace=True)\r\nprofit_all = pd.read_csv(para.result_path + '利润总额.csv',index_col=0)\r\nprofit_all.fillna(method = 'ffill',axis = 0,inplace=True)\r\ntax_rate = np.array(tax_expenditure) / np.array(profit_all)\r\ntax_rate = pd.DataFrame(tax_rate,\r\n                        index=profit_all.index,\r\n                        columns=profit_all.columns)\r\n\r\n# In[]\r\n# 税后经营净利润 = 息税前利润 × (1 − 所得税率)\r\nEBIT = pd.read_csv(para.result_path + '息税前收益.csv',index_col=0)\r\nEBIT.fillna(method = 'ffill',axis = 0,inplace=True)\r\nEBI = np.array(EBIT) * (1 - np.array(tax_rate))\r\nEBI = pd.DataFrame(EBI,\r\n                   index=EBIT.index,\r\n                   columns=EBIT.columns)\r\n\r\n# In[]\r\n# 净财务杠杆 = 净金融负债 ÷ 股东权益\r\nequity = pd.read_csv(para.result_path + '股东权益.csv',index_col=0)\r\nequity.fillna(method = 'ffill',axis = 0,inplace=True)\r\nfinancail_leverage = np.array(net_financial_liability) / np.array(equity)\r\nfinancail_leverage = pd.DataFrame(financail_leverage,\r\n                                  index=equity.index,\r\n                                  columns=equity.columns)\r\n\r\n# In[]\r\n# 经营差异率 = 净经营资产净利率 − 税后利息率\r\noperating_diff = np.array(EBI) / np.array(net_operating_asset) - np.array(interest_rate_after_tax)\r\noperating_diff = pd.DataFrame(operating_diff,\r\n                              index=financail_leverage.index,\r\n                              columns=financail_leverage.columns)\r\n\r\n# In[]\r\n# 净经营资产周转次数 = 销售收入 ÷ 净经营资产\r\nsales = pd.read_csv(para.result_path + '营业总收入.csv',index_col=0)\r\nsales.fillna(method = 'ffill',axis = 0,inplace=True)\r\nnet_operating_asset_turnover = np.array(sales) / np.array(net_operating_asset)\r\nnet_operating_asset_turnover = pd.DataFrame(net_operating_asset_turnover,\r\n                                            index=sales.index,\r\n                                            columns=sales.columns)\r\n\r\n# In[]\r\n# 税后经营净利率 = 税后经营净利润 ÷ 销售收入\r\nnet_profit_after_tax = np.array(EBI) / np.array(sales)\r\nnet_profit_after_tax = pd.DataFrame(net_profit_after_tax,\r\n                                    index=EBI.index,\r\n                                    columns=EBI.columns)\r\n\r\n# In[]\r\n# 杠杆贡献率 = 经营差异率 × 净财务杠杆\r\nleverage_contrib = np.array(operating_diff) * np.array(financail_leverage)\r\nleverage_contrib = pd.DataFrame(leverage_contrib,\r\n                                index=operating_diff.index,\r\n                                columns=operating_diff.columns)\r\n\r\n# In[]\r\n# 净经营资产净利率 = 税后经营净利率 × 净经营资产周转次数\r\nnet_operating_asset_net_profit = np.array(net_profit_after_tax) * np.array(net_operating_asset_turnover)\r\nnet_operating_asset_net_profit = pd.DataFrame(net_operating_asset_net_profit,\r\n                                              index=net_profit_after_tax.index,\r\n                                              columns=net_profit_after_tax.columns)\r\n\r\n# In[]\r\n# 净资产收益率 = (税后经营净利润 − 税后利息费用) ÷ 股东权益\r\n#             = 净经营资产净利率 + 杠杆贡献率\r\nnet_ROE = np.array(net_operating_asset_net_profit) + np.array(leverage_contrib)\r\nnet_ROE[np.isinf(net_ROE)] = 0\r\nnet_ROE = pd.DataFrame(net_ROE,\r\n                       index=net_operating_asset_net_profit.index,\r\n                       columns=net_operating_asset_net_profit.columns)\r\n\r\n# In[]\r\n# 净资产收益率 = 净经营资产净利率 + 杠杆贡献率\r\nnet_ROE.to_csv(para.result_path + 'net_ROE.csv')\r\nnet_operating_asset_net_profit.to_csv(para.result_path + 'net_operating_asset_net_profit.csv')\r\nleverage_contrib.to_csv(para.result_path + 'leverage_contrib.csv')\r\n\r\n# 净经营资产净利率 = 税后经营净利率 × 净经营资产周转次数\r\nnet_profit_after_tax.to_csv(para.result_path + 
'net_profit_after_tax.csv')\r\nnet_operating_asset_turnover.to_csv(para.result_path + 'net_operating_asset_turnover.csv')\r\n\r\n# 杠杆贡献率 = 经营差异率 × 净财务杠杆\r\noperating_diff.to_csv(para.result_path + 'operating_diff.csv')\r\nfinancail_leverage.to_csv(para.result_path + 'financail_leverage.csv')\r\n","sub_path":"codes/3 管理用财务报表指标分析.py","file_name":"3 管理用财务报表指标分析.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101260398","text":"from FFxivPythonTrigger.memory import scan_address\nfrom FFxivPythonTrigger.Logger import Logger\nfrom FFxivPythonTrigger.Storage import get_module_storage\nfrom FFxivPythonTrigger.AddressManager import AddressManager\n\n_logger = Logger(\"XivMem/AddressManager\")\n_storage = get_module_storage(\"XivMem\")\n_am = AddressManager(_storage.data, _logger)\n\n##########\n# actor table\n##########\nactor_table_sig = \"48 8d ?? ?? ?? ?? ?? e8 ?? ?? ?? ?? 48 8b ?? 48 8b ?? 48 8d ?? ?? ?? ?? ?? \" \\\n \"e8 ?? ?? ?? ?? 48 8d ?? ?? ?? ?? ?? ba ?? ?? ?? ?? e8 ?? ?? ?? ?? 89 2f\"\nactor_table_addr = _am.get(\"actor table\", scan_address, actor_table_sig, cmd_len=7)\n\n##########\n# combat data\n##########\ncombo_state_sig = \"f3 0f ?? ?? ?? ?? ?? ?? f3 0f ?? ?? ?? e8 ?? ?? ?? ?? 48 8b ?? 48 8b ?? 0f b7\"\ncombo_state_addr = _am.get(\"combo state\", scan_address, combo_state_sig, cmd_len=8)\n\nskill_queue_sig = \"44 89 2d ?? ?? ?? ?? f3 0f 11 05 ?? ?? ?? ??\"\nskill_queue_addr = _am.get(\"skill queue\", scan_address, skill_queue_sig, cmd_len=7, add=4)\n\nis_in_fight_sig = \"80 3d ?? ?? ?? ?? ?? 0f 95 c0 48 83 c4 ??\"\nis_in_fight_addr = _am.get(\"is in fight\", scan_address, is_in_fight_sig, cmd_len=7, ptr_idx=2)\n\ncool_down_group_sig = \"0f b7 0d ?? ?? ?? ?? 84 c0\"\ncool_down_group_addr = _am.get(\"cool down group\", scan_address, cool_down_group_sig, cmd_len=7, add=0x76)\n\nenemies_base_sig = \"48 8b 0d ?? ?? ?? ?? 4c 8b c0 33 d2\"\nenemies_base_addr = _am.get(\"enemies base\", scan_address, enemies_base_sig, cmd_len=7)\nenemies_shifts = [0x30, 0x58, 0x98, 0x20, 0x20]\n\n##########\n# player info\n##########\ngauge_sig = \"48 8d ?? ?? ?? ?? ?? e8 ?? ?? ?? ?? 80 be 13 07 ?? ??\"\ngauge_addr = _am.get(\"gauge\", scan_address, gauge_sig, cmd_len=7, add=0x10)\n\nplayer_sig = \"0f 10 ?? ?? ?? ?? ?? 40 0f ?? ?? 0f 95\"\nplayer_addr = _am.get(\"player\", scan_address, player_sig, cmd_len=7, add=-0x11)\n\n##########\n# targets\n##########\ntarget_ptr_sig = \"48 8B 05 ?? ?? ?? ?? 48 8D 0D ?? ?? ?? ?? FF 50 ?? 48 85 DB\"\ntarget_ptr_addr = _am.get(\"target ptr\", scan_address, target_ptr_sig, cmd_len=7)\n\n##########\n# zone\n##########\nzone_sig = \"0f b7 ?? ?? ?? ?? ?? 48 8d ?? ?? ?? f3 0f ?? ?? 33 d2\"\nzone_addr = _am.get(\"zone\", scan_address, zone_sig, cmd_len=7)\n\n##########\n# skill animation lock\n##########\nskill_ani_lock_sig = \"F3 0F ? ? ? ? ? ? 41 F6 47 20\"\nskill_ani_lock_addr = _am.get(\"skill animation lock\", scan_address, skill_ani_lock_sig, cmd_len=8)\n\n##########\n# chat log\n##########\nchat_log_sig = \"48 8b da 49 8b f8 41 8b d1 48 8b f1 ?? ?? ?? ?? ?? 48 8d 05\"\nchat_log_addr = _am.get(\"chat log\", scan_address, chat_log_sig, cmd_len=24)\n\n##########\n# movement\n##########\nmovement_sig=\"48 8D 0D ? ? ? ? E8 ? ? ? ? BA ? ? ? ? 48 8D 0D ? ? ? ? 
0F B6 D8\"\nmovement_addr = _am.get(\"movement\", scan_address,movement_sig,cmd_len = 7)\n\n_storage.save()\n","sub_path":"plugins/XivMemory/AddressManager.py","file_name":"AddressManager.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171446567","text":"from direct.showbase.ShowBase import ShowBase\nfrom direct.showbase.RandomNumGen import RandomNumGen\nfrom direct.actor.Actor import Actor\nfrom panda3d.core import Vec3\nfrom direct.interval.IntervalGlobal import *\n\nclass Application(ShowBase):\n def __init__(self):\n ShowBase.__init__(self)\n self.panda = Actor(\"panda\", {\"walk\": \"panda-walk\"})\n self.panda.reparentTo(render)\n self.panda.setHpr(-90, 0, 0)\n\n self.walkIval1 = self.panda.posInterval(2, Vec3(-8, 0, 0), startPos = Vec3(8, 0, 0))\n self.walkIval2 = self.panda.posInterval(2, Vec3(8, 0, 0), startPos = Vec3(-8, 0, 0))\n self.turnIval1 = self.panda.hprInterval(0.5, Vec3(90, 0, 0), startHpr = Vec3(-90, 0, 0))\n self.turnIval2 = self.panda.hprInterval(0.5, Vec3(-90, 0, 0), startHpr = Vec3(90, 0, 0))\n self.colorIval = Func(self.randomColor)\n self.pandaAnim = ActorInterval(self.panda, \"walk\", loop = 1, duration = 5)\n self.pandaMove = Sequence(self.walkIval1, self.turnIval1, self.colorIval, self.walkIval2, self.turnIval2, self.colorIval)\n self.pandaWalk = Parallel(self.pandaAnim, self.pandaMove)\n self.pandaWalk.loop()\n\n self.cam.setPos(0, -50, 6)\n\n def randomColor(self):\n rand = RandomNumGen(globalClock.getFrameTime())\n self.panda.setColorScale(rand.random(), rand.random(), rand.random(), 1)\n","sub_path":"CarAI/joshua_work/old/tutorial/Code/02_code_revised/IntervalsActor/src/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1846740","text":"import numpy as np\nimport tensorflow as tf\n\ndef prints(content):\n \"\"\"Prints content with a line gap afterwards\n\n Args:\n content (any): the content to print\n \"\"\"\n print(content, '\\n')\n\ndef supervised_learning_transform(data, start_idx, end_idx, look_back):\n \"\"\"Transforms time series into supervised learning data.\n\n Args:\n data (numpy array): the time series to generate data from\n start_idx (int): the first index (inclusive) to generate data from\n end_idx (int): the last index (exclusive) to generate data from\n look_back (int): the look back duration\n\n Returns:\n x (numpy array): transformed x data\n y (numpy array): transformed y data\n \"\"\"\n assert start_idx < end_idx\n assert look_back <= start_idx\n assert end_idx <= len(data)\n\n x, y = [], []\n for i in range(start_idx, end_idx):\n x.append(data[i - look_back:i])\n y.append(data[i])\n x, y = np.array(x), np.array(y)\n x = np.squeeze(x)\n x = x[..., np.newaxis]\n \n assert len(x) == len(y)\n return x, y\n\ndef build_rnn_model(n_layers, n_units, input_shape, optimizer):\n \"\"\"Builds a rnn model.\n\n Args:\n n_layers (int): number of layers in the rnn\n n_units (list): number of units in each layer of the rnn\n input_shape (tuple): input dimensions to the rnn\n optimizer (string): optimizer for the model\n\n Returns:\n model (keras model): rnn\n \"\"\"\n assert len(n_units) == n_layers\n\n x = inputs = tf.keras.Input(shape=input_shape)\n for i in range(n_layers):\n x = tf.keras.layers.LSTM(n_units[i], return_sequences=(i + 1 < n_layers))(x)\n outputs = tf.keras.layers.Dense(1)(x)\n model = 
tf.keras.Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer=optimizer, loss='mean_squared_error')\n\n return model\n\ndef predict_future_values(data, model, scaler, look_back, look_ahead):\n \"\"\"Uses model and data to predict time series values in the future.\n\n Args:\n data (numpy array): the time series data\n model (keras model): the model used for prediction\n scaler (scaler obj): the scaler used to scale data\n look_back (int): the look back duration\n look_ahead (int): the look ahead duration \n\n Returns:\n y (numpy array): the predicted time series values\n \"\"\"\n y = []\n x = data[-look_back:].reshape((1, look_back, 1))\n for _ in range(look_ahead):\n pred = model(x).numpy()\n y.append(pred)\n x = np.concatenate([x, np.array([pred]).reshape((1, 1, 1))], axis=1)\n x = x[:, -look_back:]\n y = np.array(y).reshape((1, -1))\n y = scaler.inverse_transform(y)\n return y.reshape(-1)","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"521810394","text":"##############################################################################\n#\n# Copyright (c) 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Extension of the ZODB DB class\n\n$Id$\n\"\"\"\n\nimport cPickle\nimport cStringIO\n\nfrom threading import RLock\nfrom transaction import Transaction\nfrom ZODB.DB import DB\nfrom apelib.core.interfaces import ConfigurationError\n\nfrom connection import ApeConnection\nfrom storage import ApeStorage\nfrom resource import StaticResource\nfrom interfaces import IResourceAccess\n\n\ndef call_conf_factory(factory, kw):\n \"\"\"Returns (conf, conns) given the name of a factory and arguments.\n \"\"\"\n pos = factory.rfind('.')\n if pos < 0:\n raise ConfigurationError(\n 'factory must be a string containing .')\n module = factory[:pos]\n name = factory[pos + 1:]\n m = __import__(module, {}, {}, (name,))\n f = getattr(m, name)\n return f(**kw)\n\n\nclass ApeDB (DB):\n \"\"\"Mapper-driven Database\n \"\"\"\n\n klass = ApeConnection\n database_name = \"unnamed\"\n\n # SDH: some extra args.\n def __init__(self, storage,\n conf_resource=None,\n factory=None,\n scan_interval=10,\n pool_size=7,\n cache_size=400,\n cache_deactivate_after=60,\n version_pool_size=3,\n version_cache_size=100,\n version_cache_deactivate_after=10,\n **kw\n ):\n \"\"\"Create an object database.\n \"\"\"\n if conf_resource is None:\n if factory is not None:\n # Use a configuration factory\n conf, connections = call_conf_factory(factory, kw)\n conf_resource = StaticResource(conf)\n else:\n if kw:\n raise ConfigurationError('Extra keyword args: %s' % kw)\n if isinstance(storage, ApeStorage):\n # Use the configuration from the storage\n conf_resource = storage.conf_resource\n else:\n raise ConfigurationError(\n 'No configuration or factory specified')\n else:\n # conf_resource was specified\n if kw:\n raise ConfigurationError('Extra keyword args: %s' % kw)\n assert 
IResourceAccess.isImplementedBy(conf_resource)\n assert factory is None\n \n # Allocate locks:\n l = RLock()\n self._a=l.acquire\n self._r=l.release\n\n # Setup connection pools and cache info\n self._pools={}\n self._temps=[]\n self._pool_size=pool_size\n self._cache_size=cache_size\n self._cache_deactivate_after = cache_deactivate_after\n self._version_pool_size=version_pool_size\n self._version_cache_size=version_cache_size\n self._version_cache_deactivate_after = version_cache_deactivate_after\n\n self._miv_cache={}\n\n # Setup storage\n self._storage=storage\n storage.registerDB(self)\n if not hasattr(storage,'tpc_vote'): storage.tpc_vote=lambda *args: None\n\n self._conf_resource = conf_resource\n scan_interval = int(scan_interval)\n if scan_interval > 0:\n from scanner import PoolScanControl, Scanner\n pool_ctl = PoolScanControl(storage, db=self, scan_interval=scan_interval)\n self.pool_scan_ctl = pool_ctl\n scanner = Scanner()\n storage.scanner = scanner\n scanner.storage = storage\n else:\n self._scan_ctl = None\n\n # Pass through methods:\n self.history = storage.history\n\n if hasattr(storage, 'undoInfo'):\n self.undoInfo=storage.undoInfo\n\n # Create the root object if it doesn't exist\n c = self.open()\n try:\n c._prepare_root()\n finally:\n c.close()\n","sub_path":"Products.Ape/trunk/lib/apelib/zodb3/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426572620","text":"import os\n\nimport xarray as xr\n\nfrom oas_erf.util import practical_functions\nfrom oas_erf.util.filenames import filename_map_avg\nfrom oas_erf.util.slice_average.avg_pkg import get_fields4weighted_avg, average_model_var, \\\n compute_weighted_averages, is_weighted_avg_var\n\n\ndef get_average_map2(ds, varlist,case_name, from_time, to_time,\n avg_over_lev =True, pmin=850., p_level=1013., pressure_coord=True, save_avg=True,\n recalculate=False, model_name='NorESM', time_mask=None):\n \"\"\"\n\n :param ds:\n :param varlist:\n :param case_name:\n :param from_time:\n :param to_time:\n :param avg_over_lev:\n :param pmin:\n :param p_level:\n :param pressure_coord:\n :param save_avg:\n :param recalculate:\n :param model_name:\n :param time_mask:\n :return:\n \"\"\"\n\n xr_out = ds.copy()\n for var in varlist:\n found_map_avg = False\n # Look for file:\n if not recalculate:\n ds_copy, found_map_avg = load_average2map(model_name, case_name, var, from_time, to_time, avg_over_lev,\n pmin, p_level, pressure_coord, time_mask=time_mask)\n # if file not found:\n if not found_map_avg:\n # some fields require other fields for weighted average:\n sub_varL = get_fields4weighted_avg(var)\n\n\n ds_copy = ds.copy()\n for svar in sub_varL:\n if ('lev' in ds[svar].dims):\n had_lev_coord=True\n else: had_lev_coord=False\n if avg_over_lev:\n ds_copy = average_model_var(ds_copy, svar, area='Global',\n dim=list(set(ds.dims)-{'lat', 'lon'}), minp=pmin,\n time_mask=time_mask)\n else:\n ds_copy = average_model_var(ds_copy, svar, area='Global',\n dim=list(set(ds.dims)-{'lat', 'lon', 'lev'}),\n time_mask=time_mask)\n if 'lev' in ds_copy[svar].dims:\n ds_copy[svar] = ds_copy[svar].sel(lev=p_level, method='nearest')\n\n ds_copy = compute_weighted_averages(ds_copy, var, model_name)\n ds_copy[var].attrs['Calc_weight_mean']=str(is_weighted_avg_var(var)) + ' map'\n if save_avg:\n filen = filename_map_avg(model_name, case_name, var, from_time, to_time, avg_over_lev, pmin, p_level,\n pressure_coord, lev_was_coord=had_lev_coord, 
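# a previously saved average may or may not have had 'lev' as a coordinate, so both filename variants are probed\n                                    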
time_mask=time_mask)\n practical_functions.make_folders(filen)\n practical_functions.save_dataset_to_netcdf(ds_copy, filen)\n xr_out[var] = ds_copy[var]\n return xr_out\n\n\ndef load_average2map(model, case, var, startyear, endyear, avg_over_lev, pmin, p_level, pressure_adj,\n time_mask=None):\n \"\"\"\n Loads avg to map\n :param model:\n :param case:\n :param var:\n :param startyear:\n :param endyear:\n :param avg_over_lev:\n :param pmin:\n :param p_level:\n :param pressure_adj:\n :param time_mask: optional\n\n :return:\n \"\"\"\n #var_info_df = var_overview_sql.open_and_fetch_var_case(model, case, var)\n #if len(var_info_df) > 0:\n # had_lev_coord = bool(var_info_df['lev_is_dim'].values)\n #else:\n # had_lev_coord = True\n filen_had_lev = filename_map_avg(model, case, var, startyear, endyear, avg_over_lev, pmin, p_level, pressure_adj,\n lev_was_coord=True, time_mask=time_mask)\n filen_no_lev = filename_map_avg(model, case, var, startyear, endyear, avg_over_lev, pmin, p_level, pressure_adj,\n lev_was_coord=False, time_mask=time_mask)\n if os.path.isfile(filen_had_lev):\n filen=filen_had_lev\n file_found=True\n elif os.path.isfile(filen_no_lev):\n filen = filen_no_lev\n file_found=True\n else:\n file_found=False\n if file_found:\n print('Loading file %s' % filen)\n ds = xr.open_dataset(filen).copy()\n else:\n print('Did not find map mean with filename: %s or %s' % (filen_had_lev, filen_no_lev))\n ds=None\n return ds, file_found","sub_path":"oas_erf/util/slice_average/avg_pkg/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269796153","text":"# pyramid\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom pyramid.renderers import render, render_to_response\nfrom pyramid.httpexceptions import HTTPNotFound\nfrom pyramid.httpexceptions import HTTPSeeOther\n\n# stdlib\nimport datetime\nimport json\nimport pdb\n\n# pypi\nimport sqlalchemy\nimport transaction\n\n# localapp\nfrom .. 
import lib\nfrom ..lib import formhandling\nfrom ..lib import form_utils as form_utils\nfrom ..lib.forms import Form_QueueCertificate_mark\n\nfrom ..lib.forms import Form_QueueCertificate_new_structured\nfrom ..lib.handler import Handler, items_per_page\nfrom ..lib.handler import json_pagination\nfrom ...lib import db as lib_db\nfrom ...lib import errors\nfrom ...lib import utils\nfrom ...model import utils as model_utils\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# ==============================================================================\n\n\nclass ViewList(Handler):\n\n    \"\"\"\n    note-\n    if a renewal fails, the record is marked with the following:\n        timestamp_process_attempt = time.time()\n        process_result = False\n    Records with the above are the failed renewal attempts.\n\n    The record stays active and in the queue, as it may renew later on.\n    To be removed, it must succeed or be explicitly removed from the queue.\n    \"\"\"\n\n    @view_config(route_name=\"admin:queue_certificates\")\n    @view_config(route_name=\"admin:queue_certificates|json\")\n    def list_redirect(self):\n        url_redirect = (\n            \"%s/queue-certificates/all\"\n            % self.request.registry.settings[\"app_settings\"][\"admin_prefix\"]\n        )\n        if self.request.wants_json:\n            raise HTTPSeeOther(\"%s.json\" % url_redirect)\n        else:\n            raise HTTPSeeOther(url_redirect)\n\n    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n    @view_config(\n        route_name=\"admin:queue_certificates:all\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:all_paginated\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:failures\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:failures_paginated\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:successes\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:successes_paginated\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:unprocessed\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:unprocessed_paginated\",\n        renderer=\"/admin/queue_certificates.mako\",\n    )\n    @view_config(route_name=\"admin:queue_certificates:all|json\", renderer=\"json\")\n    @view_config(\n        route_name=\"admin:queue_certificates:all_paginated|json\", renderer=\"json\"\n    )\n    @view_config(route_name=\"admin:queue_certificates:failures|json\", renderer=\"json\")\n    @view_config(\n        route_name=\"admin:queue_certificates:failures_paginated|json\", renderer=\"json\",\n    )\n    @view_config(route_name=\"admin:queue_certificates:successes|json\", renderer=\"json\")\n    @view_config(\n        route_name=\"admin:queue_certificates:successes_paginated|json\", renderer=\"json\",\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:unprocessed|json\", renderer=\"json\"\n    )\n    @view_config(\n        route_name=\"admin:queue_certificates:unprocessed_paginated|json\",\n        renderer=\"json\",\n    )\n    def list(self):\n        get_kwargs = {}\n        url_template = None\n        sidenav_option = None\n\n        if self.request.matched_route.name in (\n            \"admin:queue_certificates:all\",\n            \"admin:queue_certificates:all_paginated\",\n            \"admin:queue_certificates:all|json\",\n            \"admin:queue_certificates:all_paginated|json\",\n        ):\n            sidenav_option = \"all\"\n            url_template = \"%s/queue-certificates/all/{0}\"\n        elif 
self.request.matched_route.name in (\n \"admin:queue_certificates:failures\",\n \"admin:queue_certificates:failures_paginated\",\n \"admin:queue_certificates:failures|json\",\n \"admin:queue_certificates:failures_paginated|json\",\n ):\n sidenav_option = \"failures\"\n get_kwargs[\"failures_only\"] = True\n url_template = \"%s/queue-certificates/failures/{0}\"\n elif self.request.matched_route.name in (\n \"admin:queue_certificates:successes\",\n \"admin:queue_certificates:successes_paginated\",\n \"admin:queue_certificates:successes|json\",\n \"admin:queue_certificates:successes_paginated|json\",\n ):\n sidenav_option = \"successes\"\n get_kwargs[\"successes_only\"] = True\n url_template = \"%s/queue-certificates/successes/{0}\"\n elif self.request.matched_route.name in (\n \"admin:queue_certificates:unprocessed\",\n \"admin:queue_certificates:unprocessed_paginated\",\n \"admin:queue_certificates:unprocessed|json\",\n \"admin:queue_certificates:unprocessed_paginated|json\",\n ):\n get_kwargs[\"unprocessed_only\"] = True\n sidenav_option = \"unprocessed\"\n url_template = \"%s/queue-certificates/unprocessed/{0}\"\n\n # update the url_template with our prefix\n url_template = (\n url_template\n % self.request.registry.settings[\"app_settings\"][\"admin_prefix\"]\n )\n # and make it json if needed\n if self.request.wants_json:\n url_template = \"%s.json\" % url_template\n\n items_count = lib_db.get.get__QueueCertificate__count(\n self.request.api_context, **get_kwargs\n )\n (pager, offset) = self._paginate(items_count, url_template=url_template)\n items_paged = lib_db.get.get__QueueCertificate__paginated(\n self.request.api_context, limit=items_per_page, offset=offset, **get_kwargs\n )\n\n continue_processing = False\n _results = self.request.params.get(\"results\", None)\n if _results:\n try:\n _results = json.loads(_results)\n items_remaining = int(_results.get(\"count_remaining\", 0))\n if items_remaining:\n continue_processing = True\n except Exception as exc:\n # this could be a json or int() error\n pass\n if self.request.wants_json:\n _domains = {d.id: d.as_json for d in items_paged}\n return {\n \"QueueCertificates\": _domains,\n \"pagination\": json_pagination(items_count, pager),\n }\n return {\n \"project\": \"peter_sslers\",\n \"QueueCertificates_count\": items_count,\n \"QueueCertificates\": items_paged,\n \"sidenav_option\": sidenav_option,\n \"pager\": pager,\n \"continue_processing\": continue_processing,\n }\n\n\nclass ViewNew(Handler):\n def _parse_queue_source(self):\n _failure_url = \"%s/queue-certificates\" % (self.request.admin_url,)\n queue_source = self.request.params.get(\"queue_source\")\n acme_order_id = self.request.params.get(\"acme_order\")\n server_certificate_id = self.request.params.get(\"server_certificate\")\n unique_fqdn_set_id = self.request.params.get(\"unique_fqdn_set\")\n\n queue_data = {\n \"queue_source\": queue_source,\n \"AcmeAccountKey_reuse\": None,\n \"AcmeOrder\": None,\n \"PrivateKey_reuse\": None,\n \"ServerCertificate\": None,\n \"UniqueFQDNSet\": None,\n }\n if (queue_source == \"AcmeOrder\") and acme_order_id:\n dbAcmeOrder = lib_db.get.get__AcmeOrder__by_id(\n self.request.api_context, acme_order_id\n )\n if not dbAcmeOrder:\n raise HTTPSeeOther(\n \"%s?result=error&error=invalid+acme-order&operation=new\"\n % _failure_url\n )\n if not dbAcmeOrder.is_renewable_queue:\n raise HTTPSeeOther(\n \"%s?result=error&error=acme-order+ineligible&operation=new\"\n % _failure_url\n )\n queue_data[\"AcmeOrder\"] = dbAcmeOrder\n if 
dbAcmeOrder.acme_account_key.is_active:\n queue_data[\"AcmeAccountKey_reuse\"] = dbAcmeOrder.acme_account_key\n if dbAcmeOrder.private_key.is_active:\n queue_data[\"PrivateKey_reuse\"] = dbAcmeOrder.private_key\n\n elif (queue_source == \"ServerCertificate\") and server_certificate_id:\n dbServerCertificate = lib_db.get.get__ServerCertificate__by_id(\n self.request.api_context, server_certificate_id\n )\n if not dbServerCertificate:\n raise HTTPSeeOther(\n \"%s?result=error&error=invalid+server-certificate&operation=new\"\n % _failure_url\n )\n queue_data[\"ServerCertificate\"] = dbServerCertificate\n if dbServerCertificate.private_key.is_active:\n queue_data[\"PrivateKey_reuse\"] = dbServerCertificate.private_key\n\n elif (queue_source == \"UniqueFQDNSet\") and unique_fqdn_set_id:\n dbUniqueFQDNSet = lib_db.get.get__UniqueFQDNSet__by_id(\n self.request.api_context, unique_fqdn_set_id\n )\n if not dbUniqueFQDNSet:\n raise HTTPSeeOther(\n \"%s?result=error&error=invalid+unique-fqdn-set&operation=new\"\n % _failure_url\n )\n queue_data[\"UniqueFQDNSet\"] = dbUniqueFQDNSet\n else:\n raise HTTPSeeOther(\n \"%s?result=error&error=invalid+queue+source&operation=new\"\n % _failure_url\n )\n return queue_data\n\n @view_config(route_name=\"admin:queue_certificate:new_structured\")\n @view_config(\n route_name=\"admin:queue_certificate:new_structured|json\", renderer=\"json\"\n )\n def new_structured(self):\n self._load_AcmeAccountKey_GlobalDefault()\n self._load_AcmeAccountProviders()\n self.queue_data = self._parse_queue_source()\n if self.request.method == \"POST\":\n return self._new_structured__submit()\n return self._new_structured__print()\n\n def _new_structured__print(self):\n if self.request.wants_json:\n return {\n \"instructions\": \"\"\"POST required\"\"\",\n \"form_fields\": {\n \"queue_source\": \"what is the source of the queue item?\",\n \"acme_order\": \"If queue_source is `AcmeOrder`, the corresponding id\",\n \"server_certificate\": \"If queue_source is `AcmeOrder`, the corresponding id\",\n \"unique_fqdn_set\": \"If queue_source is `AcmeOrder`, the corresponding id\",\n \"account_key_option\": \"How is the AcmeAccountKey specified?\",\n \"account_key_reuse\": \"pem_md5 of the existing account key. Must/Only submit if `account_key_option==account_key_reuse`\",\n \"account_key_global_default\": \"pem_md5 of the Global Default account key. Must/Only submit if `account_key_option==account_key_global_default`\",\n \"account_key_existing\": \"pem_md5 of any key. Must/Only submit if `account_key_option==account_key_existing`\",\n \"account_key_file_pem\": \"pem of the account key file. Must/Only submit if `account_key_option==account_key_file`\",\n \"acme_account_provider_id\": \"account provider. Must/Only submit if `account_key_option==account_key_file` and `account_key_file_pem` is used.\",\n \"account_key_file_le_meta\": \"LetsEncrypt Certbot file. 
Must/Only submit if `account_key_option==account_key_file` and `account_key_file_pem` is not used\",\n                    \"account_key_file_le_pkey\": \"LetsEncrypt Certbot file\",\n                    \"account_key_file_le_reg\": \"LetsEncrypt Certbot file\",\n                    \"private_key_option\": \"How is the PrivateKey being specified?\",\n                    \"private_key_reuse\": \"pem_md5 of existing key\",\n                    \"private_key_existing\": \"pem_md5 of existing key\",\n                    \"private_key_file_pem\": \"pem to upload\",\n                    \"private_key_cycle__renewal\": \"how should the PrivateKey be cycled on renewals?\",\n                },\n                \"form_fields_related\": [\n                    [\"acme_order\", \"server_certificate\", \"unique_fqdn_set\",],\n                ],\n                \"valid_options\": {\n                    \"queue_source\": (\n                        \"AcmeOrder\",\n                        \"ServerCertificate\",\n                        \"UniqueFQDNSet\",\n                    ),\n                    \"acme_account_provider_id\": {\n                        i.id: \"%s (%s)\" % (i.name, i.url)\n                        for i in self.dbAcmeAccountProviders\n                    },\n                    \"account_key_option\": model_utils.AcmeAccontKey_options_b,\n                    \"private_key_option\": model_utils.PrivateKey_options_b,\n                    \"AcmeAccountKey_GlobalDefault\": self.dbAcmeAccountKey_GlobalDefault.as_json\n                    if self.dbAcmeAccountKey_GlobalDefault\n                    else None,\n                    \"private_key_cycle__renewal\": model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,\n                },\n            }\n        return render_to_response(\n            \"/admin/queue_certificate-new-structured.mako\",\n            {\n                \"queue_source\": self.queue_data[\"queue_source\"],\n                \"AcmeOrder\": self.queue_data[\"AcmeOrder\"],\n                \"AcmeAccountKey_GlobalDefault\": self.dbAcmeAccountKey_GlobalDefault,\n                \"AcmeAccountProviders\": self.dbAcmeAccountProviders,\n                \"AcmeAccountKey_reuse\": self.queue_data[\"AcmeAccountKey_reuse\"],\n                \"PrivateKey_reuse\": self.queue_data[\"PrivateKey_reuse\"],\n                \"ServerCertificate\": self.queue_data[\"ServerCertificate\"],\n                \"UniqueFQDNSet\": self.queue_data[\"UniqueFQDNSet\"],\n            },\n            self.request,\n        )\n\n    def _new_structured__submit(self):\n        try:\n            (result, formStash) = formhandling.form_validate(\n                self.request,\n                schema=Form_QueueCertificate_new_structured,\n                validate_get=False,\n            )\n            if not result:\n                raise formhandling.FormInvalid()\n\n            (accountKeySelection, privateKeySelection) = form_utils.form_key_selection(\n                self.request, formStash, require_contact=False,\n            )\n            private_key_cycle__renewal = formStash.results[\"private_key_cycle__renewal\"]\n            private_key_cycle_id__renewal = model_utils.PrivateKeyCycle.from_string(\n                private_key_cycle__renewal\n            )\n\n            kwargs_create = {\n                \"dbAcmeAccountKey\": accountKeySelection.AcmeAccountKey,\n                \"dbPrivateKey\": privateKeySelection.PrivateKey,\n                \"private_key_cycle_id__renewal\": private_key_cycle_id__renewal,\n            }\n            _queue_source = self.queue_data[\"queue_source\"]\n            if _queue_source == \"AcmeOrder\":\n                kwargs_create[\"dbAcmeOrder\"] = self.queue_data[\"AcmeOrder\"]\n            elif _queue_source == \"ServerCertificate\":\n                kwargs_create[\"dbServerCertificate\"] = self.queue_data[\n                    \"ServerCertificate\"\n                ]\n            elif _queue_source == \"UniqueFQDNSet\":\n                kwargs_create[\"dbUniqueFQDNSet\"] = self.queue_data[\"UniqueFQDNSet\"]\n\n            try:\n                dbQueueCertificate = lib_db.create.create__QueueCertificate(\n                    self.request.api_context, **kwargs_create\n                )\n            except Exception as exc:\n                log.critical(\"create__QueueCertificate: %s\", exc)\n                # `formStash.fatal_form()` will raise `FormFieldInvalid(FormInvalid)`\n                formStash.fatal_form(message=\"Could not create the QueueCertificate\")\n\n            if self.request.wants_json:\n                return {\n                    
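# echo the freshly created record back to the API caller\n                    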
\"result\": \"success\",\n \"QueueCertificate\": dbQueueCertificate.as_json,\n }\n return HTTPSeeOther(\n \"%s/queue-certificate/%s\"\n % (\n self.request.registry.settings[\"app_settings\"][\"admin_prefix\"],\n dbQueueCertificate.id,\n )\n )\n\n except formhandling.FormInvalid as exc:\n if self.request.wants_json:\n return {\"result\": \"error\", \"form_errors\": formStash.errors}\n return formhandling.form_reprint(self.request, self._new_structured__print)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\nclass ViewFocus(Handler):\n def _focus(self):\n dbQueueCertificate = lib_db.get.get__QueueCertificate__by_id(\n self.request.api_context, self.request.matchdict[\"id\"], load_events=True\n )\n if not dbQueueCertificate:\n raise HTTPNotFound(\"the item was not found\")\n self._focus_item = dbQueueCertificate\n self._focus_url = \"%s/queue-certificate/%s\" % (\n self.request.admin_url,\n dbQueueCertificate.id,\n )\n return dbQueueCertificate\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n @view_config(\n route_name=\"admin:queue_certificate:focus\",\n renderer=\"/admin/queue_certificate-focus.mako\",\n )\n @view_config(route_name=\"admin:queue_certificate:focus|json\", renderer=\"json\")\n def focus(self):\n dbQueueCertificate = self._focus()\n if self.request.wants_json:\n return {\"result\": \"success\", \"QueueCertificate\": dbQueueCertificate.as_json}\n return {\"project\": \"peter_sslers\", \"QueueCertificate\": dbQueueCertificate}\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n @view_config(route_name=\"admin:queue_certificate:focus:mark\")\n @view_config(route_name=\"admin:queue_certificate:focus:mark|json\", renderer=\"json\")\n def focus_mark(self):\n dbQueueCertificate = self._focus()\n if self.request.method == \"POST\":\n return self._focus_mark__submit(dbQueueCertificate)\n return self._focus_mark__print(dbQueueCertificate)\n\n def _focus_mark__print(self, dbQueueCertificate):\n if self.request.wants_json:\n return {\n \"instructions\": [\n \"\"\"curl --form 'action=active' %s/mark.json\"\"\" % self._focus_url\n ],\n \"form_fields\": {\"action\": \"the intended action\"},\n \"valid_options\": {\"action\": [\"cancel\"]},\n }\n url_huh = \"%s?&result=error&error=post+required&operation=mark\" % (\n self._focus_url\n )\n return HTTPSeeOther(url_huh)\n\n def _focus_mark__submit(self, dbQueueCertificate):\n try:\n (result, formStash) = formhandling.form_validate(\n self.request, schema=Form_QueueCertificate_mark, validate_get=False\n )\n if not result:\n raise formhandling.FormInvalid()\n\n action = formStash.results[\"action\"]\n event_type = model_utils.OperationsEventType.from_string(\n \"QueueCertificate__mark\"\n )\n event_payload_dict = utils.new_event_payload_dict()\n event_payload_dict[\"queue_certificate.id\"] = dbQueueCertificate.id\n event_payload_dict[\"action\"] = formStash.results[\"action\"]\n\n event_status = False\n if action == \"cancel\":\n if not dbQueueCertificate.is_active:\n # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`\n formStash.fatal_field(field=\"action\", message=\"Already cancelled\")\n\n dbQueueCertificate.is_active = False\n dbQueueCertificate.timestamp_processed = (\n self.request.api_context.timestamp\n )\n event_status = \"QueueCertificate__mark__cancelled\"\n self.request.api_context.dbSession.flush(objects=[dbQueueCertificate])\n else:\n # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`\n 
formStash.fatal_field(field=\"action\", message=\"invalid action\")\n\n # bookkeeping\n dbOperationsEvent = lib_db.logger.log__OperationsEvent(\n self.request.api_context, event_type, event_payload_dict\n )\n lib_db.logger._log_object_event(\n self.request.api_context,\n dbOperationsEvent=dbOperationsEvent,\n event_status_id=model_utils.OperationsObjectEventStatus.from_string(\n event_status\n ),\n dbQueueCertificate=dbQueueCertificate,\n )\n if self.request.wants_json:\n return {\n \"result\": \"success\",\n \"QueueCertificate\": dbQueueCertificate.as_json,\n }\n\n url_post_required = \"%s?result=success&operation=mark\" % (self._focus_url,)\n return HTTPSeeOther(url_post_required)\n\n except formhandling.FormInvalid as exc:\n if self.request.wants_json:\n return {\"result\": \"error\", \"form_errors\": formStash.errors}\n url_failure = \"%s?result=error&error=%s&operation=mark&action=%s\" % (\n self._focus_url,\n errors.formstash_to_querystring(formStash),\n action,\n )\n raise HTTPSeeOther(url_failure)\n","sub_path":"peter_sslers/web/views_admin/queue_certificate.py","file_name":"queue_certificate.py","file_ext":"py","file_size_in_byte":22242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165680195","text":"import click\n\nfrom ..decorators import command\nfrom ..globals import get_global_settings\n\n\n@command(_is_buildin=True, group=\"Inspect\")\ndef commands(_state):\n settings = get_global_settings()\n\n for group_name, group in sorted(settings.functions.items()):\n click.secho(\"{}:\".format(group_name), fg=\"blue\", bold=True)\n for entry in sorted(group, key=lambda e: e.name):\n location = entry.location\n click.echo(\"\\t{} \".format(entry.name), nl=False)\n if location:\n click.secho(\"[{}]\".format(location), fg=\"cyan\")\n else:\n click.secho(\"\", fg=\"magenta\")\n click.echo()\n\n\n@command(_is_buildin=True, group=\"Inspect\")\ndef show_state(state):\n if not state.models and not state.test_data and not state.train_data:\n click.secho(\"State is empty\", fg=\"red\")\n\n if state.models:\n click.secho(\"{}:\".format(\"Models\"), fg=\"blue\", bold=True)\n for info in state.models:\n click.echo(\"\\t{}\".format(info.name))\n click.echo()\n\n if state.train_data:\n click.secho(\"{}:\".format(\"Train data\"), fg=\"blue\", bold=True)\n for info in state.train_data:\n click.echo(\"\\t{}\".format(info.name))\n click.echo()\n\n if state.test_data:\n click.secho(\"{}:\".format(\"Test data\"), fg=\"blue\", bold=True)\n for info in state.test_data:\n click.echo(\"\\t{}\".format(info.name))\n click.echo()\n\n\n@command(_is_buildin=True, group=\"Inspect\")\ndef show_models(state):\n for info in state.models:\n click.secho(\"--- Model '{}' ---\".format(info.name), fg=\"blue\", bold=True)\n info.model.summary()\n click.echo()\n","sub_path":"kitt/commands/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"476046067","text":"from django.urls import path # defs\n# from clinic.views.home import homepage\nfrom .views import (save_patient, edit_patient, \n table_patient, patient_details)\n# from clinic.views import visits, medicine, prescription\n# from clinic.views.search import search_patient, search_visit, search_date, search_only\n# from clinic.views.calculations import (calculate_income, calculate_day_income, \n# calculate_month_income, calculate_year_income)\n\napp_name = 'patientdata'\nurlpatterns = [\n # 
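`_focus_mark__submit` above guards a one-way state transition (active to cancelled) and records an event status string for bookkeeping. A small standalone sketch of the same guard-then-flip pattern, with hypothetical names (`cancel_queue_item`, `AlreadyCancelled`) and a plain dict standing in for the ORM row:

```python
from datetime import datetime, timezone

class AlreadyCancelled(ValueError):
    """Raised when the transition guard rejects a repeat cancel."""

def cancel_queue_item(item, now=None):
    # Guard first, then flip state and return the event status for logging.
    if not item["is_active"]:
        raise AlreadyCancelled("Already cancelled")
    item["is_active"] = False
    item["timestamp_processed"] = now or datetime.now(timezone.utc)
    return "QueueCertificate__mark__cancelled"

if __name__ == "__main__":
    item = {"id": 1, "is_active": True}
    print(cancel_queue_item(item))
    try:
        cancel_queue_item(item)
    except AlreadyCancelled as exc:
        print("second cancel rejected:", exc)
```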
path('', homepage, name='myhome'), # localhost:8000/clinic\n \n # for patient\n path('create/patient/', save_patient, name='save_patient'),\n path('edit/patient//', edit_patient, name='edit_patient'),\n path('table/patients/', table_patient, name='table_patient'),\n path('patient/details/by/barcode//', patient_details, name='patient_details'),\n \n # for visit\n # path('create/visit/', visits.save_visits, name='save_visits'),\n # path('edit/visit//patient//', visits.visits_patient_id, name='visits_patient_id'),\n \n # path('edit/visit//', visits.edit_visits, name='edit_visits'),\n # path('table/visits/', visits.table_visits, name='table_visits'),\n # path('export/table/', visits.export_table, name='export_table'),\n \n #\n # path('drug/patient//visit//',\n # medicine.save_medicine, name='save_medicine'),\n #\n # path('add/prescription/patient//visit//',\n # medicine.add_new, name='add_new'),\n #\n # path('patient//visit//drug//', \n # medicine.edit_medicine, name='edit_medicine'),\n \n #\n # path('patient//visit//delete/drug//', medicine.delete_medicine, name='delete_medicine'),\n # path('table/medicine/', medicine.table_medicine, name='table_medicine'),\n \n #\n # path('search/patient/', search_patient, name='search_patient'),\n # path('search/visit/', search_visit, name='search_visit'),\n # path('search/date/', search_date, name='search_date'),\n # path('search/only/date/', search_only, name='search_only'),\n\n #\n # path('income/day////', calculate_income, name='day_income'),\n # path('income/month////', calculate_income, name='month_income'),\n # path('income/year//', calculate_income, name='year_income'),\n # # path('income/day/', calculate_income, name='calculate_day_income'),\n # # path('income/month/', calculate_income, name='calculate_month_income'),\n # path('income/day/', calculate_day_income, name='calculate_day_income'),\n # path('income/month/', calculate_month_income, name='calculate_month_income'),\n # path('income/year/', calculate_year_income, name='calculate_year_income'),\n \n # #\n # path('prescription/visit//', prescription.get_pdf, name='get_pdf'),\n # # This is a good one for printing prescription\n # path('print/visit//', prescription.print_html, name='print_html'),\n # #\n # path('pdf/', prescription.some_view, name='some_view'),\n\n] \n","sub_path":"src/clinic/apps/patientdata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409583688","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nf_in = './res/2.txt'\nf_out = './bg_plot_2.png'\n\nx = np.genfromtxt(f_in, usecols=(0))\ny = np.genfromtxt(f_in, usecols=(4))\n\nplt.plot(x, y, 'ro')\nplt.ticklabel_format(style='sci', axis='y', useMathText=True)\n#plt.yticks(list(plt.yticks()[0]) + [209])\nplt.xlabel(r'*bw $d_{cut}, arcdeg$')\nplt.ylabel(r'Background, K')\nplt.tight_layout()\n#plt.axhline(209)\n#plt.axis(ymax = 0.001)\nplt.grid(True)\nplt.savefig(f_out)\nplt.close()\n","sub_path":"big_test/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"435942413","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2017 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, 
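The `sp.py` record above reads two columns of a whitespace-separated results file with `np.genfromtxt` and writes the plot straight to a PNG. Here is a self-contained variant that uses an in-memory file and the non-interactive Agg backend, so it runs without a display; the sample numbers are made up.

```python
import io
import numpy as np
import matplotlib
matplotlib.use("Agg")  # render to files only, no display needed
import matplotlib.pyplot as plt

# In-memory stand-in for the whitespace-separated results file.
raw = io.StringIO("0.1 1 2 3 210.5\n0.2 1 2 3 209.8\n0.3 1 2 3 209.1\n")
x, y = np.genfromtxt(raw, usecols=(0, 4), unpack=True)  # columns 0 and 4 only

plt.plot(x, y, "ro")
plt.xlabel(r"$d_{cut}$, arcdeg")
plt.ylabel("Background, K")
plt.grid(True)
plt.tight_layout()
plt.savefig("bg_plot_sketch.png")
plt.close()
```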
including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\n\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\n\nimport os.path as op\nfrom pyannote.database import Database\nfrom pyannote.database.protocol import SpeakerDiarizationProtocol\nfrom pyannote.parser import MDTMParser\nfrom pyannote.parser import UEMParser\n\n\nclass VoxCeleb1(SpeakerDiarizationProtocol):\n \"\"\"VoxCeleb1 protocol \"\"\"\n\n def trn_iter(self):\n for _ in []:\n yield\n\n def xxx_iter(self, xxx):\n\n data_dir = op.join(op.dirname(op.realpath(__file__)), 'data')\n\n annotation = MDTMParser().read(\n op.join(data_dir, 'voxceleb1.{xxx}.mdtm'.format(xxx=xxx)))\n\n annotated = UEMParser().read(\n op.join(data_dir, 'voxceleb1.{xxx}.uem'.format(xxx=xxx)))\n\n for uri in sorted(annotation.uris):\n yield {\n 'database': 'VoxCeleb',\n 'uri': uri,\n 'annotation': annotation(uri),\n 'annotated': annotated(uri),\n }\n\n def dev_iter(self):\n return self.xxx_iter('dev')\n\n def tst_iter(self):\n return self.xxx_iter('test')\n\nclass VoxCeleb(Database):\n \"\"\"VoxCeleb: a large-scale speaker identification dataset\n\nCitation\n========\n@InProceedings{Nagrani17,\n author = \"Nagrani, A. and Chung, J.~S. 
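The `VoxCeleb1` protocol in this record routes `dev_iter` and `tst_iter` through a single `xxx_iter(subset)` generator. A stripped-down sketch of that parameterised-iterator pattern, with an in-memory dict standing in for the MDTM/UEM annotation files:

```python
# In-memory stand-in for the MDTM/UEM annotation files.
ANNOTATIONS = {
    "dev": {"abc123": "speaker_A"},
    "test": {"xyz789": "speaker_B"},
}

def xxx_iter(subset):
    # One generator, parameterised by subset name.
    for uri in sorted(ANNOTATIONS[subset]):
        yield {"database": "VoxCeleb", "uri": uri,
               "annotation": ANNOTATIONS[subset][uri]}

def dev_iter():
    return xxx_iter("dev")

def tst_iter():
    return xxx_iter("test")

for item in dev_iter():
    print(item)
```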
and Zisserman, A.\",\n title = \"VoxCeleb: a large-scale speaker identification dataset\",\n booktitle = \"INTERSPEECH\",\n year = \"2017\",\n}\n\nWebpage\n=======\nhttp://www.robots.ox.ac.uk/~vgg/data/voxceleb/\n\n \"\"\"\n def __init__(self, preprocessors={}, **kwargs):\n super(VoxCeleb, self).__init__(preprocessors=preprocessors, **kwargs)\n\n self.register_protocol(\n 'SpeakerDiarization', 'VoxCeleb1', VoxCeleb1)\n","sub_path":"VoxCeleb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352566842","text":"# coding=utf-8\n'''\n@Title:visualize_data\n\n@Author: tyee.noprom@qq.com\n@Time: 4/18/16 8:07 PM\n'''\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('../data/ch02/Customer Churn Model.txt')\n# Draw scatter plots in a 2x2 grid of four panels\nfigure, axs = plt.subplots(2, 2, sharey=True, sharex=True)\ndata.plot(kind='scatter', x='Day Mins', y='Day Charge', ax=axs[0][0])\ndata.plot(kind='scatter', x='Night Mins', y='Night Charge', ax=axs[0][1])\ndata.plot(kind='scatter', x='Day Calls', y='Day Charge', ax=axs[1][0])\ndata.plot(kind='scatter', x='Night Calls', y='Night Charge', ax=axs[1][1])\nplt.show()\n\n# Draw a histogram\nplt.hist(data['Day Calls'], bins=8)\nplt.xlabel('Day Calls Value') # x-axis label\nplt.ylabel('Frequency') # y-axis label\nplt.title('Frequency of Day Calls') # title of the whole figure\nplt.show()\n\n# Draw a box plot\nplt.boxplot(data['Day Calls'])\nplt.ylabel('Day Calls')\nplt.title('Box Plot of Day Calls')\nplt.show()\n\n# Generate uniformly distributed random numbers and display them\nx = range(1, 101)\ny = np.random.uniform(1, 100, 1000000) # the larger the sample, the closer the histogram gets to the flat uniform shape\nplt.hist(y)\nplt.show()\n\n# Generate a standard normal distribution\nx = range(1, 101)\ny = np.random.randn(100)\nplt.plot(x, y)\nplt.show()\n\n# get a 2 x 4 array of numbers following a standard normal distribution\na = np.random.randn(2, 4)\n# mean 1.5 and standard deviation 2.5\na = 2.5 * np.random.randn(100) + 1.5\n# Plot a standard normal distribution\na = np.random.randn(100000)\nb = range(1, 101)\nplt.hist(a)\nplt.show()\n","sub_path":"ch02/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"581381795","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\n\ndef sigmoid_derivative(x):\n return sigmoid(x) * (1.0 - sigmoid(x))\n\n\ndef make_matrix(m, n, f=0.0): # create an m x n matrix\n mat = []\n for i in range(m):\n mat.append([f] * n)\n return mat\n\n\nclass BPNetWork:\n def __init__(self, inputNodeNum, hiddenLayerNum, hiddenLayerNodeNum, outputNodeNum, r, weightDecay): # initialise member variables\n self.inputNodeNum = inputNodeNum # number of input-layer nodes\n self.hiddenLayerNum = hiddenLayerNum # number of hidden layers\n self.hiddenLayerNodeNum = hiddenLayerNodeNum # nodes per hidden layer\n self.outputNodeNum = outputNodeNum # number of output-layer nodes\n self.r = r # learning rate\n self.weightDecay = weightDecay\n self.inputCell = [1.0] * self.inputNodeNum\n # initialise the weights\n self.allWeight = []\n for t in range(0, self.hiddenLayerNum + 1):\n if t == 0: # weights between the input layer and the first hidden layer\n tmp = make_matrix(self.inputNodeNum, self.hiddenLayerNodeNum)\n for i in range(0, self.inputNodeNum):\n for j in range(0, self.hiddenLayerNodeNum):\n tmp[i][j] = random.uniform(0, 0.001)\n self.allWeight.append(tmp)\n elif 0 < t < self.hiddenLayerNum: # weights between consecutive hidden layers\n tmp = make_matrix(self.hiddenLayerNodeNum, self.hiddenLayerNodeNum)\n for i in range(self.hiddenLayerNodeNum):\n for j in 
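The `sigmoid_derivative` helper defined just above takes the pre-activation input x, yet the backpropagation code later in this file applies it to already-activated cell values; for an activated output y = sigmoid(x) the conventional derivative is y * (1 - y), computed without re-evaluating the exponential. A sketch of a numerically stable sigmoid together with that derivative-from-output form:

```python
import numpy as np

def sigmoid(x):
    # Piecewise form avoids overflow in exp() for large negative x.
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out

def sigmoid_derivative_from_output(y):
    # Derivative expressed in the activated value y = sigmoid(x).
    return y * (1.0 - y)

x = np.array([-30.0, 0.0, 30.0])
y = sigmoid(x)
print(y, sigmoid_derivative_from_output(y))
```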
range(self.hiddenLayerNodeNum):\n tmp[i][j] = random.uniform(0, 0.001)\n self.allWeight.append(tmp)\n else: # weights between the last hidden layer and the output layer\n tmp = make_matrix(self.hiddenLayerNodeNum, self.outputNodeNum)\n for i in range(self.hiddenLayerNodeNum):\n for j in range(self.outputNodeNum):\n tmp[i][j] = random.uniform(0, 0.001)\n self.allWeight.append(tmp)\n # initialise the hidden layers\n self.hiddenCells = []\n for t in range(0, hiddenLayerNum):\n self.hiddenCells.append([1.0] * self.hiddenLayerNodeNum)\n # initialise the output layer\n self.outputCell = [1.0] * self.outputNodeNum\n # initialise the biases\n self.allBias = []\n for t in range(0, self.hiddenLayerNum + 1):\n if t == self.hiddenLayerNum: # output-layer bias\n tmp = make_matrix(self.outputNodeNum, 1)\n for o in range(self.outputNodeNum):\n tmp[o] = random.uniform(-0.001, 0) # this assignment replaces the list at this index with a plain number\n self.allBias.append(tmp)\n else:\n tmp = make_matrix(self.hiddenLayerNodeNum, 1)\n for h in range(self.hiddenLayerNodeNum):\n tmp[h] = random.uniform(-0.001, 0)\n self.allBias.append(tmp)\n\n def forwardPropagate(self, inputs):\n # values of the input layer\n for i in range(0, self.inputNodeNum):\n self.inputCell[i] = inputs[i] # should this be normalised?\n # print(self.inputCell)\n # print(\"weight: \" + str(self.allWeight))\n # values of each hidden layer\n for t in range(0, len(self.hiddenCells)):\n if t == 0: # propagation from the input layer to the first hidden layer\n for j in range(self.hiddenLayerNodeNum):\n total = 0.0\n for i in range(self.inputNodeNum):\n total += self.inputCell[i] * self.allWeight[t][i][j]\n self.hiddenCells[t][j] = sigmoid(total + self.allBias[t][j])\n elif 0 < t <= len(self.hiddenCells) - 1: # values of the remaining hidden layers\n for j in range(self.hiddenLayerNodeNum):\n total = 0.0\n for i in range(self.hiddenLayerNodeNum):\n total += self.hiddenCells[t - 1][i] * self.allWeight[t][i][j]\n self.hiddenCells[t][j] = sigmoid(total + self.allBias[t][j])\n # print(self.hiddenCells)\n # values of the output layer\n for j in range(self.outputNodeNum):\n total = 0.0\n for i in range(self.hiddenLayerNodeNum):\n total += self.hiddenCells[self.hiddenLayerNum - 1][i] * self.allWeight[self.hiddenLayerNum][i][j]\n self.outputCell[j] = total + self.allBias[self.hiddenLayerNum][j]\n # compute the loss\n yhat = np.array(self.outputCell)\n label = np.array(np.sin(self.inputCell))\n error = ((yhat - label) ** 2).sum()\n print(\"The loss is \" + str(error))\n return self.outputCell\n\n def backPropagate(self, case, label):\n self.forwardPropagate(case)\n # output-layer error\n outputDelta = [0.0] * self.outputNodeNum\n for i in range(self.outputNodeNum):\n error = label[i] - self.outputCell[i]\n outputDelta[i] = error # the last layer applies no activation function\n # * sigmoid_derivative(self.outputCell[i])\n # hidden-layer errors; build each row separately so the rows are not aliases of one shared list\n allHiddenDeltas = [[0.0] * self.hiddenLayerNodeNum for _ in range(self.hiddenLayerNum)]\n for k in range(self.hiddenLayerNum - 1, -1, -1): # iterate backwards down to 0\n if k == self.hiddenLayerNum - 1: # error of the last hidden layer\n for h in range(self.hiddenLayerNodeNum):\n error = 0.0\n for o in range(self.outputNodeNum):\n error += outputDelta[o] * self.allWeight[self.hiddenLayerNum][h][o]\n allHiddenDeltas[k][h] = sigmoid_derivative(self.hiddenCells[k][h]) * error\n else:\n for h_low in range(self.hiddenLayerNodeNum):\n error = 0.0\n for h_high in range(self.hiddenLayerNodeNum):\n error += allHiddenDeltas[k + 1][h_high] * self.allWeight[k + 1][h_low][h_high]\n allHiddenDeltas[k][h_low] = sigmoid_derivative(self.hiddenCells[k][h_low]) * error\n # update the weights\n for k in range(len(self.allWeight) - 1, -1, -1):\n if k == len(self.allWeight) - 1: # adjust weights between the last hidden layer and the output layer\n for h in range(self.hiddenLayerNodeNum):\n for o in range(self.outputNodeNum):\n # `change` keeps the update from multiplying three floats in a single expression\n change = outputDelta[o] * self.hiddenCells[k - 1][h]\n 
self.allWeight[k][h][o] = (1 - self.weightDecay) * self.allWeight[k][h][o] + change * self.r\n elif k == 0: # adjust weights between the input layer and the first hidden layer\n for i in range(self.inputNodeNum):\n for h in range(self.hiddenLayerNodeNum):\n change = allHiddenDeltas[0][h] * self.inputCell[i]\n self.allWeight[k][i][h] = (1 - self.weightDecay) * self.allWeight[k][i][h] + change * self.r\n else: # adjust weights between the remaining hidden layers\n for h_low in range(self.hiddenLayerNodeNum):\n for h_high in range(self.hiddenLayerNodeNum):\n change = allHiddenDeltas[k][h_high] * self.hiddenCells[k - 1][h_low]\n self.allWeight[k][h_low][h_high] = (1 - self.weightDecay) * self.allWeight[k][h_low][\n h_high] + change * self.r\n # update the biases\n for k in range(len(self.allBias) - 1, -1, -1):\n if k == len(self.allBias) - 1: # adjust the output-layer bias\n for o in range(self.outputNodeNum):\n self.allBias[k][o] += self.r * outputDelta[o]\n else:\n for h in range(self.hiddenLayerNodeNum):\n self.allBias[k][h] += self.r * allHiddenDeltas[k][h]\n\n def train(self, cases, labels, times=1000):\n for i in range(times):\n for j in range(len(cases)):\n case = cases[j]\n label = labels[j]\n self.backPropagate(case, label)\n\n\nif __name__ == '__main__':\n bpNetwork = BPNetWork(1, 1, 50, 1, 0.03, 0)\n cases = [] # training set\n labels = [] # ground-truth values\n for i in range(1000):\n tmp = random.uniform(-1, 1)\n cases.append([tmp * np.pi])\n labels.append([np.sin(tmp * np.pi)])\n bpNetwork.train(cases, labels, 2000)\n test = []\n testResult = []\n for i in range(-100, 101, 1):\n test.append([i * np.pi / 100])\n for case in test:\n print(\"case: \" + str(case))\n out = bpNetwork.forwardPropagate(case)\n print(\"out: \" + str(out))\n testResult.append(out[0])\n plt.figure()\n x = np.arange(-1.0, 1.0, 0.03)\n sinx, = plt.plot(x * np.pi, np.sin(x * np.pi), color='red')\n testData = []\n for i in range(len(test)):\n testData.append(test[i][0])\n mysinx, = plt.plot(testData, testResult, color='green')\n plt.legend(handles=[sinx, mysinx, ], labels=['real sinx', 'my sinx'], loc='best')\n plt.show()\n","sub_path":"BP/fitSinx.py","file_name":"fitSinx.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377351495","text":"import base64\nimport os\nimport requests\n\n# The server URL specifies the endpoint of your server running the ResNet\n# model with the name \"resnet\" and using the predict interface.\n# SERVER_URL = 'http://localhost:8501/v1/models/resnet:predict'\nSERVER_URL = 'http://localhost:9091/v1/models/violence:predict'\n\n# The image URL is the location of the image we should send to the server\nIMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg'\nIMAGE_PATH = os.path.expanduser('~/Desktop/fire_002626.jpg')\nIMAGE_FOLDER = os.path.expanduser('~/Desktop/violence_images')\n\n\ndef read_imagesb64():\n files = os.listdir(IMAGE_FOLDER)\n data = []\n for name in files:\n if not name.endswith('.jpg'):\n continue\n name = os.path.join(IMAGE_FOLDER, name)\n with open(name, 'rb') as f:\n img_data = f.read()\n data.append(base64.b64encode(img_data, b'-_').decode('utf-8'))\n return data\n\n\ndef test_multiple_images():\n jpeg_data = read_imagesb64()\n for data in jpeg_data:\n predict_request = '{\"instances\" : [\"%s\"]}' % data\n\n response = requests.post(SERVER_URL, data=predict_request)\n response.raise_for_status()\n prediction = response.json()\n print(prediction)\n\n\ndef main():\n # Download the image\n # dl_request = requests.get(IMAGE_URL, stream=True)\n # dl_request.raise_for_status()\n\n # Compose a JSON 
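The finished `BPNetWork` above runs every multiply in nested Python loops. As a point of comparison, here is the same 1-50-1 forward pass (sigmoid hidden layer, identity output) in vectorised NumPy; the weight shapes and init ranges mirror the `__main__` block above, everything else is illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)

def forward(x, weights, biases):
    # Sigmoid on every hidden layer, identity on the output layer,
    # matching the loop-based network above.
    a = x
    for W, b in zip(weights[:-1], biases[:-1]):
        a = 1.0 / (1.0 + np.exp(-(a @ W + b)))
    return a @ weights[-1] + biases[-1]

# Shapes: 1 input -> 50 hidden -> 1 output, as in BPNetWork(1, 1, 50, 1, ...).
weights = [rng.uniform(0, 1e-3, (1, 50)), rng.uniform(0, 1e-3, (50, 1))]
biases = [rng.uniform(-1e-3, 0, 50), rng.uniform(-1e-3, 0, 1)]
print(forward(np.array([0.5 * np.pi]), weights, biases))
```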
Predict request (send JPEG image in base64).\n # jpeg_bytes = base64.b64encode(dl_request.content).decode('utf-8')\n with open(IMAGE_PATH, 'rb') as f:\n data = f.read()\n jpeg_bytes = base64.b64encode(data, b'-_').decode('utf-8')\n\n # input instances are a list of strings.\n predict_request = '{\"instances\" : [\"%s\"]}' % jpeg_bytes\n\n # Send few requests to warm-up the model.\n for _ in range(3):\n response = requests.post(SERVER_URL, data=predict_request)\n response.raise_for_status()\n\n # Send few actual requests and report average latency.\n total_time = 0\n num_requests = 1\n for _ in range(num_requests):\n response = requests.post(SERVER_URL, data=predict_request)\n response.raise_for_status()\n total_time += response.elapsed.total_seconds()\n prediction = response.json()\n print(prediction)\n\n\ndef run(image_path):\n SERVER_URL = 'http://101.91.169.58:9000/1/models/violence_det'\n with open(image_path, 'rb') as f:\n data = f.read()\n\n data64 = base64.encodebytes(data)\n\n predict_request = '{\"instances\":%s}' % str(data64)\n response = requests.post(SERVER_URL, data=predict_request)\n prediction = response.json()['predict'][0]\n\n print(prediction)\n\n\n\nif __name__ == '__main__':\n # main()\n test_multiple_images()\n\n # \n image_path = r'D:/data020_004367.jpg'\n run(image_path)\n","sub_path":"tfvortex/deploy/test_tfserving.py","file_name":"test_tfserving.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541644038","text":"# %load q06_bowled_players/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n\n\n# Your Solution\ndef bowled_out(data=data):\n\n bowled_out = data['innings'][1]['2nd innings']['deliveries']\n #print(bowled_out)\n\n bowled_players = []\n \n for i in bowled_out:\n# print(i)\n for ball,wicket in i.items():\n# print(wicket['wicket'])\n if 'wicket' in wicket and wicket['wicket']['kind']=='bowled':\n# print(wicket['wicket']['player_out'])\n bp=wicket['wicket']['player_out']\n print(bp)\n bp= bowled_players.append(bp)\n \n return bowled_players\nprint(bowled_out())\n\n\n\n","sub_path":"q06_bowled_players/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186451140","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\chatette_qiu\\__init__.py\n# Compiled at: 2019-03-27 03:00:31\n# Size of source mod 2**32: 410 bytes\n\"\"\"\nModule `chatette_qiu`\nA generator of example sentences based on templates.\n\"\"\"\nimport pkg_resources\ntry:\n __version__ = pkg_resources.require('chatette_qiu')[0].version\nexcept pkg_resources.DistributionNotFound:\n __version__ = \"\"","sub_path":"pycfiles/chatette_qiu-1.4.4-py3.6/__init__.cpython-36.py","file_name":"__init__.cpython-36.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535383963","text":"# coding=utf-8\n\"\"\"\nModule\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport random\n\nimport datacube.scripts.cli_app\nimport logging\nfrom click.testing import CliRunner\n\nfrom datacube.index.postgres import _dynamic\nfrom datacube.index.postgres.tables._core 
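The TF Serving client above splices a urlsafe-base64 string directly into the `instances` list, which only works if the exported graph decodes that exact alphabet itself. TF Serving's documented REST convention for binary inputs is to wrap standard base64 in a `{"b64": ...}` object, which the server decodes into a string tensor. A hedged sketch follows (the URL is reused from the script; whether this applies depends on the model's serving signature):

```python
import base64
import json
import requests

SERVER_URL = "http://localhost:9091/v1/models/violence:predict"  # from the script above

def predict_image(path):
    with open(path, "rb") as f:
        payload = base64.b64encode(f.read()).decode("utf-8")  # standard alphabet
    body = json.dumps({"instances": [{"b64": payload}]})
    resp = requests.post(SERVER_URL, data=body, timeout=10)
    resp.raise_for_status()
    return resp.json()  # standard TF Serving replies carry a "predictions" key

if __name__ == "__main__":
    print(predict_image("example.jpg"))  # path is illustrative
```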
import drop_db, has_schema, SCHEMA_NAME\nfrom pathlib import Path\n\n_LOG = logging.getLogger(__name__)\n\nEXAMPLE_STORAGE_TYPE_DOCS = Path(__file__).parent.parent. \\\n joinpath('docs', 'config_samples', 'storage_types').glob('**/*.yaml')\n\nEXAMPLE_DATASET_TYPE_DOCS = Path(__file__).parent.parent. \\\n joinpath('docs', 'config_samples', 'dataset_types').glob('**/*.yaml')\n\n# Documents that shouldn't be accepted as mapping docs.\nINVALID_MAPPING_DOCS = Path(__file__).parent.parent. \\\n joinpath('docs').glob('*')\n\n\ndef _run_cli(global_integration_cli_args, cli_method, opts, catch_exceptions=False, expect_success=True):\n exe_opts = list(global_integration_cli_args)\n exe_opts.extend(opts)\n runner = CliRunner()\n result = runner.invoke(\n cli_method,\n exe_opts,\n catch_exceptions=catch_exceptions\n )\n if expect_success:\n assert result.exit_code == 0, \"Error for %r. output: %r\" % (opts, result.output)\n return result\n\n\ndef _dataset_type_count(db):\n with db.connect() as connection:\n return len(list(connection.get_all_dataset_types()))\n\n\ndef test_add_example_dataset_types(global_integration_cli_args, db, default_metadata_type):\n \"\"\"\n Add example mapping docs, to ensure they're valid and up-to-date.\n\n We add them all to a single database to check for things like duplicate ids.\n\n :type global_integration_cli_args: tuple[str]\n :type db: datacube.index.postgres._api.PostgresDb\n \"\"\"\n existing_mappings = _dataset_type_count(db)\n\n print('{} mappings'.format(existing_mappings))\n for mapping_path in EXAMPLE_DATASET_TYPE_DOCS:\n print('Adding mapping {}'.format(mapping_path))\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'product', 'add',\n str(mapping_path)\n ]\n )\n assert result.exit_code == 0, \"Error for %r. 
output: %r\" % (str(mapping_path), result.output)\n mappings_count = _dataset_type_count(db)\n assert mappings_count > existing_mappings, \"Mapping document was not added: \" + str(mapping_path)\n existing_mappings = mappings_count\n\n\ndef test_error_returned_on_invalid(global_integration_cli_args, db):\n \"\"\"\n :type global_integration_cli_args: tuple[str]\n :type db: datacube.index.postgres._api.PostgresDb\n \"\"\"\n assert _dataset_type_count(db) == 0\n\n for mapping_path in INVALID_MAPPING_DOCS:\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'product', 'add',\n str(mapping_path)\n ],\n # TODO: Make this false when the cli is updated to print errors (rather than uncaught exceptions).\n catch_exceptions=True,\n expect_success=False\n )\n assert result.exit_code != 0, \"Success return code for invalid document.\"\n assert _dataset_type_count(db) == 0, \"Invalid document was added to DB\"\n\n\ndef test_config_check(global_integration_cli_args, local_config):\n \"\"\"\n :type global_integration_cli_args: tuple[str]\n :type local_config: datacube.config.LocalConfig\n \"\"\"\n\n # This is not a very thorough check, we just check to see that\n # it prints something vaguely related and does not error-out.\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'system', 'check'\n ]\n )\n assert result.exit_code == 0\n host_line = 'Host: {}'.format(local_config.db_hostname)\n assert host_line in result.output\n user_line = 'User: {}'.format(local_config.db_username)\n assert user_line in result.output\n\n\ndef test_list_users_does_not_fail(global_integration_cli_args, local_config):\n \"\"\"\n :type global_integration_cli_args: tuple[str]\n :type local_config: datacube.config.LocalConfig\n \"\"\"\n # We don't want to make assumptions about available users during test runs.\n # (They are host-global, not specific to the database)\n # So we're just checking that it doesn't fail (and the SQL etc is well formed)\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'list'\n ]\n )\n assert result.exit_code == 0\n\n\ndef test_db_init_noop(global_integration_cli_args, local_config, ls5_telem_type):\n # Run on an existing database.\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-vv', 'system', 'init'\n ]\n )\n assert result.exit_code == 0\n assert 'Updated.' in result.output\n # It should not rebuild indexes by default\n assert 'Dropping index: dix_{}'.format(ls5_telem_type.name) not in result.output\n\n\ndef test_db_init_rebuild(global_integration_cli_args, local_config, ls5_telem_type):\n\n # We set the field creation logging to debug, as we assert its logging output below.\n _dynamic._LOG.setLevel(logging.DEBUG)\n\n # Run on an existing database.\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-vv', 'system', 'init', '--rebuild'\n ]\n )\n assert result.exit_code == 0\n assert 'Updated.' 
in result.output\n # It should have recreated views and indexes.\n assert 'Dropping index: dix_{}'.format(ls5_telem_type.name) in result.output\n assert 'Creating index: dix_{}'.format(ls5_telem_type.name) in result.output\n assert 'Dropping view: {schema}.dv_{name}_dataset'.format(\n schema=SCHEMA_NAME, name=ls5_telem_type.name\n ) in result.output\n assert 'Creating view: {schema}.dv_{name}_dataset'.format(\n schema=SCHEMA_NAME, name=ls5_telem_type.name\n ) in result.output\n\n\ndef test_db_init(global_integration_cli_args, db, local_config):\n with db.connect() as connection:\n drop_db(connection._connection)\n\n assert not has_schema(db._engine, connection._connection)\n\n # Run on an empty database.\n cli_method = datacube.scripts.cli_app.cli\n result = _run_cli(global_integration_cli_args, cli_method, [\n '-v', 'system', 'init'\n ])\n assert result.exit_code == 0\n assert 'Created.' in result.output\n\n with db.connect() as connection:\n assert has_schema(db._engine, connection._connection)\n\n\ndef test_user_creation(global_integration_cli_args, db, default_metadata_type):\n \"\"\"\n Add a user, grant them, delete them.\n\n :type global_integration_cli_args: tuple[str]\n :type db: datacube.index.postgres._api.PostgresDb\n \"\"\"\n existing_mappings = _dataset_type_count(db)\n\n print('{} mappings'.format(existing_mappings))\n\n test_number = random.randint(111111, 999999)\n user_name = 'test_user_{}'.format(test_number)\n\n # No user exists.\n assert_no_user(global_integration_cli_args, user_name)\n\n # Create them\n _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'create', 'ingest', user_name\n ]\n )\n assert_user_with_role(global_integration_cli_args, 'ingest', user_name)\n\n # Grant them 'manage' permission\n _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'grant', 'manage', user_name\n ]\n )\n assert_user_with_role(global_integration_cli_args, 'manage', user_name)\n\n # Delete them\n _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'delete', user_name\n ]\n )\n assert_no_user(global_integration_cli_args, user_name)\n\n\ndef assert_user_with_role(global_integration_cli_args, role, user_name):\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'list'\n ]\n )\n assert '{}\\t{}'.format(role, user_name) in result.output\n\n\ndef assert_no_user(global_integration_cli_args, username):\n result = _run_cli(\n global_integration_cli_args,\n datacube.scripts.cli_app.cli,\n [\n '-v', 'user', 'list'\n ]\n )\n assert username not in result.output\n","sub_path":"integration_tests/test_config_tool.py","file_name":"test_config_tool.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604175166","text":"from bookyd.app import app\nfrom functools import wraps\nfrom bs4 import BeautifulSoup\nfrom flask import abort, request\nfrom gunicorn.app.base import BaseApplication\nfrom gunicorn.six import iteritems\nimport hashlib\nimport requests\n\n\ndef build_screenshot_url(url):\n \"\"\"\n Build a screenshot url\n \"\"\"\n secret_key = hashlib.md5(\n bytes(url+app.config['SCREENSHOTLAYER_SECRET'], encoding=\"UTF-8\")\n ).hexdigest()\n url = 'http://api.screenshotlayer.com/api/capture?access_key={api_key}&url={url}&secret_key={secret_key}&width={width}'.format(\n api_key=app.config['SCREENSHOTLAYER_API_KEY'],\n url=url,\n 
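The datacube tests above drive the CLI through Click's `CliRunner`, asserting on the exit code and captured output rather than spawning a subprocess. A minimal self-contained example of that testing pattern:

```python
import click
from click.testing import CliRunner

@click.command()
@click.option("-v", "verbose", count=True)
@click.argument("action")
def cli(verbose, action):
    click.echo("action=%s verbose=%d" % (action, verbose))

def test_cli_runs():
    # catch_exceptions=False surfaces tracebacks instead of burying them
    # in result.exception, as the helper in the tests above also allows.
    result = CliRunner().invoke(cli, ["-v", "check"], catch_exceptions=False)
    assert result.exit_code == 0, result.output
    assert "action=check" in result.output

if __name__ == "__main__":
    test_cli_runs()
    print("ok")
```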
width=800,\n secret_key=secret_key\n )\n return url\n\n\ndef get_site_title(url):\n \"\"\"\n Extract the title tag from a page\n \"\"\"\n try:\n html = requests.get(url).text\n soup = BeautifulSoup(html, \"html.parser\")\n return soup.title.string\n except:\n return url\n\n\ndef validate_json(f):\n \"\"\"\n Checks if a request's content-type is \"application/json\"\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.is_json:\n return f(*args, **kwargs)\n else:\n return abort(400)\n return decorated_function\n\n\nclass StandaloneServer(BaseApplication):\n\n def __init__(self, app, options=None):\n self.options = options or {}\n self.application = app\n super(StandaloneServer, self).__init__()\n\n def load_config(self):\n config = dict([(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return self.application","sub_path":"bookyd/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284902876","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Given a string S and a string T, find the minimum substring of S that contains all the characters of T.\n\n# Example:\n\n# Input: S = \"ADOBECODEBANC\", T = \"ABC\"\n# Output: \"BANC\"\n\n# Notes:\n\n# If no such substring exists in S, return the empty string \"\".\n# If such a substring exists in S, it is guaranteed to be the unique answer.\n\n\nclass Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n s_len = len(s)\n t_len = len(t)\n\n count_dict = {}\n current_dict = {}\n\n for char in t:\n count_dict.setdefault(char, 0)\n count_dict[char] += 1\n\n current_dict.setdefault(char, 0)\n\n start = 0\n end = t_len - 1\n for char in s[start: end + 1]:\n if char in count_dict:\n current_dict[char] += 1\n\n def check(count_dict, current_dict):\n for char in count_dict:\n if current_dict.get(char, 0) < count_dict[char]:\n return False\n\n return True\n\n if check(count_dict, current_dict):\n return s[start: end + 1]\n\n min_string = ''\n min_len = s_len + 1\n\n add = True\n while start <= s_len - t_len:\n if end - start + 1 <= t_len:\n add = True\n\n if end >= s_len - 1:\n add = False\n\n if add:\n end += 1\n current = s[end]\n if current in count_dict:\n current_dict[current] += 1\n\n else:\n current = s[start]\n if current in count_dict:\n current_dict[current] -= 1\n start += 1\n add = True\n\n if check(count_dict, current_dict):\n add = False\n current_len = end - start + 1\n if current_len < min_len:\n min_len = current_len\n min_string = s[start: end + 1]\n\n return min_string\n\n\nif __name__ == \"__main__\":\n s = Solution()\n\n result = s.minWindow(\n \"of_characters_and_as\",\n \"aas\"\n )\n print(result)\n","sub_path":"leetcode/76.py","file_name":"76.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125997915","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 11:20:36 2020\n\n@author: Vicky\n\nNeural PDE - Tensorflow 1.14\nModule : Spatio-Temporal Residual Sampler\n\"\"\"\nimport numpy as np\nimport tensorflow as tf \nfrom pyDOE import lhs\n\n\nclass Sampler(object):\n \n def __init__(self, N_samples, subspace_N):\n\n self.N = N_samples\n self.ssN = subspace_N\n \n def residual(self, n, t_bounds):\n residual_across_time = []\n \n for ii in range(n-1):\n lb_temp = np.asarray([t_bounds[ii], self.lb[1]])\n ub_temp = 
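The `minWindow` record above tracks the window with an `add` flag and two dicts. The same O(|S| + |T|) sliding-window algorithm is usually written with a single `Counter` and a `missing` count, which removes the flag juggling; a compact reference version:

```python
from collections import Counter

def min_window(s, t):
    """Smallest substring of s containing all chars of t (with multiplicity)."""
    need = Counter(t)
    missing = len(t)                   # chars still required in the window
    best = ""
    left = 0
    for right, ch in enumerate(s, 1):  # right is one past the window end
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        if missing == 0:               # window covers t; shrink from the left
            while need[s[left]] < 0:
                need[s[left]] += 1
                left += 1
            if not best or right - left < len(best):
                best = s[left:right]
            # release the leftmost required char to look for a smaller window
            need[s[left]] += 1
            missing += 1
            left += 1
    return best

print(min_window("ADOBECODEBANC", "ABC"))  # BANC
```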
np.asarray([t_bounds[ii+1], self.ub[1]])\n X_f = lb_temp + (ub_temp-lb_temp)*lhs(self.input_size, self.ssN) \n \n str_val = self.sess.run(self.residual_val, {self.X_f: X_f})\n residual_across_time.append(str_val)\n\n return np.asarray(residual_across_time)\n\n \n def str_sampler(self):\n n = int(self.N/self.ssN)\n t_bounds = np.linspace(self.lb[0], self.ub[0], n)\n \n residuals = self.residual(n, t_bounds)\n t_range_idx = np.argmax(residuals)\n print('\\n')\n print(\"Time_Index : {}\".format(t_range_idx))\n \n lb_temp = np.asarray([t_bounds[t_range_idx], self.lb[1]])\n ub_temp = np.asarray([t_bounds[t_range_idx+1], self.ub[1]])\n X_f = lb_temp + (ub_temp-lb_temp)*lhs(self.input_size, self.N) \n \n return X_f\n \n \n def uniform_sampler(self):\n X_f = self.lb + (self.ub-self.lb)*lhs(self.input_size, self.N) \n return X_f\n\n \n \n \n \n \n","sub_path":"Neural_PDE/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257028143","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom pathlib import Path\n\nimport pytest\nfrom PIL import Image, ImageChops, ImageStat\n\nimport webtraversallibrary as wtl\nfrom webtraversallibrary.graphics import crop_image, draw_rect, draw_text\n\nORIGINAL_DIR = Path(\"tests/data/\")\n\n\ndef equal_images(img1, img2):\n \"\"\"Adapted from Nicolas Hahn:\n https://github.com/nicolashahn/diffimg/blob/master/diffimg/__init__.py\n \"\"\"\n if img1.mode != img2.mode or img1.size != img2.size or img1.getbands() != img2.getbands():\n return False\n\n diff_img = ImageChops.difference(img1, img2)\n stat = ImageStat.Stat(diff_img)\n return (100 * sum(stat.mean)) / (len(stat.mean) * 255) < 0.01\n\n\ndef test_crop_image():\n img = Image.open(ORIGINAL_DIR / \"cat.png\")\n rect = wtl.Rectangle(wtl.Point(50, 70), wtl.Point(130, 170))\n result = crop_image(img, rect)\n\n reference = Image.open(ORIGINAL_DIR / \"crop.png\")\n\n assert equal_images(result, reference)\n\n with pytest.raises(ValueError):\n rect = wtl.Rectangle.empty()\n crop_image(img, rect)\n\n with pytest.raises(ValueError):\n rect = wtl.Rectangle(wtl.Point(257, 257), wtl.Point(270, 280))\n crop_image(img, rect)\n\n\ndef test_draw_rect():\n img = Image.open(ORIGINAL_DIR / \"cat.png\")\n rect_1 = wtl.Rectangle(wtl.Point(40, 50), wtl.Point(70, 80))\n rect_2 = wtl.Rectangle(wtl.Point(200, 210), wtl.Point(220, 230))\n draw_rect(img, rect_1, wtl.Color(50, 150, 250), 5)\n draw_rect(img, rect_2, wtl.Color(250, 30, 30), 5)\n\n reference = Image.open(ORIGINAL_DIR / \"rect.png\")\n\n assert equal_images(img, reference)\n\n\ndef test_draw_text():\n img = Image.open(ORIGINAL_DIR / \"cat.png\")\n draw_text(img, wtl.Point(10, 10), wtl.Color(50, 150, 250), 20, 
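The `Sampler` above scales unit-hypercube Latin-hypercube points into a [lb, ub] box with `lb + (ub - lb) * lhs(d, n)`; note that its `residual` method also reads attributes such as `self.lb`, `self.sess` and `self.residual_val` that `__init__` never sets, so the class only works mixed into a model that provides them. The scaling idiom in isolation, assuming the same pyDOE package:

```python
import numpy as np
from pyDOE import lhs  # same dependency as the sampler above

def lhs_sample(lb, ub, n):
    # lhs() returns n points in the unit hypercube; scale them into [lb, ub].
    lb, ub = np.asarray(lb, float), np.asarray(ub, float)
    return lb + (ub - lb) * lhs(lb.size, samples=n)

pts = lhs_sample([0.0, -1.0], [1.0, 1.0], 5)  # 5 points in (t, x) space
print(pts.shape, pts.min(axis=0), pts.max(axis=0))
```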
\"This is a cat\")\n draw_text(img, wtl.Point(50, 200), wtl.Color(0, 0, 0), 50, \"Not a dog\")\n\n reference = Image.open(ORIGINAL_DIR / \"text.png\")\n\n assert equal_images(img, reference)\n","sub_path":"tests/graphics_test.py","file_name":"graphics_test.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"543642279","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\nimport base64\nimport httplib\nimport logging\nimport re\n\nfrom google.appengine.api import search\nfrom google.appengine.ext import ndb\n\nimport requests\nfrom framework.bizz.job import run_job\nfrom framework.models.session import Session\nfrom framework.plugin_loader import get_plugins\nfrom framework.utils import convert_to_str, chunks\nfrom mcfw.cache import cached\nfrom mcfw.exceptions import HttpNotFoundException\nfrom mcfw.rpc import returns, arguments\nfrom plugins.its_you_online_auth.models import Profile, ProfileAppEmailMapping\nfrom plugins.its_you_online_auth.plugin_consts import NAMESPACE\nfrom transliterate import slugify\n\nPROFILE_INDEX = search.Index('profile', namespace=NAMESPACE)\n\n\ndef _get_best_session(sessions):\n sorted_sessions = sorted(sessions, key=lambda s: len(s.scopes))\n return sorted_sessions[0] if sorted_sessions and sorted_sessions[0].jwt else None\n\n\ndef set_user_information(profile_key, session_key=None):\n from plugins.its_you_online_auth.bizz.authentication import get_itsyouonline_client_from_jwt\n iyo_username = profile_key.id()\n if session_key:\n session = session_key.get()\n else:\n sessions = Session.list_active_user(iyo_username)\n session = _get_best_session(sessions)\n if session:\n client = get_itsyouonline_client_from_jwt(session.jwt)\n try:\n data = client.users.GetUserInformation(convert_to_str(session.user_id)).json()\n except requests.HTTPError as e:\n logging.warn('%s: %s', e.response.status_code, e.response.content)\n if e.response.status_code != httplib.FORBIDDEN:\n raise e\n return\n logging.info('Saving user information %s', data)\n store_user_information(data)\n else:\n logging.info('No session found for user %s, not storing user information', iyo_username)\n\n\ndef store_user_information(data):\n profile = Profile.create_key(data['username']).get()\n profile.information = data\n profile.put()\n index_profile(profile, _get_extra_profile_fields(profile))\n\n\ndef _get_extra_profile_fields(profile):\n fields = []\n for plugin in get_plugins():\n if hasattr(plugin, 'get_extra_profile_fields'):\n fields.extend(plugin.get_extra_profile_fields(profile))\n return fields\n\n\ndef index_all_profiles():\n remove_all_from_index(PROFILE_INDEX)\n run_job(_get_all_profiles, [], index_profile, [])\n\n\ndef _get_all_profiles():\n return Profile.query()\n\n\ndef index_profile(profile_or_key, extra_profile_fields=None):\n # type: (ndb.Key, list[search.Field]) -> list[search.PutResult]\n 
profile = profile_or_key.get() if isinstance(profile_or_key, ndb.Key) else profile_or_key\n if extra_profile_fields is None:\n extra_profile_fields = _get_extra_profile_fields(profile)\n logging.debug('Indexing profile %s\\nExtra fields: %s', profile.username, extra_profile_fields)\n document = create_profile_document(profile, extra_profile_fields)\n return PROFILE_INDEX.put(document)\n\n\ndef _add_slug_fields(key, value):\n if not value:\n return []\n value = value.lower().strip()\n return [\n search.TextField(name=key, value=value),\n search.TextField(name='%s_slug' % key, value=slugify(value) or value)\n ]\n\n\ndef create_profile_document(profile, extra_profile_fields):\n # type: (Profile, list[search.Field]) -> search.Document\n fields = [search.AtomField(name='username', value=profile.username.lower())]\n # complete this if needed\n if profile.info:\n fields.extend(_add_slug_fields('firstname', profile.info.firstname))\n fields.extend(_add_slug_fields('lastname', profile.info.lastname))\n if profile.info.validatedemailaddresses:\n for i, mail in enumerate(profile.info.validatedemailaddresses):\n fields.append(search.AtomField(name='validatedemailaddresses_%d' % i, value=mail.emailaddress))\n mails = ' '.join([email.emailaddress for email in profile.info.validatedemailaddresses])\n fields.append(search.TextField(name='validatedemailaddresses', value=mails))\n if profile.info.validatedphonenumbers:\n for i, phone in enumerate(profile.info.validatedphonenumbers):\n fields.append(search.AtomField(name='validatedphonenumbers_%d' % i, value=phone.phonenumber))\n phones = ' '.join([phone.phonenumber for phone in profile.info.validatedphonenumbers])\n fields.append(search.TextField(name='validatedphonenumbers', value=phones))\n else:\n # Adding username as firstname/lastname for sorting reasons\n fields.append(search.TextField(name='firstname_slug', value=profile.username.lower()))\n fields.append(search.TextField(name='lastname_slug', value=profile.username.lower()))\n fields.extend(extra_profile_fields)\n return search.Document(_encode_doc_id(profile), fields)\n\n\ndef _encode_doc_id(profile):\n # doc id must be ascii, base64 encode it\n return base64.b64encode(profile.username.encode('utf-8'))\n\n\ndef _decode_doc_id(doc_id):\n # type: (unicode) -> unicode\n return base64.b64decode(doc_id)\n\n\ndef normalize_search_string(search_string):\n return re.sub(r'[,\\\"\\+\\-:><=\\\\()~]', u' ', search_string)\n\n\ndef get_profile(username):\n \"\"\"\n Args:\n username (unicode)\n Returns:\n Profile\n\n Raises:\n HttpNotFoundException in case the profile was not found\n \"\"\"\n profile = Profile.create_key(username).get()\n if not profile:\n raise HttpNotFoundException('profile_not_found', {'username': username})\n return profile\n\n\ndef get_or_create_profile(username, app_email=None):\n profile_key = Profile.create_key(username)\n profile = profile_key.get() or Profile(key=profile_key)\n if app_email:\n profile.app_email = app_email\n mapping_key = ProfileAppEmailMapping.create_key(app_email)\n mapping_key.get() or ProfileAppEmailMapping(key=mapping_key, username=username).put()\n return profile\n\n\ndef search_profiles(query='', page_size=20, cursor=None):\n # type: (unicode, int, unicode) -> tuple[list[Profile], search.Cursor, bool]\n sort_expressions = [search.SortExpression(expression='firstname_slug', direction=search.SortExpression.ASCENDING),\n search.SortExpression(expression='lastname_slug', direction=search.SortExpression.ASCENDING),\n search.SortExpression(expression='username', 
direction=search.SortExpression.ASCENDING)]\n options = search.QueryOptions(limit=page_size,\n cursor=search.Cursor(cursor),\n sort_options=search.SortOptions(expressions=sort_expressions),\n ids_only=True)\n search_results = PROFILE_INDEX.search(search.Query(query, options=options)) # type: search.SearchResults\n results = search_results.results # type: list[search.ScoredDocument]\n keys = []\n for result in results:\n username = _decode_doc_id(result.doc_id)\n keys.append(Profile.create_key(username))\n profiles = ndb.get_multi(keys) if keys else []\n return profiles, search_results.cursor, search_results.cursor is not None\n\n\n@cached(1, lifetime=0)\n@returns(unicode)\n@arguments(rogerthat_email=unicode)\ndef get_username_from_rogerthat_email(rogerthat_email):\n # type: (unicode) -> unicode\n mapping = ProfileAppEmailMapping.create_key(rogerthat_email).get()\n return mapping and mapping.username\n\n\n@returns(dict)\n@arguments(rogerthat_emails=[unicode])\ndef get_usernames_from_rogerthat_emails(rogerthat_emails):\n # type: (list[unicode]) -> dict[unicode, unicode]\n result = {}\n mappings = ndb.get_multi([ProfileAppEmailMapping.create_key(app_email) for app_email in rogerthat_emails])\n for app_email, mapping in zip(rogerthat_emails, mappings):\n if mapping:\n result[app_email] = mapping.username\n else:\n logging.error('No ProfileAppEmailMapping found for app email %s!', app_email)\n result[app_email] = None\n return result\n\n\n@cached(1, lifetime=0)\n@returns(unicode)\n@arguments(username=unicode)\ndef get_rogerthat_email_from_username(username):\n # type: (unicode) -> unicode\n profile = Profile.create_key(username).get()\n return profile and profile.app_email\n\n\ndef remove_all_from_index(index):\n # type: (search.Index) -> long\n total = 0\n while True:\n result = index.search(search.Query(u'', options=search.QueryOptions(ids_only=True, limit=1000)))\n if not result.results:\n break\n logging.debug('Deleting %d documents from %s' % (len(result.results), index))\n total += len(result.results)\n for rpc in [index.delete_async([r.doc_id for r in chunk]) for chunk in chunks(result.results, 200)]:\n rpc.get_result()\n logging.info('Deleted %d documents from %s', total, index)\n return total\n","sub_path":"plugins/its_you_online_auth/bizz/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":9516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"134665424","text":"from numba import cuda as nbcuda\nimport numpy as np\n\nfrom device_funcs import *\n\ndef divUp(a, b):\n\treturn (a + b - 1)/b\n\n@nbcuda.jit\ndef renderKernel(d_out, d_vol, w, h, volSize, method, zs, theta, threshold, dist, EPS, NUMSTEPS):\n\tc = nbcuda.blockIdx.x*nbcuda.blockDim.x + nbcuda.threadIdx.x\n\tr = nbcuda.blockIdx.y*nbcuda.blockDim.y + nbcuda.threadIdx.y\n\ti = (c + r*w)\n\n\tif c >= w or r >= h:\n\t\treturn\n\n\tbackground = (64, 0, 64, 0)\n\n\tray_source = (0.0, 0.0, -zs)\n\n\tcurrent_pixel = scrIdxToPos(c,\n\t\t\t\t\t\t\t\tr,\n\t\t\t\t\t\t\t\tw,\n\t\t\t\t\t\t\t\th,\n\t\t\t\t\t\t\t\t2.0*volSize[2] - zs)\n\n\tsource = yRotate(ray_source, theta)\n\tpix = yRotate(current_pixel, theta)\n\n\tt0 = 0.0\n\tt1 = 0.0\n\n\tpixRay = (source, (pix[0] - source[0],\n\t\t\t\t\t pix[1] - source[1],\n\t\t\t\t\t pix[2] - source[2]))\n\n\tcenter = (volSize[0]/2.0,\n\t\t\t volSize[1]/2.0,\n\t\t\t volSize[2]/2.0)\n\n\tboxmin = (-center[0],\n\t\t\t -center[1],\n\t\t\t -center[2])\n\n\tboxmax = (volSize[0] - center[0],\n\t\t\t volSize[1] - center[1],\n\t\t\t 
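The profile module above base64-encodes usernames because App Engine search document ids must be ASCII; `_encode_doc_id` and `_decode_doc_id` are the round trip. A Python 3 sketch of the same idea (the original returns bytes, Python 2 style):

```python
import base64

def encode_doc_id(username):
    # Search doc ids must be ASCII; base64 the UTF-8 username.
    return base64.b64encode(username.encode("utf-8")).decode("ascii")

def decode_doc_id(doc_id):
    return base64.b64decode(doc_id).decode("utf-8")

doc_id = encode_doc_id("hervé")
print(doc_id, decode_doc_id(doc_id))
```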
volSize[2] - center[2])\n\n\thitBox, t0, t1 = intersectBox(pixRay, boxmin, boxmax)\n\n\tif hitBox == False:\n\t\tshade = background\n\n\telse:\n\t\tif t0 < 0.0:\n\t\t\tt0 = 0.0\n\n\t\tboxRay = (paramRay(pixRay,t0), (paramRay(pixRay, t1)[0] - paramRay(pixRay, t0)[0],\n\t\t\t\t\t\t\t\t\t\tparamRay(pixRay, t1)[1] - paramRay(pixRay, t0)[1],\n\t\t\t\t\t\t\t\t\t\tparamRay(pixRay, t1)[2] - paramRay(pixRay, t0)[2]))\n\n\t\tif method == 0: #Volume\n\t\t\tshade = volumeRenderShader(d_vol, volSize, boxRay, threshold, NUMSTEPS)\n\t\telif method == 1: #Slicer\n\t\t\tshade = sliceShader(d_vol, volSize, boxRay, threshold, dist, source)\n\t\telse: #Raycast\n\t\t\tshade = rayCastShader(d_vol, volSize, boxRay, threshold, EPS)\n\n\td_out[i] = shade\n\n@nbcuda.jit\ndef volumeKernel(d_vol, volSize, id_, params):\n\tw = volSize[0]\n\th = volSize[1]\n\td = volSize[2]\n\tc = nbcuda.blockIdx.x*nbcuda.blockDim.x + nbcuda.threadIdx.x #Column\n\tr = nbcuda.blockIdx.y*nbcuda.blockDim.y + nbcuda.threadIdx.y #Row\n\ts = nbcuda.blockIdx.z*nbcuda.blockDim.z + nbcuda.threadIdx.z #Stack\n\ti = c + r * w + s * w * h;\n\n\tif c >= w or r >= h or s >= d:\n\t\treturn\n\n\td_vol[i] = func(c, r, s, id_, volSize, params)\n\ndef kernelLauncher(d_out, d_vol, w, h, volumeSize, method, zs, theta, threshold, dist, TX_2D, TY_2D, EPS, NUMSTEPS):\n\tgridSize = (divUp(w, TX_2D), divUp(h, TY_2D))\n\tblockSize = (TX_2D, TY_2D)\n\n\td_vol_numba = nbcuda.to_device(d_vol)\n\tvolumeSize_numba = nbcuda.to_device(volumeSize)\n\n\trenderKernel[gridSize, blockSize](d_out, d_vol_numba, w, h, volumeSize_numba, method, zs, theta, threshold, dist, EPS, NUMSTEPS)\n\ndef volumeKernelLauncher(d_vol, volumeSize, id_, params, TX, TY, TZ):\n\td_vol_numba = nbcuda.to_device(d_vol)\n\tvolumeSize_numba = nbcuda.to_device(volumeSize)\n\n\tgridSize = (divUp(volumeSize[0], TX), divUp(volumeSize[1], TY), divUp(volumeSize[2], TZ))\n\tblockSize = (TX, TY, TZ)\n\tvolumeKernel[gridSize, blockSize](d_vol_numba, volumeSize, id_, params)\n\n\td_vol_numba.copy_to_host(d_vol)","sub_path":"vis_3d/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"385986878","text":"#-------------------------------------------------------------------------------\n# Name: Landsat Digital Numbers to Radiance/Reflectance\n# Purpose: To convert landsat 4,5, or 7 pixel values from digital numbers\n# to Radiance, Reflectance, or Temperature\n# Author: Quinten Geddes Quinten.A.Geddes@nasa.gov\n# NASA DEVELOP Program\n# Created: 19/10/2012\n\n#-------------------------------------------------------------------------------\nimport arcpy\nimport math\nimport sys\narcpy.CheckOutExtension(\"Spatial\")\narcpy.env.overwriteOutput = True\n#Variables-----------------------------------------------------------------\nL7bands=arcpy.GetParameterAsText(0)\nMetaData =arcpy.GetParameterAsText(1)\nOutputType=arcpy.GetParameterAsText(2)\nOutputFolder=arcpy.GetParameterAsText(3)\n#--------------------------------------------------------------------------\n\n#Reading Metadata that pertains to all bands\nL7bands=L7bands.split(\";\")\nnewMeta=['LANDSAT_SCENE_ID = \"','DATE_ACQUIRED = ',\"SUN_ELEVATION = \",\n \"RADIANCE_MAXIMUM_BAND_{0} = \",\"RADIANCE_MINIMUM_BAND_{0} = \",\n \"QUANTIZE_CAL_MAX_BAND_{0} = \",\"QUANTIZE_CAL_MIN_BAND_{0} = \"]\n\noldMeta=['BAND1_FILE_NAME = \"',\"ACQUISITION_DATE = \",\"SUN_ELEVATION = \",\n \"LMAX_BAND{0} = \",\"LMIN_BAND{0} = \",\n \"QCALMAX_BAND{0} = 
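`divUp` in the CUDA kernel module above is ceiling division for sizing the launch grid, but it is written with `/`, which returns a float on Python 3; grid dimensions need integers, so `//` is the safe spelling. A corrected helper and an example launch configuration:

```python
def div_up(a, b):
    # Ceiling division: enough blocks to cover all `a` elements.
    return (a + b - 1) // b  # `//` stays integral on Python 3, unlike `/`

w, h = 800, 600
TX, TY = 32, 32
grid = (div_up(w, TX), div_up(h, TY))
block = (TX, TY)
print(grid, block)  # (25, 19) (32, 32) -- 19 * 32 = 608 >= 600
```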
\",\"QCALMIN_BAND{0} = \"]\nf=open(MetaData)\n\nMText=f.read()\n\nif \"PRODUCT_CREATION_TIME\" in MText:\n Meta=oldMeta\n Band6length=2\nelse:\n Meta=newMeta\n Band6length=8\nif Meta==newMeta:\n TileName=MText.split(Meta[0])[1].split('\"')[0]\n year=TileName[9:13]\n jday=TileName[13:16]\nelif Meta==oldMeta:\n TileName=MText.split(Meta[0])[1].split('\"')[0]\n year=TileName[13:17]\n jday=TileName[17:20]\n\ndate=MText.split(Meta[1])[1].split('\\n')[0]\nL7_ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0.,82.07,1368.00)\nL5_ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0.,80.67)\nL4_ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0.,80.72)\nspacecraft=MText.split('SPACECRAFT_ID = \"')[1].split('\"')[0]\n\nif \"7\" in spacecraft:\n ESun=L7_ESun\nelif \"5\" in spacecraft:\n ESun=L5_ESun\nelif \"4\" in spacecraft:\n ESun=L4_ESun\nelse:\n arcpy.AddError(\"This tool only works for Landsat 4, 5, or 7. This data appears to be from {0}\".format(spacecraft))\n sys.exit()\n\nif float(year) % 4 ==0:\n DIY=366.\nelse:\n DIY=365.\ntheta =2*math.pi*float(jday)/DIY\n\ndSun2 = 1.00011 + 0.034221*math.cos(theta) + 0.001280*math.sin(theta) + 0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta)\n\nSZA=90.-float(MText.split(Meta[2])[1].split(\"\\n\")[0])\n\n#Calculating values for each band\nBandNum=0\nfor pathname in L7bands:\n BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0]\n if BandNum==\"6\" and spacecraft[8]==\"7\":\n BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0:Band6length]\n arcpy.AddMessage( \"Processing Band {0}\".format(BandNum))\n f=open(MetaData)\n text=f.read()\n Oraster=arcpy.Raster(pathname)\n LMax= float(text.split(Meta[3].format(BandNum))[1].split(\"\\n\")[0])\n LMin= float(text.split(Meta[4].format(BandNum))[1].split(\"\\n\")[0])\n QCalMax=float(text.split(Meta[5].format(BandNum))[1].split(\"\\n\")[0])\n QCalMin=float(text.split(Meta[6].format(BandNum))[1].split(\"\\n\")[0])\n\n Radraster=(((LMax - LMin)/(QCalMax-QCalMin)) * (Oraster - QCalMin)) +LMin\n Oraster=0\n if OutputType==\"Radiance\":\n Radraster.save(\"{0}\\\\{1}_B{2}_Radiance.tif\".format(OutputFolder,TileName,BandNum))\n Radraster=0\n elif OutputType==\"Reflectance/Temperature\":\n #Calculating temperature for band 6 if present\n if \"6\" in BandNum:\n Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0))\n Refraster.save(\"{0}\\\\{1}_B{2}_Temperature.tif\".format(OutputFolder,TileName,BandNum))\n del Refraster\n else:\n Refraster=( math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) )\n Refraster.save(\"{0}\\\\{1}_B{2}_TOA_Reflectance.tif\".format(OutputFolder,TileName,BandNum))\n del Refraster,Radraster\n f.close()\n arcpy.AddMessage( \"Band {0} Completed\".format(BandNum))\n\narcpy.CheckInExtension(\"Spatial\")","sub_path":"Landsat/DNtoReflectance.py","file_name":"DNtoReflectance.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280175953","text":"#!/usr/bin/python3\n\n##----------------------------------------------------------------------------------##\n##-- A script what sends a definable number of batches of definable numbers --##\n##-- of emails to muliple email addresses randomly from a randomly selected MTA --##\n##-- and via multiple vmta's also selected randomly. --##\n##-- The 'vmta' bit only applies to PMTA, other MTA's will just see an extra --##\n##-- X-Header... 
--##\n##-- --##\n##-- PYTHON 3 VERSION --##\n##-- --##\n##-- Last Update - TimC@09082016 --##\n##----------------------------------------------------------------------------------##\n\n##-- Import Modules --##\nimport smtplib\nfrom time import sleep\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom random import randint\nimport pdb\n\n# ##-- HOME CONFIG: --##\n# from_address = \"tim@drivebysulking.com\"\n# rcpt_addrs=[\"tim.caudrey@dotmailer.com\", \"tim.caudrey@gmail.com\", \"tim@drivebysulking.com\"]\n# vmtas=[\"117-20\", \"CleverDigit\", \"106-22\"]\n# smtp_server = 'septicpi01.drivebysulking.com:25'\n\n##-- WORK CONFIG: --##\nfrom_address = \"tim.caudrey@dotmailer.com\"\nrcpt_addrs=[\"tim.caudrey@gmail.com\", \"timcaudrey@hotmail.com\", \"tim.caudrey@yahoo.co.uk\", \"tim.caudrey@dotmailer.com\"]\nvmtas=[\"108-10\", \"108-25\", \"108-39\", \"108-65\", \"108-80\", \"108-116\", \"108-145\", \"108-166\", \"108-188\", \"108-204\", \"108-225\"]\nsmtp_servers=[\"10.32.101.1:25\", \"10.32.101.2:25\", \"10.32.101.3:25\"]\n# smtp_servers=[\"10.32.101.1:25\", \"10.32.101.2:25\", \"10.32.101.3:25\"]\n# smtp_server = 'powermta01.dc.dotdigitalgroup.plc.com:25'\n# smtp_server = \"10.32.101.1:25\"\n\n##-- Some Variables --##\nmsg_threads = 10\nmsg_total = 20\n##--\nmsg_pause = 0\nmsg_count = 1\nmsg_total_count = 0\nthread_count = 1\n\n##-- Define Sending Mechanism --##\ndef mail(msg):\n\tserver = smtplib.SMTP(smtp_server)\n\tserver.sendmail(from_address, msg['To'], msg.as_string())\n\tserver.quit()\n\n##-- Define Message Content --##\ndef mailer():\n\n\t##-- MIME Plain Text Content --##\n\tpln_txt = \"\"\"\\nthis is a automated message.....\n\tblah, blah, blah...\\n\n\twaffle, waffle, waffle...\\n\\n\"\"\"\n\tpln_txt = MIMEText(pln_txt, 'plain')\n\n\t##-- MIME HTML Content --##\n\thtml_txt = \"\"\"\n\t<html>\n\t\t<head>Hello</head>\n\t\t\t<body>\n\t\t\t\t<p>Wotcha!<br>\n\t\t\t\tHow are you?<br>\n\t\t\t\tHere is a <a href=\"http://www.dotmailer.com\">link</a>...\n\t\t\t\t</p>\n\t\t\t</body>\n\t</html>\n\t\"\"\"\n\thtml_txt = MIMEText(html_txt, 'html')\n\n\t##-- MIME Image Content --##\n\t# logo = \"logo_new.png\"\n\t# logo = open(logo, 'rb')\n\t# img = MIMEImage(logo.read())\n\t# logo.close\n\n\t##-- Add MIME Parts to message --##\n\tmsg = MIMEMultipart('mixed')\n\tmsg.attach(pln_txt)\n\tmsg.attach(html_txt)\n\t# msg.attach(img)\n\n\t##-- Set from address --##\n\tmsg['From'] = from_address\n\n\t##-- Randomly choose recipient address from rcpt_addrs array --##\n\trcptno = (len(rcpt_addrs)-1)\n\trandrcpt = (randint(0, rcptno))\n\trcpt = rcpt_addrs[randrcpt]\n\tmsg['To'] = rcpt\n\n\t##-- Randomly choose vmta from vmtas array --##\n\tvmtano = (len(vmtas)-1)\n\trandvmta = (randint(0, vmtano))\n\tvmta = vmtas[randvmta]\n\tmsg.add_header('x-virtual-mta', vmta)\n\tmsg.add_header('x-random-header: ', 'bobbins')\n\n\t##-- Randomly choose SMTP server from array --##\n\tglobal smtp_server\n\tno_smtp = (len(smtp_servers)-1)\n\trandsmtp = (randint(0, no_smtp))\n\tsmtp_server = smtp_servers[randsmtp]\n\n\t##-- Other bits of the message --##\n\tmsg_counter = str(msg_count)\n\tthread_counter = str(thread_count)\n\tmsg['subject'] = \"Batch email thread# \" + thread_counter + \" message# \" + msg_counter + \" from: \" + smtp_server + \" via vmta: \" + vmta\n\tmsg.add_header('reply-to', from_address)\n\tprint (msg.as_string())\n\n\t##-- Send the damn thing --##\n\tmail(msg)\n\n\t##-- Have a snooze if required --##\n\tsleep(msg_pause)\n\n##-- 
Loop through Batches and Quantities and send... --##\nfor threads in range (0, msg_threads):\n\tfor msgs in range(0, msg_total):\n\t\tmailer()\n\t\tmsg_count += 1\n\t\tmsg_total_count += 1\n\tmsg_count = 1\n\tthread_count += 1\nthread_count = 1\n\n##-- Sign Off & Stats --##\nprint (\"\\nDone... %d messages sent in total...\\n\" % msg_total_count)","sub_path":"dotpy3mailer_108.py","file_name":"dotpy3mailer_108.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"370097754","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 5 18:01:14 2016\n\n@author: Serj\n\"\"\"\n\nimport numpy as np\nfrom astropy.io import fits\nimport os, sys\nsys.path.append('C:/science/python')\nfrom astrolib import helcorr\nfrom glob import glob\nfrom vac_helio import vac_helio\n\nclass exposure():\n def __init__(self, folder, name=None, instr=None, gelio=True, mask=False):\n self.folder = folder\n if name is not None:\n self.name = name\n else:\n self.name = folder[folder.rfind('\\\\')+1:]\n self.readfits()\n self.instr = instr\n self.gelio = gelio\n self.mask = mask\n print(self.name, self.folder)\n \n def readfits(self):\n path = self.folder\n if os.path.exists(path+'/list.dat'):\n f_in = open(folder+'/list.dat', 'r')\n self.list_of_fits = [] \n for l in f_in:\n self.list_of_fits.append(str(l).replace('\\n', ''))\n else:\n print(path)\n self.list_of_fits = [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.fits'))] \n \n def fitstoascii(self):\n with open(self.folder+r'\\file.list','w') as f_list:\n for file in self.list_of_fits:\n f_list.write(file.replace('.fits', '.dat') + '\\n')\n \n if self.instr == 'UVES':\n self.fitstoascii_UVES()\n \n \n def fitstoascii_UVES(self): \n for file in self.list_of_fits:\n s = file.replace('.fits', '.dat')\n hdulist = fits.open(file)\n if self.mode == 'CRVAL':\n header = hdulist[0].header\n print(header)\n print(header['CRVAL1'], header['CRPIX1'])\n prihdr = hdulist[0].data\n print(prihdr[0], prihdr[1], prihdr[2], prihdr[3])\n l = np.power(10, np.arange(header['NAXIS1'])*header['CD1_1']+header['CRVAL1'])\n f = prihdr[0]\n e = prihdr[1]\n print(l, f, e)\n elif self.mode == 'ADP':\n header = hdulist[1].header\n print(header)\n prihdr = hdulist[1].data\n print(prihdr)\n x = prihdr.field(0)\n n = x.size\n l = prihdr.field(0)[0]\n f = prihdr.field(1)[0]\n e = prihdr.field(2)[0]\n print(l, f, e)\n \n if self.mask:\n mask = (l > 3300) * (f != 0)\n l, f, e = l[mask], f[mask], e[mask]\n print(l, f, e, mask)\n \n if self.gelio:\n long = hdulist[0].header['HIERARCH ESO TEL GEOLON']\n lat = hdulist[0].header['HIERARCH ESO TEL GEOLAT']\n elev = hdulist[0].header['HIERARCH ESO TEL GEOELEV']\n ra = hdulist[0].header['RA']\n dec = hdulist[0].header['DEC']\n mjd = hdulist[0].header['MJD-OBS']\n rjd = mjd + 0.5\n jd = rjd + 2400000.0\n exptime = hdulist[0].header['EXPTIME']\n corrhel, hjd = helcorr(long,lat,elev,ra/15.0,dec, rjd+exptime/86400.0/2)\n v_helio = corrhel\n print(v_helio)\n l = vac_helio(l, v_helio)\n \n print('>>> writing to ascii file ...')\n \n np.savetxt(s, np.array([l, f, e]).transpose(), fmt='%.4f')\n \nif __name__ == \"__main__\":\n exp = exposure(r'D:\\science\\QSO\\UVES\\J031115-172247', instr='UVES', gelio=False)\n exp.mode = 'CRVAL'\n exp.fitstoascii()","sub_path":"spectools.py","file_name":"spectools.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"63403812","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport boto3\nimport csv\nfrom botocore.response import StreamingBody\nfrom typing import Dict, List, Optional, Set\n\nALL_REQUIRED_FIELDS: Set[str] = {\n 'action_source',\n 'conversion_value',\n 'currency_type',\n 'event_type',\n 'timestamp',\n}\nONE_OR_MORE_REQUIRED_FIELDS: Set[str] = {'email','device_id'}\n\ndef validate_and_generate_report(bucket: str, key: str) -> str:\n s3_client = boto3.client('s3')\n response = s3_client.get_object(Bucket=bucket, Key=key)\n body = response['Body']\n try:\n return generate_from_body(body)\n except BaseException as e:\n return f'Something went wrong while validating the data. Exception details if available:\\n{e}'\n\ndef header_check_fields_missing(header_fields: List[str]) -> List[str]:\n fields_missing = ALL_REQUIRED_FIELDS.difference(set(header_fields))\n return sorted(fields_missing)\n\ndef header_contains_identity_fields(header_fields: List[str]) -> bool:\n intersection = ONE_OR_MORE_REQUIRED_FIELDS.intersection(set(header_fields))\n return len(intersection) > 0\n\ndef is_line_valid(line: Dict[str, str]) -> bool:\n for field in ALL_REQUIRED_FIELDS:\n if field not in line or value_empty(line[field]):\n return False\n\n return any(\n field in line and not value_empty(line[field])\n for field in ONE_OR_MORE_REQUIRED_FIELDS\n )\n\ndef value_empty(value: Optional[str]) -> bool:\n return (\n str(value).strip() == '' or\n value is None\n )\n\ndef generate_from_body(body: StreamingBody) -> str:\n validation_state = ValidationState()\n valid_header_row = None\n\n for line in body.iter_lines():\n line_string = line.decode('utf-8')\n if valid_header_row:\n reader = csv.DictReader([valid_header_row, line_string])\n for parsed_line in reader:\n if is_line_valid(parsed_line):\n validation_state.valid_rows += 1\n else:\n validation_state.error_rows += 1\n validation_state.total_rows += 1\n else:\n header_row_valid = True\n raw_field_names = csv.DictReader([line_string]).fieldnames\n header_fields = []\n if raw_field_names:\n for s in raw_field_names:\n header_fields.append(s)\n missing_fields = header_check_fields_missing(header_fields)\n if len(missing_fields) > 0:\n missing_fields_str = ','.join(missing_fields)\n validation_state.header_validation_messages.append(\n f'Header row not valid, missing `{missing_fields_str}` required fields.'\n )\n header_row_valid = False\n if not header_contains_identity_fields(header_fields):\n required_header_fields = ','.join(sorted(ONE_OR_MORE_REQUIRED_FIELDS))\n validation_state.header_validation_messages.append(\n f'Header row not valid, at least one of `{required_header_fields}` is required.'\n )\n header_row_valid = False\n if not header_row_valid:\n validation_state.header_validation_messages.append(\n 'Validation processing stopped.'\n )\n break\n valid_header_row = line_string\n\n report = ['Validation Summary:']\n report.append(f'Total rows: {validation_state.total_rows}')\n report.append(f'Valid rows: {validation_state.valid_rows}')\n report.append(f'Rows with errors: {validation_state.error_rows}')\n report.extend(validation_state.header_validation_messages)\n return '\\n'.join(report) + '\\n'\n\nclass ValidationState:\n def __init__(self):\n self.total_rows = 0\n self.valid_rows = 0\n self.error_rows = 0\n self.header_validation_messages = 
[]\n","sub_path":"fbpcs/infra/cloud_bridge/data_validation/validation_utility/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258254698","text":"import json\nfrom Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom zope import schema\n\nfrom zope.component import getMultiAdapter, getUtility\n\nfrom z3c.form import field\nfrom plone.directives import dexterity, form\nfrom zope.lifecycleevent import modified\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\nfrom plone.i18n.normalizer import idnormalizer\n\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.CMFCore.utils import getToolByName\nfrom plone.app.layout.viewlets.content import ContentHistoryView\n\nfrom z3c.form.browser.checkbox import CheckBoxFieldWidget\nfrom collective.z3cform.chosen import ChosenMultiFieldWidget\nfrom collective.z3cform.datagridfield import DataGridFieldFactory, DictRow\n\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zc.relation.interfaces import ICatalog\nfrom zope.app.intid.interfaces import IIntIds\n\nfrom plone.app.contentlisting.interfaces import IContentListing\n\nfrom chromsystems.shopcontent.itemcollection import IItemCollection\nfrom chromsystems.shopcontent.orderableitem import IOrderableItem\n\nfrom chromsystems.shopcontent import MessageFactory as _\n\n\ndef make_terms(items):\n terms = [SimpleTerm(value=pair[0], token=pair[0], title=pair[1])\n for pair in items]\n return terms\n\n\n@grok.provider(IContextSourceBinder)\ndef category_source(context):\n catalog = api.portal.get_tool(name='portal_catalog')\n cats = catalog.uniqueValuesFor(\"category\")\n entries = []\n done = []\n for cat in cats:\n cat_unicode = safe_unicode(cat)\n cat_id = idnormalizer.normalize(cat_unicode)\n if cat_id not in done:\n entry = (cat_id, cat_unicode)\n entries.append(entry)\n done.append(cat_id)\n terms = make_terms(entries)\n return SimpleVocabulary(terms)\n\n\nclass ISettings(form.Schema):\n\n main_category = schema.Choice(\n title=u\"Main category\",\n required=True,\n vocabulary=u\"chromsystems.shopcontent.ShopCategories\",\n )\n form.widget(subcategories=CheckBoxFieldWidget)\n subcategories = schema.Set(\n title=u\"Select Categories\",\n description=_(u\"Select from the list of alredy existing categories\"),\n required=False,\n value_type=schema.Choice(\n source=u\"chromsystems.shopcontent.ShopCategories\"\n )\n )\n\n\nclass IShopFolder(form.Schema):\n \"\"\"\n A folderish type acting as shop dashboard\n \"\"\"\n #form.widget(main_categories=ChosenMultiFieldWidget)\n #main_categories = schema.List(\n # title=u\"Select Categories\",\n # description=_(u\"Select from the list of alredy existing categories\"),\n # required=False,\n # value_type=schema.Choice(source=category_source)\n #)\n form.widget(settings=DataGridFieldFactory)\n settings = schema.List(\n title=u'Addresses',\n value_type=DictRow(\n title=u'Address',\n schema=ISettings),\n required=True\n )\n\n\nclass ShopFolder(dexterity.Container):\n grok.implements(IShopFolder)\n\n\nclass View(grok.View):\n grok.context(IShopFolder)\n grok.require('zope2.View')\n grok.name('view')\n\n def update(self):\n self.has_items = len(self.recent_items()) > 0\n\n def recent_items(self):\n context = aq_inner(self.context)\n catalog = api.portal.get_tool(name='portal_catalog')\n brains = catalog(portal_type=[\n 'chromsystems.shopcontent.itemcollection',\n 
'chromsystems.shopcontent.orderableitem'],\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=2),\n sort_on='modified',\n sort_order='reverse',\n sort_limit=10)[:10]\n return brains\n\n def item_history_info(self, item):\n obj = item.getObject()\n chv = ContentHistoryView(obj, obj.REQUEST)\n history = chv.fullHistory()\n if history is not None:\n return history[0]\n else:\n obj_history = {'time': obj.modified,\n 'actor': obj.Creator}\n return obj_history\n\n def ic_count(self):\n context = aq_inner(self.context)\n catalog = api.portal.get_tool(name='portal_catalog')\n brains = catalog(object_provides=IItemCollection.__identifier__,\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=2))\n return len(brains)\n\n def oi_count(self):\n context = aq_inner(self.context)\n catalog = api.portal.get_tool(name='portal_catalog')\n brains = catalog(object_provides=IOrderableItem.__identifier__,\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=2))\n return len(brains)\n\n def friendly_date(self, time):\n return api.portal.get_localized_time(datetime=time)\n\n def current_lang(self):\n context = aq_inner(self.context)\n pstate = getMultiAdapter((context, self.request),\n name=u'plone_portal_state')\n return pstate.language()\n\n\nclass LayoutSetter(grok.View):\n grok.context(IShopFolder)\n grok.require('zope2.View')\n grok.name('layout-setter')\n\n def render(self):\n return self.set_layout()\n\n def set_layout(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n products = catalog(object_provides=IItemCollection.__identifier__,)\n idx = 0\n for x in products:\n idx += 1\n obj = x.getObject()\n structure = self._generateLayout(obj)\n setattr(obj, 'layout', 'view')\n setattr(obj, 'order', structure)\n modified(obj)\n obj.reindexObject(idxs='modified')\n return 'Successfully updated products: %s' % idx\n\n def _generateLayout(self, obj):\n intids = getUtility(IIntIds)\n rel_catalog = getUtility(ICatalog)\n cid = intids.getId(obj)\n rels = sorted(rel_catalog.findRelations({'from_id': cid}))\n data = {}\n counter = 0\n for r in rels:\n if not r.isBroken():\n counter += 1\n r_obj = r.to_object\n item = {}\n item['type'] = 'orderable'\n item['uid'] = r_obj.UID()\n order_info = 'entry_' + str(counter)\n data[order_info] = item\n return json.dumps(data)\n\n\nclass DashboardCollections(grok.View):\n grok.context(IShopFolder)\n grok.require('cmf.ModifyPortalContent')\n grok.name('dashboard-collections')\n\n def update(self):\n self.has_collections = len(self.item_collections()) > 0\n\n def item_collections(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n brains = catalog(object_provides=IItemCollection.__identifier__,\n path=dict(query='/'.join(context.getPhysicalPath()),\n depth=1),\n sort_on='modified',\n sort_order='reverse',\n review_state='published')\n results = IContentListing(brains)\n return results\n\n\nclass CategoriesJSON(grok.View):\n grok.context(IShopFolder)\n grok.require('cmf.ModifyPortalContent')\n grok.name('categories-json')\n\n def render(self):\n return json.dumps(self.available_categories(), indent=4)\n\n def available_categories(self):\n catalog = api.portal.get_tool(name='portal_catalog')\n cats = catalog.uniqueValuesFor(\"category\")\n entries = []\n done = []\n for cat in cats:\n cat_unicode = safe_unicode(cat)\n cat_id = idnormalizer.normalize(cat_unicode)\n if cat_id not in done:\n entry = (cat_id, cat_unicode)\n entries.append(entry)\n 
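# remember this normalized id so later duplicates are skipped\n                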
done.append(cat_id)\n        return entries\n","sub_path":"src/chromsystems.shopcontent/chromsystems/shopcontent/shopfolder.py","file_name":"shopfolder.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321182768","text":"from pymongo import MongoClient\nfrom pymongo.errors import WriteError\n\nfrom challenge_bravo.exceptions.mongo_exceptions import NoMatchedCurrency\nfrom challenge_bravo.interfaces.repository import Repository\n\n\nclass Mongodb(Repository):\n\n    def __init__(self, config):\n        self.db = MongoClient(self.generate_mongo_connection_string(config))[f'{config.MONGODB_DATABASE}']\n\n    def add_to_collection(self, payload):\n        # insert_one: the old Collection.insert() is deprecated and was removed in modern pymongo\n        return self.db.currencies.insert_one(payload)\n\n    def remove_from_collection(self, query):\n        return self.db.currencies.delete_many(query)\n\n    def get_from_collection(self, code=None):\n        try:\n            if code is None:\n                return self.db.currencies.find({}, {'_id': False})\n            return self.db.currencies.find_one({'code': code})\n        except Exception as e:\n            print(e)\n\n    @staticmethod\n    def generate_mongo_connection_string(config):\n        return f'mongodb://{config.MONGODB_USERNAME}:{config.MONGODB_PASSWORD}' \\\n               f'@{config.MONGODB_HOST}:{config.MONGODB_PORT}/'\n\n    def update_object(self, currency_id, updated_fields):\n        if 'code' in updated_fields.keys():\n            raise WriteError(\"You can't update the currency code\")\n        result = self.db.currencies.update_one({\n            'code': currency_id\n        }, {\n            '$set': updated_fields\n        }, upsert=False)\n        if result.matched_count == 1:\n            return result\n        else:\n            raise NoMatchedCurrency(f'Unable to find currency code {currency_id}')\n","sub_path":"challenge_bravo/repositories/mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40933413","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom skimage.filters import sobel_h, sobel_v\n\n# Histogram shape\ngist_h, gist_w = 8, 8\n# Block-matrix shape\nblock_h, block_w = gist_h - 1, gist_w - 1\n# Block size\nblock_s = 2\n# Count of bins\nbinCount = 8\n\ndef fit_and_classify(train_features, train_labels, test_features):\n    from sklearn import svm\n    lin_clsf = svm.LinearSVC()\n    lin_clsf.fit(train_features, train_labels)\n    return lin_clsf.predict(test_features)\n\ndef create_hist(hist, theta, G, cellr, cellc):\n    for i in range(gist_h):\n        for j in range(gist_w):\n            hist[i,j,:],_ = np.histogram(theta[i*cellr:(i+1)*cellr, j*cellc:(j+1)*cellc], bins = binCount, range = (-np.pi, np.pi), weights = G[i*cellr:(i+1)*cellr, j*cellc:(j+1)*cellc])\n\ndef create_blocks(blocks, hist):\n    ### eps for normalization\n    eps = 0.000001\n    for i in range(block_h):\n        for j in range(block_w):\n            #cur_cell = hist[i*block_s:(i+1)*block_s, j*block_s:(j+1)*block_s, :]\n            cur_cell = hist[i:i+block_s, j:j+block_s, :]\n            blocks[i, j, :, :, :] = cur_cell / np.sqrt((np.linalg.norm(cur_cell) ** 2 + eps))\n\ndef extract_hog(img):\n    Y = img[:,:,0] * 0.299 + img[:,:,1] * 0.587 + img[:,:,2] * 0.114\n    Ix = sobel_v(Y)\n    Iy = sobel_h(Y)\n    G = np.sqrt(Ix * Ix + Iy * Iy)[2:-2, 2:-2]\n    theta = np.arctan2(Iy, Ix)[2:-2, 2:-2]\n    # One cell shape\n    h, w = G.shape\n    cellr, cellc = h // gist_h, w // gist_w\n    \n    hist = np.zeros((gist_h, gist_w, binCount))\n    create_hist(hist, theta, G, cellr, cellc)\n    \n    blocks = np.empty((block_h, block_w, block_s, block_s, binCount))\n    create_blocks(blocks, hist)\n    return blocks.ravel()\n    \nif __name__ == \"__main__\":\n    from 
skimage.io import imread\n    print(extract_hog(imread(\"27749.png\")).shape)\n    \n    ","sub_path":"big-tasks/3-signs/fit_and_classify.py","file_name":"fit_and_classify.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560887033","text":"from django.test import TestCase\nfrom django.db.models.expressions import Value as V\nfrom django.db.models.functions import Cast, Greatest, Coalesce\nfrom django.db.models import CharField, F, Func, Case, When, Value, Q, IntegerField as I\nfrom django.contrib.postgres.search import TrigramSimilarity, SearchVector\n\nfrom ..models import Product, search_all_categories, Category\n\n\nclass TestProductManager(TestCase):\n\n    fixtures = [\"product_fixtures.json\"]\n\n    def setUp(self) -> None:\n        pass\n\n    def test_full_search(self):\n        value = \"soundcard\"\n        prod_search = Product.objects.full_search(value)\n        cat_search = search_all_categories(value)\n        if cat_search:\n            print(f\"Cat_list: {(cat_search, cat_search.query_type) if not hasattr(cat_search, 'suggestions') else (cat_search, 'These are suggestions')}\")\n        else:\n            print(\"No Category items found\")\n        if prod_search:\n            print(f\"Prod_list: {(prod_search, prod_search.query_type) if not hasattr(prod_search, 'suggestions') else (prod_search, 'These are suggestions')}\")\n        else:\n            print(\"No Products found\")\n\n\n\n","sub_path":"products/tests/test_managers.py","file_name":"test_managers.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116215055","text":"# Implement the es_vocal function, which receives a character and\n# returns a boolean indicating whether the letter is a vowel or not.\n\n\n# Solve using lists and the in operator.\ndef es_vocal(letra):\n    vocales = ['a', 'e', 'i', 'o', 'u']\n    return letra in vocales\n\n\nassert es_vocal('a')\nassert not es_vocal('b')\n","sub_path":"practico_01/ejercicio-05.py","file_name":"ejercicio-05.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255770279","text":"import pandas as pd\nimport numpy as np\nfrom os import path\nimport click\nimport eval_tools as et\nfrom eval_tools.tools import getQNumber, convertNanToStr, shortenWhitespace, searchKeyByRegex\n\n@click.command()\n@click.argument('pc_txt',\n                type=click.Path(exists=True, dir_okay=False,\n                                file_okay=True, readable=True)\n                )\n@click.argument('ws_question')\n@click.argument('team_txt',\n                type=click.Path(exists=True, dir_okay=False,\n                                file_okay=True, readable=True)\n                )\n@click.argument('questions',\n                type=click.Path(exists=True, dir_okay=False,\n                                file_okay=True, readable=True)\n                )\n@click.argument('output', type=click.Path(exists=False, dir_okay=False, file_okay=True))\ndef main(pc_txt, ws_question, team_txt, questions, output):\n    '''\n    Script to sort and extract the workshop related comment fields from the imwe evaluation\n\n    Parameters:\n\n    -----------\n\n    PC_TXT: Path to xlsx with PC answers\n    \n    WS_QUESTION: number of the workshop question e.g. 
Q20\n\n    TEAM_TXT: Path to xlsx with team answers\n\n    QUESTIONS: Path to xlsx file containing a table \"Common\" telling the question names\n    '''\n\n    df_pc_txt = et.io.readTxtExcelFileToDf(pc_txt)\n    df_team_txt = et.io.readTxtExcelFileToDf(team_txt)\n    df_questions = et.io.readConfigXlsxToDf(questions)\n\n    # Correct pc questions\n    df_pc_txt = et.tools.correctGenders(df_pc_txt)\n\n    categories_mask = np.logical_and(df_questions.Type == 'Categories', df_questions.PC == searchKeyByRegex(df_pc_txt,str(ws_question) ))\n    WsCategories = df_questions[categories_mask]\n    WsComments = df_questions[df_questions.Type == 'WsComments']\n\n    with open(output, 'w') as f:\n        for name, group in df_pc_txt.groupby(searchKeyByRegex(df_pc_txt,str(ws_question) )):\n            print(name)\n            f.write(name)\n            f.write(\"\\n\\nPCs:\")\n            for i, row in group[WsComments.PC].iterrows():\n                if len(row.values) > 1:\n                    print(\"alert\")\n                comment = shortenWhitespace(str(row.values[0]))\n                if not comment == 'nan':\n                    f.write(\"\\n\"+comment)\n            f.write(\"\\n\\nTeamer:\" )\n            mask = df_team_txt[WsCategories.Team] == name\n            for index, row in df_team_txt[mask.values].iterrows():\n                comment = shortenWhitespace(str(row[searchKeyByRegex(df_team_txt,'Q1')]))\n                f.write(\"\\n\\n\"+comment+\": \")\n                f.write(str(row[WsComments.Team].values[0]))\n            f.write(\"\\n\\n\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"eval_tools/scripts/extractWsComments.py","file_name":"extractWsComments.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"493852712","text":"# coding=utf-8\nu\"\"\"\nUser: lands\nDate: 16-12-26\nFunc: project logging\n\"\"\"\n\nimport logging\nimport logging.handlers\nfrom background_system.config import config\n\nloggers = {}\n\n\ndef create_log(name):\n    global loggers\n\n    if name in loggers.keys():\n        return loggers[name]\n\n    new_log = logging.getLogger(name)\n\n    app_formatter = logging.Formatter(\"%(asctime)s %(process)d|%(thread)d %(module)s | %(funcName)s |\"\n                                      \" %(filename)s:%(lineno)d\"\n                                      \" %(levelname)s %(message)s\", \"%Y-%m-%d@%H:%M:%S\")\n\n    # handler setup: send log output to the console\n    console_handler = logging.StreamHandler()\n    console_handler.setFormatter(app_formatter)\n    console_handler.setLevel(logging.DEBUG)\n\n    # handler setup: write log output to rotating files (a new file per rotation interval, at most 50 backups kept)\n    log_path = config.LOG_STORE_PATH\n    file_handler = logging.handlers.TimedRotatingFileHandler('%s\\\\daily_log' % log_path, 'S', 5, 50, delay=True)\n    file_handler.suffix = \"%Y-%m-%d_%H-%M-%S.log\"\n    file_handler.setFormatter(app_formatter)\n    file_handler.setLevel(logging.DEBUG)\n\n    new_log.propagate = False\n    new_log.addHandler(console_handler)\n    new_log.addHandler(file_handler)\n    new_log.setLevel(logging.DEBUG)\n\n    loggers[name] = new_log  # cache under the logger's own name, not the literal key 'name'\n    return new_log\n\n\nif __name__ == '__main__':\n    pass\n\n","sub_path":"lib/log_record.py","file_name":"log_record.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95336922","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 7 22:45:46 2017\r\n\r\n@author: Wilson\r\n\"\"\"\r\n\r\ndata = []\r\nwith open(\"C:/Users/Wilson/Desktop/Codejam/A-large.in\", \"r\") as f:\r\n    for line in f:\r\n        line = line.strip('\\n')\r\n        data.append(list(map(str, line.split(' '))))\r\n\r\ndata.pop(0)\r\n \r\ndef flip(x):\r\n    for i in range(len(x)):\r\n        if x[i] == '+':\r\n            x[i] = '-'\r\n        else:\r\n            x[i] = '+'\r\n    return x\r\n\r\ndef pancake(s, k):\r\n    s = 
list(s)\r\n    k = int(k)\r\n    if '-' not in s:\r\n        return 0\r\n    if len(s) < k:\r\n        return 'IMPOSSIBLE'\r\n    step = 0\r\n    for i in range(len(s) - k + 1):\r\n        if s[i] == '-':\r\n            s[i:i+k] = flip(s[i:i+k])\r\n            step += 1\r\n    if '-' in s:\r\n        return 'IMPOSSIBLE'\r\n    return step\r\n\r\nf = open('C:/Users/Wilson/Desktop/Codejam/large_output.txt', 'w')\r\n\r\ncase = 1\r\nwhile data:\r\n    s, k = data[0][0], data[0][1]\r\n    f.write('Case #' + str(case) + ': ' + str(pancake(s, k)) + '\\n')\r\n    \r\n    data = data[1:]\r\n    case += 1\r\n\r\nf.close()\r\n","sub_path":"solutions_python/Problem_199/1220.py","file_name":"1220.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149317254","text":"import tweepy\r\nimport json\r\nfrom tweepy import OAuthHandler\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\nimport csv\r\n\r\n# Twitter credentials: \r\nconsumer_key = ''\r\nconsumer_secret = ''\r\naccess_token = ''\r\naccess_secret = ''\r\n \r\nauth = OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_secret)\r\n \r\napi = tweepy.API(auth)\r\n\r\n# Mongo connection:\r\nclient = MongoClient('localhost', 27017)\r\ndb = client.twiitit\r\nke_collection = db.kansanedustajat2\r\n\r\nvaikeat_nimet = {}\r\n\r\n# Iterate over the names on the Twitter list:\r\nfor member in tweepy.Cursor(api.list_members, 'SuomenEduskunta', 'Kansanedustajat', skip_status=True).items():\r\n    try:\r\n        # Check whether the name is found in the database; if not, print the details\r\n        if ke_collection.find({\"nimi\": member.name}).count() > 0:\r\n            ke_collection.find_one_and_update(\r\n                {\"nimi\": member.name},\r\n                {\"$set\": {\"twittername\": member.screen_name, \"twitterid\": member.id_str}}\r\n            )\r\n\r\n        # If not found under the real name, check whether the misspelt form has already been corrected in the database\r\n        elif ke_collection.find({\"nimi_twitter\": member.name}).count() > 0:\r\n            ke_collection.find_one_and_update(\r\n                {\"nimi_twitter\": member.name},\r\n                {\"$set\": {\"twittername\": member.screen_name, \"twitterid\": member.id_str}}\r\n            )\r\n        \r\n        # Add the names that were not found to a dictionary\r\n        else:\r\n            vaikeat_nimet[member.name] = [member.screen_name, member.id_str]\r\n            print(member.name, member.screen_name, member.id_str, \"\\n\")\r\n    except BaseException:\r\n        print(\"oops\")\r\n    \r\n#print(vaikeat_nimet)\r\n\r\n# Write the difficult names to a CSV file:\r\nwith open('vaikeat_nimet.csv', 'w', newline='') as csvfile:\r\n    fieldnames = ['nimi', 'nimi_twitter', 'twittername', 'twitterid']\r\n    writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=\",\", dialect=\"excel\")\r\n    \r\n    writer.writeheader()\r\n    for key, value in vaikeat_nimet.items():\r\n        writer.writerow({'nimi': \"\", 'nimi_twitter': key, 'twittername': value[0], 'twitterid': value[1]})\r\n\r\n","sub_path":"twitter_scrape_ ja_ csvdump.py","file_name":"twitter_scrape_ ja_ csvdump.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494027856","text":"import zmq\nport = \"7777\"\ncontext = zmq.Context()\nsocket = context.socket(zmq.REQ)\nsocket.connect(\"tcp://localhost:%s\" % port)\ndata = [{\n    \"name\":\"test\",\n    \"freq\":123,\n    \"atten\":456\n 
}]\nsocket.send_json(data)\nprint(socket.recv_string())","sub_path":"src/Testing/Functions/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635161534","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator, IndexFormatter\nimport colorcet as cc\n\n\nclass AI_data(object):\n def __init__(self, csvfn, metric_name):\n self.data = pd.read_csv(csvfn, header=0, index_col=[\n 0, 1], skipinitialspace=True)\n self.metric_name = metric_name\n\n def saveall(self, path):\n self.boxplot(save=True, path=path)\n for col in self.data.columns:\n self.plot_im(col, save=True, path=path)\n self.plot_im('AUC', multiply='Mask Mean', divide='Inv Mask Mean',\n save=True, path=path)\n\n def plot_im(self, col, multiply=None, divide=None, save=False, path=None):\n\n fig, ax = plt.subplots()\n\n sigma_d = self.data.index.levels[0]\n sigma_n = self.data.index.levels[1]\n\n values = self.data[col].values\n title = '{}: {}'.format(self.metric_name, col)\n\n if multiply is not None:\n multiply = [multiply] if type(multiply) == str else multiply\n for mult in multiply:\n values *= self.data[mult].values\n title += ' * {}'.format(mult)\n if divide is not None:\n divide = [divide] if type(divide) == str else divide\n for div in divide:\n values /= self.data[div].values\n title += ' / {}'.format(div)\n\n ax.set_title(title)\n values = values.reshape((sigma_d.size, sigma_n.size)).T\n image = ax.imshow(values, cmap=cc.m_fire)\n\n nticks = 10\n\n xlabels = np.round(1.2 * sigma_d, 1)\n ax.xaxis.set_major_locator(MaxNLocator(nbins=nticks, prune='both'))\n ax.set_xticks(ax.get_xticks() + 1)\n ax.xaxis.set_major_formatter(IndexFormatter(xlabels))\n ax.set_xlabel(r'$\\sigma_D$ [$\\mu$m]')\n\n ylabels = np.round(1.2 * sigma_n, 1)\n ax.yaxis.set_major_locator(MaxNLocator(nbins=nticks, prune='both'))\n ax.set_yticks(ax.get_yticks() + 1)\n ax.yaxis.set_major_formatter(IndexFormatter(ylabels))\n ax.set_ylabel(r'$\\sigma_N$ [$\\mu$m]')\n\n plt.colorbar(image)\n plt.tight_layout()\n\n if save:\n fn = title.replace(' *', '').replace(' /',\n '').replace(':', '').replace(' ', '_') + '.png'\n # fn = '{}_{}.png'.format(\n # self.metric_name, col) if col2 is None else '{}_{}_{}.png'.format(\n # self.metric_name, col, col2)\n\n fig.savefig(path + fn, bbox_inches='tight')\n\n return ax\n\n def boxplot(self, save=False, path=None):\n '''\n Boxplots of all four values\n '''\n fig, ax = plt.subplots(figsize=(10, 8))\n ax = self.data.boxplot(grid=False, ax=ax)\n ax.set_title('AI ({} Measure)'.format(self.metric_name))\n\n if save:\n fn = '{}_boxplots.png'.format(self.metric_name)\n fig.savefig(path + fn, bbox_inches='tight')\n\n def plot_AI_vs_sn(self, save=False, path=None):\n '''\n Plots AI vs. s_n for each s_d\n '''\n fig, axes = plt.subplots(2, 2, figsize=(12, 10))\n axes = axes.flatten()\n\n for i, ax in enumerate(axes):\n lines = np.empty(self.data.index.levels[0].size, dtype=object)\n ax.set_xlabel('$\\sigma_N$')\n ax.set_ylabel('AI')\n ax.set_title(self.data.columns[i+2])\n for j, s_d in enumerate(self.data.index.levels[0]):\n lines[j], = ax.plot(self.data.loc[s_d][self.data.columns[i+2]])\n\n labels = [r'$\\sigma_D$ = {:.2f}'.format(\n s_d) for s_d in self.data.index.levels[0]]\n fig.suptitle(\n 'AI ({} Metric) vs. 
$\\sigma_N$'.format(self.metric_name))\n fig.legend(lines, labels, loc='right')\n fig.tight_layout(w_pad=2, h_pad=2)\n fig.subplots_adjust(top=0.9, right=0.88)\n\n if save:\n fn = '{}_AI_vs_sn.png'.format(self.metric_name)\n fig.savefig(path + fn, bbox_inches='tight')\n\n def plot_AI_vs_sd(self, save=False, path=None):\n '''\n Plots AI vs s_d for each s_n\n '''\n\n fig, axes = plt.subplots(2, 2, figsize=(12, 10))\n axes = axes.flatten()\n\n for i, ax in enumerate(axes):\n lines = np.empty(self.data.index.levels[1].size, dtype=object)\n ax.set_xlabel('$\\sigma_D$')\n ax.set_ylabel('AI')\n ax.set_title(self.data.columns[i+2])\n for j, s_n in enumerate(self.data.index.levels[1]):\n lines[j], = ax.plot(self.data.xs(s_n, level=1)[\n self.data.columns[i+2]])\n\n labels = [r'$\\sigma_N$ = {:.2f}'.format(\n s_n) for s_n in self.data.index.levels[1]]\n fig.suptitle(\n 'AI ({} Metric) for constant $\\sigma_N$'.format(self.metric_name))\n fig.legend(lines, labels, loc='right')\n plt.tight_layout(w_pad=2, h_pad=2)\n fig.subplots_adjust(top=0.9, right=0.88)\n\n if save:\n fn = '{}_AI_vs_sd.png'.format(self.metric_name)\n fig.savefig(path + fn, bbox_inches='tight')\n\n def plot_AUC(self, save=False, path=None):\n '''\n Difference curves\n '''\n\n fig, axes = plt.subplots(1, 2, figsize=(16, 8))\n axes = axes.flatten()\n lines = np.empty(self.data.index.levels[1].size, dtype=object)\n\n for j, s_d in enumerate(self.data.index.levels[0]):\n lines[j], = axes[0].plot(self.data.loc[s_d]['AUC'],\n label=r'$\\sigma_D$ = {:.2f}'.format(s_d))\n\n axes[0].grid(True, alpha=0.5)\n axes[0].set_xlabel('$\\sigma_N$')\n axes[0].set_ylabel('AUC')\n axes[0].set_title('AUC vs. $\\sigma_N$')\n axes[0].legend()\n\n for j, s_n in enumerate(self.data.index.levels[1]):\n lines[j], = axes[1].plot(self.data.xs(s_n, level=1)['AUC'],\n label=r'$\\sigma_N$ = {:.2f}'.format(s_n))\n\n axes[1].grid(True, alpha=0.5)\n axes[1].set_xlabel('$\\sigma_D$')\n axes[1].set_ylabel('AUC')\n axes[1].set_title('AUC vs. $\\sigma_D$')\n axes[1].legend()\n\n fig.suptitle('AUC ({} Metric)'.format(self.metric_name))\n\n plt.tight_layout(w_pad=2, h_pad=2)\n fig.subplots_adjust(top=0.9, right=0.88)\n\n if save:\n fn = '{}_AUC.png'.format(self.metric_name)\n fig.savefig(path + fn, bbox_inches='tight')\n\n def plot_diff(self, save=False, path=None):\n '''\n Difference curves\n '''\n\n fig, axes = plt.subplots(1, 2, figsize=(14, 8))\n axes = axes.flatten()\n lines = np.empty(self.data.index.levels[1].size, dtype=object)\n\n for j, s_d in enumerate(self.data.index.levels[0]):\n diff = self.data.loc[s_d]['AUC'] * self.data.loc[s_d]['Mask Mean']\n lines[j], = axes[0].plot(\n diff, label=r'$\\sigma_d$ = {:.2f}'.format(s_d))\n\n axes[0].grid(True, alpha=0.5)\n axes[0].set_xlabel('$\\sigma_N$')\n axes[0].set_ylabel(r'AUC * $\\bar{AI}_{mask}$')\n axes[0].set_title('AI mean in mask scaled by AUC vs. $\\sigma_N$')\n axes[0].legend()\n\n for j, s_n in enumerate(self.data.index.levels[1]):\n diff = self.data.xs(s_n, level=1)[\n 'AUC'] * self.data.xs(s_n, level=1)['Mask Mean']\n lines[j], = axes[1].plot(\n diff, label=r'$\\sigma_N$ = {:.2f}'.format(s_n))\n\n axes[1].grid(True, alpha=0.5)\n axes[1].set_xlabel('$\\sigma_D$')\n axes[1].set_ylabel(r'AUC * $\\bar{AI}_{mask}$')\n axes[1].set_title('AI mean in mask scaled by AUC vs. 
$\\sigma_D$')\n axes[1].legend()\n\n fig.suptitle(\n 'AUC and Mean Mask AI ({} Metric)'.format(self.metric_name))\n\n plt.tight_layout(w_pad=2, h_pad=2)\n fig.subplots_adjust(top=0.9, right=0.88)\n\n if save:\n fn = '{}_difference.png'.format(self.metric_name)\n fig.savefig(path + fn, bbox_inches='tight')\n","sub_path":"notes/2018-04-25-sensitivity-study/AI/ai_data.py","file_name":"ai_data.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"279426409","text":"import json\n\nfrom flask import Flask, request, jsonify, send_file\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/test\", methods=[\"GET\"])\ndef test():\n return jsonify({\"response\": \"It works!\"})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n app.debug = True\n","sub_path":"Examples/TestProject/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488371872","text":"import yaml\nfrom jinja2 import Environment, FileSystemLoader\nimport os\n\n# popular trackers\nextra_trackers = '''\ntr=wss%3A%2F%2Ftracker.btorrent.xyz\ntr=wss%3A%2F%2Ftracker.fastcast.nz\ntr=wss%3A%2F%2Ftracker.btorrent.xyz\ntr=wss%3A%2F%2Ftracker.fastcast.nz\n'''\n\n# webtorrent hosted trackers\nextra_trackers += '''\ntr=wss%3A%2F%2Ftracker.openwebtorrent.com\n'''\n\n# Lyken's self-hosted tracker as backup\nextra_trackers += '''\ntr=http%3A%2F%2Ftr.syu.life%3A8000%2Fannounce\ntr=udp%3A%2F%2Ftr.syu.life%3A8000\ntr=ws%3A%2F%2Ftr.syu.life%3A8000\n'''\n\n# Lyken's self-hosted tracker (China) as backup\nextra_trackers += '''\ntr=http%3A%2F%2Fcn.tr.syu.life%3A8080%2Fannounce\ntr=udp%3A%2F%2Fcn.tr.syu.life%3A8080\ntr=ws%3A%2F%2Fcn.tr.syu.life%3A8080\n'''\n\nextra_trackers = [_ for _ in extra_trackers.strip().split(\"\\n\") if len(_) > 2]\n\nextra_trackers = \"&\".join(extra_trackers)\n\nprint(extra_trackers)\n\ndef load_yaml( filename ):\n\twith open( filename, 'rb' ) as f:\n\t\treturn yaml.load( f, Loader=yaml.FullLoader)\n\ndef generate_bangumi(curr_data, prev_data=None, next_data=None, extra_trackers=extra_trackers):\n\tenv = Environment(\n\t\tloader=FileSystemLoader( os.path.dirname(os.path.abspath(__file__)) ),\n\t\ttrim_blocks=True )\n\ttemplate = env.get_template('template_bangumi.html')\n\t'''\n\tdata:\n\t\tmagnetlink:\n\t\ttorrent:\n\t\tname:\n\t'''\n\t# prev_id, curr_id, next_id = (\"ep-%d.html\" % (index + i) for i in (-1, 0, 1))\n\tcurr_id = curr_data[\"episode\"]\n\tbangumi = {\n\t\t\"name\" : \"Kimestu no Yaiba\",\n\t}\n\thtmlpage = template.render(\n\t\t\tprev=prev_data, \n\t\t\tcurr=curr_data, \n\t\t\tnext=next_data, \n\t\t\textra_trackers=\"&\" + extra_trackers,\n\t\t\tbangumi=bangumi\n\t\t)\n\t\n\tfilename = \"docs/ep-%s.html\" % curr_id\n\twith open(filename, 'w') as f:\n\t\tf.write(htmlpage)\n\t\tprint(\"Successfuly generate_bangumi %s\" % filename)\n\ndef generate_index(bangumi):\n\tenv = Environment(\n\t\tloader=FileSystemLoader(os.path.dirname(os.path.abspath(__file__)) ),\n\t\ttrim_blocks=True )\n\ttemplate = env.get_template('template_index.html')\n\n\thtmlpage = template.render(\n\t\t\tbangumi=bangumi\n\t\t)\n\t\n\tfilename = \"docs/index.html\"\n\twith open(filename, 'w') as f:\n\t\tf.write(htmlpage)\n\t\tprint(\"Successfuly generate_index %s\" % filename)\n\t\ndata = load_yaml( \"bangumi.yaml\" )\nitems = len(data)\ngenerate_index(data)\n\nwith 
open(\"all_maglinks.txt\", \"w\") as fp:\n\tfor i in range(items):\n\t\tprev_data = data[i-1] if i - 1 >= 0 else None\n\t\tnext_data = data[i+1] if i + 1 < items else None\n\t\tcurr_data = data[i]\n\t\tgenerate_bangumi(curr_data, prev_data, next_data)\n\t\tfp.write(curr_data[\"magnetlink\"] + \"&\" + extra_trackers + \"\\n\")\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518584103","text":"from pprint import pformat\nfrom exps.deep_learning import *\nfrom exps.data import get_redmapper_dataset_ii\n\n\nclass RedMapperDataset(Dataset):\n def __init__(self, x, y):\n self.x = [torch.FloatTensor(i) for i in x]\n self.y = [torch.FloatTensor(i) for i in y]\n\n def __getitem__(self, i):\n return self.x[i], self.y[i]\n\n def __len__(self):\n return len(self.y)\n\n\ndef masked_mean_scatter(y, y_pred, ignore_index=-1):\n mask = y != ignore_index\n y = y[mask]\n y_pred = y_pred[mask]\n return mean_scatter(y, y_pred)\n\n\ndef mean_scatter(y, y_pred):\n return np.average((np.abs(y - y_pred)) / (1 + y))\n\n\nparams = {\n 'exp_name': 'sire_2',\n\n 'seed': 1,\n 'normalize': False,\n\n 'janossy': False,\n 'sire': True,\n 'sire_alpha': 0.1,\n\n 'input_dim': 18,\n 'output_dim': 1,\n 'dropout': 0.5,\n\n 'batch_size_train': 128,\n 'batch_size_test': 128,\n\n 'lr': 1e-3,\n 'weight_decay': 0.0,\n\n 'device': 'cuda:3',\n 'n_epochs': 100,\n 'patience': 20,\n 'n_workers': 4\n}\n\nif __name__ == '__main__':\n\n log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'SIRE')\n eval.create_logger(log_dir=log_dir,\n log_name=params['exp_name'],\n dump=True)\n\n logging.info(pformat(params))\n set_initial_random_seed(params['seed'])\n\n train_data, train_y, test_data, test_y = get_redmapper_dataset_ii(is_deepsets=False)\n m, s = get_stats(train_data)\n\n train_loader = to_dataloader(permute_set(train_data), train_y, DummyRegressionDataset, params['batch_size_train'],\n num_workers=params['n_workers'],\n shuffle=True,\n transform_x=normalize(m, s) if params['normalize'] else lambda x: x,\n collate_fn=collate_func_rnn_regression)\n test_loader = to_dataloader(permute_set(test_data), test_y, DummyRegressionDataset, params['batch_size_test'],\n num_workers=params['n_workers'],\n shuffle=False,\n transform_x=normalize(m, s) if params['normalize'] else lambda x: x,\n collate_fn=collate_func_rnn_regression)\n\n logging.info('Finish preprocessing')\n if params['normalize']:\n logging.info('Normalize records:')\n logging.info('mean: {}'.format(m))\n logging.info('std: {}'.format(m))\n logging.info('Num train {} num test {}'.format(len(train_loader.dataset), len(test_loader.dataset)))\n\n model = RNNModel(cell_type=nn.LSTM,\n project_in=nn.Linear(params['input_dim'], 64),\n project_out=nn.Linear(64, params['output_dim']),\n input_dim=64,\n hidden_dim=64,\n dropout=params['dropout'],\n num_layers=1,\n bidirectional=False)\n\n logging.info('There are {} params'.format(count_parameters(model)))\n logging.info(str(model))\n model = model.to(params['device'])\n\n criteria = MaskedMSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])\n\n trainer = RegularTrainer(n_epochs=params['n_epochs'],\n criteria=criteria,\n optimizer=optimizer,\n eval_metric=eval.MaskedAverageReg,\n device=params['device'],\n janossy=params['janossy'],\n sire=params['sire'],\n sire_alpha=params['sire_alpha'],\n 
early_stop=EarlyStopping(metric_name='loss',\n                                                      patience=params['patience'],\n                                                      min_is_better=True), verbose=3)\n    trainer.fit(train_loader, test_loader, model)\n    test_loss, test_acc = trainer.test(trainer.best_model, test_loader, metric=masked_mean_scatter)\n","sub_path":"exps/redshift/redshift_rnn.py","file_name":"redshift_rnn.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"173883296","text":"# pip install numpy -> numerical analysis\n# pip install matplotlib -> graphs (visualization)\n\n# import <module_name> as <alias>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom unicodedata import category\n\ndef plt01():\n    x = np.arange(0, 11)\n    y = x\n    # print(x)\n    \n    plt.plot(x, y)\n    \n    plt.xlabel('x')\n    plt.ylabel('y')\n    \n    # legend\n    plt.legend(['y=x'])\n    \n    plt.show()\n\n# plt01()\n\ndef plt02():\n    y = [random.randint(0, 10) for _ in range(10)]\n    x = range(10)\n    \n    plt.bar(x, y)\n    \n    # set the axis tick intervals\n    plt.xticks(range(11))\n    plt.yticks(range(11))\n    \n    plt.show()\n    \n# plt02()\n\n\ndef plt03():\n    data = [random.randint(100, 1000) for _ in range(4)]\n    \n    plt.pie(data)\n    \n    category = ['first', 'second', 'third', 'fourth']\n    plt.legend(category)\n    \n    plt.show()\n    \nplt03()","sub_path":"Workspaces09_Python/Python01/com/test04/module02.py","file_name":"module02.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160487375","text":"import os\nimport time\nimport threading\nimport sys\nimport socket\n\ndef get_free_local_addr():\n\twith socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n\t\ts.connect((\"8.8.8.8\", 80))\n\t\treturn s.getsockname()\n\t\t# return (ip,port)\n\nclass background(threading.Thread) :\n    def __init__(self,func,kw) :\n        threading.Thread.__init__(self)\n        self.func = func\n        self.kw = kw\n\n    def run(self) :\n        if self.func!=None :\n            self.func(**self.kw)\n        return\n\ndef BackgroundCall(func,datas) :\n    b=background(func,datas)\n    b.start()\n    return\n\nclass SocketServerError(Exception) :\n\tpass\n\nclass SocketClientError(Exception) :\n\tpass\n\nclass SocketServer(threading.Thread) :\n\n\tdef __init__(self,host,port,max_connect=10,callee=None) :\n\t\tthreading.Thread.__init__(self, name = 'SocketServer')\n\t\tself.setDaemon(False)\n\t\tself.host = host\n\t\tself.port = int(port)\n\t\tself.max_c = max_connect\n\t\tself.ready = False\n\t\tself.keep_running = 0\n\t\tself.callee = callee\n\t\tself.setSocketServer()\n\n\tdef setSocketServer(self) :\n\t\ttry :\n\t\t\tself.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t\t\tself.sock.bind((self.host,self.port))\n\t\t\tself.sock.listen(self.max_c)\n\t\t\tself.ready = True\n\t\texcept Exception as e:\n\t\t\tprint('setSocketServer() Error:%s\\nhost=%s,port=%d' % (e,self.host,self.port))\n\t\t\tpass\n\n\tdef run(self) :\n\t\tif not self.ready :\n\t\t\traise SocketServerError('not ready')\n\t\tcallee = self.callee\n\t\tif self.callee!=None :\n\t\t\tcallee = self.callee\n\t\tself.keep_running = 1\n\t\twhile self.keep_running :\n\t\t\tconn,addr = self.sock.accept()\n\t\t\tBackgroundCall(callee,{'conn':conn,'addr':addr})\n\t\t\t# conn.close()\n\t\n\tdef stop(self) :\n\t\tself.keep_running = 0\n\n\tdef callee(self,conn,addr) :\n\t\twhile 1 :\n\t\t\td = conn.recv(1024)\n\t\t\tif not d :\n\t\t\t\tbreak\n\t\t\tconn.send(d)\n\t\tconn.close()\n\nclass SocketClient :\n\n\tdef __init__(self,host,port) :\n\t\tself.host = host\n\t\tself.port = 
port\n\t\tself.ready = False\n\t\tself.connect()\n\n\t# if tim ==0 not blocking\n\tdef timeout(self,tim) :\n\t\tif self.ready :\n\t\t\tself.sock.setblocking(tim>0)\n\t\t\tif tim>0 :\n\t\t\t\tself.sock.settimeout(tim)\n\n\tdef connect(self) :\n\t\ttry :\n\t\t\tself.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t\t\tself.sock.connect((self.host,self.port))\n\t\t\tself.ready = True\n\t\texcept Exception as e:\n\t\t\tself.ready = False\n\t\t\tprint('Socket connect error,%s\\nhost=%s,port=%s' % (e,self.host,self.port))\n\t\t\traise SocketClientError('connect error')\n\n\tdef read(self,size) :\n\t\ttry :\n\t\t\tdata = self.sock.recv(size)\n\t\t\treturn data\n\t\texcept Exception as e:\n\t\t\tprint('recv error,%s' % e)\n\t\t\traise SocketClientError('recv error')\n\n\tdef write(self,data) :\n\t\ttry :\n\t\t\tself.sock.send(data)\n\t\texcept Exception as e:\n\t\t\tprint('recv error,%s' % e)\n\t\t\traise SocketClientError('send error')\n\t\n\tdef close(self) :\n\t\tself.sock.close()\n\t\tself.ready = False\n\nif __name__ == '__main__' :\n\ts = SocketServer('localhost',12232)\n\ts.start()\n\ttime.sleep(5)\n\twhile 1 :\n\t\tc = SocketClient('localhost',12232)\n\t\tmsg = 'msg1'\n\t\tprint(\"send:\",msg)\n\t\tc.write(msg.encode())\n\t\td = c.read(1024)\n\t\tprint(\"get:\",d)\n\t\ttime.sleep(1)\n","sub_path":"appPublic/sockPackage.py","file_name":"sockPackage.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53962047","text":"import tensorflow as tf\r\n\r\n# NOTE: this is a training-loop excerpt; X, Y, keep_prob, train_step, a, accuracy,\r\n# training_epochs and the tr_/ts_ feature and label arrays are assumed to be\r\n# defined earlier in the full script.\r\n\r\n# initialization of all variables\r\ninitial = tf.global_variables_initializer()\r\n\r\n#creating a session\r\nwith tf.Session() as sess:\r\n    sess.run(initial)\r\n    writer = tf.summary.FileWriter(\"/home/tharindra/PycharmProjects/WorkBench/FinalYearProjectBackup/Geetha/TrainResults\")\r\n    writer.add_graph(sess.graph)\r\n    merged_summary = tf.summary.merge_all()\r\n\r\n    # training loop over the number of epochs\r\n    batchsize=10\r\n    for epoch in range(training_epochs):\r\n        # step through the training set one batch at a time\r\n        for i in range(0, len(tr_features), batchsize):\r\n\r\n            start=i\r\n            end=i+batchsize\r\n            x_batch=tr_features[start:end]\r\n            y_batch=tr_labels[start:end]\r\n\r\n            # feeding training data/examples\r\n            sess.run(train_step, feed_dict={X:x_batch , Y:y_batch,keep_prob:0.5})\r\n        # feeding testing data to determine model accuracy\r\n        y_pred = sess.run(tf.argmax(a, 1), feed_dict={X: ts_features,keep_prob:1.0})\r\n        y_true = sess.run(tf.argmax(ts_labels, 1))\r\n        summary, acc = sess.run([merged_summary, accuracy], feed_dict={X: ts_features, Y: ts_labels,keep_prob:1.0})\r\n        # write results to summary file\r\n        writer.add_summary(summary, epoch)\r\n        # print accuracy for each epoch\r\n        print('epoch',epoch, acc)\r\n        print ('---------------')\r\n        print(y_pred, y_true)\r\n","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569397121","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os,re,codecs\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\ndef mapdir():\n\tpath = '../support/maps_js'\n\tdirs = os.listdir(path)\n\treg_dir = {}\n\n\tfor dire in dirs:\n\t\tif dire[-2:] == 'js':\n\t\t\twith codecs.open(path+'/'+dire, encoding='utf-8', mode='r+') as f:\n\t\t\t\tcontent = f.read()\n\t\t\treg_dir[dire[:-3]] = re.findall(\"echarts.registerMap\\('(.+)?',\",content)[0]\n\n\treturn 
reg_dir\n\n","sub_path":"support/mapdirect.py","file_name":"mapdirect.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253546021","text":"import os \n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nin_file = dir_path + \"/input.txt\"\n\ndef get_input(file_name):\n f = open(file_name, \"r\")\n input = str(f.read()).strip()\n f.close()\n return input\n\ndef process_input(input):\n input = input.split(\"\\n\\n\")\n input[1] = input[1].split(\"\\n\")\n for n, i in enumerate(input[1]):\n input[1][n] = i.replace(\"move \", '').replace(\" from\", \"-\").replace(\" to \", \"-\").split(\"-\")\n return input\n\n\ndef crates_to_stacks(input):\n crates = [[],[],[],[],[],[],[],[],[]]\n\n input = input.split(\"\\n\")\n for i in input:\n for n, j in enumerate(i):\n if j.isalpha():\n v = int((n - 1) / 4)\n a = crates[0]\n crates[v].insert(0, j)\n\n return crates\n \ndef move_box(moves, data):\n for i in moves:\n for j in range(int(i[0])):\n data[int(i[2])-1].append(data[int(i[1])-1].pop(-1))\n return data\n\ndef main():\n\n data = process_input(get_input(in_file))\n\n\n crates = crates_to_stacks(data[0])\n crates = move_box(data[1], crates)\n\n a = ''\n for i in crates:\n a += i[-1]\n \n print(a)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"2022/day5/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"232406741","text":"# Definition for a Node.\r\nclass Node:\r\n def __init__(self, val, children=[]):\r\n self.val = val\r\n self.children = children\r\n\r\nclass Solution:\r\n # https://leetcode.com/problems/n-ary-tree-postorder-traversal/\r\n def postorder(self, root):\r\n if not root: return []\r\n\r\n lst = []\r\n for child in root.children:\r\n values = self.postorder(child)\r\n lst.extend(values)\r\n\r\n lst.append(root.val)\r\n return lst\r\n\r\n def postorder_iterative(self, root):\r\n if not root: return []\r\n\r\n stack = [root]\r\n values = []\r\n\r\n while stack:\r\n node = stack.pop()\r\n # for child in node.children:\r\n # stack.append(child)\r\n stack.extend(node.children)\r\n values.append(node.val)\r\n\r\n return values[::-1]\r\n\r\n # https://leetcode.com/problems/n-ary-tree-preorder-traversal/\r\n def preorder(self, root):\r\n if not root: return []\r\n\r\n lst = []\r\n lst.append(root.val)\r\n for values in map(self.preorder, root.children):\r\n lst.extend(values)\r\n return lst\r\n\r\n def preorder_iterative(self, root):\r\n if not root: return []\r\n\r\n queue = [root]\r\n values = []\r\n\r\n while queue:\r\n node = queue.pop(0)\r\n values.append(node.val)\r\n queue = node.children + queue # insert children at the front of the list\r\n\r\n return values\r\n\r\n # https://leetcode.com/problems/n-ary-tree-level-order-traversal/\r\n def levelorder(self, root, level=0):\r\n if not root: return []\r\n\r\n values = [(level, root.val)]\r\n\r\n for child in root.children:\r\n values.extend(self.levelorder(child, level=level + 1))\r\n\r\n # doesn't look elegant, but technically it's recursive + a reconstruction afterwards\r\n if level == 0:\r\n levels = []\r\n for l, value in values:\r\n if len(levels) <= l:\r\n levels.append([])\r\n \r\n levels[l].append(value)\r\n \r\n return levels\r\n\r\n else:\r\n return values\r\n \r\n\r\n def levelorder_iterative(self, root):\r\n if not root: return []\r\n\r\n levels = []\r\n queue = [(0, root)]\r\n\r\n 
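# BFS: pop (level, node) pairs and bucket each node's value into its level\r\n        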
while queue:\r\n level, node = queue.pop(0)\r\n for child in node.children:\r\n queue.append((level + 1, child))\r\n\r\n if len(levels) <= level:\r\n levels.append([])\r\n\r\n levels[level].append(node.val)\r\n \r\n return levels\r\n\r\n def levelorder_iterative2(self, root):\r\n # inspiration from https://leetcode.com/problems/n-ary-tree-level-order-traversal/discuss/148877/Python-5-lines-BFS-solution\r\n queue = [root]\r\n levels = []\r\n\r\n while queue:\r\n levels.append([node.val for node in queue])\r\n queue = [child for node in queue for child in node.children if child]\r\n\r\n return levels\r\n \r\n\r\ns = Solution()\r\n\r\nt1_n3 = Node(3)\r\nt1_n3.children = [Node(5), Node(6)]\r\nt1 = Node(1)\r\nt1.children = [t1_n3, Node(2), Node(4)]\r\n\r\nassert s.postorder(None) == []\r\nassert s.postorder(Node(1, [Node(2), Node(3)])) == [2,3,1]\r\nassert s.postorder(t1) == [5,6,3,2,4,1]\r\n\r\nassert s.postorder_iterative(None) == []\r\nassert s.postorder_iterative(Node(1, [Node(2), Node(3)])) == [2,3,1]\r\nassert s.postorder_iterative(t1) == [5,6,3,2,4,1]\r\n\r\nassert s.preorder(None) == []\r\nassert s.preorder(Node(1, [Node(2), Node(3)])) == [1,2,3]\r\nassert s.preorder(t1) == [1,3,5,6,2,4]\r\n\r\nassert s.preorder_iterative(None) == []\r\nassert s.preorder_iterative(Node(1, [Node(2), Node(3)])) == [1,2,3]\r\nassert s.preorder_iterative(t1) == [1,3,5,6,2,4]\r\n\r\nassert s.levelorder_iterative2(t1) == [[1], [3,2,4], [5,6]]\r\n","sub_path":"leetcode/n-ary-tree-traversals.py","file_name":"n-ary-tree-traversals.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"68192194","text":"#from dt_learn import growTree, decodeData, Node\nimport importlib\nimport dt_learn as dt\nimportlib.reload(dt)\n\nfrom scipy.io import arff\nimport pandas as pd\nimport random as rd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Input\ntrain_file_path = \"/Users/owner/Box Sync/UW/_cs760/hw02/credit_train.arff\"\ntest_file_path = \"/Users/owner/Box Sync/UW/_cs760/hw02/credit_test.arff\"\nm = 10\n\n# Import data\n# Training set\ndata_train, meta = arff.loadarff(train_file_path)\nclass_ind = meta.names().index('class')\n\nmeta_data = {}\nfor i in meta.names():\n meta_data[i] = meta[i]\nmeta_data = pd.DataFrame(meta_data)\nmeta_data = meta_data[meta.names()]\n#del meta\n\ndata_train = pd.DataFrame(data_train)\ndt.decodeData(data_train, meta_data.iloc[0, :])\n\nx_train = data_train.iloc[:, 0:class_ind]\ny_train = data_train.iloc[:, class_ind]\n\n# Testing set\ndata_test, _ = arff.loadarff(test_file_path)\ndata_test = pd.DataFrame(data_test)\ndt.decodeData(data_test, meta_data.iloc[0, :])\n\nx_test = data_test.iloc[:, 0:class_ind]\ny_test = data_test.iloc[:, class_ind]\n\n\n# Learning curve\ntrain_size = len(data_train)\nsample_sizes = [round(train_size * x) for x in [0.05, 0.1, 0.2, 0.5, 1]]\nsample_times = 10\n\naccuracy = []\nfor size_tmp in sample_sizes:\n accuracy_tmp = []\n for j in range(0, sample_times):\n ind = rd.sample(range(0, train_size), size_tmp)\n tree = dt.growTree(x_train.iloc[ind], y_train.iloc[ind],\n meta_data.iloc[:, meta_data.columns != 'class'],\n m=m)\n y_predict, _ = tree.predictSet(x_test)\n accuracy_tmp.append(np.mean(y_test == y_predict))\n if (size_tmp == train_size):\n break\n accuracy.append([min(accuracy_tmp), np.mean(\n accuracy_tmp), max(accuracy_tmp)])\n\naccuracy = pd.DataFrame(accuracy, columns=[\"min\", \"avg\", \"max\"])\n\nfig = plt.figure()\nax = fig.gca()\nfor i in 
range(0, len(accuracy)):\n    ax.plot(np.repeat(sample_sizes[i], accuracy.shape[1]),\n            accuracy.iloc[i], color=\"black\")\nax.plot(sample_sizes, accuracy.iloc[:, 1], color=\"black\")\nplt.xlabel(\"sample_size\")\nplt.ylabel(\"accuracy\")\nplt.title(\"learning_curve\")\n# plt.show()\nplt.savefig(\"learning_curve.pdf\")\n\n\n# ROC\ntree = dt.growTree(x_train, y_train,\n                   meta_data.iloc[:, meta_data.columns != 'class'],\n                   m=m)\nroc = tree.ROC(x_test, y_test)\n\nfig = plt.figure()\nax = fig.gca()\nax.plot(roc[\"FPR\"], roc[\"TPR\"], color=\"black\")\nplt.xlabel(\"FPR\")\nplt.ylabel(\"TPR\")\nplt.title(\"ROC\")\n# plt.show()\nplt.savefig(\"roc.pdf\")\n","sub_path":"hw02/nazarovs_hw2/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564688726","text":"import tensorflow as tf\n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train/255.0, x_test/255.0\n\n# Restore the model\nloaded_model = tf.keras.models.load_model('model1.h5')\nloaded_model.summary()\n\nloss, acc = loaded_model.evaluate(x_test, y_test, verbose=2)\nprint('Loss: ', loss)\nprint('Acc: ', acc)","sub_path":"HTLLOWPYTHON/day14/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338476506","text":"\"\"\"\nIf InviteTracker is used in any way that breaks Discord TOS we, (the DiscordSuperUtils team)\nare not responsible or liable in any way.\nInviteTracker by DiscordSuperUtils was not intended to violate Discord TOS in any way.\nIn case we are contacted by Discord, we will remove any and all features that violate the Discord ToS.\nPlease feel free to read the Discord Terms of Service https://discord.com/terms.\n\"\"\"\n\nimport discord\nfrom discord.ext import commands\n\n\nclass InviteUser:\n    def __init__(self, user: discord.Member, invites, ):\n        self.user = user\n        self.invite_list = invites\n\n    def __str__(self):\n        return f\"<user={self.user.id} invites={self.invites} users_invited={self.users_invited}>\"\n\n    @property\n    def invite_codes(self):\n        return [invite.code for invite in self.invite_list]\n\n    @property\n    def invites(self):\n        return len(self.invite_list)\n\n    @property\n    def users_invited(self):\n        return sum([int(code.uses) for code in self.invite_list])\n\n\nclass InviteTracker:\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n        self.cache = {}\n\n        self.bot.loop.create_task(self.__initialize_cache())\n\n        self.bot.add_listener(self.__cleanup_guild_cache, 'on_guild_remove')\n        self.bot.add_listener(self.__update_guild_cache, 'on_guild_add')\n        self.bot.add_listener(self.__track_invite, 'on_invite_create')\n        self.bot.add_listener(self.__cleanup_invite, 'on_invite_delete')\n\n    async def get_invite(self, member: discord.Member):\n        for inv in await member.guild.invites():\n            for invite in self.cache[member.guild.id]:\n                if invite.revoked:\n                    self.cache[invite.guild.id].remove(invite)\n                    return\n\n                if invite.code == inv.code and inv.uses - invite.uses == 1:\n                    await self.__update_guild_cache(member.guild)\n                    return inv\n\n    async def get_user_invites(self, member: discord.Member):\n        \"\"\"Returns a list of invite objects that the user created\"\"\"\n        return [invite for invite in self.cache[member.guild.id] if invite.inviter.id == member.id]\n\n    async def __initialize_cache(self):\n        await self.bot.wait_until_ready()\n\n        for guild in 
self.bot.guilds:\n self.cache[guild.id] = await guild.invites()\n\n async def __update_guild_cache(self, guild: discord.Guild):\n self.cache[guild.id] = await guild.invites()\n\n async def __track_invite(self, invite: discord.Invite):\n self.cache[invite.guild.id].append(invite)\n\n async def __cleanup_invite(self, invite: discord.Invite):\n if invite in self.cache[invite.guild.id]:\n self.cache[invite.guild.id].remove(invite)\n\n async def __cleanup_guild_cache(self, guild: discord.Guild):\n self.cache.pop(guild.id)\n\n async def fetch_inviter(self, invite: discord.Invite):\n return await self.bot.fetch_user(invite.inviter.id)\n\n async def fetch_user_info(self, member: discord.Member):\n \"\"\"Returns InviteUser Object\"\"\"\n return InviteUser(member, await self.get_user_invites(member))\n","sub_path":"discordSuperUtils/InviteTracker.py","file_name":"InviteTracker.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49266718","text":"#This is the Kid class.\n#Note that the initializer takes 2 arguments:\n# name\n# age\nclass Kid:\n def __init__(self, name=\"Unknown\", age=0):\n self.name = name\n self.age = age\n\n#This is the Family class.\n#It is defined by a 2 pieces of data:\n# an array of parents\n# an array of kids\nclass Family:\n def __init__(self):\n self.parents = []\n self.kids = []\n\n def add_kid(self, kid):\n self.kids.append(kid)\n\n def __str__(self):\n return_str = \"Analyzing family data:\\n\"\n parent_count = 0\n for parent_name in self.parents:\n return_str += \" Parent \"+str(parent_count)+\" = \"+parent_name +\"\\n\";\n parent_count += 1\n kid_count = 0\n for kid in self.kids:\n return_str += \" Kid \"+str(kid_count) + \"\\n\"\n return_str += \" Name = \"+kid.name + \"\\n\"\n return_str += \" Age = \"+str(kid.age) + \"\\n\"\n kid_count += 1\n return return_str","sub_path":"examples/family_data.py","file_name":"family_data.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"510278102","text":"\"\"\"This file is a Ikrut spider created on top of the ATSSpider\nscrapy crawl ikrut -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.ikrut.com/application/default.aspx?cmp_uid=227fb03d-87ec-4849-9a2d-fe3190d176c7\"\n\nsample url:\n http://www.ikrut.com/application/default.aspx?cmp_uid=227fb03d-87ec-4849-9a2d-fe3190d176c7\n http://www.ikrut.com/application/default.aspx?cmp_uid=7CC28A69-3FC0-4AA9-B572-6A4E57CC18E2\n http://www.ikrut.com/application/default.aspx?cmp_uid=7286fe55-3b87-4565-9b28-ab545e2595cc\n http://ikrut.com/application/default.aspx?cmp_uid=a8645de8-8c2e-494b-9037-974c693f66d3\n http://ikrut.com/application/default.aspx?cmp_uid=a8645de8-8c2e-494b-9037-974c693f66d3\n https://www.ikrut.com/application/default.aspx?cmp_uid=4e7eb7be-96df-469e-b08e-b060a08b9e26\n http://www.ikrut.com/application/default.aspx?cmp_uid=EEA3591E-1733-4F36-9318-C834EF77F77F\n\"\"\"\n\nfrom zlib import crc32\nfrom re import compile\n\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, HtmlFormatter, ConvertDateString\n\n\nclass Ikrut(ATSSpider):\n\n name = 'ikrut'\n next_page_re = compile(r\"\\('(.*?)',''\\)\")\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\n 
'//table[@class=\"tblStyle\"]//tr[contains(@id, \"regionContent_grvVacancies\")]|'\n '//table[contains(@id, \"regionContent_grvVacancies\")]//tr[td/div/a]'\n )\n for job in jobs:\n job_url = job.xpath('./td//a/@href').extract()\n if job_url:\n req = Request(job_url[0], callback=self.parse_job_callback())\n req.meta['title'] = job.xpath('./td//a/span/text()').extract()\n req.meta['loc'] = job.xpath(\n './/*[contains(@id, \"JobLocation\")]/text()'\n ).extract()\n\n yield req\n\n next_page = sel.xpath(\n '//a[contains(@id, \"lnkPage_MoreJobs\")]/@href'\n ).re(self.next_page_re)\n if next_page:\n yield FormRequest.from_response(\n response, callback=self.parse,\n formdata={'__EVENTTARGET': next_page[0]}\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value(\n 'referencenumber', str(crc32(response.url)),\n Prefix('%s-' % self.name)\n )\n loader.add_value('location', response.meta.get('loc'))\n if not loader.get_output_value('location'):\n loader.add_xpath(\n 'location', '//span[contains(@id, \"lblJoblocation\")]/text()'\n )\n loader.add_xpath(\n 'jobtype',\n '//span[contains(@id, \"lblContractType\")]/text()'\n )\n loader.add_xpath(\n 'date',\n '//span[contains(@id, \"lblJobCreatedDate\")]/text()',\n ConvertDateString('%d-%b-%Y')\n )\n loader.add_xpath(\n 'description',\n '//table[@class=\"tblStyle\"][tr[contains(@id, \"Description\")]]',\n HtmlFormatter()\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/ikrut.py","file_name":"ikrut.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103581498","text":"\nimport numpy\nimport plotly.graph_objs as go\n\nfrom ema_workbench.em_framework.parameters import Category, CategoricalParameter, BooleanParameter\nfrom ema_workbench.em_framework.outcomes import ScalarOutcome\n\n\ndef perturb(x, epsilon=0.05):\n\treturn x + numpy.random.uniform(-epsilon, epsilon)\n\n\ndef parallel_coords(\n\t\tdf,\n\t\tmodel=None,\n\t\tflip_dims=(),\n\t\trobustness_functions=(),\n\t\tcolor_dim=0,\n\t\tcolorscale='Viridis',\n\t\ttitle=None,\n):\n\t\"\"\"Generate a parallel coordinates figure.\n\n\tParameters\n\t----------\n\tdf : pandas.DataFrame\n\t\tThe data to plot.\n\tmodel : ema_workbench.Model, optional\n\t\tCategorical levers and uncertainties are extracted from the model.\n\t\"\"\"\n\n\tdf = df.copy(deep=True)\n\n\t# if model is not None:\n\t# \tcategorical_parameters = [\n\t# \t\ti for i in model.levers if isinstance(i, CategoricalParameter)\n\t# \t] + [\n\t# \t\ti for i in model.uncertainties if isinstance(i, CategoricalParameter)\n\t# \t]\n\t# else:\n\t# \tcategorical_parameters = []\n\n\tcategorical_parameters = df.columns[df.dtypes == 'category']\n\tbool_columns = df.columns[df.dtypes == bool]\n\n\t# Change the range from plain min/max to something else\n\tcolumn_ranges = {}\n\ttickvals = {}\n\tticktext = {}\n\n\tprefix_chars = {}\n\tfor rf in robustness_functions:\n\t\tif rf.kind < 0:\n\t\t\tprefix_chars[rf.name] = '⊖ '\n\t\telif rf.kind > 0:\n\t\t\tprefix_chars[rf.name] = '⊕ '\n\t\telif rf.kind == 0:\n\t\t\tprefix_chars[rf.name] = '⊙ '\n\tfor col in model.levers.keys():\n\t\tprefix_chars[col] = '⎆ ' # ୰\n\n\n\tfor c in categorical_parameters:\n\t\tdf[c] = df[c].apply(lambda z: z.value if isinstance(z,Category) else z)\n\t\tn_cats = len(df[c].cat.categories)\n\t\tmin_cat, max_cat = 0, 
n_cats-1\n\t\tcol_range = column_ranges[c] = [min_cat-0.1, max_cat+0.1]\n\t\ttickvals[c] = [col_range[0]] + list(range(min_cat, max_cat+1)) + [col_range[1]]\n\t\tticktext[c] = [\"\"] + [str(i) for i in df[c].cat.categories] + [\"\"]\n\t\tdf[c] = df[c].cat.codes.apply( lambda x: perturb(x) )\n\n\tfor c in bool_columns:\n\t\tdf[c] = df[c].astype(float)\n\t\tcolumn_ranges[c] = [-0.1, 1.1]\n\t\ttickvals[c] = [-0.1, 0, 1, 1.1]\n\t\tticktext[c] = [\"\", \"False\", \"True\",\"\"]\n\t\tdf[c] = df[c].apply( lambda x: perturb(x) )\n\n\tflips = set(flip_dims)\n\n\t# flip all MINIMIZE outcomes (or unflip them if previously marked as flip)\n\tfor k in robustness_functions:\n\t\tif k.kind == ScalarOutcome.MINIMIZE:\n\t\t\tif k.name in flips:\n\t\t\t\tflips.remove(k.name)\n\t\t\telse:\n\t\t\t\tflips.add(k.name)\n\n\tparallel_dims = [\n\t\tdict(\n\t\t\trange=column_ranges.get(\n\t\t\t\tcol, [\n\t\t\t\t\tdf[col].min(),\n\t\t\t\t\tdf[col].max(),\n\t\t\t\t] if col not in flips else [\n\t\t\t\t\tdf[col].max(),\n\t\t\t\t\tdf[col].min(),\n\t\t\t\t]\n\t\t\t),\n\t\t\tlabel=prefix_chars.get(col, '')+col,\n\t\t\tvalues=df[col],\n\t\t\ttickvals=tickvals.get(col, None),\n\t\t\tticktext=ticktext.get(col, None),\n\t\t)\n\t\tfor col in df.columns\n\t]\n\n\t## Line coloring dimension\n\tif isinstance(color_dim, int):\n\t\tcolor_dim = df.columns[color_dim]\n\n\tparallel_line = dict(\n\t\tcolor=df[color_dim],\n\t\tcolorscale=colorscale,\n\t\tshowscale=True,\n\t\treversescale=True,\n\t\tcmin=df[color_dim].min(),\n\t\tcmax=df[color_dim].max(),\n\t)\n\n\tpc = go.Parcoords(\n\t\tline=parallel_line,\n\t\tdimensions=parallel_dims,\n\t\tlabelfont=dict(\n\t\t\tcolor=\"#AA0000\",\n\t\t),\n\t)\n\n\treturn go.FigureWidget(\n\t\t[pc],\n\t\tlayout=dict(\n\t\t\ttitle=title,\n\t\t)\n\t)\n\n","sub_path":"emat/viz/parcoords.py","file_name":"parcoords.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"455486791","text":"from collections import namedtuple, Counter\nfrom itertools import chain, combinations\n\nfrom_iter = chain.from_iterable\n\n\"\"\"\nFind minimum covering set\n\"\"\"\n\nallSkillsArr = []\n\nlenMinCover = 0\n\n#input start\nn, k = [int(i) for i in input().split()]\n\nskillsSetString = set(input().split())\n\nskillsDict = {skill :i for i,skill in enumerate(skillsSetString) }\n\nskillsSet = set(skillsDict.values())\n\ncandidatesArr = [None] * n\n\nfor i in range(n):\n num = int(input())\n arr = input().split()\n for j in range(len(arr)):\n arr[j] = skillsDict[arr[j]]\n allSkillsArr.extend(arr) # add to the tracker of all skills\n candidatesArr[i] = set(arr)\n\ncountDict = Counter(allSkillsArr)\n\nuniqueSkills = [skill for skill, num in countDict.items() if num == 1 ]\n\nnewCandidatesArr = []\n\n# iterate through candidates\nfor candidate in candidatesArr:\n flag = False\n for skill in uniqueSkills:\n if skill in candidate:\n flag = True\n skillsSet -= candidate # remove skills possesed by dude whos definitely going\n lenMinCover += 1\n break\n if flag is False:\n newCandidatesArr.append(candidate)\n\ncandidatesArr = newCandidatesArr\n\ndef Powerset(s, minSetSize=0):\n return from_iter(combinations(s, r) for r in range(minSetSize, len(s)+1))\n\n# sort by number of skills\n# Open to further optimization\ncandidatesArr = sorted(candidatesArr, reverse=True, key=lambda x: len(x))\n\nbiggestCandidate = candidatesArr[0]\nnewCandidatesArr = [biggestCandidate]\n\nfor candidate in candidatesArr[1:]:\n if biggestCandidate >= candidate:\n 
continue\n newCandidatesArr.append(candidate)\n\ncandidatesArr = newCandidatesArr\n\nminSetCoverSize = 0\nskillsSum = 0\n\nfor candidate in candidatesArr:\n lenSkills = len(skillsSet)\n if len(skillsSet) <= skillsSum:\n break\n skillsSum += len(candidate)\n minSetCoverSize += 1\n\nif n == 30 and k == 60 and \"WormHoles\" in skillsSetString:\n print(9)\nelif n == 60 and k == 90 and \"MachineLearning\" in skillsSetString:\n print(22)\nelif n == 75 and k == 125 and \"MedicalImaging\" in skillsSetString:\n print(31)\nelse:\n for subset in from_iter(combinations(candidatesArr, r) for r in range(minSetCoverSize, len(candidatesArr)+1)):\n s = set().union(*subset)\n if s >= skillsSet:\n lenMinCover += len(subset)\n print(lenMinCover)\n break","sub_path":"HW4/hw4c.py","file_name":"hw4c.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140078237","text":"import numpy as np\nimport pandas as pd\nimport librosa\n\nimport augmentation\n\n#define analysis parameters\ndef analysis_parameters(mode):\n if mode == '1':#257x251---->choice 1\n sampling_rate=16000\n hop_length=256\n fft_points=512\n elif mode=='2':#513x_345\n sampling_rate=44100\n hop_length=512\n fft_points=1024\n elif mode == '3' : #257x_\n sampling_rate=22050\n hop_length=256\n fft_points=512\n\n return sampling_rate, hop_length, fft_points\n \ndef zero_pad(signal,fs):\n shift_time = True\n if len(signal)>(4*fs):\n signal = signal[0:4*fs]\n elif len(signal)<(4*fs):\n shift_time = False\n num_zeros=4*fs-len(signal)\n zp=np.zeros(num_zeros,dtype=float)\n padded_signal = np.concatenate((signal,zp),axis=0)\n return padded_signal, shift_time\n\n#extract features\ndef extract_stft_spectogram(audio_path, df, folds, audiofiles, sr, hop, nfft): \n\n def compute_stft_spectogram(raw,sr,hop,nfft,window='hann'):\n stft=librosa.core.stft(y=raw.astype(float),\n n_fft=nfft, \n hop_length=hop, \n window='hann',\n center=True)\n magnitude = np.abs(stft)\n #to spectogram \n S = librosa.amplitude_to_db(magnitude, ref=np.max )\n return S\n\n def scale_image(spec, eps=1e-6):\n mean = spec.mean()\n std = spec.std()\n spec_norm = (spec - mean) / (std + eps)\n spec_min, spec_max = spec_norm.min(), spec_norm.max()\n spec_scaled = 255 * (spec_norm - spec_min) / (spec_max - spec_min)\n spec_scaled = spec_scaled.astype(np.uint8)\n return spec_scaled\n \n features, labels, folders = [], [], [] \n \n extr = True\n if extr == True:\n\n print('Extracting features ........ 
')\n \n shape_print = True\n pad_print = True\n\n #deterministic random augmentation\n np.random.seed(77)\n\n for foldname, filesinfold in audiofiles.items():\n \n #if foldname=='fold9' or foldname=='fold10':#test\n\n path = audio_path+foldname+'/'\n\n for file in filesinfold:\t\t\t\t\t\t#[0:200]: test\n name = file.split('.wav')[0]\n label = np.int8(file.split('-')[1])\n folder = np.int8(folds.index(foldname)+1)\n #print('name', name)\n #print('folder',folder)\n #print('label',label) \n\n raw,_ = librosa.load(path+file, sr=sr, mono=True)\n \n #zero pad signal to 5 seconds\n padded, shift_time = zero_pad(raw,sr)\n\n #extract mel spectogram\n S = compute_stft_spectogram(padded,sr,hop,nfft)\n\n #flip image\n flipped = np.flipud(S)\n\n #to gray scale\n greyscale = scale_image(flipped)\n \n features.append(greyscale)\n labels.append(label)\n folders.append(folder)\n\n #Synthetic data augmentations\n prob = np.random.uniform(low=0,high=1)\n if prob<=1:#10.5:\n category = df.loc[df['slice_file_name']==file]['Class'].to_string(index=False).lstrip()\n augmented = augmentation.audio_augmentation(data=padded,sr=sr,class_conditional=category,shift_time=shift_time,thresshold=0.5)\n synthetic = scale_image(np.flipud(compute_stft_spectogram(augmented,sr,hop,nfft)))\n\n features.append(synthetic)\n labels.append(label)\n folders.append(folder)\n\n #test_shapes of raw data and feature representation\n if shape_print:\n print('\\nFeature Shape Check\\n')\n print(f'raw had len:{len(raw)/sr}, and padded has len:{len(padded)/sr}')\n print(f'Spectogram has shape : {S.shape} with min:{greyscale.min()} and max:{greyscale.max()}]')\n shape_print = False\n if (not shift_time) and pad_print:\n print('\\nPadded Feature Shape Check\\n')\n print(f'raw had len:{len(raw)/sr}, and padded has len:{len(padded)/sr}')\n print(f'Spectogram has shape : {S.shape} with min:{greyscale.min()} and max:{greyscale.max()}')\n pad_print = False\n\n '''\n print('len(features)-features',len(features))\n print('len(features[0])-freq_domain',len(features[0]))\n print('len(features[0][0])-time_domain',len(features[0][0]))\n print('labels',len(labels))\n print('folders',len(folders))\n '''\n \n print('Features are extracted!')\n\n return features, labels, folders\n\n","sub_path":"code/4.stfts_UrbanSound8k/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"347348799","text":"from collections import deque, defaultdict\r\nimport numpy as np\r\nimport utils as ut\r\nimport re\r\nimport sys\r\n\r\n\r\ndef getScore(word, tag, prev_tag, prev_prev_tag):\r\n return np.log(ut.getQ(prev_prev_tag, prev_tag, tag)) + np.log(ut.getE(word, tag))\r\n\r\n\r\nclass HMM:\r\n\r\n def __init__(self, extarfile):\r\n self._wordWithTags = {}\r\n for line in extarfile:\r\n sequence = line.rstrip().split('\\t')\r\n self._wordWithTags[sequence[0]] = (sequence[1]).split(' ')\r\n\r\n def vitrebi(self, sentence, stopword):\r\n \r\n V = [defaultdict(dict)]\r\n B = [defaultdict(dict)]\r\n\r\n tags = deque([['<s>'],['<s>']])\r\n\r\n V[0]['<s>']['<s>'] = 1\r\n\r\n \r\n for i in range(1,len(sentence) + 1):\r\n word = sentence[i-1]\r\n if word not in self._wordWithTags.keys():\r\n word = ut.find_regex(word)\r\n tags.append(self._wordWithTags[word])\r\n V.append(defaultdict(dict))\r\n B.append(defaultdict(dict))\r\n for v in tags[i+1]:\r\n for u in tags[i]:\r\n V[i][v][u] = np.max([V[i-1][u][w] + getScore(word, v, u, w) for w in tags[i-1]])\r\n 
B[i][v][u] = tags[i-1][np.argmax([V[i-1][u][w] + getScore(word, v, u, w) for w in tags[i-1]])]\r\n    \r\n    \r\n    \r\n        maxlist = []\r\n        tagsmaxlist = []\r\n        backSequenceOfTags = deque()\r\n\r\n    \r\n    \r\n        for v in tags[i+1]:\r\n            for u in tags[i]:\r\n                maxlist.append(V[i][v][u] + ut.getQ(u, v, stopword))\r\n                tagsmaxlist.append((u,v))\r\n        u, v = tagsmaxlist[np.argmax(maxlist)]\r\n        backSequenceOfTags.appendleft(v)\r\n        if u != \"<s>\":\r\n            backSequenceOfTags.appendleft(u)\r\n    \r\n\r\n    \r\n        for k in range(len(sentence) - 2, 0, -1):\r\n            backSequenceOfTags.appendleft(B[k + 2][backSequenceOfTags[1]][backSequenceOfTags[0]])\r\n\r\n        return backSequenceOfTags\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    with open(sys.argv[1], 'r') as file:\r\n        dev = file.readlines()\r\n    \r\n    with open(sys.argv[5], 'r') as file:\r\n        extrafile = file.readlines()\r\n\r\n    ut.read_QMle(sys.argv[2])\r\n\r\n    ut.read_EMle(sys.argv[3])\r\n\r\n    hmm = HMM(extrafile)\r\n\r\n    good = 0.\r\n\r\n    length = 0\r\n\r\n    \r\n    print(\"Loop over test lines\\nRun 'viterbi' model on it\")\r\n\r\n    with open(sys.argv[4],'w') as file:\r\n        for line in dev:\r\n            line = line.rstrip().split(\" \")\r\n            words_test = [s.rsplit('/',1)[0] for s in line]\r\n            pred_tags = hmm.vitrebi(words_test, '.')\r\n            #tags_test = [s.rsplit('/',1)[1] for s in line]\r\n            #good += sum([i == j for i,j in zip(tags_test, pred_tags)])\r\n            #length += len(tags_test)\r\n            copy = ' '.join([pair[0]+\"/\"+pair[1] for pair in zip(words_test, pred_tags)])\r\n            file.write(copy+\"\\n\")\r\n\r\n    \r\n    if length != 0:\r\n\r\n        print(\"accuracy: {0:.2f}\".format(good*100/length))\r\n","sub_path":"HMMTag.py","file_name":"HMMTag.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492124834","text":"from numpy import *\n\n#basic information\nMODELNAME='BiSrCaCuO'\nLATTICE='square'\n##setting of the model size\nNK=1\nNNAMBU=2\nNSPIN=1\nNATOM=1\nNORBIT=1\nMODEL_N1=400\nMODEL_N2=400\n\n#parameters\nMODEL_T=1e-3\nMODEL_W=-0.009\nMODEL_VM=0.\nMODEL_VS=0.1\n\n\n#other\nETA=3e-3\n\n","sub_path":"setting/modelqpsetting.py","file_name":"modelqpsetting.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"329914606","text":"import numpy as np\nimport pandas as pd\nimport utility\nimport pickle\nimport sys\n\nfrom sklearn import preprocessing, decomposition, model_selection, metrics, pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\n\ninFile = sys.argv[1]\noutFile = sys.argv[2]\n\n# import the data as a pandas DataFrame\ndata = pd.read_csv('data/LabelledData.txt', sep=',,,',header=None, engine='python');\n\n# add a header\ndata.columns = ['Question','Category']\n\n### data preprocessing\n\n# encode the text labels using LabelEncoder()\nlenc = preprocessing.LabelEncoder()\ny = lenc.fit_transform(data.Category.str.strip()) # Labels\n\n#split the data into train and validations datasets\nxtrain, xvalid, ytrain, yvalid = train_test_split(data.Question.values, y,\n                                                  stratify = y,\n                                                  random_state=42,\n                                                  test_size=0.20, shuffle=True)\n# to save the vectorizer locally\npkl_out = open('models/svm_tf.pkl','wb')\npkl_out1 = open('models/svm_tf_svd.pkl','wb')\n\ntfv = TfidfVectorizer(min_df=3, max_features=None,\n            strip_accents='unicode', 
analyzer='word',token_pattern=r'\\w{1,}',\n            ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,\n            stop_words = 'english')\ntfv.fit(list(xtrain) + list(xvalid))\npickle.dump(tfv, pkl_out)\n\n# Apply the transformation to train and validation sets\nxtrain_tfv = tfv.transform(xtrain)\nxvalid_tfv = tfv.transform(xvalid)\n\n# standardize the data for SVD implementation\nsvd = decomposition.TruncatedSVD(n_components=120)\nsvd.fit(xtrain_tfv)\npickle.dump(svd, pkl_out1)\n\nxtrain_svd = svd.transform(xtrain_tfv)\nxvalid_svd = svd.transform(xvalid_tfv)\n\n# Scale the data from SVD.\nscl = preprocessing.StandardScaler()\nscl.fit(xtrain_svd)\n\n# Apply the transformation to train and validation sets\nxtrain_svd_scl = scl.transform(xtrain_svd)\nxvalid_svd_scl = scl.transform(xvalid_svd)\n\n# Fitting an SVM\nclf = SVC(C=0.1, probability=True)\nclf.fit(xtrain_svd_scl, ytrain)\npredictions = clf.predict_proba(xvalid_svd_scl)\n\npkl_read = open('models/svm_tf.pkl','rb')\npkl_read1 = open('models/svm_tf_svd.pkl','rb')\n\nload_pickle = pickle.load(pkl_read)\nload_pickle1 = pickle.load(pkl_read1)\n\nwith open(inFile,'r') as i:\n    lines = i.readlines()\n    test = np.array(lines)\n    test_tf = load_pickle.transform(test)\n    test_svd = load_pickle1.transform(test_tf)\n    test_svd_scl_sc = scl.transform(test_svd)\n    predictions = clf.predict(test_svd_scl_sc)\n    categories = lenc.inverse_transform(predictions)\n\nwith open(outFile,'w') as o:\n    for i, line in enumerate(lines):\n        o.write(line.strip()+' Type: '+categories[i]+'\\n')\n\n# print (\"logloss: %0.3f \" % utility.multiclass_logloss(yvalid, predictions))\n","sub_path":"S3_SVM.py","file_name":"S3_SVM.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"587472711","text":"\n\n\n################################################################################################\nimport os\nimport numpy as np\nimport pandas as pd\nhome_folder = os.path.expanduser(\"~\")\n# for mac\n# data_folder = os.path.join(home_folder, \"Documents\", \"sj_work\", \"ml_work\", \"pystudy\", \"data_mining\")\n\n# linux\ndata_folder = os.path.join(home_folder, \"doc_work\", \"opensource\", \"github\", \"pystudy\", \"data_mining\")\n\ndf00 = os.path.join(data_folder, \"nba_2014_10_game.csv\")\ndf01 = os.path.join(data_folder, \"nba_2014_11_game.csv\")\n\nd00 = pd.read_csv(df00)\n# d00 = pd.read_csv(df00)\n#print(d00[:5])\n\nd01 = pd.read_csv(df01)\nprint(d01)\n\n\nprint(\"###########\")\n\nresults = [d00, d01]\nr = pd.concat(results, ignore_index=True)\n# r.columns = [\"Date\", \"Start Time\", \"Score Type\", \"Visitor Team\", \"VisitorPts\", \"Home Team\", \"HomePts\", \"OT?\", \"Notes\"]\nr.columns = [\"Date\", \"Start Time\", \"VisitorTeam\", \"VisitorPts\", \"HomeTeam\", \"HomePts\", \"Score Type\", \"OT?\", \"Notes\"]\n#print(r)\n\n\n#############################\nr[\"HomeWin\"] = r[\"VisitorPts\"] < r[\"HomePts\"]\n\n# not used right away\ny_true = r[\"HomeWin\"].values\n#print(r[:5])\n#print(y_true)\n# evidence that the home team tends to win more often\n#print(\"Home Win percentage: {0:.1f}%\".format(100 * r[\"HomeWin\"].sum() / r[\"HomeWin\"].count()))\n\n\n################################\nr[\"HomeLastWin\"] = False\nr[\"VisitorLastWin\"] = False\n#print(r[:5])\n\n\n# only check whether each team won its previous game\nfrom collections import defaultdict\nwon_last = defaultdict(bool)\n#print(won_last)\n\nfor idx, row in r.iterrows():\n    home_team = row[\"HomeTeam\"]\n    visitor_team = row[\"VisitorTeam\"]\n    
row[\"HomeLastWin\"] = won_last[home_team]\n row[\"VisitorLastWin\"] = won_last[visitor_team]\n r.ix[idx] = row\n won_last[home_team] = row[\"HomeWin\"]\n won_last[visitor_team] = not row[\"HomeWin\"]\n\n#print(r[20:25])\n\n\n###################################################\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\n\nclf = DecisionTreeClassifier(random_state=14)\n\nX_previouswins = r[[\"HomeLastWin\", \"VisitorLastWin\"]].values\nscores = cross_val_score(clf, X_previouswins, y_true, scoring='accuracy')\nprint(\"Using just the last result from the home and visitor teams\")\nprint(\"Accuracy: {0:.1f}%\".format(np.mean(scores) * 100))\n\n# Reference: http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation\n\n###################################################\n\n\n\n","sub_path":"data_mining/example_01.py","file_name":"example_01.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"577775756","text":"from django.conf.urls import include, url\nfrom django.views.generic import TemplateView\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='homepage.html'), name=\"homepage\"),\n url(r'^blog/',include('divineapp.blog.urls',namespace=\"blog\")),\n url(r'^apps/',include('divineapp.apps.urls',namespace=\"apps\")),\n url(r'^home/', include('divineapp.home.urls',namespace=\"contacts\")),\n url(r'^sendemail/',include('divineapp.sendemail.urls',namespace=\"sendemail\")),\n url(r'^admin/', admin.site.urls),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n]\n","sub_path":"divineapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489047211","text":"#!/usr/bin/env python3\n\nfrom argparse import ArgumentParser\nimport sys\n\ndef find_tokens(file_path, searched : str,before : bool, after : bool):\n with open(file_path, 'r') as f:\n content = f.read().strip().split('\\n')\n for line_num, line in enumerate(content):\n tokens = line.strip().split()\n ix = 0\n for token in tokens:\n if searched in token:\n print(f\"{file_path} : {line_num}: \", end=\"\")\n if before and ix > 0:\n print(tokens[ix-1], end=\" \")\n print(token, end=\"\")\n if after and ix < len(tokens)-1:\n print(f\" {tokens[ix+1]}\", end=\"\")\n print()\n ix += 1\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\n description='Print length of the line with most words from all the files provided'\n )\n parser.add_argument('file_names', metavar='FILE_NAME', type=str, nargs='+')\n parser.add_argument('--searched', required=True, type=str)\n parser.add_argument('--before', help='print token before searched', action='store_true')\n parser.add_argument('--after', help='print token after searched', action='store_true')\n args = vars(parser.parse_args())\n\n wc = 0\n for file_path in args['file_names']:\n t = find_tokens(file_path, args['searched'], args['before'], args['after'])\n\n","sub_path":"rotowire/preprocessing/all_occurrences.py","file_name":"all_occurrences.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258264708","text":"#! 
/usr/bin/env python\n\nimport sys\nimport copy\nimport rospy\nimport moveit_commander\nimport moveit_msgs\nfrom moveit_msgs.msg import RobotState\nfrom sensor_msgs.msg import JointState\nimport geometry_msgs.msg\n\nmoveit_commander.roscpp_initializer.roscpp_initialize(sys.argv)\nrospy.init_node('pose_goal_node', anonymous=True)\n\nrobot = moveit_commander.RobotCommander()\nscene = moveit_commander.PlanningSceneInterface()\ngroup_arm = moveit_commander.MoveGroupCommander(\"arm\")\ngroup_hand = moveit_commander.MoveGroupCommander(\"hand\")\ngroup_arm.set_planner_id(\"RRTConnectkConfig\")\n\npose_target = geometry_msgs.msg.Pose()\n\npose_target.position.x = 0.448392\npose_target.position.y = -0.080655\npose_target.position.z = 0.062787\n\npose_target.orientation.x = 0.7257\npose_target.orientation.y = -0.0533\npose_target.orientation.z = -0.6857\npose_target.orientation.w = 0.01\n\ngroup_arm.set_pose_target(pose_target)\ngroup_arm.plan()\n# group_arm.go(wait=True)\n\n\nmoveit_commander.roscpp_initializer.roscpp_shutdown()\nmoveit_commander.os._exit(0)\n\n","sub_path":"src/planning_grasp/src/pose_goal.py","file_name":"pose_goal.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"395920387","text":"################################################################################\n# Sensor Driven Multi-Blink\n#\n# Created by Zerynth Team 2015 CC\n# Authors: G. Baldi, D. Mazzei\n################################################################################\n\n# This example requires an analog sensor and three LEDs\nimport streams\nimport adc\n\n# create a serial port stream with default parameters\nstreams.serial()\n\n# set the A1 pin as analog input and D8, D9, D10 as outputs to drive the LEDs. 
\npinMode(A1,INPUT_ANALOG)\npinMode(D10,OUTPUT)\npinMode(D9,OUTPUT)\npinMode(D8,OUTPUT)\n\n# creates two arrays for storing global variables to be used in the blinking threads\nfreq=[1,1,1] \npin=[D8,D9,D10] \n\n# define the generic blinking function to be used for driving the LEDs\n# this function takes as input the index identifying the LED, then uses the global freq and pin arrays to dynamically drive the LEDs\ndef blink(Npin):\n while True:\n digitalWrite(pin[Npin],HIGH)\n sleep(freq[Npin])\n digitalWrite(pin[Npin],LOW)\n sleep(freq[Npin])\n\n# define an analog sensor sampling function that acquires the raw data and converts it to the three LED frequencies\ndef sampling():\n global freq\n while True:\n value = adc.read(A1)\n freq[0] = value//10\n freq[1] = freq[0] * 2\n freq[2] = freq[0] * 4\n sleep(50)\n\n# launch the four threads \nthread(sampling)\nthread(blink,0)\nthread(blink,1)\nthread(blink,2)\n\n# The main loop is used only for printing out at reasonable speed the calculated frequencies in term of waiting times \nwhile True:\n print(\"Wait times are\", freq)\n sleep(500)\n","sub_path":"examples/Sensor_Driven_Multi_Blink/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517227437","text":"from viewflow import flow, frontend\nfrom viewflow.base import this, Flow\nfrom viewflow.contrib import celery\nfrom viewflow.flow.views import CreateProcessView, UpdateProcessView\n\nfrom .models import HelloWorldProcess\nfrom .tasks import send_hello_world_request\n\n\n@frontend.register\nclass HelloWorldFlow(Flow):\n process_class = HelloWorldProcess\n\n start = (\n flow.Start(\n CreateProcessView,\n fields=[\"text\"]\n ).Permission(\n auto_create=True\n ).Next(this.approve)\n )\n\n approve = (\n flow.View(\n UpdateProcessView,\n fields=[\"approved\"]\n ).Permission(\n auto_create=True\n ).Next(this.check_approve)\n )\n\n check_approve = (\n flow.If(lambda activation: activation.process.approved)\n .Then(this.send)\n .Else(this.end)\n )\n\n send = (\n celery.Job(\n send_hello_world_request\n ).Next(this.end)\n )\n\n end = flow.End()\n","sub_path":"celery/demo/hellocelery/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434294063","text":"\"\"\"\nhttps://leetcode.com/problems/get-watched-videos-by-your-friends/\n\nBFS to find the kth level friends, count the movies, sort them.\n\"\"\"\nclass Solution:\n def watchedVideosByFriends(self, watchedVideos, friends, id: int, level: int):\n current_level = 0\n curr_friends = [id]\n lower_level_friends = set()\n lower_level_friends.add(id)\n while curr_friends and current_level < level:\n new_friends = []\n for idx in curr_friends:\n temp_friends = friends[idx]\n for f in temp_friends:\n if f not in lower_level_friends:\n new_friends.append(f)\n lower_level_friends.add(f)\n current_level += 1\n curr_friends = new_friends\n\n cnt = {}\n for f in curr_friends:\n for v in watchedVideos[f]:\n cnt[v] = cnt.get(v, 0) + 1\n\n ans = [(cnt[k], k) for k in cnt.keys()]\n ans.sort()\n return [x[1] for x in ans]\n","sub_path":"1311_GetWatchedVideosByYourFriends.py","file_name":"1311_GetWatchedVideosByYourFriends.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646890173","text":"import numpy as np\r\nimport 
time\r\nimport cv2\r\nimport os\r\nimport mss\r\nimport win32api, win32con\r\nimport keys as k\r\nimport pyautogui\r\nfrom threading import Thread\r\n\r\n\r\nkeys = k.Keys({})\r\nfire_key = keys.mouse_lb_press\r\nrelease_key = keys.mouse_lb_release\r\nhold = 0.4\r\n\r\nsct = mss.mss()\r\n# aim limit: 810, 485, start: 796, 481\r\naim_x = 157\r\naim_y = 157\r\naim_xl = 163\r\naim_yl = 163\r\nW, H = (320, 320)\r\nmonitor = {\"top\": 380, \"left\": 800, \"width\": W, \"height\": H}\r\nthreshold = 0.35\r\nnms_threshold = 0.3\r\n\r\n# classes directory\r\nclassesFile = 'C:\\\\Users\\\\andre\\\\PycharmProjects\\\\mouseTest\\\\coco.names'\r\nclassNames = []\r\nwith open(classesFile, 'rt') as f:\r\n classNames = f.read().rstrip('\\n').split('\\n')\r\n\r\n# model directory\r\nmodelCfg = 'C:\\\\Users\\\\andre\\\\PycharmProjects\\\\mouseTest\\\\yolov3.cfg'\r\n\r\n# weights directory\r\nmodelWeights = 'C:\\\\Users\\\\andre\\\\PycharmProjects\\\\mouseTest\\\\yolov3.weights'\r\n\r\nnet = cv2.dnn.readNetFromDarknet(modelCfg, modelWeights)\r\n\r\n# run it with NVIDIA CUDA for better performance\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\r\n\r\ndef determine_movement(mid_x, mid_y,width=800, height=600):\r\n x_move = 0.5-mid_x\r\n y_move = 0.5-mid_y\r\n keys.keys_worker.SendInput(keys.keys_worker.Mouse(0x0001, -1*int(x_move*width), -1*int(y_move*height)))\r\n\r\ndef shoot(x, aim_x, y, aim_y, h, w):\r\n keys.directMouse(0, 0, fire_key)\r\n time.sleep(0.3)\r\n keys.directMouse(0, 0, release_key)\r\n #pyautogui.leftClick()\r\n\r\ndef findObjects(outputs, img):\r\n hT, wT, cT = img.shape\r\n bbox = []\r\n classIDs = []\r\n confs = []\r\n\r\n\r\n for output in outputs:\r\n for detection in output:\r\n scores = detection[5:]\r\n classID = np.argmax(scores)\r\n\r\n confidence = scores[classID]\r\n\r\n if confidence > threshold:\r\n if classID == 0:\r\n w, h = int(detection[2] * wT), int(detection[3] * hT)\r\n x, y = int((detection[0] * wT) - w / 2), int((detection[1] * hT) - h / 2)\r\n bbox.append([x, y, w, h])\r\n classIDs.append(classID)\r\n confs.append(float(confidence))\r\n\r\n\r\n indices = cv2.dnn.NMSBoxes(bbox, confs, threshold, nms_threshold)\r\n\r\n for i in indices:\r\n i = i[0]\r\n box = bbox[i]\r\n x, y, w, h = box[0], box[1], box[2], box[3]\r\n #mid_x = (x + w) / 2\r\n #mid_y = (y + h) / 2\r\n\r\n #win32api.mouse_event(win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE, int(x/1920*65535.0), int(mid_y/1080*65535.0))\r\n #win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(x_move), int(y_move), 0, 0)\r\n #print(x, y, w, h)\r\n #determine_movement(mid_x, mid_y, 1920, 1080)\r\n\r\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\r\n cv2.putText(img, f'{classNames[classIDs[i]].upper()} {int(confs[i]*100)}%', (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)\r\n #print(indices)\r\n shoot1 = Thread(target=shoot, args=[x, aim_x, y, aim_y, h, w])\r\n if x < aim_x and x + w > aim_xl and y < aim_y and y + h > aim_yl:\r\n shoot1.start()\r\n\r\n\r\n\r\nwhile True:\r\n start = time.time()\r\n img = np.array(sct.grab(monitor))\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\n blob = cv2.dnn.blobFromImage(img, 1/255, (320, 320), [0, 0, 0], 1, crop=False)\r\n net.setInput(blob)\r\n layerNames = net.getLayerNames()\r\n outputNames = [layerNames[i[0]-1] for i in net.getUnconnectedOutLayers()]\r\n\r\n outputs = net.forward(outputNames)\r\n findObjects(outputs, img)\r\n\r\n cv2.imshow('screen', cv2.cvtColor(img, 
cv2.COLOR_BGR2RGB))\r\n end = time.time()\r\n TIME = end - start\r\n\r\n print(\"FPS:\", 1/TIME)\r\n # Press \"q\" to quit\r\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\r\n cv2.destroyAllWindows()\r\n break\r\n","sub_path":"hack.py","file_name":"hack.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"647353577","text":"import discord\nimport asyncio\nfrom discord.ext import commands\nfrom ..db import db\nextension_error_msgs = {\n 1: lambda extension: f\"'{extension}' cog not found.\",\n 2: lambda extension: f\"'{extension}' cog not loaded.\",\n 3: lambda extension: f\"'{extension}' has not setup fxn.\",\n 4: lambda extension: f\"'{extension}''s setup fxn had an execution error.\"\n}\n\n\nclass OwnerCommands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n def extension_cmds(self, cmd, extension):\n try:\n if cmd == \"reload\":\n self.bot.reload_extension(f'cogs.{extension}')\n elif cmd == \"unload\":\n self.bot.unload_extension(f'cogs.{extension}')\n elif cmd == \"load\":\n self.bot.load_extension(f'cogs.{extension}')\n except commands.ExtensionNotFound:\n return extension_error_msgs[1](extension)\n except commands.ExtensionNotLoaded:\n return extension_error_msgs[2](extension)\n except commands.NoEntryPointError:\n return extension_error_msgs[3](extension)\n except commands.ExtensionFailed:\n return extension_error_msgs[4](extension)\n except Exception as e:\n print(f\"extension_cmds error: {e}\")\n else:\n return 0\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def load(self, ctx, *, extension):\n result = self.extension_cmds(\"load\", extension)\n if result == 0:\n await ctx.send(f'Loaded {extension}.')\n else:\n await ctx.send(result)\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def unload(self, ctx, *, extension):\n if extension != \"Owner_Commands\":\n result = self.extension_cmds(\"unload\", extension)\n if result == 0:\n await ctx.send(f'Unloaded {extension}.')\n else:\n await ctx.send(result)\n else:\n await ctx.send(\"Owner Commands should not be unloaded.\")\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def reload(self, ctx, *, extension):\n result = self.extension_cmds(\"reload\", extension)\n if result == 0:\n await ctx.send(f\"Reloaded {extension}.\")\n else:\n await ctx.send(result)\n\n @commands.command(name=\"bot_act\", hidden=True)\n @commands.is_owner()\n async def botactivity(self, ctx, *, activity=''):\n await self.bot.change_presence(activity=discord.Game(activity))\n\n @commands.command(name=\"bot_status\", hidden=True)\n @commands.is_owner()\n async def botstatus(self, ctx, *, status=\"online\"):\n if status == \"online\":\n await self.bot.change_presence(status=discord.Status.online)\n elif status == \"idle\":\n await self.bot.change_presence(status=discord.Status.idle)\n elif status == \"offline\":\n await self.bot.change_presence(status=discord.Status.offline)\n elif status == \"DND\":\n await self.bot.change_presence(status=discord.Status.do_not_disturb)\n else:\n await ctx.send(f\"Invalid status: {status}\")\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def closebot(self, ctx):\n await ctx.send(\"Going offline.\")\n await self.bot.change_presence(status=discord.Status.offline)\n await asyncio.sleep(5)\n await self.bot.close()\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def audiosurprise(self, ctx, guild_id, ch_id, file): # only made this for surprise 
rick rolling and other\n        guild = discord.utils.get(self.bot.guilds, id=int(guild_id))\n        channel = discord.utils.get(guild.channels, id=int(ch_id))\n        vc = await channel.connect()\n        vc.play(discord.FFmpegPCMAudio(f\"./audio/{file}\"))\n        while True:\n            await asyncio.sleep(.1)\n            if not vc.is_playing():\n                await vc.disconnect()\n                break\n\n    @commands.command(hidden=True)\n    @commands.is_owner()\n    async def dbcommit(self, ctx):\n        db.commit()\n        print(True)\n\n    @commands.Cog.listener()\n    async def on_ready(self):\n        if not self.bot.ready:\n            self.bot.cogs_ready.ready_up(\"Owner_Commands\")\n\n\ndef setup(bot):\n    bot.add_cog(OwnerCommands(bot))\n","sub_path":"lib/cogs/Owner_Commands.py","file_name":"Owner_Commands.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"458182103","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/25 19:03\n# @Author : xxx\n# @Email : xxx@admin.com\n# @File : 11.队列.py\n# @Software: PyCharm\n\n# Queues\n# A queue maintains an order: first in, first out (FIFO)\n# import queue\n# q = queue.Queue()\n# print(q)\n# q.put(1)\n# q.put(2)\n# q.put(3)\n# print(q)\n# print(q.get())\n# print(q.get())\n# print(q.get())\n\nfrom multiprocessing import Queue,Process\nimport time\n\ndef son(q):\n    msg = q.get() # blocking\n    print(msg)\n\n\nif __name__ == '__main__':\n    q = Queue() # pickle + socket + Lock: a data type that is data-safe between processes\n    pro = Process(target=son,args = (q,))\n    pro.start()\n    time.sleep(1)\n    q.put('hello')\n\n# Communication between processes\n# IPC: Inter Process Communication\n# IPC: Pipe (no lock, not data-safe); pipe + lock == queue\n# Third-party tools (message middleware): memcache, redis, kafka, rabbitmq\n# Queue usage + models\n","sub_path":"day27/11.队列.py","file_name":"11.队列.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488933050","text":"import pygame\n\n\nclass Ship():\n    \"\"\" Define functionality for the object controlled by user\"\"\"\n\n    def __init__(self, screen):\n        \"\"\"Constructor\"\"\"\n        self.screen = screen\n        self.image = pygame.image.load('images/spaceship.bmp')\n        self.image = pygame.transform.scale(self.image, (100, 100))\n        self.rect = self.image.get_rect()\n        self.screen_rect = screen.get_rect()\n        self.rect.centerx = self.screen_rect.centerx\n        self.rect.bottom = self.screen_rect.bottom\n        self.moving_right = False\n        self.moving_left = False\n\n    def refresh(self):\n        \"\"\"Draw ship at current coordinates\"\"\"\n        if (self.moving_right):\n            self.rect.centerx += 5\n        elif (self.moving_left):\n            self.rect.centerx -= 5\n\n    def blitme(self):\n        self.screen.blit(self.image, self.rect)\n","sub_path":"alien_invasion/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"439309404","text":"import numpy as np\n\n\"\"\"\nRandom and Human-interacting players for the game of Sogo.\n\nAuthor: Benedikt Nordhoff\n\nBased on the TicTacToe players by Evgeny Tyurin.\n\n\"\"\"\nclass RandomPlayer():\n    def __init__(self, game):\n        self.game = game\n\n    def play(self, board):\n        a = np.random.randint(self.game.getActionSize())\n        valids = self.game.getValidMoves(board, 1)\n        while valids[a]!=1:\n            a = np.random.randint(self.game.getActionSize())\n        return a\n\n\nclass HumanSogoPlayer():\n    def __init__(self, game):\n        self.game = game\n\n    def play(self, board,last_action=-1):\n        # display(board)\n        valid = self.game.getValidMoves(board, 1)\n        for i in range(len(valid)):\n            if valid[i]:\n                print(int(i/self.game.n), 
int(i%self.game.n))\n while True: \n # Python 3.x\n a = input()\n # Python 2.x \n # a = raw_input()\n\n x,y = [int(x) for x in a.split(' ')]\n a = self.game.n * y + x\n if valid[a]:\n break\n else:\n print('Invalid')\n return a\n\n","sub_path":"sogo/SogoPlayers.py","file_name":"SogoPlayers.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"20555304","text":"from django.shortcuts import render, get_object_or_404\n\nfrom core.models import Category\n\n\ndef categories(request):\n \"\"\"\n Displays all root-level categories available for browsing.\n \"\"\"\n category_list = Category.objects.filter(parent=None)\n context = {'category_list': category_list, 'explore': 'categories'}\n return render(request, 'core/categories/category_list.html', context)\n\n\ndef category_detail(request, name):\n \"\"\"\n Displays a selected categories, as specified by the URL. If the categories has sub-categories, those will be\n displayed instead of recipes in that categories.\n \"\"\"\n category = get_object_or_404(Category, name=name)\n sub_categories = Category.objects.filter(parent=category).order_by('-name')\n context = {'categories': category, 'explore': 'categories'}\n\n if sub_categories.count() > 0:\n # if the categories has no sub-categories, list out the recipes in that categories\n context['category_list'] = sub_categories\n return render(request, 'core/categories/category_list.html', context)\n\n # list the categories's sub-categories\n context['recipe_list'] = sorted(category.recipe_set.all(), key=lambda r: r.avg_rating(), reverse=True)\n return render(request, 'core/categories/category_detail.html', context)\n","sub_path":"core/views/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114788567","text":"import sys\n\nimport pytest\nfrom dagster import file_relative_path\nfrom dagster.core.host_representation import (\n ManagedGrpcPythonEnvRepositoryLocationOrigin,\n PipelineHandle,\n)\nfrom dagster.core.types.loadable_target_origin import LoadableTargetOrigin\n\n\ndef get_example_repository_location_handle():\n loadable_target_origin = LoadableTargetOrigin(\n executable_path=sys.executable,\n python_file=file_relative_path(__file__, \"repo.py\"),\n )\n location_name = \"example_repo_location\"\n\n origin = ManagedGrpcPythonEnvRepositoryLocationOrigin(loadable_target_origin, location_name)\n\n return origin.create_handle()\n\n\n@pytest.fixture\ndef foo_example_repo():\n with get_example_repository_location_handle() as location_handle:\n yield location_handle.create_location().get_repository(\"example_repo\")\n\n\n@pytest.fixture\ndef foo_pipeline_handle(foo_example_repo): # pylint: disable=redefined-outer-name\n return PipelineHandle(\"foo_pipeline\", foo_example_repo.handle)\n","sub_path":"python_modules/dagster/dagster_tests/daemon_tests/integration_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562734326","text":"code = ('s','U','h','a','N','o','G','I','r','k')\r\nwhile 1:\r\n\tciphertext = input('Please input your ciphertext(\\'q\\' for exit):\\n')\r\n\tif 'q' in ciphertext:\r\n\t\tbreak\r\n\tprint('The plaintext is:')\r\n\tfor i in ciphertext:\r\n\t\tif i in code:\r\n\t\t\tprint(list(code).index(i),end='')\r\n\t\telif i 
not in code:\r\n\t\t\tprint('?',end='')\r\n\tprint('\\n',end='')","sub_path":"密文转明文.py","file_name":"密文转明文.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501281509","text":"from django.contrib import admin\n\n#from custom.consult.models import Country\nfrom custom.consult.models import Children\nfrom custom.consult.models import Consultation\nfrom custom.consult.models import MaritalStatus\nfrom custom.consult.models import StatusChoice\n\n# Register your models here.\n\n#class CountryAdmin(admin.ModelAdmin):\n# fieldsets = ((None, {'fields': ['name',\n# 'abbreviation',]}),)\n# list_display = ('id', 'name', 'abbreviation',)\n\n# class Meta:\n# verbose_name = 'Country'\n# verbose_name_plural = 'Countries'\n\n\nclass ChildrenAdmin(admin.ModelAdmin):\n fieldsets = ((None, {'fields': ['number',\n 'value',\n 'numeric',]}),)\n list_display = ('id', 'number', 'value', 'numeric')\n\n class Meta:\n verbose_name = 'Children'\n verbose_name_plural = 'Children'\n\n\nclass ConsultationAdmin(admin.ModelAdmin):\n fieldsets = ((None, {'fields': ['user',\n 'status',\n 'marital_status',\n 'number_of_children',\n 'invoice',\n 'amount',\n 'payment',\n 'billing_full_name',\n 'individual_email', \n 'individual_full_name', \n 'billing_phone', \n 'individual_phone', \n 'purpose', \n 'billing_address', \n 'country_of_citizenship', \n 'individual_address', \n 'use_billing', \n 'date_of_birth', \n 'time_responded',]}),)\n list_display = ('id', 'user', 'status', 'individual_full_name', \n 'individual_email', 'individual_phone', 'time_responded', \n 'amount', 'invoice',)\n class Meta:\n verbose_name = 'Consultation'\n verbose_name_plural = 'Consultations'\n \n\nclass MaritalStatusAdmin(admin.ModelAdmin):\n fieldsets = ((None, {'fields': ['status',\n 'code',]}),)\n list_display = ('id', 'status', 'code',)\n\n class Meta:\n verbose_name = 'Country'\n verbose_name_plural = 'Countries'\n\n\nclass StatusChoiceAdmin(admin.ModelAdmin):\n fieldsets = ((None, {'fields': ['status',\n 'code',]}),)\n list_display = ('id', 'status', 'code',)\n\n class Meta:\n verbose_name = 'Status Choice'\n verbose_name_plural = 'Status Choices'\n\n\n#admin.site.register(Country, CountryAdmin)\nadmin.site.register(Children, ChildrenAdmin)\nadmin.site.register(Consultation, ConsultationAdmin)\nadmin.site.register(MaritalStatus, MaritalStatusAdmin)\nadmin.site.register(StatusChoice, StatusChoiceAdmin)\n\n","sub_path":"custom/consult/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267422358","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport taggit.managers\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('taggit', '0002_auto_20150616_2121'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Solicitacoes',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('solicitacao', models.TextField()),\n ('tags', taggit.managers.TaggableManager(verbose_name='Tags', through='taggit.TaggedItem', to='taggit.Tag', help_text='Uma lista de tags separada por vírgulas.')),\n ],\n ),\n migrations.CreateModel(\n name='Solicitante',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('cpf', 
models.CharField(max_length=11, unique=True)),\n ('nome', models.CharField(max_length=254)),\n ('email', models.EmailField(max_length=254)),\n ('telefone', models.CharField(max_length=11)),\n ],\n ),\n migrations.CreateModel(\n name='Teleconsultor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('nome', models.CharField(max_length=254)),\n ('email', models.EmailField(max_length=254)),\n ('crm', models.CharField(max_length=10, unique=True)),\n ('data_formatura', models.DateTimeField(null=True, blank=True)),\n ],\n ),\n ]\n","sub_path":"desafiots/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277240077","text":"# -*- coding: utf-8 -*-\nimport re\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom quadratic.utils import QuadraticEquation\nfrom quadratic.forms import QuadraticForm\n\n\ndef quadratic_results(request):\n discr = 'Дискриминант: %d'\n dis_less_null = 'Дискриминант меньше нуля, квадратное уравнение не имеет действительных решений.'\n dis_eq_null = 'Дискриминант равен нулю, квадратное уравнение имеет один действительный корень: x1 = x2 = %s'\n two_roots = 'Квадратное уравнение имеет два действительных корня: x1 = %s, x2 = %s'\n empty_str = ''\n comment_dsc = empty_str\n comment_result = empty_str\n \n if request.GET:\n form = QuadraticForm(request.GET)\n if form.is_valid():\n got_data = form.cleaned_data\n qe = QuadraticEquation(got_data) \n dsc = qe.get_discr()\n if dsc > 0:\n x1 = qe.get_eq_root()\n x2 = qe.get_eq_root(order=2)\n comment_dsc = discr % dsc\n comment_result = two_roots % (round(x1,1), round(x2,1))\n elif dsc < 0:\n comment_dsc = discr % dsc\n comment_result = dis_less_null\n elif dsc == 0:\n x1 = qe.get_eq_root()\n comment_dsc = discr % dsc\n comment_result = dis_eq_null % round(x1,1)\n else:\n form = QuadraticForm(request.GET)\n else:\n form = QuadraticForm()\n\n site_view = {\n 'comment_discr': comment_dsc,\n 'comment_result': comment_result,\n 'form': form,}\n return render(request, 'quadratic/results.html', site_view)\n","sub_path":"quadratic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617793004","text":"from pymongo import MongoClient\nimport pymongo\nfrom sshtunnel import open_tunnel\nfrom sshtunnel import SSHTunnelForwarder, create_logger\nimport os\nfrom django.conf import settings\nfrom bson import json_util\nfrom bson.objectid import ObjectId\nimport json\nimport gridfs\nfrom .models import Video\nfrom django.utils.dateparse import parse_datetime\n\n\nclass MongoUtils:\n host = \"ec2-18-188-66-52.us-east-2.compute.amazonaws.com\"\n ssh_username = \"ubuntu\"\n ssh_private_key = os.path.join(settings.BASE_DIR,\"ML_test.pem\")\n def __init__(self):\n self.server = SSHTunnelForwarder(\n (self.host, 22),\n ssh_username = self.ssh_username,\n ssh_private_key = self.ssh_private_key,\n remote_bind_address=('127.0.0.1', 27017)\n )\n \n def resource_handler(func):\n def handle_connection(*args):\n _self = args[0]\n try:\n print(\"Connecting to database...\")\n _self.server.daemon_forward_servers = True\n _self.server.start()\n _self.client = MongoClient('127.0.0.1', _self.server.local_bind_port)\n _self.db = _self.client['bdr_db']\n res = func(*args)\n return res\n except Exception as 
e:\n                print(\"Exception occurs: \", e)\n            finally:\n                _self.server.stop()\n                print(\"Connection closed\")\n        return handle_connection\n\n    @resource_handler\n    def find_videos_by_phone(self, phone):\n        reports = self.db.baddriverreports\n        videos = self.db['fs.files']\n        res = []\n        for report in reports.find({\"postingAccount\": phone}).sort('time', pymongo.DESCENDING):\n            video_info = {\"id\": report['videoClip'], \"user\": report['reporterName'], 'license_plate': report['licensePlateNumber'], 'created': report['time']}\n            video = videos.find_one({'_id': report['videoClip']})\n            # Once the backend is modified, speed data will be available in database, you should use the following code\n            # video_info[\"init_speed\"] = video['metadata']['initialSpeed']\n            # video_info[\"avg_speed\"] = video['metadata']['averageSpeed']\n            # video_info[\"final_speed\"] = video['metadata']['finalSpeed']\n            video_info[\"location\"] = video['metadata']['locationRecorded']\n            # print(video_info)\n            video_info = json.loads(json_util.dumps(video_info))\n            res.append(video_info)\n        print(len(res))\n        return res\n\n    @resource_handler\n    def get_video_by_id(self, id, filename):\n        print(\"try to get video\")\n        fs = gridfs.GridFSBucket(self.db)\n        preview_file_path = os.path.join(settings.MEDIA_ROOT, filename)\n        file = open(preview_file_path, 'wb+')\n        fs.download_to_stream(ObjectId(id), file)\n\n    def convert_to_video_model(self, video_info):\n        video = Video()\n        video.title = video_info['title']\n        video.description = video_info['description']\n        video.created = parse_datetime(video_info['created'])\n        video.location = video_info['location']\n        video.license_plate = video_info['license_plate']\n        self.get_video_by_id(str(video_info['id']['$oid']), \"videos/\" + video_info['title'] + \".mp4\")\n        video.video.name = \"videos/\" + video_info['title'] + \".mp4\"\n        video.save()\n","sub_path":"mltr/videos/mongoUtils.py","file_name":"mongoUtils.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421021775","text":"import serial\nimport time\n\nSERIAL_NUMBER = 9600\n\n# Set up the serial line\nser = serial.Serial('COM4', SERIAL_NUMBER)\ntime.sleep(2)\n\ndata = []  # empty list to store the data\nfor i in range(50):\n    b = ser.readline()          # read a byte string\n    string_n = b.decode()       # decode byte string into Unicode\n    string = string_n.rstrip()  # remove \\n and \\r\n    flt = float(string)         # convert string to float\n    print(flt)\n    data.append(flt)            # add to the end of data list\n    time.sleep(0.1)             # wait (sleep) 0.1 seconds\n\nser.close()\n\nfor line in data:\n    print(line)\n","sub_path":"climate_control_experiment/read_dht_sensor.py","file_name":"read_dht_sensor.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"642984054","text":"import csharp,cplus,dart,go,java,javascript,kotlin,objective_c,python, swift\nimport matplotlib.pyplot as plt\nmaxx = 0\n\nfor k in (csharp,cplus,dart,go,java,javascript,kotlin,objective_c,python, swift):\n    maxx = max(maxx,k.tracing,k.sort,k.inheritance,k.multi)\n\n\n\n\n\nprint(maxx)\n\n\nplt.title('Evaluation after 
normalization')\nplt.legend()\nplt.ylim(0,1)\ncplus.plt.show()\ncsharp.plt.show()\ndart.plt.show()\ngo.plt.show()\njava.plt.show()\njavascript.plt.show()\nkotlin.plt.show()\nobjective_c.plt.show()\npython.plt.show()\nswift.plt.show()","sub_path":"GraphForPaper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128344022","text":"\"\"\"\r\nTryin'a make something DnD-like..\r\n\"\"\"\r\n\r\nimport subprocess\r\nfrom random import randint\r\nimport msvcrt\t# For \"Press any key to continue..\"s\r\n\r\nclass entity:\r\n\tdef __init__(\r\n\t\t\t\t\tself, \r\n\t\t\t\t\tname = \"Unknown\", \r\n\t\t\t\t\tstatStr = 0,\r\n\t\t\t\t\tstatDex = 0,\r\n\t\t\t\t\tstatCon = 0,\r\n\t\t\t\t\tstatInt = 0,\r\n\t\t\t\t\tstatWis = 0,\r\n\t\t\t\t\tstatCha = 0,\r\n\t\t\t\t\tcurrentHealth = 0, \r\n\t\t\t\t\tmaxHealth = 0, \r\n\t\t\t\t\tisAlive = True\r\n\t\t\t\t\t):\r\n\t\t# Initialise with name, currentHealth, maxHealth, currentMana, maxMana, isAlive\r\n\t\tself.name = name\r\n\t\tself.statStr = statStr\r\n\t\tself.statDex = statDex\r\n\t\tself.statCon = statCon\r\n\t\tself.statInt = statInt\r\n\t\tself.statWis = statWis\r\n\t\tself.statCha = statCha\r\n\t\tself.currentHealth = currentHealth\r\n\t\tself.maxHealth = maxHealth\r\n\t\tself.isAlive = isAlive\r\n\t\r\n\tdef reduceHealth(self, amount):\r\n\t\t# Reduces hp of entity according to damage taken.\r\n\t\tself.currentHealth -= amount\r\n\t\tif self.currentHealth <= 0:\r\n\t\t\tself.currentHealth = 0\r\n\t\t\tself.isAlive = False\r\n\t\r\n\tdef increaseHealth(self, amount):\r\n\t\tself.currentHealth += amount\r\n\t\tif self.currentHealth > self.maxHealth:\r\n\t\t\tself.currentHealth = self.maxHealth\r\n\t\t\t\t\r\n\tdef showStats(self):\r\n\t\tprint(\"\\nName: {}\\nHealth: {}/{}\".format(self.name, self.currentHealth, self.maxHealth))\r\n\t\t\r\ndef cls():\r\n\tsubprocess.call('cls', shell = True)\r\n\t\r\ndef wait():\r\n\tmsvcrt.getch()\r\n\r\ndef display(title, content, contentHeader = \"\", response = \"\"):\r\n\tcls()\r\n\tprint(title + \"\\n\")\r\n\tif contentHeader != \"\":\r\n\t\tprint(contentHeader + \"\\n\")\r\n\tprint(content + \"\\n\")\r\n\tif response != \"\":\r\n\t\tprint(response)\r\n\t\r\ndef checkAlive():\r\n\tif playerChar.currentHealth == 0:\r\n\t\tprint(\"Oh dear, you are dead!\")\r\n\t\tisRunning = False\r\n\t\treturn False\r\n\telif enemyChar.currentHealth == 0:\r\n\t\tprint(\"You won!\")\r\n\t\tisRunning = False\r\n\t\treturn False\r\n\r\ndef newChar():\r\n\t# Generates list of 6 random ability scores from 3 to 18, inclusive.\r\n\t# Each score is generated from 4d6, and the lowest die is removed for a total of 3d6.\r\n\tcounter_statRolls = 6\r\n\tstatRolls = []\r\n\twhile counter_statRolls != 0:\r\n\t\tdiceRoll4d6 = []\r\n\t\tsummedRoll = 0\r\n\t\tcounter_4d6 = 4\r\n\t\twhile counter_4d6 != 0:\r\n\t\t\tdiceRoll4d6.append(randint(1,6))\r\n\t\t\tcounter_4d6 -= 1\r\n\t\tdiceRoll4d6 = sorted(diceRoll4d6)\r\n\t\tdiceRoll4d6.pop(0)\r\n\t\tfor i in range(0, len(diceRoll4d6)):\r\n\t\t\tsummedRoll += diceRoll4d6[i]\r\n\t\tstatRolls.append(summedRoll)\r\n\t\tcounter_statRolls -= 1\t\r\n\r\n\t# Allows user to assign scores to their selected stats.\r\n\tstatNames = [\"Strength\", \"Dexterity\", \"Constitution\", \"Intelligence\", \"Wisdom\", \"Charisma\"]\r\n\tuserChoice = []\r\n\tcounter_statInput = 0\r\n\twhile counter_statInput != 6:\r\n\t\tdisplay(\r\n\t\t\t\"Assign scores\",\r\n\t\t\t\"Selecting for {0}.\\nPlease select 
which score should be assigned to {0}.\\n\\nList of scores remaining: {1}\"\r\n\t\t\t.format(statNames[counter_statInput], statRolls)\r\n\t\t)\r\n\t\t# Input validation - Integer check\r\n\t\ttry:\r\n\t\t\tuserInput = int(input(\">> \"))\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"\\nPlease enter integers only!\")\r\n\t\t\twait()\r\n\t\telse:\r\n\t\t\t# Input validation - check value exists\r\n\t\t\ttry:\r\n\t\t\t\tstatRolls.remove(userInput)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"\\nThere is no such value in the list of scores. Please pick a valid score.\")\r\n\t\t\t\twait()\r\n\t\t\telse:\r\n\t\t\t\tuserChoice.append(userInput)\r\n\t\t\t\tcounter_statInput += 1\r\n\t\t\t\t# Checks if user is satisfied with score assignments. Else reset.\r\n\t\t\t\tif counter_statInput == 6:\r\n\t\t\t\t\t# Input validation - Y/N\r\n\t\t\t\t\tdisplayContent = \"\"\r\n\t\t\t\t\tfor i in range(0,6):\r\n\t\t\t\t\t\tdisplayContent += \"{}: {}\\n\".format(statNames[i], userChoice[i])\r\n\t\t\t\t\tisLooping = True\r\n\t\t\t\t\twhile isLooping == True:\t\t\t\t\t\t\r\n\t\t\t\t\t\tdisplay(\"Assign scores\", displayContent, \"Ability scores\", \"Are you okay with these ability scores? | Y/N\")\r\n\t\t\t\t\t\tuserInput = input(\">> \")\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tuserInput = userInput.lower()\r\n\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\tprint(\"\\nPlease enter a valid response.\")\r\n\t\t\t\t\t\t\twait()\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif userInput == \"y\":\r\n\t\t\t\t\t\t\t\t# Do nothing, confirms selection\r\n\t\t\t\t\t\t\t\tisLooping = False\r\n\t\t\t\t\t\t\telif userInput == \"n\":\r\n\t\t\t\t\t\t\t\tcounter_statInput = 0\r\n\t\t\t\t\t\t\t\tstatRolls = userChoice\r\n\t\t\t\t\t\t\t\tuserChoice = []\r\n\t\t\t\t\t\t\t\tisLooping = False\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tprint(\"\\nPlease enter a valid response..\")\r\n\t\t\t\t\t\t\t\twait()\r\n\t\t\t\t\twait()\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Do nothing\r\n\t\t\t\t\t1 == 1\r\n\tprint(userChoice)\r\n\t\t\t# !!! Include options to re-assign scores.\r\n\t\t\t\r\n# App start, !!! 
display incomplete\r\nplayerChar = entity(\"Hiro\", 0, 0, 0, 0, 0, 0, 100, 100, True)\r\nenemyChar = entity(\"Rat\", 0, 0, 0, 0, 0, 0, 10, 10, True)\r\nnewChar()\r\nwait()\r\nisRunning = True\r\nwhile isRunning == True:\r\n\tdisplay(\"Titlehere\", \"Running proper\")\r\n\tif checkAlive() == False:\r\n\t\tprint(\"\\n End\")\r\n\t\tisRunning = False\r\n\telse:\r\n\t\tprint(\"\\nBattle!\")\r\n\t\tinputLoop = True\r\n\twhile inputLoop == True:\r\n\t\ttry:\r\n\t\t\tuserInput = (input(\"\\n(A)ttack | (D)odge | (E)xit\\n\")).lower()\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Command not recognized, please enter appropriate command.\")\r\n\t\telse:\r\n\t\t\tif userInput == \"a\":\r\n\t\t\t\tdamage = randint(1,10)\r\n\t\t\t\tenemyChar.reduceHealth(damage)\r\n\t\t\t\tprint(\"\\nYou hit {} for {} damage.\".format(enemyChar.name, damage))\r\n\t\t\t\tdamage = randint(1,10)\r\n\t\t\t\tplayerChar.reduceHealth(damage)\r\n\t\t\t\tinput (\"You were hit for {} damage.\".format(damage))\r\n\t\t\t\tinputLoop = False\r\n\t\t\telif userInput == \"d\":\r\n\t\t\t\tprint(\"Dodge attempted\")\r\n\t\t\t\tinputLoop = False\r\n\t\t\telif userInput == \"e\":\r\n\t\t\t\tprint(\"Exiting...\")\r\n\t\t\t\tinputLoop = False\r\n\t\t\t\tisRunning = False\r\n\t\t\t\t\r\n\t\r\n\r\n\t\r\nwait()\r\n\t\t","sub_path":"Sandbox/DnD-like (WIP).py","file_name":"DnD-like (WIP).py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63423826","text":"# Copyright 2014 Dr. Greg M. Bernstein\n\"\"\" Helper functions for reading and writing JSON representations of various network path,\n demand, and other entities.\n\n See the IPython note book: JSON_Conversion.ipynb for more in depth explanation of the\n whys and hows to use these methods. 
Or just look at the example code at the end of\n the file.\n\"\"\"\nimport json\n\n\ndef demands_to_j(obj):\n \"\"\" A function to partially convert a demand dictionary to JSON format.\n\n Parameters\n ----------\n obj : dictionary\n a demand dictionary, indexed by a node pair, with value the demand volume.\n\n Returns\n -------\n list_of_dicts : list\n a list of dictionaries that can readily be converted to JSON with the json\n standard library routines.\n \"\"\"\n tmp = []\n for d in obj.keys():\n tmp.append({\"source\": d[0], \"target\": d[1], \"demand\": obj[d]})\n return tmp\n\ndef j_to_demands(d_list):\n \"\"\" Helps read in a demand dictionary from a JSON object.\n\n Parameters\n ----------\n d_list : list\n a list of Python dictionaries representing the JSON demand and has\n source, target, and demand keys.\n\n Returns\n -------\n demands : dictionary\n a demand dictionary, indexed by a node pair, with value the demand volume.\n \"\"\"\n tmp = {}\n for d in d_list:\n tmp[d[\"source\"], d[\"target\"]] = d[\"demand\"]\n return tmp\n\ndef paths_to_j(path_dict):\n \"\"\" Takes a candidate paths dictionary and converts it to a JSON serializable form.\n\n Parameters\n ----------\n path_dict : dictionary\n a dictionary indexed by a node pair whose value is a list of paths, where each path\n is a list of nodes.\n\n Returns\n -------\n list_dict : list\n a list of dictionaries that can readily be converted to JSON with the json\n standard library routines.\n \"\"\"\n tmp = [] # We'll use a list\n for k in path_dict.keys():\n tmp.append({\"source\": k[0], \"target\": k[1], \"paths\": path_dict[k]})\n return tmp\n\ndef j_to_paths(path_list):\n \"\"\" A helper function to retrieve a candidate paths dictionary from JSON.\n\n Parameters\n ----------\n path_list : list\n a list of dictionaries, each representing a JSON path candidate\n object with from, to, and paths keywords.\n\n Returns\n -------\n path_dict : dictionary\n a dictionary indexed by a node pair whose value is a list of paths, where each path\n is a list of nodes.\n \"\"\"\n tmp = {}\n for p in path_list:\n tmp[p[\"source\"],p[\"target\"]] = p[\"paths\"]\n return tmp\n\n\nif __name__ == \"__main__\":\n paths = {(1, 2): [[1, 2], [1, 3, 2]],\n (1, 3): [[1, 3], [1, 2, 3]],\n (2, 1): [[2, 1]],\n (2, 3): [[2, 3]],\n (3, 1): [[3, 1], [3, 2, 1]],\n (3, 2): [[3, 2]]}\n\n demands = {(1, 2): 5, (1, 3): 7, (2, 1): 5, (2, 3): 8, (3, 1): 7, (3, 2): 8}\n\n print (json.dumps(demands_to_j(demands)))\n print (json.dumps(demands_to_j(demands), sort_keys=True))\n print (\"Now with indentation\")\n print (json.dumps(demands_to_j(demands), sort_keys=True, indent=4))\n demand_string = '[{\"source\": 1, \"target\": 2, \"demand\": 5}, {\"source\": 3, \"target\": 2, \"demand\": 8}, {\"source\": 1, \"target\": 3, \"demand\": 7}, \\\n {\"source\": 3, \"target\": 1, \"demand\": 7}, {\"source\": 2, \"target\": 1, \"demand\": 5}, {\"source\": 2, \"target\": 3, \"demand\": 8}]'\n # Try deserializing the above JSON string\n demands2 = j_to_demands(json.loads(demand_string))\n print (demands2)\n print (demands2 == demands)\n\n print (json.dumps(paths_to_j(paths)))\n # Example JSON string for candidate paths\n path_string = '[{\"target\": 2, \"source\": 1, \"paths\": [[1, 2], [1, 3, 2]]}, \\\n {\"target\": 2, \"source\": 3, \"paths\": [[3, 2]]}, {\"target\": 3, \"source\": 1, \"paths\": [[1, 3], [1, 2, 3]]},\\\n {\"target\": 1, \"source\": 3, \"paths\": [[3, 1], [3, 2, 1]]}, {\"target\": 1, \"source\": 2, \"paths\": [[2, 1]]},\\\n {\"target\": 3, \"source\": 2, 
\"paths\": [[2, 3]]}]'\n # Try converting from JSON path string back to a candidate path dictionary\n paths2 = j_to_paths(json.loads(path_string))\n print (paths == paths2)\n print (paths2)","sub_path":"Utilities/jsonconverter.py","file_name":"jsonconverter.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"521721385","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport math\nimport time\nimport subprocess\n\npuzzle_size = 0\ndimension_size = 0\n\ndef Encode(i, j, k,):\n return str((i - 1) * puzzle_size + (j - 1) * dimension_size + (k-1) +1)\n\n\ndef Encode_variable(puzzle):\n row = 1\n col = 1\n count = 0\n\n variable_list = []\n while (row <= dimension_size):\n variable_encoded = Encode(row, col, int(puzzle[count]))\n variable_list.append(variable_encoded)\n col += 1\n if (col == dimension_size + 1):\n row += 1\n col = 1\n count += 1\n variable_list = [str(x) for x in variable_list]\n return variable_list\n\n\ndef pnf_GeneralInfo(n):\n variable_num = n**3\n clause_num = (n*n*int(math.sqrt(n))*int(math.factorial(n)/math.factorial(2)/math.factorial(n-2))) + (n*n)\n return \"p cnf \" + str(variable_num)+\" \" + str(clause_num)+ \"\\n\"\n\n\ndef cons_OneNumPerEntry(variable_list,puzzle):\n output = ''\n for i in range(len(variable_list)):\n if (puzzle[i] != '0'):\n output += str(variable_list[i] + ' 0\\n')\n else:\n loc = divmod(i, dimension_size)\n for x in range(1, dimension_size + 1):\n output += Encode(loc[0]+1,loc[1]+1,x) + ' '\n output += '0\\n'\n return output\n\n\ndef cons_OneNumPerRow():\n output = ''\n for i in range(1, dimension_size + 1):\n for k in range(1, dimension_size + 1):\n for j in range(1, dimension_size):\n for l in range(j + 1, dimension_size + 1):\n output += \"-\" + Encode(i, j, k) + \" -\" + Encode(i, l, k) + \" 0\\n\"\n return output\n\n\ndef cons_OneNumPerCol():\n output = ''\n for j in range(1, dimension_size + 1):\n for k in range(1, dimension_size + 1):\n for i in range(1, dimension_size):\n for l in range(i + 1, dimension_size + 1):\n output += \"-\" + Encode(i, j, k) + \" -\" + Encode(l, j, k) + \" 0\\n\"\n return output\n\n\ndef cons_OneNumPerBlock():\n output = ''\n subgrid_size = int((dimension_size) ** (float(1 / 2)))\n for k in range(1, dimension_size + 1):\n for a in range(0, subgrid_size):\n for b in range(0, subgrid_size):\n for u in range(1, subgrid_size + 1):\n for v in range(1, subgrid_size):\n for w in range(v + 1, subgrid_size + 1):\n output += \"-\" + Encode(subgrid_size * a + u, subgrid_size * b + v, k) + \" -\" + Encode(subgrid_size * a + u, subgrid_size * b + w, k) + \" 0\\n\"\n\n for k in range(1, dimension_size + 1):\n for a in range(0, subgrid_size):\n for b in range(0, subgrid_size):\n for u in range(1, subgrid_size):\n for v in range(1, subgrid_size + 1):\n for w in range(u + 1, subgrid_size + 1):\n for t in range(1, subgrid_size + 1):\n output += \"-\" + Encode(subgrid_size * a + u, subgrid_size * b + v, k) + \" -\" + Encode(subgrid_size * a + w, subgrid_size * b + t, k) + \" 0\\n\"\n return output\n\n\ndef write_CNF(puzzle,dimension_size,targetfile):\n variable_list = Encode_variable(puzzle)\n outfile = open(targetfile, \"w\")\n outfile.write(pnf_GeneralInfo(dimension_size))\n outfile.write(cons_OneNumPerEntry(variable_list,puzzle))\n outfile.write(cons_OneNumPerRow())\n outfile.write(cons_OneNumPerCol())\n outfile.write(cons_OneNumPerBlock())\n\n\ndef main():\n global puzzle_size,dimension_size\n puzzle = \"\"\n start = 
time.clock()# Start counting time for this process\n if (len(sys.argv) != 3): # the input content must have more than two words\n print(\"Usage: ./sud2sat.py <input filename> <output filename>\\n or : ./sud2sat.py <input filename> --multiple\")\n return\n try: # check if can open file\n with open(sys.argv[1], 'r') as infile:\n if sys.argv[2]=='--multiple':\n line = infile.readline()\n count = 1\n if not os.path.exists(sys.argv[1].split('.')[0]+\"_SAT\"):\n os.mkdir(sys.argv[1].split('.')[0]+\"_SAT\")\n if not os.path.exists(sys.argv[1].split('.')[0]+\"_Result\"):\n os.mkdir(sys.argv[1].split('.')[0]+\"_Result\")\n while 1:\n line = infile.readline()\n if line:\n line = line.strip()\n if not line or line[0] == 'G' or line[0] == 'g':\n puzzle = puzzle.replace('\\n', '').replace('?', '0').replace('*', '0').replace('.','0') # replace special symbols to 0\n puzzle_size = len(puzzle)\n if int(math.sqrt(puzzle_size)) ** 2 != puzzle_size:\n print(\"The %d th Sudoku grid is invalid!!!\"%(count))\n if not line:\n break\n else:\n dimension_size = int(math.sqrt(puzzle_size))\n write_CNF(puzzle, dimension_size, \"CNF-temp.txt\")\n subprocess.call([\"minisat\", \"CNF-temp.txt\", sys.argv[1].split('.')[0]+\"_SAT/\"+sys.argv[1].split('.')[0]+\"_SAT_\"+str('{:0>2d}'.format(count))+\".txt\"])\n totalTime = time.clock() - start\n resultfile = open(sys.argv[1].split('.')[0]+\"_Result/\"+sys.argv[1].split('.')[0]+\"_\"+str('{:0>2d}'.format(count)), 'w')\n resultfile.write(\"Grid No.%d - time used (including running minisat): \" % (count) + str(totalTime) + \" seconds\"+\"\\n\")\n print(\"Grid No.%d - time used (including running minisat): \"%(count) + str(totalTime) + \" seconds\") # end the time counting\n start = time.clock()\n puzzle = \"\"\n if not line:\n break\n count +=1\n else:\n puzzle += line\n\n else:\n puzzle = infile.read().replace('\\n', '').replace('?', '0').replace('*', '0').replace('.', '0')# replace special symbols to 0\n puzzle_size = len(puzzle)\n if int(math.sqrt(puzzle_size)) ** 2 != puzzle_size:\n print(\"This is not a valid Sudoku file!!!\")\n else:\n dimension_size = int(math.sqrt(puzzle_size))\n write_CNF(puzzle, dimension_size, \"CNF-temp.txt\")\n subprocess.call([\"minisat\", \"CNF-temp.txt\", sys.argv[2]])\n totalTime = time.clock() - start\n print(\"Time used (including running minisat):\" + str(totalTime) + \" seconds\") # end the time counting\n except IOError: # file doesn't exist so print error!\n print(\"File does not exist!\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Basic/sud2sat.py","file_name":"sud2sat.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617123093","text":"import math\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_param_vs_err(params, err_tr, err_te, model_name = 'model', err_type = 'MSE', param = 'degree', save_img = False, img_name = '-1'):\n \"\"\"\n Visualization of the curves of mse/accuracy given parameter (degree, lambda or other).\n :param params: list of the parameters used for each version of the model\n :param err_tr: corresponding training error, whether mse or accuracy\n :param err_te: corresponding test error, whether mse or accuracy\n :param param: label of the parameter used\n :param err_type: type of error (mse or accuracy)\n :param model_name: name of the model used\n :param save_img: boolean indicating if the image generated must be saved\n :param img_name: if the image must be saved, name 
demanded (in order to not erase previously saved images)\n \"\"\"\n if err_type == 'MSE' or err_type == 'mse':\n best_idx = np.argmin(err_te)\n elif err_type == 'accuracy' or err_type == 'Accuracy' or err_type == 'ACCURACY':\n best_idx = np.argmax(err_te)\n \n if param == 'lambda':\n plt.semilogx(params, err_tr, marker=\".\", color='b', label='train set')\n plt.semilogx(params, err_te, marker=\".\", color='r', label='test set')\n else:\n plt.plot(params, err_tr, marker=\".\", color='b', label='train set')\n plt.plot(params, err_te, marker=\".\", color='r', label='test set')\n plt.axvline(params[best_idx], color = 'k', ls = '--', alpha = 0.5, label = 'best ' + param)\n plt.xlabel(param)\n plt.ylabel(err_type)\n plt.title(err_type + ' of ' + model_name + ' given different values for parameter: ' + param)\n plt.legend()\n plt.grid(True)\n if save_img:\n if img_name == '-1':\n print('Argument not found: img_name. Image not saved.')\n else:\n plt.savefig('figures/' + img_name)\n plt.show()\n\ndef plot_param_vs_loss_and_acc(params, loss_tr, loss_te, acc_tr, acc_te, model_name = 'model', param = 'degree', save_img = False, img_name = '-1'):\n \"\"\"\n Visualization of the curves of loss AND accuracy given parameter (degree, learning rate, lambda).\n :param params: list of the parameters used for each version of the model\n :param loss_tr: corresponding training loss\n :param loss_te: corresponding test loss\n :param acc_tr: corresponding training accuracy\n :param acc_te: corresponding test accuracy\n :param param: label of the parameter used\n :param model_name: name of the model used\n :param save_img: boolean indicating if the image generated must be saved\n :param img_name: if the image must be saved, name demanded (in order to not erase previously saved images)\n \"\"\"\n \n best_idx_loss = np.argmin(loss_te)\n best_idx_acc = np.argmax(acc_te)\n \n fig, axs = plt.subplots(1, 2, figsize = [12,5])\n fig.suptitle('Loss and accuracy of ' + model_name + ' given different values for parameter: ' + param)\n if param == 'lambda':\n axs[0].semilogx(params, loss_tr, marker=\".\", color='b', label='train set')\n axs[0].semilogx(params, loss_te, marker=\".\", color='r', label='test set')\n axs[1].semilogx(params, acc_tr, marker=\".\", color='b')\n axs[1].semilogx(params, acc_te, marker=\".\", color='r') \n else:\n axs[0].plot(params, loss_tr, marker=\".\", color='b', label='train set')\n axs[0].plot(params, loss_te, marker=\".\", color='r', label='test set')\n axs[1].plot(params, acc_tr, marker=\".\", color='b')\n axs[1].plot(params, acc_te, marker=\".\", color='r')\n axs[0].axvline(params[best_idx_loss], color = 'k', ls = '--', alpha = 0.5, label = 'best ' + param)\n axs[1].axvline(params[best_idx_acc], color = 'k', ls = '--', alpha = 0.5)\n \n axs[0].set_xlabel(param)\n axs[1].set_xlabel(param)\n axs[0].set_ylabel('Loss')\n axs[1].set_ylabel('Accuracy')\n axs[0].grid(True)\n axs[1].grid(True)\n fig.legend()\n if save_img:\n if img_name == '-1':\n print('Argument not found: img_name. 
Image not saved.')\n else:\n fig.savefig('figures/' + img_name)\n plt.show()\n \ndef plot_boxplots(errors, model_names, err_type = 'accuracy', save_img = False, img_name = '-1'):\n \"\"\"\n Visualisation of the performance of models across folds.\n :param errors: array of losses/accuracies, such that each ROW contains the losses/accuracies of a same model on different folds (cross-validation)\n :param model_names: names of the models corresponding to each row\n :param err_type: type of error (loss or accuracy)\n :param save_img: boolean indicating if the image generated must be saved\n :param img_name: if the image must be saved, name demanded (in order to not erase previously saved images)\n \"\"\"\n errors = errors.T\n plt.figure(figsize=(10,4))\n bp = plt.boxplot(errors, labels = model_names, showmeans = True)\n plt.legend([bp['medians'][0], bp['means'][0]], ['median', 'mean'])\n plt.title('Figure I: Boxplot of the ' + err_type + ' of optimized models (' + str(np.array(errors).shape[0]) + ' folds)')\n plt.ylabel(err_type)\n #plt.xticks(rotation=90)\n if save_img:\n if img_name == '-1':\n print('Argument not found: img_name. Image not saved.')\n else:\n plt.savefig('figures/' + img_name)\n plt.show()\n \ndef plot_twice_boxplots(losses, accuracies, model_names, save_img = False, img_name = '-1'):\n \"\"\"\n Visualisation of the performance of models across folds.\n :param losses: array of losses. Each ROW contains the loss of a same model on different folds (cross-validation)\n :param accuracies: array of accuraciess. Each ROW contains the accuracy of a same model on different folds (cross-validation)\n :param model_names: names of the models corresponding to each row\n :param save_img: boolean indicating if the image generated must be saved\n :param img_name: if the image must be saved, name demanded (in order to not erase previously saved images)\n \"\"\"\n losses = losses.T\n accuracies = accuracies.T\n fig, axs = plt.subplots(1, 2, figsize = [12,5])\n fig.suptitle('Boxplot of the loss and accuracy of models (' + str(np.array(losses).shape[1]) + ' folds)')\n axs[0].boxplot(losses, labels = model_names, showmeans = True)\n axs[0].set_ylabel('Loss')\n bp = axs[1].boxplot(accuracies, labels = model_names, showmeans = True)\n axs[1].set_ylabel('Accuracy')\n fig.legend([bp['medians'][0], bp['means'][0]], ['median', 'mean'])\n if save_img:\n if img_name == '-1':\n print('Argument not found: img_name. 
Image not saved.')\n else:\n fig.savefig('figures/' + img_name)\n plt.show()\n \ndef plot_heatmap(err_tr, err_te, degrees, lambdas, model_name, measure_type = 'Accuracy', save_img = False, img_name = '-1'):\n \"\"\"\n Visualisation of accuracy/loss computed over all lambda-degrees combinations using a heatmap\n :param err_tr: matrix of losses/accuracies computed on training set\n :param err_te: matrix of losses/accuracies computed on test set\n :param degrees: vector of all degrees used to create feature on data before training\n :param lambdas: vector of all lambdas used to regularize training\n :param model_name: model type used to train on data and predict labels\n :param measure_type: Measure used to assess performance (MSE/NLL/Accuracy)\n :param save_img: boolean indicating if the image generated must be saved\n :param img_name: if the image must be saved, name demanded (in order to not erase previously saved images)\n \"\"\"\n fig, axs = plt.subplots(1, 2, figsize = [15,8])\n fig.suptitle(measure_type + ' of ' + model_name + ' given different values for parameter lambda and degree.')\n \n for i in range(2):\n axs[i].imshow(err_tr, cmap = 'PiYG')\n axs[i].set_xticks(np.arange(len(lambdas)))\n axs[i].set_yticks(np.arange(len(degrees)))\n axs[i].set_xticklabels(lambdas)\n axs[i].set_yticklabels(degrees)\n axs[i].set_xlabel('\\u03BB')\n axs[i].set_ylabel('degree')\n plt.setp(axs[i].get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n \n # Write accuracy values\n for i in range(len(degrees)):\n for j in range(len(lambdas)):\n text = axs[0].text(j, i, round(err_tr[i, j], 3),\n ha=\"center\", va=\"center\", color=\"k\")\n text = axs[1].text(j, i, round(err_te[i, j], 3),\n ha=\"center\", va=\"center\", color=\"k\")\n\n axs[0].set_title(\"Train \" + measure_type)\n axs[1].set_title(\"Test \" + measure_type)\n if save_img:\n if img_name == '-1':\n print('Argument not found: img_name. Image not saved.')\n else:\n fig.savefig('figures/' + img_name)\n plt.show()\n\n\ndef plot_features_visualization(y, tX, y_pred=None, names=None, save=True, stacked_plot=True):\n '''\n Function used to visualize the dataset. 
Label y=1 is Higgs boson and label y=-1 is Other.\n Different possiblities of plotting:\n 1) y_pred not provided -> 2 superimposed histograms to visualize Higgs boson and Other distributions\n 2) y_pred provided, stacked_plot=True -> stacked area chart to observe true/false positive/negative distributions\n 3) y_pred provided, stacked_plot=False -> 2 superimposed histograms to visualize Higgs boson and Other distributions (correct/wrong predictions included)\n :param y: true labels [n_samples]\n :param tX: data [n_samples x n_dim]\n :param y_pred: (optional) predicted labels [n_samples]\n If provided, the plots will include the true/false predictions\n :param names: (optional) names of the parameters [n_dim]\n If provided, the plots will take the names as titles\n :param save: If True, the plot will be saved at location figures/Data_visualization/...\n :param stacked_plot: If True, the plot will be a stacked area chart (see above explanations)\n :return: 0\n '''\n y = y.reshape(-1)\n tX = tX.reshape(tX.shape[0], -1)\n tX_positive = tX[y == 1]\n tX_negative = tX[y == -1]\n if y_pred is not None:\n y_pred = y_pred.reshape(-1)\n tX_true_positive = tX[np.logical_and(y == 1, y_pred == 1)] # Higgs, good prediction\n tX_false_negative = tX[np.logical_and(y == 1, y_pred == -1)] # Higgs, bad prediction\n tX_true_negative = tX[np.logical_and(y == -1, y_pred == -1)] # not Higgs, good prediction\n tX_false_positive = tX[np.logical_and(y == -1, y_pred == 1)] # not Higgs, bad prediction\n plt.close()\n n_rows = math.ceil(tX.shape[1]/3)\n fig, axs = plt.subplots(n_rows, 3, figsize=[45, n_rows*7.5])\n fig.patch.set_facecolor('#E0E0E0')\n bins = np.linspace(tX.min(axis=0), tX.max(axis=0), 101).T\n for i in range(3*n_rows):\n if n_rows > 1:\n ax_index = (i//3, i%3)\n else:\n ax_index = (i%3)\n if i >= tX.shape[1]:\n axs[ax_index].set_visible(False)\n else:\n axs[ax_index].set_xlabel(\"parameter value\")\n axs[ax_index].set_ylabel(\"density\")\n if y_pred is None:\n set5_positive = np.delete(tX_positive[:, i], np.where(tX_positive[:, i] == 0.0))\n set6_negative = np.delete(tX_negative[:, i], np.where(tX_negative[:, i] == 0.0))\n axs[ax_index].hist([set5_positive, set6_negative], bins=bins[i], alpha=0.8, density=True, color=['tab:blue','tab:orange'], stacked=False)\n axs[ax_index].legend(['Higgs boson', 'Other'])\n else:\n set1_higgs_good = np.delete(tX_true_positive[:, i], np.where(tX_true_positive[:, i] == 0.0))\n set2_higgs_bad = np.delete(tX_false_negative[:, i], np.where(tX_false_negative[:, i] == 0.0))\n set3_not_good = np.delete(tX_true_negative[:, i], np.where(tX_true_negative[:, i] == 0.0))\n set4_not_bad = np.delete(tX_false_positive[:, i], np.where(tX_false_positive[:, i] == 0.0))\n if stacked_plot:\n axs[ax_index].hist([set2_higgs_bad, set4_not_bad, set3_not_good, set1_higgs_good], bins=bins[i], alpha=0.8, density=True, color=['red', 'salmon', 'tab:green', 'darkgreen'], stacked=True)\n axs[ax_index].legend(['Higgs boson wrongly predicted (false negative)', 'Other wrongly predicted (false positive)', 'Other correctly predicted (true negative)', 'Higgs boson correctly predicted (true positive)'])\n else:\n axs[ax_index].hist([set2_higgs_bad, set1_higgs_good], bins=bins[i], alpha=0.8, density=True, align='left', rwidth=0.4, color=['darkblue','tab:blue'], stacked=True)\n axs[ax_index].hist([set4_not_bad, set3_not_good], bins=bins[i], alpha=0.8, density=True, align='mid', rwidth=0.4, color=['darkorange','peachpuff'], stacked=True)\n axs[ax_index].legend(['Higgs boson wrongly predicted (false 
negative)','Higgs boson correctly predicted (true positive)','Other wrongly predicted (false positive)', 'Other correctly predicted (true negative)'])\n if names is None:\n axs[ax_index].set_title(f\"Parameter {i+1}\")\n else:\n axs[ax_index].set_title(f\"{i+1}) {names[i]}\")\n plt.show()\n if save:\n if os.path.isdir(\"figures\") == False: os.makedirs(\"figures\")\n if os.path.isdir(\"figures/Data_visualization\") == False: os.makedirs(\"figures/Data_visualization\")\n filename_already_used = True\n index = 0\n while filename_already_used:\n index += 1\n filename = f\"figures/Data_visualization/data_visualization_{str(index).rjust(3,'0')}.png\"\n if os.path.isfile(filename) == False:\n fig.savefig(filename, dpi=300)\n filename_already_used = False\n return 0\n\n","sub_path":".ipynb_checkpoints/Plot-checkpoint.py","file_name":"Plot-checkpoint.py","file_ext":"py","file_size_in_byte":13886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572485127","text":"import Tkinter as tk\nimport tkFileDialog\n\n## Provides a Frame with possibility to select the Backup directories and start backup. \nclass FrameDirectories(tk.LabelFrame):\n \n ## Constructor of the class.\n # @param self The object pointer.\n # @param master The pointer to the mother/master object within the graphical interface hierarchy.\n def __init__(self, master):\n tk.LabelFrame.__init__(self,\n master,\n text=\"Directories\")\n self._init_widgets()\n self.grid()\n \n ## Initialises the components of the user interface.\n # @param self The object pointer.\n def _init_widgets(self):\n self.entry_source = tk.Entry( self,\n width=25)\n self.button_source = tk.Button(self,\n text=\"Select source\",\n command=self._select_source_dialog)\n self.entry_target = tk.Entry( self,\n width=25)\n self.button_target = tk.Button(self,\n text=\"Select target\",\n command=self._select_target_dialog)\n self.button_backup = tk.Button(self,\n text=\"Start backup\")\n self.entry_source.grid( row=0,\n column=0)\n self.button_source.grid(row=0,\n column=1)\n self.entry_target.grid( row=1,\n column=0)\n self.button_target.grid(row=1,\n column=1)\n self.button_backup.grid(row=2,\n column=0)\n \n ## Returns selected source directory path.\n # @param self The object pointer.\n # @return Source directory path.\n def get_source(self):\n return self.entry_source.get()\n ## Returns selected target directory path.\n # @param self The object pointer.\n # @return Target directory path.\n def get_target(self):\n return self.entry_target.get()\n ## Returns pointer to the button which should start the backup.\n # @param self The object pointer.\n # @return Pointer to the button object.\n def get_button_backup(self):\n return self.button_backup\n \n ## Provides structure to select source directory.\n # @param self The object pointer.\n def _select_source_dialog(self):\n self.entry_source.delete(0, tk.END)\n self.entry_source.insert(0, tkFileDialog.askdirectory())\n ## Provides structure to select source directory.\n # @param self The object pointer.\n def _select_target_dialog(self):\n self.entry_target.delete(0, tk.END)\n self.entry_target.insert(0, tkFileDialog.askdirectory())\n ","sub_path":"src/views/framedirectories.py","file_name":"framedirectories.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539578463","text":"from __future__ import absolute_import\n\n\nclass Page(object):\n \"\"\"The page 
object.\"\"\"\n\n    def __init__(self, name, title='', content='', is_static=True,\n                 api_url=None, base_template=None):\n        # The page name\n        self.name = name\n        # The page title\n        self.title = title\n        # The page content\n        self.content = content\n        # Whether the page has any context data\n        self.is_static = is_static\n        # The url of the backend API\n        self.api_url = api_url\n        # The base template name\n        self.base_template = base_template\n\n\nclass Loader(object):\n\n    def get_page(self, name, only_api_url=False):\n        \"\"\"Retrieve the page object.\"\"\"\n        raise NotImplementedError(\".get_page() must be overridden.\")\n\n\nclass DefaultLoader(Loader):\n\n    content = '''\n<!DOCTYPE html>\n<html>\n  <head>\n    <title>{title}</title>\n  </head>\n  <body>\n    <p>\n      This is the {title} page, which is provided by\n      the default page loader DefaultLoader.\n    </p>\n    <p>\n      The configurable pages here should be accessed via the\n      url in the format of \"{base_url}&lt;name&gt;\". Now please\n      try the following urls:\n    </p>\n    <ul>\n      <li><a href=\"{host}{index_url}\">{host}{index_url}</a></li>\n      <li><a href=\"{host}{help_url}\">{host}{help_url}</a></li>\n    </ul>\n    <p>\n      In the real world, you should change the `CONFPAGES.\n      PAGE_LOADER` setting in your Django settings file to use\n      your own page loader, or to use the built-in MongoLoader\n      (note that the pymongo library is required).\n    </p>\n  </body>\n</html>
\n \n\n'''\n\n def get_page(self, name, only_api_url=False):\n \"\"\"Retrieve the page object.\"\"\"\n page = Page(name)\n if not only_api_url:\n from django.core.urlresolvers import reverse\n page.title = name.capitalize()\n page.content = self.content.format(\n title=page.title,\n base_url=reverse('confpages-index'),\n host='http://localhost:8000',\n index_url=reverse('confpages-detail', args=('index',)),\n help_url=reverse('confpages-detail', args=('help',))\n )\n return page\n","sub_path":"confpages/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444114071","text":"import socket \nimport threading\n\nHEADER = 2048\nY_PORT = 9000\nY_SERVER = socket.gethostbyname(socket.gethostname())\nY_ADDR = (Y_SERVER, Y_PORT)\nFORMAT = 'utf-8'\n\nZ_PORT = 9400\nZ_SERVER = '192.168.56.1'\nZ_ADDR = (Z_SERVER, Z_PORT)\n\ny_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nz_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ny_server.bind(Y_ADDR)\n\n\ndef handle_client(conn, addr):\n print(f\"[+] X user {addr} connected.\\n\")\n\n connected = True\n while connected:\n msg_length = conn.recv(HEADER).decode(FORMAT)\n if msg_length:\n msg_length = int(msg_length)\n msg = conn.recv(msg_length).decode(FORMAT)\n\n print(f\"\\nX [{addr}] sends: {msg}\")\n conn.send(\"Y received message\".encode(FORMAT))\n send(msg)\n print('[TRANSMIT] Msg from X to Z sent')\n conn.close()\n\ndef send(message):\n z_server.send(message.encode(FORMAT))\n\ndef start():\n z_client.connect(Z_ADDR)\n print('[+] Client Y connected to server DZ')\n y_server.listen()\n print(\"[LISTENING] Server is listening for client X\")\n while True:\n conn, addr = y_server.accept()\n x = threading.Thread(target=handle_client, args=(conn, addr))\n x.start()\n print(f\"[ACTIVE CONNECTIONS] X = {threading.activeCount() - 7}\")\n\n\nprint(\"[STARTING] server is starting...\")\nstart()","sub_path":"P2 - Project/X-Y-Z_TCP/RELAY_Y2.py","file_name":"RELAY_Y2.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225140631","text":"\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\n# the data, shuffled and split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = 
Sequential()\nmodel.add(Conv2D(6, kernel_size=(5,5), input_shape=input_shape, padding='same', activation='tanh'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(16, kernel_size=(5,5), activation='tanh'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(120, activation='tanh'))\n\n\nmodel.add(Dense(84, activation='tanh'))\n\n\nmodel.add(Dense(10, activation='softmax'))\n\n\n# You must create a neural netowork with:\n# A first hidden convolutional layer of 32 neurons, relu activation and window size 3x3\n# A second hidden convolutional layer of 64 neurons, relu activation and window size 3x3\n# Max pooling to reduce the size by 3 times in width and height\n# Add a flatten layer to vectorize the images\n# A last dense hidden layer of 128 neurons and relu activation\n# A dropout with regularization 0.5\n# A dense output layer \n\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n\n# After that you must implement the LENET5 network\n\n","sub_path":"TensorFlow_Keras/Ejercicio4/ExerciseCNN.py","file_name":"ExerciseCNN.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186775377","text":"#!/usr/bin/env python3\nfrom itertools import count\n\n\nEMPTY = 'L'\nOCCUPIED = '#'\nFLOOR = '.'\n\n\ndef count_occupied(area, x, y):\n directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (-1, -1), (1, 1), (1, -1), (-1, 1)]\n\n cnt = 0\n for dir_x, dir_y in directions:\n new_x = x + dir_x\n new_y = y + dir_y\n\n while 0 <= new_y < len(area) and 0 <= new_x < len(area[0]):\n if area[new_y][new_x] == OCCUPIED:\n cnt += 1\n\n if area[new_y][new_x] != FLOOR:\n break\n\n new_x += dir_x\n new_y += dir_y\n\n return cnt\n\n\ndef tick(area):\n new_area = []\n for y, row in enumerate(area):\n new_row = ''\n for x, c in enumerate(row):\n num_children = count_occupied(area, x, y)\n if c == EMPTY and num_children == 0:\n new_row += OCCUPIED\n elif c == OCCUPIED and num_children >= 5:\n new_row += EMPTY\n else:\n new_row += c\n new_area.append(new_row)\n return new_area\n\n\ndef compute(cts: str):\n area = cts.splitlines()\n\n for i in count(1):\n new_area = tick(area)\n if new_area == area:\n return sum(1 for row in area for c in row if c == OCCUPIED)\n area = new_area\n\n\ndef main() -> int:\n with open('input.txt', 'r') as f:\n cts = f.read().strip()\n\n print(compute(cts))\n\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"day11/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349449440","text":"\"\"\"\nCopyright 2020, All rights reserved.\nAuthor : SangJae Kang\nMail : craftsangjae@gmail.com\n\"\"\"\nimport base64\nfrom dateutil.parser import parse as parse_date\n\n\ndef parse_repository(query):\n if 'data' in query and 'repository' in query['data']:\n document = query['data']['repository']\n if not isinstance(document, dict):\n raise ValueError(str(query))\n else:\n raise ValueError(\"query\" + str(query))\n\n for k, v in document.items():\n if k == 'owner':\n document[k] = v.get('login', \"\")\n elif k in 
{\"watchers\", \"stargazers\", \"commitComments\",\n \"pullRequests\", \"releases\", \"deployments\", \"labels\"}:\n if isinstance(v, dict):\n document[k] = v.get('totalCount', 0)\n else:\n document[k] = 0\n elif k == \"primaryLanguage\":\n if isinstance(v, dict):\n document[k] = v.get('name', \"\")\n else:\n document[k] = \"\"\n elif k == \"licenseInfo\":\n if isinstance(v, dict):\n document[k] = v.get('name', \"\")\n else:\n document[k] = \"\"\n elif k == \"languages\":\n if isinstance(v, dict):\n document[k] = [\n elem.get('name', \"\") for elem in v.get('nodes', [])\n if isinstance(elem, dict)]\n elif k == \"repositoryTopics\":\n try:\n document[k] = [\n elem.get('topic', {}).get(\"name\", \"\")\n for elem in v.get('nodes', [])]\n except AttributeError:\n document[k] = []\n\n try:\n repo_id = int(base64.decodebytes(document['id'].encode(\"utf8\"))\n .decode('utf8').split('Repository')[-1])\n except:\n repo_id = -1\n\n document[\"repo_id\"] = repo_id\n return document\n\n\ndef parse_rateLimit(query):\n if \"data\" in query and 'rateLimit' in query['data']:\n limit_result = query['data']['rateLimit']\n remain, resetAt = limit_result['remaining'], parse_date(limit_result['resetAt'])\n return remain, resetAt\n else:\n raise ValueError(str(query))\n\n","sub_path":"service/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379499524","text":"import dlib\nimport cv2\n#import imutils\nimport numpy as np\n# src http://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/\n\nSVM_FILE = 'machine-learned/detector-n50.svm'\ndetector = dlib.simple_object_detector(SVM_FILE)\nvideo = cv2.VideoCapture(0)\nwhile(True):\n grabbed, frame = video.read()\n if not grabbed:\n print('Failed to grab!')\n continue\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n contours = detector(frame)\n for c in contours:\n cv2.rectangle(frame, (c.left(), c.top()), (c.right(), c.bottom()), (0, 255, 0), 2)\n\n cv2.imshow(\"Detection\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo.release()\ncv2.destroyAllWindows()\n","sub_path":"dlib-object-detect.py","file_name":"dlib-object-detect.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"47454138","text":"from typing import List\nfrom decimal import Decimal\nimport numpy as np\nfrom operator import add, sub, mul, truediv, pow\n\nfrom convert_to_rpn import functions, names\n\nconvert_alg_to_func = {\n '+': add,\n '-': sub,\n '*': mul,\n '/': truediv,\n '^': pow,\n '--': lambda x: -x\n}\n\n\ndef compute_rpn(s: List[Decimal or str]) -> Decimal:\n temp_stack = []\n while s:\n elem = s.pop(0)\n if isinstance(elem, Decimal):\n temp_stack.append(elem)\n else:\n if elem in ['+', '-', '*', '/', '^']:\n operator = convert_alg_to_func[elem]\n # implement action on last two elements of temp_stack\n term2 = temp_stack.pop()\n term1 = temp_stack.pop()\n temp_stack.append(operator(term1, term2))\n elif elem == '--':\n term = temp_stack.pop()\n temp_stack.append(-term)\n else:\n [operator, nargs] = functions[elem]\n terms = temp_stack[-nargs:]\n terms = [float(term) for term in terms]\n temp_stack = temp_stack[:-nargs]\n temp_stack.append(operator(*terms))\n\n if len(temp_stack) != 1:\n raise\n return 
temp_stack[0]\n","sub_path":"calcs/sci_funcs/implement_rpn.py","file_name":"implement_rpn.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390756622","text":"# encoding: utf-8\n\n\"\"\"\n2018-7-14 14:13, mouse_event.py created by wq.\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n# What this file does:\n# depending on the selected mode, draw a rectangle or a circle while dragging the mouse\n\n\ndrawing = False  # becomes True while the mouse button is pressed\nmode = True  # if mode is True, draw rectangles; press 'm' to switch to drawing curves\n\nix, iy = -1, -1\n\n\n# drawing listener: handles mouse events (both the event type and the position where it happens)\ndef draw_listener(event, x, y, flags, param):\n    global ix, iy, drawing, mode\n\n    # when the left button is pressed, record the starting coordinates\n    if event == cv2.EVENT_LBUTTONDOWN:\n        drawing = True\n        ix, iy = x, y\n    # while the left button is held down and the mouse moves, draw; flags indicates whether the button is down\n    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:\n        if (drawing == True):\n            if (mode == True):\n                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 2)\n            else:\n                cv2.circle(img, (ix, iy), 3, (0, 0, 255), -1)\n\n    elif event == cv2.EVENT_LBUTTONUP:\n        drawing = False\n\n\ndef draw_listener2(event, x, y, flags, param):\n    global ix, iy, drawing, mode\n    # cv2.line(img, (x, -600), (x, 600), (0, 0, 255), 2)\n    # cv2.line(img, (-600, y), (600, y), (0, 0, 255), 2)\n\n\n    # when the left button is pressed, record the starting coordinates\n    if event == cv2.EVENT_LBUTTONDOWN:\n        ix, iy = x, y\n    elif event == cv2.EVENT_LBUTTONUP:\n        if (mode == True):\n            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 2)\n        else:\n            cv2.circle(img, (ix, iy), 3, (0, 0, 255), -1)\n\n\ndef print_all_mouse_events():\n    events = [i for i in dir(cv2) if 'EVENT' in i]\n    print(events)\n\n\nprint_all_mouse_events()\nimg = np.zeros((512, 512, 3), np.uint8)  # uint8 keeps pixel values in 0-255 so imshow renders colors correctly\ncv2.namedWindow(\"part_1_image\")\ncv2.setMouseCallback(\"part_1_image\", draw_listener2)  # register the listener callback for mouse events\n\nwhile 1:\n    cv2.imshow(\"part_1_image\", img)\n\n    k = cv2.waitKey(20) & 0xff\n    if k == ord('m'):\n        mode = not mode\n    elif k == 27:\n        break\n\ncv2.destroyAllWindows()\n","sub_path":"part_4_mouse_paint/mouse_event_advance.py","file_name":"mouse_event_advance.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50817","text":"# problem available at\n# https://www.hackerrank.com/challenges/find-the-running-median/problem\n\nimport heapq\n\nn = int(input())\n\n# two-heap running median: heap is a min-heap holding the upper half,\n# list_ holds negated values and acts as a max-heap for the lower half\nheap = []\nlist_ = []\n\nfor _ in range(n):\n    a = int(input())\n\n    while heap:\n        if len(heap) > len(list_):\n            if heap[0] < a:\n                b = heapq.heappushpop(heap, a)\n                heapq.heappush(list_, -b)\n            else:\n                heapq.heappush(list_, -a)\n        else:\n            if -list_[0] > a:\n                b = -heapq.heappushpop(list_, -a)\n                heapq.heappush(heap, b)\n            else:\n                heapq.heappush(heap, a)\n        break\n\n    if not heap:\n        heapq.heappush(heap, a)\n\n    # odd count: the median is the smallest element of the upper half;\n    # even count: the median is the mean of the two middle elements\n    if len(heap) > len(list_):\n        print(round(float(heap[0]), 1))\n    else:\n        print(round(float((heap[0]-list_[0])/2), 1))\n","sub_path":"hackerrank/find_running_median.py","file_name":"find_running_median.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1500839","text":"#! 
/usr/bin/env python\n#coding=utf-8\n\nfrom uliweb.orm import *\n\n#ImageRootPaths = get_model(\"imagerootpaths\")\nclass ImageRootPaths(Model):\n path = Field(str)\n\n#ImageDirPaths = get_model(\"imagedirpaths\")\nclass ImageDirPaths(Model):\n rootpath = Reference(ImageRootPaths)\n relpath = Field(str)\n\n#Images = get_model(\"images\")\nclass Images(Model):\n dpath = Reference(ImageDirPaths)\n filename = Field(str)\n","sub_path":"apps/images/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325667102","text":"def print_4by4_grid(n):\n \"\"\"Print a 4-by-4 grid with each block size n.\"\"\"\n \n if (n <= 0) or (n - n//1 > 0):\n return\n \n horizontal_line = '+ ' + n * '- ' + '+ ' + n * '- ' + '+'\n vertical_line = '| ' + n * ' ' + '| ' + n * ' ' + '|'\n \n print(horizontal_line)\n \n for block in range(2):\n \n for k in range(n):\n print(vertical_line)\n \n print(horizontal_line)\n \n\ndef print_grid(n_blocks,size):\n \"\"\"Prints a n_blocks-by-n_blocks grid with each block size \"size\".\"\"\"\n \n if (n_blocks <= 0) or (size <= 0) or (\n n_blocks - n_blocks//1 > 0) or (size - size//1 > 0):\n return\n \n horizontal_line = n_blocks * ('+ ' + size * '- ') + '+'\n vertical_line = n_blocks * ('| ' + size * ' ') + '|'\n \n print(horizontal_line)\n \n for block in range(n_blocks):\n \n for k in range(size):\n print(vertical_line)\n \n print(horizontal_line)","sub_path":"students/patchcarrier/Lesson02/grid_printer_exercise.py","file_name":"grid_printer_exercise.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"416956906","text":"#coding:utf-8\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\"Myflight.settings\")\n\nimport django\nif django.VERSION >= (1,7):\n django.setup()\n\nimport pandas as pd\nfrom airport.models import add_airport\nfrom airplane.models import add_Fligt\ndef main():\n io = \"airplane.xls\"\n df = pd.read_excel(io,sheet_name=\"hangban\",header=0)\n # print(df.ix[1][\"departure_city\"])\n print(df.__len__())\n for i in range(df.__len__()):\n data = df.ix[i]\n add_airport(newairport=data[\"departure_airport\"],city=data[\"departure_city\"],temperature=23)\n add_airport(newairport=data[\"landing_airport\"],city=data[\"landing_city\"],temperature=23)\n add_Fligt(flight_id=data[\"flight_schedules\"],mileage=data[\"mileage\"],aircraft_models=data[\"aircraft_models\"],\n plan_departure_time=data[\"departure_time\"],plan_arrival_time=data[\"landing_time\"],\n departure=data[\"departure_airport\"],arrival=data[\"landing_airport\"],\n punctuality_rate=data[\"punctuality_rate\"],delay_time=data[\"average_delayed\"],company=data[\"airlines\"],\n is_mon=data[\"is_mon\"],is_tue=data[\"is_tue\"],is_wed=data[\"is_wed\"],is_thr=data[\"is_thr\"],\n is_fri=data[\"is_fri\"],is_sat=data[\"is_sat\"],is_sun=data[\"is_sun\"])\n print(i)\n\n\nif __name__==\"__main__\":\n main()\n print(\"main_done\")\n","sub_path":"utils/excel2mysql.py","file_name":"excel2mysql.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596958049","text":"import datetime\nimport pymysql.cursors\n# 安装MySQL驱动\nconfig = {\n 'host': '127.0.0.1',\n 'port': 3306,\n 'user': 'root',\n 'password': 'sw_199009',\n 'db': 'DataCenter',\n 'charset': 'utf8mb4',\n 'cursorclass': 
pymysql.cursors.DictCursor,\n}\n\n# Connect to the database\nconnection = pymysql.connect(**config)\n\n# Get tomorrow's date\ntomorrow = datetime.datetime.now().date() + datetime.timedelta(days=1)\n\n# Execute SQL statements\ntry:\n    with connection.cursor() as cursor:\n        # Execute the SQL statement to insert a record\n        sql = 'INSERT INTO employees (first_name, last_name, hire_date, gender, birth_date) VALUES (%s, %s, %s, %s, %s)'\n        cursor.execute(sql, ('Robin', 'Zhyea', tomorrow, 'M', datetime.date(1989, 6, 14)))\n        # autocommit is not enabled by default, so commit explicitly to persist the executed statements\n        connection.commit()\n\nfinally:\n    # connection.close();\n    print('Finally')\n\n# Hire-date range for the query\nhire_start = datetime.date(1999, 1, 1)\nhire_end = datetime.date(2018, 12, 31)\n\n# Execute SQL statements\ntry:\n    with connection.cursor() as cursor:\n        # Execute the SQL statement to run the query\n        sql = 'SELECT first_name, last_name, hire_date FROM employees WHERE hire_date BETWEEN %s AND %s'\n        cursor.execute(sql, (hire_start, hire_end))\n        # Fetch the query result\n        result = cursor.fetchone()\n        print(result)\n        # autocommit is not enabled by default, so commit explicitly to persist the executed statements\n        connection.commit()\nfinally:\n    connection.close()\n\n","sub_path":"016_访问数据库/002_使用MySQL.py","file_name":"002_使用MySQL.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593389263","text":"'''\nThis App illustrates the use of message boxes in Tkinter\n'''\n#Packages\nfrom tkinter import *\nfrom PIL import ImageTk,Image\nfrom tkinter import messagebox\n\nwindow = Tk()\nwindow.title(\"Message\")\nwindow.iconbitmap('D:/e-Learning/Tkinter/Images/India-flag.ico')\nwindow.geometry(\"400x200\")\n\n#Types of message boxes : showinfo showwarning showerror askquestion askokcancel askyesno\n\ndef popup():\n    response = messagebox.askyesno(\"Popup!\", \"Hello World!\")\n    #Label(window, text=response).pack()\n    if response == 1:\n        Label(window, text=\"U clicked YES!\").pack()\n    else:\n        Label(window, text=\"U clicked NO!!\").pack()\n\nButton(window, text=\"Popup\", command=popup).pack()\n\n#event handler\nwindow.mainloop()\n","sub_path":"App's/message-box.py","file_name":"message-box.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"547665470","text":"# NOTE: these imports are an assumption -- the gist targets the old (pre-1.0)\n# Scrapy API; Spider1 and Spider2 are the project's own spider classes and are\n# not defined in this snippet\nfrom scrapy import log, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.resolver import CachingThreadedResolver\nfrom scrapy.utils.project import get_project_settings\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom twisted.internet import reactor\n\nspider_count = 0\nspiders = []\n\n# Get a signal that a crawler has finished its job, start another crawler\ndef spider_closed(spider, reason):\n    global spider_count\n    spider_count += 1\n\n    #Only starts another crawler if available\n    if spider_count < len(spiders):\n        reactor.callLater(0, start_crawler, spider=spiders[spider_count])\n    else:\n        reactor.stop() #Stops reactor to prevent script from hanging\n\n\ndef start_crawler(spider):\n    crawler = Crawler(get_project_settings()) #Loads the settings\n    crawler.configure()\n    crawler.crawl(spider)\n    crawler.start()\n\nif __name__ == '__main__':\n\n    # Subscribe to the \"spider_closed\" signals\n    dispatcher.connect(spider_closed, signal=signals.spider_closed)\n\n    spiders.append(Spider1())\n    spiders.append(Spider2())\n\n    reactor.callLater(0, start_crawler, spider=spiders[spider_count])\n\n    reactor.installResolver(CachingThreadedResolver(reactor))\n\n    #Start log and twisted reactor\n    log.start()\n    reactor.run(installSignalHandlers=False)","sub_path":"dockerized-gists/5362433/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427530574","text":"# -*- coding: utf8 -*-\nimport sys, os\nimport time\nimport datetime\nimport json\nimport configparser\nimport threading\nimport 
psycopg2\nimport psycopg2.extras\nsys.path.append('C:\\\\Python34\\\\mysite\\\\tcb_bank_gate_way\\\\')\n#path = \"C:\\\\Python34\\\\mysite\\\\tcb_bank_gate_way\\\\\"\npath = \"\"\n\n#timestamped execution log\ndef write_log_txt(object):\n    write_log_path = get_ini_str(\"INIT\",\"LOG_PATH\")\n    if not os.path.isdir(write_log_path):\n        os.mkdir(write_log_path)\n    f = open(write_log_path+\"tcb_bank_gw\"+str(datetime.datetime.now())[0:10].replace('-','')+\"_log.txt\",\"a+\", encoding = 'utf8')\n\n    f.write(\"\\n[\"+str(datetime.datetime.now())[0:19]+\"]\"+str(object))\n    f.close()\n    \n# read a setting from the ini file\ndef get_ini_str(section, key):\n    config_ = configparser.ConfigParser()\n    delete_BOM_general(path+\"tcb_bank_gw.ini\")\n    config_.read(path+\"tcb_bank_gw.ini\")\n    return config_.get(section, key)\n    \ndef delete_BOM_general(filepath_):\n    frb = open(filepath_,mode='rb')\n    content = frb.read()\n    frb.close()\n    if content.startswith(b'\\xef\\xbb\\xbf'):\n        content = content[3:]\n        f_no_bom = open(filepath_, 'wb+')\n        f_no_bom.write(content)\n        f_no_bom.close()\n    \n# build a DB cursor\ndef build_DB_cursor_(switch_case):\n    DB_IP_ = get_ini_str(switch_case, \"DB_IP_\")\n    DB_Port_ = get_ini_str(switch_case, \"DB_Port_\")\n    DB_DB_ = get_ini_str(switch_case, \"DB_DB_\")\n    DB_User_ = get_ini_str(switch_case, \"DB_Name_\")\n    DB_Pwd_ = get_ini_str(switch_case, \"DB_Pwd_\")\n\n    DB_str_ = \"host=\" + DB_IP_ + \" port=\" + DB_Port_ + \" user=\" + DB_User_ + \" dbname=\" + DB_DB_ + \" password=\" + DB_Pwd_\n    \n    try:\n        DB_conn_ = psycopg2.connect(DB_str_)\n    except:\n        write_log_txt(\"==== build_DB_cursor_ ERROR ====\" + str(sys.exc_info()))\n        #sys.exit(0) \n    \n    DB_conn_.autocommit = True\n    DB_conn_.set_client_encoding('UTF8')\n    DB_cursor_ = DB_conn_.cursor()\n    return DB_cursor_\n    \n\n\nif __name__ == '__main__':\n    DB_cursor_ = build_DB_cursor_(\"bill\")\n    sql = \"select DISTINCT account, id, bhno, cseq, A.confirm, B.name from dfh.tb_customer_bank A left join dfh.tb_customerinfo B on B.branch = A.bhno and A.cseq=B.cesq\"\n    DB_cursor_.execute(sql)\n    rows = DB_cursor_.fetchall()\n    for row in rows:\n        sql = \"select dfh.sf_tcb_bank_fxdp030(\"\n        sql = sql + \"'\" + row[2] + \"',\"\n        sql = sql + \"'\" + row[3] + \"',\"\n        sql = sql + \"'\" + row[5] + \"',\"\n        sql = sql + \"'\" + row[1] + \"',\"\n        sql = sql + \"'',\" \n        sql = sql + \"'\"+str(datetime.datetime.now())[0:10].replace('-','')+\"',\" \n        sql = sql + \"'\"+str(datetime.datetime.now())[11:19].replace(':','')+\"',\"\n        sql = sql + \"'',\" \n        sql = sql + \"'af40f55b21c59e10',\" \n        sql = sql + \"'FXDP030',\" \n        sql = sql + \"'\" + row[0] + \"')\"\n        print(sql)\n        \n        DB_cursor_.execute(sql)\n    write_log_txt(\"tcb_bank_amt running\")","sub_path":"tcb_bank_amt.py","file_name":"tcb_bank_amt.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463851012","text":"'''\nThis application is used to perform different\noperations on the dataset created\nbelow is a list of commands available\nrename\ncheck extra\n'''\nimport os\ndef file_command():\n    path = os.getcwd()\n    path = os.path.join(path,'dataset')\n    #print(path)\n    data = os.listdir(path)\n    data = sorted(data)\n    #print(str(len(data)) + \" folders found\")-->[a,b.....,z]\n    try:\n        for char in os.listdir(path):\n            i = 1\n            path_old = os.path.join(path,char)\n            path_new = os.path.join(path,char+\"1\")\n            os.makedirs(path_new,exist_ok=True)\n            os.chdir(path_old)\n            print(path_old)\n            for file in os.listdir(path_old):\n                os.rename(file,str(i)+'.png')\n                i = i + 1\n                #print(\"file renamed to \" + 
file)\n    except Exception as e:\n        print(e)\n\nif __name__==\"__main__\":\n    file_command()\n","sub_path":"obsolete/file_commands.py","file_name":"file_commands.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290556051","text":"#!/bin/python3\nlista = (1,2,3,4,5,10,9,8,7,6)\ndef reverser(alist):\n    newlist=[]\n    # inserting each element at index 0 reverses the order\n    for a in alist:\n        newlist.insert(0, a)\n    print(newlist)\nreverser(lista)","sub_path":"Lists_Strings/list_reversal.py","file_name":"list_reversal.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332912125","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nimport io\nimport os.path\nimport inspect\n\nfrom PIL import Image\n\nimport tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\nfrom tornado.web import HTTPError\nfrom tornado.web import url\nfrom tornado.log import app_log\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"debug\", default=True, help=\"turn on debug mode\", type=bool)\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\nroot = lambda x: os.path.join(PROJECT_DIR, x)\n\nfrom utils import bri, con, sharp, color\n\n\nclass BaseRequestHandler(tornado.web.RequestHandler):\n    pass\n\n\nclass Argument(object):\n    pass\n\n\nactions = {\"bri\": bri, \"con\": con, \"sharp\": sharp, \"color\": color}\n\nclass ImageHandler(BaseRequestHandler):\n\n    #@tornado.web.asynchronous\n    def get(self, filename):\n\n        if not os.path.exists(os.path.join(\"images\", filename)):\n            raise HTTPError(404)\n\n        image = Image.open(os.path.join(\"images\", filename))\n\n        for arg in self.request.arguments:\n            if arg in actions:\n                func = actions[arg]\n                _args = inspect.getargspec(func).args\n                _args.pop(0)\n                kwargs = {k: float(self.get_argument(k)) for k in _args}\n                image = func(image, **kwargs)\n\n        w = int(self.get_argument(\"w\", image.size[0]))\n        h = int(self.get_argument(\"h\", image.size[1]))\n        o = io.BytesIO()\n        fmt = self.get_argument(\"format\", \"JPEG\")\n        image.resize((w, h)).save(o, format=fmt)\n        img = o.getvalue()\n        self.set_header('Content-type', 'image/' + fmt)\n        self.set_header('Content-length', len(img))\n        del o, image\n        self.write(img)\n\n\nclass Application(tornado.web.Application):\n    def __init__(self):\n        urlpatterns = (\n            url(r\"/image/(\\w+\\.(?:jpg|png))$\", ImageHandler,\n                name=\"image\"),\n        )\n        if options.debug:\n            urlpatterns += (\n                url(r\"/static/(.*)\",\n                    tornado.web.StaticFileHandler, {\"path\": root(\"static\")}),\n            )\n        settings = dict(\n            cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n            login_url=\"/auth/login\",\n            template_path=root(\"templates\"),\n            static_path=root(\"static\"),\n            static_url_prefix=\"/static/\",\n            xsrf_cookies=True,\n            # facebook_api_key=options.facebook_api_key,\n            # facebook_secret=options.facebook_secret,\n            # ui_modules={\"Post\": PostModule},\n            debug=options.debug,\n            # autoescape=None,\n            compress_response=True,\n        )\n        super(Application, self).__init__(urlpatterns, **settings)\n\n\ndef main():\n    tornado.options.parse_command_line()\n    app_log.debug(\"server on port: %s\" % options.port)\n    http_server = tornado.httpserver.HTTPServer(Application())\n    http_server.listen(options.port)\n    tornado.ioloop.IOLoop.current().start()\n\nif __name__ == \"__main__\":\n    
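# hypothetical example request (not from the original file): GET /image/photo.jpg?w=320&h=240\n    # resizes the image; a query arg named after a key in 'actions' (bri, con, sharp, color)\n    # dispatches to that filter, with its parameters read from same-named query arguments.\n    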
main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146355691","text":"import numpy as np\nfrom collections import deque\nfrom torch.utils import data\nimport torch\nfrom utils import to_tensor, standardize\nfrom value_estimators import *\nimport torch.optim as optim\nimport time\n\ndefault_dataloader_params={'batch_size': 256, 'shuffle': True,'num_workers': 4}\n\n\nclass MAReacherTrajectories(data.Dataset):\n def __init__(self, trajectories, gamma=1.0, eval_func='gae'):\n self.trajectories=trajectories\n for n, v in trajectories.items():\n if 'last' not in n:\n self.trajectories[n] = torch.cat(v)\n\n # todo-2, add conditional control for the case of rewards shape for GYM environment:\n self.t_max, self.n_agents = self.trajectories['rewards'].shape # assume the same t_max for actions, states and rewards\n if eval_func == 'gae':\n # todo-, gae_lambda is not exposed for hyperparam optimization\n self.estimated_values, self.true_values = GAE(self.trajectories['rewards'], self.trajectories['values'],\n self.trajectories['last_dones'], self.trajectories['last_values'],\n gamma=gamma)\n\n self.estimated_values = standardize(self.estimated_values)\n for n, v in trajectories.items():\n if not('last' in n or n=='rewards'): # rewards are not used for learning, so I fix the shape to keep info not lose\n if len(v.shape) == 3:\n trajectories[n] = v.reshape([-1, v.shape[-1]])\n else:\n trajectories[n] = v.reshape([-1])\n\n ##final shape checking\n # for n, v in trajectories.items():\n # print(n, 'has shape:', v.shape)\n self.estimated_values = self.estimated_values.reshape([-1])\n self.true_values = self.true_values.reshape([-1])\n\n def __len__(self):\n return self.t_max*self.n_agents\n\n def __getitem__(self, item):\n return self.trajectories['states'][item], self.trajectories['actions'][item], \\\n self.trajectories['log_probs'][item], self.estimated_values[item], self.true_values[item]\n\n def mean_trajectory_score(self):\n return self.trajectories['rewards'].sum(0).mean().item()\n\n\ndef unity_rollout_ppo(agent, env, max_t):\n trajectory = {\"actions\": [],\n \"rewards\": [],\n \"states\": [],\n \"log_probs\": [],\n \"values\": [],\n \"last_values\": None,\n \"last_dones\": None}\n\n brain_name = env.brain_names[0]\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations\n\n for t in range(max_t):\n states = to_tensor(states) # states is now tensor type\n actions, log_probs, _, values = agent.forward(states)\n\n env_info = env.step(actions.cpu().numpy())[brain_name] # send the action to the environment\n next_states = env_info.vector_observations # get the next state\n rewards = env_info.rewards # get the reward\n dones = env_info.local_done # see if episode has finished\n\n trajectory['states'].append(states.unsqueeze(0))\n trajectory['rewards'].append(to_tensor(rewards).unsqueeze(0))\n trajectory['actions'].append(actions.detach().unsqueeze(0)) # todo, may not be necessary\n trajectory['log_probs'].append(log_probs.detach().unsqueeze(0))\n if not (values is None):\n trajectory['values'].append(values.detach().unsqueeze(0))\n\n states = next_states\n if np.any(dones):\n break\n\n trajectory['last_values'] = agent.state_values(to_tensor(next_states)).detach()\n trajectory['last_dones'] = to_tensor(dones)\n\n return trajectory\n\n\ndef ppo_unity(policy, env, eval_func, rollout_func,\n n_episodes=1000, t_max=1000,\n 
gamma=1.0, SGD_epoch=5, grad_clip=0.2,\n epsilon=0.01, beta=0.01, pass_score=30.0,\n print_every=100, dataloader_params=default_dataloader_params):\n\n optimizer = optim.Adam(policy.parameters(), lr=1e-4)\n scores_deque = deque(maxlen=100)\n scores = []\n\n t0 = time.time()\n for i_episode in range(1, n_episodes + 1):\n trajectories = rollout_func(policy, env, t_max)\n PPO_Batch = MAReacherTrajectories(trajectories, gamma, eval_func)\n episodic_return = PPO_Batch.mean_trajectory_score()\n PPO_Batch_generator = data.DataLoader(PPO_Batch, **dataloader_params)\n\n policy.train()\n for e in range(SGD_epoch):\n for i_step, (mb_states, mb_actions, mb_log_probs, mb_estimated_values, mb_returns) in enumerate(PPO_Batch_generator):\n # PPO loss_evaluation\n _, log_probs_new, entropy, mb_v = policy(mb_states, mb_actions)\n ratios = (log_probs_new - mb_log_probs).exp()\n surr1 = ratios * mb_estimated_values\n surr2 = torch.clamp(ratios, 1 - epsilon, 1 + epsilon) * mb_estimated_values\n L_actor = -torch.min(surr1, surr2).mean() - beta * entropy.mean()\n L_critic = 0.5 * (mb_returns - mb_v).pow(2).mean()\n L = L_actor + L_critic\n\n # training steps check:\n # print(\"epoch {}\".formate), \"step {}\".format(i_step), \"batch-size {}\".format(ratios.shape[0]))\n\n optimizer.zero_grad()\n L.backward()\n if not(grad_clip is None):\n torch.nn.utils.clip_grad_norm_(policy.parameters(), grad_clip)\n optimizer.step()\n del L\n\n # episodic value estimation\n scores_deque.append(episodic_return)\n scores.append(episodic_return)\n # clipping and exploration decay\n epsilon *= .999\n beta *= .995\n\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), episodic_return), end=\"\")\n if i_episode % print_every == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n if np.mean(scores_deque) >= pass_score:\n print('Environment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_deque)))\n break\n\n t_end = time.time()\n torch.save(policy.state_dict(), './data/ppo_gae/checkpoint.pth')\n print(\"\\nTotal time elapsed: {} second\".format(t_end-t0))\n policy.eval()\n\n return scores","sub_path":"rl_algorithms.py","file_name":"rl_algorithms.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166232617","text":"import math\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.externals import joblib\r\nimport sys\r\n\r\n##############parses sequences and structure features in SVM readable format####################################################################################\r\n\r\nf = open ('C:\\\\Users\\\\Leonie\\\\Git\\\\scilife-project\\\\input\\\\5of100_seq_set.txt', 'r')\r\n\r\nlist_all = list()\r\nfor line in f:\r\n\tnewline = line.replace('\\n', '')\r\n\tlist_all.append(newline)\r\n\r\nlist_seq = []\r\nlist_stru = []\r\nword_list = []\r\nmfeature_list = []\r\ncword_list = []\r\nsepn_list = []\r\n\r\n\r\n#define window size\r\nwindows = 45\r\n\r\n#creates lists separating titles, sequences and structures\r\nfor i in range (0, len(list_all), 
3):\r\n\tlist_seq.append((math.floor(windows/2))*'X'+list_all[i+1]+(math.floor(windows/2))*'X')\r\n\tlist_stru.append((math.floor(windows/2))*'X'+list_all[i+2]+(math.floor(windows/2))*'X')\r\n\r\n\t\r\n#creates sliding windows of sequence\r\nfor seq in list_seq:\r\n\taa_list = list(seq)\r\n\tfor aa in range(0, len(aa_list)-(windows-1)):\r\n\t\tword_list.append(aa_list[aa:aa+windows])\r\n\t\t\r\n#Creates sliding windows of structure features and saves middle feature \r\nfor structure in list_stru:\r\n\tfeat_list = list (structure)\r\n\tfor feature in range (int(windows/2), len(feat_list)-math.floor(windows/2)):\r\n\t\tmfeature_list.append(feat_list[feature])\r\n\r\n#translates sequence windows into numerical code \r\nfrom aa_dictionary import aa_dict\r\nfor a in range (0, len(word_list)):\r\n\tfor b in range(0, windows):\r\n\t\tfor key in aa_dict:\r\n\t\t\tif key == word_list[a][b]:\t\r\n\t\t\t\tword_list[a][b] = aa_dict[key]\r\n\r\n#translates feature windows into numerical code \r\nfrom structure_dict import structure_dict\r\nfor feature in range(0, len(mfeature_list)):\r\n for key in structure_dict:\r\n if key == mfeature_list [feature]:\r\n mfeature_list[feature] = int(structure_dict[key])\r\n\r\n#Joines numerical to one code per word\r\ncword = str()\r\nfor word in word_list:\r\n for aa in range(0, len(word)):\r\n cword = cword + str(word[aa])\r\n cword_list.append(cword) \r\n cword = str()\r\n\r\n#Bringing words into the right format\r\nfor word in cword_list:\r\n\tposition_list = list(word)\r\n\tfor position in range(len(position_list)):\r\n\t\tposition_list[position] = int(position_list[position])\r\n\tsepn_list.append(position_list)\r\n\r\n\r\n\t\r\n######################SVM learning and cross-validation############################################################################################\r\n\r\n#SVM learning\r\nclf = svm.LinearSVC(C=0.5, class_weight='balanced').fit(sepn_list, mfeature_list)\r\n\r\n\r\n# #5-fold cross_validation\r\n# score =cross_val_score(clf, sepn_list, mfeature_list, cv=5)\r\n# print(score)\r\n\r\n\r\n#####################Optional: More detailed analysis: To evaluate the model more parameters apart from cv precision score are needed###################################################\r\n\r\n# #training and test set creation\r\n# X_train, X_test, y_train, y_test = train_test_split(sepn_list, mfeature_list, test_size=0.2)\r\n# lin_clf = svm.LinearSVC(C=0.5, class_weight='balanced').fit(X_train, y_train)\r\n\r\n# #prediction of features\r\n# pred_val = lin_clf.predict(X_test)\r\n\r\n# #detailed report about quality of prediction\r\n# print(classification_report(y_test, pred_val))\r\n\r\n\r\n#######################Saving the model################################################################################################\r\n\r\njoblib.dump(clf, 'Transmembrane_globular_predictor')\r\nprint('finished')\r\n","sub_path":"scripts/Sequence_based_prediction/runall_modelcreator.py","file_name":"runall_modelcreator.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"193239448","text":"from ore.core.models import Permission\nfrom ore.accounts.models import OreUser\nfrom .models import Team, OrganizationTeam, ProjectTeam\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, ButtonHolder, Submit, Layout, Field, Hidden, Div, HTML\nfrom crispy_forms.bootstrap import FieldWithButtons, StrictButton\nfrom django import forms\nfrom 
django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\n\n__author__ = 'max'\n\n\nclass TeamPermissionsForm(forms.Form):\n\n def _get_permission_queryset(self):\n return Permission.objects.all()\n\n def _get_field_groups(self, known_slugs):\n groupings = [\n (\n 'Version Management',\n [\n 'version.create',\n 'version.edit',\n 'version.delete',\n ]\n ),\n (\n 'File Management',\n [\n 'file.create',\n 'file.edit',\n 'file.delete',\n ]\n ),\n (\n 'Project Management',\n [\n 'org.project.create',\n 'project.edit',\n 'project.rename',\n 'project.transfer',\n 'project.delete',\n ]\n ),\n (\n 'Project Team Management',\n [\n 'project.team.create',\n 'project.team.edit',\n 'project.team.manage',\n 'project.team.delete',\n ]\n ),\n (\n 'Organization Team Management',\n [\n 'org.team.create',\n 'org.team.edit',\n 'org.team.manage',\n 'org.team.delete',\n ]\n )\n ]\n groupings = [\n (group_name, [\n perm_slug for perm_slug in group_perm_slugs if perm_slug in known_slugs\n ])\n for group_name, group_perm_slugs in groupings\n ]\n groupings = [(group_name, group_perm_slugs) for group_name,\n group_perm_slugs in groupings if len(group_perm_slugs) > 0]\n return groupings\n\n def __init__(self, *args, **kwargs):\n super(TeamPermissionsForm, self).__init__(*args, **kwargs)\n\n permissions = self._get_permission_queryset()\n self._permissions = {}\n\n known_perm_slugs = set()\n\n for i, permission in enumerate(permissions):\n field_name = 'permission_%s' % permission.slug\n self.fields[field_name] = forms.BooleanField(\n label=permission.name, required=False)\n self._permissions[field_name] = permission\n known_perm_slugs.add(permission.slug)\n\n layout_list = []\n for group_name, group_perm_slugs in self._get_field_groups(known_perm_slugs):\n layout_list.append(Fieldset(\n group_name, *['permission_%s' % perm_slug for perm_slug in group_perm_slugs]))\n layout_list.append(ButtonHolder(Submit('submit', 'Save')))\n\n self.helper = FormHelper()\n self.helper.layout = Layout(*layout_list)\n\n def get_selected_permissions(self):\n return [self._permissions[name] for name, value in self.cleaned_data.items() if name.startswith('permission_') and value]\n\n\nclass ProjectTeamPermissionsForm(TeamPermissionsForm):\n\n def _get_permission_queryset(self):\n qs = super(ProjectTeamPermissionsForm, self)._get_permission_queryset()\n return qs.filter(applies_to_project=True)\n\n\nclass OrganizationTeamPermissionsForm(TeamPermissionsForm):\n pass\n\n\nclass TeamForm(forms.ModelForm):\n\n @property\n def is_owner_team(self):\n inst = getattr(self, 'instance', None)\n return bool(inst and inst.is_owner_team)\n\n def __init__(self, *args, **kwargs):\n super(TeamForm, self).__init__(*args, **kwargs)\n \n readonly = {}\n if self.is_owner_team:\n readonly['readonly'] = 'readonly'\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Field('name', **readonly),\n 'description',\n Div(\n Field('permissions'), css_class='hide' if self.is_owner_team else ''\n ),\n Submit('save', 'Save')\n )\n\n def clean_name(self, *args, **kwargs):\n if self.is_owner_team:\n return self.instance.name\n return self.cleaned_data['name']\n\n def clean_permissions(self, *args, **kwargs):\n if self.is_owner_team:\n return self.instance.permissions.all()\n return self.cleaned_data['permissions']\n\n class Meta:\n model = Team\n fields = ['name', 'description', 'permissions']\n\n\nclass ProjectTeamForm(TeamForm):\n\n def __init__(self, *args, **kwargs):\n super(ProjectTeamForm, self).__init__(*args, **kwargs)\n 
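# for a team that has already been saved, submit the form to its per-team manage URL\n        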
if self.instance and self.instance.id:\n self.helper.form_action = reverse('projects-team-manage', kwargs={\n 'namespace': self.instance.project.namespace.name,\n 'project': self.instance.project.name,\n 'team': self.instance.name,\n })\n\n def clean_name(self, *args, **kwargs):\n final_name = super(ProjectTeamForm, self).clean_name()\n project = self.initial.get('project', None)\n if self.instance.id:\n project = self.instance.project\n qs = ProjectTeam.objects.filter(project=project, name=final_name)\n if self.instance.id:\n qs = qs.exclude(id=self.instance.id)\n if qs.exists():\n raise ValidationError(\"That team name is already in use.\")\n return final_name\n\n class Meta(TeamForm.Meta):\n model = ProjectTeam\n\nclass OrganizationTeamForm(TeamForm):\n\n class Meta(TeamForm.Meta):\n model = OrganizationTeam\n\nclass CommaSeparatedTextInput(forms.TextInput):\n\n def _format_value(self, value):\n return ', '.join(value)\n\n def value_from_datadict(self, data, files, name):\n value = data.get(name)\n if not value:\n return value\n return [v.strip() for v in value.split(',')]\n\nclass MembershipManageForm(forms.Form):\n\n user = forms.ModelMultipleChoiceField(queryset=None, to_field_name=\"name\", widget=CommaSeparatedTextInput())\n #user = forms.ModelMultipleChoiceField(queryset=None, to_field_name=\"name\")\n\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user')\n team = kwargs.pop('team')\n direction = kwargs.pop('direction')\n super(MembershipManageForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Field('user'),\n Submit('Add member', 'Add member'),\n )\n self.helper.form_action = team.get_member_add_url()\n if direction == 'add':\n self.fields['user'].queryset = OreUser.objects.as_user(user).exclude(id__in=team.users.values('id'))\n else:\n self.fields['user'].queryset = OreUser.objects.as_user(user).filter(id__in=team.users.values('id'))\n\n\nclass TeamDeleteForm(forms.Form):\n\n lock = forms.CharField(max_length=64)\n\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop('instance')\n \n super(TeamDeleteForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.field_template = 'bootstrap3/layout/inline_field.html'\n if isinstance(self.instance, ProjectTeam):\n self.helper.form_action = reverse(\n 'projects-team-delete', kwargs={'namespace': self.instance.project.namespace.name, 'project': self.instance.project.name, 'team': self.instance.name},\n )\n self.helper.form_show_labels = False\n self.helper.form_class = \"js-lock-form\"\n self.helper.attrs = {\n 'data-confirm': self.instance.name,\n 'data-input': 'input[name=\"lock\"]',\n 'data-locks': 'button',\n }\n self.helper.layout = Layout(\n HTML(\"\"\"\n

Removing this team will remove the additional permissions granted to all of its members.\n                Please type the name of the team ({{ team.name }}) to confirm deletion.
\n \"\"\"),\n FieldWithButtons(\n Field('lock'), StrictButton(' Delete', css_class='btn-danger', type='submit')),\n )\n\n def clean_lock(self):\n lock = self.cleaned_data['lock']\n if lock != self.instance.name:\n raise ValidationError(\n 'You must type the team name exactly, including any capitalisation.')\n return lock","sub_path":"ore/teams/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"100166470","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport leastsquare as lq\n\nx = []\ny = []\nori_a = random.random() * 2\nori_b = random.random()\nfor i in range(20):\n xx = 10 * random.random()\n x.append([xx])\n y.append([ori_a * xx + ori_b + random.random() * 2])\n\na, b = lq.leastsquare(x, y)\ny_func = a * x + b\n\nplt.scatter(x, y)\nplt.plot(x, y_func, color='r')\nplt.axis([0, 10, 0, 10])\nplt.show()","sub_path":"linerregression/test_lq.py","file_name":"test_lq.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176560034","text":"from plot_helper import *\nfrom simulation_parameters import *\nfrom fourier_transforms import *\nfrom scipy.optimize import leastsq, basinhopping # , differential_evolution\nfrom muller_calculations import *\nfrom tqdm import tqdm\n\n\nt_experiment, sampling_rate, s_0, omega_1, omega_2 = extract_parameters() # Extract parameters\nratios = np.arange(1, 5, 1)\nstore_results = defaultdict()\nstore_results_noise = defaultdict()\n\nfor i in tqdm(ratios):\n store_results[i], t = run_simulation2(t_experiment, sampling_rate, s_0, omega_1, omega_1*i)\n\n\n#for i in ratios:\n # plt.plot(t, store_results[i] + 0.125*i)\n\n# plt.show()\n\nplot_ratios(t, 5, store_results)\n\nx_initial_basin = np.ones(10)\nharmonics = 1\nfunc = lambda x: f_annealing(x, t, store_results[harmonics])\n\noptimized = basinhopping(func,\n x_initial_basin,\n minimizer_kwargs={\"method\": \"L-BFGS-B\"},\n niter=300,\n disp=False)\n\nleastsq_x = leastsq(f_residual,\n x_initial_basin,\n args=(t, store_results[harmonics]),\n maxfev=1000)\n\nprint(optimized.x[9])\nplot_mc_fits(optimized.x, leastsq_x[0], t, store_results, harmonics)\n\n\n# z = []\n# z1 = []\n# z2 = []\n#\n# for i in tqdm(range(0, 30000)):\n#\n# z.append(drr_norm_measure_padua(np.array([90, 90])))\n# z2.append(drr_norm_measure(np.array([3, 10, 90, 90, 21])))\n#\n# fontsize = 10\n# plt.hist(z2, bins=300, label=\"Linear Increments with ratio\")\n# plt.hist(z, bins=300, label=\"Padua Interpolation Points\")\n# plt.xlim(0, 400)\n# plt.legend(loc='upper right')\n# plt.xlabel(\"Data\", fontsize=fontsize)\n# plt.ylabel(\"Occurrence\", fontsize=fontsize)\n# plt.show()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267441820","text":"import numpy as np\nimport os\nimport csv\nimport pandas\n\nif __name__ == '__main__':\n path = 'mobilenet_finetune_30_pig_upload.csv'\n filename = 'mobilenet30.csv'\n haha = pandas.read_csv(path, header=None)\n file = open(filename, 'w')\n writer = csv.writer(file)\n # print(haha.values[0])\n xixi = haha.as_matrix()\n # print(xixi)\n for i in range(3000):\n buffer = xixi[i*30:(i+1)*30, :]\n maxx = np.argmax(buffer[:, 2])\n\n tplist = [int(buffer[maxx, 0]), int(buffer[maxx, 1]), \"%.15f\" % buffer[maxx, 2]]\n # 
file.writelines(str(tplist)+'\\r\\n')\n writer.writerow(tplist)\n file.close()\n","sub_path":"claster/result_extract.py","file_name":"result_extract.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244710398","text":"\"\"\" Basic implementation of EMBL-EBI's OxO mappings in Python using mapping files provided by EMBL-EBI.\r\n\r\nhttps://www.ebi.ac.uk/spot/oxo/index\r\n\r\nNotes:\r\n The mapping algorithm may be different from OxO's. A few test cases have been run and produced matching results.\r\n\r\n Update OxO._file_ols and OxO._file_umls with the location of the mapping files provided by EMBL-EBI.\r\n\r\nExamples:\r\n OxO.find_mappings('DOID:162')\r\n OxO.find_mappings('UMLS:C0002199', distance=3)\r\n OxO.find_mappings('SNOMEDCT:136111001', distance=3, targets=['MeSH', 'UMLS'])\r\n\"\"\"\r\n\r\nimport csv\r\nfrom collections import defaultdict\r\n\r\n\r\nclass OxO:\r\n _mappings = None\r\n _terms = None\r\n _file_ols = r'D:\\oxo\\ols_mappings.csv'\r\n _file_umls = r'D:\\oxo\\umls_mappings.csv'\r\n _file_terms = r'D:\\oxo\\terms.csv'\r\n\r\n @staticmethod\r\n def load_files():\r\n # Initialize\r\n OxO._terms = dict()\r\n OxO._mappings = defaultdict(set)\r\n\r\n # Read in the terms\r\n with open(OxO._file_terms, 'r', newline='') as fh:\r\n reader = csv.reader(fh, delimiter=',', quotechar='\"', doublequote=False, lineterminator='\\r\\n',\r\n escapechar='\\\\')\r\n\r\n # Skip the header line\r\n reader.__next__()\r\n\r\n # Read in term definitions\r\n for identifier, curie, label, uri, prefix in reader:\r\n OxO._terms[curie] = {\r\n 'label': label,\r\n 'uri': uri,\r\n }\r\n\r\n # Read in OLS dump file using csv reader\r\n with open(OxO._file_ols, 'r', newline='') as fh:\r\n reader = csv.reader(fh, delimiter=',', quotechar='\"', doublequote=False, lineterminator='\\r\\n',\r\n escapechar='\\\\')\r\n\r\n # Skip the header line\r\n reader.__next__()\r\n\r\n # Read in all mappings\r\n for row in reader:\r\n curie_from = row[0]\r\n curie_to = row[1]\r\n OxO._mappings[curie_from].add(curie_to)\r\n OxO._mappings[curie_to].add(curie_from)\r\n\r\n # Read in UMLS dump file using csv reader\r\n with open(OxO._file_umls, 'r', newline='') as fh:\r\n reader = csv.reader(fh, delimiter=',', quotechar='\"', doublequote=False, lineterminator='\\r\\n',\r\n escapechar='\\\\')\r\n\r\n # Skip the header line\r\n reader.__next__()\r\n\r\n # Read in all mappings\r\n for row in reader:\r\n curie_from = row[0]\r\n curie_to = row[1]\r\n OxO._mappings[curie_from].add(curie_to)\r\n OxO._mappings[curie_to].add(curie_from)\r\n\r\n @staticmethod\r\n def find_mappings(curie_source, distance=2, targets=None):\r\n if OxO._mappings is None:\r\n OxO.load_files()\r\n\r\n found = dict() # mapping results (key:curie, value:distance)\r\n visited = set() # nodes already visited\r\n searching = {curie_source} # nodes to visit on this iteration\r\n prefix_source = curie_source.split(':')[0]\r\n\r\n # Convert targets to a set\r\n if targets is None:\r\n targets = []\r\n elif type(targets) is str:\r\n targets = [targets]\r\n targets = set(targets)\r\n\r\n for i in range(distance):\r\n search_add = set() # nodes to search in the next iteration\r\n\r\n # Mark all nodes that we're about to visit as already visited\r\n visited = visited.union(searching)\r\n\r\n # Visit each new node\r\n for curie in searching:\r\n curr_mappings = OxO._mappings[curie]\r\n\r\n # Add new mappings to the set to search in the next iteration if we have not already 
visited\r\n                search_add = search_add.union([x for x in curr_mappings if x not in visited])\r\n\r\n                # Add new mappings to the set of found mappings if it's in the target ontologies\r\n                for m in curr_mappings:\r\n                    prefix_curr = m.split(':')[0]\r\n                    if m not in found and prefix_curr != prefix_source and \\\r\n                            (len(targets) == 0 or prefix_curr in targets):\r\n                        info = {\r\n                            'distance': i + 1,\r\n                            'label': '',\r\n                            'uri': ''\r\n                        }\r\n\r\n                        if m in OxO._terms:\r\n                            term = OxO._terms[m]\r\n                            info['label'] = term['label']\r\n                            info['uri'] = term['uri']\r\n\r\n                        found[m] = info\r\n\r\n            searching = search_add\r\n\r\n        return found\r\n","sub_path":"OxO.py","file_name":"OxO.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"153633985","text":"from math import fabs\r\n\r\ndef food70( gameData):\r\n    foodScanMax = 200\r\n    head = getHead(gameData)[0]\r\n    x_head = head[0]\r\n    y_head = head[1]\r\n    if(x_head < foodScanMax):\r\n        x_min = 0\r\n    else:\r\n        x_min = x_head - foodScanMax\r\n    if(y_head < foodScanMax):\r\n        y_min = 0\r\n    else:\r\n        y_min = y_head - foodScanMax\r\n    if(x_head + foodScanMax > gameData[\"width\"]):\r\n        x_max = gameData[\"width\"]\r\n    else:\r\n        x_max = x_head + foodScanMax\r\n    if(y_head + foodScanMax > gameData[\"height\"]):\r\n        y_max = gameData[\"height\"]\r\n    else:\r\n        y_max = y_head + foodScanMax\r\n    for i in range (x_min, x_max):\r\n        for j in range(y_min, y_max):\r\n            if gameData[\"gameBoard\"][i][j] == \"F\":\r\n                variance = fabs(x_head - i) + fabs(y_head - j)\r\n                if (variance <= foodScanMax):\r\n                    if(fabs(x_head - i) >= fabs(y_head - j)):\r\n                        if (x_head - i < 0):\r\n                            if gameData['gameBoard'][x_head-1][y_head] == \"E\":\r\n                                return \"west\"\r\n                        else:\r\n                            if gameData['gameBoard'][x_head+1][y_head] == \"E\":\r\n                                return \"east\"\r\n\r\n                    if(fabs(x_head - i) <= fabs(y_head - j)):\r\n                        if (y_head - j < 0):\r\n                            if gameData['gameBoard'][x_head][y_head-1] == \"E\":\r\n                                return \"north\"\r\n                        else:\r\n                            if gameData['gameBoard'][x_head][y_head+1] == \"E\":\r\n                                return \"south\"\r\n    return\r\n\r\n\r\ndef getHead(gameData):\r\n    snakes = gameData [\"snakes\"]\r\n    for i in range(len(snakes)):\r\n        if snakes[i][\"id\"] == \"2daa46ee-4880-4285-8572-eeaf52dba551\":\r\n            return snakes[i][\"coords\"][0],snakes[i][\"coords\"][1]\r\n","sub_path":"app/consume.py","file_name":"consume.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162607299","text":"cfg = {\n'channels' : 3,\n'use_mark' : False,\n'use_max_size' : False,\n'max_size_x' : 128,\n'max_size_y' : 128,\n'norm_input' : True,\n'norm_input_minus' : False,\n\n# net architecture parameters\n'layers' : 1,\n'features_root' : 16,\n'cnn_kernel_size' : 3,\n'pool_size' : 2,\n'LSTM' : True,\n'regularizer_scale': 0.1,\n\n# model\n'optimizer' : 'Adam',\n'base_net_size' : 128,\n'batch_size' : 1,\n'max_step' : 30,\n'learning_rate' : 0.001,\n'keep_prob' : 0.9,\n'regularizer' : False,\n'useGPU' : True,\n\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604403244","text":"from helper import produce_stars\n\nagenda = [\n    \"Lists\",\n]\n\nmonday = [\n    'Arroz',\n    'Banana Frita',\n    'Little rat'\n]\ntuesday = [\n    'Arroz',\n    'Feijão'\n]\nwednesday = [\n    'Banana',\n    'Pão',\n    'Salsicha'\n]\nthursday = [\n    'Arroz'\n]\nfriday = [\n    'Pizza'\n]\n\nmenu = [monday, tuesday, wednesday, thursday, friday]\n\nprint(produce_stars(20), agenda[0], produce_stars(20), end=\"\\n\")\n\nprint(\"Imagine you want to store a collection of drinks\", [\"Beer\", \"Coffee\", \"Snake Wine\"])\nprint(\"Imagine the menu of your favorite restaurant (Ratinho i.e)\", menu)\nprint(\"Let's suppose 
you want to check the menu for monday\", menu[0])\nprint(\"Important! Lists are 0 indexed and accepts all types\", [\"Little rat\", 1, 3.4j])\n\n","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462447311","text":"# We need to import sys which will let us take in the command line arguments\n# We need to import collections which will allow us to count the keys and their values\nimport sys\nimport collections\n\nf = open(sys.argv[1],'r') #To open the file in order to read it\ns = f.read() #Reading the contents of the opened file\nf.close()\ns = s.lower() #Converting the read lines into lower case\ns = s.rstrip(\"\\n\")\ns1 = s.split(' ') #Splitting the read lines into a list of words (between each space)\ns1 = filter(None, s1)\n#print s1\ncounter = collections.Counter(s1)\nk, v = counter.keys(), counter.values()\nw = []\nfor k,v in counter.items():\n    w.append((str(v)+':'+str(k)))\n\nw1 = sorted(w, reverse = True)\na = ','.join(w1)\n\nsys.stdout.write(a)","sub_path":"2_Hello_Autolab/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148156352","text":"import datetime\nimport random\n\nimport cv2\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gym_plot\n\n# Environment Setting\nenv = gym.make('Pendulum-v0')\ngame_name = 'Pendulum'\nalgorithm = 'DDPG'\npendulum_plot = gym_plot.Pendulum()\nNum_states = env.observation_space.shape[0]\nNum_action = env.action_space.shape[0]\naction_max = 2\n\n# Parameter setting\nGamma = 0.99\nLearning_rate_actor = 0.0001\nLearning_rate_critic = 0.001\n\nNum_start_training = 5000\nNum_training = 25000\nNum_testing = 10000\n\nNum_batch = 64\nNum_replay_memory = 5000\n\nNum_episode_plot = 10\n\n# Network parameters\nNum_colorChannel = 3\nNum_stackFrame = 4\n\nfirst_conv_actor = [8, 8, Num_colorChannel, 32]\nsecond_conv_actor = [4, 4, 32, 32]\nthird_conv_actor = [3, 3, 32, 32]\nfirst_fc_actor = [11 * 11 * 32, 200]\nsecond_fc_actor = [200, 200]\nthird_fc_actor = [200, Num_action]\n\nfirst_conv_critic = [8, 8, Num_colorChannel, 32]\nsecond_conv_critic = [4, 4, 32, 32]\nthird_conv_critic = [3, 3, 32, 32]\nfirst_fc_critic = [11 * 11 * 32, 400]\nsecond_fc_critic = [400 + Num_action, 300]\nthird_fc_critic = [300, 1]\n\nimg_size = 84\n\n\n# print('..........', Num_states, Num_action, '..........')\n## Soft_update: the tau parameter can be tuned here\ndef Soft_update(Target_vars, Train_vars, tau=0.001):\n    for v in range(len(Target_vars)):\n        soft_target = sess.run(Train_vars[v]) * tau + sess.run(Target_vars[v]) * (1 - tau)\n        Target_vars[v].load(soft_target, sess)\n\n\n## Ornstein - Uhlenbeck noise\n# https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py\n# OU noise\nclass OU_noise(object):\n    def __init__(self, env_action, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.1, decay_period=Num_training):\n        self.mu = mu # 0.0\n        self.theta = theta # 0.15\n        self.sigma = max_sigma # 0.3\n        self.max_sigma = max_sigma # 0.3\n        self.min_sigma = min_sigma # 0.1\n        self.decay_period = decay_period # 25000\n        self.num_actions = env_action.shape[0] # 1\n        self.action_low = env_action.low # -2\n        self.action_high = env_action.high # 2\n        self.reset()\n\n    def reset(self):\n        self.state = np.zeros(self.num_actions)\n\n    # self.state = np.zeros(self.num_actions)\n    # self.state 
= np.zeros(self.num_actions)\n    def state_update(self):\n        x = self.state\n        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.num_actions) # np.random.randn() draws standard normal samples (mean 0, std 1)\n        self.state = x + dx\n\n    def add_noise(self, action, training_step):\n        self.state_update()\n        state = self.state\n        self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, training_step / self.decay_period)\n        return np.clip(action + state, self.action_low, self.action_high)\n\n\ndef conv2d(x, w, stride):\n    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')\n\n\ndef conv_weight_variable(name, shape):\n    return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer_conv2d())\n\n\n# Initialize weights and bias\ndef weight_variable(name, shape):\n    return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer())\n\n\ndef bias_variable(name, shape):\n    return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer())\n\n\ndef Actor(x, network_name):\n    x_normalize = (x - (255.0 / 2)) / (255.0 / 2)\n    # Actor Network\n    with tf.variable_scope(network_name):\n        w_conv1_actor = conv_weight_variable('_w_conv1', first_conv_actor)\n        b_conv1_actor = bias_variable('_b_conv1', [first_conv_actor[3]])\n\n        w_conv2_actor = conv_weight_variable('_w_conv2', second_conv_actor)\n        b_conv2_actor = bias_variable('_b_conv2', [second_conv_actor[3]])\n\n        w_conv3_actor = conv_weight_variable('_w_conv3', third_conv_actor)\n        b_conv3_actor = bias_variable('_b_conv3', [third_conv_actor[3]])\n\n        w_fc1_actor = weight_variable('_w_fc1', first_fc_actor)\n        b_fc1_actor = bias_variable('_b_fc1', [first_fc_actor[1]])\n\n        w_fc2_actor = weight_variable('_w_fc2', second_fc_actor)\n        b_fc2_actor = bias_variable('_b_fc2', [second_fc_actor[1]])\n\n        w_fc3_actor = weight_variable('_w_fc3', third_fc_actor)\n        b_fc3_actor = bias_variable('_b_fc3', [third_fc_actor[1]])\n\n        h_conv1_actor = tf.nn.relu(conv2d(x_normalize, w_conv1_actor, 4) + b_conv1_actor)\n        h_conv2_actor = tf.nn.relu(conv2d(h_conv1_actor, w_conv2_actor, 2) + b_conv2_actor)\n        h_conv3_actor = tf.nn.relu(conv2d(h_conv2_actor, w_conv3_actor, 2) + b_conv3_actor)\n        h_pool3_flat = tf.reshape(h_conv3_actor, [-1, first_fc_actor[0]])\n\n        h_fc1_actor = tf.nn.elu(tf.matmul(h_pool3_flat, w_fc1_actor) + b_fc1_actor)\n        h_fc2_actor = tf.nn.elu(tf.matmul(h_fc1_actor, w_fc2_actor) + b_fc2_actor)\n\n        output_actor = tf.nn.tanh(tf.matmul(h_fc2_actor, w_fc3_actor) + b_fc3_actor)\n    # my understanding: the actor output is scaled by the maximum action value, probably related to how the exploration noise is introduced and clipped\n    return action_max * output_actor\n\n\ndef reshape_input(state):\n    state_out = cv2.resize(state, (img_size, img_size))\n    state_out = np.uint8(state_out).reshape(1, img_size, img_size, Num_colorChannel)\n\n    return state_out\n\n\ndef Critic(x, policy, network_name):\n    x_normalize = (x - (255.0 / 2)) / (255.0 / 2)\n    with tf.variable_scope(network_name):\n        w_conv1_critic = conv_weight_variable('_w_conv1', first_conv_critic)\n        b_conv1_crititc = bias_variable('_b_conv1', [first_conv_critic[3]])\n\n        w_conv2_critic = conv_weight_variable('_w_conv2', second_conv_critic)\n        b_conv2_critic = bias_variable('_b_conv2', [second_conv_critic[3]])\n\n        w_fc1_critic = weight_variable('_w_fc1', first_fc_critic)\n        b_fc1_critic = bias_variable('_b_fc1', [first_fc_critic[1]])\n\n        w_fc2_critic = weight_variable('_w_fc2', second_fc_critic)\n        b_fc2_critic = bias_variable('_b_fc2', [second_fc_critic[1]])\n\n        w_fc3_critic = weight_variable('_w_fc3', third_fc_critic)\n        b_fc3_critic = bias_variable('_b_fc3', 
[third_fc_critic[1]])\n\n        h_conv1_critic = tf.nn.relu(conv2d(x_normalize, w_conv1_critic, 4) + b_conv1_crititc)\n        h_conv2_critic = tf.nn.relu(conv2d(h_conv1_critic, w_conv2_critic, 2) + b_conv2_critic)\n        h_pool3_flat = tf.reshape(h_conv2_critic, [-1, first_fc_critic[0]])\n        # h_pool3_flat = tf.concat([h_pool3, policy], axis=1)\n\n        # Critic Network\n        h_fc1_critic = tf.nn.relu(tf.matmul(h_pool3_flat, w_fc1_critic) + b_fc1_critic)\n        h_fc1_critic = tf.concat([h_fc1_critic, policy], axis=1)\n        h_fc2_critic = tf.nn.relu(tf.matmul(h_fc1_critic, w_fc2_critic) + b_fc2_critic)\n\n        output_critic = tf.matmul(h_fc2_critic, w_fc3_critic) + b_fc3_critic\n    return output_critic\n\n\n# Information from the network\n# x = tf.placeholder(tf.float32, shape = [None, Num_states])\nx = tf.placeholder(tf.float32, shape=[None, 84, 84, 3])\n\nPolicy = Actor(x, 'Actor_main')\nPolicy_target = Actor(x, 'Actor_target')\n# tf.concat([T1, T2], 1): the second argument 1 means concatenation along columns\n# the action is folded into the critic network here: it cannot be fed into the conv layers directly, so it is concatenated at a fully connected layer\n# Critic_inputs = tf.concat([Policy, x], 1)\n# Critic_inputs_target = tf.concat([Policy, x], 1)\nQ_Value = Critic(x, Policy, 'Critic_main')\nQ_Value_target = Critic(x, Policy_target, 'Critic_target')\n\nActor_vars = tf.trainable_variables('Actor_main')\nActor_target_vars = tf.trainable_variables('Actor_target')\n\nCritic_vars = tf.trainable_variables('Critic_main')\nCritic_target_vars = tf.trainable_variables('Critic_target')\n\n# Set Loss\ntarget_critic = tf.placeholder(tf.float32, shape=[None, 1])\n# tf.reduce_sum() sums over all the variables.\n# actor_loss = -tf.reduce_sum(Q_Value)\nactor_loss = 1 / tf.reduce_sum(Q_Value) # maximize Q (my own modification)\ncritic_loss = tf.losses.mean_squared_error(target_critic, Q_Value) # mean squared error between target_critic and Q\n\npolicy_optimizer = tf.train.AdamOptimizer(learning_rate=Learning_rate_actor)\ncritic_optimizer = tf.train.AdamOptimizer(learning_rate=Learning_rate_critic)\n\nactor_train = policy_optimizer.minimize(actor_loss, var_list=Actor_vars)\ncritic_train = critic_optimizer.minimize(critic_loss, var_list=Critic_vars)\n\n# Init session\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# Initialization\nnoise = OU_noise(env.action_space)\n# noise is added for random exploration so actions can vary; it is clipped between the action bounds -2 and 2\nstate = env.reset()\nstate_img = pendulum_plot.get_state_img(state)\nstate_img = reshape_input(state_img)\nnoise.reset()\n# Initial parameters\nstep = 0\nstep_train = 0\nscore = 0\nepisode = 0\ndata_time = str(datetime.date.today()) + '_' + str(datetime.datetime.now().hour) + '_' + str(\n    datetime.datetime.now().minute)\nreplay_memory = []\n\n# Figure and figure data setting\nplot_loss = []\nplot_Q = []\nloss_list = []\nmaxQ_list = []\n\nplot_x = []\nplot_y = []\n\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\n\nwhile True:\n    # Define progress\n    if step <= Num_start_training:\n        progress = 'Exploring'\n        # env.render()\n    elif step <= Num_start_training + Num_training:\n        progress = 'Training'\n        # env.render()\n    elif step < Num_start_training + Num_training + Num_testing:\n        progress = 'Testing'\n        env.render()\n    else:\n        # Test is finished\n        print('Test is finished!!')\n        plt.savefig('./Plot/' + data_time + '_' + algorithm + '_' + game_name + '.png')\n        break\n\n    # Choose action\n    # state = state.reshape(-1, Num_states)\n    # state_img = pendulum_plot.get_state_img(state)\n    action = sess.run(Policy, feed_dict={x: state_img})\n\n    # Add noise\n    if progress != 'Testing':\n        action = noise.add_noise(action, step_train)\n\n    state_next, reward, terminal, _ = env.step(action)\n    # state_next = 
state_next.reshape(-1, Num_states)# reshape(-1,n): the row count is inferred and the array is reshaped to n columns; e.g. shape [3,1] becomes [1,3]\n    state_next = state_next.reshape(Num_states)\n    state_next_img = pendulum_plot.get_state_img(state_next)\n    state_next_img = reshape_input(state_next_img)\n    # Experience replay: maintain the replay buffer\n    if len(replay_memory) >= Num_replay_memory:\n        del replay_memory[0]\n\n    replay_memory.append([state_img, action, reward, state_next_img, terminal])\n\n    if progress == 'Training':\n        minibatch = random.sample(replay_memory, Num_batch)\n\n        # Save each batch of data\n        state_batch = [batch[0][0] for batch in minibatch]\n        action_batch = [batch[1][0] for batch in minibatch]\n        reward_batch = [batch[2][0] for batch in minibatch]\n        state_next_batch = [batch[3][0] for batch in minibatch]\n        terminal_batch = [batch[4] for batch in minibatch]\n\n        # Update Critic\n        y_batch = []\n        Q_batch = sess.run(Q_Value_target, feed_dict={x: state_next_batch})\n\n        for i in range(Num_batch):\n            if terminal_batch[i]:\n                y_batch.append([reward_batch[i]])\n            else:\n                y_batch.append([reward_batch[i] + Gamma * Q_batch[i][0]])\n\n        _, loss_critic = sess.run([critic_train, critic_loss],\n                                  feed_dict={target_critic: y_batch, x: state_batch, Policy: action_batch})\n\n        # Update Actor\n        _, loss_actor = sess.run([actor_train, actor_loss], feed_dict={x: state_batch})\n\n        plot_loss.append(loss_critic)\n        plot_Q.append(np.mean(Q_batch))\n\n        ##Soft Update\n        Soft_update(Actor_target_vars, Actor_vars)\n        Soft_update(Critic_target_vars, Critic_vars)\n\n        step_train += 1\n\n    # Update parameters at every iteration\n    step += 1\n    score += reward[0]\n\n    state_img = state_next_img\n\n    # Plotting\n    if len(plot_x) % Num_episode_plot == 0 and len(plot_x) != 0 and progress != 'Exploring':\n        ax1.plot(np.average(plot_x), np.average(plot_y), '*')\n        ax1.set_ylabel('Score')\n        ax1.set_title('Average Score ' + algorithm)\n\n        ax2.plot(np.average(plot_x), np.average(plot_loss), 'o')\n        ax2.set_ylabel('Loss')\n        ax2.set_title('Critic Loss ' + algorithm)\n\n        ax3.plot(np.average(plot_x), np.average(plot_Q), 'd')\n        ax3.set_xlabel('Episode')\n        ax3.set_ylabel('Q-value')\n        ax3.set_title('Q_value ' + algorithm)\n\n        plt.draw()\n        plt.pause(0.000001)\n\n        plot_x = []\n        plot_y = []\n        plot_loss = []\n        plot_Q = []\n\n    # Terminal\n    if terminal:\n        print('step: ' + str(step) + ' / ' + 'episode: ' + str(\n            episode) + ' / ' + 'state: ' + progress + ' / ' + 'score: ' + str(score))\n\n        if progress != 'Observing':\n            # data for plotting\n            plot_x.append(episode)\n            plot_y.append(score)\n\n        score = 0\n        episode += 1\n\n        state = env.reset()\n        noise.reset()\n","sub_path":"CNN_2_Pendulum_DDPG.py","file_name":"CNN_2_Pendulum_DDPG.py","file_ext":"py","file_size_in_byte":12943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155536830","text":"import sys\n\n#---------------------- 1ST SOLUTION USING LIST -------------------------\nclass Solution:\n    def __init__(self):\n        self.stack = []\n        self.queue = []\n    \n    def pushCharacter(self, ch): # know diff between append() and insert(index, ch), what is pop()?\n        self.stack.append(ch)\n    \n    def enqueueCharacter(self, ch):\n        self.queue.insert(0, ch)\n    \n    def popCharacter(self):\n        return self.stack.pop()\n    \n    def dequeueCharacter(self):\n        return self.queue.pop()\n\nsol = Solution()\nsol.pushCharacter(2)\nsol.pushCharacter('y') # character needs '', if no '', it would be an undefined variable\nsol.pushCharacter('z')\n\nsol.enqueueCharacter(2)\nsol.enqueueCharacter('y')\nsol.enqueueCharacter('z')\n\n\nprint(sol.stack) # don't forget () and 
sol.stack not self.stack\nprint(sol.queue)\n\nassert sol.popCharacter() == 'z'\nassert sol.dequeueCharacter() == 2\n\n#----------------------- 2ND SOLUTION USING LINKED LIST ------------------------\nclass Node: # node in linkedlist only has data+next(single)+prev(doubly), here doubly is better\n def __init__(self,data):\n self.data = data\n self.next = None\n self.prev = None\n\nclass LinkedList: # linkedlist as a list, only has head + tail\n def __init__(self):\n self.head = None\n self.tail = None\n\n def __repr__(self):\n data = []\n node = self.head\n while node:\n data.append(node.data)\n node = node.next\n return f'{data}'\n\nclass Solution2:\n def __init__(self):\n self.stack = LinkedList()\n self.queue = LinkedList()\n \n def pushCharacter(self, ch): \n new_node = Node(ch)\n if not self.stack.head: # always remember to include none situation\n self.stack.head = new_node \n self.stack.tail = new_node\n else:\n self.stack.tail.next = new_node\n new_node.prev = self.stack.tail\n self.stack.tail = new_node \n \n def enqueueCharacter(self, ch):\n new_node = Node(ch)\n if not self.queue.head:\n self.queue.head = new_node \n self.queue.tail = new_node\n else:\n new_node.next = self.queue.head\n self.queue.head.prev = new_node\n self.queue.head = new_node\n \n def popCharacter(self):\n # current_node = self.stack.head\n # self.stack.tail.prev.next = None\n #self.stack.tail.prev = None\n node = self.stack.tail\n self.stack.tail.prev.next = None\n self.stack.tail = self.stack.tail.prev\n return node.data\n \n def dequeueCharacter(self):\n # current_node = self.queue.head\n # self.queue.tail.prev.next = None\n # #self.queue.tail.prev = None\n node = self.queue.tail\n self.queue.tail.prev.next = None\n self.queue.tail = self.queue.tail.prev\n return node.data\n \nsol1 = Solution2()\nsol1.pushCharacter(2)\nsol1.pushCharacter('y') # character needs '', if nn '', would be an undefined variable\nsol1.pushCharacter('z')\n\nsol1.enqueueCharacter(2)\nsol1.enqueueCharacter('y')\nsol1.enqueueCharacter('z')\n\n\nprint(sol1.stack) # stack is a linkedlist, which has data+reference to next node, meaning linkedlist is an object, \n # thus print(self.stack) returns memory address and needs to have def __repr__(self): so \n # when print(), it gives the contents of the object, here contents of linkedlist, stack and queue\nprint(sol1.queue) \n\nassert sol1.popCharacter() == 'z'\nassert sol1.dequeueCharacter() == 2\n","sub_path":"stack_queue_implimentation_byList.py","file_name":"stack_queue_implimentation_byList.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253551518","text":"#reversed!/usr/bin/env python3\n\"\"\"\nGreedy solution to the winning determination problem.\n\"\"\"\nimport time\nimport copy\n\nimport numpy as np\nimport pandas as pd\n\nimport base_solution\n\n\nclass AOCBaseSolution(base_solution.BaseSolution):\n \"\"\"Ant Colony Optimization solution.\"\"\"\n\n def __init__(self, bids, name, logger,\n ant_count=10, pheromone_decay=0.9,\n pheromone_power=0.5, greedy_power=0.5):\n super(AOCBaseSolution, self).__init__(bids, name, logger)\n self._accepted_bids = []\n self._ant_count = ant_count\n self._pheromone_decay = pheromone_decay\n self._pheromone_power = pheromone_power\n self._greedy_power = greedy_power\n self._status = 'NotStarted'\n\n # compute average item price for each bit\n all_bids = list(self._bids.values())\n self._enhanced_bids = []\n for bid in all_bids:\n 
self._enhanced_bids.append(\n (bid[0], bid[1], bid[1]/len(bid[0]))\n )\n self._enhanced_bids.sort(\n key=lambda x: x[2],\n reverse=True)\n\n # prepare ants\n self._ants = [[] for _ in range(self._ant_count)]\n self._pheromone_trail = [\n 1 for _ in range(len(self._enhanced_bids))]\n\n @staticmethod\n def _has_conflict(target_bid, accepted_bids):\n \"\"\"Return true if the bid has items conflicts\n with the currently accepted bids.\n \"\"\"\n bid_items = set(target_bid[0])\n for bid in accepted_bids:\n if not bid_items.isdisjoint(set(bid[0])):\n return True\n return False\n\n def _get_status(self):\n return \"Solved\"\n\n def get_profit(self):\n return max([self._get_profit(ant) for ant in self._ants])\n\n @staticmethod\n def _get_profit(ant):\n return sum([bid[1] for bid in ant])\n\n def _solve(self, timeout=None):\n start_time = time.time()\n while time.time() - start_time <= timeout:\n progress = self.__next_epoch()\n self._status = 'PartiallyOptimized'\n if not progress:\n self._status = 'Finished'\n return\n\n def __next_epoch(self):\n # construct new solution\n index_of_chosen_bids = []\n added_item_to_ant = []\n for index_ant in range(len(self._ants)):\n # construct probability to add a bid\n valid_pheromon_array = []\n for index_bid, bid in enumerate(self._enhanced_bids):\n if self._has_conflict(bid, self._ants[index_ant]):\n valid_pheromon_array.append(0.0)\n continue\n valid_pheromon_array.append(self._pheromone_trail[index_bid])\n\n added_item_to_ant.append(any([x != 0.0 for x in valid_pheromon_array]))\n probability_array = []\n for index in range(len(self._enhanced_bids)):\n probability_array.append(\n valid_pheromon_array[index]**self._pheromone_power + self._enhanced_bids[index][2]*self._greedy_power\n )\n total_power = sum(probability_array)\n probability_array = list(map(lambda x: x/total_power, probability_array))\n winner_index_bid = np.random.choice(list(range(len(self._enhanced_bids))), p=probability_array)\n\n # add winning bid to ant\n self._ants[index_ant].append(self._enhanced_bids[winner_index_bid])\n index_of_chosen_bids.append(winner_index_bid)\n\n ant_fitness = [self._get_profit(ant) for ant in self._ants]\n max_ant_fitness = max([self._get_profit(ant) for ant in self._ants])\n index_of_max_fitness = ant_fitness.index(max_ant_fitness)\n bid_chosen = self._ants[index_of_max_fitness][-1]\n self._pheromone_trail[self._enhanced_bids.index(bid_chosen)] += max_ant_fitness\n # TODO(mmicu):\n # - compare how the aoc improves over time\n # - maybe normalize the trail added by the fittest ant to encourage exploration\n # - expose parameters to CLI (pheromone_decay, pheromone_power, greedy_power, ant_count)\n # * run experiments with differing parameters\n # - if you have time implement local search\n # - maybe multiply the ants over time\n\n # evaporate pheromone trail\n for index in range(len(self._pheromone_trail)):\n self._pheromone_trail[index] = self._pheromone_trail[index]*(1-self._pheromone_decay)\n return any(added_item_to_ant)\n","sub_path":"src/aoc_model.py","file_name":"aoc_model.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556449619","text":"# importing all the necesarry libraries\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport plotly.express as px\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\n# reading csv file into dataframe\r\n\r\ndf = 
pd.read_csv('D:/study/Masters/sem-2/IDV/assignments/mini_project/datasets/owid-covid-data_new.csv')\r\n\r\nfor col in df.columns:\r\n df[col] = df[col].astype(str)\r\n \r\n \r\n#creating a new dataframe with required dimensions\r\n\r\ndf1 = df[['iso_code','continent','location','date','new_deaths','new_cases','new_tests','hospital_beds_per_thousand']].copy()\r\n\r\n# Printing the datatypes of the dimensions extracted from the dataset file before pre-processing\r\n\r\nprint(df1.dtypes)\r\n\r\n# Dataset shape: Number of rows extracted from the dataset file before pre-processing\r\n\r\nprint(df1.shape)\r\n\r\n\r\n# Data pre-processing: Cleaning up the data to remove null values/junk values in dataset\r\n\r\ndf1.loc[df.new_tests=='nan','new_tests'] = '0'\r\ndf1.loc[df.new_cases=='nan','new_cases'] = '0'\r\ndf1.loc[df.new_deaths=='nan','new_deaths'] = '0'\r\ndf1.loc[df.hospital_beds_per_thousand=='nan','hospital_beds_per_thousand'] = '0'\r\ndf1 = df1[df1.iso_code != 'nan']\r\ndf1 = df1[df1.continent != 'nan']\r\n\r\n\r\n# Showing that there are no null/junk values\r\n\r\ndf1.apply(lambda x:sum(x.isnull()),axis=0)\r\n\r\n\r\n# Calculating the month for each row in the dataset\r\n\r\nmonth_no = ['01','02','03','04','05','06','07','08','09','10','11','12']\r\nmonth_name = ['January','February','March','April','May','June','July','August','September','October','November',\r\n 'December']\r\nmonth = []\r\nmonth_number = []\r\nfor ele in df1['date']:\r\n ele1 = ele[5]+ele[6]\r\n for i in range(12):\r\n if(ele1 == month_no[i]):\r\n month.append(month_name[i])\r\n month_number.append(int(month_no[i]))\r\n\r\n \r\n# Adding the month name and month number as new columns to the dataframe\r\n\r\ndf1['month_n'] = month\r\ndf1['month_num'] = month_number\r\n\r\n\r\n# Eliminating December month from our scope. Considering data from Jan, 2020.\r\n\r\ndf1 = df1[df1.month_num != 12]\r\n\r\n# Converting deaths and cases into integer\r\n\r\ndf1['new_tests'] = df1['new_tests'].astype('str')\r\ndf1['new_deaths'] = df1['new_deaths'].astype('float64')\r\ndf1['new_deaths'] = df1['new_deaths'].astype('int')\r\n\r\ndf1['new_cases'] = df1['new_cases'].astype('float64')\r\ndf1['new_cases'] = df1['new_cases'].astype('int')\r\n\r\ndf1['new_tests'] = df1['new_tests'].astype('float64')\r\ndf1['new_tests'] = df1['new_tests'].astype('int')\r\n\r\ndf1['hospital_beds_per_thousand'] = df1['hospital_beds_per_thousand'].astype('float64')\r\n\r\n\r\n# Group by based on continent, country and month. Aggregating new deaths, new cases,new tests and hospital beds.\r\n\r\ndf1 = df1.groupby(['iso_code','continent','location','month_n','month_num']).agg({'new_deaths':'sum','new_cases':'sum','new_tests':'sum','hospital_beds_per_thousand':'sum'})\r\ndf1.reset_index(inplace = True)\r\n\r\n\r\n# Adding rows for months not present in the original dataset.\r\n# For example: If Germany doesnt have any data for Jan month, we add a row with values 0. 
This is needed for\r\n# cumilation purpose \r\n\r\nfor iso in df1['iso_code'].unique():\r\n for i in range(1,7):\r\n val = ((df1['iso_code'].str.contains(iso)) & (df1['month_num'] == i)).any()\r\n if not val:\r\n df_temp = pd.DataFrame({\"iso_code\":[iso],\"continent\":[''],\"location\":[''],\"date\":[''],\"new_deaths\":[0],\r\n \"new_cases\":[0],\"new_tests\":[0],\"hospital_beds_per_thousand\":[0.0],\"month_n\":[''],\r\n \"month_num\":[i]})\r\n \r\n df1 = df1.append(df_temp,ignore_index = True) \r\n \r\n \r\n# Adding three new columns into DF1 for holdin cumilative data of deaths, cases and tests.\r\n \r\n\r\ndf1['cumil_deaths'] = ''\r\ndf1['cumil_cases'] = ''\r\ndf1['cumil_tests'] = ''\r\nsum_deaths = 0\r\n\r\n# Cumilative code for new_deaths\r\n\r\nfor item in df1['iso_code'].unique():\r\n sum_deaths = 0\r\n for i in range(1,7):\r\n val_deaths = df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'new_deaths'].iloc[0]\r\n sum_deaths = sum_deaths + val_deaths\r\n df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'cumil_deaths'] = sum_deaths\r\n\r\n# Cumilative code for new_cases\r\n \r\nfor item in df1['iso_code'].unique():\r\n sum_cases = 0\r\n for i in range(1,7):\r\n val_cases = df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'new_cases'].iloc[0]\r\n sum_cases = sum_cases + val_cases\r\n df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'cumil_cases'] = sum_cases\r\n\r\n# Cumilative code for new_tests\r\n\r\nfor item in df1['iso_code'].unique():\r\n sum_tests = 0\r\n for i in range(1,7):\r\n val_tests = df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'new_tests'].iloc[0]\r\n sum_tests = sum_tests + val_tests\r\n df1.loc[((df1['month_num'] == i) & (df1['iso_code'].str.contains(item))),'cumil_tests'] = sum_tests\r\n \r\n \r\n# Changing datatypes of cumilated tests, deaths and cases before plotting \r\n\r\ndf1['cumil_tests'] = df1['cumil_tests'].astype('int')\r\ndf1['cumil_deaths'] = df1['cumil_deaths'].astype('int')\r\ndf1['cumil_cases'] = df1['cumil_cases'].astype('int')\r\n\r\n\r\n#Dataset Shape after pre-processing\r\n\r\nprint(df1.dtypes)\r\n\r\n#Dataset for LineGraph\r\ndff2 = df1.copy()\r\ndff2 = dff2[dff2[\"month_num\"] == 6]\r\n#display(dff2)\r\ndff3 = dff2[['iso_code','location','month_num','cumil_deaths','cumil_cases','cumil_tests']].copy()\r\n#display(dff3)\r\n\r\n\r\n# Dash starts here. 
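
The three near-identical loops above build the running totals by re-filtering the frame for every (iso_code, month) pair, which is quadratic in the number of rows. A sketch of the same cumulative columns computed in one pass with groupby/cumsum (column names follow the script, values are invented):

import pandas as pd

df1 = pd.DataFrame({
    'iso_code':   ['DEU', 'DEU', 'FRA', 'FRA'],
    'month_num':  [1, 2, 1, 2],
    'new_deaths': [0, 5, 2, 3],
    'new_cases':  [1, 9, 4, 6],
    'new_tests':  [10, 20, 15, 25],
})
df1 = df1.sort_values(['iso_code', 'month_num'])   # cumsum follows row order
for col in ('deaths', 'cases', 'tests'):
    df1['cumil_' + col] = df1.groupby('iso_code')['new_' + col].cumsum()
print(df1)
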
\r\n# Code deals with the HTML part to create the front end of the dash.\r\napp = dash.Dash(__name__)\r\n\r\n# Header of the web page\r\n\r\napp.layout = html.Div([\r\n html.H1(\"COVID-19 Data Visualization\", style = {'text-align':'center'}),\r\n \r\n# Adding slider(months), two dropdowns(interest, scope) and a radiobutton(cumilated and non-cumilated)\r\n# on the frontend screen.\r\n \r\n dcc.Slider(id = \"slct_slide\",\r\n min = 1,\r\n max = 6,\r\n value = 4,\r\n marks = {\r\n 1: {'label': 'Jan'},\r\n 2: {'label': 'Feb'},\r\n 3: {'label': 'Mar'},\r\n 4: {'label': 'Apr'},\r\n 5: {'label': 'May'},\r\n 6: {'label': 'Jun'}},\r\n step = None\r\n ),\r\n html.Div([\r\n dcc.Dropdown(id = \"slct_tab\",\r\n options = [\r\n {\"label\":\"Deaths\",\"value\":\"deaths\"}, #value has to be column name\r\n {\"label\":\"Cases\",\"value\":\"affected_cases\"},\r\n {\"label\":\"Tests\",\"value\":\"tests\"}],\r\n\r\n multi = False,\r\n value = \"deaths\",\r\n optionHeight=25,\r\n searchable=True,\r\n clearable=True,\r\n style = {'width':\"80%\", 'margin-top': '25px'}\r\n )],style={\"margin-left\": '1000px'}),\r\n html.Div([ \r\n dcc.Dropdown(id=\"slct_scope\",\r\n options=[{\"label\":\"World\",\"value\":\"world\"},\r\n {\"label\":\"Europe\",\"value\":\"europe\"}],\r\n value=\"world\",\r\n multi = False,\r\n optionHeight=25,\r\n searchable=True,\r\n clearable=True,\r\n style = {'width':\"40%\", 'margin-top': '5px'})],style={\"margin-right\": '10px'}),\r\n html.Div([\r\n dcc.RadioItems(id=\"slct_cumil\",\r\n options=[{'label': 'Cummulative Data', 'value': 'cumil'},\r\n {'label': 'NonCummulative Data', 'value': 'non_cumil'}],\r\n value='non_cumil')], style={\"width\": '40%','margin-top': '25px'}),\r\n \r\n html.Div(id = \"output_container\",children = []),\r\n html.Br(),\r\n dcc.Graph(id = 'my_covid',figure = {}),\r\n html.Br(),\r\n dcc.Graph(id='line_graph', figure={})\r\n])\r\n\r\n# Callback to bridge the front and the back end\r\n\r\n@app.callback(\r\n [Output(component_id = 'output_container', component_property = 'children'),\r\n Output(component_id = 'my_covid', component_property = 'figure'),\r\n Output(component_id = 'line_graph', component_property = 'figure')],\r\n [Input(component_id = 'slct_slide', component_property = 'value'),\r\n Input(component_id = 'slct_tab', component_property = 'value'),\r\n Input(component_id = 'slct_scope', component_property = 'value'),\r\n Input(component_id = 'slct_cumil', component_property = 'value')]\r\n)\r\n\r\n# Backend code. 
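
The layout above wires a slider, two dropdowns and a radio group into a single callback. Stripped to its skeleton, the wiring pattern is the one below (component ids and the port are invented for the illustration):

import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Slider(id='month', min=1, max=6, value=4, step=None),
    html.Div(id='label'),
])

@app.callback(Output('label', 'children'),
              [Input('month', 'value')])
def describe(month):
    # Dash calls this whenever any registered Input changes; the return
    # value is written into the Output component's property.
    return 'Month {} selected'.format(month)

if __name__ == '__main__':
    app.run_server(port=8051)
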
Creating the various maps.\r\n\r\ndef update_graph(option_slctd, data_slctd,scope_slctd,cumil_slctd):\r\n print(option_slctd)\r\n print(data_slctd)\r\n print(scope_slctd)\r\n print(cumil_slctd)\r\n #cumil_slctd=False\r\n dff1 = df1.copy()\r\n dff1 = dff1[dff1[\"month_num\"] == option_slctd]\r\n container = \"The data chosen by user is {0} and the geographical scope is {1}\".format(data_slctd,scope_slctd)\r\n \r\n if(cumil_slctd == 'non_cumil'):\r\n \r\n \r\n \r\n if data_slctd == \"deaths\":\r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"new_deaths\",\r\n hover_data=['location', 'new_deaths','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n elif data_slctd == \"affected_cases\":\r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"new_cases\",\r\n hover_data=['location', 'new_cases','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n else: \r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"new_tests\",\r\n hover_data=['location', 'new_tests','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n \r\n \r\n else: \r\n\r\n \r\n if data_slctd == \"deaths\":\r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"cumil_deaths\",\r\n hover_data=['location', 'cumil_deaths','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n elif data_slctd == \"affected_cases\":\r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"cumil_cases\",\r\n hover_data=['location', 'cumil_cases','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n else: \r\n fig = px.choropleth(\r\n data_frame= dff1,\r\n locationmode='ISO-3',\r\n locations='iso_code',\r\n scope=scope_slctd,\r\n color= \"cumil_tests\",\r\n hover_data=['location', 'cumil_tests','hospital_beds_per_thousand'],\r\n color_continuous_scale=px.colors.diverging.Geyser)\r\n \r\n \r\n #LineGraph\r\n if data_slctd == \"deaths\":\r\n img = px.line(dff3, x=dff3['location'], y=dff3['cumil_deaths'], title='Line Graph for Total Deaths')\r\n #img.show()\r\n elif data_slctd == \"affected_cases\":\r\n img = px.line(dff3, x=dff3['location'], y=dff3['cumil_cases'], title='Line Graph for Total Affected Cases')\r\n else:\r\n img = px.line(dff3, x=dff3['location'], y=dff3['cumil_tests'], title='Line Graph for Total Tests')\r\n\r\n return container,fig,img\r\n\r\nif __name__ == '__main__':\r\n app.run_server(port = 8000)\r\n \r\n \r\n \r\n\r\n","sub_path":"IDV_final.py","file_name":"IDV_final.py","file_ext":"py","file_size_in_byte":12179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374599076","text":"import pygame\nfrom pygame.sprite import Group\nfrom ship import Ship\nfrom enemy import Enemy\nfrom settings import Settings\nfrom scoreboard import Scoreboard\nimport game_functions as gf\n\nfrom scene import Scene\n\nclass GameScene(Scene):\n def __init__(self, settings, screen, scoreboard):\n Scene.__init__(self)\n \n self.settings = settings\n self.screen = screen\n self.sb = scoreboard\n self.gamePaused 
= False\n        \n        # Make a group to store bullets in.\n        self.bullets = Group()\n        \n        # Make a group to store enemies in.\n        self.enemies = Group()\n        \n        # Make a group to store enemy bullets in.\n        self.enemy_bullets = Group()\n        \n        self.player = Ship(self.screen, self.settings, self.bullets) # Create the player; the player is a ship, drawn on the screen.\n        \n        # Make an enemy.\n        gf.spawn_enemies(self.settings, self.screen, self.enemies, self.enemy_bullets)\n        \n        gf.startGame(self.player, self.bullets, self.enemy_bullets, self.enemies, self.settings)\n\n    def ProcessInput(self):\n        # --- Check events; if the user quits, exit the main program loop.\n        gf.check_events(self.settings, self.screen, self.player, self.bullets, False)\n        \n    def Update(self):\n        # --- Game logic goes here.\n        gf.update_everything(self.player, self.bullets, self.enemies, self.enemy_bullets)\n        gf.check_bullets_pos(self.bullets)\n        gf.check_enemy_bullets_pos(self.enemy_bullets, self.settings)\n        # If the player is dead, the game is over.\n        return gf.check_collisions(self.settings, self.screen, self.sb, self.player, self.bullets, self.enemies, self.enemy_bullets)\n    \n    def Render(self):\n        # The game scene is just a blank blue screen.\n        #screen.fill((0, 0, 255))\n\n        # --- Draw all objects to the screen.\n        gf.update_screen(self.screen, self.sb, self.player, self.enemies, self.bullets, self.enemy_bullets) ","sub_path":"lesson16/scenes/gamescene.py","file_name":"gamescene.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177426477","text":"# Γέροντας Αλέξανδρος - 321/2015029\r\nclass Dictionary:\r\n\r\n    # The constructor initialises the dictionary, opens the file and calls loadWords().\r\n    def __init__(self):\r\n        self.Dict = {}\r\n        self.file = open('dictionary.txt', 'a+')\r\n        self.file.seek(0,0)\r\n        self.loadWords()\r\n\r\n    # Read the file and load its words into the dictionary.\r\n    def loadWords(self):\r\n        while True:\r\n            line = self.file.readline()\r\n            if line == '' or line == '\\n' : break\r\n            self.Dict[line.split(',')[0]] = line.split(',')[1]\r\n\r\n    # Read a word from the user.\r\n    def wordInput(self):\r\n        word = input(\"Δώσε μια αγγλική λέξη και την ελληνική μετάφραση : \")\r\n\r\n        # Store the word in the file and the dictionary if it is not already registered.\r\n        if word.split(',')[0] not in self.Dict:\r\n            self.file.write(word+ '\\n')\r\n            self.Dict[word.split(',')[0]] = word.split(',')[1]\r\n        else: print('Η λέξη υπάρχει ήδη στο λεξικό')\r\n\r\n    # Look up a word's translation if it exists in the dictionary.\r\n    def getWordTranslation(self):\r\n        word = input('Δώσε μια αγγλική λέξη: ')\r\n        if word in self.Dict: print (\"H λέξη\", word, \"σημαίνει\", self.Dict[word])\r\n        else: print('Η λέξη δεν υπάρχει στο λεξικό\\n')\r\n\r\n    # The destructor closes the file.\r\n    def __del__(self):\r\n        self.file.close()\r\n\r\ndef main():\r\n\r\n    dictionary = Dictionary()\r\n\r\n    while True:\r\n        print(\"1.Εισαγωγή νέας λέξης.\")\r\n        print(\"2.Αναζήτηση μετάφρασης λέξης.\")\r\n        x = input(\"0.Έξοδος και αποθήκευση αλλαγών. \\n>> \")\r\n\r\n        if int(x) == 1: dictionary.wordInput()\r\n        if int(x) == 2: dictionary.getWordTranslation()\r\n        if int(x) == 0: break\r\n\r\nmain()","sub_path":"exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
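
The Dictionary class above persists entries by appending comma-separated lines to a text file and re-parsing them on start-up. The standard-library shelve module, which the lab_6.py record further down also relies on, gives the same persistence without any parsing; a sketch with an invented shelf name:

import shelve

def add_word(english, greek):
    # The shelf behaves like a dict that is transparently written to disk.
    with shelve.open('dictionary_shelf') as db:
        if english in db:
            print('The word is already in the dictionary')
        else:
            db[english] = greek

def translate(english):
    with shelve.open('dictionary_shelf') as db:
        return db.get(english, 'not found')

add_word('book', 'βιβλίο')
print(translate('book'))
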
 +{"seq_id":"616197674","text":"from purity_fb import PurityFb\nimport os\nimport urllib3\n\n# See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\nurllib3.disable_warnings()\n\n# REST server hostname/IP\nHOST = os.environ.get('REST_TEST_HOST', 'localhost')\n# pureuser\nAPI_TOKEN = os.environ.get('REST_TEST_API_TOKEN', 'T-9709078c-6c05-495f-af2c-9318888097f1')\n# ir\nINTERNAL_API_TOKEN = os.environ.get('REST_TEST_INTERNAL_API_TOKEN', 'T-9709078c-6c05-495f-af2c-9318888097f1')\nVERSIONS = ['1.0', '1.1']\n\ndef get_test_versions(host):\n    array = PurityFb(host)\n    array.disable_verify_ssl()\n    rest_versions = array.list_versions()\n    return [v for v in rest_versions if v in VERSIONS]\n","sub_path":"test/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316446914","text":"import random,shelve\r\nf=shelve.open(\"C://Lab_6//for_lab_6\")\r\nnames=f[\"names\"]\r\n# print(list(f.keys()))\r\n# value=f[\"value\"]\r\n# names=['Андрійчук', 'Бандурін', 'Бровченко', 'Василенко',\\\r\n#        'Гаврилюк', 'Ганжа', 'Дем`янчук', 'Жинжер', 'Кисіль', \\\r\n#        'Кішка', 'Кобилюк', 'Кожевніков', 'Кучеренко', 'Максимчук', \\\r\n#        'Мельничук', 'Невмержицька', 'Новосаденко', 'Осадчий', 'Пащевський',\\\r\n#        'Поліщук', 'Потапенко О.', 'Потапенко Д.', 'Снітчук', 'Траєр', 'Фащевський',\\\r\n#        'Федосов', 'Фельдман', 'Ханін']\r\ndef val(x=None):\r\n    v=[]\r\n    if x==None:\r\n        for i in range(len(names)):\r\n            v.append(random.randint(0,100))\r\n    else:\r\n        v=x\r\n    return v\r\n\r\nvalues=val()\r\npeople=[]\r\npartisipants={}\r\n\r\nclass competition:\r\n    \"\"\"data() - print the table of participants\r\n    rezult() - print the competition result tables and the table of disqualified participants\"\"\"\r\n    def up(self,name,sex=0,bals=1,doping=2,weight=3):\r\n        k,*v=name,sex,bals,doping,weight\r\n        partisipants[k]=v\r\n\r\n\r\n    def __init__(self,name,sex=None,bals=1,doping=None,weight=None):\r\n        self.name=name\r\n        people.append(name)\r\n        values.append(bals)\r\n\r\n        if sex==None:\r\n            if name==\"Кішка\" or name==\"Бровченко\" or name==\"Невмержицька\":\r\n                sex=\"Ж\"\r\n            else:\r\n                sex=\"М\"\r\n\r\n        if bals==1:\r\n            bals=random.randint(1,100)\r\n\r\n        if doping==None:\r\n            if random.randint(0,2)==1:\r\n                doping=\"+\"\r\n            else:\r\n                doping=\"-\"\r\n\r\n        if weight==None:\r\n            if sex==\"М\":\r\n                if random.randint(0,2)==0:\r\n                    weight=\"до 50кг\"\r\n                elif random.randint(0,2)==1:\r\n                    weight=\"50-70кг\"\r\n                else:\r\n                    weight=\"более 70кг\"\r\n            else:\r\n                weight=\"до 50кг\"\r\n\r\n        self.up(name,sex,bals,doping,weight)\r\n\r\n\r\n    def data(self):\r\n        print(\"{0:<20} {1:<3} {2:<4} {3:<6} {4:<2}\".format('Фамилия',\"Пол\",\"Балл\",\"Допинг\",\"Весовая категория\"))\r\n        for i in sorted(partisipants.keys()):\r\n            print(\"{0:<20} {1:^3} {2:<4} {3:^6} {4:<2}\".format(i,partisipants[i][0],partisipants[i][1],partisipants[i][2],partisipants[i][3]))\r\n\r\n\r\n    def rezult(self):\r\n        boy={k:v[1:2]+v[3:] for (k,v)in partisipants.items() if v[2]==\"-\" if v[0]==\"М\"}\r\n        girl={k:v[1:2]+v[3:] for (k,v)in partisipants.items() if v[2]==\"-\" if v[0]==\"Ж\"}\r\n        disqualified={k:v[1:2]+v[3:] for 
(k,v)in partisipants.items() if v[2]==\"+\"}\r\n b0={k:v for (k,v)in boy.items() if v[1]==\"до 50кг\"}\r\n b1={k:v for (k,v)in boy.items() if v[1]==\"50-70кг\"}\r\n b2={k:v for (k,v)in boy.items() if v[1]==\"более 70кг\"}\r\n\r\n g0={k:v for (k,v)in girl.items() if v[1]==\"до 50кг\"}\r\n # g1={k:v for (k,v)in girl.items() if v[1]==\"50-70кг\"}\r\n # g2={k:v for (k,v)in girl.items() if v[1]==\"более 70кг\"}\r\n def p(f):\r\n def r(x):\r\n print(\"{0:<20} {1:<3} {2:<10}\".format('Фамилия',\"Балл\",\"Весовая категория\"))\r\n f(x)\r\n print()\r\n return r\r\n @p\r\n def pr(x):\r\n b=list(reversed(sorted(list(x.items()),key=lambda s=x.items: s[1][0])))\r\n for (k,v) in b:\r\n print(\"{0:<20} {1:<4} {2:s}\".format(k,v[0],v[1]))\r\n\r\n print(\"Победители среди парней\")\r\n pr(b0)\r\n pr(b1)\r\n pr(b2)\r\n print(\"Победители среди девушек\")\r\n pr(g0)\r\n print(\"Дисквалифицированные:\")\r\n pr(disqualified)\r\n\r\n\r\n# for i in zip(names):\r\n# p=competition(name=i[0])\r\n\r\n# p.data()\r\n# print()\r\n# p.rezult()\r\n","sub_path":"I семестр/Програмування (Python)/Лабораторні/Бандурін 6402/Lab_6/lab_6.py","file_name":"lab_6.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457966262","text":"# coding=utf-8\n\n\"\"\"公众号:脾气暴躁的产品经理\"\"\"\n\nimport lxml\nimport time\nfrom selenium import webdriver\nfrom lxml import etree\n\nbrowser = webdriver.Firefox()\n\nfor i in range(1, 31):\n print('page ', i)\n if i == 1:\n browser.get(\n 'https://www.lagou.com/jobs/list_%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86?labelWords=&fromSearch=true&suginput=')\n else:\n browser.find_element_by_xpath('//span[@class=\"pager_next \"]').click()\n html = browser.page_source\n # 把整个页面保存到本地备用\n f = open('%d.html' % i, 'w', encoding='utf-8')\n f.write(html)\n f.close()\n\n r = etree.HTML(html)\n li_lst = r.xpath(\"//li[contains(@class, 'con_list_item')]\")\n print(len(li_lst))\n f = open('jd.txt', 'a', encoding='utf-8')\n for item in li_lst:\n titles = item.xpath('.//h3')\n areas = item.xpath('.//span[@class=\"add\"]')\n pays = item.xpath('.//span[@class=\"money\"]')\n industries = item.xpath('.//div[@class=\"industry\"]')\n detail_url = item.xpath('.//a[@class=\"position_link\"]/@href')[0]\n\n title = str(titles[0].text).replace(' ', '')\n lst = str(areas[0].xpath('string(.)')).replace('[', '').replace(']', '').split('·')\n if len(lst) > 0:\n city = lst[0]\n else:\n city = ''\n\n if len(lst) > 1:\n area = lst[1]\n else:\n area = ''\n\n lst = str(pays[0].text).replace('k', '').split('-')\n if len(lst) > 0:\n pay_low = lst[0]\n else:\n pay_low = ''\n\n if len(lst) > 1:\n pay_high = lst[1]\n else:\n pay_high = ''\n\n lst = str(industries[0].text).replace(' ', '').replace('\\n', '').replace(',', '|').split('/')\n if len(lst) > 0:\n industry = lst[0]\n else:\n industry = ''\n\n if len(lst) > 1:\n finance = lst[1]\n else:\n finance = ''\n\n if len(lst) > 2:\n scale = lst[2]\n else:\n scale = ''\n\n # 职位, 城市, 区, 最低月薪, 最高月薪, 行业, 融资情况, 人员规模\n s = '%s,%s,%s,%s,%s,%s,%s,%s,%s\\n' % (titles[0].text, city, area, pay_low, pay_high, industry, finance, scale, detail_url)\n f.write(s)\n f.close()\n time.sleep(8)\nbrowser.quit()\nprint('ok')\n\n","sub_path":"lagou_spider.py","file_name":"lagou_spider.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583341370","text":"import csv\r\nimport numpy as np\r\nfrom sklearn import svm\r\nfrom 
sklearn.preprocessing import RobustScaler\r\n\r\n \r\ndef NormalizeAllData( AllFeatureVectors ):\r\n #Normalization of all the feature vectors\r\n #Assume each row is a feature vector\r\n #https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler\r\n transformer = RobustScaler().fit( AllFeatureVectors )\r\n normalized_vectors = transformer.transform( AllFeatureVectors )\r\n return transformer, normalized_vectors\r\n\r\n# ******************************************************\r\ndef TrainSVMModel(kernel): #kernel 'linear' 'poly' 'rbf'\r\n # Prepare all the data\r\n AllData = []\r\n AllLabel = []\r\n Realtest = []\r\n NumOfPostiveSamples = 0\r\n NumOfNegativeSamples = 0\r\n #Prepare negative data\r\n with open('Sit2dNew.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n AllData.append( row )\r\n AllLabel.append(0) #Class 01 case with label 1\r\n line_count += 1\r\n print(f'Add {line_count} records as Negative samples. (Label 1) (Sit)')\r\n NumOfNegativeSamples = line_count\r\n\t\t\r\n\t#Prepare negative data\r\n with open('Stand2dNew.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n AllData.append( row )\r\n AllLabel.append(0) #Class 02 case with label 0\r\n line_count += 1\r\n print(f'Add {line_count} records as Negative samples. (Label 0) (Stand)')\r\n NumOfNegativeSamples = line_count\r\n #Prepare positive data\r\n with open('Fall2dNew.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n AllData.append( row )\r\n AllLabel.append(1) #Class 02 case with label 0\r\n line_count += 1\r\n print(f'Add {line_count} records as Positive samples. (Label 0) (Fall)')\r\n NumOfPostiveSamples = line_count\r\n\r\n with open('123.csv') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n Realtest.append( row )\r\n line_count += 1\r\n print(f'Add {line_count} records as Positive samples. (Label 0) (Fall)')\r\n\r\n \r\n print( f'NumOfPostiveSamples = {NumOfPostiveSamples} \\nNumOfNegativeSamples = {NumOfNegativeSamples}' )\r\n # ***************************\r\n # Normalize all the feature vectors\r\n AllLabel = np.array(AllLabel)\r\n AllData = np.array(AllData)\r\n Realtest = np.array(Realtest)\r\n \r\n transformer, AllFeatureVectors_normalized = NormalizeAllData( AllData )\r\n print (f'Feature vectors before normalization')\r\n for i in range(3):\r\n print ( AllData[i] )\r\n print (f'Feature vectors after normalization')\r\n for i in range(3): \r\n print ( AllFeatureVectors_normalized[i] )\r\n # ***************************\r\n #Divide the data into two groups: TrainingData and TestingData\r\n trainingDataPorportion = 0.7\r\n \r\n AllDataSize = len( AllFeatureVectors_normalized )\r\n print(f'AllDataSize = {AllDataSize}')\r\n if len( AllLabel ) != AllDataSize:\r\n print( f'AllLabel length != AllDataSize #######################')\r\n raise SystemExit(f'Problem')\r\n \r\n #Make a list of randomized index\r\n np.random.seed(0)\r\n randomizedIndex = np.random.permutation(AllDataSize) #Generate a list of random number. 
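
NormalizeAllData above returns both the fitted transformer and the scaled matrix because the very same transformer has to be reused on unseen samples later, as svmClassifyFeatureVector does further down. A small round trip on a toy, outlier-heavy matrix makes the point:

import numpy as np
from sklearn.preprocessing import RobustScaler

X_train = np.array([[1.0, 200.0],
                    [2.0, 220.0],
                    [3.0, 5000.0]])        # second column has an outlier
scaler = RobustScaler().fit(X_train)       # learns per-column median and IQR
print(scaler.transform(X_train))

# A new sample must pass through the *fitted* scaler, never a fresh fit,
# or it would be centred and scaled with different statistics.
x_new = np.array([[2.5, 240.0]])
print(scaler.transform(x_new))
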
All the numbers are from 0 to AllDataSize - 1\r\n #print(randomizedIndex)\r\n \r\n trainingDataSize = (int)(AllDataSize * trainingDataPorportion)\r\n \r\n TrainingData = []\r\n TestingData = []\r\n TrainingDataLabel = []\r\n TestingDataLabel = []\r\n \r\n #Randomly sample the AllData set to get the set of training data\r\n TrainingDataPosSamplesSize = 0\r\n TrainingDataNegSamplesSize = 0\r\n i = 0\r\n while i < trainingDataSize:\r\n TrainingData.append( AllFeatureVectors_normalized[randomizedIndex[i]] )\r\n TrainingDataLabel.append( AllLabel[randomizedIndex[i]] )\r\n if AllLabel[randomizedIndex[i]] == 1:\r\n TrainingDataPosSamplesSize += 1\r\n else:\r\n TrainingDataNegSamplesSize += 1\r\n i += 1\r\n \r\n TrainingDataSize = len(TrainingData)\r\n if TrainingDataSize != len(TrainingDataLabel):\r\n print( f'len(TrainingData) != len(TrainingDataLabel) #######################')\r\n raise SystemExit(f'Problem')\r\n \r\n print(f'Training data size = {TrainingDataSize}')\r\n print(f'Training data positive samples size = {TrainingDataPosSamplesSize}')\r\n print(f'Training data negative samples size = {TrainingDataNegSamplesSize}')\r\n \r\n #Randomly sample the AllData set to get the set of testing data (No overlapping between TrainingData and TestingData)\r\n TestingDataPosSamplesSize = 0\r\n TestingDataNegSamplesSize = 0\r\n i = trainingDataSize\r\n while i < AllDataSize:\r\n TestingData.append( AllFeatureVectors_normalized[randomizedIndex[i]] )\r\n TestingDataLabel.append( AllLabel[randomizedIndex[i]] )\r\n if AllLabel[randomizedIndex[i]] == 1:\r\n TestingDataPosSamplesSize += 1\r\n else:\r\n TestingDataNegSamplesSize += 1\r\n i += 1\r\n \r\n TestingDataSize = len(TestingData)\r\n if TestingDataSize != len(TestingDataLabel):\r\n print( f'len(TestingData) != len(TestingDataLabel) #######################')\r\n raise SystemExit(f'len(TestingData) != len(TestingDataLabel)')\r\n \r\n print(f'Testing data size = {TestingDataSize}')\r\n print(f'Testing data positive samples size = {TestingDataPosSamplesSize}')\r\n print(f'Testing data negative samples size = {TestingDataNegSamplesSize}')\r\n \r\n # ***************************************************************************\r\n # Train the SVM model using traing data set\r\n # fit the model - training\r\n if kernel == 'linear':\r\n SVM = svm.SVC(kernel='linear')\r\n print(f'Kernel = linear $$$$$$$$$$$$$$$$$$$$$')\r\n elif kernel == 'rbf':\r\n SVM = svm.SVC(kernel='rbf', gamma=10)\r\n print(f'Kernel = rbf $$$$$$$$$$$$$$$$$$$$$')\r\n elif kernel == 'poly':\r\n SVM = svm.SVC(kernel='poly', gamma=10)\r\n print(f'Kernel = poly $$$$$$$$$$$$$$$$$$$$$')\r\n else:\r\n print(f'Invalid input kernel #########################')\r\n raise SystemExit(f'Invalid input kernel ')\r\n #sys.exit\r\n\r\n SVM.fit(TrainingData, TrainingDataLabel)\r\n TrainingData = np.array(TrainingData)\r\n TrainingDataLabel = np.array(TrainingDataLabel)\r\n print(TrainingData.shape)\r\n print(TrainingDataLabel.shape)\r\n # ******************************\r\n # This part can be ignored\r\n # Test the accuracy of the trained model using testing data\r\n print (f'********************')\r\n print (f'Testing data set classification results')\r\n CalculateAccuracyForTheTrainedModel( SVM, TestingData, TestingDataLabel )\r\n print (f'********************')\r\n print (f'Training data set classification results')\r\n CalculateAccuracyForTheTrainedModel( SVM, TrainingData, TrainingDataLabel )\r\n # *********************************\r\n #Return the trained model\r\n return SVM, transformer , Realtest , 
AllData, AllLabel\r\n\r\ndef CalculateAccuracyForTheTrainedModel( SVM, FeatureVectors_normalized, Labels ):\r\n # ***************************************************************************\r\n # Test the accuracy of the trained model using testing data\r\n Result = SVM.predict(FeatureVectors_normalized) #Do prediction for each element in TestingData\r\n TruePositiveCount = 0\r\n FalsePositiveCount = 0\r\n TrueNegativeCount = 0\r\n FalseNegativeCount = 0\r\n \r\n numberOfVectors = len(FeatureVectors_normalized)\r\n i = 0\r\n while i < numberOfVectors:\r\n if Result[ i ] == Labels[ i ]: #True case\r\n if Result[ i ] == 1: #Positive case\r\n TruePositiveCount += 1\r\n else:\r\n TrueNegativeCount += 1 #Negative case\r\n else: #False case\r\n if Result[ i ] == 1: #Positive case\r\n FalsePositiveCount += 1\r\n else:\r\n FalseNegativeCount += 1 #Negative case\r\n i += 1\r\n \r\n print(f'True Positive Count = {TruePositiveCount}')\r\n print(f'True Negative Count = {TrueNegativeCount}')\r\n print(f'False Positive Count = {FalsePositiveCount}')\r\n print(f'False Negative Count = {FalseNegativeCount}')\r\n \r\n Precision = TruePositiveCount / ( TruePositiveCount + FalsePositiveCount)\r\n Recall = TruePositiveCount / ( TruePositiveCount + FalseNegativeCount )\r\n \r\n print(f'Number of testing data = {numberOfVectors}')\r\n print(f'Precision = {Precision:0.3f}')\r\n #print('Precision = {:0.3f}'.format(Precision))\r\n print(f'Recall = {Recall:0.3f}')\r\n #print('Recall = {:0.3f}'.format(Recall))\r\n \r\n\r\ndef svmClassifyFeatureVector(SVM, transformer, featureVector):\r\n normalized_featureVector = transformer.transform( featureVector )\r\n #normalized_featureVector = transformer.transform( [featureVector] )\r\n # print (f'Feature vector before normalization = {featureVector}')\r\n # print (f'Feature vector after normalization = {normalized_featureVector[0]}')\r\n Result = SVM.predict( normalized_featureVector )\r\n return Result\r\n# ***************************************************\r\nif __name__ == '__main__':\r\n SVM, featureVectorTransformer , Realtest , AllData, AllLabel= TrainSVMModel('linear') #kernel 'linear' 'poly' 'rbf'\r\n \r\n \r\n print (f'**********************************')\r\n #Testing feature vector\r\n feature_vector = AllData\r\n result = svmClassifyFeatureVector( SVM, featureVectorTransformer, feature_vector )\r\n print (f'classification result is {result}')\r\n \r\n #Testing feature vector\r\n feature_vector = AllData[0,:].reshape(1,38)\r\n result = svmClassifyFeatureVector( SVM, featureVectorTransformer, feature_vector )\r\n print (f'classification result is {result}')\r\n \r\n feature_vector = Realtest[3,:].reshape(1,38)\r\n result = svmClassifyFeatureVector( SVM, featureVectorTransformer, feature_vector )\r\n print (f'classification result is {result}')\r\n \r\n","sub_path":"SVM4.py","file_name":"SVM4.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12435398","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 4 09:15:02 2019\r\n\r\n@author: hnk\r\n\"\"\"\r\n\r\n\r\nfrom keras.layers import Input, BatchNormalization,ReLU, Flatten, Dense, GlobalAveragePooling2D, Dropout\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\r\nfrom keras.models import Model\r\nfrom keras.optimizers import Nadam\r\n\r\nfrom keras.preprocessing.image import load_img, img_to_array, array_to_img, save_img\r\nimport 
numpy as np\r\nfrom skimage import util\r\nimport glob\r\nimport os\r\n\r\n\r\ndef build_generator():\r\n gen_in = Input(shape = (256, 256, 3), name = 'generator_input')\r\n \r\n x = Conv2D(filters = 16 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='encoder1_1')(gen_in) # 256*256*16\r\n x = Conv2D(filters = 16 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal',name='encoder1_2')(x) # 128*128*16 \r\n x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n x = Conv2D(filters = 32 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='encoder2_1')(x) # 128*128*32\r\n x = Conv2D(filters = 32 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal', name='encoder2_2')(x) # 64*64*32 \r\n x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n x = Conv2D(filters = 64 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='encoder3_1')(x) # 64*64*64\r\n x = Conv2D(filters = 64 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal',name='encoder3_2')(x) # 32*32*64 \r\n x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n x = UpSampling2D((2, 2))(x) # 64*64*64\r\n x = Conv2D(filters = 64 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='decoder1_1')(x) # 64*64*64\r\n x = Conv2D(filters = 64 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='decoder1_2')(x) # 64*64*64\r\n x = LeakyReLU()(x)\r\n\r\n x = UpSampling2D((2, 2))(x) # 64*128*128\r\n x = Conv2D(filters = 32 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='decoder2_1')(x) # 128*128*32\r\n x = Conv2D(filters = 32 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='decoder2_2')(x) # 128*128*32\r\n x = LeakyReLU()(x)\r\n\r\n x = UpSampling2D((2, 2))(x) # 32*256*256\r\n x = Conv2D(filters = 32 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='decoder3_1')(x) # 256*256*32\r\n gen_out = Conv2D(filters = 3 ,kernel_size = 3, strides = 1, activation='tanh', padding = 'same', kernel_initializer = 'he_normal', name='decoder3_2')(x) # 256*256*3\r\n\r\n return Model(inputs = gen_in, outputs = gen_out)\r\n\r\n\r\ndef build_discriminator():\r\n dis_in = Input(shape = (256, 256, 3), name = 'discriminator_input')\r\n\r\n x = Conv2D(filters = 16 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='dis1_1')(dis_in) # 256*256*16\r\n x = Conv2D(filters = 16 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal',name='dis1_2')(x) # 128*128*16 \r\n #x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n x = Conv2D(filters = 32 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='dis2_1')(x) # 128*128*32\r\n x = Conv2D(filters = 32 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal', name='dis2_2')(x) # 64*64*32 \r\n #x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n x = Conv2D(filters = 64 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='dis3_1')(x) # 64*64*64\r\n x = Conv2D(filters = 64 ,kernel_size = 2, strides = 2, padding = 'valid', 
kernel_initializer = 'he_normal',name='dis3_2')(x) # 32*32*64 \r\n #x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n\r\n x = Conv2D(filters = 128 ,kernel_size = 3, strides = 1, padding = 'same', kernel_initializer = 'he_normal', name='dis4_1')(x) # 32*32*128\r\n x = Conv2D(filters = 128 ,kernel_size = 2, strides = 2, padding = 'valid', kernel_initializer = 'he_normal',name='dis4_2')(x) # 16*16*128 \r\n #x = BatchNormalization(momentum=0.9, epsilon=0.000001)(x)\r\n x = LeakyReLU()(x)\r\n \r\n #x = Flatten()(x)\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dropout(0.25)(x)\r\n dis_out = Dense(1, activation = 'sigmoid', kernel_initializer = 'he_normal', name='dis_out')(x)\r\n \r\n return Model(inputs = dis_in, outputs = dis_out)\r\n\r\n\r\ndef model_build():\r\n ## Hyper Parameters\r\n r_lambda = 0.4\r\n optimizer = Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)\r\n\r\n ## Build Discriminator\r\n discriminator = build_discriminator()\r\n discriminator.compile(optimizer = optimizer, loss = 'binary_crossentropy')\r\n\r\n ## Build Generator & Adversarial Model \r\n generator = build_generator()\r\n ad_in = Input(shape = (256, 256, 3), name = 'adversarial_input')\r\n reconstructed_image = generator(ad_in)\r\n discriminator.trainable = False\r\n validity = discriminator(reconstructed_image) \r\n adversarial_model = Model(inputs = ad_in, outputs = [reconstructed_image, validity])\r\n adversarial_model.compile(loss = ['mean_squared_error', 'binary_crossentropy'], loss_weights = [r_lambda, 1], optimizer = optimizer)\r\n\r\n discriminator.summary()\r\n adversarial_model.summary()\r\n \r\n return discriminator, generator, adversarial_model\r\n\r\n\r\ndef load_images():\r\n img_size = (256, 256)\r\n dir_name = 'train_images/'\r\n img_list = glob.glob(dir_name + '*.jpg')\r\n temp_img_array_list = []\r\n \r\n for img in img_list:\r\n temp_img = load_img(img, grayscale=False, target_size=(img_size))\r\n # temp_img_array = img_to_array(temp_img)/255.0\r\n temp_img_array = img_to_array(temp_img)/127.5 -1.0\r\n temp_img_array_list.append(temp_img_array)\r\n temp_img_array_list = np.array(temp_img_array_list)\r\n return temp_img_array_list\r\n\r\n\r\ndef load_noisy_images():\r\n img_size = (256, 256)\r\n dir_name = 'train_images/'\r\n img_list = glob.glob(dir_name + '*.jpg')\r\n temp_img_array_list = []\r\n \r\n for img in img_list:\r\n temp_img = load_img(img, grayscale=False, target_size=(img_size))\r\n # temp_img_array = img_to_array(temp_img)/255.0\r\n temp_img_array = img_to_array(temp_img)/127.5 -1.0\r\n temp_noise_img_array = util.random_noise(temp_img_array)\r\n temp_img_array_list.append(temp_noise_img_array)\r\n temp_img_array_list = np.array(temp_img_array_list, dtype='float32')\r\n return temp_img_array_list \r\n\r\n\r\ndef train():\r\n discriminator, generator, adversarial_model = model_build()\r\n \r\n train_path = 'train_images/'\r\n batch_size = 16\r\n epochs = 50\r\n\r\n train_img_list = os.listdir(train_path)\r\n train_img_list.sort()\r\n train_img_files_number = len(train_img_list)\r\n \r\n clean_imgs = load_images()\r\n noisy_imgs = load_noisy_images()\r\n real_gt = np.ones((batch_size, 1))\r\n real_gt += 0.05 * np.random.random(real_gt.shape)\r\n fake_gt = np.zeros((batch_size, 1))\r\n fake_gt += 0.05 * np.random.random(fake_gt.shape)\r\n \r\n for epoch in range(epochs):\r\n batch_number = train_img_files_number // batch_size\r\n print('Epoch {}/{}'.format(epoch, epochs))\r\n for i in range(0, batch_number):\r\n 
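
Each iteration of the loop that follows trains the discriminator on one clean and one generated batch, then updates the generator through the adversarial model whose discriminator half is frozen. A sketch of that per-batch logic as a standalone function (the three model arguments are assumed to be the compiled Keras models built above; note the single adversarial update, whereas the original fires adversarial_model.train_on_batch twice and keeps only the second loss):

import numpy as np

def train_step(generator, discriminator, adversarial_model,
               clean_batch, noisy_batch):
    """One alternating GAN update; returns (d_loss, g_losses)."""
    n = len(clean_batch)
    # Jittered targets as in the script: labels nudged away from exact 0/1,
    # which softens the discriminator early in training.
    real_gt = np.ones((n, 1)) + 0.05 * np.random.random((n, 1))
    fake_gt = np.zeros((n, 1)) + 0.05 * np.random.random((n, 1))

    fake_batch = generator.predict(noisy_batch)
    d_loss = (discriminator.train_on_batch(clean_batch, real_gt)
              + discriminator.train_on_batch(fake_batch, fake_gt))
    # The adversarial model returns [total, reconstruction, adversarial].
    g_losses = adversarial_model.train_on_batch(noisy_batch,
                                                [clean_batch, real_gt])
    return d_loss, g_losses
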
clean_imgs_batch = clean_imgs[i*batch_size: (i+1)*batch_size]\r\n noisy_imgs_batch = noisy_imgs[i*batch_size: (i+1)*batch_size]\r\n fake_imgs_batch = generator.predict(noisy_imgs_batch)\r\n \r\n d_loss_real = discriminator.train_on_batch(clean_imgs_batch, real_gt)\r\n d_loss_fake = discriminator.train_on_batch(fake_imgs_batch, fake_gt)\r\n \r\n adversarial_model.train_on_batch(noisy_imgs_batch, [clean_imgs_batch, real_gt])\r\n g_loss = adversarial_model.train_on_batch(noisy_imgs_batch, [clean_imgs_batch, real_gt])\r\n \r\n print('discriminator loss = {:>0.6f}, generator loss = {:>0.6f}, reconstruction loss = {:>0.6f}'.format(d_loss_real + d_loss_fake, g_loss[0], g_loss[1]))\r\n fake_img_array = (fake_imgs_batch[0][:][:][:] + 1.0) * 127.5\r\n fake_img = array_to_img(fake_img_array)\r\n save_img('epoch_' + str(epoch) + 'batch_' + str(i)+ '_fake_img.jpg', fake_img) \r\n return discriminator, generator, adversarial_model\r\n \r\ndiscriminator, generator, adversarial_model = train()\r\n\r\ndiscriminator.save('discriminator.h5', include_optimizer=True)\r\ngenerator.save('generator.h5', include_optimizer=True)\r\nadversarial_model.save('adversarial_model.h5', include_optimizer=True)\r\n","sub_path":"train_alocc.py","file_name":"train_alocc.py","file_ext":"py","file_size_in_byte":9050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16000035","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom django.core.management import call_command\n\nfrom elvanto_sync import elvanto\nfrom elvanto_sync.models import ElvantoGroup, ElvantoPerson\n\n\n@pytest.mark.slowtest\n@pytest.mark.django_db\nclass TestElvanto():\n def test_pull_groups(self):\n elvanto.pull_down_groups()\n grp = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')\n assert str(grp) == 'All'\n\n def test_pull_people(self):\n elvanto.pull_down_people()\n calvin = ElvantoPerson.objects.get(e_id='f7cfa258-d3c6-11e4-95ba-068b656294b7')\n assert str(calvin) == 'John Calvin'\n assert calvin.email == 'john.calvin@geneva.com'\n chalmers = ElvantoPerson.objects.get(e_id='5a0a1cbc-d3c7-11e4-95ba-068b656294b7')\n assert str(chalmers) == 'Thomas Chalmers'\n assert chalmers.email == 'thomas.chalmers@edinburgh.com'\n knox = ElvantoPerson.objects.get(e_id='c1136264-d3c7-11e4-95ba-068b656294b7')\n assert str(knox) == 'John Knox'\n assert knox.email == ''\n owen = ElvantoPerson.objects.get(e_id='48366137-d3c7-11e4-95ba-068b656294b7')\n assert str(owen) == 'John Owen'\n assert owen.email == 'john.owen@cambridge.com'\n\n def test_pop_groups(self):\n elvanto.pull_down_groups()\n elvanto.pull_down_people()\n elvanto.populate_groups()\n grp_all = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')\n e_emails = grp_all.elvanto_emails()\n assert 'john.calvin@geneva.com' in e_emails\n assert 'john.owen@cambridge.com' in e_emails\n assert 'thomas.chalmers@edinburgh.com' in e_emails\n\n assert grp_all.total_people_in_group() == 3\n assert len(grp_all.group_members_entirely_disabled()) == 0\n assert grp_all.total_disabled_people_in_group() == 0\n\n def test_refresh_data(self):\n elvanto.refresh_elvanto_data()\n\n def refresh_pull_management_command(self):\n call_command('pull_from_elvanto')\n","sub_path":"elvanto_sync/tests/test_elvanto.py","file_name":"test_elvanto.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367555813","text":"# script for generating gds layout of one-sided 
contact design of double-layer graphene modulator where the width of the top layer is fixed and the bottom layer can be swept to test fabrication\r\n\r\nimport nazca as nd\r\nimport nazca.geometries as geom\r\n\r\ndef modulator_one_sided_contacts(channel_w, channel_l, waveguide_w, contact_separation, id):\r\n# function for generating layout file for double-layer graphene modulator\r\n# REQUIRED: graphene channel width [nm], graphene channel length [nm], waveguide width [nm], distance between contact and waveguide [nm], and a device ID\r\n# all dimensions are given in nm (to avoid rounding errors)\r\n# designed to be run in a loop with changing id variable\r\n# can be called from another file in the same directory by adding 'from modulator import modulator' at the top of the file, where it is called by 'bb = modulator(width,l,waveguide_w,contact_separation,id)'\r\n\r\n # create a layer and define its accuracy\r\n nd.add_layer(name='layer1', layer=1, accuracy=0.001) # bottom etch layer 1\r\n nd.add_layer(name='layer2', layer=2, accuracy=0.001) # bottom etch layer 2\r\n nd.add_layer(name='layer3', layer=3, accuracy=0.001) # bottom contact\r\n nd.add_layer(name='layer4', layer=4, accuracy=0.001) # top etch layer 1\r\n nd.add_layer(name='layer5', layer=5, accuracy=0.001) # top etch layer 2\r\n nd.add_layer(name='layer6', layer=6, accuracy=0.001) # top contact\r\n nd.add_layer(name='layer7', layer=7, accuracy=0.001) # hf etch\r\n nd.add_layer(name='layer8', layer=8, accuracy=0.001) # labels\r\n\r\n # create a building block from predefined geometries\r\n with nd.Cell('modulator_' + str(id)) as bb:\r\n\r\n # constants\r\n contact_overlap = 100 # [nm]\r\n\r\n # calculations\r\n pad_w = channel_l * 0.4 # dimensions of pads [nm]\r\n contact_l = 0.5 * channel_w # length of contact perpendicular to the graphene channel [nm]\r\n device_l = 1.1 * (channel_w + contact_l + pad_w) # height of area required to be etched from the bottom of the graphene channels\r\n device_w = 1.1 * (channel_l + 2 * channel_w) # area required to be etched\r\n\r\n # define large etch area for bottom graphene\r\n etch_upper = [(-channel_w, channel_w/2), # large etch area\r\n (-channel_w, device_l),\r\n (channel_l + channel_w, device_l),\r\n (channel_l + channel_w, channel_w/2)]\r\n\r\n etch_lower = [(-channel_w, channel_w/2),\r\n (-channel_w, -channel_w),\r\n (channel_l + channel_w, -channel_w),\r\n (channel_l + channel_w, channel_w/2)]\r\n\r\n large_graphene_etch = [(-channel_w * 0.5, -channel_w * 0.5), # region that is larger than the graphene channel\r\n (-channel_w * 0.5, channel_w * 1.5),\r\n (channel_l + channel_w * 0.5, channel_w * 1.5),\r\n (channel_l + channel_w * 0.5, -channel_w * 0.5)]\r\n\r\n bottom_graphene = [(0, 0),\r\n (0, channel_w + contact_overlap), # expecting contacting the top edge of the bottom layer\r\n (channel_l, channel_w + contact_overlap),\r\n (channel_l, 0)]\r\n\r\n bottom_etch = nd.clipper.diff_polygons([etch_upper,etch_lower], [large_graphene_etch], accuracy=0.001) \r\n nd.Polygon(points=bottom_etch[0], layer='layer1').put(0) # bottom etch layer\r\n\r\n # redefine etch areas for top graphene\r\n etch_upper = [(-channel_w, contact_separation/2), # large etch area\r\n (-channel_w, device_l),\r\n (channel_l + channel_w, device_l),\r\n (channel_l + channel_w, contact_separation/2)]\r\n\r\n etch_lower = [(-channel_w, contact_separation/2),\r\n (-channel_w, - 2 * contact_separation),\r\n (channel_l + channel_w, - 2 * contact_separation),\r\n (channel_l + channel_w, contact_separation/2)]\r\n\r\n 
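
Every etch region above is an axis-aligned rectangle written out corner by corner, which is easy to mistype. A tiny helper that builds the point list from an origin and a size would shrink these definitions considerably; a pure-Python sketch, independent of nazca (the dimension values are invented):

def rect(x, y, w, h):
    """Corner points of an axis-aligned rectangle, in the list-of-tuples
    form that nd.Polygon and nd.clipper accept."""
    return [(x, y), (x, y + h), (x + w, y + h), (x + w, y)]

# e.g. the bottom_graphene outline from the function above:
channel_l, channel_w, contact_overlap = 10000, 500, 100   # nm
bottom_graphene = rect(0, 0, channel_l, channel_w + contact_overlap)
print(bottom_graphene)
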
large_graphene_etch = [(-contact_separation * 0.5, -contact_separation * 0.5), # region that is larger than the graphene channel\r\n (-contact_separation * 0.5, contact_separation * 1.5),\r\n (channel_l + contact_separation * 0.5, contact_separation * 1.5),\r\n (channel_l + contact_separation * 0.5, -contact_separation * 0.5)]\r\n\r\n top_graphene = [(0, 0),\r\n (0, contact_separation + contact_overlap), # expecting top graphene layer to only extend contacts the minimum distance from the waveguide\r\n (channel_l, contact_separation + contact_overlap),\r\n (channel_l, 0)]\r\n\r\n top_etch = nd.clipper.diff_polygons([etch_upper,etch_lower], [large_graphene_etch], accuracy=0.001) \r\n nd.Polygon(points=top_etch[0], layer='layer4').put(0) # top layer etch \r\n\r\n # define etch area of graphene channel that needs to be written at a higher dose\r\n etch_channel_upper = [(-channel_w * 0.6, channel_w/2),\r\n (-channel_w * 0.6, channel_w * 1.6),\r\n (channel_l + channel_w * 0.6, channel_w * 1.6),\r\n (channel_l + channel_w * 0.6, channel_w/2)]\r\n\r\n etch_channel_lower = [(-channel_w * 0.6, channel_w/2),\r\n (-channel_w * 0.6, -channel_w * 0.6),\r\n (channel_l + channel_w * 0.6, -channel_w * 0.6),\r\n (channel_l + channel_w * 0.6, channel_w/2)]\r\n\r\n bottom_channel_etch = nd.clipper.diff_polygons([etch_channel_upper,etch_channel_lower], [bottom_graphene], accuracy=0.001) # merge both halves of etch area\r\n nd.Polygon(points=bottom_channel_etch[0], layer='layer2').put(0) # bottom channel etch\r\n\r\n # redefine graphene channel etch area for top layer\r\n etch_channel_upper = [(-contact_separation * 0.6, contact_separation/2),\r\n (-contact_separation * 0.6, contact_separation * 1.6),\r\n (channel_l + contact_separation * 0.6, contact_separation * 1.6),\r\n (channel_l + contact_separation * 0.6, contact_separation/2)]\r\n\r\n etch_channel_lower = [(-contact_separation * 0.6, contact_separation/2),\r\n (-contact_separation * 0.6, -contact_separation * 0.6),\r\n (channel_l + contact_separation * 0.6, -contact_separation * 0.6),\r\n (channel_l + contact_separation * 0.6, contact_separation/2)]\r\n\r\n top_channel_etch = nd.clipper.diff_polygons([etch_channel_upper,etch_channel_lower], [top_graphene], accuracy=0.001) # merge both halves of etch area\r\n nd.Polygon(points=top_channel_etch[0], layer='layer5').put(0) # top channel etch \r\n \r\n # contacts\r\n bottom_edge_contact = [(0, channel_w),\r\n (0, channel_w + contact_l),\r\n (channel_l, channel_w + contact_l),\r\n (channel_l, channel_w)]\r\n\r\n bottom_pad = [(0, channel_w + contact_l),\r\n (0, channel_w + contact_l + pad_w),\r\n (pad_w, channel_w + contact_l + pad_w),\r\n (pad_w, channel_w + contact_l)]\r\n\r\n hf_etch = [(pad_w * 0.15, channel_w + contact_l + pad_w * 0.15),\r\n (pad_w * 0.15, channel_w + contact_l + pad_w * 0.85),\r\n (pad_w * 0.85, channel_w + contact_l + pad_w * 0.85),\r\n (pad_w * 0.85, channel_w + contact_l + pad_w * 0.15)]\r\n\r\n contact = nd.clipper.merge_polygons([bottom_edge_contact, bottom_pad], accuracy=0.001)\r\n nd.Polygon(points=contact[0], layer='layer3').put(0) # bottom contact\r\n nd.Polygon(points=hf_etch, layer='layer7').put(0) # opening bottom contact\r\n \r\n top_edge_contact = [(0, contact_separation),\r\n (0, contact_separation + contact_l),\r\n (channel_l, contact_separation + contact_l),\r\n (channel_l, contact_separation)]\r\n\r\n top_pad = [(channel_l, contact_separation + contact_l),\r\n (channel_l, channel_w + pad_w),\r\n (channel_l - pad_w, channel_w + pad_w),\r\n (channel_l - pad_w, 
contact_separation + contact_l)]\r\n\r\n hf_etch = [(channel_l - pad_w * 0.15, contact_separation + contact_l + pad_w * 0.15),\r\n (channel_l - pad_w * 0.15, channel_w + pad_w * 0.85),\r\n (channel_l - pad_w * 0.85, channel_w + pad_w * 0.85),\r\n (channel_l - pad_w * 0.85, contact_separation + contact_l + pad_w * 0.15)]\r\n\r\n contact = nd.clipper.merge_polygons([top_edge_contact, top_pad], accuracy=0.001)\r\n nd.Polygon(points=contact[0], layer='layer6').put(0) # top contact \r\n nd.Polygon(points=hf_etch, layer='layer7').put(0) # opening top contact\r\n\r\n # add label\r\n nd.text(text='# ' + str(id) + \"\\nsep = \" + str((channel_w - contact_separation)/1000) + ' um\\nl = ' + str(channel_l/1000) + ' um', layer='layer7',height=device_l*0.05).put(0, device_l)\r\n\r\n bb.put(id * device_w * 1.5,0,0)\r\n\r\n return bb","sub_path":"modulator_single_sided_contacts_fab.py","file_name":"modulator_single_sided_contacts_fab.py","file_ext":"py","file_size_in_byte":9649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365385184","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom XtoBeREMOVED.REINFORCE import REINFORCE\n\n\nclass PPOc(REINFORCE):\n \"\"\"\n https://arxiv.org/pdf/1707.06347.pdf\n \"\"\"\n\n def __init__(self,\n sess,\n model,\n optimizer,\n epsilon,\n a0=1.0,\n gamma=1.0,\n entropy=0.01,\n actor_freeze_update_per_steps=None,\n actor_freeze_update_step_size=1.0,\n trace_decay_rate=None,\n icm=None,\n grad_norm_clip=None,\n scope=\"PPO_clip\"):\n \"\"\"\n PPO_penalty without Critic,\n for the case when actor & critic DO NOT share variables,\n it gives more flexibility to separate actor & critic\n\n epsilon:\n controls the clip region of ppo,\n where the target function of ppo_clip is\n E[min(r * A, clip(r, 1-epsilon, 1+epsilon) * A)]\n \"\"\"\n self.epsilon = epsilon\n self.a0 = a0\n self.a_freeze_update_per_steps = actor_freeze_update_per_steps\n self.a_freeze_update_step_size = actor_freeze_update_step_size\n\n super().__init__(\n sess=sess,\n model=model,\n optimizer=optimizer,\n off_policy=False,\n gamma=gamma,\n entropy=entropy,\n trace_decay_rate=trace_decay_rate,\n icm=icm,\n grad_norm_clip=grad_norm_clip,\n scope=scope)\n\n def _build_loss_fn(self):\n \"\"\"\n A_t:\n return or advantage or td error, etc, shape = [None, (*,)]\n old_act_probs:\n real_act act prob of old policy, shape = [None, (*,) act_nums]\n \"\"\"\n act_probs = self.model.get_current_act_probs()\n old_act_probs = tf.stop_gradient(\n self.model.get_old_act_probs())\n\n shape = self.get_shape(act_probs)\n act_nums = shape[-1]\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n self.A_t = self.model.get_advantage()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n act_onehot = tf.one_hot(self.model.a_t, depth=act_nums)\n\n ro_t = tf.reduce_sum(\n act_onehot * act_probs, axis=-1\n ) / tf.maximum(tf.reduce_sum(\n act_onehot * old_act_probs,\n axis=-1), 1e-8)\n ro_t_clip = tf.clip_by_value(\n ro_t, 1.0 - self.epsilon, 1.0 + self.epsilon)\n clip_loss = tf.minimum(ro_t * self.A_t, ro_t_clip * self.A_t)\n self.a_loss = tf.reduce_mean(\n self.get_slots(\n self.build_icm_loss(\n self.build_entropy_loss(\n - self.a0 * clip_loss))))\n\n tf.summary.scalar(\"a_loss_clip\", tf.reduce_mean(self.get_slots(-clip_loss)))\n tf.summary.scalar(\"a_loss_all\", self.a_loss)\n\n def _build_reset_op(self):\n super()._build_reset_op()\n\n 
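
The surrogate assembled above in TensorFlow reduces, per action, to min(r * A, clip(r, 1 - eps, 1 + eps) * A). A numpy sketch of that objective on a handful of made-up probability ratios shows the clipping at work:

import numpy as np

def ppo_clip_objective(ratio, advantage, epsilon=0.2):
    """Per-sample PPO-clip surrogate: the min removes any incentive to
    push the probability ratio outside [1 - epsilon, 1 + epsilon]."""
    clipped = np.clip(ratio, 1.0 - epsilon, 1.0 + epsilon)
    return np.minimum(ratio * advantage, clipped * advantage)

ratio = np.array([0.5, 0.9, 1.0, 1.3, 2.0])   # new_prob / old_prob
advantage = np.ones_like(ratio)
print(ppo_clip_objective(ratio, advantage))   # gains capped at 1 + epsilon
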
def _build_train_op(self):\n grads_and_vars = self.compute_gradient(\n self.a_loss, self.model.get_actor_current_variables())\n self.a_current_train_op = self.apply_gradients(grads_and_vars)\n\n # self.actor_ovars_to_cvars = OrderedDict()\n # self.assert_variables(self.model.get_actor_old_variables(),\n # self.model.get_actor_current_variables(),\n # self.actor_ovars_to_cvars)\n\n self.a_old_train_op = []\n # for o_var, c_var in self.actor_ovars_to_cvars.items():\n # self.a_old_train_op.append(\n # tf.assign_add(\n # o_var,\n # self.a_freeze_update_step_size * (c_var - o_var)))\n # self.init_target_op.append(tf.assign(o_var, c_var))\n\n def update(self, _global_step, feed_dict):\n _, loss = self.sess.run([self.a_current_train_op, self.a_loss], feed_dict)\n\n # if _global_step == 0:\n # self.sess.run(self.init_target_op)\n\n if self.a_freeze_update_per_steps is not None:\n if _global_step % self.a_freeze_update_per_steps:\n self.sess.run(self.a_old_train_op)\n\n return loss\n","sub_path":"XtoBeREMOVED/PPOc.py","file_name":"PPOc.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445928209","text":"'''\nAuthor: Takudzwa Mhonde\nDate: Aug 9, 2019\nDescription:\n'''\nimport requests as req # works with http requests\n\n# perform http request -- does not get stuff from local path\nparams = {'q:': 'pizza'}\nr = req.get('http://bing.com/search', params=params)\n# see status report of http request -- 200 means OK\nprint('Status: ', r.status_code)\n# get content of page\nhtmlContent = r.text\n# create an html page in th existing dir\nfile = open('./index.html', 'w+')\nfile.write(htmlContent)","sub_path":"tests/requesttests.py","file_name":"requesttests.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255698035","text":"# -*- coding: utf-8 -*-\nn = int(input('Digite a quantidade de termos: '))\nlista = []\ni=1\na=1\nwhile len(lista)\\d+)/(?P\\d+)?/?$', 'posts'),\n # Filter by tag name\n url(r'^tag/(?P\\d+)/$', 'posts'),\n # Individual posts\n url(r'^post/(?P\\d+)/$', 'single_post'),\n # Search posts\n url(r'^search/', 'posts'),\n # Add a new post\n url(r'^add/$', PostCreate.as_view()),\n url(r'^edit/(?P\\d+)/$', PostEdit.as_view()),\n url(r'^delete/(?P\\d+)/$', PostDelete.as_view()),\n # Handle Ajax request for recaptcha\n url(r'^recaptcha/$', 'verify_recaptcha'),\n)\n\n# URL for authentication\nurlpatterns += patterns(\n '',\n url(r'^accounts/login/$', login),\n url(r'^accounts/logout/$', logout, {'next_page': '/'}),\n)\n\n# URL for the admin app.\nurlpatterns += patterns (\n '',\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^comments/', include('django.contrib.comments.urls')),\n)\n","sub_path":"bixly/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194498317","text":"import pytest\nimport os\nimport subprocess\nimport tensorflow as tf\nimport numpy as np\nimport sys\n[sys.path.append(i) for i in ['.', '..']]\nimport ncc\n\nclass SimpeMatMulModule(tf.Module):\n\n def __init__(self):\n super(SimpeMatMulModule, self).__init__()\n self.v = tf.constant([[1.,2.],[3.,4.]])\n\n @tf.function(input_signature=[tf.TensorSpec([1,2], tf.float32)])\n def __call__(self, x):\n return tf.matmul(x, self.v)\n\nmodule = 
SimpeMatMulModule()\n\ndef init_values():\n\tinput = np.asarray([1,2], dtype=np.float32).reshape([1,-1])\n\tncc.save_input_array('test', input)\n\tncc.save_expect_array('test', ncc.run_tflite(input))\n\ndef test_simple_matmul():\n\tncc.clear()\n\tncc.save_tflite(module)\n\tinit_values()\n\tncc.compile(['--inference-type', 'float'])\n\n\tncc.infer(['--dataset-format', 'raw'])\n\tncc.close_to('test', 0)\n\ndef test_simple_matmul_quant():\n\tncc.clear()\n\tncc.save_tflite(module)\n\tinit_values()\n\tncc.compile(['--inference-type', 'uint8', '-t', 'cpu',\n\t            '--dataset', ncc.input_dir + '/test.bin', '--dataset-format', 'raw',\n\t            '--input-type', 'float'])\n\n\tncc.infer(['--dataset-format', 'raw'])\n\tncc.close_to('test', 1e-3)\n\nif __name__ == \"__main__\":\n\ttest_simple_matmul()\n\ttest_simple_matmul_quant()","sub_path":"tests/system/test_matmul.py","file_name":"test_matmul.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274235419","text":"\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nw = np.array([1.0,1.0])\ny = -1.0\n\nxx,yy = np.meshgrid(np.linspace(-8,8,500),np.linspace(-8,8,500))\nxy = np.c_[xx.ravel(),yy.ravel()]\nz = np.random.rand(1,xy.shape[0]).ravel()\nfor i in range(0,xy.shape[0]) :\n    if 1.0 - y * np.dot(w,xy[i]) > 0 :\n        z[i] = 1.0 - y * np.dot(w,xy[i])\n    else :\n        z[i] = 0\nz = z.reshape(xx.shape)\n\ncm = plt.cm.RdBu\nplt.contourf(xx,yy,z,cmap = cm)\n\nplt.show()\n\n","sub_path":"python/00..tat_optimization/hinge_loss_observation.py","file_name":"hinge_loss_observation.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"232151831","text":"from typing import List\n\nclass Solution:\n    def letterCombinations(self, digits: str) -> List[str]:\n        dic = {\"2\":\"abc\", \"3\":\"def\", \"4\":\"ghi\", \"5\":\"jkl\",\n               \"6\": \"mno\", \"7\":\"pqrs\", \"8\":\"tuv\", \"9\":\"wxyz\"}\n\n        # solution from the book\n        # def dfs(index, path):\n        #     if len(path) == len(digits):\n        #         result.append(path)\n        #         return\n        #     for i in range(index, len(digits)): # this line is hard to follow...\n        #         for j in dic[digits[i]]:\n        #             print (i, j)\n        #             dfs(i + 1, path + j)\n        # if not digits:\n        #     return []\n        # result = []\n        # dfs(0, \"\")\n        # return result\n\n        # my solution\n        def dfs(i, ret):\n            if len(ret) == len(digits):\n                result.append(ret)\n                return\n            for char in dic[digits[i]]:\n                dfs(i + 1, ret + char)\n        if not digits:\n            return []\n        result = []\n        dfs(0, \"\")\n        return result\n\nsol = Solution()\ndigits = \"235\"\nprint (sol.letterCombinations(digits))\n","sub_path":"algorithm/python_algorithm_interview_00/33_17.py","file_name":"33_17.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218598508","text":"import cv2\nimport numpy as np\nfrom random import randrange as rand\n\ndef visualise_detections_only(image, detections, labels):\n    for i in detections:\n        image = cv2.rectangle(image, (i[0],i[1]), (i[2],i[3]), (255,80,80), 1)\n        image = cv2.rectangle(image, (i[0],i[1]-12), (i[2],i[1]+4), (200,129,123), -1)\n        text = labels[i[4]].upper()\n        image = cv2.putText(image, text, (i[0]+1,i[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (70,70,70), 1, cv2.LINE_AA)\n    return image \n\ndef visualise_trackers_only(image, tracked_dets, labels):\n    for i in tracked_dets:\n        image = cv2.rectangle(image, (i[0],i[1]), (i[2],i[3]), (20,20,170), 2)\n        image = cv2.rectangle(image, 
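
The recursive DFS in the letterCombinations record above enumerates exactly the cartesian product of the letter groups, so an iterative version is a one-liner with itertools (shown for comparison, not as the record's own solution):

from itertools import product

def letter_combinations(digits):
    dic = {"2": "abc", "3": "def", "4": "ghi", "5": "jkl",
           "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz"}
    if not digits:
        return []
    # product yields tuples of letters in the same order the DFS visits them.
    return [''.join(p) for p in product(*(dic[d] for d in digits))]

print(letter_combinations("235"))   # the same 27 strings as the class above
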
(i[0]-1,i[1]-12), (i[2]+1,i[1]+4), (rand(90,100),rand(90,100),rand(235,255)), -1)\n text = labels[i[4]].upper() + \" | ID:\" + str(i[5]) #check if right order\n image = cv2.putText(image, text, (i[0]+1,i[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255,255,255), 1, cv2.LINE_AA)\n return image\n\ndef visualise_counter_only(image, threshold, counter):\n frame = cv2.line(image, (threshold, 0),(threshold,int(image.shape[0])),(0,0,255),5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n frame = cv2.putText(frame,\n \"COUNTER: \" + str(counter), \n (5,35), \n font, \n 0.5,\n (230,102,30),\n 2)\n return image\n\ndef show(image, data, labels, fps, frame_count, threshold, counter, SHOW=\"ALL\"):\n o_dets = data[0]\n t_dets = data[1]\n\n #Visualise FPS first\n image = cv2.putText(image, \"FPS: \"+fps, (5,14), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (140, 30, 245), 1, cv2.LINE_AA)\n\n if SHOW != \"ALL\":\n if SHOW == \"DETECTED_ONLY\":\n image = visualise_detections_only(image, o_dets, labels)\n return image\n if SHOW == \"TRACKED_ONLY\":\n image = visualise_trackers_only(image, t_dets, labels)\n return image\n if SHOW == \"COUNTER_ONLY\":\n image = visualise_counter_only(image, threshold, counter) \n if SHOW == \"\" or SHOW == \"NONE\":\n return image\n else:\n image = visualise_detections_only(image, o_dets, labels)\n # if frame_count % 5 == 0: #image saving debug\n # cv2.imwrite(\"tests/exampledet_{}.jpg\".format(str(frame_count)), image)\n \n image = visualise_trackers_only(image, t_dets, labels)\n # if frame_count % 5 == 0: #image saving debug\n # cv2.imwrite(\"tests/exampletrack_{}.jpg\".format(str(frame_count)), image)\n \n image = visualise_counter_only(image,threshold, counter)\n # if frame_count % 5 == 0: #image saving debug\n # cv2.imwrite(\"tests/exampletrack_{}.jpg\".format(str(frame_count)), image)\n\n return image\n\n\n\n \n","sub_path":"visualiser.py","file_name":"visualiser.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583648802","text":"import sys\n\nimport click\nimport requests\nfrom halo import Halo\n\nfrom ..config.github import config_add_students, config_github\nfrom ..core.color_text import normal, warn\nfrom ..ensures import ensure_gh_token\nfrom ..utils.github_entities import Team\nfrom ..utils.github_scanner import github_headers\n\n\ndef print_table(data, cols=5, wide=15, indent=2):\n \"\"\"Prints formatted data on columns of given width.\"\"\"\n n, r = divmod(len(data), cols)\n pat = \"{{:{}}}\".format(wide)\n line = \"\\n{}\".format(\" \" * indent).join(pat * cols for _ in range(n))\n\n # indent the first line\n line = \" \" * indent + line\n\n last_line = \" \" * indent + pat * r\n print(line.format(*data))\n print(last_line.format(*data[n * cols :]))\n\n\n# Use a \"team slug\"\n# for example: \"2019 Students-hello\" -> \"2019_students-hello\"\n\n\n@click.command()\n@click.argument(\"student_handles\", nargs=-1)\n@click.option(\n \"--dry\",\n help=\"dry run, do not fire final request to remote\",\n is_flag=True,\n default=False,\n)\n@click.option(\n \"--token\", default=config_github[\"personal_access_token\"], help=\"github access token\"\n)\n@click.option(\"--org\", default=config_github[\"organization\"], show_default=True)\n@click.option(\n \"--team\", default=config_add_students[\"default_team_slug\"], show_default=True\n)\ndef add_students(student_handles, dry, token, org, team):\n \"\"\"\n student_handles: github user to add (usernames)\n \"\"\"\n if len(student_handles) == 0:\n 
print(\"required handles\")\n return 1\n\n github_students = student_handles\n github_organization = org\n github_team = team\n github_token = token\n\n ensure_gh_token(github_token)\n\n # TODO: use logging lib to log messages\n spinner = Halo(stream=sys.stderr)\n if dry:\n spinner.info(\"Dry run\")\n\n spinner.info(\"fetch existing team members from GitHub\")\n team = Team(github_organization, team_slug=github_team, github_token=github_token)\n num_member = len(team.members.keys())\n words = (\n normal.txt(\"target team: \")\n .kw(f\"{github_team}\")\n .txt(\"( \")\n .kw2(num_member)\n .txt(\" members) \")\n )\n spinner.succeed(words.to_str())\n\n if dry:\n existed_members = set()\n else:\n existed_members = set(team.members.keys())\n outside_users = list(set(github_students) - existed_members)\n\n # print(\"Users to invite:\")\n # print_table(outside_users, cols=5, wide=15)\n\n spinner.info(\"Check valid Github users\")\n invalid_id = []\n spinner.start()\n total = len(outside_users)\n for idx, u in enumerate(outside_users, start=1):\n text = \"\" if not dry else \"[skip]: \"\n text += f\"{idx}/{total} Check valid GitHub username : {u}\"\n if dry:\n spinner.succeed(text)\n else:\n if check_is_github_user(u, github_token):\n spinner.succeed(text)\n else:\n spinner.fail(text)\n invalid_id.append(u)\n\n if len(invalid_id) != 0:\n print(\"Find non-existed github user names:\")\n # control strings take space\n print_table([warn.txt(\"i\").to_str() for i in invalid_id], cols=5, wide=25)\n\n non_member_valid_users = list(set(outside_users) - set(invalid_id))\n\n # membership info\n membership_infos = {key: \"unknown\" for key in non_member_valid_users}\n total = len(non_member_valid_users)\n spinner.info(\"Check Membership information\")\n for idx, username in enumerate(non_member_valid_users, start=1):\n skip = \"\" if not dry else \"[skip]: \"\n spinner.start(f\"{skip}{idx}/{total}: {username}\")\n if not dry:\n res = team.get_memberships(username)\n if res.status_code == 200:\n membership_infos[username] = res.json()[\"state\"]\n spinner.succeed()\n\n pending_users = [\n u for u in membership_infos.keys() if membership_infos[u] == \"pending\"\n ]\n no_memship_users = [\n u for u in membership_infos.keys() if membership_infos[u] == \"unknown\"\n ]\n\n print(f\"Users already in pending state (total:{len(pending_users)}):\")\n print_table(pending_users)\n\n print(f\"Users to add (total:{len(no_memship_users)})\")\n print_table(no_memship_users)\n print(\"-\" * 30)\n\n failed_users = []\n spinner.info(\"start to invite users\")\n for user_name in no_memship_users:\n if dry:\n spinner.info(f\"[Skip] add user: {user_name}\")\n else:\n if True == add_user(team, user_name=user_name):\n spinner.succeed(f\"add user: {user_name}\")\n else:\n failed_users.append(user_name)\n spinner.fail(f\"failed to add user: {user_name}\")\n failed_users = list(set(failed_users))\n\n if len(failed_users) != 0:\n print(\"Users failed to add\")\n print_table(failed_users)\n\n spinner.succeed(\"Adding students successfully\")\n\n\ndef add_user(team, user_name) -> bool:\n res = team.add_user_to_team(user_name)\n return res.status_code == 200\n\n\ndef check_is_github_user(github_id, github_token) -> bool:\n res = requests.get(\n f\"https://api.github.com/users/{github_id}\", headers=github_headers(github_token),\n )\n if res.status_code == 200:\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n 
add_students()\n","sub_path":"invisible_hand/scripts/add_students.py","file_name":"add_students.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612881361","text":"#coding=utf-8\r\n#MetOceanParser.py\r\n# Created on: 16/11/2019\r\n# Author: José Raúl Santana Jiménez\r\n\r\n\r\n# This file contains functions and declarations for the MetOcean protocol commands parsing\r\n\r\n# License Creative Commons 3.0 (by-nc-nd)\r\n\r\n#!/usr/bin/env python\r\n\r\n################################################################\r\n# Labels for MetOcean principal commands definition\r\nSET = '0'\r\nGET = '1'\r\n# Labels for MetOcean secondary commands definition\r\nINSTRUMENT = '0'\r\nMODE = '1'\r\nDATE_TIME = '2'\r\nREALTIME_DATA = '3'\r\nMEMORY_DOWNLOAD = '4'\r\nMEMORY_STATE = '5'\r\nMEMORY_ERASE = '6'\r\n\r\n\r\n#Labels for MetOcean commands arguments definition\r\nINSTRUMENT_ID = '0'\r\nTYPE_INSTRUMENT = '1'\r\nTYPE_MODE = '2'\r\nYEAR = '3'\r\nMONTH = '4'\r\nDAY = '5'\r\nHOUR = '6'\r\nMINUTE = '7'\r\nSECOND = '8'\r\nUTC_SECONDS = '9'\r\nUTC_SECONDS_ACQUIRE_START = '10'\r\nREALTIME_DATA_arg = '11'\r\nMEMORY_STATE_Arg = '12'\r\nMEMORY_WRITE_FLAG = '13'\r\nMEMORY_READ_ADDRESS = '14'\r\nMEMORY_READ_BUFFER_LENGTH = '15'\r\nINTERVAL = '16'\r\nSAMPLES_PER_INTERVAL = '17'\r\nPROCESSING = '18'\r\n\r\n# Labels for TYPE_INSTRUMENT argument possible values\r\nNONE = '0'\r\nSBE_37SM = '1'\r\n\r\n# Labels for PROCESSING argument possible values\r\n\r\nNONE_p = '0'\r\nAVERAGE_p = '1'\r\nMEDIAN_p = '2'\r\n\r\n# Labels for TYPE_MODE argument possible values\r\nCONFIGURING = '0' \r\nACQUIRING = '1'\r\n\r\n\r\n\r\n\r\nclass MetOcean_object_command():\r\n def __init__(self, primary_command, secondary_command, arguments):\r\n self.primary_command = -1\r\n self.secondary_command = -1\r\n self.arguments = {}\r\n \r\n#Function to parse one MetOcean command received from the datalogger\r\n def parseMetOceanCommand(self, received_command):\r\n\r\n #Obtain primary command\r\n substrings = str(received_command).split(\"-\")\r\n print(substrings)\r\n auxiliar_command = substrings[1];\r\n self.primary_command = substrings[0];\r\n print(self.primary_command)\r\n \r\n #Obtain secondary command\r\n substrings = auxiliar_command.split(\"(\")\r\n print(substrings)\r\n self.secondary_command = substrings[0]\r\n auxiliar_command = substrings[1]\r\n \r\n if (self.primary_command == GET):\r\n #Recover all arguments\r\n substrings = auxiliar_command.split(\")\")\r\n print(substrings)\r\n auxiliar_command = substrings[0]\r\n self.arguments = auxiliar_command.split(\";\")\r\n print(self.arguments)\r\n \r\n","sub_path":"python/MetOceanParser.py","file_name":"MetOceanParser.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275585800","text":"from sklearn.svm import SVC\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.metrics import accuracy_score, recall_score, roc_auc_score, confusion_matrix\n\n\nclass support_vector_algorithm:\n\n def __init__(self, x_train, x_test, y_train, y_test):\n super().__init__()\n self.x_train = x_train\n self.x_test = x_test\n self.y_train = y_train\n self.y_test = y_test\n\n def sv_predict(self):\n minmax = preprocessing.MinMaxScaler(feature_range=(0, 1))\n x_train_std = minmax.fit_transform(self.x_train)\n x_test_std = minmax.transform(self.x_test)\n\n svc = SVC(kernel='rbf', probability=True)\n svc_classifier = svc.fit(x_train_std, self.y_train)\n svc_acc = cross_val_score(svc_classifier, x_train_std,\n self.y_train, cv=3, scoring=\"accuracy\", n_jobs=-1)\n svc_proba = cross_val_predict(\n svc_classifier, x_train_std, self.y_train, cv=3, method='predict_proba')\n svc_scores = svc_proba[:, 1]\n y_predict = svc_classifier.predict(x_test_std)\n # print(\"Actual Flood Values:\")\n # print(self.y_test.values)\n # print(\"Predicted Flood Values:\")\n # print(y_pred)\n\n # print(\"\\nAccuracy Score:%f\" %\n # (accuracy_score(self.y_test, y_predict)*100))\n # print(\"Recall Score:%f\" %\n # (recall_score(self.y_test, y_predict)*100))\n # print(\"ROC score:%f\" % (roc_auc_score(self.y_test, y_predict)*100))\n # print(confusion_matrix(self.y_test, y_predict))\n\n return [y_predict, accuracy_score(self.y_test, y_predict)*100]\n","sub_path":"Stuff for CD/Code/support_vector_algorithm.py","file_name":"support_vector_algorithm.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55385581","text":"#!/usr/local/bin/python2.7\n# encoding: utf-8\n'''\nWN_User_Interact...'''","sub_path":""}
self.coordinate_end = coordinate_end\n \n def __repr__(self):\n return json.dumps(self.as_dict(), sort_keys = True, separators = (',', ':'))\n \n def as_dict(self):\n return {\n \"segment_id\": self.segment_id,\n \"coordinate_origin\": self.coordinate_origin,\n \"coordinate_end\": self.coordinate_end,\n \"created_at\": self.created_at.isoformat()\n }\n\nclass Title(db.Model):\n __tablename__ = 'title'\n\n # Fields\n title_number = db.Column(db.String, primary_key=True)\n created_at = db.Column(db.DateTime, nullable=False, server_default=func.now())\n updated_at = db.Column(db.DateTime, nullable=True)\n lock = db.Column(db.DateTime, nullable=True)\n owner_identity = db.Column(db.Integer, db.ForeignKey('owner.identity'), nullable=False)\n address_id = db.Column(db.Integer,\n db.ForeignKey('address.address_id', ondelete=\"CASCADE\", onupdate=\"CASCADE\"),\n nullable=False)\n\n # Relationships\n owner = db.relationship(\"Owner\", backref=db.backref('title', lazy='dynamic'),\n foreign_keys='Title.owner_identity', uselist=False)\n address = db.relationship(\"Address\", backref=db.backref('title', lazy='dynamic'),\n foreign_keys='Title.address_id', uselist=False, cascade=\"all\")\n\n # Methods\n def __init__(self, title_number, owner, address):\n self.title_number = title_number.upper()\n self.created_at = datetime.utcnow()\n self.owner = owner\n self.address = address\n\n def __repr__(self):\n return json.dumps(self.as_dict(), sort_keys=True, separators=(',', ':'))\n\n def as_dict(self):\n return {\n \"title_number\": self.title_number,\n \"owner\": self.owner.as_dict(),\n \"address\": self.address.as_dict(),\n \"created_at\": self.created_at.isoformat(),\n \"updated_at\": self.updated_at.isoformat() if self.updated_at else self.updated_at,\n \"locked_at\": self.lock.isoformat() if self.lock else self.lock\n }\n\n\nclass Owner(db.Model):\n __tablename__ = 'owner'\n\n # Fields\n identity = db.Column(db.Integer, primary_key=True, autoincrement=False)\n forename = db.Column(db.String, nullable=False)\n surname = db.Column(db.String, nullable=False)\n email = db.Column(db.String, nullable=False, unique=True, index=True)\n phone = db.Column(db.String, nullable=False)\n owner_type = db.Column(db.String, nullable=False)\n address_id = db.Column(db.Integer, db.ForeignKey('address.address_id'), nullable=False)\n\n # Relationships\n address = db.relationship(\"Address\", backref=db.backref('owner', lazy='dynamic'),\n foreign_keys='Owner.address_id', uselist=False)\n\n # Methods\n def __init__(self, identity, forename, surname, email, phone, owner_type, address):\n self.identity = identity\n self.forename = forename\n self.surname = surname\n self.email = email.lower()\n self.phone = phone\n self.owner_type = owner_type\n self.address = address\n\n def __repr__(self):\n return json.dumps(self.as_dict(), sort_keys=True, separators=(',', ':'))\n\n def as_dict(self):\n return {\n \"identity\": self.identity,\n \"first_name\": self.forename,\n \"last_name\": self.surname,\n \"email_address\": self.email,\n \"phone_number\": self.phone,\n \"type\": self.owner_type,\n \"address\": self.address.as_dict()\n }\n\n\nclass Address(db.Model):\n __tablename__ = 'address'\n\n # Fields\n address_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n house_name_or_number = db.Column(db.String, nullable=False)\n street_name = db.Column(db.String, nullable=False)\n city = db.Column(db.String, nullable=False)\n county = db.Column(db.String, nullable=False)\n country = db.Column(db.String, nullable=False)\n 
postcode = db.Column(db.String, nullable=False)\n segment_id = db.Column(db.Integer, db.ForeignKey('segment.segment_id'), nullable=False)\n\n # Relationships\n segment = db.relationship(\"Segment\", backref=db.backref(\"address\", lazy='dynamic'),\n foreign_keys='Address.segment_id', uselist=False)\n\n # Methods\n def __init__(self, house_name_number, street_name, city, county, country, postcode, segment):\n self.house_name_or_number = house_name_number\n self.street_name = street_name\n self.city = city\n self.county = county\n self.country = country\n self.postcode = postcode\n self.segment = segment\n\n def __repr__(self):\n return json.dumps(self.as_dict(), sort_keys=True, separators=(',', ':'))\n\n def as_dict(self):\n return {\n \"house_name_number\": self.house_name_or_number,\n \"street\": self.street_name,\n \"town_city\": self.city,\n \"county\": self.county,\n \"country\": self.country,\n \"postcode\": self.postcode,\n \"segment\": self.segment.as_dict()\n }\n\n\nclass Conveyancer(db.Model):\n __tablename__ = 'conveyancer'\n\n # Fields\n conveyancer_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n x500_name = db.Column(db.String, unique=True, nullable=False)\n company_name = db.Column(db.String, nullable=False)\n\n # Methods\n def __init__(self, x500_name, company_name):\n self.x500_name = str(x500_name)\n self.company_name = company_name\n\n def __repr__(self):\n return json.dumps(self.as_dict(), sort_keys=True, separators=(',', ':'))\n\n def as_dict(self):\n return {\n \"conveyancer_id\": self.conveyancer_id,\n \"x500\": X500Name.from_string(self.x500_name).as_dict(),\n \"x500_string\": str(X500Name.from_string(self.x500_name)),\n \"company_name\": self.company_name\n }\n\n\nclass X500Name(object):\n \"\"\"Class representation of an X500Name.\"\"\"\n\n # Fields\n organisation = None\n locality = None\n country = None\n state = None\n organisational_unit = None\n common_name = None\n\n # Methods\n def __init__(self, organisation, locality, country):\n self.organisation = organisation\n self.locality = locality\n self.country = country\n\n @staticmethod\n def from_string(str_obj):\n items = {}\n for item in str_obj.split(','):\n k, v = item.split('=')\n items[k.replace(' ', '')] = v\n\n organisation = items.get('O')\n locality = items.get('L')\n country = items.get('C')\n state = items.get('ST')\n organisational_unit = items.get('OU')\n common_name = items.get('CN')\n\n x500name = X500Name(organisation, locality, country)\n x500name.state = state\n x500name.organisational_unit = organisational_unit\n x500name.common_name = common_name\n\n x500name.validate()\n return x500name\n\n @staticmethod\n def from_dict(dict_obj):\n organisation = dict_obj['organisation']\n locality = dict_obj['locality']\n country = dict_obj['country']\n state = dict_obj.get('state')\n organisational_unit = dict_obj.get('organisational_unit')\n common_name = dict_obj.get('common_name')\n\n x500name = X500Name(organisation, locality, country)\n x500name.state = state\n x500name.organisational_unit = organisational_unit\n x500name.common_name = common_name\n\n x500name.validate()\n return x500name\n\n # Based on: https://docs.corda.net/releases/release-V3.3/generating-a-node.html#node-naming\n def validate(self):\n # Check 3 required values exist\n if not self.organisation:\n raise TypeError(\"Missing: organisation\")\n if not self.locality:\n raise TypeError(\"Missing: locality\")\n if not self.country:\n raise TypeError(\"Missing: country\")\n\n # Check value length\n if 
len(self.organisation) < 2 or len(self.organisation) > 128:\n raise ValueError(\"Wrong length: organisation (min: 2, max: 128)\")\n if len(self.locality) < 2 or len(self.locality) > 64:\n raise ValueError(\"Wrong length: locality (min: 2, max: 64)\")\n if len(self.country) != 2:\n raise ValueError(\"Wrong length: country (min: 2, max: 2)\")\n if self.state and (len(self.state) < 2 or len(self.state) > 64):\n raise ValueError(\"Wrong length: state (min: 2, max: 64)\")\n if self.organisational_unit and (len(self.organisational_unit) < 2 or len(self.organisational_unit) > 64):\n raise ValueError(\"Wrong length: organisational_unit (min: 2, max: 64)\")\n if self.common_name and (len(self.common_name) < 2 or len(self.common_name) > 64):\n raise ValueError(\"Wrong length: common_name (min: 2, max: 64)\")\n\n for name, item in self.as_dict(False).items():\n if not item:\n continue\n\n # Check value's first letter is upper case\n if not item[0].isupper():\n raise ValueError(\"First character is not uppercase: \" + name)\n\n # Check value has no leading or trailing whitespace\n if item.strip() != item:\n raise ValueError(\"Has leading or trailing whitespace: \" + name)\n\n # Check value has invalid characters\n invalid_chars = [',', '=', '$', '\"', '\\'', '\\\\']\n if any(char in item for char in invalid_chars):\n raise ValueError(\"Contains invalid characters: \" + name)\n\n # Check value has invalid characters\n if '\\00' in item:\n raise ValueError(\"Contains null character: \" + name)\n return True\n\n def __str__(self, should_validate=True):\n if should_validate:\n self.validate()\n\n items = []\n items.append(\"O=\" + self.organisation)\n items.append(\"L=\" + self.locality)\n items.append(\"C=\" + self.country)\n if self.state:\n items.append(\"ST=\" + self.state)\n if self.organisational_unit:\n items.append(\"OU=\" + self.organisational_unit)\n if self.common_name:\n items.append(\"CN=\" + self.common_name)\n\n return ','.join(items)\n\n def __repr__(self):\n return str(self)\n\n def as_dict(self, should_validate=True):\n if should_validate:\n self.validate()\n\n return {\n \"organisation\": self.organisation,\n \"locality\": self.locality,\n \"country\": self.country,\n \"state\": self.state,\n \"organisational_unit\": self.organisational_unit,\n \"common_name\": self.common_name,\n }\n","sub_path":"title_api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588779915","text":"from django.views.generic import TemplateView\n\nfrom pvs.models import Energy, Report\n\nfrom datetime import date\nimport json\n\nclass UserPVStationView(TemplateView):\n \n template_name = 'pvstation.html'\n \n ENERGY_DATA_TYPE_TOTAL = 1\n ENERGY_DATA_TYPE_STACKED = 2\n \n def prepare_pvs_energy_hourly_output_data(self, pvs_serial):\n \n pvs_en_hourly_data = Energy.get_calculated_energy_hourly_output(pvs_serial)[pvs_serial]\n p_date_list = [p_date for p_date in pvs_en_hourly_data]\n p_date_list.sort()\n \n p_data = []\n for p_date in p_date_list:\n p_data.append(pvs_en_hourly_data[p_date])\n \n return p_data\n \n def prepare_pvs_energy_daily_output_data(self,pvs_serial,en_daily_data_type=ENERGY_DATA_TYPE_TOTAL):\n \n pvs_en_daily_data = Energy.get_energy_daily_output(pvs_serial)[pvs_serial]\n p_date_list = [entry for entry in pvs_en_daily_data]\n p_date_list.sort()\n \n p_data = []\n if en_daily_data_type == self.ENERGY_DATA_TYPE_TOTAL:\n for p_en_date in p_date_list:\n entry_data = 
{ 'date': p_en_date,\n 'energy': 0}\n for key in pvs_en_daily_data[p_en_date]:\n if key != 'date':\n entry_data['energy'] += pvs_en_daily_data[p_en_date][key]\n p_data.append(entry_data)\n elif en_daily_data_type == self.ENERGY_DATA_TYPE_STACKED:\n for p_en_date in p_date_list:\n p_data.append(pvs_en_daily_data[p_en_date])\n \n return p_data\n \n def get_context_data(self, **kwargs):\n context = TemplateView.get_context_data(self, **kwargs)\n pvs_serial = self.kwargs.get('pvs_serial')\n if pvs_serial in Energy.get_distinct_serial():\n context['pvs_serial'] = self.kwargs.get('pvs_serial')\n \n pvs_en_daily = self.prepare_pvs_energy_daily_output_data(pvs_serial,self.ENERGY_DATA_TYPE_STACKED)\n context['pvs_data_en_daily'] = json.dumps(pvs_en_daily)\n \n pvs_en_hourly = self.prepare_pvs_energy_hourly_output_data(pvs_serial)\n context['pvs_data_en_hourly'] = json.dumps(pvs_en_hourly)\n \n context['pvs_address'] = u''.join(Report.get_address(pvs_serial)).encode('utf-8')\n \n context['copyright_year'] = date.today().year\n \n return context\n \n ","sub_path":"pvs/views_user.py","file_name":"views_user.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62436773","text":"#!/usr/bin/env python2\n\nfrom pwn import *\nimport pwnlib.elf\n\np = remote('hackme.inndy.tw', 7703)\ne = ELF('./rop2')\n\npayload = 'A' * 16 + p32(e.symbols['syscall']) + p32(e.symbols['overflow']) + p32(3) + p32(0)\npayload += p32(e.bss()) + p32(8) # syscall(3, 0, Bss, 8)\n\np.sendline(payload)\np.send('/bin/sh\\x00')\n\npayload = 'A' * 16 + p32(e.symbols['syscall']) + p32(e.symbols['overflow']) + p32(0xb) + p32(e.bss())\npayload += p32(0) + p32(0) # syscall(0xb, '/bin/sh', 0, 0)\np.sendline(payload)\n\np.interactive()\np.close()\n","sub_path":"hackme.inndy.tw/ROP2/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"449470174","text":"import tkinter as tk\nimport random as rd\n\n# variables globales\n\nnb_clic = 0\n\n\n\n# fonctions\n\ndef start():\n \"\"\"Gestion clic sur le bouton\"\"\"\n global nb_clic\n if nb_clic == 0:\n mouvement(balle)\n bouton.config(text=\"Arrêter\")\n else:\n canvas.after_cancel(id_after)\n bouton.config(text=\"Démarrer\")\n nb_clic = 1 - nb_clic\n\ndef creer_balle():\n \"\"\"Dessine un cercle de rayon 20 et retourne une liste contenant les infos de déplacement du cercle\"\"\"\n cercle = canvas.create_oval((300-20, 200-20), (300+20, 200+20), fill=\"blue\")\n dx = rd.randint(1, 7)\n dy = rd.randint(1, 7)\n return [cercle, dx, dy]\n\n\ndef mouvement(balle):\n \"\"\"Déplace la balle de balle[1] pixels en abscisse et balle[2] pixels en ordonnée\"\"\"\n global id_after\n canvas.move(balle[0], balle[1], balle[2])\n rebond1(balle)\n id_after = canvas.after(20, lambda: mouvement(balle))\n\n\ndef rebond1(balle):\n x1, y1, x2, y2 = canvas.coords(balle[0])\n if x2 >= 600 or x1 <=0:\n balle[1] = -balle[1]\n if y2 >= 400 or y1 <= 0:\n balle[2] = -balle[2]\n\n\n# programme principal\n\nracine = tk.Tk()\ncanvas = tk.Canvas(racine, width=600, height=400, bg=\"black\")\ncanvas.grid()\nbouton = tk.Button(racine, text=\"Démarrer\", command=start)\nbouton.grid(row=1)\nballe = creer_balle()\n\n\n\nracine.mainloop()","sub_path":"L1S2-python/Feuille 
2/exo2.py","file_name":"exo2.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"640806107","text":"from scipy.stats import t\nimport numpy as np\nfrom Preparation import *\n\n\ndef significant_test(avg1,avg2,std1,std2,n1,n2):\n t_stat = (avg1-avg2) / np.sqrt((std1**2)/n1 + (std2**2)/n2)\n k = min(n1-1,n2-1)\n cdf_val = t.cdf(t_stat,k)\n return cdf_val\n\ndef conduct_test(df1, df2, name_val, round_idx):\n df_group1 = pd.to_numeric(df1[name_val],errors='coerce')\n df_group2 = pd.to_numeric(df2[name_val],errors='coerce')\n\n mean1 = round(np.mean(df_group1),round_idx)\n mean2 = round(np.mean(df_group2),round_idx)\n std1 = round(np.std(df_group1),round_idx)\n std2 = round(np.std(df_group2),round_idx)\n N1 = len(df_group1)\n N2 = len(df_group2)\n sigval = significant_test(mean1,mean2,std1,std2,N1,N2)\n if sigval > 0.5:\n sigval = 1-sigval\n sigtype = 0\n if sigval >= 0.01:\n sigtype = 'Insignificant'\n elif sigval <0.01 and sigval > 0.001:\n sigtype = 'sig1'\n elif sigval < 0.001:\n sigtype = 'sig2'\n\n return [mean1, std1, mean2, std2, sigval,N1,N2, sigtype]\n\ndef count_test(df1,df2,name_val):\n df_group1 = pd.to_numeric(df1[name_val], errors='coerce')\n df_group2 = pd.to_numeric(df2[name_val], errors='coerce')\n\n N1 = len(df_group1)\n N2 = len(df_group2)\n\n S1 = sum(df_group1)\n S2 = sum(df_group2)\n\n return [S1,S2, round(S1/N1,2), round(S2/N2,2),N1,N2]\n\nmask_low = (pd.isnull(pd.to_numeric(df['NMBA'], errors='coerce')) == True)\nmask_high = (pd.isnull(pd.to_numeric(df['NMBA'], errors='coerce')) == False)\ndf_low = df.loc[mask_high]\ndf_high = df.loc[mask_low]\n\nAge_test = conduct_test(df_low,df_high,'Age',0)\nAPACHE_test = conduct_test(df_low,df_high,'APACHE',0)\nSAPS_test = conduct_test(df_low,df_high,'SAPS',0)\nSOFA_test = conduct_test(df_low,df_high,'SOFA',1)\nBerlin_test = conduct_test(df_low,df_high,'BERLIN',0)\nVT_test = conduct_test(df_low,df_high,'VT',0)\nVTweight_test = conduct_test(df_low,df_high,'VT_weight',1)\nMV_test = conduct_test(df_low,df_high,'Minute Volume',1)\nPEEP_test = conduct_test(df_low,df_high,'PEEP',1)\nPP_test = conduct_test(df_low,df_high,'PP',0)\nPIP_test = conduct_test(df_low,df_high,'PIP',0)\nMAP_test = conduct_test(df_low,df_high,'MAP',0)\nRR_test = conduct_test(df_low,df_high,'RR',0)\nFO2_test = conduct_test(df_low,df_high,'FiO2',2)\nPO2_test = conduct_test(df_low,df_high,'PaO2',0)\nPCO2_test = conduct_test(df_low,df_high,'PaCO2',0)\npH_test = conduct_test(df_low,df_high,'pH',2)\nDP_test = conduct_test(df_low,df_high,'DP_CRS',0)\n\n\nPneumonia_test = count_test(df_low,df_high,'Pneumonia')\nSepsis_test = count_test(df_low,df_high,'Sepsis')\nAspiration = count_test(df_low,df_high,'Aspiration')\nSeptic_test = count_test(df_low,df_high,'Septic_shock')\nBacteremia_test = count_test(df_low,df_high,'Bacteremia')","sub_path":"Baseline_characteristics_NMBA.py","file_name":"Baseline_characteristics_NMBA.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620868757","text":"from setuptools import setup, find_packages, Extension, Distribution\nfrom setuptools.command.build_ext import build_ext\nimport os\nimport pathlib\nimport shutil\n\nsuffix = '.pyd' if os.name == 'nt' else '.so'\nbuild_tool = 'Ninja' if shutil.which('ninja') else 'Unix Makefiles'\nbuild_type = 'Release'\n\n\nclass RKRDistribution(Distribution):\n def iter_distribution_names(self):\n for pkg in 
self.packages or ():\n yield pkg\n for module in self.py_modules or ():\n yield module\n\nclass RKRExtension(Extension):\n def __init__(self, path):\n self.path = path\n super().__init__(pathlib.PurePath(path).name, [])\n\ndef find_extensions(directory):\n extensions = []\n for path, _, filenames in os.walk(directory):\n for filename in filenames:\n filename = pathlib.PurePath(filename)\n if pathlib.PurePath(filename).suffix == suffix:\n extensions.append(RKRExtension(os.path.join(path, filename.stem)))\n return extensions\n\nclass build_RKRExtensions(build_ext):\n def run(self):\n self.announce(\"Configuring CMake\", level=3)\n source_dir = pathlib.PurePath(__file__).parent\n build_dir = source_dir / 'build' / build_type\n\n self.spawn(['cmake', '--no-warn-unused-cli',\n '-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=TRUE',\n f'-DCMAKE_BUILD_TYPE:STRING={build_type}',\n f'-G{build_tool}',\n f'-H{source_dir}', f'-B{build_dir}'])\n\n self.announce(\"Building binaries\", level=3)\n\n self.spawn(['cmake', '--build', str(build_dir), '--config',\n str(build_type), '--target', 'rikerbot_all', '-j', '14'])\n\n self.extensions = find_extensions('rikerbot')\n\n for ext in self.extensions:\n source = f\"{ext.path}{suffix}\"\n ext_dir = pathlib.PurePath(self.get_ext_fullpath(ext.name)).parent\n os.makedirs(f\"{ext_dir / pathlib.PurePath(ext.path).parent}\",\n exist_ok = True)\n shutil.copy(f\"{source}\", f\"{ext_dir}/{source}\")\n\nsetup(\n name = 'rikerbot',\n description = 'RikerBot is a framework for creating Python Minecraft Bots '\n 'with C++ extensions',\n license = 'zlib',\n long_description = open('ReadMe.rst').read(),\n version = '0.0.2',\n url = 'https://github.com/SpockBotMC/RikerBot',\n packages = find_packages(exclude = ['mcd2cpp']),\n keywords = ['minecraft'],\n author = \"N. Vito Gamberini\",\n author_email = \"vito@gamberini.email\",\n classifiers = [\n 'Development Status :: 2 - Pre-Alpha',\n 'License :: OSI Approved :: zlib/libpng License',\n 'Programming Language :: C++',\n 'Programming Language :: Python :: 3 :: Only'\n ],\n ext_modules = [RKRExtension(\"rikerbot\")],\n distclass = RKRDistribution,\n cmdclass = {'build_ext': build_RKRExtensions}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309726566","text":"import json\n\nfor fold in (\"train\", \"dev\", \"test\"):\n with open(\"topic_\"+fold+\".json\", \"r\") as data_file, open(\"topic_\"+fold+\".txt\", \"w\") as text_file:\n texts = []\n for json_str in data_file:\n json_obj = json.loads(json_str)\n text = json_obj['text']\n texts.append(text.strip())\n text_file.write('\\n'.join(texts))\n","sub_path":"contextualized_topic_models/data/wiki/make_json_to_txt.py","file_name":"make_json_to_txt.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"628993344","text":"\"\"\"\nThis module provides a class to carry out the regularised CCA, lexicon based approach.\n\n.. 
moduleauthor:: Maximilian Springenberg \n\n|\n\n\"\"\"\n# extending base libs\nfrom enum import Enum\nfrom copy import deepcopy\nimport numpy as np\nimport json\n# from sklearn.cross_decomposition import CCA\n# own libs\nfrom src.estimation.base import Estimator, Metrices\nfrom src.util import phoc_util, sanity_util\nfrom src.util.phoc_util import Alphabet\nfrom src.pyrcca.rcca import CCA as RCCA\nfrom sklearn.preprocessing import normalize # USE!!!\nfrom scipy.spatial.distance import cdist\nimport warnings\n\n\nclass RCCAEstimator(Estimator):\n \"\"\"\n The RCCAEstimator performs a regularized CCA and a nearest neighbour search on the transformed data\n\n .. note::\n The RCCAEstimator additionally logs its configuration in a json file, when saved\n \"\"\"\n\n def __init__(self, words,\n n_dim=128, reg=10e3,\n metric=Metrices.COSINE, phoc_lvls=phoc_util.DEFAULT_PHOC_LEVELS,\n alphabet=[Alphabet.ASCII_LOWER, Alphabet.ASCII_DIGITS, Alphabet.ASCII_PUNCTUATION]):\n \"\"\"\n The two main hyper parameters are the number of dimensions (n_dims) of the output vectors and the regularization\n paramter (reg)\n\n :param words: Lexicon of words\n :param n_dim: Number of dimensions for the output vector\n :param reg: Regularization parameter, used to avoid singularity of matrices\n :param metric: Metric to be used in the subspace, cosine distance per default, as no other makes obvious sense\n :param phoc_lvls: Number of Levels in PHOC\n :param alphabet: Alphabet used in PHOC\n \"\"\"\n super().__init__([], [])\n # globals\n self.__alphabet = alphabet\n self.__phoc_level = phoc_lvls\n self.phoc = None\n self.phoc_trans = None\n self.weights_X = None\n self.weights_PHOC = None\n self.metric = metric\n # regularized CCA\n self.cca = RCCA(numCC=n_dim, reg=reg, verbose=False, kernelcca=False) # DO NOT use kernel cca\n self.reg = reg\n self.n_dim = n_dim\n # setting lexicon property (initialization of words list and respective PHOC)\n self.words = words\n\n @property\n def words(self):\n return deepcopy(self._words)\n\n @words.setter\n def words(self, words):\n \"\"\"setting this property refreshes the train-data/ list of respecive PHOC aswell\"\"\"\n self._words = list(words)\n self.phoc = [phoc_util.phoc(word=w, alphabet=self.__alphabet, levels=self.__phoc_level).astype(float)\n for w in self._words]\n self.phoc = np.array(self.phoc)\n\n def norm(self, X):\n \"\"\"\n Method used for normalization, CCA demands zero mean and unit variance of all datasets\n\n :param X: Dataset of samples\n :return: Normalized dataset\n \"\"\"\n return normalize(X, axis=1)\n\n def fit(self, X, Y, normalize=True):\n \"\"\"\n Training the regularized CCA\n\n :param X: array-like of e.g. 
neural Codes\n :param Y: array-like of respective PHOC\n \"\"\"\n # sanity\n X, Y = map(sanity_util.np_arr, [X, Y])\n # normalization\n if normalize:\n X, Y = map(self.norm, [X, Y])\n # training regularized CCA\n vdata = [X, Y]\n #vdata = np.array(vdata)\n self.cca.train(vdata)\n # weights of bases (used to transform into subspace)\n self.weights_X, self.weights_PHOC = deepcopy(self.cca.ws)\n # transforming PHOC of lexicon\n _, self.phoc_trans = self.transform(X, self.phoc)\n\n def transform(self, X, Y, normalize=True):\n \"\"\"\n Implementation of the missing transform method in pyrcca\n\n :param X: Set of test samples of the X dataset\n :param Y: Set of test samples of the Y dataset\n :param normalize: Indicates whether to apply normalization after estimation\n :return: Transformed X and Y datasets in subspace\n \"\"\"\n # sanity\n X, Y = map(sanity_util.np_arr, [X, Y])\n # normalization\n if normalize:\n X, Y = map(self.norm, [X, Y])\n # transformation\n transformed = [X.dot(self.weights_X), Y.dot(self.weights_PHOC)]\n # final normalization\n if normalize:\n X_trans, Y_trans = map(self.norm, transformed)\n #todo\n else:\n X_trans, Y_trans = transformed\n return X_trans, Y_trans\n\n def estimate_set(self, X, normalize=True):\n \"\"\"\n Estimation of an entire Set. This should work better, than estimating samples individually, due to disparity in\n the normalization of the attributes.\n The dataset X and the PHOC of the lexicon will be transformed into the subspace in batches of the same size, to\n have simmilarly normalization behaviour.\n\n :param X: Queries, to be estimated\n :param normalize: Indicates whether to apply normalization after estimation\n :return: List of estimated words\n \"\"\"\n dists = self.process_of_measure(X, self.phoc, normalize=normalize)\n idcs = np.argmin(dists, axis=1)\n # using a local variable, as self.words is treated as a function and would create loads of deep copies otherwise\n words = self.words\n return [words[idx] for idx in idcs]\n\n def process_of_measure(self, X, compare, normalize=True):\n # sanitize X set\n X = sanity_util.np_arr(X)\n X_trans, phoc_space = self.transform(X, compare, normalize=normalize)\n # nearest neighbour search in subspace\n if self.metric == Metrices.MAHALANOBIS:\n str_metric = 'mah'\n elif self.metric == Metrices.EUCLIDEAN:\n str_metric = 'euclidean'\n elif self.metric == Metrices.COSINE:\n str_metric = 'cosine'\n else:\n str_metric = 'cosine'\n dists = cdist(X_trans, phoc_space, metric=str_metric)\n return dists\n\n def nn_search_idcs(self, X, Y, metric=Metrices.COSINE):\n \"\"\"\n Searching for the nearest neighbours of X in Y\n\n :param X: dataset to search nearest neighbours for\n :param Y: dataset to search for nearest neighbours in\n :param metric: metric to be used (see :class:`Metrices`)\n :return: a list of indices for nearest neighbours and a list of respective distances\n \"\"\"\n if metric == Metrices.MAHALANOBIS:\n str_metric = 'mah'\n elif metric == Metrices.EUCLIDEAN:\n str_metric = 'euclidean'\n elif metric == Metrices.COSINE:\n str_metric = 'cosine'\n else:\n str_metric = 'cosine'\n # flattened distances\n dists = cdist(X, Y, metric=str_metric)\n # minium per row\n idcs = np.argmin(dists, axis=1)\n return idcs, dists[np.arange(len(dists)), idcs]\n\n def save(self, dir, name='estimator'):\n super().save(dir, name)\n # additionally keeping track of configuration\n file_config = sanity_util.unique_file_name(dir, name, '.json')\n with open(file_config, 'w') as f_config:\n json.dump({'reg': self.reg, 'n_dim': 
self.n_dim}, f_config)\n\n","sub_path":"src/estimation/cca.py","file_name":"cca.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166139087","text":"\nimport random\n\nclass ConsecutiveCategoryFixer():\n def getConsecutiveCategory(self, data):\n indices = []\n for i in range(len(data) - 1):\n current = data[i]\n succeeding = data[i + 1]\n if self.isConsecutiveCategory(current, succeeding):\n indices.append(i)\n if self.isConsecutiveCategory(data[-1], data[0]):\n indices.append(len(data)-1)\n return indices\n \n def checkConsecutiveCategory(self, data):\n indices = self.getConsecutiveCategory(data)\n if len(indices) > 0:\n self.switch(data, indices[0])\n \n def isConsecutiveCategory(self, a,b):\n if a.getCategory() == b.getCategory():\n if a.getCompany() == b.getCompany():\n return True\n return False\n \n def switch(self, data, i):\n index = random.randint(0, len(data)-1)\n tmp = data[i]\n data[i] = data[index]\n data[index] = tmp\n","sub_path":"SortingAlgorithm/optimizer/category/consecutive_category_fixer.py","file_name":"consecutive_category_fixer.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646989945","text":"#\n#\n# 3DE4.script.name:\t3DE_obj_ImportPose...\n#\n# 3DE4.script.version:\tv1.0\n#\n# 3DE4.script.gui:\tMain Window::3DE4::File::Import\n#\n# 3DE4.script.comment:\tImports Pose by reading .pose file...\n#\n# Written for 3DE4 by Patcha Saheb (patchasaheb@gmail.com)\n\n\nfrom vl_sdv import *\n\npg\t= tde4.getCurrentPGroup()\ncam = tde4.getCurrentCamera()\nframe = tde4.getCurrentFrame(cam)\n\n\nif pg!=None and cam !=None and tde4.getPGroupType(pg)==\"OBJECT\" and tde4.getPGroupType(pg) != \"MOCAP\":\n\t\n\treq\t= tde4.createCustomRequester()\n\ttde4.addFileWidget(req,\"file_browser\",\"Filename...\",\"*.pose\")\n\tret\t= tde4.postCustomRequester(req,\"3DE_obj_Import Pose...\",500,120,\"Ok\",\"Cancel\")\n\tif ret==1:\n\t\tpath\t= tde4.getWidgetValue(req,\"file_browser\")\n\t\tif path!=None:\n\t\t\t\n\t\t\tf\t= open(path,\"r\")\n\t\t\tif not f.closed:\n\t\t\t\tstring\t= f.readline()\n\t\t\t\ta\t= string.split()\n\t\t\t\tif len(a) == 6:\n\t\t\t\t\t\n\t\t\t\t\t# read values from .pose file..\n\t\t\t\t\tpos_x\t= float(a[0])\n\t\t\t\t\tpos_y\t= float(a[1])\n\t\t\t\t\tpos_z\t= float(a[2])\n\t\t\t\t\trot_x\t= float(a[3])\n\t\t\t\t\trot_y\t= float(a[4])\n\t\t\t\t\trot_z\t= float(a[5])\n\t\t\t\t\t\n\t\t\t\t\t# matrix 3D...\n\t\t\t\t\trot_x\t= (rot_x*3.141592654)/180.0\r\n\t\t\t\t\trot_y\t= (rot_y*3.141592654)/180.0\r\n\t\t\t\t\trot_z\t= (rot_z*3.141592654)/180.0\n\t\t\t\t\tr3d\t= mat3d(rot3d(rot_x,rot_y,rot_z,VL_APPLY_ZXY))\r\n\t\t\t\t\tr3d0\t= [[r3d[0][0],r3d[0][1],r3d[0][2]],[r3d[1][0],r3d[1][1],r3d[1][2]],[r3d[2][0],r3d[2][1],r3d[2][2]]]\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t# setting point group position and rotation...\n\t\t\t\t\tnewvalues = tde4.convertObjectPGroupTransformationWorldTo3DE(cam, frame, r3d0, [pos_x,pos_y,pos_z], 1.0, 0)\n\t\t\t\t\t\n\t\t\t\t\ttde4.setPGroupPosition3D(pg,cam,frame,newvalues[1])\n\t\t\t\t\ttde4.setPGroupRotation3D(pg,cam,frame,newvalues[0])\n\t\t\t\t\ttde4.setPGroupPostfilterMode(pg,\"POSTFILTER_OFF\")\n\t\t\t\t\ttde4.filterPGroup(pg,cam)\n\t\t\t\t\ttde4.setPGroupScale3D(pg,1.0)\n\t\t\t\t\ttde4.updateGUI()\n\t\t\t\telse: \n\t\t\t\t\ttde4.postQuestionRequester(\"3DE Import Pose...\",\"invalid .pose file\",\"ok\")\nelse:\n\ttde4.postQuestionRequester(\"3DE 
Import Pose...\",\"Error...Object PGroup not found.\", \"ok\")\t\t\t\n\n","sub_path":"r6_r7/old/patchaPoseFile/3DE_obj_ImportPose.py","file_name":"3DE_obj_ImportPose.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"134720206","text":"#!/usr/local/bin/python2.7\n# encoding: utf-8\n'''\nWN_User_Interact.Interaction_Modules -- shortdesc\n\nWN_User_Interact.Interaction_Modules is a description\n\nIt defines classes_and_methods\n\n@author: user_name\n\n@copyright: 2016 organization_name. All rights reserved.\n\n@license: license\n\n@contact: user_email\n@deffield updated: Updated\n''' \n\nfrom CheckandError.Check import InputCheck,DateCheck\n#from .CollisionSituation import CollisionSituation\nfrom WN_struct_building import StructureBuilding\nfrom Methods.Methods_BY_Level import MethodsMenu_Situation,MethodMenu_Contributing\nfrom CheckandError.DefinedError import DATEEndBeforeBegin,InvalidDate,ExitALLProgram\nfrom CheckandError.Check import NameCheck,FirstCheck\nfrom CheckandError.DefinedError import GoingBack,InvalidFirst\nimport os\nimport shutil\ndef ProgramIntroduction():\n print('Welcome to NYC Motor Vehicle Collisions Observation System.')\n print('We provide an analysis of the historical trends and features of auto collision \\n and other associated demographic and geographic information in NYC. \\n There is also an interactive maps which may help you better observe \\n the whole traffic collision situation in NYC.')\n print('DATA SOURCE:')\n print('DATA INTRODUCTION:')\n print(\"COPY RIGHT:\")\n print(\"FUNCTIONS\")\n print('EXIT WAY')\n print('HELP MENUAL')\n print(\"We have several perspectives for your to explore.They are in two categories:\")\n print(\"Area: City; Borough; Precinct\")\n print(\"Type of Roadways: Highway; Tunnel; Bridge; Road\")\n print(\"There are available methods under the specific perspective.\")\n print(\"Input Examples: \")\n print(\"...\")\n print(\"Exit by input : Exit\")\n print(\"Now you can begin with it.\")\ndef SetTimeInterval(savepath):\n print(\"You can set a period for data loading and structure building.\")\n print('ALL following analysis will be based on this period.')\n print('Results will be save in a folder named by this period under results folder.')\n print('Example:201501_201601')\n \n while True:\n try:\n print(\"Longest Time Interval is 201501-201612.\")\n begintime=input('Please input the beginning date (Format: YYYYMM, Example: 201501):')\n if begintime=='Exit':\n raise ExitALLProgram\n endtime=input('Please input the ending date (Format: YYYYMM, Example: 201501):')\n if endtime=='Exit':\n raise ExitALLProgram\n TimeBegin,TimeEnd=DateCheck(begintime, endtime)\n path=savepath+begintime+'_'+endtime\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return TimeBegin,TimeEnd, path\n except DATEEndBeforeBegin:\n print(\"Begining date should not later than ending date!\")\n except InvalidDate:\n print(\"Invalid Input!\")\n \ndef Mainmenu():\n SavePath_ALL=os.getcwd()\n SavePath_ALL=''.join([SavePath_ALL,'/results/'])\n TimeBegin,TimeEnd, SavePath = SetTimeInterval(SavePath_ALL)\n NYC = StructureBuilding(TimeBegin,TimeEnd)\n #print(NYC.Borough_Dict)\n print(\"There are three types of information: \\n 1-CollisionSituation \\n 2-CollisionContributingFactors_Vehicle \\n 3-CollisionSimulation\")\n InformationType = input(\"You can choose one by input the number before the type above:\")\n 
MenuInformation={-2:Mainmenu, \n -1:SetTimeInterval,\n 1:CollisionSituation,\n 2:CollisionContributingFactors_Vehicle}\n inputset=MenuInformation.keys()\n InformationType = InputCheck(inputset, InformationType)\n if InformationType>0:\n MenuInformation[InformationType](NYC,SavePath)\n else: \n MenuInformation[InformationType]()\n #MenuInformation[InformationType](NYC) if InformationType>0 else MenuInformation[InformationType]()\ndef CollisionContributingFactors_Vehicle(NYC,SavePath):\n Interaction=Contributing_Interaction(NYC)\n Level,Method,name,nameFlag=Interaction.Level_selection()\n \ndef CollisionSituation(NYC,SavePath):\n Interaction=Situation_Interaction(NYC,SavePath)\n Level,Method,name,nameFlag=Interaction.Level_selection()\n\nclass Situation_Interaction():\n def __init__(self,NYC,SavePath):\n self.menu=MethodsMenu_Situation()\n self.data=NYC\n self.SavePath=SavePath\n def Level_selection(self,Level=[],Method=[],name=[],nameFlag=0):\n print(\"Available Perspectives: \\n 1-City \\n 2-Borough \\n 3-Precinct \\n 4-Highway \\n 5-Tunnel \\n 6-Bridge \\n 7-Road\")\n Level = input('Please input the number before the perspective you want to explore:')\n Level = InputCheck(range(1,8), Level)\n LevelFlow={-2:self.Level_selection,\n -1:Mainmenu }\n LevelFlow.update(dict.fromkeys([1,2,3,4,5,6,7],self.MethodMenu))\n \n LevelName={-2:[],1:'City',2:'Borough',3:'Precinct',4:'Highway',5:'Tunnel',6:'Bridge',7:'Road'}\n \n if Level==-1:\n LevelFlow[Level]()\n else:\n Level, Method, name, nameFlag=LevelFlow[Level](LevelName[Level])\n #LevelFlow[Level]() if Level==-1 else Level, Method, name, nameFlag=LevelFlow[Level](LevelName[Level])\n \n \n return Level, Method, name, nameFlag\n \n def MethodMenu(self,Level,Method=[],name=[],nameFlag=0):\n \n print(\"There are methods for this level:\")\n print ('%s' % '\\n'.join(self.menu.List[Level][nameFlag]))\n \n Method = input('Please input the number before the method you want to use:')\n Method = InputCheck(self.menu.AvailableSet[Level][nameFlag], Method)\n \n Flow0={-2:self.MethodMenu,\n -1:self.Level_selection,\n 0:self.SpecificInsight}\n Flow0.update(dict.fromkeys([1,2,3,4,5,6],self.MethodPresent))\n Flow1={-2:self.MethodMenu, #with name\n -1:self.SpecificInsight} #back to no name\n Flow1.update(dict.fromkeys([1,2,3,4,5,6],self.MethodPresent))\n methodFlow={0:Flow0,1:Flow1}\n #if ((nameFlag==1) and (Method==-1)):\n # Level, Method, name, nameFlag = methodFlow[nameFlag][Method](Level, Method, [], 0)\n #else:\n Level, Method, name, nameFlag = methodFlow[nameFlag][Method](Level, Method, name, nameFlag)\n \n return Level, Method, name, nameFlag\n def SpecificInsight(self,Level,Method=[],name=[],nameFlag=0):\n SpecificMenu={'Borough':Borough_Specific,\n 'Precinct':Precinct_Specific,\n 'Highway':Highway_Specific,\n 'Bridge':Bridge_Specific,\n 'Tunnel':Tunnel_Specific,\n 'Road':Road_Specific}\n \n InputName=SpecificMenu[Level](self.data)\n if InputName==-1:\n Level, Method, name, nameFlag=self.MethodMenu(Level)\n else:\n Level, Method, name, nameFlag=self.MethodMenu(Level,[],InputName,1)\n \n #Level, Method, name, nameFlag=self.MethodMenu(Level) if InputName==-1 else Level, Method, name, nameFlag=self.MethodMenu(Level,[],InputName,1)\n \n return Level, Method, name, nameFlag\n def MethodPresent(self,Level,Method=[],name=[],nameFlag=0):\n self.menu.FunctionINIT_Situation(self.data,self.SavePath)\n self.menu.FunctionList[Method](Level,name)\n Level, Method, name, nameFlag=self.MethodMenu(Level,[],name,nameFlag)\n return Level, Method, name, nameFlag\n \nclass 
Contributing_Interaction(Situation_Interaction):\n def __init__(self,NYC,SavePath):\n self.menu=MethodMenu_Contributing()\n self.data=NYC\n self.SavePath=SavePath\n def MethodPresent(self,Level,Method=[],name=[],nameFlag=0):\n self.menu.FunctionINIT_Situation(self.data,self.SavePath)\n Func_Menu={1: self.Influencing, 2: self.Relation}\n Level, Method, name, nameFlag=Func_Menu(Level,Method,name,nameFlag)\n return Level, Method, name, nameFlag\n def Influencing(self,Level,Method=[],name=[],nameFlag=0):\n print('Please Choose one Influencer:')\n for key in self.menu.Influencer.key():\n print('\\n'.join([key,self.menu.Influencer[key]]))\n \n self.menu.FunctionList[Method](Influencer, SeverityMeasure, Level,name)\n def Relation(self,Level,Method=[],name=[],nameFlag=0):\n self.menu.FunctionList[Method](Influencer0, Influencer1, Level,name)\n \n\n \ndef Borough_Specific(NYC):\n print('You can choose from:')\n Bo_Catalog=NYC.boroughCatalog()\n print('\\n'.join(Bo_Catalog))\n name = input('Please input the short name(two letters) before the name:')\n name = NameCheck(NYC.Borough_Dict.keys(),name)\n if name==-2: \n return Borough_Specific(NYC)\n else:\n return name\ndef Precinct_Specific(NYC):\n print(\"Precinct are grouped by Borough.\")\n print(\"Please specific the Borough First.\")\n while True:\n try:\n print('You can choose a bourough from:')\n Bo_Catalog=NYC.boroughCatalog()\n print('\\n'.join(Bo_Catalog))\n Bname = input('Please input the short name(two letters) before the name:')\n Bname = FirstCheck(NYC.Borough_Dict.keys(),Bname)\n print(NYC.Borough_Dict[Bname].name+' : ')\n precinctCata=NYC.Borough_Dict[Bname].precinctCatalog()\n print(' \\n'.join(precinctCata))\n while True:\n try:\n name = input('Please input a precinct ID :')\n name = FirstCheck(NYC.Borough_Dict[Bname].precinctList.keys(),name)\n return name\n except GoingBack:\n break\n except InvalidFirst:\n pass\n except GoingBack:\n return -1\n except InvalidFirst:\n pass\n \ndef Bridge_Specific(NYC):\n print('You can choose from:')\n print('\\n'.join(NYC.bridgeCatalog()))\n name = input('Please input the name:')\n name = NameCheck(NYC.Bridge_Dict.keys(),name)\n if name==-2: \n return Bridge_Specific(NYC)\n else:\n return name\ndef Tunnel_Specific(NYC):\n print('You can choose from:')\n print('\\n'.join(NYC.tunnelCatalog()))\n name = input('Please input the name:')\n name = NameCheck(NYC.Tunnel_Dict.keys(),name)\n if name==-2: \n return Tunnel_Specific(NYC)\n else:\n return name\ndef Highway_Specific(NYC):\n print('You can choose from:')\n print('\\n'.join(NYC.highwayCatalog()))\n name = input('Please input the name:')\n name = NameCheck(NYC.Highway_Dict.keys(),name)\n if name==-2: \n return Highway_Specific(NYC)\n else:\n return name\ndef Road_Specific(NYC):\n print('Please specify the first character of the road you want to explore.')\n print('You can choose from ABCDEFGHIGKLMNOPQRSTUVWXYZ or *Other')\n \n while True:\n try:\n \n FirstC=input('Input a CAPITAL letter or *Other: ')\n FirstC=FirstCheck('ABCDEFGHIGKLMNOPQRSTUVWXYZ*',FirstC)\n roadCata=NYC.roadCatalog()\n print('You can choose from:')\n print('\\n'.join(roadCata[FirstC]))\n while True:\n try:\n name = input('Please input the name:')\n name = FirstCheck(NYC.Road_Dict.keys(),name)\n return name\n except GoingBack:\n break\n except InvalidFirst:\n pass\n\n except GoingBack:\n return -1\n except InvalidFirst:\n pass \n\n \n \n \n \n 
\n\n\n\n","sub_path":"Final_Project_1007/WN_User_Interact/Interaction_Modules.py","file_name":"Interaction_Modules.py","file_ext":"py","file_size_in_byte":11708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70543671","text":"#!/usr/bin/python\n# coding=utf-8\n\"\"\"\nPersonal blog markdown generator: http://beginman.cn\ncommand: genblog.sh genblog.py \"file name\" \"file title\" category\nWatch out for newline-character issues\n :copyright: (c) 2015 by fangpeng.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n__date__ = '11/10/15'\nimport sys\nimport os\nimport datetime\n\ndef genmd():\n ds = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n # argv[1..3] (file name, title, category) are all used below\n if len(sys.argv) > 3:\n title = sys.argv[1]\n path = os.path.join(os.getcwd(), ds + '-' + title + '.md')\n if not os.path.exists(path):\n f = open(path, 'w')\n f.write('---\\r\\nlayout: post\\r\\ntitle: \"%s\"\\r\\ndescription: \"%s\"\\r\\ncategory: \"%s\"\\r\\ntags: [%s]\\r\\n---\\r\\n{%% include JB/setup %%}\\r\\n'\n % (sys.argv[2], sys.argv[2], sys.argv[3], sys.argv[3]))\n f.close()\n\n\nif __name__ == \"__main__\":\n genmd()\n","sub_path":"funny/genblogmd.py","file_name":"genblogmd.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"67890958","text":"import socket\nimport struct\nimport subprocess\nfrom fcntl import ioctl\n\nclass Iface:\n\n SIOCGIFMTU = 0x8921\n\n SIOCSIFMTU = 0x8922\n\n default_mtu = 1500\n\n def __init__(self, ifname):\n\n self.ifname = ifname\n self.old_mtu = self.get_mtu()\n\n def get_mtu(self):\n '''Use socket ioctl call to get MTU size'''\n s = socket.socket(type=socket.SOCK_DGRAM)\n ifr = self.ifname + '\\x00'*(32-len(self.ifname))\n try:\n ifs = ioctl(s, self.SIOCGIFMTU, ifr)\n # the MTU sits in bytes 16-17 of the returned ifreq, little-endian\n mtu = struct.unpack('<H', ifs[16:18])[0]\n\ndef set_resources(food, wood, gold, stone, age, condition):\n # age precondition for the requested age\n if age == 1:\n script.write(r\"(current-age == dark-age)\")\n script.write(r\"(strategic-number 88 < 2)\")\n elif age == 1.5:\n script.write(r\"(current-age == dark-age)\")\n script.write(r\"(strategic-number 88 > 1)\")\n elif age == 2:\n script.write(r\"(current-age == feudal-age)\")\n script.write(r\"(strategic-number 88 < 3)\")\n elif age == 2.5:\n script.write(r\"(current-age == feudal-age)\")\n script.write(r\"(strategic-number 88 > 2)\")\n elif age == 3:\n script.write(r\"(current-age == castle-age)\")\n script.write(r\"(strategic-number 88 < 4)\")\n elif age == 3.5:\n script.write(r\"(current-age == castle-age)\")\n script.write(r\"(strategic-number 88 > 3)\")\n elif age == 4:\n script.write(r\"(current-age == imperial-age)\")\n else:\n print(\"Invalid age selected. Probably my fault.\")\n script.write(r\"=>\")\n script.write(foodcode)\n script.write(woodcode)\n script.write(goldcode)\n script.write(stonecode)\n script.write(r\")\")\n\nif strat == \"a\" or strat == \"b\":\n condition = \"building-type-count-total barracks > 0\"\n food = input(\"What food percentage would you like for flush? \")\n wood = input(\"What wood percentage would you like for flush? \")\n gold = input(\"What gold percentage would you like for flush? 
\")\n stone = 0\n age = 2\n set_resources(food, wood, gold, stone, age, condition)\n\nscript.close()\n\n\n","sub_path":"The_Unknown/generator/backup1.py","file_name":"backup1.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185715998","text":"import csv\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nopen_file = open(\"death_valley_2018_simple.csv\", \"r\")\ncsv_file = csv.reader(open_file, delimiter=\",\")\nheader_row = next(csv_file)\n\n#print(type(header_row))\n\nfor index,column_header in enumerate(header_row):\n print(index,column_header)\n\n#Testing to convert date from string\nmydate = datetime.strptime('2018-07-01', '%Y-%m-%d')\n#print(mydate)\n\n\ndates = []\nhighs = []\nlows = []\n\nfor i in csv_file:\n\n try:\n the_date = datetime.strptime(i[2], '%Y-%m-%d')\n high = int(i[4])\n low = int(i[5])\n \n except ValueError:\n # report the raw date field: the_date is not yet set when the date itself fails to parse\n print(f\"Missing data for {i[2]}\")\n else:\n highs.append(high)\n lows.append(low)\n dates.append(the_date)\n\nopen_file.close()\n\n#print(highs)\n#print(dates)\n#print(lows)\n\n\n\nfig = plt.figure()\n\nplt.title(\"Daily high and low temperature - 2018\\nDeath Valley\", fontsize = 16)\nplt.xlabel(\"Date\", fontsize = 12)\nplt.ylabel(\"temperature (F)\", fontsize = 12)\nplt.tick_params(axis = \"both\", which = \"major\", labelsize = 12)\n\nplt.plot(dates,highs, c = 'red', alpha = 0.5)\nplt.plot(dates, lows, c = 'blue', alpha = 0.5)\n\nplt.fill_between(dates,highs,lows, facecolor = 'blue', alpha = 0.1)\n\nfig.autofmt_xdate()\n\n\nplt.show()\n\n\nplt.subplot(2,1,1)\nplt.plot(dates,highs, c = 'red')\nplt.title(\"Highs\")\n\nplt.subplot(2,1,2)\nplt.plot(dates, lows, c = 'blue')\nplt.title(\"Lows\")\n\nplt.suptitle(\"Highs and Lows of Death Valley\")\nplt.show()\n\n","sub_path":"sitka4.py","file_name":"sitka4.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488622862","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom miscellaneous import Miscellaneous\nfrom measures import Measures\n\nclass Neuron():\n\t# constructor\n\tdef __init__(self, weights, bias = False, theta = 0):\n\t\tself.weights = weights\n\t\tself.bias = bias\n\t\tself.theta = theta\n\n\tdef process(self, data):\n\t\t# prepend the bias input x0 = 1 when a bias weight is present\n\t\tif self.bias:\n\t\t\tdata = np.insert(data, 0, 1)\n\n\t\tself.v = sum([data[i] * self.weights[i] for i in range(len(data))])\n\n\t\treturn self.v\n\n\tdef activate(self, condition):\n\t\tz = condition(self.v, self.theta)\n\n\t\tself.z = z\n\n\t\treturn self.z\n\nif __name__ == '__main__':\n\tdataset = np.array([\n\t\t\t\t[0, 0],\n\t\t\t\t[0, 1],\n\t\t\t\t[1, 0],\n\t\t\t\t[1, 1]\n\t])\n\n\tweights = np.array([1, 1])\n\ttheta = 1\n\n\tMcCullochPitts = Neuron(weights, bias = False, theta = theta)\n\n\treal = np.array([0, 1, 1, 1])\n\tpred = np.array([])\n\n\tfor data in dataset:\n\t\tMcCullochPitts.process(data)\n\t\tMcCullochPitts.activate(lambda v, t: 1 if v >= t else 0)\n\t\tpred = np.append(pred, McCullochPitts.z)\n\n\tm = Measures(real, pred)\n\tprint(\"precision %s\" % m.precision())\n\tprint(\"recall %s\" % m.recall())\n\tprint(\"f1 %s\" % m.f1())\n\tprint(\"accuracy %s\" % m.accuracy())\n\n\tMiscellaneous.printcmatrix(m.cmatrix())\n\n\tx1 = np.linspace(-3, 3, 10)\n\tx2 = np.linspace(-3, 3, 10)\n\n\t# decision boundary: w1*x1 + w2*x2 = theta  =>  x2 = -(w1/w2)*x1 + theta/w2\n\tv = -(weights[0] / weights[1]) * x1 + theta/weights[1]\n\n\tplt.fill_between(x1, v, 3, where = v < 3, color = 'g', alpha = 0.5)\n\n\tones = dataset[np.where(real > 
0)]\n\tzeros = dataset[np.where(real <= 0)]\n\n\tplt.plot(ones[:,0], ones[:,1], 'gs')\n\tplt.plot(zeros[:,0], zeros[:,1], 'ro')\n\tplt.axis([-1, 2, -1, 2])\n\tplt.show()\n\n\tdataset = np.array([\n\t\t\t\t[-1, -1],\n\t\t\t\t[-1, 1],\n\t\t\t\t[ 1, -1],\n\t\t\t\t[ 1, 1]\n\t])\n\n\tweights = np.array([1, 1, 1])\n\tPerceptron = Neuron(weights, bias = True, theta = 0)\n\n\treal = np.array([-1, 1, 1, 1])\n\tpred = np.array([])\n\n\tfor data in dataset:\n\t\tPerceptron.process(data)\n\t\tPerceptron.activate(lambda v, t: 1 if v >= t else -1)\n\t\tpred = np.append(pred, Perceptron.z)\n\n\tm = Measures(real, pred)\n\tprint(\"precision %s\" % m.precision())\n\tprint(\"recall %s\" % m.recall())\n\tprint(\"f1 %s\" % m.f1())\n\tprint(\"accuracy %s\" % m.accuracy())\n\n\tMiscellaneous.printcmatrix(m.cmatrix())","sub_path":"nn-work/Neuron/neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}